/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: user_mad.c 4010 2005-11-09 23:11:56Z roland $
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/rwsem.h>
#include <linux/kref.h>

#include <asm/uaccess.h>
#include <asm/semaphore.h>

#include <rdma/ib_mad.h>
#include <rdma/ib_user_mad.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
	IB_UMAD_MAX_PORTS  = 64,
	IB_UMAD_MAX_AGENTS = 32,

	IB_UMAD_MAJOR      = 231,
	IB_UMAD_MINOR_BASE = 0
};

/*
 * Our lifetime rules for these structs are the following: each time a
 * device special file is opened, we look up the corresponding struct
 * ib_umad_port by minor in the umad_port[] table while holding the
 * port_lock.  If this lookup succeeds, we take a reference on the
 * ib_umad_port's struct ib_umad_device while still holding the
 * port_lock; if the lookup fails, we fail the open().  We drop these
 * references in the corresponding close().
 *
 * In addition to references coming from open character devices, there
 * is one more reference to each ib_umad_device representing the
 * module's reference taken when allocating the ib_umad_device in
 * ib_umad_add_one().
 *
 * When destroying an ib_umad_device, we clear all of its
 * ib_umad_ports from umad_port[] while holding port_lock before
 * dropping the module's reference to the ib_umad_device.  This is
 * always safe because any open() calls will either succeed and obtain
 * a reference before we clear the umad_port[] entries, or fail after
 * we clear the umad_port[] entries.
 */
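
/*
 * To make the counting concrete: for a device with one port and a
 * single process holding umad0 open, the ib_umad_device kref is 2,
 * one reference from the module (taken in ib_umad_add_one()) and
 * one from the open file.  close() drops it to 1, and removing the
 * device drops the final reference and frees the structure.
 */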

struct ib_umad_port {
	struct cdev           *dev;
	struct class_device   *class_dev;

	struct cdev           *sm_dev;
	struct class_device   *sm_class_dev;
	struct semaphore       sm_sem;

	struct rw_semaphore    mutex;
	struct list_head       file_list;

	struct ib_device      *ib_dev;
	struct ib_umad_device *umad_dev;
	int                    dev_num;
	u8                     port_num;
};

struct ib_umad_device {
	int                  start_port, end_port;
	struct kref          ref;
	struct ib_umad_port  port[0];
};

struct ib_umad_file {
	struct ib_umad_port    *port;
	struct list_head	recv_list;
	struct list_head	port_list;
	spinlock_t		recv_lock;
	wait_queue_head_t	recv_wait;
	struct ib_mad_agent    *agent[IB_UMAD_MAX_AGENTS];
	int			agents_dead;
};

struct ib_umad_packet {
	struct ib_mad_send_buf *msg;
	struct list_head   list;
	int		   length;
	struct ib_user_mad mad;
};

static struct class *umad_class;

static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);

static DEFINE_SPINLOCK(port_lock);
static struct ib_umad_port *umad_port[IB_UMAD_MAX_PORTS];
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS * 2);

static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device);

static void ib_umad_release_dev(struct kref *ref)
{
	struct ib_umad_device *dev =
		container_of(ref, struct ib_umad_device, ref);

	kfree(dev);
}

/* caller must hold port->mutex at least for reading */
static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
{
	return file->agents_dead ? NULL : file->agent[id];
}
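
/*
 * Queue @packet on @file's receive list, provided @agent is still one
 * of the file's registered agents; the matching slot is written to
 * packet->mad.hdr.id so userspace can tell which agent the MAD
 * arrived on.  Returns 0 and wakes up readers on success, or 1 if the
 * agent is no longer registered.
 */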
static int queue_packet(struct ib_umad_file *file,
			struct ib_mad_agent *agent,
			struct ib_umad_packet *packet)
{
	int ret = 1;

	down_read(&file->port->mutex);

	for (packet->mad.hdr.id = 0;
	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
	     packet->mad.hdr.id++)
		if (agent == __get_agent(file, packet->mad.hdr.id)) {
			spin_lock_irq(&file->recv_lock);
			list_add_tail(&packet->list, &file->recv_list);
			spin_unlock_irq(&file->recv_lock);
			wake_up_interruptible(&file->recv_wait);
			ret = 0;
			break;
		}

	up_read(&file->port->mutex);

	return ret;
}
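
/*
 * Send completion handler for MADs posted via ib_umad_write().  The
 * address handle and send buffer are always released here; if the
 * send timed out waiting for a response, a stub packet with
 * ETIMEDOUT in its status field is queued so the reader learns of
 * the failure.
 */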
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *send_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *timeout;
	struct ib_umad_packet *packet = send_wc->send_buf->context[0];

	ib_destroy_ah(packet->msg->ah);
	ib_free_send_mad(packet->msg);

	if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
		timeout = kzalloc(sizeof *timeout + IB_MGMT_MAD_HDR, GFP_KERNEL);
		if (!timeout)
			goto out;

		timeout->length         = IB_MGMT_MAD_HDR;
		timeout->mad.hdr.id     = packet->mad.hdr.id;
		timeout->mad.hdr.status = ETIMEDOUT;
		memcpy(timeout->mad.data, packet->mad.data,
		       sizeof (struct ib_mad_hdr));

		if (!queue_packet(file, agent, timeout))
			return;
	}
out:
	kfree(packet);
}
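
/*
 * Receive completion handler: coalesce the (possibly multi-segment
 * RMPP) MAD into a freshly allocated packet, record the wire
 * information from the work completion, and queue it for
 * ib_umad_read().  Note that GRH parsing is still a stub.
 */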
static void recv_handler(struct ib_mad_agent *agent,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet;
	int length;

	if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
		goto out;

	length = mad_recv_wc->mad_len;
	packet = kzalloc(sizeof *packet + length, GFP_KERNEL);
	if (!packet)
		goto out;

	packet->length = length;

	ib_coalesce_recv_mad(mad_recv_wc, packet->mad.data);

	packet->mad.hdr.status      = 0;
	packet->mad.hdr.length      = length + sizeof (struct ib_user_mad);
	packet->mad.hdr.qpn         = cpu_to_be32(mad_recv_wc->wc->src_qp);
	packet->mad.hdr.lid         = cpu_to_be16(mad_recv_wc->wc->slid);
	packet->mad.hdr.sl          = mad_recv_wc->wc->sl;
	packet->mad.hdr.path_bits   = mad_recv_wc->wc->dlid_path_bits;
	packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
	if (packet->mad.hdr.grh_present) {
		/* XXX parse GRH */
		packet->mad.hdr.gid_index     = 0;
		packet->mad.hdr.hop_limit     = 0;
		packet->mad.hdr.traffic_class = 0;
		memset(packet->mad.hdr.gid, 0, 16);
		packet->mad.hdr.flow_label    = 0;
	}

	if (queue_packet(file, agent, packet))
		kfree(packet);

out:
	ib_free_recv_mad(mad_recv_wc);
}
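
/*
 * read() returns the next queued MAD prefixed by its struct
 * ib_user_mad header, blocking unless O_NONBLOCK is set.  If the
 * user buffer is too small for a coalesced RMPP MAD, the first
 * segment is copied and -ENOSPC returned; mad.hdr.length then tells
 * userspace how much to ask for.  On any error the packet is
 * requeued rather than dropped.
 */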
static ssize_t ib_umad_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	ssize_t ret;

	if (count < sizeof (struct ib_user_mad) + sizeof (struct ib_mad))
		return -EINVAL;

	spin_lock_irq(&file->recv_lock);

	while (list_empty(&file->recv_list)) {
		spin_unlock_irq(&file->recv_lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->recv_wait,
					     !list_empty(&file->recv_list)))
			return -ERESTARTSYS;

		spin_lock_irq(&file->recv_lock);
	}

	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
	list_del(&packet->list);

	spin_unlock_irq(&file->recv_lock);

	if (count < packet->length + sizeof (struct ib_user_mad)) {
		/* Return length needed (and first RMPP segment) if too small */
		if (copy_to_user(buf, &packet->mad,
				 sizeof (struct ib_user_mad) + sizeof (struct ib_mad)))
			ret = -EFAULT;
		else
			ret = -ENOSPC;
	} else if (copy_to_user(buf, &packet->mad,
				packet->length + sizeof (struct ib_user_mad)))
		ret = -EFAULT;
	else
		ret = packet->length + sizeof (struct ib_user_mad);

	if (ret < 0) {
		/* Requeue packet */
		spin_lock_irq(&file->recv_lock);
		list_add(&packet->list, &file->recv_list);
		spin_unlock_irq(&file->recv_lock);
	} else
		kfree(packet);

	return ret;
}
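
/*
 * write() sends one MAD on the agent selected by mad.hdr.id.  An
 * address handle is built from the routing fields of the userspace
 * header, RMPP is enabled when the class carries an RMPP header and
 * the active flag is set, and for request methods the upper 32 bits
 * of the transaction ID are replaced with the agent's hi_tid so that
 * responses can be matched back to this agent.
 */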
static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	struct ib_mad_agent *agent;
	struct ib_ah_attr ah_attr;
	struct ib_ah *ah;
	struct ib_rmpp_mad *rmpp_mad;
	u8 method;
	__be64 *tid;
	int ret, length, hdr_len, copy_offset;
	int rmpp_active, has_rmpp_header;

	if (count < sizeof (struct ib_user_mad) + IB_MGMT_RMPP_HDR)
		return -EINVAL;

	length = count - sizeof (struct ib_user_mad);
	packet = kmalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
	if (!packet)
		return -ENOMEM;

	if (copy_from_user(&packet->mad, buf,
			   sizeof (struct ib_user_mad) + IB_MGMT_RMPP_HDR)) {
		ret = -EFAULT;
		goto err;
	}

	/* hdr.id is unsigned, so only the upper bound needs checking */
	if (packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
		ret = -EINVAL;
		goto err;
	}

	down_read(&file->port->mutex);

	agent = __get_agent(file, packet->mad.hdr.id);
	if (!agent) {
		ret = -EINVAL;
		goto err_up;
	}

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid          = be16_to_cpu(packet->mad.hdr.lid);
	ah_attr.sl            = packet->mad.hdr.sl;
	ah_attr.src_path_bits = packet->mad.hdr.path_bits;
	ah_attr.port_num      = file->port->port_num;
	if (packet->mad.hdr.grh_present) {
		ah_attr.ah_flags = IB_AH_GRH;
		memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
		ah_attr.grh.flow_label    = be32_to_cpu(packet->mad.hdr.flow_label);
		ah_attr.grh.hop_limit     = packet->mad.hdr.hop_limit;
		ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
	}

	ah = ib_create_ah(agent->qp->pd, &ah_attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_up;
	}

	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
	if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
		hdr_len = IB_MGMT_SA_HDR;
		copy_offset = IB_MGMT_RMPP_HDR;
		has_rmpp_header = 1;
	} else if (rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START &&
		   rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END) {
		hdr_len = IB_MGMT_VENDOR_HDR;
		copy_offset = IB_MGMT_RMPP_HDR;
		has_rmpp_header = 1;
	} else {
		hdr_len = IB_MGMT_MAD_HDR;
		copy_offset = IB_MGMT_MAD_HDR;
		has_rmpp_header = 0;
	}

	if (has_rmpp_header)
		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
			      IB_MGMT_RMPP_FLAG_ACTIVE;
	else
		rmpp_active = 0;

	/* Validate that the management class can support RMPP */
	if (rmpp_active && !agent->rmpp_version) {
		ret = -EINVAL;
		goto err_ah;
	}

	packet->msg = ib_create_send_mad(agent,
					 be32_to_cpu(packet->mad.hdr.qpn),
					 0, rmpp_active,
					 hdr_len, length - hdr_len,
					 GFP_KERNEL);
	if (IS_ERR(packet->msg)) {
		ret = PTR_ERR(packet->msg);
		goto err_ah;
	}

	packet->msg->ah         = ah;
	packet->msg->timeout_ms = packet->mad.hdr.timeout_ms;
	packet->msg->retries    = packet->mad.hdr.retries;
	packet->msg->context[0] = packet;

	/* Copy MAD headers (RMPP header in place) */
	memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR);
	/* Now, copy rest of message from user into send buffer */
	if (copy_from_user(packet->msg->mad + copy_offset,
			   buf + sizeof (struct ib_user_mad) + copy_offset,
			   length - copy_offset)) {
		ret = -EFAULT;
		goto err_msg;
	}

	/*
	 * If userspace is generating a request that will generate a
	 * response, we need to make sure the high-order part of the
	 * transaction ID matches the agent being used to send the
	 * MAD.
	 */
	method = ((struct ib_mad_hdr *) packet->msg->mad)->method;

	if (!(method & IB_MGMT_METHOD_RESP)       &&
	    method != IB_MGMT_METHOD_TRAP_REPRESS &&
	    method != IB_MGMT_METHOD_SEND) {
		tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
				   (be64_to_cpup(tid) & 0xffffffff));
	}

	ret = ib_post_send_mad(packet->msg, NULL);
	if (ret)
		goto err_msg;

	up_read(&file->port->mutex);

	return count;

err_msg:
	ib_free_send_mad(packet->msg);

err_ah:
	ib_destroy_ah(ah);

err_up:
	up_read(&file->port->mutex);

err:
	kfree(packet);
	return ret;
}
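
/* A umad file is always writable; it is readable once a MAD is queued. */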
static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ib_umad_file *file = filp->private_data;

	/* we will always be able to post a MAD send */
	unsigned int mask = POLLOUT | POLLWRNORM;

	poll_wait(filp, &file->recv_wait, wait);

	if (!list_empty(&file->recv_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}
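
/*
 * IB_USER_MAD_REGISTER_AGENT: register a MAD agent on userspace's
 * behalf.  A free slot in file->agent[] is chosen, the agent is
 * registered with the MAD layer (a zero mgmt_class means no
 * unsolicited MADs are requested), and the slot number is copied
 * back into the caller's struct ib_user_mad_reg_req.
 */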
static int ib_umad_reg_agent(struct ib_umad_file *file, unsigned long arg)
{
	struct ib_user_mad_reg_req ureq;
	struct ib_mad_reg_req req;
	struct ib_mad_agent *agent;
	int agent_id;
	int ret;

	down_write(&file->port->mutex);

	if (!file->port->ib_dev) {
		ret = -EPIPE;
		goto out;
	}

	if (copy_from_user(&ureq, (void __user *) arg, sizeof ureq)) {
		ret = -EFAULT;
		goto out;
	}

	if (ureq.qpn != 0 && ureq.qpn != 1) {
		ret = -EINVAL;
		goto out;
	}

	for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
		if (!__get_agent(file, agent_id))
			goto found;

	ret = -ENOMEM;
	goto out;

found:
	if (ureq.mgmt_class) {
		req.mgmt_class         = ureq.mgmt_class;
		req.mgmt_class_version = ureq.mgmt_class_version;
		memcpy(req.method_mask, ureq.method_mask, sizeof req.method_mask);
		memcpy(req.oui,         ureq.oui,         sizeof req.oui);
	}

	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
				      ureq.rmpp_version,
				      send_handler, recv_handler, file);
	if (IS_ERR(agent)) {
		ret = PTR_ERR(agent);
		goto out;
	}

	if (put_user(agent_id,
		     (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) {
		ret = -EFAULT;
		ib_unregister_mad_agent(agent);
		goto out;
	}

	file->agent[agent_id] = agent;
	ret = 0;

out:
	up_write(&file->port->mutex);
	return ret;
}
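
/*
 * IB_USER_MAD_UNREGISTER_AGENT: clear the given agent slot under the
 * port mutex, but call ib_unregister_mad_agent() only after dropping
 * it, since the completion handlers take the same mutex through
 * queue_packet().
 */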
static int ib_umad_unreg_agent(struct ib_umad_file *file, unsigned long arg)
{
	struct ib_mad_agent *agent = NULL;
	u32 id;
	int ret = 0;

	if (get_user(id, (u32 __user *) arg))
		return -EFAULT;

	down_write(&file->port->mutex);

	/* id is unsigned, so only the upper bound needs checking */
	if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
		ret = -EINVAL;
		goto out;
	}

	agent = file->agent[id];
	file->agent[id] = NULL;

out:
	up_write(&file->port->mutex);

	if (agent)
		ib_unregister_mad_agent(agent);

	return ret;
}
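
/*
 * Only two commands exist; anything else is -ENOIOCTLCMD.  As a
 * rough userspace sketch (the device node path depends on udev
 * configuration; /dev/infiniband/umad0 is typical):
 *
 *	struct ib_user_mad_reg_req req = {
 *		.qpn          = 1,		// GSI
 *		.mgmt_class   = ...,		// class to listen for
 *		.rmpp_version = ...,
 *	};
 *	int fd = open("/dev/infiniband/umad0", O_RDWR);
 *	ioctl(fd, IB_USER_MAD_REGISTER_AGENT, &req);
 *	// req.id now holds the slot to use in mad.hdr.id
 */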
static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
		return ib_umad_reg_agent(filp->private_data, arg);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, arg);
	default:
		return -ENOIOCTLCMD;
	}
}
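
/*
 * open() looks the port up by minor number and takes a reference on
 * the owning ib_umad_device per the lifetime rules above, then hangs
 * a new ib_umad_file off the port.  On failure the reference is
 * dropped again before returning.
 */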
static int ib_umad_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_umad_file *file;
	int ret = 0;

	spin_lock(&port_lock);
	port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE];
	if (port)
		kref_get(&port->umad_dev->ref);
	spin_unlock(&port_lock);

	if (!port)
		return -ENXIO;

	down_write(&port->mutex);

	if (!port->ib_dev) {
		ret = -ENXIO;
		goto out;
	}

	file = kzalloc(sizeof *file, GFP_KERNEL);
	if (!file) {
		ret = -ENOMEM;
		goto out;
	}

	spin_lock_init(&file->recv_lock);
	INIT_LIST_HEAD(&file->recv_list);
	init_waitqueue_head(&file->recv_wait);

	file->port = port;
	filp->private_data = file;

	list_add_tail(&file->port_list, &port->file_list);

out:
	up_write(&port->mutex);

	/*
	 * Drop the device reference again on any failure; no release()
	 * will run for an open() that did not succeed.  This is done
	 * after up_write() so a final kref_put can't free the structure
	 * containing the mutex we just released.
	 */
	if (ret)
		kref_put(&port->umad_dev->ref, ib_umad_release_dev);

	return ret;
}
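
/*
 * release(): mark the agents dead and unregister any that remain.
 * The write lock is downgraded to a read lock around
 * ib_unregister_mad_agent() because the completion handlers may call
 * back into queue_packet(), which takes the same mutex for reading.
 * agents_dead may already be set if ib_umad_kill_port() ran first.
 */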
static int ib_umad_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_device *dev = file->port->umad_dev;
	struct ib_umad_packet *packet, *tmp;
	int already_dead;
	int i;

	down_write(&file->port->mutex);

	already_dead = file->agents_dead;
	file->agents_dead = 1;

	list_for_each_entry_safe(packet, tmp, &file->recv_list, list)
		kfree(packet);

	list_del(&file->port_list);

	downgrade_write(&file->port->mutex);

	if (!already_dead)
		for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
			if (file->agent[i])
				ib_unregister_mad_agent(file->agent[i]);

	up_read(&file->port->mutex);

	kfree(file);
	kref_put(&dev->ref, ib_umad_release_dev);

	return 0;
}

static struct file_operations umad_fops = {
	.owner		= THIS_MODULE,
	.read		= ib_umad_read,
	.write		= ib_umad_write,
	.poll		= ib_umad_poll,
	.unlocked_ioctl	= ib_umad_ioctl,
	.compat_ioctl	= ib_umad_ioctl,
	.open		= ib_umad_open,
	.release	= ib_umad_close
};
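
/*
 * The issm device is a mutual-exclusion handle: sm_sem allows at
 * most one opener per port, and while the file is held open the
 * port's IB_PORT_SM capability bit is set, advertising a subnet
 * manager on this port.
 */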
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_port_modify props = {
		.set_port_cap_mask = IB_PORT_SM
	};
	int ret;

	spin_lock(&port_lock);
	port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE - IB_UMAD_MAX_PORTS];
	if (port)
		kref_get(&port->umad_dev->ref);
	spin_unlock(&port_lock);

	if (!port)
		return -ENXIO;

	if (filp->f_flags & O_NONBLOCK) {
		if (down_trylock(&port->sm_sem)) {
			ret = -EAGAIN;
			goto fail;
		}
	} else {
		if (down_interruptible(&port->sm_sem)) {
			ret = -ERESTARTSYS;
			goto fail;
		}
	}

	ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	if (ret) {
		up(&port->sm_sem);
		goto fail;
	}

	filp->private_data = port;

	return 0;

fail:
	kref_put(&port->umad_dev->ref, ib_umad_release_dev);
	return ret;
}
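
/*
 * Clear IB_PORT_SM again on release (unless the device is already
 * gone) and let the next subnet manager in.
 */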
static int ib_umad_sm_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port = filp->private_data;
	struct ib_port_modify props = {
		.clr_port_cap_mask = IB_PORT_SM
	};
	int ret = 0;

	down_write(&port->mutex);
	if (port->ib_dev)
		ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	up_write(&port->mutex);

	up(&port->sm_sem);

	kref_put(&port->umad_dev->ref, ib_umad_release_dev);

	return ret;
}

static struct file_operations umad_sm_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ib_umad_sm_open,
	.release = ib_umad_sm_close
};

static struct ib_client umad_client = {
	.name   = "umad",
	.add    = ib_umad_add_one,
	.remove = ib_umad_remove_one
};

static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{
	struct ib_umad_port *port = class_get_devdata(class_dev);

	if (!port)
		return -ENODEV;

	return sprintf(buf, "%s\n", port->ib_dev->name);
}
static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct class_device *class_dev, char *buf)
{
	struct ib_umad_port *port = class_get_devdata(class_dev);

	if (!port)
		return -ENODEV;

	return sprintf(buf, "%d\n", port->port_num);
}
static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static ssize_t show_abi_version(struct class *class, char *buf)
{
	return sprintf(buf, "%d\n", IB_USER_MAD_ABI_VERSION);
}
static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
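
/*
 * Create the umad%d and issm%d character devices and their sysfs
 * class devices for one port.  Minors come from dev_map, with issm
 * devices offset by IB_UMAD_MAX_PORTS.  The port is published in
 * umad_port[] only after everything has succeeded, so open() never
 * sees a half-initialized port.
 */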
static int ib_umad_init_port(struct ib_device *device, int port_num,
			     struct ib_umad_port *port)
{
	spin_lock(&port_lock);
	port->dev_num = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
	if (port->dev_num >= IB_UMAD_MAX_PORTS) {
		spin_unlock(&port_lock);
		return -1;
	}
	set_bit(port->dev_num, dev_map);
	spin_unlock(&port_lock);

	port->ib_dev   = device;
	port->port_num = port_num;
	init_MUTEX(&port->sm_sem);
	init_rwsem(&port->mutex);
	INIT_LIST_HEAD(&port->file_list);

	port->dev = cdev_alloc();
	if (!port->dev) {
		/* Don't leak the minor number if we never created the cdev */
		clear_bit(port->dev_num, dev_map);
		return -1;
	}
	port->dev->owner = THIS_MODULE;
	port->dev->ops   = &umad_fops;
	kobject_set_name(&port->dev->kobj, "umad%d", port->dev_num);
	if (cdev_add(port->dev, base_dev + port->dev_num, 1))
		goto err_cdev;

	port->class_dev = class_device_create(umad_class, NULL, port->dev->dev,
					      device->dma_device,
					      "umad%d", port->dev_num);
	if (IS_ERR(port->class_dev))
		goto err_cdev;

	if (class_device_create_file(port->class_dev, &class_device_attr_ibdev))
		goto err_class;
	if (class_device_create_file(port->class_dev, &class_device_attr_port))
		goto err_class;

	port->sm_dev = cdev_alloc();
	if (!port->sm_dev)
		goto err_class;
	port->sm_dev->owner = THIS_MODULE;
	port->sm_dev->ops   = &umad_sm_fops;
	kobject_set_name(&port->sm_dev->kobj, "issm%d", port->dev_num);
	if (cdev_add(port->sm_dev, base_dev + port->dev_num + IB_UMAD_MAX_PORTS, 1))
		goto err_sm_cdev;

	port->sm_class_dev = class_device_create(umad_class, NULL, port->sm_dev->dev,
						 device->dma_device,
						 "issm%d", port->dev_num);
	if (IS_ERR(port->sm_class_dev))
		goto err_sm_cdev;

	class_set_devdata(port->class_dev,    port);
	class_set_devdata(port->sm_class_dev, port);

	if (class_device_create_file(port->sm_class_dev, &class_device_attr_ibdev))
		goto err_sm_class;
	if (class_device_create_file(port->sm_class_dev, &class_device_attr_port))
		goto err_sm_class;

	spin_lock(&port_lock);
	umad_port[port->dev_num] = port;
	spin_unlock(&port_lock);

	return 0;

err_sm_class:
	class_device_destroy(umad_class, port->sm_dev->dev);

err_sm_cdev:
	cdev_del(port->sm_dev);

err_class:
	class_device_destroy(umad_class, port->dev->dev);

err_cdev:
	cdev_del(port->dev);
	clear_bit(port->dev_num, dev_map);

	return -1;
}
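
/*
 * Tear down one port: destroy its devices, unpublish it from
 * umad_port[], and force-unregister the agents of every file still
 * open on it (see the locking comment below).
 */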
static void ib_umad_kill_port(struct ib_umad_port *port)
{
	struct ib_umad_file *file;
	int id;

	class_set_devdata(port->class_dev,    NULL);
	class_set_devdata(port->sm_class_dev, NULL);

	class_device_destroy(umad_class, port->dev->dev);
	class_device_destroy(umad_class, port->sm_dev->dev);

	cdev_del(port->dev);
	cdev_del(port->sm_dev);

	spin_lock(&port_lock);
	umad_port[port->dev_num] = NULL;
	spin_unlock(&port_lock);

	down_write(&port->mutex);

	port->ib_dev = NULL;

	/*
	 * Now go through the list of files attached to this port and
	 * unregister all of their MAD agents.  We need to hold
	 * port->mutex while doing this to avoid racing with
	 * ib_umad_close(), but we can't hold the mutex for writing
	 * while calling ib_unregister_mad_agent(), since that might
	 * deadlock by calling back into queue_packet().  So we
	 * downgrade our lock to a read lock, and then drop and
	 * reacquire the write lock for the next iteration.
	 *
	 * We do list_del_init() on the file's list_head so that the
	 * list_del in ib_umad_close() is still OK, even after the
	 * file is removed from the list.
	 */
	while (!list_empty(&port->file_list)) {
		file = list_entry(port->file_list.next, struct ib_umad_file,
				  port_list);

		file->agents_dead = 1;
		list_del_init(&file->port_list);

		downgrade_write(&port->mutex);

		for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
			if (file->agent[id])
				ib_unregister_mad_agent(file->agent[id]);

		up_read(&port->mutex);
		down_write(&port->mutex);
	}

	up_write(&port->mutex);

	clear_bit(port->dev_num, dev_map);
}
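
/*
 * Client callback for device addition.  A switch exposes only port 0
 * (the management port); a CA exposes ports 1..phys_port_cnt.  One
 * ib_umad_device is allocated with an embedded ib_umad_port per
 * port; if any port fails to initialize, the ones already set up are
 * torn down again.
 */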
static void ib_umad_add_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev;
	int s, e, i;

	if (device->node_type == IB_NODE_SWITCH)
		s = e = 0;
	else {
		s = 1;
		e = device->phys_port_cnt;
	}

	umad_dev = kzalloc(sizeof *umad_dev +
			   (e - s + 1) * sizeof (struct ib_umad_port),
			   GFP_KERNEL);
	if (!umad_dev)
		return;

	kref_init(&umad_dev->ref);

	umad_dev->start_port = s;
	umad_dev->end_port   = e;

	for (i = s; i <= e; ++i) {
		umad_dev->port[i - s].umad_dev = umad_dev;

		if (ib_umad_init_port(device, i, &umad_dev->port[i - s]))
			goto err;
	}

	ib_set_client_data(device, &umad_client, umad_dev);

	return;

err:
	while (--i >= s)
		ib_umad_kill_port(&umad_dev->port[i - s]);

	kref_put(&umad_dev->ref, ib_umad_release_dev);
}
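
/*
 * Client callback for device removal: kill every port, then drop the
 * module's reference to the ib_umad_device.
 */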
static void ib_umad_remove_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev = ib_get_client_data(device, &umad_client);
	int i;

	if (!umad_dev)
		return;

	for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i)
		ib_umad_kill_port(&umad_dev->port[i]);

	kref_put(&umad_dev->ref, ib_umad_release_dev);
}

static int __init ib_umad_init(void)
{
	int ret;

	ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2,
				     "infiniband_mad");
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't register device number\n");
		goto out;
	}

	umad_class = class_create(THIS_MODULE, "infiniband_mad");
	if (IS_ERR(umad_class)) {
		ret = PTR_ERR(umad_class);
		printk(KERN_ERR "user_mad: couldn't create class infiniband_mad\n");
		goto out_chrdev;
	}

	ret = class_create_file(umad_class, &class_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = ib_register_client(&umad_client);
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't register ib_umad client\n");
		goto out_class;
	}

	return 0;

out_class:
	class_destroy(umad_class);

out_chrdev:
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);

out:
	return ret;
}

static void __exit ib_umad_cleanup(void)
{
	ib_unregister_client(&umad_client);
	class_destroy(umad_class);
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
}

module_init(ib_umad_init);
module_exit(ib_umad_cleanup);