/* xref: /linux/drivers/infiniband/core/cm.c (revision 13abf8130139c2ccd4962a7e5a8902be5e6cb5a7) */
/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

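/*
 * Global CM state.  Connections are tracked in several tables: local
 * communication IDs live in an idr, listens in an rb-tree keyed by
 * service ID/mask, SIDR requests in an rb-tree keyed by remote ID and
 * GID, and remote IDs/QPNs in rb-trees used to catch duplicate REQs and
 * stale connections while an ID sits in timewait.
 */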
static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	struct workqueue_struct *wq;
} cm;

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u8 port_num;
};

struct cm_device {
	struct list_head list;
	struct ib_device *device;
	__be64 ca_guid;
	struct cm_port port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 packet_life_time;
};

struct cm_work {
	struct work_struct work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};

struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;
	wait_queue_head_t wait;
	atomic_t refcount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 local_ack_timeout;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;

	struct list_head work_list;
	atomic_t work_count;
};

static void cm_work_handler(void *data);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		wake_up(&cm_id_priv->wait);
}

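/*
 * Allocate a MAD send buffer for an outgoing CM message.  Callers may
 * hold the cm_id spinlock (e.g. ib_send_cm_rep), hence GFP_ATOMIC.  The
 * cm_id is pinned by an extra reference stashed in context[0], which
 * cm_free_msg() drops.
 */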
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, 1, cm_id_priv->av.pkey_index,
			       ah, 0, sizeof(struct ib_mad_hdr),
			       sizeof(struct ib_mad)-sizeof(struct ib_mad_hdr),
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->send_wr.wr.ud.retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       ah, 0, sizeof(struct ib_mad_hdr),
			       sizeof(struct ib_mad)-sizeof(struct ib_mad_hdr),
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->send_wr.wr.ud.ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmalloc(private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	memcpy(data, private_data, private_data_len);
	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				 void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_set_ah_attr(struct ib_ah_attr *ah_attr, u8 port_num,
			   u16 dlid, u8 sl, u16 src_path_bits)
{
	/* sizeof *ah_attr, not sizeof ah_attr: zero the struct, not the pointer */
	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = dlid;
	ah_attr->sl = sl;
	ah_attr->src_path_bits = src_path_bits;
	ah_attr->port_num = port_num;
}

static void cm_init_av_for_response(struct cm_port *port,
				    struct ib_wc *wc, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	cm_set_ah_attr(&av->ah_attr, port->port_num, wc->slid,
		       wc->sl, wc->dlid_path_bits);
}

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
					&p, NULL)) {
			port = &cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	cm_set_ah_attr(&av->ah_attr, av->port->port_num,
		       be16_to_cpu(path->dlid), path->sl,
		       be16_to_cpu(path->slid) & 0x7F);
	av->packet_life_time = path->packet_life_time;
	return 0;
}

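/*
 * Reserve a local communication ID.  idr_get_new_above() fails with
 * -EAGAIN until idr_pre_get() has preloaded enough memory, so loop until
 * either the allocation succeeds or preloading itself fails.
 */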
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int ret;

	do {
		spin_lock_irqsave(&cm.lock, flags);
		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 1,
					(__force int *) &cm_id_priv->id.local_id);
		spin_unlock_irqrestore(&cm.lock, flags);
	} while ((ret == -EAGAIN) &&
		 idr_pre_get(&cm.local_id_table, GFP_KERNEL));
	return ret;
}

static void cm_free_id(__be32 local_id)
{
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	idr_remove(&cm.local_id_table, (__force int) local_id);
	spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irqrestore(&cm.lock, flags);

	return cm_id_priv;
}

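/*
 * Insert a listen into the service rb-tree.  Service IDs may be
 * wildcarded by service_mask, so two listens conflict when either ID,
 * masked by the other's mask, matches.  A non-NULL return means an
 * overlapping listen already exists; the caller fails with -EBUSY.
 */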
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id))
			return cm_id_priv;
		if (service_id < cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}

static struct cm_id_private * cm_find_listen(__be64 service_id)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		if ((cm_id_priv->id.service_mask & service_id) ==
		    (cm_id_priv->id.service_mask & cm_id_priv->id.service_id))
			return cm_id_priv;
		if (service_id < cm_id_priv->id.service_id)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (remote_id < cur_timewait_info->work.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_timewait_info->work.remote_id)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (remote_id < timewait_info->work.remote_id)
			node = node->rb_left;
		else if (remote_id > timewait_info->work.remote_id)
			node = node->rb_right;
		else if (remote_ca_guid < timewait_info->remote_ca_guid)
			node = node->rb_left;
		else if (remote_ca_guid > timewait_info->remote_ca_guid)
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (remote_qpn < cur_timewait_info->remote_qpn)
			link = &(*link)->rb_left;
		else if (remote_qpn > cur_timewait_info->remote_qpn)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}

static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (remote_id < cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

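/*
 * Typical passive-side usage (a sketch; my_handler, my_context, and
 * MY_SERVICE_ID are illustrative names, not defined in this file):
 *
 *	struct ib_cm_id *cm_id;
 *	int ret;
 *
 *	cm_id = ib_create_cm_id(my_handler, my_context);
 *	if (IS_ERR(cm_id))
 *		return PTR_ERR(cm_id);
 *	ret = ib_cm_listen(cm_id, cpu_to_be64(MY_SERVICE_ID), 0);
 *	...
 *	ib_destroy_cm_id(cm_id);
 *
 * A zero service_mask is treated as a full mask, i.e. an exact-match
 * listen on the given service ID.
 */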
struct ib_cm_id *ib_create_cm_id(ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kmalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	memset(cm_id_priv, 0, sizeof *cm_id_priv);
	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_waitqueue_head(&cm_id_priv->wait);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(ret);	/* propagate cm_alloc_id's error, not -ENOMEM */
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

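/*
 * IBTA encodes timeouts as t, meaning 4.096us * 2^t.  Since
 * 4.096us * 2^8 is roughly 1ms, 2^(t - 8) ms is a close approximation:
 * e.g. t = 14 gives 64ms here versus ~67ms exact.
 */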
static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}

static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	unsigned long flags;

	if (!timewait_info->inserted_remote_id &&
	    !timewait_info->inserted_remote_qp)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kmalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);
	memset(timewait_info, 0, sizeof *timewait_info);

	timewait_info->work.local_id = local_id;
	INIT_WORK(&timewait_info->work.work, cm_work_handler,
		  &timewait_info->work);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

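/*
 * Move a connection into timewait.  Ownership of the timewait_info
 * passes to the work queue, which reports IB_CM_TIMEWAIT_EXIT when the
 * delay expires, so the cm_id's pointer is cleared to mark the handoff.
 */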
static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
			   msecs_to_jiffies(wait_time));
	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		cm_id->state = IB_CM_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		spin_lock_irqsave(&cm.lock, flags);
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
			      (unsigned long) cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		break;
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
			      (unsigned long) cm_id_priv->msg);
		/* Fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->av.port->cm_dev->ca_guid,
			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
			       NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
			      (unsigned long) cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	}

	cm_free_id(cm_id->local_id);
	atomic_dec(&cm_id_priv->refcount);
	wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount));
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

int ib_cm_listen(struct ib_cm_id *cm_id,
		 __be64 service_id,
		 __be64 service_mask)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	unsigned long flags;
	int ret = 0;

	service_mask = service_mask ? service_mask :
		       __constant_cpu_to_be64(~0ULL);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	BUG_ON(cm_id->state != IB_CM_IDLE);

	cm_id->state = IB_CM_LISTEN;

	spin_lock_irqsave(&cm.lock, flags);
	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
	spin_unlock_irqrestore(&cm.lock, flags);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		ret = -EBUSY;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);

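/*
 * Build the 64-bit transaction ID: the MAD agent's hi_tid in the upper
 * 32 bits, and the local communication ID in the lower 32 with the
 * message sequence folded into bits 30-31, so responses can be
 * demultiplexed back to the owning cm_id.
 */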
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid  = (u64) ((__force u32)cm_id_priv->id.local_id |
			  (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method	   = IB_MGMT_METHOD_SEND;
	hdr->attr_id	   = attr_id;
	hdr->tid	   = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_resp_res(req_msg, param->responder_resources);
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	cm_req_set_retry_count(req_msg, param->retry_count);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
	cm_req_set_srq(req_msg, param->srq);

	req_msg->primary_local_lid = param->primary_path->slid;
	req_msg->primary_remote_lid = param->primary_path->dlid;
	req_msg->primary_local_gid = param->primary_path->sgid;
	req_msg->primary_remote_gid = param->primary_path->dgid;
	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
	req_msg->primary_traffic_class = param->primary_path->traffic_class;
	req_msg->primary_hop_limit = param->primary_path->hop_limit;
	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
	cm_req_set_primary_local_ack_timeout(req_msg,
		min(31, param->primary_path->packet_life_time + 1));

	if (param->alternate_path) {
		req_msg->alt_local_lid = param->alternate_path->slid;
		req_msg->alt_remote_lid = param->alternate_path->dlid;
		req_msg->alt_local_gid = param->alternate_path->sgid;
		req_msg->alt_remote_gid = param->alternate_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  param->alternate_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
		cm_req_set_alt_local_ack_timeout(req_msg,
			min(31, param->alternate_path->packet_life_time + 1));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}

static inline int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}

int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_send_wr *bad_send_wr;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
				&cm_id_priv->msg->send_wr, &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);

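/*
 * Send a REJ in direct response to a received MAD when no cm_id owns
 * the exchange (e.g. no matching listen, or a stale connection).  Only
 * the common header fields are needed, so any CM message can be cast to
 * cm_rej_msg for the comm ID swap.
 */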
static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct ib_send_wr *bad_send_wr;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information.  Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(port->mad_agent, &msg->send_wr, &bad_send_wr);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

static inline void cm_format_paths_from_req(struct cm_req_msg *req_msg,
					    struct ib_sa_path_rec *primary_path,
					    struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
	}
}

static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->device = cm_id_priv->av.port->mad_agent->device;
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
					cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
					cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}

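/*
 * Deliver events to the user.  work_count starts at -1; handlers queue
 * additional events for a cm_id while one is being reported, and
 * atomic_add_negative(-1, ...) drains that backlog here.  A non-zero
 * return from the user's handler destroys the cm_id.
 */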
static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	unsigned long flags;
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
			       &bad_send_wr);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
}

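/*
 * Match an incoming REQ against the timewait tables and listens.
 * Inserting the remote ID/QPN catches duplicate REQs (handled by
 * cm_dup_req_handler) and stale connections (rejected with
 * IB_CM_REJ_STALE_CONN); otherwise the REQ must hit a listening service
 * ID or it is rejected with IB_CM_REJ_INVALID_SERVICE_ID.
 */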
static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;
	unsigned long flags;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for duplicate REQ and stale connections. */
	spin_lock_irqsave(&cm.lock, flags);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (!timewait_info)
		timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);

	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irqrestore(&cm.lock, flags);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		} else
			cm_issue_rej(work->port, work->mad_recv_wc,
				     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
				     NULL, 0);
		goto error;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(req_msg->service_id);
	if (!listen_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto error;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irqrestore(&cm.lock, flags);
	return listen_cm_id_priv;

error:	cm_cleanup_timewait(cm_id_priv->timewait_info);
	return NULL;
}

static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto error1;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		goto error2;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret)
		goto error3;
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret)
			goto error3;
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

error3:	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
error2:	kfree(cm_id_priv->timewait_info);
error1:	ib_destroy_cm_id(&cm_id_priv->id);
	return ret;
}

static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	rep_msg->initiator_depth = param->initiator_depth;
	cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	cm_rep_set_srq(rep_msg, param->srq);
	rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);

static void cm_format_rep_event(struct cm_work *work)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}

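/*
 * A duplicate REP means our RTU or MRA was lost; resend whichever is
 * appropriate for the current state, otherwise drop the message.
 */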
1481 static void cm_dup_rep_handler(struct cm_work *work)
1482 {
1483 	struct cm_id_private *cm_id_priv;
1484 	struct cm_rep_msg *rep_msg;
1485 	struct ib_mad_send_buf *msg = NULL;
1486 	struct ib_send_wr *bad_send_wr;
1487 	unsigned long flags;
1488 	int ret;
1489 
1490 	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
1491 	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
1492 				   rep_msg->local_comm_id);
1493 	if (!cm_id_priv)
1494 		return;
1495 
1496 	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1497 	if (ret)
1498 		goto deref;
1499 
1500 	spin_lock_irqsave(&cm_id_priv->lock, flags);
1501 	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
1502 		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1503 			      cm_id_priv->private_data,
1504 			      cm_id_priv->private_data_len);
1505 	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
1506 		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1507 			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
1508 			      cm_id_priv->private_data,
1509 			      cm_id_priv->private_data_len);
1510 	else
1511 		goto unlock;
1512 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1513 
1514 	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
1515 			       &bad_send_wr);
1516 	if (ret)
1517 		goto free;
1518 	goto deref;
1519 
1520 unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1521 free:	cm_free_msg(msg);
1522 deref:	cm_deref_id(cm_id_priv);
1523 }
1524 
1525 static int cm_rep_handler(struct cm_work *work)
1526 {
1527 	struct cm_id_private *cm_id_priv;
1528 	struct cm_rep_msg *rep_msg;
1529 	unsigned long flags;
1530 	int ret;
1531 
1532 	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1533 	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
1534 	if (!cm_id_priv) {
1535 		cm_dup_rep_handler(work);
1536 		return -EINVAL;
1537 	}
1538 
1539 	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
1540 	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
1541 	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1542 
1543 	spin_lock_irqsave(&cm.lock, flags);
1544 	/* Check for duplicate REP. */
1545 	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
1546 		spin_unlock_irqrestore(&cm.lock, flags);
1547 		ret = -EINVAL;
1548 		goto error;
1549 	}
1550 	/* Check for a stale connection. */
1551 	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
1552 		spin_unlock_irqrestore(&cm.lock, flags);
1553 		cm_issue_rej(work->port, work->mad_recv_wc,
1554 			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
1555 			     NULL, 0);
1556 		ret = -EINVAL;
1557 		goto error;
1558 	}
1559 	spin_unlock_irqrestore(&cm.lock, flags);
1560 
1561 	cm_format_rep_event(work);
1562 
1563 	spin_lock_irqsave(&cm_id_priv->lock, flags);
1564 	switch (cm_id_priv->id.state) {
1565 	case IB_CM_REQ_SENT:
1566 	case IB_CM_MRA_REQ_RCVD:
1567 		break;
1568 	default:
1569 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1570 		ret = -EINVAL;
1571 		goto error;
1572 	}
1573 	cm_id_priv->id.state = IB_CM_REP_RCVD;
1574 	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
1575 	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1576 	cm_id_priv->initiator_depth = rep_msg->resp_resources;
1577 	cm_id_priv->responder_resources = rep_msg->initiator_depth;
1578 	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
1579 	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1580 
1581 	/* todo: handle peer_to_peer */
1582 
1583 	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
1584 		      (unsigned long) cm_id_priv->msg);
1585 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1586 	if (!ret)
1587 		list_add_tail(&work->list, &cm_id_priv->work_list);
1588 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1589 
1590 	if (ret)
1591 		cm_process_work(cm_id_priv, work);
1592 	else
1593 		cm_deref_id(cm_id_priv);
1594 	return 0;
1595 
1596 error:	cm_cleanup_timewait(cm_id_priv->timewait_info);
1597 	cm_deref_id(cm_id_priv);
1598 	return ret;
1599 }
1600 
1601 static int cm_establish_handler(struct cm_work *work)
1602 {
1603 	struct cm_id_private *cm_id_priv;
1604 	unsigned long flags;
1605 	int ret;
1606 
1607 	/* See comment in ib_cm_establish about lookup. */
1608 	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
1609 	if (!cm_id_priv)
1610 		return -EINVAL;
1611 
1612 	spin_lock_irqsave(&cm_id_priv->lock, flags);
1613 	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
1614 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1615 		goto out;
1616 	}
1617 
1618 	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
1619 		      (unsigned long) cm_id_priv->msg);
1620 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1621 	if (!ret)
1622 		list_add_tail(&work->list, &cm_id_priv->work_list);
1623 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1624 
1625 	if (ret)
1626 		cm_process_work(cm_id_priv, work);
1627 	else
1628 		cm_deref_id(cm_id_priv);
1629 	return 0;
1630 out:
1631 	cm_deref_id(cm_id_priv);
1632 	return -EINVAL;
1633 }
1634 
1635 static int cm_rtu_handler(struct cm_work *work)
1636 {
1637 	struct cm_id_private *cm_id_priv;
1638 	struct cm_rtu_msg *rtu_msg;
1639 	unsigned long flags;
1640 	int ret;
1641 
1642 	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
1643 	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
1644 				   rtu_msg->local_comm_id);
1645 	if (!cm_id_priv)
1646 		return -EINVAL;
1647 
1648 	work->cm_event.private_data = &rtu_msg->private_data;
1649 
1650 	spin_lock_irqsave(&cm_id_priv->lock, flags);
1651 	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
1652 	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
1653 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1654 		goto out;
1655 	}
1656 	cm_id_priv->id.state = IB_CM_ESTABLISHED;
1657 
1658 	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
1659 		      (unsigned long) cm_id_priv->msg);
1660 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1661 	if (!ret)
1662 		list_add_tail(&work->list, &cm_id_priv->work_list);
1663 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1664 
1665 	if (ret)
1666 		cm_process_work(cm_id_priv, work);
1667 	else
1668 		cm_deref_id(cm_id_priv);
1669 	return 0;
1670 out:
1671 	cm_deref_id(cm_id_priv);
1672 	return -EINVAL;
1673 }
1674 
1675 static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
1676 			  struct cm_id_private *cm_id_priv,
1677 			  const void *private_data,
1678 			  u8 private_data_len)
1679 {
1680 	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
1681 			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
1682 	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
1683 	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
1684 	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
1685 
1686 	if (private_data && private_data_len)
1687 		memcpy(dreq_msg->private_data, private_data, private_data_len);
1688 }
1689 
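/*
 * Send a DREQ to begin tearing down an established connection.  On
 * success the cm_id moves to IB_CM_DREQ_SENT; if the DREQ cannot be
 * allocated or posted, the cm_id transitions directly to timewait.
 */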
1690 int ib_send_cm_dreq(struct ib_cm_id *cm_id,
1691 		    const void *private_data,
1692 		    u8 private_data_len)
1693 {
1694 	struct cm_id_private *cm_id_priv;
1695 	struct ib_mad_send_buf *msg;
1696 	struct ib_send_wr *bad_send_wr;
1697 	unsigned long flags;
1698 	int ret;
1699 
1700 	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
1701 		return -EINVAL;
1702 
1703 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1704 	spin_lock_irqsave(&cm_id_priv->lock, flags);
1705 	if (cm_id->state != IB_CM_ESTABLISHED) {
1706 		ret = -EINVAL;
1707 		goto out;
1708 	}
1709 
1710 	ret = cm_alloc_msg(cm_id_priv, &msg);
1711 	if (ret) {
1712 		cm_enter_timewait(cm_id_priv);
1713 		goto out;
1714 	}
1715 
1716 	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
1717 		       private_data, private_data_len);
1718 	msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
1719 	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
1720 
1721 	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
1722 			       &msg->send_wr, &bad_send_wr);
1723 	if (ret) {
1724 		cm_enter_timewait(cm_id_priv);
1725 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1726 		cm_free_msg(msg);
1727 		return ret;
1728 	}
1729 
1730 	cm_id->state = IB_CM_DREQ_SENT;
1731 	cm_id_priv->msg = msg;
1732 out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1733 	return ret;
1734 }
1735 EXPORT_SYMBOL(ib_send_cm_dreq);
1736 
1737 static void cm_format_drep(struct cm_drep_msg *drep_msg,
1738 			  struct cm_id_private *cm_id_priv,
1739 			  const void *private_data,
1740 			  u8 private_data_len)
1741 {
1742 	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
1743 	drep_msg->local_comm_id = cm_id_priv->id.local_id;
1744 	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1745 
1746 	if (private_data && private_data_len)
1747 		memcpy(drep_msg->private_data, private_data, private_data_len);
1748 }
1749 
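/*
 * Send a DREP in reply to a received DREQ.  The cm_id enters timewait
 * whether or not the reply can be posted; the private data is saved
 * so the DREP can be regenerated for a duplicate DREQ.
 */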
1750 int ib_send_cm_drep(struct ib_cm_id *cm_id,
1751 		    const void *private_data,
1752 		    u8 private_data_len)
1753 {
1754 	struct cm_id_private *cm_id_priv;
1755 	struct ib_mad_send_buf *msg;
1756 	struct ib_send_wr *bad_send_wr;
1757 	unsigned long flags;
1758 	void *data;
1759 	int ret;
1760 
1761 	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
1762 		return -EINVAL;
1763 
1764 	data = cm_copy_private_data(private_data, private_data_len);
1765 	if (IS_ERR(data))
1766 		return PTR_ERR(data);
1767 
1768 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1769 	spin_lock_irqsave(&cm_id_priv->lock, flags);
1770 	if (cm_id->state != IB_CM_DREQ_RCVD) {
1771 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1772 		kfree(data);
1773 		return -EINVAL;
1774 	}
1775 
1776 	cm_set_private_data(cm_id_priv, data, private_data_len);
1777 	cm_enter_timewait(cm_id_priv);
1778 
1779 	ret = cm_alloc_msg(cm_id_priv, &msg);
1780 	if (ret)
1781 		goto out;
1782 
1783 	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
1784 		       private_data, private_data_len);
1785 
1786 	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
1787 			       &bad_send_wr);
1788 	if (ret) {
1789 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1790 		cm_free_msg(msg);
1791 		return ret;
1792 	}
1793 
1794 out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1795 	return ret;
1796 }
1797 EXPORT_SYMBOL(ib_send_cm_drep);
1798 
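/*
 * Handle a received DREQ.  A DREQ arriving in timewait is answered
 * with a resent DREP; otherwise the cm_id moves to IB_CM_DREQ_RCVD
 * and the event is reported to the client.
 */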
1799 static int cm_dreq_handler(struct cm_work *work)
1800 {
1801 	struct cm_id_private *cm_id_priv;
1802 	struct cm_dreq_msg *dreq_msg;
1803 	struct ib_mad_send_buf *msg = NULL;
1804 	struct ib_send_wr *bad_send_wr;
1805 	unsigned long flags;
1806 	int ret;
1807 
1808 	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
1809 	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
1810 				   dreq_msg->local_comm_id);
1811 	if (!cm_id_priv)
1812 		return -EINVAL;
1813 
1814 	work->cm_event.private_data = &dreq_msg->private_data;
1815 
1816 	spin_lock_irqsave(&cm_id_priv->lock, flags);
1817 	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
1818 		goto unlock;
1819 
1820 	switch (cm_id_priv->id.state) {
1821 	case IB_CM_REP_SENT:
1822 	case IB_CM_DREQ_SENT:
1823 		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
1824 			      (unsigned long) cm_id_priv->msg);
1825 		break;
1826 	case IB_CM_ESTABLISHED:
1827 	case IB_CM_MRA_REP_RCVD:
1828 		break;
1829 	case IB_CM_TIMEWAIT:
1830 		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
1831 			goto unlock;
1832 
1833 		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
1834 			       cm_id_priv->private_data,
1835 			       cm_id_priv->private_data_len);
1836 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1837 
1838 		if (ib_post_send_mad(cm_id_priv->av.port->mad_agent,
1839 				     &msg->send_wr, &bad_send_wr))
1840 			cm_free_msg(msg);
1841 		goto deref;
1842 	default:
1843 		goto unlock;
1844 	}
1845 	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
1846 	cm_id_priv->tid = dreq_msg->hdr.tid;
1847 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1848 	if (!ret)
1849 		list_add_tail(&work->list, &cm_id_priv->work_list);
1850 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1851 
1852 	if (ret)
1853 		cm_process_work(cm_id_priv, work);
1854 	else
1855 		cm_deref_id(cm_id_priv);
1856 	return 0;
1857 
1858 unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1859 deref:	cm_deref_id(cm_id_priv);
1860 	return -EINVAL;
1861 }
1862 
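/*
 * Handle a received DREP: stop DREQ retransmissions and move the
 * cm_id into timewait before reporting the event.
 */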
1863 static int cm_drep_handler(struct cm_work *work)
1864 {
1865 	struct cm_id_private *cm_id_priv;
1866 	struct cm_drep_msg *drep_msg;
1867 	unsigned long flags;
1868 	int ret;
1869 
1870 	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
1871 	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
1872 				   drep_msg->local_comm_id);
1873 	if (!cm_id_priv)
1874 		return -EINVAL;
1875 
1876 	work->cm_event.private_data = &drep_msg->private_data;
1877 
1878 	spin_lock_irqsave(&cm_id_priv->lock, flags);
1879 	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
1880 	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
1881 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1882 		goto out;
1883 	}
1884 	cm_enter_timewait(cm_id_priv);
1885 
1886 	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
1887 		      (unsigned long) cm_id_priv->msg);
1888 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1889 	if (!ret)
1890 		list_add_tail(&work->list, &cm_id_priv->work_list);
1891 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1892 
1893 	if (ret)
1894 		cm_process_work(cm_id_priv, work);
1895 	else
1896 		cm_deref_id(cm_id_priv);
1897 	return 0;
1898 out:
1899 	cm_deref_id(cm_id_priv);
1900 	return -EINVAL;
1901 }
1902 
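/*
 * Reject a connection request or reply.  Before a REP has been sent
 * the cm_id is reset to idle; once a REP has been sent the cm_id
 * enters timewait instead.
 */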
1903 int ib_send_cm_rej(struct ib_cm_id *cm_id,
1904 		   enum ib_cm_rej_reason reason,
1905 		   void *ari,
1906 		   u8 ari_length,
1907 		   const void *private_data,
1908 		   u8 private_data_len)
1909 {
1910 	struct cm_id_private *cm_id_priv;
1911 	struct ib_mad_send_buf *msg;
1912 	struct ib_send_wr *bad_send_wr;
1913 	unsigned long flags;
1914 	int ret;
1915 
1916 	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
1917 	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
1918 		return -EINVAL;
1919 
1920 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1921 
1922 	spin_lock_irqsave(&cm_id_priv->lock, flags);
1923 	switch (cm_id->state) {
1924 	case IB_CM_REQ_SENT:
1925 	case IB_CM_MRA_REQ_RCVD:
1926 	case IB_CM_REQ_RCVD:
1927 	case IB_CM_MRA_REQ_SENT:
1928 	case IB_CM_REP_RCVD:
1929 	case IB_CM_MRA_REP_SENT:
1930 		ret = cm_alloc_msg(cm_id_priv, &msg);
1931 		if (!ret)
1932 			cm_format_rej((struct cm_rej_msg *) msg->mad,
1933 				      cm_id_priv, reason, ari, ari_length,
1934 				      private_data, private_data_len);
1935 
1936 		cm_reset_to_idle(cm_id_priv);
1937 		break;
1938 	case IB_CM_REP_SENT:
1939 	case IB_CM_MRA_REP_RCVD:
1940 		ret = cm_alloc_msg(cm_id_priv, &msg);
1941 		if (!ret)
1942 			cm_format_rej((struct cm_rej_msg *) msg->mad,
1943 				      cm_id_priv, reason, ari, ari_length,
1944 				      private_data, private_data_len);
1945 
1946 		cm_enter_timewait(cm_id_priv);
1947 		break;
1948 	default:
1949 		ret = -EINVAL;
1950 		goto out;
1951 	}
1952 
1953 	if (ret)
1954 		goto out;
1955 
1956 	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
1957 			       &msg->send_wr, &bad_send_wr);
1958 	if (ret)
1959 		cm_free_msg(msg);
1960 
1961 out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1962 	return ret;
1963 }
1964 EXPORT_SYMBOL(ib_send_cm_rej);
1965 
1966 static void cm_format_rej_event(struct cm_work *work)
1967 {
1968 	struct cm_rej_msg *rej_msg;
1969 	struct ib_cm_rej_event_param *param;
1970 
1971 	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
1972 	param = &work->cm_event.param.rej_rcvd;
1973 	param->ari = rej_msg->ari;
1974 	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
1975 	param->reason = __be16_to_cpu(rej_msg->reason);
1976 	work->cm_event.private_data = &rej_msg->private_data;
1977 }
1978 
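/*
 * Locate the cm_id targeted by a received REJ.  A timeout rejection
 * identifies the peer by the CA GUID carried in the ARI, so it is
 * resolved through the remote ID table; other rejections carry our
 * local communication ID directly.
 */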
1979 static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
1980 {
1981 	struct cm_timewait_info *timewait_info;
1982 	struct cm_id_private *cm_id_priv;
1983 	unsigned long flags;
1984 	__be32 remote_id;
1985 
1986 	remote_id = rej_msg->local_comm_id;
1987 
1988 	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
1989 		spin_lock_irqsave(&cm.lock, flags);
1990 		timewait_info = cm_find_remote_id(*((__be64 *) rej_msg->ari),
1991 						 remote_id);
1992 		if (!timewait_info) {
1993 			spin_unlock_irqrestore(&cm.lock, flags);
1994 			return NULL;
1995 		}
1996 		cm_id_priv = idr_find(&cm.local_id_table,
1997 				      (__force int) timewait_info->work.local_id);
1998 		if (cm_id_priv) {
1999 			if (cm_id_priv->id.remote_id == remote_id)
2000 				atomic_inc(&cm_id_priv->refcount);
2001 			else
2002 				cm_id_priv = NULL;
2003 		}
2004 		spin_unlock_irqrestore(&cm.lock, flags);
2005 	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2006 		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2007 	else
2008 		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2009 
2010 	return cm_id_priv;
2011 }
2012 
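/*
 * Handle a received REJ: cancel any outstanding send and either reset
 * the cm_id to idle or, for stale or established connections, move it
 * into timewait.
 */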
2013 static int cm_rej_handler(struct cm_work *work)
2014 {
2015 	struct cm_id_private *cm_id_priv;
2016 	struct cm_rej_msg *rej_msg;
2017 	unsigned long flags;
2018 	int ret;
2019 
2020 	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2021 	cm_id_priv = cm_acquire_rejected_id(rej_msg);
2022 	if (!cm_id_priv)
2023 		return -EINVAL;
2024 
2025 	cm_format_rej_event(work);
2026 
2027 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2028 	switch (cm_id_priv->id.state) {
2029 	case IB_CM_REQ_SENT:
2030 	case IB_CM_MRA_REQ_RCVD:
2031 	case IB_CM_REP_SENT:
2032 	case IB_CM_MRA_REP_RCVD:
2033 		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2034 			      (unsigned long) cm_id_priv->msg);
2035 		/* fall through */
2036 	case IB_CM_REQ_RCVD:
2037 	case IB_CM_MRA_REQ_SENT:
2038 		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
2039 			cm_enter_timewait(cm_id_priv);
2040 		else
2041 			cm_reset_to_idle(cm_id_priv);
2042 		break;
2043 	case IB_CM_DREQ_SENT:
2044 		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2045 			      (unsigned long) cm_id_priv->msg);
2046 		/* fall through */
2047 	case IB_CM_REP_RCVD:
2048 	case IB_CM_MRA_REP_SENT:
2049 	case IB_CM_ESTABLISHED:
2050 		cm_enter_timewait(cm_id_priv);
2051 		break;
2052 	default:
2053 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2054 		ret = -EINVAL;
2055 		goto out;
2056 	}
2057 
2058 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2059 	if (!ret)
2060 		list_add_tail(&work->list, &cm_id_priv->work_list);
2061 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2062 
2063 	if (ret)
2064 		cm_process_work(cm_id_priv, work);
2065 	else
2066 		cm_deref_id(cm_id_priv);
2067 	return 0;
2068 out:
2069 	cm_deref_id(cm_id_priv);
2070 	return -EINVAL;
2071 }
2072 
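/*
 * Send an MRA to extend the sender's timeout for a received REQ, REP,
 * or LAP.  The service timeout and private data are saved so the MRA
 * can be regenerated if the message is received again.
 */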
2073 int ib_send_cm_mra(struct ib_cm_id *cm_id,
2074 		   u8 service_timeout,
2075 		   const void *private_data,
2076 		   u8 private_data_len)
2077 {
2078 	struct cm_id_private *cm_id_priv;
2079 	struct ib_mad_send_buf *msg;
2080 	struct ib_send_wr *bad_send_wr;
2081 	void *data;
2082 	unsigned long flags;
2083 	int ret;
2084 
2085 	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2086 		return -EINVAL;
2087 
2088 	data = cm_copy_private_data(private_data, private_data_len);
2089 	if (IS_ERR(data))
2090 		return PTR_ERR(data);
2091 
2092 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2093 
2094 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2095 	switch (cm_id_priv->id.state) {
2096 	case IB_CM_REQ_RCVD:
2097 		ret = cm_alloc_msg(cm_id_priv, &msg);
2098 		if (ret)
2099 			goto error1;
2100 
2101 		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2102 			      CM_MSG_RESPONSE_REQ, service_timeout,
2103 			      private_data, private_data_len);
2104 		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2105 				       &msg->send_wr, &bad_send_wr);
2106 		if (ret)
2107 			goto error2;
2108 		cm_id->state = IB_CM_MRA_REQ_SENT;
2109 		break;
2110 	case IB_CM_REP_RCVD:
2111 		ret = cm_alloc_msg(cm_id_priv, &msg);
2112 		if (ret)
2113 			goto error1;
2114 
2115 		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2116 			      CM_MSG_RESPONSE_REP, service_timeout,
2117 			      private_data, private_data_len);
2118 		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2119 				       &msg->send_wr, &bad_send_wr);
2120 		if (ret)
2121 			goto error2;
2122 		cm_id->state = IB_CM_MRA_REP_SENT;
2123 		break;
2124 	case IB_CM_ESTABLISHED:
2125 		ret = cm_alloc_msg(cm_id_priv, &msg);
2126 		if (ret)
2127 			goto error1;
2128 
2129 		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2130 			      CM_MSG_RESPONSE_OTHER, service_timeout,
2131 			      private_data, private_data_len);
2132 		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2133 				       &msg->send_wr, &bad_send_wr);
2134 		if (ret)
2135 			goto error2;
2136 		cm_id->lap_state = IB_CM_MRA_LAP_SENT;
2137 		break;
2138 	default:
2139 		ret = -EINVAL;
2140 		goto error1;
2141 	}
2142 	cm_id_priv->service_timeout = service_timeout;
2143 	cm_set_private_data(cm_id_priv, data, private_data_len);
2144 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2145 	return 0;
2146 
2147 error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2148 	kfree(data);
2149 	return ret;
2150 
2151 error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2152 	kfree(data);
2153 	cm_free_msg(msg);
2154 	return ret;
2155 }
2156 EXPORT_SYMBOL(ib_send_cm_mra);
2157 
2158 static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2159 {
2160 	switch (cm_mra_get_msg_mraed(mra_msg)) {
2161 	case CM_MSG_RESPONSE_REQ:
2162 		return cm_acquire_id(mra_msg->remote_comm_id, 0);
2163 	case CM_MSG_RESPONSE_REP:
2164 	case CM_MSG_RESPONSE_OTHER:
2165 		return cm_acquire_id(mra_msg->remote_comm_id,
2166 				     mra_msg->local_comm_id);
2167 	default:
2168 		return NULL;
2169 	}
2170 }
2171 
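/*
 * Handle a received MRA: stretch the retransmission timeout of the
 * outstanding REQ, REP, or LAP by the service timeout advertised by
 * the peer.
 */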
2172 static int cm_mra_handler(struct cm_work *work)
2173 {
2174 	struct cm_id_private *cm_id_priv;
2175 	struct cm_mra_msg *mra_msg;
2176 	unsigned long flags;
2177 	int timeout, ret;
2178 
2179 	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
2180 	cm_id_priv = cm_acquire_mraed_id(mra_msg);
2181 	if (!cm_id_priv)
2182 		return -EINVAL;
2183 
2184 	work->cm_event.private_data = &mra_msg->private_data;
2185 	work->cm_event.param.mra_rcvd.service_timeout =
2186 					cm_mra_get_service_timeout(mra_msg);
2187 	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
2188 		  cm_convert_to_ms(cm_id_priv->av.packet_life_time);
2189 
2190 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2191 	switch (cm_id_priv->id.state) {
2192 	case IB_CM_REQ_SENT:
2193 		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
2194 		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
2195 				  (unsigned long) cm_id_priv->msg, timeout))
2196 			goto out;
2197 		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
2198 		break;
2199 	case IB_CM_REP_SENT:
2200 		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
2201 		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
2202 				  (unsigned long) cm_id_priv->msg, timeout))
2203 			goto out;
2204 		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
2205 		break;
2206 	case IB_CM_ESTABLISHED:
2207 		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
2208 		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
2209 		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
2210 				  (unsigned long) cm_id_priv->msg, timeout))
2211 			goto out;
2212 		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
2213 		break;
2214 	default:
2215 		goto out;
2216 	}
2217 
2218 	cm_id_priv->msg->context[1] = (void *) (unsigned long)
2219 				      cm_id_priv->id.state;
2220 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2221 	if (!ret)
2222 		list_add_tail(&work->list, &cm_id_priv->work_list);
2223 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2224 
2225 	if (ret)
2226 		cm_process_work(cm_id_priv, work);
2227 	else
2228 		cm_deref_id(cm_id_priv);
2229 	return 0;
2230 out:
2231 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2232 	cm_deref_id(cm_id_priv);
2233 	return -EINVAL;
2234 }
2235 
2236 static void cm_format_lap(struct cm_lap_msg *lap_msg,
2237 			  struct cm_id_private *cm_id_priv,
2238 			  struct ib_sa_path_rec *alternate_path,
2239 			  const void *private_data,
2240 			  u8 private_data_len)
2241 {
2242 	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
2243 			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
2244 	lap_msg->local_comm_id = cm_id_priv->id.local_id;
2245 	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
2246 	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
2247 	/* todo: need remote CM response timeout */
2248 	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
2249 	lap_msg->alt_local_lid = alternate_path->slid;
2250 	lap_msg->alt_remote_lid = alternate_path->dlid;
2251 	lap_msg->alt_local_gid = alternate_path->sgid;
2252 	lap_msg->alt_remote_gid = alternate_path->dgid;
2253 	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
2254 	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
2255 	lap_msg->alt_hop_limit = alternate_path->hop_limit;
2256 	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
2257 	cm_lap_set_sl(lap_msg, alternate_path->sl);
2258 	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
2259 	cm_lap_set_local_ack_timeout(lap_msg,
2260 		min(31, alternate_path->packet_life_time + 1));
2261 
2262 	if (private_data && private_data_len)
2263 		memcpy(lap_msg->private_data, private_data, private_data_len);
2264 }
2265 
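/*
 * Send a LAP to load an alternate path for an established connection.
 * Only one path transition may be outstanding at a time; the
 * lap_state must be idle.
 */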
2266 int ib_send_cm_lap(struct ib_cm_id *cm_id,
2267 		   struct ib_sa_path_rec *alternate_path,
2268 		   const void *private_data,
2269 		   u8 private_data_len)
2270 {
2271 	struct cm_id_private *cm_id_priv;
2272 	struct ib_mad_send_buf *msg;
2273 	struct ib_send_wr *bad_send_wr;
2274 	unsigned long flags;
2275 	int ret;
2276 
2277 	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
2278 		return -EINVAL;
2279 
2280 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2281 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2282 	if (cm_id->state != IB_CM_ESTABLISHED ||
2283 	    cm_id->lap_state != IB_CM_LAP_IDLE) {
2284 		ret = -EINVAL;
2285 		goto out;
2286 	}
2287 
2288 	ret = cm_alloc_msg(cm_id_priv, &msg);
2289 	if (ret)
2290 		goto out;
2291 
2292 	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
2293 		      alternate_path, private_data, private_data_len);
2294 	msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
2295 	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
2296 
2297 	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2298 			       &msg->send_wr, &bad_send_wr);
2299 	if (ret) {
2300 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2301 		cm_free_msg(msg);
2302 		return ret;
2303 	}
2304 
2305 	cm_id->lap_state = IB_CM_LAP_SENT;
2306 	cm_id_priv->msg = msg;
2307 
2308 out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2309 	return ret;
2310 }
2311 EXPORT_SYMBOL(ib_send_cm_lap);
2312 
2313 static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
2314 				    struct cm_lap_msg *lap_msg)
2315 {
2316 	memset(path, 0, sizeof *path);
2317 	path->dgid = lap_msg->alt_local_gid;
2318 	path->sgid = lap_msg->alt_remote_gid;
2319 	path->dlid = lap_msg->alt_local_lid;
2320 	path->slid = lap_msg->alt_remote_lid;
2321 	path->flow_label = cm_lap_get_flow_label(lap_msg);
2322 	path->hop_limit = lap_msg->alt_hop_limit;
2323 	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
2324 	path->reversible = 1;
2325 	/* pkey is same as in REQ */
2326 	path->sl = cm_lap_get_sl(lap_msg);
2327 	path->mtu_selector = IB_SA_EQ;
2328 	/* mtu is same as in REQ */
2329 	path->rate_selector = IB_SA_EQ;
2330 	path->rate = cm_lap_get_packet_rate(lap_msg);
2331 	path->packet_life_time_selector = IB_SA_EQ;
2332 	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
2333 	path->packet_life_time -= (path->packet_life_time > 0);
2334 }
2335 
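/*
 * Handle a received LAP: convert the message into a path record for
 * the client, and re-acknowledge a duplicate LAP that arrives after
 * an MRA has been sent.
 */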
2336 static int cm_lap_handler(struct cm_work *work)
2337 {
2338 	struct cm_id_private *cm_id_priv;
2339 	struct cm_lap_msg *lap_msg;
2340 	struct ib_cm_lap_event_param *param;
2341 	struct ib_mad_send_buf *msg = NULL;
2342 	struct ib_send_wr *bad_send_wr;
2343 	unsigned long flags;
2344 	int ret;
2345 
2346 	/* todo: verify LAP request and send reject APR if invalid. */
2347 	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
2348 	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
2349 				   lap_msg->local_comm_id);
2350 	if (!cm_id_priv)
2351 		return -EINVAL;
2352 
2353 	param = &work->cm_event.param.lap_rcvd;
2354 	param->alternate_path = &work->path[0];
2355 	cm_format_path_from_lap(param->alternate_path, lap_msg);
2356 	work->cm_event.private_data = &lap_msg->private_data;
2357 
2358 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2359 	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
2360 		goto unlock;
2361 
2362 	switch (cm_id_priv->id.lap_state) {
2363 	case IB_CM_LAP_IDLE:
2364 		break;
2365 	case IB_CM_MRA_LAP_SENT:
2366 		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2367 			goto unlock;
2368 
2369 		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2370 			      CM_MSG_RESPONSE_OTHER,
2371 			      cm_id_priv->service_timeout,
2372 			      cm_id_priv->private_data,
2373 			      cm_id_priv->private_data_len);
2374 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2375 
2376 		if (ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2377 				     &msg->send_wr, &bad_send_wr))
2378 			cm_free_msg(msg);
2379 		goto deref;
2380 	default:
2381 		goto unlock;
2382 	}
2383 
2384 	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
2385 	cm_id_priv->tid = lap_msg->hdr.tid;
2386 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2387 	if (!ret)
2388 		list_add_tail(&work->list, &cm_id_priv->work_list);
2389 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2390 
2391 	if (ret)
2392 		cm_process_work(cm_id_priv, work);
2393 	else
2394 		cm_deref_id(cm_id_priv);
2395 	return 0;
2396 
2397 unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2398 deref:	cm_deref_id(cm_id_priv);
2399 	return -EINVAL;
2400 }
2401 
2402 static void cm_format_apr(struct cm_apr_msg *apr_msg,
2403 			  struct cm_id_private *cm_id_priv,
2404 			  enum ib_cm_apr_status status,
2405 			  void *info,
2406 			  u8 info_length,
2407 			  const void *private_data,
2408 			  u8 private_data_len)
2409 {
2410 	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
2411 	apr_msg->local_comm_id = cm_id_priv->id.local_id;
2412 	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
2413 	apr_msg->ap_status = (u8) status;
2414 
2415 	if (info && info_length) {
2416 		apr_msg->info_length = info_length;
2417 		memcpy(apr_msg->info, info, info_length);
2418 	}
2419 
2420 	if (private_data && private_data_len)
2421 		memcpy(apr_msg->private_data, private_data, private_data_len);
2422 }
2423 
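/*
 * Send an APR accepting or rejecting a received alternate path.
 * Sending the reply completes the exchange and returns the lap_state
 * to idle.
 */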
2424 int ib_send_cm_apr(struct ib_cm_id *cm_id,
2425 		   enum ib_cm_apr_status status,
2426 		   void *info,
2427 		   u8 info_length,
2428 		   const void *private_data,
2429 		   u8 private_data_len)
2430 {
2431 	struct cm_id_private *cm_id_priv;
2432 	struct ib_mad_send_buf *msg;
2433 	struct ib_send_wr *bad_send_wr;
2434 	unsigned long flags;
2435 	int ret;
2436 
2437 	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
2438 	    (info && info_length > IB_CM_APR_INFO_LENGTH))
2439 		return -EINVAL;
2440 
2441 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2442 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2443 	if (cm_id->state != IB_CM_ESTABLISHED ||
2444 	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
2445 	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
2446 		ret = -EINVAL;
2447 		goto out;
2448 	}
2449 
2450 	ret = cm_alloc_msg(cm_id_priv, &msg);
2451 	if (ret)
2452 		goto out;
2453 
2454 	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
2455 		      info, info_length, private_data, private_data_len);
2456 	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2457 			       &msg->send_wr, &bad_send_wr);
2458 	if (ret) {
2459 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2460 		cm_free_msg(msg);
2461 		return ret;
2462 	}
2463 
2464 	cm_id->lap_state = IB_CM_LAP_IDLE;
2465 out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2466 	return ret;
2467 }
2468 EXPORT_SYMBOL(ib_send_cm_apr);
2469 
2470 static int cm_apr_handler(struct cm_work *work)
2471 {
2472 	struct cm_id_private *cm_id_priv;
2473 	struct cm_apr_msg *apr_msg;
2474 	unsigned long flags;
2475 	int ret;
2476 
2477 	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
2478 	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
2479 				   apr_msg->local_comm_id);
2480 	if (!cm_id_priv)
2481 		return -EINVAL; /* Unmatched reply. */
2482 
2483 	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
2484 	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
2485 	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
2486 	work->cm_event.private_data = &apr_msg->private_data;
2487 
2488 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2489 	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
2490 	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
2491 	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
2492 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2493 		goto out;
2494 	}
2495 	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
2496 	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2497 		      (unsigned long) cm_id_priv->msg);
2498 	cm_id_priv->msg = NULL;
2499 
2500 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2501 	if (!ret)
2502 		list_add_tail(&work->list, &cm_id_priv->work_list);
2503 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2504 
2505 	if (ret)
2506 		cm_process_work(cm_id_priv, work);
2507 	else
2508 		cm_deref_id(cm_id_priv);
2509 	return 0;
2510 out:
2511 	cm_deref_id(cm_id_priv);
2512 	return -EINVAL;
2513 }
2514 
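/*
 * The timewait work item is the cm_timewait_info itself.  When the
 * timewait period expires, release the timewait bookkeeping and move
 * the cm_id from timewait back to idle.
 */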
2515 static int cm_timewait_handler(struct cm_work *work)
2516 {
2517 	struct cm_timewait_info *timewait_info;
2518 	struct cm_id_private *cm_id_priv;
2519 	unsigned long flags;
2520 	int ret;
2521 
2522 	timewait_info = (struct cm_timewait_info *)work;
2523 	cm_cleanup_timewait(timewait_info);
2524 
2525 	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2526 				   timewait_info->work.remote_id);
2527 	if (!cm_id_priv)
2528 		return -EINVAL;
2529 
2530 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2531 	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
2532 	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
2533 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2534 		goto out;
2535 	}
2536 	cm_id_priv->id.state = IB_CM_IDLE;
2537 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2538 	if (!ret)
2539 		list_add_tail(&work->list, &cm_id_priv->work_list);
2540 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2541 
2542 	if (ret)
2543 		cm_process_work(cm_id_priv, work);
2544 	else
2545 		cm_deref_id(cm_id_priv);
2546 	return 0;
2547 out:
2548 	cm_deref_id(cm_id_priv);
2549 	return -EINVAL;
2550 }
2551 
2552 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
2553 			       struct cm_id_private *cm_id_priv,
2554 			       struct ib_cm_sidr_req_param *param)
2555 {
2556 	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
2557 			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
2558 	sidr_req_msg->request_id = cm_id_priv->id.local_id;
2559 	sidr_req_msg->pkey = cpu_to_be16(param->pkey);
2560 	sidr_req_msg->service_id = param->service_id;
2561 
2562 	if (param->private_data && param->private_data_len)
2563 		memcpy(sidr_req_msg->private_data, param->private_data,
2564 		       param->private_data_len);
2565 }
2566 
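/*
 * Send a SIDR REQ to resolve a service ID to a QPN and Q_Key.  The
 * cm_id must be idle and moves to IB_CM_SIDR_REQ_SENT once the
 * request is posted.
 */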
2567 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
2568 			struct ib_cm_sidr_req_param *param)
2569 {
2570 	struct cm_id_private *cm_id_priv;
2571 	struct ib_mad_send_buf *msg;
2572 	struct ib_send_wr *bad_send_wr;
2573 	unsigned long flags;
2574 	int ret;
2575 
2576 	if (!param->path || (param->private_data &&
2577 	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
2578 		return -EINVAL;
2579 
2580 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2581 	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
2582 	if (ret)
2583 		goto out;
2584 
2585 	cm_id->service_id = param->service_id;
2586 	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
2587 	cm_id_priv->timeout_ms = param->timeout_ms;
2588 	cm_id_priv->max_cm_retries = param->max_cm_retries;
2589 	ret = cm_alloc_msg(cm_id_priv, &msg);
2590 	if (ret)
2591 		goto out;
2592 
2593 	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
2594 			   param);
2595 	msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
2596 	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
2597 
2598 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2599 	if (cm_id->state == IB_CM_IDLE)
2600 		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2601 				       &msg->send_wr, &bad_send_wr);
2602 	else
2603 		ret = -EINVAL;
2604 
2605 	if (ret) {
2606 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2607 		cm_free_msg(msg);
2608 		goto out;
2609 	}
2610 	cm_id->state = IB_CM_SIDR_REQ_SENT;
2611 	cm_id_priv->msg = msg;
2612 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2613 out:
2614 	return ret;
2615 }
2616 EXPORT_SYMBOL(ib_send_cm_sidr_req);
2617 
2618 static void cm_format_sidr_req_event(struct cm_work *work,
2619 				     struct ib_cm_id *listen_id)
2620 {
2621 	struct cm_sidr_req_msg *sidr_req_msg;
2622 	struct ib_cm_sidr_req_event_param *param;
2623 
2624 	sidr_req_msg = (struct cm_sidr_req_msg *)
2625 				work->mad_recv_wc->recv_buf.mad;
2626 	param = &work->cm_event.param.sidr_req_rcvd;
2627 	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
2628 	param->listen_id = listen_id;
2629 	param->device = work->port->mad_agent->device;
2630 	param->port = work->port->port_num;
2631 	work->cm_event.private_data = &sidr_req_msg->private_data;
2632 }
2633 
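/*
 * Handle a received SIDR REQ: create a new cm_id for the exchange,
 * discard duplicates, and hand the request to the client listening on
 * the requested service ID.
 */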
2634 static int cm_sidr_req_handler(struct cm_work *work)
2635 {
2636 	struct ib_cm_id *cm_id;
2637 	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
2638 	struct cm_sidr_req_msg *sidr_req_msg;
2639 	struct ib_wc *wc;
2640 	unsigned long flags;
2641 
2642 	cm_id = ib_create_cm_id(NULL, NULL);
2643 	if (IS_ERR(cm_id))
2644 		return PTR_ERR(cm_id);
2645 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2646 
2647 	/* Record the SLID and request ID to detect duplicate SIDR REQs. */
2648 	sidr_req_msg = (struct cm_sidr_req_msg *)
2649 				work->mad_recv_wc->recv_buf.mad;
2650 	wc = work->mad_recv_wc->wc;
2651 	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
2652 	cm_id_priv->av.dgid.global.interface_id = 0;
2653 	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2654 				&cm_id_priv->av);
2655 	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
2656 	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
2657 	cm_id_priv->tid = sidr_req_msg->hdr.tid;
2658 	atomic_inc(&cm_id_priv->work_count);
2659 
2660 	spin_lock_irqsave(&cm.lock, flags);
2661 	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
2662 	if (cur_cm_id_priv) {
2663 		spin_unlock_irqrestore(&cm.lock, flags);
2664 		goto out; /* Duplicate message. */
2665 	}
2666 	cur_cm_id_priv = cm_find_listen(sidr_req_msg->service_id);
2667 	if (!cur_cm_id_priv) {
2668 		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
2669 		spin_unlock_irqrestore(&cm.lock, flags);
2670 		/* todo: reply with no match */
2671 		goto out; /* No match. */
2672 	}
2673 	atomic_inc(&cur_cm_id_priv->refcount);
2674 	spin_unlock_irqrestore(&cm.lock, flags);
2675 
2676 	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
2677 	cm_id_priv->id.context = cur_cm_id_priv->id.context;
2678 	cm_id_priv->id.service_id = sidr_req_msg->service_id;
2679 	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
2680 
2681 	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
2682 	cm_process_work(cm_id_priv, work);
2683 	cm_deref_id(cur_cm_id_priv);
2684 	return 0;
2685 out:
2686 	ib_destroy_cm_id(&cm_id_priv->id);
2687 	return -EINVAL;
2688 }
2689 
2690 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
2691 			       struct cm_id_private *cm_id_priv,
2692 			       struct ib_cm_sidr_rep_param *param)
2693 {
2694 	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
2695 			  cm_id_priv->tid);
2696 	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
2697 	sidr_rep_msg->status = param->status;
2698 	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
2699 	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
2700 	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
2701 
2702 	if (param->info && param->info_length)
2703 		memcpy(sidr_rep_msg->info, param->info, param->info_length);
2704 
2705 	if (param->private_data && param->private_data_len)
2706 		memcpy(sidr_rep_msg->private_data, param->private_data,
2707 		       param->private_data_len);
2708 }
2709 
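/*
 * Send a SIDR REP.  Replying completes the exchange: the cm_id
 * returns to idle and is removed from the remote SIDR table.
 */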
2710 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
2711 			struct ib_cm_sidr_rep_param *param)
2712 {
2713 	struct cm_id_private *cm_id_priv;
2714 	struct ib_mad_send_buf *msg;
2715 	struct ib_send_wr *bad_send_wr;
2716 	unsigned long flags;
2717 	int ret;
2718 
2719 	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
2720 	    (param->private_data &&
2721 	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
2722 		return -EINVAL;
2723 
2724 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2725 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2726 	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
2727 		ret = -EINVAL;
2728 		goto error;
2729 	}
2730 
2731 	ret = cm_alloc_msg(cm_id_priv, &msg);
2732 	if (ret)
2733 		goto error;
2734 
2735 	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
2736 			   param);
2737 	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2738 			       &msg->send_wr, &bad_send_wr);
2739 	if (ret) {
2740 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2741 		cm_free_msg(msg);
2742 		return ret;
2743 	}
2744 	cm_id->state = IB_CM_IDLE;
2745 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2746 
2747 	spin_lock_irqsave(&cm.lock, flags);
2748 	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
2749 	spin_unlock_irqrestore(&cm.lock, flags);
2750 	return 0;
2751 
2752 error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2753 	return ret;
2754 }
2755 EXPORT_SYMBOL(ib_send_cm_sidr_rep);
2756 
2757 static void cm_format_sidr_rep_event(struct cm_work *work)
2758 {
2759 	struct cm_sidr_rep_msg *sidr_rep_msg;
2760 	struct ib_cm_sidr_rep_event_param *param;
2761 
2762 	sidr_rep_msg = (struct cm_sidr_rep_msg *)
2763 				work->mad_recv_wc->recv_buf.mad;
2764 	param = &work->cm_event.param.sidr_rep_rcvd;
2765 	param->status = sidr_rep_msg->status;
2766 	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
2767 	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
2768 	param->info = &sidr_rep_msg->info;
2769 	param->info_len = sidr_rep_msg->info_length;
2770 	work->cm_event.private_data = &sidr_rep_msg->private_data;
2771 }
2772 
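/*
 * Handle a received SIDR REP: stop SIDR REQ retransmissions and
 * report the resolved QPN and Q_Key to the requester.
 */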
2773 static int cm_sidr_rep_handler(struct cm_work *work)
2774 {
2775 	struct cm_sidr_rep_msg *sidr_rep_msg;
2776 	struct cm_id_private *cm_id_priv;
2777 	unsigned long flags;
2778 
2779 	sidr_rep_msg = (struct cm_sidr_rep_msg *)
2780 				work->mad_recv_wc->recv_buf.mad;
2781 	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
2782 	if (!cm_id_priv)
2783 		return -EINVAL; /* Unmatched reply. */
2784 
2785 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2786 	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
2787 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2788 		goto out;
2789 	}
2790 	cm_id_priv->id.state = IB_CM_IDLE;
2791 	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2792 		      (unsigned long) cm_id_priv->msg);
2793 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2794 
2795 	cm_format_sidr_rep_event(work);
2796 	cm_process_work(cm_id_priv, work);
2797 	return 0;
2798 out:
2799 	cm_deref_id(cm_id_priv);
2800 	return -EINVAL;
2801 }
2802 
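/*
 * Report a failed send (e.g. one that exhausted its retries) to the
 * client as the error event matching the message that was being sent,
 * unless the cm_id has since moved to another state.
 */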
2803 static void cm_process_send_error(struct ib_mad_send_buf *msg,
2804 				  enum ib_wc_status wc_status)
2805 {
2806 	struct cm_id_private *cm_id_priv;
2807 	struct ib_cm_event cm_event;
2808 	enum ib_cm_state state;
2809 	unsigned long flags;
2810 	int ret;
2811 
2812 	memset(&cm_event, 0, sizeof cm_event);
2813 	cm_id_priv = msg->context[0];
2814 
2815 	/* Discard old sends or ones without a response. */
2816 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2817 	state = (enum ib_cm_state) (unsigned long) msg->context[1];
2818 	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
2819 		goto discard;
2820 
2821 	switch (state) {
2822 	case IB_CM_REQ_SENT:
2823 	case IB_CM_MRA_REQ_RCVD:
2824 		cm_reset_to_idle(cm_id_priv);
2825 		cm_event.event = IB_CM_REQ_ERROR;
2826 		break;
2827 	case IB_CM_REP_SENT:
2828 	case IB_CM_MRA_REP_RCVD:
2829 		cm_reset_to_idle(cm_id_priv);
2830 		cm_event.event = IB_CM_REP_ERROR;
2831 		break;
2832 	case IB_CM_DREQ_SENT:
2833 		cm_enter_timewait(cm_id_priv);
2834 		cm_event.event = IB_CM_DREQ_ERROR;
2835 		break;
2836 	case IB_CM_SIDR_REQ_SENT:
2837 		cm_id_priv->id.state = IB_CM_IDLE;
2838 		cm_event.event = IB_CM_SIDR_REQ_ERROR;
2839 		break;
2840 	default:
2841 		goto discard;
2842 	}
2843 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2844 	cm_event.param.send_status = wc_status;
2845 
2846 	/* No other events can occur on the cm_id at this point. */
2847 	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
2848 	cm_free_msg(msg);
2849 	if (ret)
2850 		ib_destroy_cm_id(&cm_id_priv->id);
2851 	return;
2852 discard:
2853 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2854 	cm_free_msg(msg);
2855 }
2856 
2857 static void cm_send_handler(struct ib_mad_agent *mad_agent,
2858 			    struct ib_mad_send_wc *mad_send_wc)
2859 {
2860 	struct ib_mad_send_buf *msg;
2861 
2862 	msg = (struct ib_mad_send_buf *)(unsigned long)mad_send_wc->wr_id;
2863 
2864 	switch (mad_send_wc->status) {
2865 	case IB_WC_SUCCESS:
2866 	case IB_WC_WR_FLUSH_ERR:
2867 		cm_free_msg(msg);
2868 		break;
2869 	default:
2870 		if (msg->context[0] && msg->context[1])
2871 			cm_process_send_error(msg, mad_send_wc->status);
2872 		else
2873 			cm_free_msg(msg);
2874 		break;
2875 	}
2876 }
2877 
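/* Dispatch a queued work item to the handler for its event type. */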
2878 static void cm_work_handler(void *data)
2879 {
2880 	struct cm_work *work = data;
2881 	int ret;
2882 
2883 	switch (work->cm_event.event) {
2884 	case IB_CM_REQ_RECEIVED:
2885 		ret = cm_req_handler(work);
2886 		break;
2887 	case IB_CM_MRA_RECEIVED:
2888 		ret = cm_mra_handler(work);
2889 		break;
2890 	case IB_CM_REJ_RECEIVED:
2891 		ret = cm_rej_handler(work);
2892 		break;
2893 	case IB_CM_REP_RECEIVED:
2894 		ret = cm_rep_handler(work);
2895 		break;
2896 	case IB_CM_RTU_RECEIVED:
2897 		ret = cm_rtu_handler(work);
2898 		break;
2899 	case IB_CM_USER_ESTABLISHED:
2900 		ret = cm_establish_handler(work);
2901 		break;
2902 	case IB_CM_DREQ_RECEIVED:
2903 		ret = cm_dreq_handler(work);
2904 		break;
2905 	case IB_CM_DREP_RECEIVED:
2906 		ret = cm_drep_handler(work);
2907 		break;
2908 	case IB_CM_SIDR_REQ_RECEIVED:
2909 		ret = cm_sidr_req_handler(work);
2910 		break;
2911 	case IB_CM_SIDR_REP_RECEIVED:
2912 		ret = cm_sidr_rep_handler(work);
2913 		break;
2914 	case IB_CM_LAP_RECEIVED:
2915 		ret = cm_lap_handler(work);
2916 		break;
2917 	case IB_CM_APR_RECEIVED:
2918 		ret = cm_apr_handler(work);
2919 		break;
2920 	case IB_CM_TIMEWAIT_EXIT:
2921 		ret = cm_timewait_handler(work);
2922 		break;
2923 	default:
2924 		ret = -EINVAL;
2925 		break;
2926 	}
2927 	if (ret)
2928 		cm_free_work(work);
2929 }
2930 
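/*
 * Force a cm_id that has sent a REP into the established state, for
 * clients that receive communication on the connection before the RTU
 * arrives.  The state changes immediately; event delivery is deferred
 * to the work queue to avoid deadlocking with cm_id destruction.
 */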
2931 int ib_cm_establish(struct ib_cm_id *cm_id)
2932 {
2933 	struct cm_id_private *cm_id_priv;
2934 	struct cm_work *work;
2935 	unsigned long flags;
2936 	int ret = 0;
2937 
2938 	work = kmalloc(sizeof *work, GFP_ATOMIC);
2939 	if (!work)
2940 		return -ENOMEM;
2941 
2942 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2943 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2944 	switch (cm_id->state) {
2946 	case IB_CM_REP_SENT:
2947 	case IB_CM_MRA_REP_RCVD:
2948 		cm_id->state = IB_CM_ESTABLISHED;
2949 		break;
2950 	case IB_CM_ESTABLISHED:
2951 		ret = -EISCONN;
2952 		break;
2953 	default:
2954 		ret = -EINVAL;
2955 		break;
2956 	}
2957 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2958 
2959 	if (ret) {
2960 		kfree(work);
2961 		goto out;
2962 	}
2963 
2964 	/*
2965 	 * The CM worker thread may try to destroy the cm_id before it
2966 	 * can execute this work item.  To prevent potential deadlock,
2967 	 * we need to find the cm_id once we're in the context of the
2968 	 * worker thread, rather than holding a reference on it.
2969 	 */
2970 	INIT_WORK(&work->work, cm_work_handler, work);
2971 	work->local_id = cm_id->local_id;
2972 	work->remote_id = cm_id->remote_id;
2973 	work->mad_recv_wc = NULL;
2974 	work->cm_event.event = IB_CM_USER_ESTABLISHED;
2975 	queue_work(cm.wq, &work->work);
2976 out:
2977 	return ret;
2978 }
2979 EXPORT_SYMBOL(ib_cm_establish);
2980 
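/*
 * Receive handler for CM MADs: map the attribute ID to an event type,
 * allocate a work item with room for any path records, and queue it
 * to the CM workqueue.
 */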
2981 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
2982 			    struct ib_mad_recv_wc *mad_recv_wc)
2983 {
2984 	struct cm_work *work;
2985 	enum ib_cm_event_type event;
2986 	int paths = 0;
2987 
2988 	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
2989 	case CM_REQ_ATTR_ID:
2990 		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
2991 						    alt_local_lid != 0);
2992 		event = IB_CM_REQ_RECEIVED;
2993 		break;
2994 	case CM_MRA_ATTR_ID:
2995 		event = IB_CM_MRA_RECEIVED;
2996 		break;
2997 	case CM_REJ_ATTR_ID:
2998 		event = IB_CM_REJ_RECEIVED;
2999 		break;
3000 	case CM_REP_ATTR_ID:
3001 		event = IB_CM_REP_RECEIVED;
3002 		break;
3003 	case CM_RTU_ATTR_ID:
3004 		event = IB_CM_RTU_RECEIVED;
3005 		break;
3006 	case CM_DREQ_ATTR_ID:
3007 		event = IB_CM_DREQ_RECEIVED;
3008 		break;
3009 	case CM_DREP_ATTR_ID:
3010 		event = IB_CM_DREP_RECEIVED;
3011 		break;
3012 	case CM_SIDR_REQ_ATTR_ID:
3013 		event = IB_CM_SIDR_REQ_RECEIVED;
3014 		break;
3015 	case CM_SIDR_REP_ATTR_ID:
3016 		event = IB_CM_SIDR_REP_RECEIVED;
3017 		break;
3018 	case CM_LAP_ATTR_ID:
3019 		paths = 1;
3020 		event = IB_CM_LAP_RECEIVED;
3021 		break;
3022 	case CM_APR_ATTR_ID:
3023 		event = IB_CM_APR_RECEIVED;
3024 		break;
3025 	default:
3026 		ib_free_recv_mad(mad_recv_wc);
3027 		return;
3028 	}
3029 
3030 	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
3031 		       GFP_KERNEL);
3032 	if (!work) {
3033 		ib_free_recv_mad(mad_recv_wc);
3034 		return;
3035 	}
3036 
3037 	INIT_WORK(&work->work, cm_work_handler, work);
3038 	work->cm_event.event = event;
3039 	work->mad_recv_wc = mad_recv_wc;
3040 	work->port = (struct cm_port *)mad_agent->context;
3041 	queue_work(cm.wq, &work->work);
3042 }
3043 
3044 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
3045 				struct ib_qp_attr *qp_attr,
3046 				int *qp_attr_mask)
3047 {
3048 	unsigned long flags;
3049 	int ret;
3050 
3051 	spin_lock_irqsave(&cm_id_priv->lock, flags);
3052 	switch (cm_id_priv->id.state) {
3053 	case IB_CM_REQ_SENT:
3054 	case IB_CM_MRA_REQ_RCVD:
3055 	case IB_CM_REQ_RCVD:
3056 	case IB_CM_MRA_REQ_SENT:
3057 	case IB_CM_REP_RCVD:
3058 	case IB_CM_MRA_REP_SENT:
3059 	case IB_CM_REP_SENT:
3060 	case IB_CM_MRA_REP_RCVD:
3061 	case IB_CM_ESTABLISHED:
3062 		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
3063 				IB_QP_PKEY_INDEX | IB_QP_PORT;
3064 		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
3065 		if (cm_id_priv->responder_resources)
3066 			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_WRITE |
3067 						    IB_ACCESS_REMOTE_READ;
3068 		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
3069 		qp_attr->port_num = cm_id_priv->av.port->port_num;
3070 		ret = 0;
3071 		break;
3072 	default:
3073 		ret = -EINVAL;
3074 		break;
3075 	}
3076 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3077 	return ret;
3078 }
3079 
3080 static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
3081 			       struct ib_qp_attr *qp_attr,
3082 			       int *qp_attr_mask)
3083 {
3084 	unsigned long flags;
3085 	int ret;
3086 
3087 	spin_lock_irqsave(&cm_id_priv->lock, flags);
3088 	switch (cm_id_priv->id.state) {
3089 	case IB_CM_REQ_RCVD:
3090 	case IB_CM_MRA_REQ_SENT:
3091 	case IB_CM_REP_RCVD:
3092 	case IB_CM_MRA_REP_SENT:
3093 	case IB_CM_REP_SENT:
3094 	case IB_CM_MRA_REP_RCVD:
3095 	case IB_CM_ESTABLISHED:
3096 		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
3097 				IB_QP_DEST_QPN | IB_QP_RQ_PSN |
3098 				IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
3099 		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
3100 		qp_attr->path_mtu = cm_id_priv->path_mtu;
3101 		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
3102 		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
3103 		qp_attr->max_dest_rd_atomic = cm_id_priv->responder_resources;
3104 		qp_attr->min_rnr_timer = 0;
3105 		if (cm_id_priv->alt_av.ah_attr.dlid) {
3106 			*qp_attr_mask |= IB_QP_ALT_PATH;
3107 			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3108 		}
3109 		ret = 0;
3110 		break;
3111 	default:
3112 		ret = -EINVAL;
3113 		break;
3114 	}
3115 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3116 	return ret;
3117 }
3118 
3119 static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3120 			       struct ib_qp_attr *qp_attr,
3121 			       int *qp_attr_mask)
3122 {
3123 	unsigned long flags;
3124 	int ret;
3125 
3126 	spin_lock_irqsave(&cm_id_priv->lock, flags);
3127 	switch (cm_id_priv->id.state) {
3128 	case IB_CM_REP_RCVD:
3129 	case IB_CM_MRA_REP_SENT:
3130 	case IB_CM_REP_SENT:
3131 	case IB_CM_MRA_REP_RCVD:
3132 	case IB_CM_ESTABLISHED:
3133 		*qp_attr_mask = IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
3134 				IB_QP_RNR_RETRY | IB_QP_SQ_PSN |
3135 				IB_QP_MAX_QP_RD_ATOMIC;
3136 		qp_attr->timeout = cm_id_priv->local_ack_timeout;
3137 		qp_attr->retry_cnt = cm_id_priv->retry_count;
3138 		qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
3139 		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
3140 		qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
3141 		if (cm_id_priv->alt_av.ah_attr.dlid) {
3142 			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
3143 			qp_attr->path_mig_state = IB_MIG_REARM;
3144 		}
3145 		ret = 0;
3146 		break;
3147 	default:
3148 		ret = -EINVAL;
3149 		break;
3150 	}
3151 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3152 	return ret;
3153 }
3154 
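/*
 * Fill in the attributes needed to transition a connection's QP
 * through the INIT, RTR, and RTS states, based on the current
 * connection state.  A typical caller transitions the QP one state at
 * a time, e.g. (sketch only, error handling omitted):
 *
 *	qp_attr.qp_state = IB_QPS_RTR;
 *	ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 */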
3155 int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
3156 		       struct ib_qp_attr *qp_attr,
3157 		       int *qp_attr_mask)
3158 {
3159 	struct cm_id_private *cm_id_priv;
3160 	int ret;
3161 
3162 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3163 	switch (qp_attr->qp_state) {
3164 	case IB_QPS_INIT:
3165 		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
3166 		break;
3167 	case IB_QPS_RTR:
3168 		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
3169 		break;
3170 	case IB_QPS_RTS:
3171 		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
3172 		break;
3173 	default:
3174 		ret = -EINVAL;
3175 		break;
3176 	}
3177 	return ret;
3178 }
3179 EXPORT_SYMBOL(ib_cm_init_qp_attr);
3180 
3181 static __be64 cm_get_ca_guid(struct ib_device *device)
3182 {
3183 	struct ib_device_attr *device_attr;
3184 	__be64 guid;
3185 	int ret;
3186 
3187 	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
3188 	if (!device_attr)
3189 		return 0;
3190 
3191 	ret = ib_query_device(device, device_attr);
3192 	guid = ret ? 0 : device_attr->node_guid;
3193 	kfree(device_attr);
3194 	return guid;
3195 }
3196 
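/*
 * Per-device initialization: register a CM MAD agent on each physical
 * port and advertise CM support in the port capability mask,
 * unwinding any already-initialized ports on failure.
 */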
3197 static void cm_add_one(struct ib_device *device)
3198 {
3199 	struct cm_device *cm_dev;
3200 	struct cm_port *port;
3201 	struct ib_mad_reg_req reg_req = {
3202 		.mgmt_class = IB_MGMT_CLASS_CM,
3203 		.mgmt_class_version = IB_CM_CLASS_VERSION
3204 	};
3205 	struct ib_port_modify port_modify = {
3206 		.set_port_cap_mask = IB_PORT_CM_SUP
3207 	};
3208 	unsigned long flags;
3209 	int ret;
3210 	u8 i;
3211 
3212 	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
3213 			 device->phys_port_cnt, GFP_KERNEL);
3214 	if (!cm_dev)
3215 		return;
3216 
3217 	cm_dev->device = device;
3218 	cm_dev->ca_guid = cm_get_ca_guid(device);
3219 	if (!cm_dev->ca_guid)
3220 		goto error1;
3221 
3222 	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
3223 	for (i = 1; i <= device->phys_port_cnt; i++) {
3224 		port = &cm_dev->port[i-1];
3225 		port->cm_dev = cm_dev;
3226 		port->port_num = i;
3227 		port->mad_agent = ib_register_mad_agent(device, i,
3228 							IB_QPT_GSI,
3229 							&reg_req,
3230 							0,
3231 							cm_send_handler,
3232 							cm_recv_handler,
3233 							port);
3234 		if (IS_ERR(port->mad_agent))
3235 			goto error2;
3236 
3237 		ret = ib_modify_port(device, i, 0, &port_modify);
3238 		if (ret)
3239 			goto error3;
3240 	}
3241 	ib_set_client_data(device, &cm_client, cm_dev);
3242 
3243 	write_lock_irqsave(&cm.device_lock, flags);
3244 	list_add_tail(&cm_dev->list, &cm.device_list);
3245 	write_unlock_irqrestore(&cm.device_lock, flags);
3246 	return;
3247 
3248 error3:
3249 	ib_unregister_mad_agent(port->mad_agent);
3250 error2:
3251 	port_modify.set_port_cap_mask = 0;
3252 	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
3253 	while (--i) {
3254 		port = &cm_dev->port[i-1];
3255 		ib_modify_port(device, port->port_num, 0, &port_modify);
3256 		ib_unregister_mad_agent(port->mad_agent);
3257 	}
3258 error1:
3259 	kfree(cm_dev);
3260 }
3261 
3262 static void cm_remove_one(struct ib_device *device)
3263 {
3264 	struct cm_device *cm_dev;
3265 	struct cm_port *port;
3266 	struct ib_port_modify port_modify = {
3267 		.clr_port_cap_mask = IB_PORT_CM_SUP
3268 	};
3269 	unsigned long flags;
3270 	int i;
3271 
3272 	cm_dev = ib_get_client_data(device, &cm_client);
3273 	if (!cm_dev)
3274 		return;
3275 
3276 	write_lock_irqsave(&cm.device_lock, flags);
3277 	list_del(&cm_dev->list);
3278 	write_unlock_irqrestore(&cm.device_lock, flags);
3279 
3280 	for (i = 1; i <= device->phys_port_cnt; i++) {
3281 		port = &cm_dev->port[i-1];
3282 		ib_modify_port(device, port->port_num, 0, &port_modify);
3283 		ib_unregister_mad_agent(port->mad_agent);
3284 	}
3285 	kfree(cm_dev);
3286 }
3287 
3288 static int __init ib_cm_init(void)
3289 {
3290 	int ret;
3291 
3292 	memset(&cm, 0, sizeof cm);
3293 	INIT_LIST_HEAD(&cm.device_list);
3294 	rwlock_init(&cm.device_lock);
3295 	spin_lock_init(&cm.lock);
3296 	cm.listen_service_table = RB_ROOT;
3297 	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
3298 	cm.remote_id_table = RB_ROOT;
3299 	cm.remote_qp_table = RB_ROOT;
3300 	cm.remote_sidr_table = RB_ROOT;
3301 	idr_init(&cm.local_id_table);
3302 	idr_pre_get(&cm.local_id_table, GFP_KERNEL);
3303 
3304 	cm.wq = create_workqueue("ib_cm");
3305 	if (!cm.wq)
3306 		return -ENOMEM;
3307 
3308 	ret = ib_register_client(&cm_client);
3309 	if (ret)
3310 		goto error;
3311 
3312 	return 0;
3313 error:
3314 	destroy_workqueue(cm.wq);
3315 	return ret;
3316 }
3317 
3318 static void __exit ib_cm_cleanup(void)
3319 {
3320 	flush_workqueue(cm.wq);
3321 	destroy_workqueue(cm.wq);
3322 	ib_unregister_client(&cm_client);
3323 }
3324 
3325 module_init(ib_cm_init);
3326 module_exit(ib_cm_cleanup);
3327 
3328