xref: /linux/drivers/infiniband/core/cm.c (revision b454cc6636d254fbf6049b73e9560aee76fb04a3)
1 /*
2  * Copyright (c) 2004-2006 Intel Corporation.  All rights reserved.
3  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
4  * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
5  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  *
35  * $Id: cm.c 4311 2005-12-05 18:42:01Z sean.hefty $
36  */
37 
38 #include <linux/completion.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/err.h>
41 #include <linux/idr.h>
42 #include <linux/interrupt.h>
43 #include <linux/pci.h>
44 #include <linux/random.h>
45 #include <linux/rbtree.h>
46 #include <linux/spinlock.h>
47 #include <linux/workqueue.h>
48 
49 #include <rdma/ib_cache.h>
50 #include <rdma/ib_cm.h>
51 #include "cm_msgs.h"
52 
53 MODULE_AUTHOR("Sean Hefty");
54 MODULE_DESCRIPTION("InfiniBand CM");
55 MODULE_LICENSE("Dual BSD/GPL");
56 
57 static void cm_add_one(struct ib_device *device);
58 static void cm_remove_one(struct ib_device *device);
59 
60 static struct ib_client cm_client = {
61 	.name   = "cm",
62 	.add    = cm_add_one,
63 	.remove = cm_remove_one
64 };
65 
66 static struct ib_cm {
67 	spinlock_t lock;
68 	struct list_head device_list;
69 	rwlock_t device_lock;
70 	struct rb_root listen_service_table;
71 	u64 listen_service_id;
72 	/* struct rb_root peer_service_table; todo: fix peer to peer */
73 	struct rb_root remote_qp_table;
74 	struct rb_root remote_id_table;
75 	struct rb_root remote_sidr_table;
76 	struct idr local_id_table;
77 	__be32 random_id_operand;
78 	struct list_head timewait_list;
79 	struct workqueue_struct *wq;
80 } cm;
81 
82 struct cm_port {
83 	struct cm_device *cm_dev;
84 	struct ib_mad_agent *mad_agent;
85 	u8 port_num;
86 };
87 
88 struct cm_device {
89 	struct list_head list;
90 	struct ib_device *device;
91 	__be64 ca_guid;
92 	struct cm_port port[0];
93 };
94 
95 struct cm_av {
96 	struct cm_port *port;
97 	union ib_gid dgid;
98 	struct ib_ah_attr ah_attr;
99 	u16 pkey_index;
100 	u8 packet_life_time;
101 };
102 
103 struct cm_work {
104 	struct delayed_work work;
105 	struct list_head list;
106 	struct cm_port *port;
107 	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
108 	__be32 local_id;			/* Established / timewait */
109 	__be32 remote_id;
110 	struct ib_cm_event cm_event;
111 	struct ib_sa_path_rec path[0];
112 };
113 
114 struct cm_timewait_info {
115 	struct cm_work work;			/* Must be first. */
116 	struct list_head list;
117 	struct rb_node remote_qp_node;
118 	struct rb_node remote_id_node;
119 	__be64 remote_ca_guid;
120 	__be32 remote_qpn;
121 	u8 inserted_remote_qp;
122 	u8 inserted_remote_id;
123 };
124 
125 struct cm_id_private {
126 	struct ib_cm_id	id;
127 
128 	struct rb_node service_node;
129 	struct rb_node sidr_id_node;
130 	spinlock_t lock;	/* Do not acquire inside cm.lock */
131 	struct completion comp;
132 	atomic_t refcount;
133 
134 	struct ib_mad_send_buf *msg;
135 	struct cm_timewait_info *timewait_info;
136 	/* todo: use alternate port on send failure */
137 	struct cm_av av;
138 	struct cm_av alt_av;
139 	struct ib_cm_compare_data *compare_data;
140 
141 	void *private_data;
142 	__be64 tid;
143 	__be32 local_qpn;
144 	__be32 remote_qpn;
145 	enum ib_qp_type qp_type;
146 	__be32 sq_psn;
147 	__be32 rq_psn;
148 	int timeout_ms;
149 	enum ib_mtu path_mtu;
150 	__be16 pkey;
151 	u8 private_data_len;
152 	u8 max_cm_retries;
153 	u8 peer_to_peer;
154 	u8 responder_resources;
155 	u8 initiator_depth;
156 	u8 retry_count;
157 	u8 rnr_retry_count;
158 	u8 service_timeout;
159 
160 	struct list_head work_list;
161 	atomic_t work_count;
162 };
163 
164 static void cm_work_handler(struct work_struct *work);
165 
166 static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
167 {
168 	if (atomic_dec_and_test(&cm_id_priv->refcount))
169 		complete(&cm_id_priv->comp);
170 }
171 
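/*
 * Allocate an address handle and MAD send buffer for an outbound CM
 * message on this cm_id's port.  Takes a reference on cm_id_priv that
 * is dropped by cm_free_msg().
 */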
172 static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
173 			struct ib_mad_send_buf **msg)
174 {
175 	struct ib_mad_agent *mad_agent;
176 	struct ib_mad_send_buf *m;
177 	struct ib_ah *ah;
178 
179 	mad_agent = cm_id_priv->av.port->mad_agent;
180 	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
181 	if (IS_ERR(ah))
182 		return PTR_ERR(ah);
183 
184 	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
185 			       cm_id_priv->av.pkey_index,
186 			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
187 			       GFP_ATOMIC);
188 	if (IS_ERR(m)) {
189 		ib_destroy_ah(ah);
190 		return PTR_ERR(m);
191 	}
192 
193 	/* Timeout set by caller if response is expected. */
194 	m->ah = ah;
195 	m->retries = cm_id_priv->max_cm_retries;
196 
197 	atomic_inc(&cm_id_priv->refcount);
198 	m->context[0] = cm_id_priv;
199 	*msg = m;
200 	return 0;
201 }
202 
203 static int cm_alloc_response_msg(struct cm_port *port,
204 				 struct ib_mad_recv_wc *mad_recv_wc,
205 				 struct ib_mad_send_buf **msg)
206 {
207 	struct ib_mad_send_buf *m;
208 	struct ib_ah *ah;
209 
210 	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
211 				  mad_recv_wc->recv_buf.grh, port->port_num);
212 	if (IS_ERR(ah))
213 		return PTR_ERR(ah);
214 
215 	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
216 			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
217 			       GFP_ATOMIC);
218 	if (IS_ERR(m)) {
219 		ib_destroy_ah(ah);
220 		return PTR_ERR(m);
221 	}
222 	m->ah = ah;
223 	*msg = m;
224 	return 0;
225 }
226 
227 static void cm_free_msg(struct ib_mad_send_buf *msg)
228 {
229 	ib_destroy_ah(msg->ah);
230 	if (msg->context[0])
231 		cm_deref_id(msg->context[0]);
232 	ib_free_send_mad(msg);
233 }
234 
235 static void * cm_copy_private_data(const void *private_data,
236 				   u8 private_data_len)
237 {
238 	void *data;
239 
240 	if (!private_data || !private_data_len)
241 		return NULL;
242 
243 	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
244 	if (!data)
245 		return ERR_PTR(-ENOMEM);
246 
247 	return data;
248 }
249 
250 static void cm_set_private_data(struct cm_id_private *cm_id_priv,
251 				 void *private_data, u8 private_data_len)
252 {
253 	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
254 		kfree(cm_id_priv->private_data);
255 
256 	cm_id_priv->private_data = private_data;
257 	cm_id_priv->private_data_len = private_data_len;
258 }
259 
260 static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
261 				    struct ib_grh *grh, struct cm_av *av)
262 {
263 	av->port = port;
264 	av->pkey_index = wc->pkey_index;
265 	ib_init_ah_from_wc(port->cm_dev->device, port->port_num, wc,
266 			   grh, &av->ah_attr);
267 }
268 
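/*
 * Resolve the port and address vector for a path record by searching the
 * cached GIDs of all registered devices for the path's SGID, then looking
 * up the matching pkey index on that port.
 */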
269 static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
270 {
271 	struct cm_device *cm_dev;
272 	struct cm_port *port = NULL;
273 	unsigned long flags;
274 	int ret;
275 	u8 p;
276 
277 	read_lock_irqsave(&cm.device_lock, flags);
278 	list_for_each_entry(cm_dev, &cm.device_list, list) {
279 		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
280 					&p, NULL)) {
281 			port = &cm_dev->port[p-1];
282 			break;
283 		}
284 	}
285 	read_unlock_irqrestore(&cm.device_lock, flags);
286 
287 	if (!port)
288 		return -EINVAL;
289 
290 	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
291 				  be16_to_cpu(path->pkey), &av->pkey_index);
292 	if (ret)
293 		return ret;
294 
295 	av->port = port;
296 	ib_init_ah_from_path(cm_dev->device, port->port_num, path,
297 			     &av->ah_attr);
298 	av->packet_life_time = path->packet_life_time;
299 	return 0;
300 }
301 
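/*
 * Allocate a local communication ID from the idr.  The value stored in
 * id.local_id is the idr index XORed with a random operand so that active
 * IDs are not trivially guessable by remote nodes.
 */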
302 static int cm_alloc_id(struct cm_id_private *cm_id_priv)
303 {
304 	unsigned long flags;
305 	int ret, id;
306 	static int next_id;
307 
308 	do {
309 		spin_lock_irqsave(&cm.lock, flags);
310 		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
311 					next_id++, &id);
312 		spin_unlock_irqrestore(&cm.lock, flags);
313 	} while ((ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL));
314 
315 	cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand);
316 	return ret;
317 }
318 
319 static void cm_free_id(__be32 local_id)
320 {
321 	unsigned long flags;
322 
323 	spin_lock_irqsave(&cm.lock, flags);
324 	idr_remove(&cm.local_id_table,
325 		   (__force int) (local_id ^ cm.random_id_operand));
326 	spin_unlock_irqrestore(&cm.lock, flags);
327 }
328 
329 static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
330 {
331 	struct cm_id_private *cm_id_priv;
332 
333 	cm_id_priv = idr_find(&cm.local_id_table,
334 			      (__force int) (local_id ^ cm.random_id_operand));
335 	if (cm_id_priv) {
336 		if (cm_id_priv->id.remote_id == remote_id)
337 			atomic_inc(&cm_id_priv->refcount);
338 		else
339 			cm_id_priv = NULL;
340 	}
341 
342 	return cm_id_priv;
343 }
344 
345 static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
346 {
347 	struct cm_id_private *cm_id_priv;
348 	unsigned long flags;
349 
350 	spin_lock_irqsave(&cm.lock, flags);
351 	cm_id_priv = cm_get_id(local_id, remote_id);
352 	spin_unlock_irqrestore(&cm.lock, flags);
353 
354 	return cm_id_priv;
355 }
356 
357 static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
358 {
359 	int i;
360 
361 	for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
362 		((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
363 					     ((unsigned long *) mask)[i];
364 }
365 
366 static int cm_compare_data(struct ib_cm_compare_data *src_data,
367 			   struct ib_cm_compare_data *dst_data)
368 {
369 	u8 src[IB_CM_COMPARE_SIZE];
370 	u8 dst[IB_CM_COMPARE_SIZE];
371 
372 	if (!src_data || !dst_data)
373 		return 0;
374 
375 	cm_mask_copy(src, src_data->data, dst_data->mask);
376 	cm_mask_copy(dst, dst_data->data, src_data->mask);
377 	return memcmp(src, dst, IB_CM_COMPARE_SIZE);
378 }
379 
380 static int cm_compare_private_data(u8 *private_data,
381 				   struct ib_cm_compare_data *dst_data)
382 {
383 	u8 src[IB_CM_COMPARE_SIZE];
384 
385 	if (!dst_data)
386 		return 0;
387 
388 	cm_mask_copy(src, private_data, dst_data->mask);
389 	return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
390 }
391 
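/*
 * Insert a listening cm_id into the service tree, keyed by device,
 * service ID and private data compare information.  Returns an existing
 * matching entry instead of inserting if one is already present.
 */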
392 static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
393 {
394 	struct rb_node **link = &cm.listen_service_table.rb_node;
395 	struct rb_node *parent = NULL;
396 	struct cm_id_private *cur_cm_id_priv;
397 	__be64 service_id = cm_id_priv->id.service_id;
398 	__be64 service_mask = cm_id_priv->id.service_mask;
399 	int data_cmp;
400 
401 	while (*link) {
402 		parent = *link;
403 		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
404 					  service_node);
405 		data_cmp = cm_compare_data(cm_id_priv->compare_data,
406 					   cur_cm_id_priv->compare_data);
407 		if ((cur_cm_id_priv->id.service_mask & service_id) ==
408 		    (service_mask & cur_cm_id_priv->id.service_id) &&
409 		    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
410 		    !data_cmp)
411 			return cur_cm_id_priv;
412 
413 		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
414 			link = &(*link)->rb_left;
415 		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
416 			link = &(*link)->rb_right;
417 		else if (service_id < cur_cm_id_priv->id.service_id)
418 			link = &(*link)->rb_left;
419 		else if (service_id > cur_cm_id_priv->id.service_id)
420 			link = &(*link)->rb_right;
421 		else if (data_cmp < 0)
422 			link = &(*link)->rb_left;
423 		else
424 			link = &(*link)->rb_right;
425 	}
426 	rb_link_node(&cm_id_priv->service_node, parent, link);
427 	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
428 	return NULL;
429 }
430 
431 static struct cm_id_private * cm_find_listen(struct ib_device *device,
432 					     __be64 service_id,
433 					     u8 *private_data)
434 {
435 	struct rb_node *node = cm.listen_service_table.rb_node;
436 	struct cm_id_private *cm_id_priv;
437 	int data_cmp;
438 
439 	while (node) {
440 		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
441 		data_cmp = cm_compare_private_data(private_data,
442 						   cm_id_priv->compare_data);
443 		if ((cm_id_priv->id.service_mask & service_id) ==
444 		     cm_id_priv->id.service_id &&
445 		    (cm_id_priv->id.device == device) && !data_cmp)
446 			return cm_id_priv;
447 
448 		if (device < cm_id_priv->id.device)
449 			node = node->rb_left;
450 		else if (device > cm_id_priv->id.device)
451 			node = node->rb_right;
452 		else if (service_id < cm_id_priv->id.service_id)
453 			node = node->rb_left;
454 		else if (service_id > cm_id_priv->id.service_id)
455 			node = node->rb_right;
456 		else if (data_cmp < 0)
457 			node = node->rb_left;
458 		else
459 			node = node->rb_right;
460 	}
461 	return NULL;
462 }
463 
464 static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
465 						     *timewait_info)
466 {
467 	struct rb_node **link = &cm.remote_id_table.rb_node;
468 	struct rb_node *parent = NULL;
469 	struct cm_timewait_info *cur_timewait_info;
470 	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
471 	__be32 remote_id = timewait_info->work.remote_id;
472 
473 	while (*link) {
474 		parent = *link;
475 		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
476 					     remote_id_node);
477 		if (remote_id < cur_timewait_info->work.remote_id)
478 			link = &(*link)->rb_left;
479 		else if (remote_id > cur_timewait_info->work.remote_id)
480 			link = &(*link)->rb_right;
481 		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
482 			link = &(*link)->rb_left;
483 		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
484 			link = &(*link)->rb_right;
485 		else
486 			return cur_timewait_info;
487 	}
488 	timewait_info->inserted_remote_id = 1;
489 	rb_link_node(&timewait_info->remote_id_node, parent, link);
490 	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
491 	return NULL;
492 }
493 
494 static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
495 						   __be32 remote_id)
496 {
497 	struct rb_node *node = cm.remote_id_table.rb_node;
498 	struct cm_timewait_info *timewait_info;
499 
500 	while (node) {
501 		timewait_info = rb_entry(node, struct cm_timewait_info,
502 					 remote_id_node);
503 		if (remote_id < timewait_info->work.remote_id)
504 			node = node->rb_left;
505 		else if (remote_id > timewait_info->work.remote_id)
506 			node = node->rb_right;
507 		else if (remote_ca_guid < timewait_info->remote_ca_guid)
508 			node = node->rb_left;
509 		else if (remote_ca_guid > timewait_info->remote_ca_guid)
510 			node = node->rb_right;
511 		else
512 			return timewait_info;
513 	}
514 	return NULL;
515 }
516 
517 static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
518 						      *timewait_info)
519 {
520 	struct rb_node **link = &cm.remote_qp_table.rb_node;
521 	struct rb_node *parent = NULL;
522 	struct cm_timewait_info *cur_timewait_info;
523 	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
524 	__be32 remote_qpn = timewait_info->remote_qpn;
525 
526 	while (*link) {
527 		parent = *link;
528 		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
529 					     remote_qp_node);
530 		if (remote_qpn < cur_timewait_info->remote_qpn)
531 			link = &(*link)->rb_left;
532 		else if (remote_qpn > cur_timewait_info->remote_qpn)
533 			link = &(*link)->rb_right;
534 		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
535 			link = &(*link)->rb_left;
536 		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
537 			link = &(*link)->rb_right;
538 		else
539 			return cur_timewait_info;
540 	}
541 	timewait_info->inserted_remote_qp = 1;
542 	rb_link_node(&timewait_info->remote_qp_node, parent, link);
543 	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
544 	return NULL;
545 }
546 
547 static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
548 						    *cm_id_priv)
549 {
550 	struct rb_node **link = &cm.remote_sidr_table.rb_node;
551 	struct rb_node *parent = NULL;
552 	struct cm_id_private *cur_cm_id_priv;
553 	union ib_gid *port_gid = &cm_id_priv->av.dgid;
554 	__be32 remote_id = cm_id_priv->id.remote_id;
555 
556 	while (*link) {
557 		parent = *link;
558 		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
559 					  sidr_id_node);
560 		if (remote_id < cur_cm_id_priv->id.remote_id)
561 			link = &(*link)->rb_left;
562 		else if (remote_id > cur_cm_id_priv->id.remote_id)
563 			link = &(*link)->rb_right;
564 		else {
565 			int cmp;
566 			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
567 				     sizeof *port_gid);
568 			if (cmp < 0)
569 				link = &(*link)->rb_left;
570 			else if (cmp > 0)
571 				link = &(*link)->rb_right;
572 			else
573 				return cur_cm_id_priv;
574 		}
575 	}
576 	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
577 	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
578 	return NULL;
579 }
580 
581 static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
582 			       enum ib_cm_sidr_status status)
583 {
584 	struct ib_cm_sidr_rep_param param;
585 
586 	memset(&param, 0, sizeof param);
587 	param.status = status;
588 	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
589 }
590 
591 struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
592 				 ib_cm_handler cm_handler,
593 				 void *context)
594 {
595 	struct cm_id_private *cm_id_priv;
596 	int ret;
597 
598 	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
599 	if (!cm_id_priv)
600 		return ERR_PTR(-ENOMEM);
601 
602 	cm_id_priv->id.state = IB_CM_IDLE;
603 	cm_id_priv->id.device = device;
604 	cm_id_priv->id.cm_handler = cm_handler;
605 	cm_id_priv->id.context = context;
606 	cm_id_priv->id.remote_cm_qpn = 1;
607 	ret = cm_alloc_id(cm_id_priv);
608 	if (ret)
609 		goto error;
610 
611 	spin_lock_init(&cm_id_priv->lock);
612 	init_completion(&cm_id_priv->comp);
613 	INIT_LIST_HEAD(&cm_id_priv->work_list);
614 	atomic_set(&cm_id_priv->work_count, -1);
615 	atomic_set(&cm_id_priv->refcount, 1);
616 	return &cm_id_priv->id;
617 
618 error:
619 	kfree(cm_id_priv);
620 	return ERR_PTR(-ENOMEM);
621 }
622 EXPORT_SYMBOL(ib_create_cm_id);
623 
624 static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
625 {
626 	struct cm_work *work;
627 
628 	if (list_empty(&cm_id_priv->work_list))
629 		return NULL;
630 
631 	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
632 	list_del(&work->list);
633 	return work;
634 }
635 
636 static void cm_free_work(struct cm_work *work)
637 {
638 	if (work->mad_recv_wc)
639 		ib_free_recv_mad(work->mad_recv_wc);
640 	kfree(work);
641 }
642 
643 static inline int cm_convert_to_ms(int iba_time)
644 {
645 	/* approximate conversion to ms from 4.096us x 2^iba_time */
646 	return 1 << max(iba_time - 8, 0);
647 }
648 
649 static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
650 {
651 	if (timewait_info->inserted_remote_id) {
652 		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
653 		timewait_info->inserted_remote_id = 0;
654 	}
655 
656 	if (timewait_info->inserted_remote_qp) {
657 		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
658 		timewait_info->inserted_remote_qp = 0;
659 	}
660 }
661 
662 static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
663 {
664 	struct cm_timewait_info *timewait_info;
665 
666 	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
667 	if (!timewait_info)
668 		return ERR_PTR(-ENOMEM);
669 
670 	timewait_info->work.local_id = local_id;
671 	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
672 	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
673 	return timewait_info;
674 }
675 
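/*
 * Move the connection into timewait: queue the timewait_info on the
 * global list and schedule delayed work to exit timewait after roughly
 * twice the packet lifetime.  Ownership of timewait_info passes to the
 * scheduled work.
 */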
676 static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
677 {
678 	int wait_time;
679 	unsigned long flags;
680 
681 	spin_lock_irqsave(&cm.lock, flags);
682 	cm_cleanup_timewait(cm_id_priv->timewait_info);
683 	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
684 	spin_unlock_irqrestore(&cm.lock, flags);
685 
686 	/*
687 	 * The cm_id could be destroyed by the user before we exit timewait.
688 	 * To protect against this, we search for the cm_id after exiting
689 	 * timewait before notifying the user that we've exited timewait.
690 	 */
691 	cm_id_priv->id.state = IB_CM_TIMEWAIT;
692 	wait_time = cm_convert_to_ms(cm_id_priv->av.packet_life_time + 1);
693 	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
694 			   msecs_to_jiffies(wait_time));
695 	cm_id_priv->timewait_info = NULL;
696 }
697 
698 static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
699 {
700 	unsigned long flags;
701 
702 	cm_id_priv->id.state = IB_CM_IDLE;
703 	if (cm_id_priv->timewait_info) {
704 		spin_lock_irqsave(&cm.lock, flags);
705 		cm_cleanup_timewait(cm_id_priv->timewait_info);
706 		spin_unlock_irqrestore(&cm.lock, flags);
707 		kfree(cm_id_priv->timewait_info);
708 		cm_id_priv->timewait_info = NULL;
709 	}
710 }
711 
712 static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
713 {
714 	struct cm_id_private *cm_id_priv;
715 	struct cm_work *work;
716 	unsigned long flags;
717 
718 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
719 retest:
720 	spin_lock_irqsave(&cm_id_priv->lock, flags);
721 	switch (cm_id->state) {
722 	case IB_CM_LISTEN:
723 		cm_id->state = IB_CM_IDLE;
724 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
725 		spin_lock_irqsave(&cm.lock, flags);
726 		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
727 		spin_unlock_irqrestore(&cm.lock, flags);
728 		break;
729 	case IB_CM_SIDR_REQ_SENT:
730 		cm_id->state = IB_CM_IDLE;
731 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
732 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
733 		break;
734 	case IB_CM_SIDR_REQ_RCVD:
735 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
736 		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
737 		break;
738 	case IB_CM_REQ_SENT:
739 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
740 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
741 		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
742 			       &cm_id_priv->av.port->cm_dev->ca_guid,
743 			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
744 			       NULL, 0);
745 		break;
746 	case IB_CM_REQ_RCVD:
747 		if (err == -ENOMEM) {
748 			/* Do not reject, to allow future retries. */
749 			cm_reset_to_idle(cm_id_priv);
750 			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
751 		} else {
752 			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
753 			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
754 				       NULL, 0, NULL, 0);
755 		}
756 		break;
757 	case IB_CM_MRA_REQ_RCVD:
758 	case IB_CM_REP_SENT:
759 	case IB_CM_MRA_REP_RCVD:
760 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
761 		/* Fall through */
762 	case IB_CM_MRA_REQ_SENT:
763 	case IB_CM_REP_RCVD:
764 	case IB_CM_MRA_REP_SENT:
765 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
766 		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
767 			       NULL, 0, NULL, 0);
768 		break;
769 	case IB_CM_ESTABLISHED:
770 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
771 		ib_send_cm_dreq(cm_id, NULL, 0);
772 		goto retest;
773 	case IB_CM_DREQ_SENT:
774 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
775 		cm_enter_timewait(cm_id_priv);
776 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
777 		break;
778 	case IB_CM_DREQ_RCVD:
779 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
780 		ib_send_cm_drep(cm_id, NULL, 0);
781 		break;
782 	default:
783 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
784 		break;
785 	}
786 
787 	cm_free_id(cm_id->local_id);
788 	cm_deref_id(cm_id_priv);
789 	wait_for_completion(&cm_id_priv->comp);
790 	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
791 		cm_free_work(work);
792 	kfree(cm_id_priv->compare_data);
793 	kfree(cm_id_priv->private_data);
794 	kfree(cm_id_priv);
795 }
796 
797 void ib_destroy_cm_id(struct ib_cm_id *cm_id)
798 {
799 	cm_destroy_id(cm_id, 0);
800 }
801 EXPORT_SYMBOL(ib_destroy_cm_id);
802 
803 int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
804 		 struct ib_cm_compare_data *compare_data)
805 {
806 	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
807 	unsigned long flags;
808 	int ret = 0;
809 
810 	service_mask = service_mask ? service_mask :
811 		       __constant_cpu_to_be64(~0ULL);
812 	service_id &= service_mask;
813 	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
814 	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
815 		return -EINVAL;
816 
817 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
818 	if (cm_id->state != IB_CM_IDLE)
819 		return -EINVAL;
820 
821 	if (compare_data) {
822 		cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
823 						   GFP_KERNEL);
824 		if (!cm_id_priv->compare_data)
825 			return -ENOMEM;
826 		cm_mask_copy(cm_id_priv->compare_data->data,
827 			     compare_data->data, compare_data->mask);
828 		memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
829 		       IB_CM_COMPARE_SIZE);
830 	}
831 
832 	cm_id->state = IB_CM_LISTEN;
833 
834 	spin_lock_irqsave(&cm.lock, flags);
835 	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
836 		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
837 		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
838 	} else {
839 		cm_id->service_id = service_id;
840 		cm_id->service_mask = service_mask;
841 	}
842 	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
843 	spin_unlock_irqrestore(&cm.lock, flags);
844 
845 	if (cur_cm_id_priv) {
846 		cm_id->state = IB_CM_IDLE;
847 		kfree(cm_id_priv->compare_data);
848 		cm_id_priv->compare_data = NULL;
849 		ret = -EBUSY;
850 	}
851 	return ret;
852 }
853 EXPORT_SYMBOL(ib_cm_listen);
854 
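/*
 * Build the 64-bit MAD transaction ID: the agent's hi_tid occupies the
 * upper 32 bits, the local communication ID the lower bits, with the
 * message sequence encoded in bits 30-31.
 */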
855 static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
856 			  enum cm_msg_sequence msg_seq)
857 {
858 	u64 hi_tid, low_tid;
859 
860 	hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
861 	low_tid  = (u64) ((__force u32)cm_id_priv->id.local_id |
862 			  (msg_seq << 30));
863 	return cpu_to_be64(hi_tid | low_tid);
864 }
865 
866 static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
867 			      __be16 attr_id, __be64 tid)
868 {
869 	hdr->base_version  = IB_MGMT_BASE_VERSION;
870 	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
871 	hdr->class_version = IB_CM_CLASS_VERSION;
872 	hdr->method	   = IB_MGMT_METHOD_SEND;
873 	hdr->attr_id	   = attr_id;
874 	hdr->tid	   = tid;
875 }
876 
877 static void cm_format_req(struct cm_req_msg *req_msg,
878 			  struct cm_id_private *cm_id_priv,
879 			  struct ib_cm_req_param *param)
880 {
881 	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
882 			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));
883 
884 	req_msg->local_comm_id = cm_id_priv->id.local_id;
885 	req_msg->service_id = param->service_id;
886 	req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
887 	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
888 	cm_req_set_resp_res(req_msg, param->responder_resources);
889 	cm_req_set_init_depth(req_msg, param->initiator_depth);
890 	cm_req_set_remote_resp_timeout(req_msg,
891 				       param->remote_cm_response_timeout);
892 	cm_req_set_qp_type(req_msg, param->qp_type);
893 	cm_req_set_flow_ctrl(req_msg, param->flow_control);
894 	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
895 	cm_req_set_local_resp_timeout(req_msg,
896 				      param->local_cm_response_timeout);
897 	cm_req_set_retry_count(req_msg, param->retry_count);
898 	req_msg->pkey = param->primary_path->pkey;
899 	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
900 	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
901 	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
902 	cm_req_set_srq(req_msg, param->srq);
903 
904 	req_msg->primary_local_lid = param->primary_path->slid;
905 	req_msg->primary_remote_lid = param->primary_path->dlid;
906 	req_msg->primary_local_gid = param->primary_path->sgid;
907 	req_msg->primary_remote_gid = param->primary_path->dgid;
908 	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
909 	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
910 	req_msg->primary_traffic_class = param->primary_path->traffic_class;
911 	req_msg->primary_hop_limit = param->primary_path->hop_limit;
912 	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
913 	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
914 	cm_req_set_primary_local_ack_timeout(req_msg,
915 		min(31, param->primary_path->packet_life_time + 1));
916 
917 	if (param->alternate_path) {
918 		req_msg->alt_local_lid = param->alternate_path->slid;
919 		req_msg->alt_remote_lid = param->alternate_path->dlid;
920 		req_msg->alt_local_gid = param->alternate_path->sgid;
921 		req_msg->alt_remote_gid = param->alternate_path->dgid;
922 		cm_req_set_alt_flow_label(req_msg,
923 					  param->alternate_path->flow_label);
924 		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
925 		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
926 		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
927 		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
928 		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
929 		cm_req_set_alt_local_ack_timeout(req_msg,
930 			min(31, param->alternate_path->packet_life_time + 1));
931 	}
932 
933 	if (param->private_data && param->private_data_len)
934 		memcpy(req_msg->private_data, param->private_data,
935 		       param->private_data_len);
936 }
937 
938 static int cm_validate_req_param(struct ib_cm_req_param *param)
939 {
940 	/* peer-to-peer not supported */
941 	if (param->peer_to_peer)
942 		return -EINVAL;
943 
944 	if (!param->primary_path)
945 		return -EINVAL;
946 
947 	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
948 		return -EINVAL;
949 
950 	if (param->private_data &&
951 	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
952 		return -EINVAL;
953 
954 	if (param->alternate_path &&
955 	    (param->alternate_path->pkey != param->primary_path->pkey ||
956 	     param->alternate_path->mtu != param->primary_path->mtu))
957 		return -EINVAL;
958 
959 	return 0;
960 }
961 
962 int ib_send_cm_req(struct ib_cm_id *cm_id,
963 		   struct ib_cm_req_param *param)
964 {
965 	struct cm_id_private *cm_id_priv;
966 	struct cm_req_msg *req_msg;
967 	unsigned long flags;
968 	int ret;
969 
970 	ret = cm_validate_req_param(param);
971 	if (ret)
972 		return ret;
973 
974 	/* Verify that we're not in timewait. */
975 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
976 	spin_lock_irqsave(&cm_id_priv->lock, flags);
977 	if (cm_id->state != IB_CM_IDLE) {
978 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
979 		ret = -EINVAL;
980 		goto out;
981 	}
982 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
983 
984 	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
985 							    id.local_id);
986 	if (IS_ERR(cm_id_priv->timewait_info)) {
987 		ret = PTR_ERR(cm_id_priv->timewait_info);
988 		goto out;
989 	}
990 
991 	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
992 	if (ret)
993 		goto error1;
994 	if (param->alternate_path) {
995 		ret = cm_init_av_by_path(param->alternate_path,
996 					 &cm_id_priv->alt_av);
997 		if (ret)
998 			goto error1;
999 	}
1000 	cm_id->service_id = param->service_id;
1001 	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
1002 	cm_id_priv->timeout_ms = cm_convert_to_ms(
1003 				    param->primary_path->packet_life_time) * 2 +
1004 				 cm_convert_to_ms(
1005 				    param->remote_cm_response_timeout);
1006 	cm_id_priv->max_cm_retries = param->max_cm_retries;
1007 	cm_id_priv->initiator_depth = param->initiator_depth;
1008 	cm_id_priv->responder_resources = param->responder_resources;
1009 	cm_id_priv->retry_count = param->retry_count;
1010 	cm_id_priv->path_mtu = param->primary_path->mtu;
1011 	cm_id_priv->pkey = param->primary_path->pkey;
1012 	cm_id_priv->qp_type = param->qp_type;
1013 
1014 	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
1015 	if (ret)
1016 		goto error1;
1017 
1018 	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
1019 	cm_format_req(req_msg, cm_id_priv, param);
1020 	cm_id_priv->tid = req_msg->hdr.tid;
1021 	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
1022 	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;
1023 
1024 	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
1025 	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
1026 
1027 	spin_lock_irqsave(&cm_id_priv->lock, flags);
1028 	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
1029 	if (ret) {
1030 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1031 		goto error2;
1032 	}
1033 	BUG_ON(cm_id->state != IB_CM_IDLE);
1034 	cm_id->state = IB_CM_REQ_SENT;
1035 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1036 	return 0;
1037 
1038 error2:	cm_free_msg(cm_id_priv->msg);
1039 error1:	kfree(cm_id_priv->timewait_info);
1040 out:	return ret;
1041 }
1042 EXPORT_SYMBOL(ib_send_cm_req);
1043 
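/*
 * Send a REJ in response to a received CM message that cannot be matched
 * to local cm_id state, copying the communication IDs and TID from the
 * received MAD.
 */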
1044 static int cm_issue_rej(struct cm_port *port,
1045 			struct ib_mad_recv_wc *mad_recv_wc,
1046 			enum ib_cm_rej_reason reason,
1047 			enum cm_msg_response msg_rejected,
1048 			void *ari, u8 ari_length)
1049 {
1050 	struct ib_mad_send_buf *msg = NULL;
1051 	struct cm_rej_msg *rej_msg, *rcv_msg;
1052 	int ret;
1053 
1054 	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
1055 	if (ret)
1056 		return ret;
1057 
1058 	/* We just need common CM header information.  Cast to any message. */
1059 	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
1060 	rej_msg = (struct cm_rej_msg *) msg->mad;
1061 
1062 	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
1063 	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
1064 	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
1065 	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
1066 	rej_msg->reason = cpu_to_be16(reason);
1067 
1068 	if (ari && ari_length) {
1069 		cm_rej_set_reject_info_len(rej_msg, ari_length);
1070 		memcpy(rej_msg->ari, ari, ari_length);
1071 	}
1072 
1073 	ret = ib_post_send_mad(msg, NULL);
1074 	if (ret)
1075 		cm_free_msg(msg);
1076 
1077 	return ret;
1078 }
1079 
1080 static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
1081 				    __be32 local_qpn, __be32 remote_qpn)
1082 {
1083 	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
1084 		((local_ca_guid == remote_ca_guid) &&
1085 		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
1086 }
1087 
1088 static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
1089 					    struct ib_sa_path_rec *primary_path,
1090 					    struct ib_sa_path_rec *alt_path)
1091 {
1092 	memset(primary_path, 0, sizeof *primary_path);
1093 	primary_path->dgid = req_msg->primary_local_gid;
1094 	primary_path->sgid = req_msg->primary_remote_gid;
1095 	primary_path->dlid = req_msg->primary_local_lid;
1096 	primary_path->slid = req_msg->primary_remote_lid;
1097 	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
1098 	primary_path->hop_limit = req_msg->primary_hop_limit;
1099 	primary_path->traffic_class = req_msg->primary_traffic_class;
1100 	primary_path->reversible = 1;
1101 	primary_path->pkey = req_msg->pkey;
1102 	primary_path->sl = cm_req_get_primary_sl(req_msg);
1103 	primary_path->mtu_selector = IB_SA_EQ;
1104 	primary_path->mtu = cm_req_get_path_mtu(req_msg);
1105 	primary_path->rate_selector = IB_SA_EQ;
1106 	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
1107 	primary_path->packet_life_time_selector = IB_SA_EQ;
1108 	primary_path->packet_life_time =
1109 		cm_req_get_primary_local_ack_timeout(req_msg);
1110 	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
1111 
1112 	if (req_msg->alt_local_lid) {
1113 		memset(alt_path, 0, sizeof *alt_path);
1114 		alt_path->dgid = req_msg->alt_local_gid;
1115 		alt_path->sgid = req_msg->alt_remote_gid;
1116 		alt_path->dlid = req_msg->alt_local_lid;
1117 		alt_path->slid = req_msg->alt_remote_lid;
1118 		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
1119 		alt_path->hop_limit = req_msg->alt_hop_limit;
1120 		alt_path->traffic_class = req_msg->alt_traffic_class;
1121 		alt_path->reversible = 1;
1122 		alt_path->pkey = req_msg->pkey;
1123 		alt_path->sl = cm_req_get_alt_sl(req_msg);
1124 		alt_path->mtu_selector = IB_SA_EQ;
1125 		alt_path->mtu = cm_req_get_path_mtu(req_msg);
1126 		alt_path->rate_selector = IB_SA_EQ;
1127 		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
1128 		alt_path->packet_life_time_selector = IB_SA_EQ;
1129 		alt_path->packet_life_time =
1130 			cm_req_get_alt_local_ack_timeout(req_msg);
1131 		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
1132 	}
1133 }
1134 
1135 static void cm_format_req_event(struct cm_work *work,
1136 				struct cm_id_private *cm_id_priv,
1137 				struct ib_cm_id *listen_id)
1138 {
1139 	struct cm_req_msg *req_msg;
1140 	struct ib_cm_req_event_param *param;
1141 
1142 	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1143 	param = &work->cm_event.param.req_rcvd;
1144 	param->listen_id = listen_id;
1145 	param->port = cm_id_priv->av.port->port_num;
1146 	param->primary_path = &work->path[0];
1147 	if (req_msg->alt_local_lid)
1148 		param->alternate_path = &work->path[1];
1149 	else
1150 		param->alternate_path = NULL;
1151 	param->remote_ca_guid = req_msg->local_ca_guid;
1152 	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
1153 	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
1154 	param->qp_type = cm_req_get_qp_type(req_msg);
1155 	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
1156 	param->responder_resources = cm_req_get_init_depth(req_msg);
1157 	param->initiator_depth = cm_req_get_resp_res(req_msg);
1158 	param->local_cm_response_timeout =
1159 					cm_req_get_remote_resp_timeout(req_msg);
1160 	param->flow_control = cm_req_get_flow_ctrl(req_msg);
1161 	param->remote_cm_response_timeout =
1162 					cm_req_get_local_resp_timeout(req_msg);
1163 	param->retry_count = cm_req_get_retry_count(req_msg);
1164 	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
1165 	param->srq = cm_req_get_srq(req_msg);
1166 	work->cm_event.private_data = &req_msg->private_data;
1167 }
1168 
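/*
 * Deliver the current event to the consumer's callback, then drain any
 * events queued on the cm_id while it was busy.  A nonzero return from
 * the callback destroys the cm_id.
 */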
1169 static void cm_process_work(struct cm_id_private *cm_id_priv,
1170 			    struct cm_work *work)
1171 {
1172 	unsigned long flags;
1173 	int ret;
1174 
1175 	/* We will typically only have the current event to report. */
1176 	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
1177 	cm_free_work(work);
1178 
1179 	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
1180 		spin_lock_irqsave(&cm_id_priv->lock, flags);
1181 		work = cm_dequeue_work(cm_id_priv);
1182 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1183 		BUG_ON(!work);
1184 		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
1185 						&work->cm_event);
1186 		cm_free_work(work);
1187 	}
1188 	cm_deref_id(cm_id_priv);
1189 	if (ret)
1190 		cm_destroy_id(&cm_id_priv->id, ret);
1191 }
1192 
1193 static void cm_format_mra(struct cm_mra_msg *mra_msg,
1194 			  struct cm_id_private *cm_id_priv,
1195 			  enum cm_msg_response msg_mraed, u8 service_timeout,
1196 			  const void *private_data, u8 private_data_len)
1197 {
1198 	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
1199 	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
1200 	mra_msg->local_comm_id = cm_id_priv->id.local_id;
1201 	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
1202 	cm_mra_set_service_timeout(mra_msg, service_timeout);
1203 
1204 	if (private_data && private_data_len)
1205 		memcpy(mra_msg->private_data, private_data, private_data_len);
1206 }
1207 
1208 static void cm_format_rej(struct cm_rej_msg *rej_msg,
1209 			  struct cm_id_private *cm_id_priv,
1210 			  enum ib_cm_rej_reason reason,
1211 			  void *ari,
1212 			  u8 ari_length,
1213 			  const void *private_data,
1214 			  u8 private_data_len)
1215 {
1216 	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
1217 	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;
1218 
1219 	switch (cm_id_priv->id.state) {
1220 	case IB_CM_REQ_RCVD:
1221 		rej_msg->local_comm_id = 0;
1222 		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1223 		break;
1224 	case IB_CM_MRA_REQ_SENT:
1225 		rej_msg->local_comm_id = cm_id_priv->id.local_id;
1226 		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1227 		break;
1228 	case IB_CM_REP_RCVD:
1229 	case IB_CM_MRA_REP_SENT:
1230 		rej_msg->local_comm_id = cm_id_priv->id.local_id;
1231 		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
1232 		break;
1233 	default:
1234 		rej_msg->local_comm_id = cm_id_priv->id.local_id;
1235 		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
1236 		break;
1237 	}
1238 
1239 	rej_msg->reason = cpu_to_be16(reason);
1240 	if (ari && ari_length) {
1241 		cm_rej_set_reject_info_len(rej_msg, ari_length);
1242 		memcpy(rej_msg->ari, ari, ari_length);
1243 	}
1244 
1245 	if (private_data && private_data_len)
1246 		memcpy(rej_msg->private_data, private_data, private_data_len);
1247 }
1248 
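/*
 * Handle a retransmitted REQ: re-send the MRA if one was already sent,
 * or a stale-connection REJ if the connection is in timewait; otherwise
 * drop the duplicate.
 */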
1249 static void cm_dup_req_handler(struct cm_work *work,
1250 			       struct cm_id_private *cm_id_priv)
1251 {
1252 	struct ib_mad_send_buf *msg = NULL;
1253 	unsigned long flags;
1254 	int ret;
1255 
1256 	/* Quick state check to discard duplicate REQs. */
1257 	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
1258 		return;
1259 
1260 	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1261 	if (ret)
1262 		return;
1263 
1264 	spin_lock_irqsave(&cm_id_priv->lock, flags);
1265 	switch (cm_id_priv->id.state) {
1266 	case IB_CM_MRA_REQ_SENT:
1267 		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1268 			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
1269 			      cm_id_priv->private_data,
1270 			      cm_id_priv->private_data_len);
1271 		break;
1272 	case IB_CM_TIMEWAIT:
1273 		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
1274 			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
1275 		break;
1276 	default:
1277 		goto unlock;
1278 	}
1279 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1280 
1281 	ret = ib_post_send_mad(msg, NULL);
1282 	if (ret)
1283 		goto free;
1284 	return;
1285 
1286 unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1287 free:	cm_free_msg(msg);
1288 }
1289 
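/*
 * Match an incoming REQ against existing connections and listeners:
 * detect duplicate REQs and stale connections via the remote ID/QPN
 * trees, then look up the listening cm_id for the requested service.
 */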
1290 static struct cm_id_private * cm_match_req(struct cm_work *work,
1291 					   struct cm_id_private *cm_id_priv)
1292 {
1293 	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
1294 	struct cm_timewait_info *timewait_info;
1295 	struct cm_req_msg *req_msg;
1296 	unsigned long flags;
1297 
1298 	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1299 
1300 	/* Check for duplicate REQ and stale connections. */
1301 	spin_lock_irqsave(&cm.lock, flags);
1302 	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
1303 	if (!timewait_info)
1304 		timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
1305 
1306 	if (timewait_info) {
1307 		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
1308 					   timewait_info->work.remote_id);
1309 		cm_cleanup_timewait(cm_id_priv->timewait_info);
1310 		spin_unlock_irqrestore(&cm.lock, flags);
1311 		if (cur_cm_id_priv) {
1312 			cm_dup_req_handler(work, cur_cm_id_priv);
1313 			cm_deref_id(cur_cm_id_priv);
1314 		} else
1315 			cm_issue_rej(work->port, work->mad_recv_wc,
1316 				     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
1317 				     NULL, 0);
1318 		listen_cm_id_priv = NULL;
1319 		goto out;
1320 	}
1321 
1322 	/* Find matching listen request. */
1323 	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
1324 					   req_msg->service_id,
1325 					   req_msg->private_data);
1326 	if (!listen_cm_id_priv) {
1327 		cm_cleanup_timewait(cm_id_priv->timewait_info);
1328 		spin_unlock_irqrestore(&cm.lock, flags);
1329 		cm_issue_rej(work->port, work->mad_recv_wc,
1330 			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
1331 			     NULL, 0);
1332 		goto out;
1333 	}
1334 	atomic_inc(&listen_cm_id_priv->refcount);
1335 	atomic_inc(&cm_id_priv->refcount);
1336 	cm_id_priv->id.state = IB_CM_REQ_RCVD;
1337 	atomic_inc(&cm_id_priv->work_count);
1338 	spin_unlock_irqrestore(&cm.lock, flags);
1339 out:
1340 	return listen_cm_id_priv;
1341 }
1342 
1343 static int cm_req_handler(struct cm_work *work)
1344 {
1345 	struct ib_cm_id *cm_id;
1346 	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
1347 	struct cm_req_msg *req_msg;
1348 	int ret;
1349 
1350 	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1351 
1352 	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
1353 	if (IS_ERR(cm_id))
1354 		return PTR_ERR(cm_id);
1355 
1356 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1357 	cm_id_priv->id.remote_id = req_msg->local_comm_id;
1358 	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
1359 				work->mad_recv_wc->recv_buf.grh,
1360 				&cm_id_priv->av);
1361 	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
1362 							    id.local_id);
1363 	if (IS_ERR(cm_id_priv->timewait_info)) {
1364 		ret = PTR_ERR(cm_id_priv->timewait_info);
1365 		goto destroy;
1366 	}
1367 	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
1368 	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
1369 	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);
1370 
1371 	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
1372 	if (!listen_cm_id_priv) {
1373 		ret = -EINVAL;
1374 		kfree(cm_id_priv->timewait_info);
1375 		goto destroy;
1376 	}
1377 
1378 	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
1379 	cm_id_priv->id.context = listen_cm_id_priv->id.context;
1380 	cm_id_priv->id.service_id = req_msg->service_id;
1381 	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
1382 
1383 	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
1384 	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
1385 	if (ret) {
1386 		ib_get_cached_gid(work->port->cm_dev->device,
1387 				  work->port->port_num, 0, &work->path[0].sgid);
1388 		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
1389 			       &work->path[0].sgid, sizeof work->path[0].sgid,
1390 			       NULL, 0);
1391 		goto rejected;
1392 	}
1393 	if (req_msg->alt_local_lid) {
1394 		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
1395 		if (ret) {
1396 			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
1397 				       &work->path[0].sgid,
1398 				       sizeof work->path[0].sgid, NULL, 0);
1399 			goto rejected;
1400 		}
1401 	}
1402 	cm_id_priv->tid = req_msg->hdr.tid;
1403 	cm_id_priv->timeout_ms = cm_convert_to_ms(
1404 					cm_req_get_local_resp_timeout(req_msg));
1405 	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
1406 	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
1407 	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
1408 	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
1409 	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
1410 	cm_id_priv->pkey = req_msg->pkey;
1411 	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
1412 	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
1413 	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
1414 	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
1415 
1416 	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
1417 	cm_process_work(cm_id_priv, work);
1418 	cm_deref_id(listen_cm_id_priv);
1419 	return 0;
1420 
1421 rejected:
1422 	atomic_dec(&cm_id_priv->refcount);
1423 	cm_deref_id(listen_cm_id_priv);
1424 destroy:
1425 	ib_destroy_cm_id(cm_id);
1426 	return ret;
1427 }
1428 
1429 static void cm_format_rep(struct cm_rep_msg *rep_msg,
1430 			  struct cm_id_private *cm_id_priv,
1431 			  struct ib_cm_rep_param *param)
1432 {
1433 	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
1434 	rep_msg->local_comm_id = cm_id_priv->id.local_id;
1435 	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1436 	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
1437 	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
1438 	rep_msg->resp_resources = param->responder_resources;
1439 	rep_msg->initiator_depth = param->initiator_depth;
1440 	cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
1441 	cm_rep_set_failover(rep_msg, param->failover_accepted);
1442 	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
1443 	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
1444 	cm_rep_set_srq(rep_msg, param->srq);
1445 	rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
1446 
1447 	if (param->private_data && param->private_data_len)
1448 		memcpy(rep_msg->private_data, param->private_data,
1449 		       param->private_data_len);
1450 }
1451 
1452 int ib_send_cm_rep(struct ib_cm_id *cm_id,
1453 		   struct ib_cm_rep_param *param)
1454 {
1455 	struct cm_id_private *cm_id_priv;
1456 	struct ib_mad_send_buf *msg;
1457 	struct cm_rep_msg *rep_msg;
1458 	unsigned long flags;
1459 	int ret;
1460 
1461 	if (param->private_data &&
1462 	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
1463 		return -EINVAL;
1464 
1465 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1466 	spin_lock_irqsave(&cm_id_priv->lock, flags);
1467 	if (cm_id->state != IB_CM_REQ_RCVD &&
1468 	    cm_id->state != IB_CM_MRA_REQ_SENT) {
1469 		ret = -EINVAL;
1470 		goto out;
1471 	}
1472 
1473 	ret = cm_alloc_msg(cm_id_priv, &msg);
1474 	if (ret)
1475 		goto out;
1476 
1477 	rep_msg = (struct cm_rep_msg *) msg->mad;
1478 	cm_format_rep(rep_msg, cm_id_priv, param);
1479 	msg->timeout_ms = cm_id_priv->timeout_ms;
1480 	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
1481 
1482 	ret = ib_post_send_mad(msg, NULL);
1483 	if (ret) {
1484 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1485 		cm_free_msg(msg);
1486 		return ret;
1487 	}
1488 
1489 	cm_id->state = IB_CM_REP_SENT;
1490 	cm_id_priv->msg = msg;
1491 	cm_id_priv->initiator_depth = param->initiator_depth;
1492 	cm_id_priv->responder_resources = param->responder_resources;
1493 	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
1494 	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);
1495 
1496 out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1497 	return ret;
1498 }
1499 EXPORT_SYMBOL(ib_send_cm_rep);
1500 
1501 static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
1502 			  struct cm_id_private *cm_id_priv,
1503 			  const void *private_data,
1504 			  u8 private_data_len)
1505 {
1506 	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
1507 	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
1508 	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
1509 
1510 	if (private_data && private_data_len)
1511 		memcpy(rtu_msg->private_data, private_data, private_data_len);
1512 }
1513 
1514 int ib_send_cm_rtu(struct ib_cm_id *cm_id,
1515 		   const void *private_data,
1516 		   u8 private_data_len)
1517 {
1518 	struct cm_id_private *cm_id_priv;
1519 	struct ib_mad_send_buf *msg;
1520 	unsigned long flags;
1521 	void *data;
1522 	int ret;
1523 
1524 	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
1525 		return -EINVAL;
1526 
1527 	data = cm_copy_private_data(private_data, private_data_len);
1528 	if (IS_ERR(data))
1529 		return PTR_ERR(data);
1530 
1531 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1532 	spin_lock_irqsave(&cm_id_priv->lock, flags);
1533 	if (cm_id->state != IB_CM_REP_RCVD &&
1534 	    cm_id->state != IB_CM_MRA_REP_SENT) {
1535 		ret = -EINVAL;
1536 		goto error;
1537 	}
1538 
1539 	ret = cm_alloc_msg(cm_id_priv, &msg);
1540 	if (ret)
1541 		goto error;
1542 
1543 	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1544 		      private_data, private_data_len);
1545 
1546 	ret = ib_post_send_mad(msg, NULL);
1547 	if (ret) {
1548 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1549 		cm_free_msg(msg);
1550 		kfree(data);
1551 		return ret;
1552 	}
1553 
1554 	cm_id->state = IB_CM_ESTABLISHED;
1555 	cm_set_private_data(cm_id_priv, data, private_data_len);
1556 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1557 	return 0;
1558 
1559 error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1560 	kfree(data);
1561 	return ret;
1562 }
1563 EXPORT_SYMBOL(ib_send_cm_rtu);
1564 
1565 static void cm_format_rep_event(struct cm_work *work)
1566 {
1567 	struct cm_rep_msg *rep_msg;
1568 	struct ib_cm_rep_event_param *param;
1569 
1570 	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1571 	param = &work->cm_event.param.rep_rcvd;
1572 	param->remote_ca_guid = rep_msg->local_ca_guid;
1573 	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
1574 	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
1575 	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
1576 	param->responder_resources = rep_msg->initiator_depth;
1577 	param->initiator_depth = rep_msg->resp_resources;
1578 	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
1579 	param->failover_accepted = cm_rep_get_failover(rep_msg);
1580 	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
1581 	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1582 	param->srq = cm_rep_get_srq(rep_msg);
1583 	work->cm_event.private_data = &rep_msg->private_data;
1584 }
1585 
1586 static void cm_dup_rep_handler(struct cm_work *work)
1587 {
1588 	struct cm_id_private *cm_id_priv;
1589 	struct cm_rep_msg *rep_msg;
1590 	struct ib_mad_send_buf *msg = NULL;
1591 	unsigned long flags;
1592 	int ret;
1593 
1594 	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
1595 	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
1596 				   rep_msg->local_comm_id);
1597 	if (!cm_id_priv)
1598 		return;
1599 
1600 	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1601 	if (ret)
1602 		goto deref;
1603 
1604 	spin_lock_irqsave(&cm_id_priv->lock, flags);
1605 	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
1606 		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1607 			      cm_id_priv->private_data,
1608 			      cm_id_priv->private_data_len);
1609 	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
1610 		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1611 			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
1612 			      cm_id_priv->private_data,
1613 			      cm_id_priv->private_data_len);
1614 	else
1615 		goto unlock;
1616 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1617 
1618 	ret = ib_post_send_mad(msg, NULL);
1619 	if (ret)
1620 		goto free;
1621 	goto deref;
1622 
1623 unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1624 free:	cm_free_msg(msg);
1625 deref:	cm_deref_id(cm_id_priv);
1626 }
1627 
1628 static int cm_rep_handler(struct cm_work *work)
1629 {
1630 	struct cm_id_private *cm_id_priv;
1631 	struct cm_rep_msg *rep_msg;
1632 	unsigned long flags;
1633 	int ret;
1634 
1635 	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1636 	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
1637 	if (!cm_id_priv) {
1638 		cm_dup_rep_handler(work);
1639 		return -EINVAL;
1640 	}
1641 
1642 	cm_format_rep_event(work);
1643 
1644 	spin_lock_irqsave(&cm_id_priv->lock, flags);
1645 	switch (cm_id_priv->id.state) {
1646 	case IB_CM_REQ_SENT:
1647 	case IB_CM_MRA_REQ_RCVD:
1648 		break;
1649 	default:
1650 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1651 		ret = -EINVAL;
1652 		goto error;
1653 	}
1654 
1655 	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
1656 	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
1657 	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1658 
1659 	spin_lock(&cm.lock);
1660 	/* Check for duplicate REP. */
1661 	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
1662 		spin_unlock(&cm.lock);
1663 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1664 		ret = -EINVAL;
1665 		goto error;
1666 	}
1667 	/* Check for a stale connection. */
1668 	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
1669 		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
1670 			 &cm.remote_id_table);
1671 		cm_id_priv->timewait_info->inserted_remote_id = 0;
1672 		spin_unlock(&cm.lock);
1673 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1674 		cm_issue_rej(work->port, work->mad_recv_wc,
1675 			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
1676 			     NULL, 0);
1677 		ret = -EINVAL;
1678 		goto error;
1679 	}
1680 	spin_unlock(&cm.lock);
1681 
1682 	cm_id_priv->id.state = IB_CM_REP_RCVD;
1683 	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
1684 	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1685 	cm_id_priv->initiator_depth = rep_msg->resp_resources;
1686 	cm_id_priv->responder_resources = rep_msg->initiator_depth;
1687 	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
1688 	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1689 
1690 	/* todo: handle peer_to_peer */
1691 
1692 	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1693 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1694 	if (!ret)
1695 		list_add_tail(&work->list, &cm_id_priv->work_list);
1696 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1697 
1698 	if (ret)
1699 		cm_process_work(cm_id_priv, work);
1700 	else
1701 		cm_deref_id(cm_id_priv);
1702 	return 0;
1703 
1704 error:
1705 	cm_deref_id(cm_id_priv);
1706 	return ret;
1707 }
1708 
1709 static int cm_establish_handler(struct cm_work *work)
1710 {
1711 	struct cm_id_private *cm_id_priv;
1712 	unsigned long flags;
1713 	int ret;
1714 
1715 	/* See comment in cm_establish about lookup. */
1716 	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
1717 	if (!cm_id_priv)
1718 		return -EINVAL;
1719 
1720 	spin_lock_irqsave(&cm_id_priv->lock, flags);
1721 	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
1722 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1723 		goto out;
1724 	}
1725 
1726 	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1727 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1728 	if (!ret)
1729 		list_add_tail(&work->list, &cm_id_priv->work_list);
1730 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1731 
1732 	if (ret)
1733 		cm_process_work(cm_id_priv, work);
1734 	else
1735 		cm_deref_id(cm_id_priv);
1736 	return 0;
1737 out:
1738 	cm_deref_id(cm_id_priv);
1739 	return -EINVAL;
1740 }
1741 
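/*
 * Process an incoming RTU: once the peer confirms the REP we sent
 * (possibly after asking us to wait via MRA), move the connection to
 * ESTABLISHED.
 */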
1742 static int cm_rtu_handler(struct cm_work *work)
1743 {
1744 	struct cm_id_private *cm_id_priv;
1745 	struct cm_rtu_msg *rtu_msg;
1746 	unsigned long flags;
1747 	int ret;
1748 
1749 	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
1750 	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
1751 				   rtu_msg->local_comm_id);
1752 	if (!cm_id_priv)
1753 		return -EINVAL;
1754 
1755 	work->cm_event.private_data = &rtu_msg->private_data;
1756 
1757 	spin_lock_irqsave(&cm_id_priv->lock, flags);
1758 	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
1759 	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
1760 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1761 		goto out;
1762 	}
1763 	cm_id_priv->id.state = IB_CM_ESTABLISHED;
1764 
1765 	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1766 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1767 	if (!ret)
1768 		list_add_tail(&work->list, &cm_id_priv->work_list);
1769 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1770 
1771 	if (ret)
1772 		cm_process_work(cm_id_priv, work);
1773 	else
1774 		cm_deref_id(cm_id_priv);
1775 	return 0;
1776 out:
1777 	cm_deref_id(cm_id_priv);
1778 	return -EINVAL;
1779 }
1780 
1781 static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
1782 			  struct cm_id_private *cm_id_priv,
1783 			  const void *private_data,
1784 			  u8 private_data_len)
1785 {
1786 	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
1787 			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
1788 	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
1789 	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
1790 	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
1791 
1792 	if (private_data && private_data_len)
1793 		memcpy(dreq_msg->private_data, private_data, private_data_len);
1794 }
1795 
1796 int ib_send_cm_dreq(struct ib_cm_id *cm_id,
1797 		    const void *private_data,
1798 		    u8 private_data_len)
1799 {
1800 	struct cm_id_private *cm_id_priv;
1801 	struct ib_mad_send_buf *msg;
1802 	unsigned long flags;
1803 	int ret;
1804 
1805 	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
1806 		return -EINVAL;
1807 
1808 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1809 	spin_lock_irqsave(&cm_id_priv->lock, flags);
1810 	if (cm_id->state != IB_CM_ESTABLISHED) {
1811 		ret = -EINVAL;
1812 		goto out;
1813 	}
1814 
1815 	ret = cm_alloc_msg(cm_id_priv, &msg);
1816 	if (ret) {
1817 		cm_enter_timewait(cm_id_priv);
1818 		goto out;
1819 	}
1820 
1821 	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
1822 		       private_data, private_data_len);
1823 	msg->timeout_ms = cm_id_priv->timeout_ms;
1824 	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
1825 
1826 	ret = ib_post_send_mad(msg, NULL);
1827 	if (ret) {
1828 		cm_enter_timewait(cm_id_priv);
1829 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1830 		cm_free_msg(msg);
1831 		return ret;
1832 	}
1833 
1834 	cm_id->state = IB_CM_DREQ_SENT;
1835 	cm_id_priv->msg = msg;
1836 out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1837 	return ret;
1838 }
1839 EXPORT_SYMBOL(ib_send_cm_dreq);
1840 
1841 static void cm_format_drep(struct cm_drep_msg *drep_msg,
1842 			  struct cm_id_private *cm_id_priv,
1843 			  const void *private_data,
1844 			  u8 private_data_len)
1845 {
1846 	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
1847 	drep_msg->local_comm_id = cm_id_priv->id.local_id;
1848 	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1849 
1850 	if (private_data && private_data_len)
1851 		memcpy(drep_msg->private_data, private_data, private_data_len);
1852 }
1853 
1854 int ib_send_cm_drep(struct ib_cm_id *cm_id,
1855 		    const void *private_data,
1856 		    u8 private_data_len)
1857 {
1858 	struct cm_id_private *cm_id_priv;
1859 	struct ib_mad_send_buf *msg;
1860 	unsigned long flags;
1861 	void *data;
1862 	int ret;
1863 
1864 	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
1865 		return -EINVAL;
1866 
1867 	data = cm_copy_private_data(private_data, private_data_len);
1868 	if (IS_ERR(data))
1869 		return PTR_ERR(data);
1870 
1871 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1872 	spin_lock_irqsave(&cm_id_priv->lock, flags);
1873 	if (cm_id->state != IB_CM_DREQ_RCVD) {
1874 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1875 		kfree(data);
1876 		return -EINVAL;
1877 	}
1878 
1879 	cm_set_private_data(cm_id_priv, data, private_data_len);
1880 	cm_enter_timewait(cm_id_priv);
1881 
1882 	ret = cm_alloc_msg(cm_id_priv, &msg);
1883 	if (ret)
1884 		goto out;
1885 
1886 	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
1887 		       private_data, private_data_len);
1888 
1889 	ret = ib_post_send_mad(msg, NULL);
1890 	if (ret) {
1891 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1892 		cm_free_msg(msg);
1893 		return ret;
1894 	}
1895 
1896 out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1897 	return ret;
1898 }
1899 EXPORT_SYMBOL(ib_send_cm_drep);
1900 
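/*
 * Reply to a DREQ for which no matching cm_id exists by sending a DREP
 * built directly from the received MAD, so the remote side can finish
 * tearing down the stale connection.
 */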
1901 static int cm_issue_drep(struct cm_port *port,
1902 			 struct ib_mad_recv_wc *mad_recv_wc)
1903 {
1904 	struct ib_mad_send_buf *msg = NULL;
1905 	struct cm_dreq_msg *dreq_msg;
1906 	struct cm_drep_msg *drep_msg;
1907 	int ret;
1908 
1909 	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
1910 	if (ret)
1911 		return ret;
1912 
1913 	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
1914 	drep_msg = (struct cm_drep_msg *) msg->mad;
1915 
1916 	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
1917 	drep_msg->remote_comm_id = dreq_msg->local_comm_id;
1918 	drep_msg->local_comm_id = dreq_msg->remote_comm_id;
1919 
1920 	ret = ib_post_send_mad(msg, NULL);
1921 	if (ret)
1922 		cm_free_msg(msg);
1923 
1924 	return ret;
1925 }
1926 
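/*
 * Process an incoming DREQ.  Outstanding REP or DREQ sends are cancelled,
 * and a DREQ that arrives while we are already in timewait is answered
 * immediately with a DREP carrying the previously stored private data.
 */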
1927 static int cm_dreq_handler(struct cm_work *work)
1928 {
1929 	struct cm_id_private *cm_id_priv;
1930 	struct cm_dreq_msg *dreq_msg;
1931 	struct ib_mad_send_buf *msg = NULL;
1932 	unsigned long flags;
1933 	int ret;
1934 
1935 	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
1936 	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
1937 				   dreq_msg->local_comm_id);
1938 	if (!cm_id_priv) {
1939 		cm_issue_drep(work->port, work->mad_recv_wc);
1940 		return -EINVAL;
1941 	}
1942 
1943 	work->cm_event.private_data = &dreq_msg->private_data;
1944 
1945 	spin_lock_irqsave(&cm_id_priv->lock, flags);
1946 	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
1947 		goto unlock;
1948 
1949 	switch (cm_id_priv->id.state) {
1950 	case IB_CM_REP_SENT:
1951 	case IB_CM_DREQ_SENT:
1952 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1953 		break;
1954 	case IB_CM_ESTABLISHED:
1955 	case IB_CM_MRA_REP_RCVD:
1956 		break;
1957 	case IB_CM_TIMEWAIT:
1958 		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
1959 			goto unlock;
1960 
1961 		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
1962 			       cm_id_priv->private_data,
1963 			       cm_id_priv->private_data_len);
1964 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1965 
1966 		if (ib_post_send_mad(msg, NULL))
1967 			cm_free_msg(msg);
1968 		goto deref;
1969 	default:
1970 		goto unlock;
1971 	}
1972 	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
1973 	cm_id_priv->tid = dreq_msg->hdr.tid;
1974 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
1975 	if (!ret)
1976 		list_add_tail(&work->list, &cm_id_priv->work_list);
1977 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1978 
1979 	if (ret)
1980 		cm_process_work(cm_id_priv, work);
1981 	else
1982 		cm_deref_id(cm_id_priv);
1983 	return 0;
1984 
1985 unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1986 deref:	cm_deref_id(cm_id_priv);
1987 	return -EINVAL;
1988 }
1989 
1990 static int cm_drep_handler(struct cm_work *work)
1991 {
1992 	struct cm_id_private *cm_id_priv;
1993 	struct cm_drep_msg *drep_msg;
1994 	unsigned long flags;
1995 	int ret;
1996 
1997 	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
1998 	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
1999 				   drep_msg->local_comm_id);
2000 	if (!cm_id_priv)
2001 		return -EINVAL;
2002 
2003 	work->cm_event.private_data = &drep_msg->private_data;
2004 
2005 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2006 	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2007 	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2008 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2009 		goto out;
2010 	}
2011 	cm_enter_timewait(cm_id_priv);
2012 
2013 	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2014 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2015 	if (!ret)
2016 		list_add_tail(&work->list, &cm_id_priv->work_list);
2017 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2018 
2019 	if (ret)
2020 		cm_process_work(cm_id_priv, work);
2021 	else
2022 		cm_deref_id(cm_id_priv);
2023 	return 0;
2024 out:
2025 	cm_deref_id(cm_id_priv);
2026 	return -EINVAL;
2027 }
2028 
2029 int ib_send_cm_rej(struct ib_cm_id *cm_id,
2030 		   enum ib_cm_rej_reason reason,
2031 		   void *ari,
2032 		   u8 ari_length,
2033 		   const void *private_data,
2034 		   u8 private_data_len)
2035 {
2036 	struct cm_id_private *cm_id_priv;
2037 	struct ib_mad_send_buf *msg;
2038 	unsigned long flags;
2039 	int ret;
2040 
2041 	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2042 	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2043 		return -EINVAL;
2044 
2045 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2046 
2047 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2048 	switch (cm_id->state) {
2049 	case IB_CM_REQ_SENT:
2050 	case IB_CM_MRA_REQ_RCVD:
2051 	case IB_CM_REQ_RCVD:
2052 	case IB_CM_MRA_REQ_SENT:
2053 	case IB_CM_REP_RCVD:
2054 	case IB_CM_MRA_REP_SENT:
2055 		ret = cm_alloc_msg(cm_id_priv, &msg);
2056 		if (!ret)
2057 			cm_format_rej((struct cm_rej_msg *) msg->mad,
2058 				      cm_id_priv, reason, ari, ari_length,
2059 				      private_data, private_data_len);
2060 
2061 		cm_reset_to_idle(cm_id_priv);
2062 		break;
2063 	case IB_CM_REP_SENT:
2064 	case IB_CM_MRA_REP_RCVD:
2065 		ret = cm_alloc_msg(cm_id_priv, &msg);
2066 		if (!ret)
2067 			cm_format_rej((struct cm_rej_msg *) msg->mad,
2068 				      cm_id_priv, reason, ari, ari_length,
2069 				      private_data, private_data_len);
2070 
2071 		cm_enter_timewait(cm_id_priv);
2072 		break;
2073 	default:
2074 		ret = -EINVAL;
2075 		goto out;
2076 	}
2077 
2078 	if (ret)
2079 		goto out;
2080 
2081 	ret = ib_post_send_mad(msg, NULL);
2082 	if (ret)
2083 		cm_free_msg(msg);
2084 
2085 out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2086 	return ret;
2087 }
2088 EXPORT_SYMBOL(ib_send_cm_rej);
2089 
2090 static void cm_format_rej_event(struct cm_work *work)
2091 {
2092 	struct cm_rej_msg *rej_msg;
2093 	struct ib_cm_rej_event_param *param;
2094 
2095 	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2096 	param = &work->cm_event.param.rej_rcvd;
2097 	param->ari = rej_msg->ari;
2098 	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
2099 	param->reason = __be16_to_cpu(rej_msg->reason);
2100 	work->cm_event.private_data = &rej_msg->private_data;
2101 }
2102 
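/*
 * Map a received REJ back to the cm_id it refers to.  For an
 * IB_CM_REJ_TIMEOUT reject, the 64-bit value at the start of the ARI,
 * together with the remote comm ID, is used to locate our timewait entry;
 * other rejects are resolved through the normal comm ID lookup.
 */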
2103 static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2104 {
2105 	struct cm_timewait_info *timewait_info;
2106 	struct cm_id_private *cm_id_priv;
2107 	unsigned long flags;
2108 	__be32 remote_id;
2109 
2110 	remote_id = rej_msg->local_comm_id;
2111 
2112 	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
2113 		spin_lock_irqsave(&cm.lock, flags);
2114 		timewait_info = cm_find_remote_id(*((__be64 *) rej_msg->ari),
2115 						  remote_id);
2116 		if (!timewait_info) {
2117 			spin_unlock_irqrestore(&cm.lock, flags);
2118 			return NULL;
2119 		}
2120 		cm_id_priv = idr_find(&cm.local_id_table, (__force int)
2121 				      (timewait_info->work.local_id ^
2122 				       cm.random_id_operand));
2123 		if (cm_id_priv) {
2124 			if (cm_id_priv->id.remote_id == remote_id)
2125 				atomic_inc(&cm_id_priv->refcount);
2126 			else
2127 				cm_id_priv = NULL;
2128 		}
2129 		spin_unlock_irqrestore(&cm.lock, flags);
2130 	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2131 		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2132 	else
2133 		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2134 
2135 	return cm_id_priv;
2136 }
2137 
2138 static int cm_rej_handler(struct cm_work *work)
2139 {
2140 	struct cm_id_private *cm_id_priv;
2141 	struct cm_rej_msg *rej_msg;
2142 	unsigned long flags;
2143 	int ret;
2144 
2145 	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2146 	cm_id_priv = cm_acquire_rejected_id(rej_msg);
2147 	if (!cm_id_priv)
2148 		return -EINVAL;
2149 
2150 	cm_format_rej_event(work);
2151 
2152 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2153 	switch (cm_id_priv->id.state) {
2154 	case IB_CM_REQ_SENT:
2155 	case IB_CM_MRA_REQ_RCVD:
2156 	case IB_CM_REP_SENT:
2157 	case IB_CM_MRA_REP_RCVD:
2158 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2159 		/* fall through */
2160 	case IB_CM_REQ_RCVD:
2161 	case IB_CM_MRA_REQ_SENT:
2162 		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
2163 			cm_enter_timewait(cm_id_priv);
2164 		else
2165 			cm_reset_to_idle(cm_id_priv);
2166 		break;
2167 	case IB_CM_DREQ_SENT:
2168 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2169 		/* fall through */
2170 	case IB_CM_REP_RCVD:
2171 	case IB_CM_MRA_REP_SENT:
2172 	case IB_CM_ESTABLISHED:
2173 		cm_enter_timewait(cm_id_priv);
2174 		break;
2175 	default:
2176 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2177 		ret = -EINVAL;
2178 		goto out;
2179 	}
2180 
2181 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2182 	if (!ret)
2183 		list_add_tail(&work->list, &cm_id_priv->work_list);
2184 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2185 
2186 	if (ret)
2187 		cm_process_work(cm_id_priv, work);
2188 	else
2189 		cm_deref_id(cm_id_priv);
2190 	return 0;
2191 out:
2192 	cm_deref_id(cm_id_priv);
2193 	return -EINVAL;
2194 }
2195 
2196 int ib_send_cm_mra(struct ib_cm_id *cm_id,
2197 		   u8 service_timeout,
2198 		   const void *private_data,
2199 		   u8 private_data_len)
2200 {
2201 	struct cm_id_private *cm_id_priv;
2202 	struct ib_mad_send_buf *msg;
2203 	void *data;
2204 	unsigned long flags;
2205 	int ret;
2206 
2207 	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2208 		return -EINVAL;
2209 
2210 	data = cm_copy_private_data(private_data, private_data_len);
2211 	if (IS_ERR(data))
2212 		return PTR_ERR(data);
2213 
2214 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2215 
2216 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2217 	switch (cm_id_priv->id.state) {
2218 	case IB_CM_REQ_RCVD:
2219 		ret = cm_alloc_msg(cm_id_priv, &msg);
2220 		if (ret)
2221 			goto error1;
2222 
2223 		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2224 			      CM_MSG_RESPONSE_REQ, service_timeout,
2225 			      private_data, private_data_len);
2226 		ret = ib_post_send_mad(msg, NULL);
2227 		if (ret)
2228 			goto error2;
2229 		cm_id->state = IB_CM_MRA_REQ_SENT;
2230 		break;
2231 	case IB_CM_REP_RCVD:
2232 		ret = cm_alloc_msg(cm_id_priv, &msg);
2233 		if (ret)
2234 			goto error1;
2235 
2236 		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2237 			      CM_MSG_RESPONSE_REP, service_timeout,
2238 			      private_data, private_data_len);
2239 		ret = ib_post_send_mad(msg, NULL);
2240 		if (ret)
2241 			goto error2;
2242 		cm_id->state = IB_CM_MRA_REP_SENT;
2243 		break;
2244 	case IB_CM_ESTABLISHED:
2245 		ret = cm_alloc_msg(cm_id_priv, &msg);
2246 		if (ret)
2247 			goto error1;
2248 
2249 		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2250 			      CM_MSG_RESPONSE_OTHER, service_timeout,
2251 			      private_data, private_data_len);
2252 		ret = ib_post_send_mad(msg, NULL);
2253 		if (ret)
2254 			goto error2;
2255 		cm_id->lap_state = IB_CM_MRA_LAP_SENT;
2256 		break;
2257 	default:
2258 		ret = -EINVAL;
2259 		goto error1;
2260 	}
2261 	cm_id_priv->service_timeout = service_timeout;
2262 	cm_set_private_data(cm_id_priv, data, private_data_len);
2263 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2264 	return 0;
2265 
2266 error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2267 	kfree(data);
2268 	return ret;
2269 
2270 error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2271 	kfree(data);
2272 	cm_free_msg(msg);
2273 	return ret;
2274 }
2275 EXPORT_SYMBOL(ib_send_cm_mra);
2276 
2277 static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2278 {
2279 	switch (cm_mra_get_msg_mraed(mra_msg)) {
2280 	case CM_MSG_RESPONSE_REQ:
2281 		return cm_acquire_id(mra_msg->remote_comm_id, 0);
2282 	case CM_MSG_RESPONSE_REP:
2283 	case CM_MSG_RESPONSE_OTHER:
2284 		return cm_acquire_id(mra_msg->remote_comm_id,
2285 				     mra_msg->local_comm_id);
2286 	default:
2287 		return NULL;
2288 	}
2289 }
2290 
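/*
 * Process an incoming MRA.  The retransmission timeout of the MAD being
 * MRA'd (REQ, REP, or LAP) is extended by the service timeout advertised
 * by the peer plus the packet lifetime, and the cm_id moves to the
 * corresponding MRA_*_RCVD state.
 */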
2291 static int cm_mra_handler(struct cm_work *work)
2292 {
2293 	struct cm_id_private *cm_id_priv;
2294 	struct cm_mra_msg *mra_msg;
2295 	unsigned long flags;
2296 	int timeout, ret;
2297 
2298 	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
2299 	cm_id_priv = cm_acquire_mraed_id(mra_msg);
2300 	if (!cm_id_priv)
2301 		return -EINVAL;
2302 
2303 	work->cm_event.private_data = &mra_msg->private_data;
2304 	work->cm_event.param.mra_rcvd.service_timeout =
2305 					cm_mra_get_service_timeout(mra_msg);
2306 	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
2307 		  cm_convert_to_ms(cm_id_priv->av.packet_life_time);
2308 
2309 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2310 	switch (cm_id_priv->id.state) {
2311 	case IB_CM_REQ_SENT:
2312 		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
2313 		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
2314 				  cm_id_priv->msg, timeout))
2315 			goto out;
2316 		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
2317 		break;
2318 	case IB_CM_REP_SENT:
2319 		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
2320 		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
2321 				  cm_id_priv->msg, timeout))
2322 			goto out;
2323 		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
2324 		break;
2325 	case IB_CM_ESTABLISHED:
2326 		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
2327 		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
2328 		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
2329 				  cm_id_priv->msg, timeout))
2330 			goto out;
2331 		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
2332 		break;
2333 	default:
2334 		goto out;
2335 	}
2336 
2337 	cm_id_priv->msg->context[1] = (void *) (unsigned long)
2338 				      cm_id_priv->id.state;
2339 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2340 	if (!ret)
2341 		list_add_tail(&work->list, &cm_id_priv->work_list);
2342 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2343 
2344 	if (ret)
2345 		cm_process_work(cm_id_priv, work);
2346 	else
2347 		cm_deref_id(cm_id_priv);
2348 	return 0;
2349 out:
2350 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2351 	cm_deref_id(cm_id_priv);
2352 	return -EINVAL;
2353 }
2354 
2355 static void cm_format_lap(struct cm_lap_msg *lap_msg,
2356 			  struct cm_id_private *cm_id_priv,
2357 			  struct ib_sa_path_rec *alternate_path,
2358 			  const void *private_data,
2359 			  u8 private_data_len)
2360 {
2361 	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
2362 			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
2363 	lap_msg->local_comm_id = cm_id_priv->id.local_id;
2364 	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
2365 	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
2366 	/* todo: need remote CM response timeout */
2367 	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
2368 	lap_msg->alt_local_lid = alternate_path->slid;
2369 	lap_msg->alt_remote_lid = alternate_path->dlid;
2370 	lap_msg->alt_local_gid = alternate_path->sgid;
2371 	lap_msg->alt_remote_gid = alternate_path->dgid;
2372 	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
2373 	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
2374 	lap_msg->alt_hop_limit = alternate_path->hop_limit;
2375 	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
2376 	cm_lap_set_sl(lap_msg, alternate_path->sl);
2377 	cm_lap_set_subnet_local(lap_msg, 1); /* subnet-local paths only */
2378 	cm_lap_set_local_ack_timeout(lap_msg,
2379 		min(31, alternate_path->packet_life_time + 1));
2380 
2381 	if (private_data && private_data_len)
2382 		memcpy(lap_msg->private_data, private_data, private_data_len);
2383 }
2384 
2385 int ib_send_cm_lap(struct ib_cm_id *cm_id,
2386 		   struct ib_sa_path_rec *alternate_path,
2387 		   const void *private_data,
2388 		   u8 private_data_len)
2389 {
2390 	struct cm_id_private *cm_id_priv;
2391 	struct ib_mad_send_buf *msg;
2392 	unsigned long flags;
2393 	int ret;
2394 
2395 	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
2396 		return -EINVAL;
2397 
2398 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2399 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2400 	if (cm_id->state != IB_CM_ESTABLISHED ||
2401 	    (cm_id->lap_state != IB_CM_LAP_UNINIT &&
2402 	     cm_id->lap_state != IB_CM_LAP_IDLE)) {
2403 		ret = -EINVAL;
2404 		goto out;
2405 	}
2406 
2407 	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
2408 	if (ret)
2409 		goto out;
2410 
2411 	ret = cm_alloc_msg(cm_id_priv, &msg);
2412 	if (ret)
2413 		goto out;
2414 
2415 	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
2416 		      alternate_path, private_data, private_data_len);
2417 	msg->timeout_ms = cm_id_priv->timeout_ms;
2418 	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
2419 
2420 	ret = ib_post_send_mad(msg, NULL);
2421 	if (ret) {
2422 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2423 		cm_free_msg(msg);
2424 		return ret;
2425 	}
2426 
2427 	cm_id->lap_state = IB_CM_LAP_SENT;
2428 	cm_id_priv->msg = msg;
2429 
2430 out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2431 	return ret;
2432 }
2433 EXPORT_SYMBOL(ib_send_cm_lap);
2434 
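/*
 * Build an ib_sa_path_rec describing the alternate path advertised in a
 * received LAP, swapping the local and remote fields so the record
 * reflects this side's view of the path.
 */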
2435 static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
2436 				    struct ib_sa_path_rec *path,
2437 				    struct cm_lap_msg *lap_msg)
2438 {
2439 	memset(path, 0, sizeof *path);
2440 	path->dgid = lap_msg->alt_local_gid;
2441 	path->sgid = lap_msg->alt_remote_gid;
2442 	path->dlid = lap_msg->alt_local_lid;
2443 	path->slid = lap_msg->alt_remote_lid;
2444 	path->flow_label = cm_lap_get_flow_label(lap_msg);
2445 	path->hop_limit = lap_msg->alt_hop_limit;
2446 	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
2447 	path->reversible = 1;
2448 	path->pkey = cm_id_priv->pkey;
2449 	path->sl = cm_lap_get_sl(lap_msg);
2450 	path->mtu_selector = IB_SA_EQ;
2451 	path->mtu = cm_id_priv->path_mtu;
2452 	path->rate_selector = IB_SA_EQ;
2453 	path->rate = cm_lap_get_packet_rate(lap_msg);
2454 	path->packet_life_time_selector = IB_SA_EQ;
2455 	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
2456 	path->packet_life_time -= (path->packet_life_time > 0);
2457 }
2458 
2459 static int cm_lap_handler(struct cm_work *work)
2460 {
2461 	struct cm_id_private *cm_id_priv;
2462 	struct cm_lap_msg *lap_msg;
2463 	struct ib_cm_lap_event_param *param;
2464 	struct ib_mad_send_buf *msg = NULL;
2465 	unsigned long flags;
2466 	int ret;
2467 
2468 	/* todo: verify LAP request and send reject APR if invalid. */
2469 	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
2470 	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
2471 				   lap_msg->local_comm_id);
2472 	if (!cm_id_priv)
2473 		return -EINVAL;
2474 
2475 	param = &work->cm_event.param.lap_rcvd;
2476 	param->alternate_path = &work->path[0];
2477 	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
2478 	work->cm_event.private_data = &lap_msg->private_data;
2479 
2480 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2481 	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
2482 		goto unlock;
2483 
2484 	switch (cm_id_priv->id.lap_state) {
2485 	case IB_CM_LAP_UNINIT:
2486 	case IB_CM_LAP_IDLE:
2487 		break;
2488 	case IB_CM_MRA_LAP_SENT:
2489 		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2490 			goto unlock;
2491 
2492 		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2493 			      CM_MSG_RESPONSE_OTHER,
2494 			      cm_id_priv->service_timeout,
2495 			      cm_id_priv->private_data,
2496 			      cm_id_priv->private_data_len);
2497 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2498 
2499 		if (ib_post_send_mad(msg, NULL))
2500 			cm_free_msg(msg);
2501 		goto deref;
2502 	default:
2503 		goto unlock;
2504 	}
2505 
2506 	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
2507 	cm_id_priv->tid = lap_msg->hdr.tid;
2508 	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2509 				work->mad_recv_wc->recv_buf.grh,
2510 				&cm_id_priv->av);
2511 	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
2512 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2513 	if (!ret)
2514 		list_add_tail(&work->list, &cm_id_priv->work_list);
2515 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2516 
2517 	if (ret)
2518 		cm_process_work(cm_id_priv, work);
2519 	else
2520 		cm_deref_id(cm_id_priv);
2521 	return 0;
2522 
2523 unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2524 deref:	cm_deref_id(cm_id_priv);
2525 	return -EINVAL;
2526 }
2527 
2528 static void cm_format_apr(struct cm_apr_msg *apr_msg,
2529 			  struct cm_id_private *cm_id_priv,
2530 			  enum ib_cm_apr_status status,
2531 			  void *info,
2532 			  u8 info_length,
2533 			  const void *private_data,
2534 			  u8 private_data_len)
2535 {
2536 	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
2537 	apr_msg->local_comm_id = cm_id_priv->id.local_id;
2538 	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
2539 	apr_msg->ap_status = (u8) status;
2540 
2541 	if (info && info_length) {
2542 		apr_msg->info_length = info_length;
2543 		memcpy(apr_msg->info, info, info_length);
2544 	}
2545 
2546 	if (private_data && private_data_len)
2547 		memcpy(apr_msg->private_data, private_data, private_data_len);
2548 }
2549 
2550 int ib_send_cm_apr(struct ib_cm_id *cm_id,
2551 		   enum ib_cm_apr_status status,
2552 		   void *info,
2553 		   u8 info_length,
2554 		   const void *private_data,
2555 		   u8 private_data_len)
2556 {
2557 	struct cm_id_private *cm_id_priv;
2558 	struct ib_mad_send_buf *msg;
2559 	unsigned long flags;
2560 	int ret;
2561 
2562 	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
2563 	    (info && info_length > IB_CM_APR_INFO_LENGTH))
2564 		return -EINVAL;
2565 
2566 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2567 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2568 	if (cm_id->state != IB_CM_ESTABLISHED ||
2569 	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
2570 	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
2571 		ret = -EINVAL;
2572 		goto out;
2573 	}
2574 
2575 	ret = cm_alloc_msg(cm_id_priv, &msg);
2576 	if (ret)
2577 		goto out;
2578 
2579 	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
2580 		      info, info_length, private_data, private_data_len);
2581 	ret = ib_post_send_mad(msg, NULL);
2582 	if (ret) {
2583 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2584 		cm_free_msg(msg);
2585 		return ret;
2586 	}
2587 
2588 	cm_id->lap_state = IB_CM_LAP_IDLE;
2589 out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2590 	return ret;
2591 }
2592 EXPORT_SYMBOL(ib_send_cm_apr);
2593 
2594 static int cm_apr_handler(struct cm_work *work)
2595 {
2596 	struct cm_id_private *cm_id_priv;
2597 	struct cm_apr_msg *apr_msg;
2598 	unsigned long flags;
2599 	int ret;
2600 
2601 	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
2602 	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
2603 				   apr_msg->local_comm_id);
2604 	if (!cm_id_priv)
2605 		return -EINVAL; /* Unmatched reply. */
2606 
2607 	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
2608 	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
2609 	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
2610 	work->cm_event.private_data = &apr_msg->private_data;
2611 
2612 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2613 	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
2614 	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
2615 	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
2616 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2617 		goto out;
2618 	}
2619 	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
2620 	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2621 	cm_id_priv->msg = NULL;
2622 
2623 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2624 	if (!ret)
2625 		list_add_tail(&work->list, &cm_id_priv->work_list);
2626 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2627 
2628 	if (ret)
2629 		cm_process_work(cm_id_priv, work);
2630 	else
2631 		cm_deref_id(cm_id_priv);
2632 	return 0;
2633 out:
2634 	cm_deref_id(cm_id_priv);
2635 	return -EINVAL;
2636 }
2637 
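/*
 * The timewait period has expired: remove the entry from the global
 * timewait list and, if the cm_id is still in TIMEWAIT for the same
 * remote QPN, return it to IDLE and report the timewait exit event to
 * the consumer.
 */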
2638 static int cm_timewait_handler(struct cm_work *work)
2639 {
2640 	struct cm_timewait_info *timewait_info;
2641 	struct cm_id_private *cm_id_priv;
2642 	int ret;
2643 
2644 	timewait_info = (struct cm_timewait_info *)work;
2645 	spin_lock_irq(&cm.lock);
2646 	list_del(&timewait_info->list);
2647 	spin_unlock_irq(&cm.lock);
2648 
2649 	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2650 				   timewait_info->work.remote_id);
2651 	if (!cm_id_priv)
2652 		return -EINVAL;
2653 
2654 	spin_lock_irq(&cm_id_priv->lock);
2655 	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
2656 	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
2657 		spin_unlock_irq(&cm_id_priv->lock);
2658 		goto out;
2659 	}
2660 	cm_id_priv->id.state = IB_CM_IDLE;
2661 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
2662 	if (!ret)
2663 		list_add_tail(&work->list, &cm_id_priv->work_list);
2664 	spin_unlock_irq(&cm_id_priv->lock);
2665 
2666 	if (ret)
2667 		cm_process_work(cm_id_priv, work);
2668 	else
2669 		cm_deref_id(cm_id_priv);
2670 	return 0;
2671 out:
2672 	cm_deref_id(cm_id_priv);
2673 	return -EINVAL;
2674 }
2675 
2676 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
2677 			       struct cm_id_private *cm_id_priv,
2678 			       struct ib_cm_sidr_req_param *param)
2679 {
2680 	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
2681 			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
2682 	sidr_req_msg->request_id = cm_id_priv->id.local_id;
2683 	sidr_req_msg->pkey = cpu_to_be16(param->path->pkey);
2684 	sidr_req_msg->service_id = param->service_id;
2685 
2686 	if (param->private_data && param->private_data_len)
2687 		memcpy(sidr_req_msg->private_data, param->private_data,
2688 		       param->private_data_len);
2689 }
2690 
2691 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
2692 			struct ib_cm_sidr_req_param *param)
2693 {
2694 	struct cm_id_private *cm_id_priv;
2695 	struct ib_mad_send_buf *msg;
2696 	unsigned long flags;
2697 	int ret;
2698 
2699 	if (!param->path || (param->private_data &&
2700 	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
2701 		return -EINVAL;
2702 
2703 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2704 	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
2705 	if (ret)
2706 		goto out;
2707 
2708 	cm_id->service_id = param->service_id;
2709 	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
2710 	cm_id_priv->timeout_ms = param->timeout_ms;
2711 	cm_id_priv->max_cm_retries = param->max_cm_retries;
2712 	ret = cm_alloc_msg(cm_id_priv, &msg);
2713 	if (ret)
2714 		goto out;
2715 
2716 	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
2717 			   param);
2718 	msg->timeout_ms = cm_id_priv->timeout_ms;
2719 	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
2720 
2721 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2722 	if (cm_id->state == IB_CM_IDLE)
2723 		ret = ib_post_send_mad(msg, NULL);
2724 	else
2725 		ret = -EINVAL;
2726 
2727 	if (ret) {
2728 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2729 		cm_free_msg(msg);
2730 		goto out;
2731 	}
2732 	cm_id->state = IB_CM_SIDR_REQ_SENT;
2733 	cm_id_priv->msg = msg;
2734 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2735 out:
2736 	return ret;
2737 }
2738 EXPORT_SYMBOL(ib_send_cm_sidr_req);
2739 
2740 static void cm_format_sidr_req_event(struct cm_work *work,
2741 				     struct ib_cm_id *listen_id)
2742 {
2743 	struct cm_sidr_req_msg *sidr_req_msg;
2744 	struct ib_cm_sidr_req_event_param *param;
2745 
2746 	sidr_req_msg = (struct cm_sidr_req_msg *)
2747 				work->mad_recv_wc->recv_buf.mad;
2748 	param = &work->cm_event.param.sidr_req_rcvd;
2749 	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
2750 	param->listen_id = listen_id;
2751 	param->port = work->port->port_num;
2752 	work->cm_event.private_data = &sidr_req_msg->private_data;
2753 }
2754 
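/*
 * Process an incoming SIDR REQ: create a new cm_id for the request, drop
 * duplicates, and hand the request to the listener registered for the
 * service ID.  Requests with no matching listener are currently dropped
 * (see the todo below).
 */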
2755 static int cm_sidr_req_handler(struct cm_work *work)
2756 {
2757 	struct ib_cm_id *cm_id;
2758 	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
2759 	struct cm_sidr_req_msg *sidr_req_msg;
2760 	struct ib_wc *wc;
2761 	unsigned long flags;
2762 
2763 	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
2764 	if (IS_ERR(cm_id))
2765 		return PTR_ERR(cm_id);
2766 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2767 
2768 	/* Record SGID/SLID and request ID for lookup. */
2769 	sidr_req_msg = (struct cm_sidr_req_msg *)
2770 				work->mad_recv_wc->recv_buf.mad;
2771 	wc = work->mad_recv_wc->wc;
2772 	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
2773 	cm_id_priv->av.dgid.global.interface_id = 0;
2774 	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2775 				work->mad_recv_wc->recv_buf.grh,
2776 				&cm_id_priv->av);
2777 	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
2778 	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
2779 	cm_id_priv->tid = sidr_req_msg->hdr.tid;
2780 	atomic_inc(&cm_id_priv->work_count);
2781 
2782 	spin_lock_irqsave(&cm.lock, flags);
2783 	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
2784 	if (cur_cm_id_priv) {
2785 		spin_unlock_irqrestore(&cm.lock, flags);
2786 		goto out; /* Duplicate message. */
2787 	}
2788 	cur_cm_id_priv = cm_find_listen(cm_id->device,
2789 					sidr_req_msg->service_id,
2790 					sidr_req_msg->private_data);
2791 	if (!cur_cm_id_priv) {
2792 		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
2793 		spin_unlock_irqrestore(&cm.lock, flags);
2794 		/* todo: reply with no match */
2795 		goto out; /* No match. */
2796 	}
2797 	atomic_inc(&cur_cm_id_priv->refcount);
2798 	spin_unlock_irqrestore(&cm.lock, flags);
2799 
2800 	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
2801 	cm_id_priv->id.context = cur_cm_id_priv->id.context;
2802 	cm_id_priv->id.service_id = sidr_req_msg->service_id;
2803 	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
2804 
2805 	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
2806 	cm_process_work(cm_id_priv, work);
2807 	cm_deref_id(cur_cm_id_priv);
2808 	return 0;
2809 out:
2810 	ib_destroy_cm_id(&cm_id_priv->id);
2811 	return -EINVAL;
2812 }
2813 
2814 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
2815 			       struct cm_id_private *cm_id_priv,
2816 			       struct ib_cm_sidr_rep_param *param)
2817 {
2818 	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
2819 			  cm_id_priv->tid);
2820 	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
2821 	sidr_rep_msg->status = param->status;
2822 	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
2823 	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
2824 	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
2825 
2826 	if (param->info && param->info_length)
2827 		memcpy(sidr_rep_msg->info, param->info, param->info_length);
2828 
2829 	if (param->private_data && param->private_data_len)
2830 		memcpy(sidr_rep_msg->private_data, param->private_data,
2831 		       param->private_data_len);
2832 }
2833 
2834 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
2835 			struct ib_cm_sidr_rep_param *param)
2836 {
2837 	struct cm_id_private *cm_id_priv;
2838 	struct ib_mad_send_buf *msg;
2839 	unsigned long flags;
2840 	int ret;
2841 
2842 	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
2843 	    (param->private_data &&
2844 	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
2845 		return -EINVAL;
2846 
2847 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2848 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2849 	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
2850 		ret = -EINVAL;
2851 		goto error;
2852 	}
2853 
2854 	ret = cm_alloc_msg(cm_id_priv, &msg);
2855 	if (ret)
2856 		goto error;
2857 
2858 	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
2859 			   param);
2860 	ret = ib_post_send_mad(msg, NULL);
2861 	if (ret) {
2862 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2863 		cm_free_msg(msg);
2864 		return ret;
2865 	}
2866 	cm_id->state = IB_CM_IDLE;
2867 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2868 
2869 	spin_lock_irqsave(&cm.lock, flags);
2870 	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
2871 	spin_unlock_irqrestore(&cm.lock, flags);
2872 	return 0;
2873 
2874 error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2875 	return ret;
2876 }
2877 EXPORT_SYMBOL(ib_send_cm_sidr_rep);
2878 
2879 static void cm_format_sidr_rep_event(struct cm_work *work)
2880 {
2881 	struct cm_sidr_rep_msg *sidr_rep_msg;
2882 	struct ib_cm_sidr_rep_event_param *param;
2883 
2884 	sidr_rep_msg = (struct cm_sidr_rep_msg *)
2885 				work->mad_recv_wc->recv_buf.mad;
2886 	param = &work->cm_event.param.sidr_rep_rcvd;
2887 	param->status = sidr_rep_msg->status;
2888 	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
2889 	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
2890 	param->info = &sidr_rep_msg->info;
2891 	param->info_len = sidr_rep_msg->info_length;
2892 	work->cm_event.private_data = &sidr_rep_msg->private_data;
2893 }
2894 
2895 static int cm_sidr_rep_handler(struct cm_work *work)
2896 {
2897 	struct cm_sidr_rep_msg *sidr_rep_msg;
2898 	struct cm_id_private *cm_id_priv;
2899 	unsigned long flags;
2900 
2901 	sidr_rep_msg = (struct cm_sidr_rep_msg *)
2902 				work->mad_recv_wc->recv_buf.mad;
2903 	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
2904 	if (!cm_id_priv)
2905 		return -EINVAL; /* Unmatched reply. */
2906 
2907 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2908 	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
2909 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2910 		goto out;
2911 	}
2912 	cm_id_priv->id.state = IB_CM_IDLE;
2913 	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2914 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2915 
2916 	cm_format_sidr_rep_event(work);
2917 	cm_process_work(cm_id_priv, work);
2918 	return 0;
2919 out:
2920 	cm_deref_id(cm_id_priv);
2921 	return -EINVAL;
2922 }
2923 
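/*
 * A CM MAD we sent completed in error (typically after exhausting its
 * retries).  If it is still the active message for the cm_id and the
 * state has not changed, reset the connection state and report the
 * matching *_ERROR event to the consumer.
 */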
2924 static void cm_process_send_error(struct ib_mad_send_buf *msg,
2925 				  enum ib_wc_status wc_status)
2926 {
2927 	struct cm_id_private *cm_id_priv;
2928 	struct ib_cm_event cm_event;
2929 	enum ib_cm_state state;
2930 	unsigned long flags;
2931 	int ret;
2932 
2933 	memset(&cm_event, 0, sizeof cm_event);
2934 	cm_id_priv = msg->context[0];
2935 
2936 	/* Discard old sends or ones without a response. */
2937 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2938 	state = (enum ib_cm_state) (unsigned long) msg->context[1];
2939 	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
2940 		goto discard;
2941 
2942 	switch (state) {
2943 	case IB_CM_REQ_SENT:
2944 	case IB_CM_MRA_REQ_RCVD:
2945 		cm_reset_to_idle(cm_id_priv);
2946 		cm_event.event = IB_CM_REQ_ERROR;
2947 		break;
2948 	case IB_CM_REP_SENT:
2949 	case IB_CM_MRA_REP_RCVD:
2950 		cm_reset_to_idle(cm_id_priv);
2951 		cm_event.event = IB_CM_REP_ERROR;
2952 		break;
2953 	case IB_CM_DREQ_SENT:
2954 		cm_enter_timewait(cm_id_priv);
2955 		cm_event.event = IB_CM_DREQ_ERROR;
2956 		break;
2957 	case IB_CM_SIDR_REQ_SENT:
2958 		cm_id_priv->id.state = IB_CM_IDLE;
2959 		cm_event.event = IB_CM_SIDR_REQ_ERROR;
2960 		break;
2961 	default:
2962 		goto discard;
2963 	}
2964 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2965 	cm_event.param.send_status = wc_status;
2966 
2967 	/* No other events can occur on the cm_id at this point. */
2968 	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
2969 	cm_free_msg(msg);
2970 	if (ret)
2971 		ib_destroy_cm_id(&cm_id_priv->id);
2972 	return;
2973 discard:
2974 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2975 	cm_free_msg(msg);
2976 }
2977 
2978 static void cm_send_handler(struct ib_mad_agent *mad_agent,
2979 			    struct ib_mad_send_wc *mad_send_wc)
2980 {
2981 	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
2982 
2983 	switch (mad_send_wc->status) {
2984 	case IB_WC_SUCCESS:
2985 	case IB_WC_WR_FLUSH_ERR:
2986 		cm_free_msg(msg);
2987 		break;
2988 	default:
2989 		if (msg->context[0] && msg->context[1])
2990 			cm_process_send_error(msg, mad_send_wc->status);
2991 		else
2992 			cm_free_msg(msg);
2993 		break;
2994 	}
2995 }
2996 
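/*
 * Workqueue entry point: dispatch a queued cm_work item to the handler
 * for its event type.  Handlers that consume the work return 0; on any
 * error the work is freed here.
 */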
2997 static void cm_work_handler(struct work_struct *_work)
2998 {
2999 	struct cm_work *work = container_of(_work, struct cm_work, work.work);
3000 	int ret;
3001 
3002 	switch (work->cm_event.event) {
3003 	case IB_CM_REQ_RECEIVED:
3004 		ret = cm_req_handler(work);
3005 		break;
3006 	case IB_CM_MRA_RECEIVED:
3007 		ret = cm_mra_handler(work);
3008 		break;
3009 	case IB_CM_REJ_RECEIVED:
3010 		ret = cm_rej_handler(work);
3011 		break;
3012 	case IB_CM_REP_RECEIVED:
3013 		ret = cm_rep_handler(work);
3014 		break;
3015 	case IB_CM_RTU_RECEIVED:
3016 		ret = cm_rtu_handler(work);
3017 		break;
3018 	case IB_CM_USER_ESTABLISHED:
3019 		ret = cm_establish_handler(work);
3020 		break;
3021 	case IB_CM_DREQ_RECEIVED:
3022 		ret = cm_dreq_handler(work);
3023 		break;
3024 	case IB_CM_DREP_RECEIVED:
3025 		ret = cm_drep_handler(work);
3026 		break;
3027 	case IB_CM_SIDR_REQ_RECEIVED:
3028 		ret = cm_sidr_req_handler(work);
3029 		break;
3030 	case IB_CM_SIDR_REP_RECEIVED:
3031 		ret = cm_sidr_rep_handler(work);
3032 		break;
3033 	case IB_CM_LAP_RECEIVED:
3034 		ret = cm_lap_handler(work);
3035 		break;
3036 	case IB_CM_APR_RECEIVED:
3037 		ret = cm_apr_handler(work);
3038 		break;
3039 	case IB_CM_TIMEWAIT_EXIT:
3040 		ret = cm_timewait_handler(work);
3041 		break;
3042 	default:
3043 		ret = -EINVAL;
3044 		break;
3045 	}
3046 	if (ret)
3047 		cm_free_work(work);
3048 }
3049 
3050 static int cm_establish(struct ib_cm_id *cm_id)
3051 {
3052 	struct cm_id_private *cm_id_priv;
3053 	struct cm_work *work;
3054 	unsigned long flags;
3055 	int ret = 0;
3056 
3057 	work = kmalloc(sizeof *work, GFP_ATOMIC);
3058 	if (!work)
3059 		return -ENOMEM;
3060 
3061 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3062 	spin_lock_irqsave(&cm_id_priv->lock, flags);
3063 	switch (cm_id->state) {
3065 	case IB_CM_REP_SENT:
3066 	case IB_CM_MRA_REP_RCVD:
3067 		cm_id->state = IB_CM_ESTABLISHED;
3068 		break;
3069 	case IB_CM_ESTABLISHED:
3070 		ret = -EISCONN;
3071 		break;
3072 	default:
3073 		ret = -EINVAL;
3074 		break;
3075 	}
3076 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3077 
3078 	if (ret) {
3079 		kfree(work);
3080 		goto out;
3081 	}
3082 
3083 	/*
3084 	 * The CM worker thread may try to destroy the cm_id before it
3085 	 * can execute this work item.  To prevent potential deadlock,
3086 	 * we need to find the cm_id once we're in the context of the
3087 	 * worker thread, rather than holding a reference on it.
3088 	 */
3089 	INIT_DELAYED_WORK(&work->work, cm_work_handler);
3090 	work->local_id = cm_id->local_id;
3091 	work->remote_id = cm_id->remote_id;
3092 	work->mad_recv_wc = NULL;
3093 	work->cm_event.event = IB_CM_USER_ESTABLISHED;
3094 	queue_delayed_work(cm.wq, &work->work, 0);
3095 out:
3096 	return ret;
3097 }
3098 
3099 static int cm_migrate(struct ib_cm_id *cm_id)
3100 {
3101 	struct cm_id_private *cm_id_priv;
3102 	unsigned long flags;
3103 	int ret = 0;
3104 
3105 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3106 	spin_lock_irqsave(&cm_id_priv->lock, flags);
3107 	if (cm_id->state == IB_CM_ESTABLISHED &&
3108 	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
3109 	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
3110 		cm_id->lap_state = IB_CM_LAP_IDLE;
3111 		cm_id_priv->av = cm_id_priv->alt_av;
3112 	} else
3113 		ret = -EINVAL;
3114 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3115 
3116 	return ret;
3117 }
3118 
3119 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
3120 {
3121 	int ret;
3122 
3123 	switch (event) {
3124 	case IB_EVENT_COMM_EST:
3125 		ret = cm_establish(cm_id);
3126 		break;
3127 	case IB_EVENT_PATH_MIG:
3128 		ret = cm_migrate(cm_id);
3129 		break;
3130 	default:
3131 		ret = -EINVAL;
3132 	}
3133 	return ret;
3134 }
3135 EXPORT_SYMBOL(ib_cm_notify);
3136 
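/*
 * MAD layer receive callback: map the CM attribute ID to a CM event,
 * allocate a cm_work item (with room for any path records carried by the
 * MAD), and queue it to the CM workqueue for processing.
 */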
3137 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3138 			    struct ib_mad_recv_wc *mad_recv_wc)
3139 {
3140 	struct cm_work *work;
3141 	enum ib_cm_event_type event;
3142 	int paths = 0;
3143 
3144 	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
3145 	case CM_REQ_ATTR_ID:
3146 		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
3147 						    alt_local_lid != 0);
3148 		event = IB_CM_REQ_RECEIVED;
3149 		break;
3150 	case CM_MRA_ATTR_ID:
3151 		event = IB_CM_MRA_RECEIVED;
3152 		break;
3153 	case CM_REJ_ATTR_ID:
3154 		event = IB_CM_REJ_RECEIVED;
3155 		break;
3156 	case CM_REP_ATTR_ID:
3157 		event = IB_CM_REP_RECEIVED;
3158 		break;
3159 	case CM_RTU_ATTR_ID:
3160 		event = IB_CM_RTU_RECEIVED;
3161 		break;
3162 	case CM_DREQ_ATTR_ID:
3163 		event = IB_CM_DREQ_RECEIVED;
3164 		break;
3165 	case CM_DREP_ATTR_ID:
3166 		event = IB_CM_DREP_RECEIVED;
3167 		break;
3168 	case CM_SIDR_REQ_ATTR_ID:
3169 		event = IB_CM_SIDR_REQ_RECEIVED;
3170 		break;
3171 	case CM_SIDR_REP_ATTR_ID:
3172 		event = IB_CM_SIDR_REP_RECEIVED;
3173 		break;
3174 	case CM_LAP_ATTR_ID:
3175 		paths = 1;
3176 		event = IB_CM_LAP_RECEIVED;
3177 		break;
3178 	case CM_APR_ATTR_ID:
3179 		event = IB_CM_APR_RECEIVED;
3180 		break;
3181 	default:
3182 		ib_free_recv_mad(mad_recv_wc);
3183 		return;
3184 	}
3185 
3186 	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
3187 		       GFP_KERNEL);
3188 	if (!work) {
3189 		ib_free_recv_mad(mad_recv_wc);
3190 		return;
3191 	}
3192 
3193 	INIT_DELAYED_WORK(&work->work, cm_work_handler);
3194 	work->cm_event.event = event;
3195 	work->mad_recv_wc = mad_recv_wc;
3196 	work->port = (struct cm_port *)mad_agent->context;
3197 	queue_delayed_work(cm.wq, &work->work, 0);
3198 }
3199 
3200 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
3201 				struct ib_qp_attr *qp_attr,
3202 				int *qp_attr_mask)
3203 {
3204 	unsigned long flags;
3205 	int ret;
3206 
3207 	spin_lock_irqsave(&cm_id_priv->lock, flags);
3208 	switch (cm_id_priv->id.state) {
3209 	case IB_CM_REQ_SENT:
3210 	case IB_CM_MRA_REQ_RCVD:
3211 	case IB_CM_REQ_RCVD:
3212 	case IB_CM_MRA_REQ_SENT:
3213 	case IB_CM_REP_RCVD:
3214 	case IB_CM_MRA_REP_SENT:
3215 	case IB_CM_REP_SENT:
3216 	case IB_CM_MRA_REP_RCVD:
3217 	case IB_CM_ESTABLISHED:
3218 		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
3219 				IB_QP_PKEY_INDEX | IB_QP_PORT;
3220 		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
3221 		if (cm_id_priv->responder_resources)
3222 			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
3223 						    IB_ACCESS_REMOTE_ATOMIC;
3224 		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
3225 		qp_attr->port_num = cm_id_priv->av.port->port_num;
3226 		ret = 0;
3227 		break;
3228 	default:
3229 		ret = -EINVAL;
3230 		break;
3231 	}
3232 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3233 	return ret;
3234 }
3235 
3236 static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
3237 			       struct ib_qp_attr *qp_attr,
3238 			       int *qp_attr_mask)
3239 {
3240 	unsigned long flags;
3241 	int ret;
3242 
3243 	spin_lock_irqsave(&cm_id_priv->lock, flags);
3244 	switch (cm_id_priv->id.state) {
3245 	case IB_CM_REQ_RCVD:
3246 	case IB_CM_MRA_REQ_SENT:
3247 	case IB_CM_REP_RCVD:
3248 	case IB_CM_MRA_REP_SENT:
3249 	case IB_CM_REP_SENT:
3250 	case IB_CM_MRA_REP_RCVD:
3251 	case IB_CM_ESTABLISHED:
3252 		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
3253 				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
3254 		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
3255 		qp_attr->path_mtu = cm_id_priv->path_mtu;
3256 		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
3257 		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
3258 		if (cm_id_priv->qp_type == IB_QPT_RC) {
3259 			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
3260 					 IB_QP_MIN_RNR_TIMER;
3261 			qp_attr->max_dest_rd_atomic =
3262 					cm_id_priv->responder_resources;
3263 			qp_attr->min_rnr_timer = 0;
3264 		}
3265 		if (cm_id_priv->alt_av.ah_attr.dlid) {
3266 			*qp_attr_mask |= IB_QP_ALT_PATH;
3267 			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3268 			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
3269 			qp_attr->alt_timeout =
3270 					cm_id_priv->alt_av.packet_life_time + 1;
3271 			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3272 		}
3273 		ret = 0;
3274 		break;
3275 	default:
3276 		ret = -EINVAL;
3277 		break;
3278 	}
3279 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3280 	return ret;
3281 }
3282 
3283 static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3284 			       struct ib_qp_attr *qp_attr,
3285 			       int *qp_attr_mask)
3286 {
3287 	unsigned long flags;
3288 	int ret;
3289 
3290 	spin_lock_irqsave(&cm_id_priv->lock, flags);
3291 	switch (cm_id_priv->id.state) {
3292 	/* Allow transition to RTS before sending REP */
3293 	case IB_CM_REQ_RCVD:
3294 	case IB_CM_MRA_REQ_SENT:
3295 
3296 	case IB_CM_REP_RCVD:
3297 	case IB_CM_MRA_REP_SENT:
3298 	case IB_CM_REP_SENT:
3299 	case IB_CM_MRA_REP_RCVD:
3300 	case IB_CM_ESTABLISHED:
3301 		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
3302 			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
3303 			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
3304 			if (cm_id_priv->qp_type == IB_QPT_RC) {
3305 				*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
3306 						 IB_QP_RNR_RETRY |
3307 						 IB_QP_MAX_QP_RD_ATOMIC;
3308 				qp_attr->timeout =
3309 					cm_id_priv->av.packet_life_time + 1;
3310 				qp_attr->retry_cnt = cm_id_priv->retry_count;
3311 				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
3312 				qp_attr->max_rd_atomic =
3313 					cm_id_priv->initiator_depth;
3314 			}
3315 			if (cm_id_priv->alt_av.ah_attr.dlid) {
3316 				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
3317 				qp_attr->path_mig_state = IB_MIG_REARM;
3318 			}
3319 		} else {
3320 			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
3321 			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3322 			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
3323 			qp_attr->alt_timeout =
3324 				cm_id_priv->alt_av.packet_life_time + 1;
3325 			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3326 			qp_attr->path_mig_state = IB_MIG_REARM;
3327 		}
3328 		ret = 0;
3329 		break;
3330 	default:
3331 		ret = -EINVAL;
3332 		break;
3333 	}
3334 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3335 	return ret;
3336 }
3337 
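/*
 * Fill in the QP attributes a consumer needs to transition its QP to
 * INIT, RTR, or RTS as the connection progresses; which fields and mask
 * bits are returned depends on the requested qp_state and the current
 * connection state.
 */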
3338 int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
3339 		       struct ib_qp_attr *qp_attr,
3340 		       int *qp_attr_mask)
3341 {
3342 	struct cm_id_private *cm_id_priv;
3343 	int ret;
3344 
3345 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3346 	switch (qp_attr->qp_state) {
3347 	case IB_QPS_INIT:
3348 		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
3349 		break;
3350 	case IB_QPS_RTR:
3351 		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
3352 		break;
3353 	case IB_QPS_RTS:
3354 		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
3355 		break;
3356 	default:
3357 		ret = -EINVAL;
3358 		break;
3359 	}
3360 	return ret;
3361 }
3362 EXPORT_SYMBOL(ib_cm_init_qp_attr);
3363 
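/*
 * ib_client add callback: allocate a cm_device for a new IB device,
 * register a GSI MAD agent for the CM management class on each physical
 * port, advertise CM support in the port capability mask, and link the
 * device into the global device list.
 */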
3364 static void cm_add_one(struct ib_device *device)
3365 {
3366 	struct cm_device *cm_dev;
3367 	struct cm_port *port;
3368 	struct ib_mad_reg_req reg_req = {
3369 		.mgmt_class = IB_MGMT_CLASS_CM,
3370 		.mgmt_class_version = IB_CM_CLASS_VERSION
3371 	};
3372 	struct ib_port_modify port_modify = {
3373 		.set_port_cap_mask = IB_PORT_CM_SUP
3374 	};
3375 	unsigned long flags;
3376 	int ret;
3377 	u8 i;
3378 
3379 	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
3380 		return;
3381 
3382 	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
3383 			 device->phys_port_cnt, GFP_KERNEL);
3384 	if (!cm_dev)
3385 		return;
3386 
3387 	cm_dev->device = device;
3388 	cm_dev->ca_guid = device->node_guid;
3389 
3390 	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
3391 	for (i = 1; i <= device->phys_port_cnt; i++) {
3392 		port = &cm_dev->port[i-1];
3393 		port->cm_dev = cm_dev;
3394 		port->port_num = i;
3395 		port->mad_agent = ib_register_mad_agent(device, i,
3396 							IB_QPT_GSI,
3397 							&reg_req,
3398 							0,
3399 							cm_send_handler,
3400 							cm_recv_handler,
3401 							port);
3402 		if (IS_ERR(port->mad_agent))
3403 			goto error1;
3404 
3405 		ret = ib_modify_port(device, i, 0, &port_modify);
3406 		if (ret)
3407 			goto error2;
3408 	}
3409 	ib_set_client_data(device, &cm_client, cm_dev);
3410 
3411 	write_lock_irqsave(&cm.device_lock, flags);
3412 	list_add_tail(&cm_dev->list, &cm.device_list);
3413 	write_unlock_irqrestore(&cm.device_lock, flags);
3414 	return;
3415 
3416 error2:
3417 	ib_unregister_mad_agent(port->mad_agent);
3418 error1:
3419 	port_modify.set_port_cap_mask = 0;
3420 	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
3421 	while (--i) {
3422 		port = &cm_dev->port[i-1];
3423 		ib_modify_port(device, port->port_num, 0, &port_modify);
3424 		ib_unregister_mad_agent(port->mad_agent);
3425 	}
3426 	kfree(cm_dev);
3427 }
3428 
3429 static void cm_remove_one(struct ib_device *device)
3430 {
3431 	struct cm_device *cm_dev;
3432 	struct cm_port *port;
3433 	struct ib_port_modify port_modify = {
3434 		.clr_port_cap_mask = IB_PORT_CM_SUP
3435 	};
3436 	unsigned long flags;
3437 	int i;
3438 
3439 	cm_dev = ib_get_client_data(device, &cm_client);
3440 	if (!cm_dev)
3441 		return;
3442 
3443 	write_lock_irqsave(&cm.device_lock, flags);
3444 	list_del(&cm_dev->list);
3445 	write_unlock_irqrestore(&cm.device_lock, flags);
3446 
3447 	for (i = 1; i <= device->phys_port_cnt; i++) {
3448 		port = &cm_dev->port[i-1];
3449 		ib_modify_port(device, port->port_num, 0, &port_modify);
3450 		ib_unregister_mad_agent(port->mad_agent);
3451 	}
3452 	kfree(cm_dev);
3453 }
3454 
3455 static int __init ib_cm_init(void)
3456 {
3457 	int ret;
3458 
3459 	memset(&cm, 0, sizeof cm);
3460 	INIT_LIST_HEAD(&cm.device_list);
3461 	rwlock_init(&cm.device_lock);
3462 	spin_lock_init(&cm.lock);
3463 	cm.listen_service_table = RB_ROOT;
3464 	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
3465 	cm.remote_id_table = RB_ROOT;
3466 	cm.remote_qp_table = RB_ROOT;
3467 	cm.remote_sidr_table = RB_ROOT;
3468 	idr_init(&cm.local_id_table);
3469 	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
3470 	idr_pre_get(&cm.local_id_table, GFP_KERNEL);
3471 	INIT_LIST_HEAD(&cm.timewait_list);
3472 
3473 	cm.wq = create_workqueue("ib_cm");
3474 	if (!cm.wq)
3475 		return -ENOMEM;
3476 
3477 	ret = ib_register_client(&cm_client);
3478 	if (ret)
3479 		goto error;
3480 
3481 	return 0;
3482 error:
3483 	destroy_workqueue(cm.wq);
3484 	return ret;
3485 }
3486 
3487 static void __exit ib_cm_cleanup(void)
3488 {
3489 	struct cm_timewait_info *timewait_info, *tmp;
3490 
3491 	spin_lock_irq(&cm.lock);
3492 	list_for_each_entry(timewait_info, &cm.timewait_list, list)
3493 		cancel_delayed_work(&timewait_info->work.work);
3494 	spin_unlock_irq(&cm.lock);
3495 
3496 	destroy_workqueue(cm.wq);
3497 
3498 	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
3499 		list_del(&timewait_info->list);
3500 		kfree(timewait_info);
3501 	}
3502 
3503 	ib_unregister_client(&cm_client);
3504 	idr_destroy(&cm.local_id_table);
3505 }
3506 
3507 module_init(ib_cm_init);
3508 module_exit(ib_cm_cleanup);
3509 
3510