/*
 * Copyright (c) 2004-2006 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 4311 2005-12-05 18:42:01Z sean.hefty $
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
} cm;

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u8 port_num;
};

struct cm_device {
	struct list_head list;
	struct ib_device *device;
	struct cm_port port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 packet_life_time;
};

struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};
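/*
 * Timewait entries are tracked in the remote_id and remote_qp red-black
 * trees so that duplicate REQs and stale connections can be detected;
 * the inserted_remote_* flags below record which trees an entry is
 * currently linked into, so cleanup only erases what was inserted.
 */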
struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	atomic_t refcount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;
	struct ib_cm_compare_data *compare_data;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;

	struct list_head work_list;
	atomic_t work_count;
};

static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}

static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				    struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->device, port->port_num, wc,
			   grh, &av->ah_attr);
}

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
					&p, NULL)) {
			port = &cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	ib_init_ah_from_path(cm_dev->device, port->port_num, path,
			     &av->ah_attr);
	av->packet_life_time = path->packet_life_time;
	return 0;
}

static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int ret, id;
	static int next_id;

	do {
		spin_lock_irqsave(&cm.lock, flags);
		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
					next_id++, &id);
		spin_unlock_irqrestore(&cm.lock, flags);
	} while ((ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL));

	cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand);
	return ret;
}

static void cm_free_id(__be32 local_id)
{
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	idr_remove(&cm.local_id_table,
		   (__force int) (local_id ^ cm.random_id_operand));
	spin_unlock_irqrestore(&cm.lock, flags);
}
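/*
 * Local IDs handed out to users are the idr index XORed with a random
 * operand, so the lookups below undo the XOR before searching the idr.
 * cm_get_id() expects cm.lock to be held by the caller and takes a
 * reference only when the remote ID also matches.
 */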
static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table,
			      (__force int) (local_id ^ cm.random_id_operand));
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irqrestore(&cm.lock, flags);

	return cm_id_priv;
}

static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
{
	int i;

	for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
		((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
					     ((unsigned long *) mask)[i];
}

static int cm_compare_data(struct ib_cm_compare_data *src_data,
			   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];
	u8 dst[IB_CM_COMPARE_SIZE];

	if (!src_data || !dst_data)
		return 0;

	cm_mask_copy(src, src_data->data, dst_data->mask);
	cm_mask_copy(dst, dst_data->data, src_data->mask);
	return memcmp(src, dst, IB_CM_COMPARE_SIZE);
}

static int cm_compare_private_data(u8 *private_data,
				   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];

	if (!dst_data)
		return 0;

	cm_mask_copy(src, private_data, dst_data->mask);
	return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
}

static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;
	int data_cmp;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		data_cmp = cm_compare_data(cm_id_priv->compare_data,
					   cur_cm_id_priv->compare_data);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
		    !data_cmp)
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (service_id < cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_left;
		else if (service_id > cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_right;
		else if (data_cmp < 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}
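/*
 * Listens are kept in a red-black tree ordered by device, service ID, and
 * masked compare data; a lookup matches only when the masked service ID,
 * device, and compare data all agree with an existing entry.
 */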
static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id,
					     u8 *private_data)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;
	int data_cmp;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		data_cmp = cm_compare_private_data(private_data,
						   cm_id_priv->compare_data);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device) && !data_cmp)
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (service_id < cm_id_priv->id.service_id)
			node = node->rb_left;
		else if (service_id > cm_id_priv->id.service_id)
			node = node->rb_right;
		else if (data_cmp < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (remote_id < cur_timewait_info->work.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_timewait_info->work.remote_id)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (remote_id < timewait_info->work.remote_id)
			node = node->rb_left;
		else if (remote_id > timewait_info->work.remote_id)
			node = node->rb_right;
		else if (remote_ca_guid < timewait_info->remote_ca_guid)
			node = node->rb_left;
		else if (remote_ca_guid > timewait_info->remote_ca_guid)
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (remote_qpn < cur_timewait_info->remote_qpn)
			link = &(*link)->rb_left;
		else if (remote_qpn > cur_timewait_info->remote_qpn)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}

static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (remote_id < cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}

static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.packet_life_time + 1);
	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
			   msecs_to_jiffies(wait_time));
	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		cm_id->state = IB_CM_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		spin_lock_irqsave(&cm.lock, flags);
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		break;
	case IB_CM_REQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->id.device->node_guid,
			       sizeof cm_id_priv->id.device->node_guid,
			       NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		} else {
			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		}
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	}

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->compare_data);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
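/*
 * A service_mask of 0 is treated as an exact match on service_id.
 * Passing IB_CM_ASSIGN_SERVICE_ID instead asks the CM to hand out the
 * next unused service ID from its locally managed counter.
 */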
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
		 struct ib_cm_compare_data *compare_data)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	unsigned long flags;
	int ret = 0;

	service_mask = service_mask ? service_mask :
		       __constant_cpu_to_be64(~0ULL);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	if (compare_data) {
		cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
						   GFP_KERNEL);
		if (!cm_id_priv->compare_data)
			return -ENOMEM;
		cm_mask_copy(cm_id_priv->compare_data->data,
			     compare_data->data, compare_data->mask);
		memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
		       IB_CM_COMPARE_SIZE);
	}

	cm_id->state = IB_CM_LISTEN;

	spin_lock_irqsave(&cm.lock, flags);
	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
	spin_unlock_irqrestore(&cm.lock, flags);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		kfree(cm_id_priv->compare_data);
		cm_id_priv->compare_data = NULL;
		ret = -EBUSY;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);

static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
			 (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method = IB_MGMT_METHOD_SEND;
	hdr->attr_id = attr_id;
	hdr->tid = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_resp_res(req_msg, param->responder_resources);
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	cm_req_set_retry_count(req_msg, param->retry_count);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
	cm_req_set_srq(req_msg, param->srq);

	req_msg->primary_local_lid = param->primary_path->slid;
	req_msg->primary_remote_lid = param->primary_path->dlid;
	req_msg->primary_local_gid = param->primary_path->sgid;
	req_msg->primary_remote_gid = param->primary_path->dgid;
	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
	req_msg->primary_traffic_class = param->primary_path->traffic_class;
	req_msg->primary_hop_limit = param->primary_path->hop_limit;
	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
	cm_req_set_primary_local_ack_timeout(req_msg,
		min(31, param->primary_path->packet_life_time + 1));

	if (param->alternate_path) {
		req_msg->alt_local_lid = param->alternate_path->slid;
		req_msg->alt_remote_lid = param->alternate_path->dlid;
		req_msg->alt_local_gid = param->alternate_path->sgid;
		req_msg->alt_remote_gid = param->alternate_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  param->alternate_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
		cm_req_set_alt_local_ack_timeout(req_msg,
			min(31, param->alternate_path->packet_life_time + 1));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}
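/*
 * The REQ send timeout below covers a round trip on the primary path plus
 * the remote CM's response time: 2 * packet_life_time plus
 * remote_cm_response_timeout, each converted from IB time units to
 * milliseconds by cm_convert_to_ms().
 */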
int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);

static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information. Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct ib_sa_path_rec *primary_path,
				     struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
	}
}

static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
					cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
					cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	unsigned long flags;
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
}

static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;
	unsigned long flags;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for duplicate REQ and stale connections. */
	spin_lock_irqsave(&cm.lock, flags);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (!timewait_info)
		timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);

	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		} else
			cm_issue_rej(work->port, work->mad_recv_wc,
				     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
				     NULL, 0);
		listen_cm_id_priv = NULL;
		goto out;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id,
					   req_msg->private_data);
	if (!listen_cm_id_priv) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto out;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irqrestore(&cm.lock, flags);
out:
	return listen_cm_id_priv;
}

static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		kfree(cm_id_priv->timewait_info);
		goto destroy;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret) {
		ib_get_cached_gid(work->port->cm_dev->device,
				  work->port->port_num, 0, &work->path[0].sgid);
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,
			       NULL, 0);
		goto rejected;
	}
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret) {
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);
			goto rejected;
		}
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->pkey = req_msg->pkey;
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

rejected:
	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
destroy:
	ib_destroy_cm_id(cm_id);
	return ret;
}

static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	rep_msg->initiator_depth = param->initiator_depth;
	cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	cm_rep_set_srq(rep_msg, param->srq);
	rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);

static void cm_format_rep_event(struct cm_work *work)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}

static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}

static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		return -EINVAL;
	}

	cm_format_rep_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}
	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
			 &cm.remote_id_table);
		cm_id_priv->timewait_info->inserted_remote_id = 0;
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		goto error;
	}
	spin_unlock(&cm.lock);

	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);

	/* todo: handle peer_to_peer */

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:
	cm_deref_id(cm_id_priv);
	return ret;
}

static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	/* See comment in cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	unsigned long flags;
	int ret;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_dreq(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		goto out;
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
ret; 1830 } 1831 1832 cm_id->state = IB_CM_DREQ_SENT; 1833 cm_id_priv->msg = msg; 1834 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1835 return ret; 1836 } 1837 EXPORT_SYMBOL(ib_send_cm_dreq); 1838 1839 static void cm_format_drep(struct cm_drep_msg *drep_msg, 1840 struct cm_id_private *cm_id_priv, 1841 const void *private_data, 1842 u8 private_data_len) 1843 { 1844 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid); 1845 drep_msg->local_comm_id = cm_id_priv->id.local_id; 1846 drep_msg->remote_comm_id = cm_id_priv->id.remote_id; 1847 1848 if (private_data && private_data_len) 1849 memcpy(drep_msg->private_data, private_data, private_data_len); 1850 } 1851 1852 int ib_send_cm_drep(struct ib_cm_id *cm_id, 1853 const void *private_data, 1854 u8 private_data_len) 1855 { 1856 struct cm_id_private *cm_id_priv; 1857 struct ib_mad_send_buf *msg; 1858 unsigned long flags; 1859 void *data; 1860 int ret; 1861 1862 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE) 1863 return -EINVAL; 1864 1865 data = cm_copy_private_data(private_data, private_data_len); 1866 if (IS_ERR(data)) 1867 return PTR_ERR(data); 1868 1869 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1870 spin_lock_irqsave(&cm_id_priv->lock, flags); 1871 if (cm_id->state != IB_CM_DREQ_RCVD) { 1872 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1873 kfree(data); 1874 return -EINVAL; 1875 } 1876 1877 cm_set_private_data(cm_id_priv, data, private_data_len); 1878 cm_enter_timewait(cm_id_priv); 1879 1880 ret = cm_alloc_msg(cm_id_priv, &msg); 1881 if (ret) 1882 goto out; 1883 1884 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, 1885 private_data, private_data_len); 1886 1887 ret = ib_post_send_mad(msg, NULL); 1888 if (ret) { 1889 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1890 cm_free_msg(msg); 1891 return ret; 1892 } 1893 1894 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1895 return ret; 1896 } 1897 EXPORT_SYMBOL(ib_send_cm_drep); 1898 1899 static int cm_issue_drep(struct cm_port *port, 1900 struct ib_mad_recv_wc *mad_recv_wc) 1901 { 1902 struct ib_mad_send_buf *msg = NULL; 1903 struct cm_dreq_msg *dreq_msg; 1904 struct cm_drep_msg *drep_msg; 1905 int ret; 1906 1907 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg); 1908 if (ret) 1909 return ret; 1910 1911 dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad; 1912 drep_msg = (struct cm_drep_msg *) msg->mad; 1913 1914 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid); 1915 drep_msg->remote_comm_id = dreq_msg->local_comm_id; 1916 drep_msg->local_comm_id = dreq_msg->remote_comm_id; 1917 1918 ret = ib_post_send_mad(msg, NULL); 1919 if (ret) 1920 cm_free_msg(msg); 1921 1922 return ret; 1923 } 1924 1925 static int cm_dreq_handler(struct cm_work *work) 1926 { 1927 struct cm_id_private *cm_id_priv; 1928 struct cm_dreq_msg *dreq_msg; 1929 struct ib_mad_send_buf *msg = NULL; 1930 unsigned long flags; 1931 int ret; 1932 1933 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad; 1934 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id, 1935 dreq_msg->local_comm_id); 1936 if (!cm_id_priv) { 1937 cm_issue_drep(work->port, work->mad_recv_wc); 1938 return -EINVAL; 1939 } 1940 1941 work->cm_event.private_data = &dreq_msg->private_data; 1942 1943 spin_lock_irqsave(&cm_id_priv->lock, flags); 1944 if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg)) 1945 goto unlock; 1946 1947 switch (cm_id_priv->id.state) { 1948 case IB_CM_REP_SENT: 1949 case IB_CM_DREQ_SENT: 1950 
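/*
 * Added note: a DREQ from the remote side makes our outstanding REP or
 * DREQ retries pointless, so the MAD layer is told to stop
 * retransmitting it before the disconnect is handed up to the consumer.
 */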
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 1951 break; 1952 case IB_CM_ESTABLISHED: 1953 case IB_CM_MRA_REP_RCVD: 1954 break; 1955 case IB_CM_TIMEWAIT: 1956 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) 1957 goto unlock; 1958 1959 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, 1960 cm_id_priv->private_data, 1961 cm_id_priv->private_data_len); 1962 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1963 1964 if (ib_post_send_mad(msg, NULL)) 1965 cm_free_msg(msg); 1966 goto deref; 1967 default: 1968 goto unlock; 1969 } 1970 cm_id_priv->id.state = IB_CM_DREQ_RCVD; 1971 cm_id_priv->tid = dreq_msg->hdr.tid; 1972 ret = atomic_inc_and_test(&cm_id_priv->work_count); 1973 if (!ret) 1974 list_add_tail(&work->list, &cm_id_priv->work_list); 1975 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1976 1977 if (ret) 1978 cm_process_work(cm_id_priv, work); 1979 else 1980 cm_deref_id(cm_id_priv); 1981 return 0; 1982 1983 unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1984 deref: cm_deref_id(cm_id_priv); 1985 return -EINVAL; 1986 } 1987 1988 static int cm_drep_handler(struct cm_work *work) 1989 { 1990 struct cm_id_private *cm_id_priv; 1991 struct cm_drep_msg *drep_msg; 1992 unsigned long flags; 1993 int ret; 1994 1995 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad; 1996 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id, 1997 drep_msg->local_comm_id); 1998 if (!cm_id_priv) 1999 return -EINVAL; 2000 2001 work->cm_event.private_data = &drep_msg->private_data; 2002 2003 spin_lock_irqsave(&cm_id_priv->lock, flags); 2004 if (cm_id_priv->id.state != IB_CM_DREQ_SENT && 2005 cm_id_priv->id.state != IB_CM_DREQ_RCVD) { 2006 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2007 goto out; 2008 } 2009 cm_enter_timewait(cm_id_priv); 2010 2011 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2012 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2013 if (!ret) 2014 list_add_tail(&work->list, &cm_id_priv->work_list); 2015 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2016 2017 if (ret) 2018 cm_process_work(cm_id_priv, work); 2019 else 2020 cm_deref_id(cm_id_priv); 2021 return 0; 2022 out: 2023 cm_deref_id(cm_id_priv); 2024 return -EINVAL; 2025 } 2026 2027 int ib_send_cm_rej(struct ib_cm_id *cm_id, 2028 enum ib_cm_rej_reason reason, 2029 void *ari, 2030 u8 ari_length, 2031 const void *private_data, 2032 u8 private_data_len) 2033 { 2034 struct cm_id_private *cm_id_priv; 2035 struct ib_mad_send_buf *msg; 2036 unsigned long flags; 2037 int ret; 2038 2039 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) || 2040 (ari && ari_length > IB_CM_REJ_ARI_LENGTH)) 2041 return -EINVAL; 2042 2043 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2044 2045 spin_lock_irqsave(&cm_id_priv->lock, flags); 2046 switch (cm_id->state) { 2047 case IB_CM_REQ_SENT: 2048 case IB_CM_MRA_REQ_RCVD: 2049 case IB_CM_REQ_RCVD: 2050 case IB_CM_MRA_REQ_SENT: 2051 case IB_CM_REP_RCVD: 2052 case IB_CM_MRA_REP_SENT: 2053 ret = cm_alloc_msg(cm_id_priv, &msg); 2054 if (!ret) 2055 cm_format_rej((struct cm_rej_msg *) msg->mad, 2056 cm_id_priv, reason, ari, ari_length, 2057 private_data, private_data_len); 2058 2059 cm_reset_to_idle(cm_id_priv); 2060 break; 2061 case IB_CM_REP_SENT: 2062 case IB_CM_MRA_REP_RCVD: 2063 ret = cm_alloc_msg(cm_id_priv, &msg); 2064 if (!ret) 2065 cm_format_rej((struct cm_rej_msg *) msg->mad, 2066 cm_id_priv, reason, ari, ari_length, 2067 private_data, private_data_len); 2068 2069 cm_enter_timewait(cm_id_priv); 
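/*
 * Added note: unlike the pre-REP states above, which reset straight to
 * idle, a REP has already gone out here and the remote end may believe
 * the connection exists, so the local ID is parked in timewait to let
 * stale messages for this connection be absorbed.
 */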
2070 break; 2071 default: 2072 ret = -EINVAL; 2073 goto out; 2074 } 2075 2076 if (ret) 2077 goto out; 2078 2079 ret = ib_post_send_mad(msg, NULL); 2080 if (ret) 2081 cm_free_msg(msg); 2082 2083 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2084 return ret; 2085 } 2086 EXPORT_SYMBOL(ib_send_cm_rej); 2087 2088 static void cm_format_rej_event(struct cm_work *work) 2089 { 2090 struct cm_rej_msg *rej_msg; 2091 struct ib_cm_rej_event_param *param; 2092 2093 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; 2094 param = &work->cm_event.param.rej_rcvd; 2095 param->ari = rej_msg->ari; 2096 param->ari_length = cm_rej_get_reject_info_len(rej_msg); 2097 param->reason = __be16_to_cpu(rej_msg->reason); 2098 work->cm_event.private_data = &rej_msg->private_data; 2099 } 2100 2101 static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg) 2102 { 2103 struct cm_timewait_info *timewait_info; 2104 struct cm_id_private *cm_id_priv; 2105 unsigned long flags; 2106 __be32 remote_id; 2107 2108 remote_id = rej_msg->local_comm_id; 2109 2110 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) { 2111 spin_lock_irqsave(&cm.lock, flags); 2112 timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari), 2113 remote_id); 2114 if (!timewait_info) { 2115 spin_unlock_irqrestore(&cm.lock, flags); 2116 return NULL; 2117 } 2118 cm_id_priv = idr_find(&cm.local_id_table, (__force int) 2119 (timewait_info->work.local_id ^ 2120 cm.random_id_operand)); 2121 if (cm_id_priv) { 2122 if (cm_id_priv->id.remote_id == remote_id) 2123 atomic_inc(&cm_id_priv->refcount); 2124 else 2125 cm_id_priv = NULL; 2126 } 2127 spin_unlock_irqrestore(&cm.lock, flags); 2128 } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ) 2129 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0); 2130 else 2131 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id); 2132 2133 return cm_id_priv; 2134 } 2135 2136 static int cm_rej_handler(struct cm_work *work) 2137 { 2138 struct cm_id_private *cm_id_priv; 2139 struct cm_rej_msg *rej_msg; 2140 unsigned long flags; 2141 int ret; 2142 2143 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; 2144 cm_id_priv = cm_acquire_rejected_id(rej_msg); 2145 if (!cm_id_priv) 2146 return -EINVAL; 2147 2148 cm_format_rej_event(work); 2149 2150 spin_lock_irqsave(&cm_id_priv->lock, flags); 2151 switch (cm_id_priv->id.state) { 2152 case IB_CM_REQ_SENT: 2153 case IB_CM_MRA_REQ_RCVD: 2154 case IB_CM_REP_SENT: 2155 case IB_CM_MRA_REP_RCVD: 2156 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2157 /* fall through */ 2158 case IB_CM_REQ_RCVD: 2159 case IB_CM_MRA_REQ_SENT: 2160 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN) 2161 cm_enter_timewait(cm_id_priv); 2162 else 2163 cm_reset_to_idle(cm_id_priv); 2164 break; 2165 case IB_CM_DREQ_SENT: 2166 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2167 /* fall through */ 2168 case IB_CM_REP_RCVD: 2169 case IB_CM_MRA_REP_SENT: 2170 case IB_CM_ESTABLISHED: 2171 cm_enter_timewait(cm_id_priv); 2172 break; 2173 default: 2174 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2175 ret = -EINVAL; 2176 goto out; 2177 } 2178 2179 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2180 if (!ret) 2181 list_add_tail(&work->list, &cm_id_priv->work_list); 2182 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2183 2184 if (ret) 2185 cm_process_work(cm_id_priv, work); 2186 else 2187 cm_deref_id(cm_id_priv); 2188 return 0; 2189 out: 2190 cm_deref_id(cm_id_priv); 2191 return -EINVAL; 
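/*
 * Consumer-side sketch (illustrative, not part of the original file): a
 * ULP's cm_handler would typically see this as an IB_CM_REJ_RECEIVED
 * event and decide, based on event->param.rej_rcvd.reason, whether to
 * retry the REQ or give up; returning non-zero from the handler lets the
 * CM destroy the cm_id on the ULP's behalf.
 */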
2192 } 2193 2194 int ib_send_cm_mra(struct ib_cm_id *cm_id, 2195 u8 service_timeout, 2196 const void *private_data, 2197 u8 private_data_len) 2198 { 2199 struct cm_id_private *cm_id_priv; 2200 struct ib_mad_send_buf *msg; 2201 void *data; 2202 unsigned long flags; 2203 int ret; 2204 2205 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE) 2206 return -EINVAL; 2207 2208 data = cm_copy_private_data(private_data, private_data_len); 2209 if (IS_ERR(data)) 2210 return PTR_ERR(data); 2211 2212 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2213 2214 spin_lock_irqsave(&cm_id_priv->lock, flags); 2215 switch(cm_id_priv->id.state) { 2216 case IB_CM_REQ_RCVD: 2217 ret = cm_alloc_msg(cm_id_priv, &msg); 2218 if (ret) 2219 goto error1; 2220 2221 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2222 CM_MSG_RESPONSE_REQ, service_timeout, 2223 private_data, private_data_len); 2224 ret = ib_post_send_mad(msg, NULL); 2225 if (ret) 2226 goto error2; 2227 cm_id->state = IB_CM_MRA_REQ_SENT; 2228 break; 2229 case IB_CM_REP_RCVD: 2230 ret = cm_alloc_msg(cm_id_priv, &msg); 2231 if (ret) 2232 goto error1; 2233 2234 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2235 CM_MSG_RESPONSE_REP, service_timeout, 2236 private_data, private_data_len); 2237 ret = ib_post_send_mad(msg, NULL); 2238 if (ret) 2239 goto error2; 2240 cm_id->state = IB_CM_MRA_REP_SENT; 2241 break; 2242 case IB_CM_ESTABLISHED: 2243 ret = cm_alloc_msg(cm_id_priv, &msg); 2244 if (ret) 2245 goto error1; 2246 2247 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2248 CM_MSG_RESPONSE_OTHER, service_timeout, 2249 private_data, private_data_len); 2250 ret = ib_post_send_mad(msg, NULL); 2251 if (ret) 2252 goto error2; 2253 cm_id->lap_state = IB_CM_MRA_LAP_SENT; 2254 break; 2255 default: 2256 ret = -EINVAL; 2257 goto error1; 2258 } 2259 cm_id_priv->service_timeout = service_timeout; 2260 cm_set_private_data(cm_id_priv, data, private_data_len); 2261 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2262 return 0; 2263 2264 error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2265 kfree(data); 2266 return ret; 2267 2268 error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2269 kfree(data); 2270 cm_free_msg(msg); 2271 return ret; 2272 } 2273 EXPORT_SYMBOL(ib_send_cm_mra); 2274 2275 static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg) 2276 { 2277 switch (cm_mra_get_msg_mraed(mra_msg)) { 2278 case CM_MSG_RESPONSE_REQ: 2279 return cm_acquire_id(mra_msg->remote_comm_id, 0); 2280 case CM_MSG_RESPONSE_REP: 2281 case CM_MSG_RESPONSE_OTHER: 2282 return cm_acquire_id(mra_msg->remote_comm_id, 2283 mra_msg->local_comm_id); 2284 default: 2285 return NULL; 2286 } 2287 } 2288 2289 static int cm_mra_handler(struct cm_work *work) 2290 { 2291 struct cm_id_private *cm_id_priv; 2292 struct cm_mra_msg *mra_msg; 2293 unsigned long flags; 2294 int timeout, ret; 2295 2296 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad; 2297 cm_id_priv = cm_acquire_mraed_id(mra_msg); 2298 if (!cm_id_priv) 2299 return -EINVAL; 2300 2301 work->cm_event.private_data = &mra_msg->private_data; 2302 work->cm_event.param.mra_rcvd.service_timeout = 2303 cm_mra_get_service_timeout(mra_msg); 2304 timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) + 2305 cm_convert_to_ms(cm_id_priv->av.packet_life_time); 2306 2307 spin_lock_irqsave(&cm_id_priv->lock, flags); 2308 switch (cm_id_priv->id.state) { 2309 case IB_CM_REQ_SENT: 2310 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ || 2311 
ib_modify_mad(cm_id_priv->av.port->mad_agent, 2312 cm_id_priv->msg, timeout)) 2313 goto out; 2314 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD; 2315 break; 2316 case IB_CM_REP_SENT: 2317 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP || 2318 ib_modify_mad(cm_id_priv->av.port->mad_agent, 2319 cm_id_priv->msg, timeout)) 2320 goto out; 2321 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD; 2322 break; 2323 case IB_CM_ESTABLISHED: 2324 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER || 2325 cm_id_priv->id.lap_state != IB_CM_LAP_SENT || 2326 ib_modify_mad(cm_id_priv->av.port->mad_agent, 2327 cm_id_priv->msg, timeout)) 2328 goto out; 2329 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD; 2330 break; 2331 default: 2332 goto out; 2333 } 2334 2335 cm_id_priv->msg->context[1] = (void *) (unsigned long) 2336 cm_id_priv->id.state; 2337 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2338 if (!ret) 2339 list_add_tail(&work->list, &cm_id_priv->work_list); 2340 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2341 2342 if (ret) 2343 cm_process_work(cm_id_priv, work); 2344 else 2345 cm_deref_id(cm_id_priv); 2346 return 0; 2347 out: 2348 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2349 cm_deref_id(cm_id_priv); 2350 return -EINVAL; 2351 } 2352 2353 static void cm_format_lap(struct cm_lap_msg *lap_msg, 2354 struct cm_id_private *cm_id_priv, 2355 struct ib_sa_path_rec *alternate_path, 2356 const void *private_data, 2357 u8 private_data_len) 2358 { 2359 cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID, 2360 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP)); 2361 lap_msg->local_comm_id = cm_id_priv->id.local_id; 2362 lap_msg->remote_comm_id = cm_id_priv->id.remote_id; 2363 cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn); 2364 /* todo: need remote CM response timeout */ 2365 cm_lap_set_remote_resp_timeout(lap_msg, 0x1F); 2366 lap_msg->alt_local_lid = alternate_path->slid; 2367 lap_msg->alt_remote_lid = alternate_path->dlid; 2368 lap_msg->alt_local_gid = alternate_path->sgid; 2369 lap_msg->alt_remote_gid = alternate_path->dgid; 2370 cm_lap_set_flow_label(lap_msg, alternate_path->flow_label); 2371 cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class); 2372 lap_msg->alt_hop_limit = alternate_path->hop_limit; 2373 cm_lap_set_packet_rate(lap_msg, alternate_path->rate); 2374 cm_lap_set_sl(lap_msg, alternate_path->sl); 2375 cm_lap_set_subnet_local(lap_msg, 1); /* local only... 
*/ 2376 cm_lap_set_local_ack_timeout(lap_msg, 2377 min(31, alternate_path->packet_life_time + 1)); 2378 2379 if (private_data && private_data_len) 2380 memcpy(lap_msg->private_data, private_data, private_data_len); 2381 } 2382 2383 int ib_send_cm_lap(struct ib_cm_id *cm_id, 2384 struct ib_sa_path_rec *alternate_path, 2385 const void *private_data, 2386 u8 private_data_len) 2387 { 2388 struct cm_id_private *cm_id_priv; 2389 struct ib_mad_send_buf *msg; 2390 unsigned long flags; 2391 int ret; 2392 2393 if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE) 2394 return -EINVAL; 2395 2396 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2397 spin_lock_irqsave(&cm_id_priv->lock, flags); 2398 if (cm_id->state != IB_CM_ESTABLISHED || 2399 (cm_id->lap_state != IB_CM_LAP_UNINIT && 2400 cm_id->lap_state != IB_CM_LAP_IDLE)) { 2401 ret = -EINVAL; 2402 goto out; 2403 } 2404 2405 ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av); 2406 if (ret) 2407 goto out; 2408 2409 ret = cm_alloc_msg(cm_id_priv, &msg); 2410 if (ret) 2411 goto out; 2412 2413 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv, 2414 alternate_path, private_data, private_data_len); 2415 msg->timeout_ms = cm_id_priv->timeout_ms; 2416 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED; 2417 2418 ret = ib_post_send_mad(msg, NULL); 2419 if (ret) { 2420 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2421 cm_free_msg(msg); 2422 return ret; 2423 } 2424 2425 cm_id->lap_state = IB_CM_LAP_SENT; 2426 cm_id_priv->msg = msg; 2427 2428 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2429 return ret; 2430 } 2431 EXPORT_SYMBOL(ib_send_cm_lap); 2432 2433 static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv, 2434 struct ib_sa_path_rec *path, 2435 struct cm_lap_msg *lap_msg) 2436 { 2437 memset(path, 0, sizeof *path); 2438 path->dgid = lap_msg->alt_local_gid; 2439 path->sgid = lap_msg->alt_remote_gid; 2440 path->dlid = lap_msg->alt_local_lid; 2441 path->slid = lap_msg->alt_remote_lid; 2442 path->flow_label = cm_lap_get_flow_label(lap_msg); 2443 path->hop_limit = lap_msg->alt_hop_limit; 2444 path->traffic_class = cm_lap_get_traffic_class(lap_msg); 2445 path->reversible = 1; 2446 path->pkey = cm_id_priv->pkey; 2447 path->sl = cm_lap_get_sl(lap_msg); 2448 path->mtu_selector = IB_SA_EQ; 2449 path->mtu = cm_id_priv->path_mtu; 2450 path->rate_selector = IB_SA_EQ; 2451 path->rate = cm_lap_get_packet_rate(lap_msg); 2452 path->packet_life_time_selector = IB_SA_EQ; 2453 path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg); 2454 path->packet_life_time -= (path->packet_life_time > 0); 2455 } 2456 2457 static int cm_lap_handler(struct cm_work *work) 2458 { 2459 struct cm_id_private *cm_id_priv; 2460 struct cm_lap_msg *lap_msg; 2461 struct ib_cm_lap_event_param *param; 2462 struct ib_mad_send_buf *msg = NULL; 2463 unsigned long flags; 2464 int ret; 2465 2466 /* todo: verify LAP request and send reject APR if invalid. 
*/ 2467 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad; 2468 cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id, 2469 lap_msg->local_comm_id); 2470 if (!cm_id_priv) 2471 return -EINVAL; 2472 2473 param = &work->cm_event.param.lap_rcvd; 2474 param->alternate_path = &work->path[0]; 2475 cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg); 2476 work->cm_event.private_data = &lap_msg->private_data; 2477 2478 spin_lock_irqsave(&cm_id_priv->lock, flags); 2479 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) 2480 goto unlock; 2481 2482 switch (cm_id_priv->id.lap_state) { 2483 case IB_CM_LAP_UNINIT: 2484 case IB_CM_LAP_IDLE: 2485 break; 2486 case IB_CM_MRA_LAP_SENT: 2487 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) 2488 goto unlock; 2489 2490 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2491 CM_MSG_RESPONSE_OTHER, 2492 cm_id_priv->service_timeout, 2493 cm_id_priv->private_data, 2494 cm_id_priv->private_data_len); 2495 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2496 2497 if (ib_post_send_mad(msg, NULL)) 2498 cm_free_msg(msg); 2499 goto deref; 2500 default: 2501 goto unlock; 2502 } 2503 2504 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD; 2505 cm_id_priv->tid = lap_msg->hdr.tid; 2506 cm_init_av_for_response(work->port, work->mad_recv_wc->wc, 2507 work->mad_recv_wc->recv_buf.grh, 2508 &cm_id_priv->av); 2509 cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av); 2510 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2511 if (!ret) 2512 list_add_tail(&work->list, &cm_id_priv->work_list); 2513 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2514 2515 if (ret) 2516 cm_process_work(cm_id_priv, work); 2517 else 2518 cm_deref_id(cm_id_priv); 2519 return 0; 2520 2521 unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2522 deref: cm_deref_id(cm_id_priv); 2523 return -EINVAL; 2524 } 2525 2526 static void cm_format_apr(struct cm_apr_msg *apr_msg, 2527 struct cm_id_private *cm_id_priv, 2528 enum ib_cm_apr_status status, 2529 void *info, 2530 u8 info_length, 2531 const void *private_data, 2532 u8 private_data_len) 2533 { 2534 cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid); 2535 apr_msg->local_comm_id = cm_id_priv->id.local_id; 2536 apr_msg->remote_comm_id = cm_id_priv->id.remote_id; 2537 apr_msg->ap_status = (u8) status; 2538 2539 if (info && info_length) { 2540 apr_msg->info_length = info_length; 2541 memcpy(apr_msg->info, info, info_length); 2542 } 2543 2544 if (private_data && private_data_len) 2545 memcpy(apr_msg->private_data, private_data, private_data_len); 2546 } 2547 2548 int ib_send_cm_apr(struct ib_cm_id *cm_id, 2549 enum ib_cm_apr_status status, 2550 void *info, 2551 u8 info_length, 2552 const void *private_data, 2553 u8 private_data_len) 2554 { 2555 struct cm_id_private *cm_id_priv; 2556 struct ib_mad_send_buf *msg; 2557 unsigned long flags; 2558 int ret; 2559 2560 if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) || 2561 (info && info_length > IB_CM_APR_INFO_LENGTH)) 2562 return -EINVAL; 2563 2564 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2565 spin_lock_irqsave(&cm_id_priv->lock, flags); 2566 if (cm_id->state != IB_CM_ESTABLISHED || 2567 (cm_id->lap_state != IB_CM_LAP_RCVD && 2568 cm_id->lap_state != IB_CM_MRA_LAP_SENT)) { 2569 ret = -EINVAL; 2570 goto out; 2571 } 2572 2573 ret = cm_alloc_msg(cm_id_priv, &msg); 2574 if (ret) 2575 goto out; 2576 2577 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status, 2578 info, info_length, private_data, 
private_data_len); 2579 ret = ib_post_send_mad(msg, NULL); 2580 if (ret) { 2581 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2582 cm_free_msg(msg); 2583 return ret; 2584 } 2585 2586 cm_id->lap_state = IB_CM_LAP_IDLE; 2587 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2588 return ret; 2589 } 2590 EXPORT_SYMBOL(ib_send_cm_apr); 2591 2592 static int cm_apr_handler(struct cm_work *work) 2593 { 2594 struct cm_id_private *cm_id_priv; 2595 struct cm_apr_msg *apr_msg; 2596 unsigned long flags; 2597 int ret; 2598 2599 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad; 2600 cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id, 2601 apr_msg->local_comm_id); 2602 if (!cm_id_priv) 2603 return -EINVAL; /* Unmatched reply. */ 2604 2605 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status; 2606 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info; 2607 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length; 2608 work->cm_event.private_data = &apr_msg->private_data; 2609 2610 spin_lock_irqsave(&cm_id_priv->lock, flags); 2611 if (cm_id_priv->id.state != IB_CM_ESTABLISHED || 2612 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT && 2613 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) { 2614 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2615 goto out; 2616 } 2617 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE; 2618 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2619 cm_id_priv->msg = NULL; 2620 2621 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2622 if (!ret) 2623 list_add_tail(&work->list, &cm_id_priv->work_list); 2624 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2625 2626 if (ret) 2627 cm_process_work(cm_id_priv, work); 2628 else 2629 cm_deref_id(cm_id_priv); 2630 return 0; 2631 out: 2632 cm_deref_id(cm_id_priv); 2633 return -EINVAL; 2634 } 2635 2636 static int cm_timewait_handler(struct cm_work *work) 2637 { 2638 struct cm_timewait_info *timewait_info; 2639 struct cm_id_private *cm_id_priv; 2640 int ret; 2641 2642 timewait_info = (struct cm_timewait_info *)work; 2643 spin_lock_irq(&cm.lock); 2644 list_del(&timewait_info->list); 2645 spin_unlock_irq(&cm.lock); 2646 2647 cm_id_priv = cm_acquire_id(timewait_info->work.local_id, 2648 timewait_info->work.remote_id); 2649 if (!cm_id_priv) 2650 return -EINVAL; 2651 2652 spin_lock_irq(&cm_id_priv->lock); 2653 if (cm_id_priv->id.state != IB_CM_TIMEWAIT || 2654 cm_id_priv->remote_qpn != timewait_info->remote_qpn) { 2655 spin_unlock_irq(&cm_id_priv->lock); 2656 goto out; 2657 } 2658 cm_id_priv->id.state = IB_CM_IDLE; 2659 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2660 if (!ret) 2661 list_add_tail(&work->list, &cm_id_priv->work_list); 2662 spin_unlock_irq(&cm_id_priv->lock); 2663 2664 if (ret) 2665 cm_process_work(cm_id_priv, work); 2666 else 2667 cm_deref_id(cm_id_priv); 2668 return 0; 2669 out: 2670 cm_deref_id(cm_id_priv); 2671 return -EINVAL; 2672 } 2673 2674 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg, 2675 struct cm_id_private *cm_id_priv, 2676 struct ib_cm_sidr_req_param *param) 2677 { 2678 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID, 2679 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR)); 2680 sidr_req_msg->request_id = cm_id_priv->id.local_id; 2681 sidr_req_msg->pkey = cpu_to_be16(param->path->pkey); 2682 sidr_req_msg->service_id = param->service_id; 2683 2684 if (param->private_data && param->private_data_len) 2685 memcpy(sidr_req_msg->private_data, param->private_data, 2686 param->private_data_len); 2687 } 2688 2689 int ib_send_cm_sidr_req(struct 
ib_cm_id *cm_id, 2690 struct ib_cm_sidr_req_param *param) 2691 { 2692 struct cm_id_private *cm_id_priv; 2693 struct ib_mad_send_buf *msg; 2694 unsigned long flags; 2695 int ret; 2696 2697 if (!param->path || (param->private_data && 2698 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE)) 2699 return -EINVAL; 2700 2701 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2702 ret = cm_init_av_by_path(param->path, &cm_id_priv->av); 2703 if (ret) 2704 goto out; 2705 2706 cm_id->service_id = param->service_id; 2707 cm_id->service_mask = __constant_cpu_to_be64(~0ULL); 2708 cm_id_priv->timeout_ms = param->timeout_ms; 2709 cm_id_priv->max_cm_retries = param->max_cm_retries; 2710 ret = cm_alloc_msg(cm_id_priv, &msg); 2711 if (ret) 2712 goto out; 2713 2714 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv, 2715 param); 2716 msg->timeout_ms = cm_id_priv->timeout_ms; 2717 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT; 2718 2719 spin_lock_irqsave(&cm_id_priv->lock, flags); 2720 if (cm_id->state == IB_CM_IDLE) 2721 ret = ib_post_send_mad(msg, NULL); 2722 else 2723 ret = -EINVAL; 2724 2725 if (ret) { 2726 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2727 cm_free_msg(msg); 2728 goto out; 2729 } 2730 cm_id->state = IB_CM_SIDR_REQ_SENT; 2731 cm_id_priv->msg = msg; 2732 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2733 out: 2734 return ret; 2735 } 2736 EXPORT_SYMBOL(ib_send_cm_sidr_req); 2737 2738 static void cm_format_sidr_req_event(struct cm_work *work, 2739 struct ib_cm_id *listen_id) 2740 { 2741 struct cm_sidr_req_msg *sidr_req_msg; 2742 struct ib_cm_sidr_req_event_param *param; 2743 2744 sidr_req_msg = (struct cm_sidr_req_msg *) 2745 work->mad_recv_wc->recv_buf.mad; 2746 param = &work->cm_event.param.sidr_req_rcvd; 2747 param->pkey = __be16_to_cpu(sidr_req_msg->pkey); 2748 param->listen_id = listen_id; 2749 param->port = work->port->port_num; 2750 work->cm_event.private_data = &sidr_req_msg->private_data; 2751 } 2752 2753 static int cm_sidr_req_handler(struct cm_work *work) 2754 { 2755 struct ib_cm_id *cm_id; 2756 struct cm_id_private *cm_id_priv, *cur_cm_id_priv; 2757 struct cm_sidr_req_msg *sidr_req_msg; 2758 struct ib_wc *wc; 2759 unsigned long flags; 2760 2761 cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL); 2762 if (IS_ERR(cm_id)) 2763 return PTR_ERR(cm_id); 2764 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2765 2766 /* Record SGID/SLID and request ID for lookup. */ 2767 sidr_req_msg = (struct cm_sidr_req_msg *) 2768 work->mad_recv_wc->recv_buf.mad; 2769 wc = work->mad_recv_wc->wc; 2770 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid); 2771 cm_id_priv->av.dgid.global.interface_id = 0; 2772 cm_init_av_for_response(work->port, work->mad_recv_wc->wc, 2773 work->mad_recv_wc->recv_buf.grh, 2774 &cm_id_priv->av); 2775 cm_id_priv->id.remote_id = sidr_req_msg->request_id; 2776 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD; 2777 cm_id_priv->tid = sidr_req_msg->hdr.tid; 2778 atomic_inc(&cm_id_priv->work_count); 2779 2780 spin_lock_irqsave(&cm.lock, flags); 2781 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); 2782 if (cur_cm_id_priv) { 2783 spin_unlock_irqrestore(&cm.lock, flags); 2784 goto out; /* Duplicate message. 
*/ 2785 } 2786 cur_cm_id_priv = cm_find_listen(cm_id->device, 2787 sidr_req_msg->service_id, 2788 sidr_req_msg->private_data); 2789 if (!cur_cm_id_priv) { 2790 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); 2791 spin_unlock_irqrestore(&cm.lock, flags); 2792 /* todo: reply with no match */ 2793 goto out; /* No match. */ 2794 } 2795 atomic_inc(&cur_cm_id_priv->refcount); 2796 spin_unlock_irqrestore(&cm.lock, flags); 2797 2798 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler; 2799 cm_id_priv->id.context = cur_cm_id_priv->id.context; 2800 cm_id_priv->id.service_id = sidr_req_msg->service_id; 2801 cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL); 2802 2803 cm_format_sidr_req_event(work, &cur_cm_id_priv->id); 2804 cm_process_work(cm_id_priv, work); 2805 cm_deref_id(cur_cm_id_priv); 2806 return 0; 2807 out: 2808 ib_destroy_cm_id(&cm_id_priv->id); 2809 return -EINVAL; 2810 } 2811 2812 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg, 2813 struct cm_id_private *cm_id_priv, 2814 struct ib_cm_sidr_rep_param *param) 2815 { 2816 cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID, 2817 cm_id_priv->tid); 2818 sidr_rep_msg->request_id = cm_id_priv->id.remote_id; 2819 sidr_rep_msg->status = param->status; 2820 cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num)); 2821 sidr_rep_msg->service_id = cm_id_priv->id.service_id; 2822 sidr_rep_msg->qkey = cpu_to_be32(param->qkey); 2823 2824 if (param->info && param->info_length) 2825 memcpy(sidr_rep_msg->info, param->info, param->info_length); 2826 2827 if (param->private_data && param->private_data_len) 2828 memcpy(sidr_rep_msg->private_data, param->private_data, 2829 param->private_data_len); 2830 } 2831 2832 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, 2833 struct ib_cm_sidr_rep_param *param) 2834 { 2835 struct cm_id_private *cm_id_priv; 2836 struct ib_mad_send_buf *msg; 2837 unsigned long flags; 2838 int ret; 2839 2840 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) || 2841 (param->private_data && 2842 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE)) 2843 return -EINVAL; 2844 2845 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2846 spin_lock_irqsave(&cm_id_priv->lock, flags); 2847 if (cm_id->state != IB_CM_SIDR_REQ_RCVD) { 2848 ret = -EINVAL; 2849 goto error; 2850 } 2851 2852 ret = cm_alloc_msg(cm_id_priv, &msg); 2853 if (ret) 2854 goto error; 2855 2856 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv, 2857 param); 2858 ret = ib_post_send_mad(msg, NULL); 2859 if (ret) { 2860 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2861 cm_free_msg(msg); 2862 return ret; 2863 } 2864 cm_id->state = IB_CM_IDLE; 2865 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2866 2867 spin_lock_irqsave(&cm.lock, flags); 2868 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); 2869 spin_unlock_irqrestore(&cm.lock, flags); 2870 return 0; 2871 2872 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2873 return ret; 2874 } 2875 EXPORT_SYMBOL(ib_send_cm_sidr_rep); 2876 2877 static void cm_format_sidr_rep_event(struct cm_work *work) 2878 { 2879 struct cm_sidr_rep_msg *sidr_rep_msg; 2880 struct ib_cm_sidr_rep_event_param *param; 2881 2882 sidr_rep_msg = (struct cm_sidr_rep_msg *) 2883 work->mad_recv_wc->recv_buf.mad; 2884 param = &work->cm_event.param.sidr_rep_rcvd; 2885 param->status = sidr_rep_msg->status; 2886 param->qkey = be32_to_cpu(sidr_rep_msg->qkey); 2887 param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg)); 2888 param->info 
= &sidr_rep_msg->info; 2889 param->info_len = sidr_rep_msg->info_length; 2890 work->cm_event.private_data = &sidr_rep_msg->private_data; 2891 } 2892 2893 static int cm_sidr_rep_handler(struct cm_work *work) 2894 { 2895 struct cm_sidr_rep_msg *sidr_rep_msg; 2896 struct cm_id_private *cm_id_priv; 2897 unsigned long flags; 2898 2899 sidr_rep_msg = (struct cm_sidr_rep_msg *) 2900 work->mad_recv_wc->recv_buf.mad; 2901 cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0); 2902 if (!cm_id_priv) 2903 return -EINVAL; /* Unmatched reply. */ 2904 2905 spin_lock_irqsave(&cm_id_priv->lock, flags); 2906 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) { 2907 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2908 goto out; 2909 } 2910 cm_id_priv->id.state = IB_CM_IDLE; 2911 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2912 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2913 2914 cm_format_sidr_rep_event(work); 2915 cm_process_work(cm_id_priv, work); 2916 return 0; 2917 out: 2918 cm_deref_id(cm_id_priv); 2919 return -EINVAL; 2920 } 2921 2922 static void cm_process_send_error(struct ib_mad_send_buf *msg, 2923 enum ib_wc_status wc_status) 2924 { 2925 struct cm_id_private *cm_id_priv; 2926 struct ib_cm_event cm_event; 2927 enum ib_cm_state state; 2928 unsigned long flags; 2929 int ret; 2930 2931 memset(&cm_event, 0, sizeof cm_event); 2932 cm_id_priv = msg->context[0]; 2933 2934 /* Discard old sends or ones without a response. */ 2935 spin_lock_irqsave(&cm_id_priv->lock, flags); 2936 state = (enum ib_cm_state) (unsigned long) msg->context[1]; 2937 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state) 2938 goto discard; 2939 2940 switch (state) { 2941 case IB_CM_REQ_SENT: 2942 case IB_CM_MRA_REQ_RCVD: 2943 cm_reset_to_idle(cm_id_priv); 2944 cm_event.event = IB_CM_REQ_ERROR; 2945 break; 2946 case IB_CM_REP_SENT: 2947 case IB_CM_MRA_REP_RCVD: 2948 cm_reset_to_idle(cm_id_priv); 2949 cm_event.event = IB_CM_REP_ERROR; 2950 break; 2951 case IB_CM_DREQ_SENT: 2952 cm_enter_timewait(cm_id_priv); 2953 cm_event.event = IB_CM_DREQ_ERROR; 2954 break; 2955 case IB_CM_SIDR_REQ_SENT: 2956 cm_id_priv->id.state = IB_CM_IDLE; 2957 cm_event.event = IB_CM_SIDR_REQ_ERROR; 2958 break; 2959 default: 2960 goto discard; 2961 } 2962 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2963 cm_event.param.send_status = wc_status; 2964 2965 /* No other events can occur on the cm_id at this point. 
*/ 2966 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event); 2967 cm_free_msg(msg); 2968 if (ret) 2969 ib_destroy_cm_id(&cm_id_priv->id); 2970 return; 2971 discard: 2972 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2973 cm_free_msg(msg); 2974 } 2975 2976 static void cm_send_handler(struct ib_mad_agent *mad_agent, 2977 struct ib_mad_send_wc *mad_send_wc) 2978 { 2979 struct ib_mad_send_buf *msg = mad_send_wc->send_buf; 2980 2981 switch (mad_send_wc->status) { 2982 case IB_WC_SUCCESS: 2983 case IB_WC_WR_FLUSH_ERR: 2984 cm_free_msg(msg); 2985 break; 2986 default: 2987 if (msg->context[0] && msg->context[1]) 2988 cm_process_send_error(msg, mad_send_wc->status); 2989 else 2990 cm_free_msg(msg); 2991 break; 2992 } 2993 } 2994 2995 static void cm_work_handler(struct work_struct *_work) 2996 { 2997 struct cm_work *work = container_of(_work, struct cm_work, work.work); 2998 int ret; 2999 3000 switch (work->cm_event.event) { 3001 case IB_CM_REQ_RECEIVED: 3002 ret = cm_req_handler(work); 3003 break; 3004 case IB_CM_MRA_RECEIVED: 3005 ret = cm_mra_handler(work); 3006 break; 3007 case IB_CM_REJ_RECEIVED: 3008 ret = cm_rej_handler(work); 3009 break; 3010 case IB_CM_REP_RECEIVED: 3011 ret = cm_rep_handler(work); 3012 break; 3013 case IB_CM_RTU_RECEIVED: 3014 ret = cm_rtu_handler(work); 3015 break; 3016 case IB_CM_USER_ESTABLISHED: 3017 ret = cm_establish_handler(work); 3018 break; 3019 case IB_CM_DREQ_RECEIVED: 3020 ret = cm_dreq_handler(work); 3021 break; 3022 case IB_CM_DREP_RECEIVED: 3023 ret = cm_drep_handler(work); 3024 break; 3025 case IB_CM_SIDR_REQ_RECEIVED: 3026 ret = cm_sidr_req_handler(work); 3027 break; 3028 case IB_CM_SIDR_REP_RECEIVED: 3029 ret = cm_sidr_rep_handler(work); 3030 break; 3031 case IB_CM_LAP_RECEIVED: 3032 ret = cm_lap_handler(work); 3033 break; 3034 case IB_CM_APR_RECEIVED: 3035 ret = cm_apr_handler(work); 3036 break; 3037 case IB_CM_TIMEWAIT_EXIT: 3038 ret = cm_timewait_handler(work); 3039 break; 3040 default: 3041 ret = -EINVAL; 3042 break; 3043 } 3044 if (ret) 3045 cm_free_work(work); 3046 } 3047 3048 static int cm_establish(struct ib_cm_id *cm_id) 3049 { 3050 struct cm_id_private *cm_id_priv; 3051 struct cm_work *work; 3052 unsigned long flags; 3053 int ret = 0; 3054 3055 work = kmalloc(sizeof *work, GFP_ATOMIC); 3056 if (!work) 3057 return -ENOMEM; 3058 3059 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3060 spin_lock_irqsave(&cm_id_priv->lock, flags); 3061 switch (cm_id->state) 3062 { 3063 case IB_CM_REP_SENT: 3064 case IB_CM_MRA_REP_RCVD: 3065 cm_id->state = IB_CM_ESTABLISHED; 3066 break; 3067 case IB_CM_ESTABLISHED: 3068 ret = -EISCONN; 3069 break; 3070 default: 3071 ret = -EINVAL; 3072 break; 3073 } 3074 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3075 3076 if (ret) { 3077 kfree(work); 3078 goto out; 3079 } 3080 3081 /* 3082 * The CM worker thread may try to destroy the cm_id before it 3083 * can execute this work item. To prevent potential deadlock, 3084 * we need to find the cm_id once we're in the context of the 3085 * worker thread, rather than holding a reference on it. 
3086 */ 3087 INIT_DELAYED_WORK(&work->work, cm_work_handler); 3088 work->local_id = cm_id->local_id; 3089 work->remote_id = cm_id->remote_id; 3090 work->mad_recv_wc = NULL; 3091 work->cm_event.event = IB_CM_USER_ESTABLISHED; 3092 queue_delayed_work(cm.wq, &work->work, 0); 3093 out: 3094 return ret; 3095 } 3096 3097 static int cm_migrate(struct ib_cm_id *cm_id) 3098 { 3099 struct cm_id_private *cm_id_priv; 3100 unsigned long flags; 3101 int ret = 0; 3102 3103 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3104 spin_lock_irqsave(&cm_id_priv->lock, flags); 3105 if (cm_id->state == IB_CM_ESTABLISHED && 3106 (cm_id->lap_state == IB_CM_LAP_UNINIT || 3107 cm_id->lap_state == IB_CM_LAP_IDLE)) { 3108 cm_id->lap_state = IB_CM_LAP_IDLE; 3109 cm_id_priv->av = cm_id_priv->alt_av; 3110 } else 3111 ret = -EINVAL; 3112 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3113 3114 return ret; 3115 } 3116 3117 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event) 3118 { 3119 int ret; 3120 3121 switch (event) { 3122 case IB_EVENT_COMM_EST: 3123 ret = cm_establish(cm_id); 3124 break; 3125 case IB_EVENT_PATH_MIG: 3126 ret = cm_migrate(cm_id); 3127 break; 3128 default: 3129 ret = -EINVAL; 3130 } 3131 return ret; 3132 } 3133 EXPORT_SYMBOL(ib_cm_notify); 3134 3135 static void cm_recv_handler(struct ib_mad_agent *mad_agent, 3136 struct ib_mad_recv_wc *mad_recv_wc) 3137 { 3138 struct cm_work *work; 3139 enum ib_cm_event_type event; 3140 int paths = 0; 3141 3142 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) { 3143 case CM_REQ_ATTR_ID: 3144 paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)-> 3145 alt_local_lid != 0); 3146 event = IB_CM_REQ_RECEIVED; 3147 break; 3148 case CM_MRA_ATTR_ID: 3149 event = IB_CM_MRA_RECEIVED; 3150 break; 3151 case CM_REJ_ATTR_ID: 3152 event = IB_CM_REJ_RECEIVED; 3153 break; 3154 case CM_REP_ATTR_ID: 3155 event = IB_CM_REP_RECEIVED; 3156 break; 3157 case CM_RTU_ATTR_ID: 3158 event = IB_CM_RTU_RECEIVED; 3159 break; 3160 case CM_DREQ_ATTR_ID: 3161 event = IB_CM_DREQ_RECEIVED; 3162 break; 3163 case CM_DREP_ATTR_ID: 3164 event = IB_CM_DREP_RECEIVED; 3165 break; 3166 case CM_SIDR_REQ_ATTR_ID: 3167 event = IB_CM_SIDR_REQ_RECEIVED; 3168 break; 3169 case CM_SIDR_REP_ATTR_ID: 3170 event = IB_CM_SIDR_REP_RECEIVED; 3171 break; 3172 case CM_LAP_ATTR_ID: 3173 paths = 1; 3174 event = IB_CM_LAP_RECEIVED; 3175 break; 3176 case CM_APR_ATTR_ID: 3177 event = IB_CM_APR_RECEIVED; 3178 break; 3179 default: 3180 ib_free_recv_mad(mad_recv_wc); 3181 return; 3182 } 3183 3184 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths, 3185 GFP_KERNEL); 3186 if (!work) { 3187 ib_free_recv_mad(mad_recv_wc); 3188 return; 3189 } 3190 3191 INIT_DELAYED_WORK(&work->work, cm_work_handler); 3192 work->cm_event.event = event; 3193 work->mad_recv_wc = mad_recv_wc; 3194 work->port = (struct cm_port *)mad_agent->context; 3195 queue_delayed_work(cm.wq, &work->work, 0); 3196 } 3197 3198 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, 3199 struct ib_qp_attr *qp_attr, 3200 int *qp_attr_mask) 3201 { 3202 unsigned long flags; 3203 int ret; 3204 3205 spin_lock_irqsave(&cm_id_priv->lock, flags); 3206 switch (cm_id_priv->id.state) { 3207 case IB_CM_REQ_SENT: 3208 case IB_CM_MRA_REQ_RCVD: 3209 case IB_CM_REQ_RCVD: 3210 case IB_CM_MRA_REQ_SENT: 3211 case IB_CM_REP_RCVD: 3212 case IB_CM_MRA_REP_SENT: 3213 case IB_CM_REP_SENT: 3214 case IB_CM_MRA_REP_RCVD: 3215 case IB_CM_ESTABLISHED: 3216 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | 3217 IB_QP_PKEY_INDEX | 
IB_QP_PORT; 3218 qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE; 3219 if (cm_id_priv->responder_resources) 3220 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ | 3221 IB_ACCESS_REMOTE_ATOMIC; 3222 qp_attr->pkey_index = cm_id_priv->av.pkey_index; 3223 qp_attr->port_num = cm_id_priv->av.port->port_num; 3224 ret = 0; 3225 break; 3226 default: 3227 ret = -EINVAL; 3228 break; 3229 } 3230 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3231 return ret; 3232 } 3233 3234 static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv, 3235 struct ib_qp_attr *qp_attr, 3236 int *qp_attr_mask) 3237 { 3238 unsigned long flags; 3239 int ret; 3240 3241 spin_lock_irqsave(&cm_id_priv->lock, flags); 3242 switch (cm_id_priv->id.state) { 3243 case IB_CM_REQ_RCVD: 3244 case IB_CM_MRA_REQ_SENT: 3245 case IB_CM_REP_RCVD: 3246 case IB_CM_MRA_REP_SENT: 3247 case IB_CM_REP_SENT: 3248 case IB_CM_MRA_REP_RCVD: 3249 case IB_CM_ESTABLISHED: 3250 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | 3251 IB_QP_DEST_QPN | IB_QP_RQ_PSN; 3252 qp_attr->ah_attr = cm_id_priv->av.ah_attr; 3253 qp_attr->path_mtu = cm_id_priv->path_mtu; 3254 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn); 3255 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn); 3256 if (cm_id_priv->qp_type == IB_QPT_RC) { 3257 *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC | 3258 IB_QP_MIN_RNR_TIMER; 3259 qp_attr->max_dest_rd_atomic = 3260 cm_id_priv->responder_resources; 3261 qp_attr->min_rnr_timer = 0; 3262 } 3263 if (cm_id_priv->alt_av.ah_attr.dlid) { 3264 *qp_attr_mask |= IB_QP_ALT_PATH; 3265 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num; 3266 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index; 3267 qp_attr->alt_timeout = 3268 cm_id_priv->alt_av.packet_life_time + 1; 3269 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr; 3270 } 3271 ret = 0; 3272 break; 3273 default: 3274 ret = -EINVAL; 3275 break; 3276 } 3277 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3278 return ret; 3279 } 3280 3281 static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv, 3282 struct ib_qp_attr *qp_attr, 3283 int *qp_attr_mask) 3284 { 3285 unsigned long flags; 3286 int ret; 3287 3288 spin_lock_irqsave(&cm_id_priv->lock, flags); 3289 switch (cm_id_priv->id.state) { 3290 /* Allow transition to RTS before sending REP */ 3291 case IB_CM_REQ_RCVD: 3292 case IB_CM_MRA_REQ_SENT: 3293 3294 case IB_CM_REP_RCVD: 3295 case IB_CM_MRA_REP_SENT: 3296 case IB_CM_REP_SENT: 3297 case IB_CM_MRA_REP_RCVD: 3298 case IB_CM_ESTABLISHED: 3299 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) { 3300 *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN; 3301 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn); 3302 if (cm_id_priv->qp_type == IB_QPT_RC) { 3303 *qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT | 3304 IB_QP_RNR_RETRY | 3305 IB_QP_MAX_QP_RD_ATOMIC; 3306 qp_attr->timeout = 3307 cm_id_priv->av.packet_life_time + 1; 3308 qp_attr->retry_cnt = cm_id_priv->retry_count; 3309 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count; 3310 qp_attr->max_rd_atomic = 3311 cm_id_priv->initiator_depth; 3312 } 3313 if (cm_id_priv->alt_av.ah_attr.dlid) { 3314 *qp_attr_mask |= IB_QP_PATH_MIG_STATE; 3315 qp_attr->path_mig_state = IB_MIG_REARM; 3316 } 3317 } else { 3318 *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE; 3319 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num; 3320 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index; 3321 qp_attr->alt_timeout = 3322 cm_id_priv->alt_av.packet_life_time + 1; 3323 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr; 3324 
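/*
 * Added note: with an alternate path already loaded, moving (back) to
 * RTS only needs to rearm path migration so the HCA can fail over to
 * the alternate path when migration is triggered.
 */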
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);

static void cm_add_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	u8 i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
			 device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->device = device;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		port->cm_dev = cm_dev;
		port->port_num = i;
		port->mad_agent = ib_register_mad_agent(device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port);
		if (IS_ERR(port->mad_agent))
			goto error1;

		ret = ib_modify_port(device, i, 0, &port_modify);
		if (ret)
			goto error2;
	}
	ib_set_client_data(device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error2:
	ib_unregister_mad_agent(port->mad_agent);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}

static void cm_remove_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	cm_dev = ib_get_client_data(device, &cm_client);
	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}

static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
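	/*
	 * Added note: cm.device_lock protects the device list, while
	 * cm.lock protects the listen/remote lookup tables, the local ID
	 * idr and the timewait list.
	 */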
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	idr_pre_get(&cm.local_id_table, GFP_KERNEL);
	INIT_LIST_HEAD(&cm.timewait_list);

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq)
		return -ENOMEM;

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error;

	return 0;
error:
	destroy_workqueue(cm.wq);
	return ret;
}

static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	ib_unregister_client(&cm_client);
	idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);
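/*
 * Usage sketch (illustrative, not part of the original file): after a
 * connection event, a ULP typically lets the CM fill in the QP attributes
 * for each state transition and applies them with ib_modify_qp().  The
 * cm_id and qp arguments below are the consumer's own objects.
 *
 *	static int my_move_qp_to_rts(struct ib_cm_id *cm_id, struct ib_qp *qp)
 *	{
 *		enum ib_qp_state states[] = { IB_QPS_INIT, IB_QPS_RTR, IB_QPS_RTS };
 *		struct ib_qp_attr qp_attr;
 *		int qp_attr_mask, ret, i;
 *
 *		for (i = 0; i < 3; i++) {
 *			qp_attr.qp_state = states[i];
 *			ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *			if (!ret)
 *				ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 *			if (ret)
 *				return ret;
 *		}
 *		return 0;
 *	}
 */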