/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 4311 2005-12-05 18:42:01Z sean.hefty $
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	struct workqueue_struct *wq;
} cm;

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u8 port_num;
};

struct cm_device {
	struct list_head list;
	struct ib_device *device;
	__be64 ca_guid;
	struct cm_port port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 packet_life_time;
};

struct cm_work {
	struct work_struct work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};

struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};
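
/*
 * Per-connection state.  cm_id_priv->lock nests outside cm.lock (see the
 * comment on the lock field below), and the refcount/completion pair lets
 * ib_destroy_cm_id() wait for all outstanding references to drop before
 * freeing the structure.
 */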
struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	atomic_t refcount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;
	struct ib_cm_compare_data *compare_data;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 local_ack_timeout;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;

	struct list_head work_list;
	atomic_t work_count;
};

static void cm_work_handler(void *data);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}
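
/*
 * Allocate a MAD send buffer addressed via the cm_id's primary address
 * vector.  The buffer holds a reference on the cm_id (stored in
 * context[0]), which cm_free_msg() drops; the caller sets a timeout only
 * when a response is expected.
 */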
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmalloc(private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	memcpy(data, private_data, private_data_len);
	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				    struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->device, port->port_num, wc,
			   grh, &av->ah_attr);
}

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
					&p, NULL)) {
			port = &cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	ib_init_ah_from_path(cm_dev->device, port->port_num, path,
			     &av->ah_attr);
	av->packet_life_time = path->packet_life_time;
	return 0;
}

static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int ret;
	static int next_id;

	do {
		spin_lock_irqsave(&cm.lock, flags);
		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
					next_id++, (__force int *)
					&cm_id_priv->id.local_id);
		spin_unlock_irqrestore(&cm.lock, flags);
	} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
	return ret;
}

static void cm_free_id(__be32 local_id)
{
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	idr_remove(&cm.local_id_table, (__force int) local_id);
	spin_unlock_irqrestore(&cm.lock, flags);
}
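
/*
 * Look up a cm_id by local ID and validate the remote ID against it.
 * cm_get_id() requires cm.lock and takes a reference on success;
 * cm_acquire_id() is the locked wrapper.
 */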
static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irqrestore(&cm.lock, flags);

	return cm_id_priv;
}

static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
{
	int i;

	for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
		((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
					     ((unsigned long *) mask)[i];
}

static int cm_compare_data(struct ib_cm_compare_data *src_data,
			   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];
	u8 dst[IB_CM_COMPARE_SIZE];

	if (!src_data || !dst_data)
		return 0;

	cm_mask_copy(src, src_data->data, dst_data->mask);
	cm_mask_copy(dst, dst_data->data, src_data->mask);
	return memcmp(src, dst, IB_CM_COMPARE_SIZE);
}

static int cm_compare_private_data(u8 *private_data,
				   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];

	if (!dst_data)
		return 0;

	cm_mask_copy(src, private_data, dst_data->mask);
	return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
}

static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;
	int data_cmp;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		data_cmp = cm_compare_data(cm_id_priv->compare_data,
					   cur_cm_id_priv->compare_data);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
		    !data_cmp)
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (service_id < cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_left;
		else if (service_id > cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_right;
		else if (data_cmp < 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}
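
/*
 * The listen table is keyed on (device, service ID, masked private data),
 * in that order.  A lookup matches when the masked service IDs and the
 * masked private data agree, mirroring the duplicate check done by
 * cm_insert_listen() above.
 */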
static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id,
					     u8 *private_data)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;
	int data_cmp;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		data_cmp = cm_compare_private_data(private_data,
						   cm_id_priv->compare_data);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device) && !data_cmp)
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (service_id < cm_id_priv->id.service_id)
			node = node->rb_left;
		else if (service_id > cm_id_priv->id.service_id)
			node = node->rb_right;
		else if (data_cmp < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (remote_id < cur_timewait_info->work.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_timewait_info->work.remote_id)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (remote_id < timewait_info->work.remote_id)
			node = node->rb_left;
		else if (remote_id > timewait_info->work.remote_id)
			node = node->rb_right;
		else if (remote_ca_guid < timewait_info->remote_ca_guid)
			node = node->rb_left;
		else if (remote_ca_guid > timewait_info->remote_ca_guid)
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (remote_qpn < cur_timewait_info->remote_qpn)
			link = &(*link)->rb_left;
		else if (remote_qpn > cur_timewait_info->remote_qpn)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}
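
/*
 * SIDR requests are tracked by remote comm ID, with the peer port GID as a
 * tiebreaker; this appears to serve duplicate detection, since SIDR has no
 * remote QPN to key on.
 */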
static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (remote_id < cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:	kfree(cm_id_priv);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}

static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	unsigned long flags;

	if (!timewait_info->inserted_remote_id &&
	    !timewait_info->inserted_remote_qp)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
}
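
/*
 * Timewait state rides on a cm_work so it can be queued directly to cm.wq
 * as delayed work; when the delay expires, cm_work_handler() delivers the
 * IB_CM_TIMEWAIT_EXIT event for the stored local ID.
 */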
static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_WORK(&timewait_info->work.work, cm_work_handler,
		  &timewait_info->work);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
			   msecs_to_jiffies(wait_time));
	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		cm_id->state = IB_CM_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		spin_lock_irqsave(&cm.lock, flags);
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		break;
	case IB_CM_REQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->av.port->cm_dev->ca_guid,
			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
			       NULL, 0);
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	}

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->compare_data);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
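
/*
 * A zero service_mask is treated as an exact-match listen (mask ~0), and
 * IB_CM_ASSIGN_SERVICE_ID asks the CM to pick an unused service ID.  The
 * optional compare_data is matched against the private data of incoming
 * requests.
 */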
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
		 struct ib_cm_compare_data *compare_data)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	unsigned long flags;
	int ret = 0;

	service_mask = service_mask ? service_mask :
		       __constant_cpu_to_be64(~0ULL);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	if (compare_data) {
		cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
						   GFP_KERNEL);
		if (!cm_id_priv->compare_data)
			return -ENOMEM;
		cm_mask_copy(cm_id_priv->compare_data->data,
			     compare_data->data, compare_data->mask);
		memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
		       IB_CM_COMPARE_SIZE);
	}

	cm_id->state = IB_CM_LISTEN;

	spin_lock_irqsave(&cm.lock, flags);
	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
	spin_unlock_irqrestore(&cm.lock, flags);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		kfree(cm_id_priv->compare_data);
		cm_id_priv->compare_data = NULL;
		ret = -EBUSY;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
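
/*
 * Transaction IDs pack the MAD agent's hi_tid into the upper 32 bits and
 * the local comm ID into the lower bits, with the 2-bit message sequence
 * number folded into bits 30-31 so that concurrent exchanges on the same
 * cm_id get distinct TIDs.
 */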
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid  = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
			 (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method	   = IB_MGMT_METHOD_SEND;
	hdr->attr_id	   = attr_id;
	hdr->tid	   = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_resp_res(req_msg, param->responder_resources);
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	cm_req_set_retry_count(req_msg, param->retry_count);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
	cm_req_set_srq(req_msg, param->srq);

	req_msg->primary_local_lid = param->primary_path->slid;
	req_msg->primary_remote_lid = param->primary_path->dlid;
	req_msg->primary_local_gid = param->primary_path->sgid;
	req_msg->primary_remote_gid = param->primary_path->dgid;
	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
	req_msg->primary_traffic_class = param->primary_path->traffic_class;
	req_msg->primary_hop_limit = param->primary_path->hop_limit;
	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
	cm_req_set_primary_local_ack_timeout(req_msg,
		min(31, param->primary_path->packet_life_time + 1));

	if (param->alternate_path) {
		req_msg->alt_local_lid = param->alternate_path->slid;
		req_msg->alt_remote_lid = param->alternate_path->dlid;
		req_msg->alt_local_gid = param->alternate_path->sgid;
		req_msg->alt_remote_gid = param->alternate_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  param->alternate_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
		cm_req_set_alt_local_ack_timeout(req_msg,
			min(31, param->alternate_path->packet_life_time + 1));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}
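
/*
 * Sending a REQ allocates the timewait info up front so the later
 * transition to timewait cannot fail.  The MAD timeout is twice the packet
 * lifetime plus the remote CM's response timeout, both converted from IB
 * time units to milliseconds.
 */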
int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
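
/*
 * Issue a REJ for a received message that has no usable cm_id, echoing the
 * sender's TID and swapping the comm IDs taken from the received message.
 */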
static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information.  Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct ib_sa_path_rec *primary_path,
				     struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
	}
}
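
/*
 * Path fields in the REQ are from the active side's point of view, so the
 * passive side swaps local/remote LIDs and GIDs above when rebuilding its
 * path records, and reports REQ parameters with the same reversal below
 * (e.g. the sender's initiator depth becomes our responder resources).
 */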
static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
					cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
					cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	unsigned long flags;
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}
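
/*
 * A duplicate REQ is answered by replaying the current state: resend the
 * MRA if one went out, or a stale-connection REJ if the connection has
 * already moved to timewait.  Anything else is silently dropped.
 */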
static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
}

static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;
	unsigned long flags;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for duplicate REQ and stale connections. */
	spin_lock_irqsave(&cm.lock, flags);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (!timewait_info)
		timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);

	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irqrestore(&cm.lock, flags);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		} else
			cm_issue_rej(work->port, work->mad_recv_wc,
				     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
				     NULL, 0);
		goto error;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id,
					   req_msg->private_data);
	if (!listen_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto error;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irqrestore(&cm.lock, flags);
	return listen_cm_id_priv;

error:	cm_cleanup_timewait(cm_id_priv->timewait_info);
	return NULL;
}
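
/*
 * Handle a new REQ: create the passive-side cm_id, record the remote
 * identifiers in fresh timewait info, match against the listen table, and
 * initialize the address vectors from the reversed path records before
 * handing the event to the listener's callback.
 */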
static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto error1;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		goto error2;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret)
		goto error3;
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret)
			goto error3;
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

error3:	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
error2:	kfree(cm_id_priv->timewait_info);
	cm_id_priv->timewait_info = NULL;
error1:	ib_destroy_cm_id(&cm_id_priv->id);
	return ret;
}

static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	rep_msg->initiator_depth = param->initiator_depth;
	cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	cm_rep_set_srq(rep_msg, param->srq);
	rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}
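
/*
 * The RTU's private data is copied and stashed on the cm_id so that a
 * duplicate REP arriving later can be answered by resending an identical
 * RTU (see cm_dup_rep_handler()).
 */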
int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);

static void cm_format_rep_event(struct cm_work *work)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}

static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}
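
/*
 * A first-time REP is looked up with remote_id 0, since the active side
 * has not yet learned the passive side's comm ID; if that lookup fails the
 * REP is treated as a duplicate.  Inserting into the remote ID/QPN trees
 * here also catches duplicate REPs and stale connections.
 */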
static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		return -EINVAL;
	}

	cm_format_rep_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}
	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
			 &cm.remote_id_table);
		cm_id_priv->timewait_info->inserted_remote_id = 0;
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		goto error;
	}
	spin_unlock(&cm.lock);

	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);

	/* todo: handle peer_to_peer */

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:
	cm_deref_id(cm_id_priv);
	return ret;
}

static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	/* See comment in ib_cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	unsigned long flags;
	int ret;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
}
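
/*
 * DREQ may only be sent from the established state.  If the message cannot
 * be allocated or posted, the connection still transitions to timewait so
 * it is torn down cleanly rather than left established.
 */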
ret; 1798 } 1799 1800 cm_id->state = IB_CM_DREQ_SENT; 1801 cm_id_priv->msg = msg; 1802 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1803 return ret; 1804 } 1805 EXPORT_SYMBOL(ib_send_cm_dreq); 1806 1807 static void cm_format_drep(struct cm_drep_msg *drep_msg, 1808 struct cm_id_private *cm_id_priv, 1809 const void *private_data, 1810 u8 private_data_len) 1811 { 1812 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid); 1813 drep_msg->local_comm_id = cm_id_priv->id.local_id; 1814 drep_msg->remote_comm_id = cm_id_priv->id.remote_id; 1815 1816 if (private_data && private_data_len) 1817 memcpy(drep_msg->private_data, private_data, private_data_len); 1818 } 1819 1820 int ib_send_cm_drep(struct ib_cm_id *cm_id, 1821 const void *private_data, 1822 u8 private_data_len) 1823 { 1824 struct cm_id_private *cm_id_priv; 1825 struct ib_mad_send_buf *msg; 1826 unsigned long flags; 1827 void *data; 1828 int ret; 1829 1830 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE) 1831 return -EINVAL; 1832 1833 data = cm_copy_private_data(private_data, private_data_len); 1834 if (IS_ERR(data)) 1835 return PTR_ERR(data); 1836 1837 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1838 spin_lock_irqsave(&cm_id_priv->lock, flags); 1839 if (cm_id->state != IB_CM_DREQ_RCVD) { 1840 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1841 kfree(data); 1842 return -EINVAL; 1843 } 1844 1845 cm_set_private_data(cm_id_priv, data, private_data_len); 1846 cm_enter_timewait(cm_id_priv); 1847 1848 ret = cm_alloc_msg(cm_id_priv, &msg); 1849 if (ret) 1850 goto out; 1851 1852 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, 1853 private_data, private_data_len); 1854 1855 ret = ib_post_send_mad(msg, NULL); 1856 if (ret) { 1857 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1858 cm_free_msg(msg); 1859 return ret; 1860 } 1861 1862 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1863 return ret; 1864 } 1865 EXPORT_SYMBOL(ib_send_cm_drep); 1866 1867 static int cm_dreq_handler(struct cm_work *work) 1868 { 1869 struct cm_id_private *cm_id_priv; 1870 struct cm_dreq_msg *dreq_msg; 1871 struct ib_mad_send_buf *msg = NULL; 1872 unsigned long flags; 1873 int ret; 1874 1875 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad; 1876 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id, 1877 dreq_msg->local_comm_id); 1878 if (!cm_id_priv) 1879 return -EINVAL; 1880 1881 work->cm_event.private_data = &dreq_msg->private_data; 1882 1883 spin_lock_irqsave(&cm_id_priv->lock, flags); 1884 if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg)) 1885 goto unlock; 1886 1887 switch (cm_id_priv->id.state) { 1888 case IB_CM_REP_SENT: 1889 case IB_CM_DREQ_SENT: 1890 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 1891 break; 1892 case IB_CM_ESTABLISHED: 1893 case IB_CM_MRA_REP_RCVD: 1894 break; 1895 case IB_CM_TIMEWAIT: 1896 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) 1897 goto unlock; 1898 1899 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, 1900 cm_id_priv->private_data, 1901 cm_id_priv->private_data_len); 1902 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1903 1904 if (ib_post_send_mad(msg, NULL)) 1905 cm_free_msg(msg); 1906 goto deref; 1907 default: 1908 goto unlock; 1909 } 1910 cm_id_priv->id.state = IB_CM_DREQ_RCVD; 1911 cm_id_priv->tid = dreq_msg->hdr.tid; 1912 ret = atomic_inc_and_test(&cm_id_priv->work_count); 1913 if (!ret) 1914 list_add_tail(&work->list, &cm_id_priv->work_list); 1915 
spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1916 1917 if (ret) 1918 cm_process_work(cm_id_priv, work); 1919 else 1920 cm_deref_id(cm_id_priv); 1921 return 0; 1922 1923 unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1924 deref: cm_deref_id(cm_id_priv); 1925 return -EINVAL; 1926 } 1927 1928 static int cm_drep_handler(struct cm_work *work) 1929 { 1930 struct cm_id_private *cm_id_priv; 1931 struct cm_drep_msg *drep_msg; 1932 unsigned long flags; 1933 int ret; 1934 1935 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad; 1936 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id, 1937 drep_msg->local_comm_id); 1938 if (!cm_id_priv) 1939 return -EINVAL; 1940 1941 work->cm_event.private_data = &drep_msg->private_data; 1942 1943 spin_lock_irqsave(&cm_id_priv->lock, flags); 1944 if (cm_id_priv->id.state != IB_CM_DREQ_SENT && 1945 cm_id_priv->id.state != IB_CM_DREQ_RCVD) { 1946 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1947 goto out; 1948 } 1949 cm_enter_timewait(cm_id_priv); 1950 1951 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 1952 ret = atomic_inc_and_test(&cm_id_priv->work_count); 1953 if (!ret) 1954 list_add_tail(&work->list, &cm_id_priv->work_list); 1955 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1956 1957 if (ret) 1958 cm_process_work(cm_id_priv, work); 1959 else 1960 cm_deref_id(cm_id_priv); 1961 return 0; 1962 out: 1963 cm_deref_id(cm_id_priv); 1964 return -EINVAL; 1965 } 1966 1967 int ib_send_cm_rej(struct ib_cm_id *cm_id, 1968 enum ib_cm_rej_reason reason, 1969 void *ari, 1970 u8 ari_length, 1971 const void *private_data, 1972 u8 private_data_len) 1973 { 1974 struct cm_id_private *cm_id_priv; 1975 struct ib_mad_send_buf *msg; 1976 unsigned long flags; 1977 int ret; 1978 1979 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) || 1980 (ari && ari_length > IB_CM_REJ_ARI_LENGTH)) 1981 return -EINVAL; 1982 1983 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1984 1985 spin_lock_irqsave(&cm_id_priv->lock, flags); 1986 switch (cm_id->state) { 1987 case IB_CM_REQ_SENT: 1988 case IB_CM_MRA_REQ_RCVD: 1989 case IB_CM_REQ_RCVD: 1990 case IB_CM_MRA_REQ_SENT: 1991 case IB_CM_REP_RCVD: 1992 case IB_CM_MRA_REP_SENT: 1993 ret = cm_alloc_msg(cm_id_priv, &msg); 1994 if (!ret) 1995 cm_format_rej((struct cm_rej_msg *) msg->mad, 1996 cm_id_priv, reason, ari, ari_length, 1997 private_data, private_data_len); 1998 1999 cm_reset_to_idle(cm_id_priv); 2000 break; 2001 case IB_CM_REP_SENT: 2002 case IB_CM_MRA_REP_RCVD: 2003 ret = cm_alloc_msg(cm_id_priv, &msg); 2004 if (!ret) 2005 cm_format_rej((struct cm_rej_msg *) msg->mad, 2006 cm_id_priv, reason, ari, ari_length, 2007 private_data, private_data_len); 2008 2009 cm_enter_timewait(cm_id_priv); 2010 break; 2011 default: 2012 ret = -EINVAL; 2013 goto out; 2014 } 2015 2016 if (ret) 2017 goto out; 2018 2019 ret = ib_post_send_mad(msg, NULL); 2020 if (ret) 2021 cm_free_msg(msg); 2022 2023 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2024 return ret; 2025 } 2026 EXPORT_SYMBOL(ib_send_cm_rej); 2027 2028 static void cm_format_rej_event(struct cm_work *work) 2029 { 2030 struct cm_rej_msg *rej_msg; 2031 struct ib_cm_rej_event_param *param; 2032 2033 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; 2034 param = &work->cm_event.param.rej_rcvd; 2035 param->ari = rej_msg->ari; 2036 param->ari_length = cm_rej_get_reject_info_len(rej_msg); 2037 param->reason = __be16_to_cpu(rej_msg->reason); 2038 work->cm_event.private_data = &rej_msg->private_data; 2039 } 2040 
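/*
 * Match an incoming REJ to a cm_id.  Which identifiers the lookup can
 * use depends on why the REJ was sent: a REJ with reason
 * IB_CM_REJ_TIMEOUT carries a 64-bit value in its ARI that, together
 * with the sender's comm ID, is matched against the timewait data; a
 * REJ of a REQ arrives before the remote comm ID is known, so only the
 * local ID can be matched; all other REJs carry both comm IDs.
 */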
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
        struct cm_timewait_info *timewait_info;
        struct cm_id_private *cm_id_priv;
        unsigned long flags;
        __be32 remote_id;

        remote_id = rej_msg->local_comm_id;

        if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
                spin_lock_irqsave(&cm.lock, flags);
                timewait_info = cm_find_remote_id(*((__be64 *) rej_msg->ari),
                                                  remote_id);
                if (!timewait_info) {
                        spin_unlock_irqrestore(&cm.lock, flags);
                        return NULL;
                }
                cm_id_priv = idr_find(&cm.local_id_table,
                                      (__force int) timewait_info->work.local_id);
                if (cm_id_priv) {
                        if (cm_id_priv->id.remote_id == remote_id)
                                atomic_inc(&cm_id_priv->refcount);
                        else
                                cm_id_priv = NULL;
                }
                spin_unlock_irqrestore(&cm.lock, flags);
        } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
                cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
        else
                cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

        return cm_id_priv;
}

static int cm_rej_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_rej_msg *rej_msg;
        unsigned long flags;
        int ret;

        rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_rejected_id(rej_msg);
        if (!cm_id_priv)
                return -EINVAL;

        cm_format_rej_event(work);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                /* fall through */
        case IB_CM_REQ_RCVD:
        case IB_CM_MRA_REQ_SENT:
                if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
                        cm_enter_timewait(cm_id_priv);
                else
                        cm_reset_to_idle(cm_id_priv);
                break;
        case IB_CM_DREQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                /* fall through */
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
        case IB_CM_ESTABLISHED:
                cm_enter_timewait(cm_id_priv);
                break;
        default:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = -EINVAL;
                goto out;
        }

        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}

int ib_send_cm_mra(struct ib_cm_id *cm_id,
                   u8 service_timeout,
                   const void *private_data,
                   u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        void *data;
        unsigned long flags;
        int ret;

        if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
                return -EINVAL;

        data = cm_copy_private_data(private_data, private_data_len);
        if (IS_ERR(data))
                return PTR_ERR(data);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_RCVD:
                ret = cm_alloc_msg(cm_id_priv, &msg);
                if (ret)
                        goto error1;

                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REQ, service_timeout,
                              private_data, private_data_len);
                ret = ib_post_send_mad(msg, NULL);
                if (ret)
                        goto error2;
                cm_id->state = IB_CM_MRA_REQ_SENT;
                break;
        case IB_CM_REP_RCVD:
                ret = cm_alloc_msg(cm_id_priv, &msg);
                if (ret)
                        goto error1;

                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REP, service_timeout,
                              private_data, private_data_len);
                ret = ib_post_send_mad(msg, NULL);
                if (ret)
                        goto error2;
                cm_id->state = IB_CM_MRA_REP_SENT;
                break;
        case IB_CM_ESTABLISHED:
                ret = cm_alloc_msg(cm_id_priv, &msg);
                if (ret)
                        goto error1;

                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_OTHER, service_timeout,
                              private_data, private_data_len);
                ret = ib_post_send_mad(msg, NULL);
                if (ret)
                        goto error2;
                cm_id->lap_state = IB_CM_MRA_LAP_SENT;
                break;
        default:
                ret = -EINVAL;
                goto error1;
        }
        cm_id_priv->service_timeout = service_timeout;
        cm_set_private_data(cm_id_priv, data, private_data_len);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return 0;

error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        kfree(data);
        return ret;

error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        kfree(data);
        cm_free_msg(msg);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);

static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
        switch (cm_mra_get_msg_mraed(mra_msg)) {
        case CM_MSG_RESPONSE_REQ:
                return cm_acquire_id(mra_msg->remote_comm_id, 0);
        case CM_MSG_RESPONSE_REP:
        case CM_MSG_RESPONSE_OTHER:
                return cm_acquire_id(mra_msg->remote_comm_id,
                                     mra_msg->local_comm_id);
        default:
                return NULL;
        }
}

static int cm_mra_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_mra_msg *mra_msg;
        unsigned long flags;
        int timeout, ret;

        mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_mraed_id(mra_msg);
        if (!cm_id_priv)
                return -EINVAL;

        work->cm_event.private_data = &mra_msg->private_data;
        work->cm_event.param.mra_rcvd.service_timeout =
                                        cm_mra_get_service_timeout(mra_msg);
        timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
                  cm_convert_to_ms(cm_id_priv->av.packet_life_time);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_SENT:
                if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
                    ib_modify_mad(cm_id_priv->av.port->mad_agent,
                                  cm_id_priv->msg, timeout))
                        goto out;
                cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
                break;
        case IB_CM_REP_SENT:
                if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
                    ib_modify_mad(cm_id_priv->av.port->mad_agent,
                                  cm_id_priv->msg, timeout))
                        goto out;
                cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
                break;
        case IB_CM_ESTABLISHED:
                if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
                    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
                    ib_modify_mad(cm_id_priv->av.port->mad_agent,
                                  cm_id_priv->msg, timeout))
                        goto out;
                cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
                break;
        default:
                goto out;
        }

        cm_id_priv->msg->context[1] = (void *) (unsigned long)
                                      cm_id_priv->id.state;
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}

static void cm_format_lap(struct cm_lap_msg *lap_msg,
                          struct cm_id_private *cm_id_priv,
                          struct ib_sa_path_rec *alternate_path,
                          const void *private_data,
                          u8 private_data_len)
{
        cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
                          cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
        lap_msg->local_comm_id = cm_id_priv->id.local_id;
        lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
        /* todo: need remote CM response timeout */
        cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
        lap_msg->alt_local_lid = alternate_path->slid;
        lap_msg->alt_remote_lid = alternate_path->dlid;
        lap_msg->alt_local_gid = alternate_path->sgid;
        lap_msg->alt_remote_gid = alternate_path->dgid;
        cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
        cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
        lap_msg->alt_hop_limit = alternate_path->hop_limit;
        cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
        cm_lap_set_sl(lap_msg, alternate_path->sl);
        cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
        cm_lap_set_local_ack_timeout(lap_msg,
                min(31, alternate_path->packet_life_time + 1));

        if (private_data && private_data_len)
                memcpy(lap_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_lap(struct ib_cm_id *cm_id,
                   struct ib_sa_path_rec *alternate_path,
                   const void *private_data,
                   u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_ESTABLISHED ||
            cm_id->lap_state != IB_CM_LAP_IDLE) {
                ret = -EINVAL;
                goto out;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;

        cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
                      alternate_path, private_data, private_data_len);
        msg->timeout_ms = cm_id_priv->timeout_ms;
        msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

        cm_id->lap_state = IB_CM_LAP_SENT;
        cm_id_priv->msg = msg;

out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);

static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
                                    struct cm_lap_msg *lap_msg)
{
        memset(path, 0, sizeof *path);
        path->dgid = lap_msg->alt_local_gid;
        path->sgid = lap_msg->alt_remote_gid;
        path->dlid = lap_msg->alt_local_lid;
        path->slid = lap_msg->alt_remote_lid;
        path->flow_label = cm_lap_get_flow_label(lap_msg);
        path->hop_limit = lap_msg->alt_hop_limit;
        path->traffic_class = cm_lap_get_traffic_class(lap_msg);
        path->reversible = 1;
        /* pkey is same as in REQ */
        path->sl = cm_lap_get_sl(lap_msg);
        path->mtu_selector = IB_SA_EQ;
        /* mtu is same as in REQ */
        path->rate_selector = IB_SA_EQ;
        path->rate = cm_lap_get_packet_rate(lap_msg);
        path->packet_life_time_selector = IB_SA_EQ;
        path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
        path->packet_life_time -= (path->packet_life_time > 0);
}

static int cm_lap_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_lap_msg *lap_msg;
        struct ib_cm_lap_event_param *param;
        struct ib_mad_send_buf *msg = NULL;
        unsigned long flags;
        int ret;

        /* todo: verify LAP request and send reject APR if invalid. */
        lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
                                   lap_msg->local_comm_id);
        if (!cm_id_priv)
                return -EINVAL;

        param = &work->cm_event.param.lap_rcvd;
        param->alternate_path = &work->path[0];
        cm_format_path_from_lap(param->alternate_path, lap_msg);
        work->cm_event.private_data = &lap_msg->private_data;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
                goto unlock;

        switch (cm_id_priv->id.lap_state) {
        case IB_CM_LAP_IDLE:
                break;
        case IB_CM_MRA_LAP_SENT:
                if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
                        goto unlock;

                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_OTHER,
                              cm_id_priv->service_timeout,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);

                if (ib_post_send_mad(msg, NULL))
                        cm_free_msg(msg);
                goto deref;
        default:
                goto unlock;
        }

        cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
        cm_id_priv->tid = lap_msg->hdr.tid;
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;

unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:  cm_deref_id(cm_id_priv);
        return -EINVAL;
}

static void cm_format_apr(struct cm_apr_msg *apr_msg,
                          struct cm_id_private *cm_id_priv,
                          enum ib_cm_apr_status status,
                          void *info,
                          u8 info_length,
                          const void *private_data,
                          u8 private_data_len)
{
        cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
        apr_msg->local_comm_id = cm_id_priv->id.local_id;
        apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
        apr_msg->ap_status = (u8) status;

        if (info && info_length) {
                apr_msg->info_length = info_length;
                memcpy(apr_msg->info, info, info_length);
        }

        if (private_data && private_data_len)
                memcpy(apr_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_apr(struct ib_cm_id *cm_id,
                   enum ib_cm_apr_status status,
                   void *info,
                   u8 info_length,
                   const void *private_data,
                   u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
            (info && info_length > IB_CM_APR_INFO_LENGTH))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_ESTABLISHED ||
            (cm_id->lap_state != IB_CM_LAP_RCVD &&
             cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
                ret = -EINVAL;
                goto out;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;

        cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
                      info, info_length, private_data, private_data_len);
        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

        cm_id->lap_state = IB_CM_LAP_IDLE;
out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);

static int cm_apr_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_apr_msg *apr_msg;
        unsigned long flags;
        int ret;

        apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
                                   apr_msg->local_comm_id);
        if (!cm_id_priv)
                return -EINVAL; /* Unmatched reply. */

        work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
        work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
        work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
        work->cm_event.private_data = &apr_msg->private_data;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
            (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
             cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }
        cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        cm_id_priv->msg = NULL;

        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}

static int cm_timewait_handler(struct cm_work *work)
{
        struct cm_timewait_info *timewait_info;
        struct cm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        timewait_info = (struct cm_timewait_info *)work;
        cm_cleanup_timewait(timewait_info);

        cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
                                   timewait_info->work.remote_id);
        if (!cm_id_priv)
                return -EINVAL;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
            cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }
        cm_id_priv->id.state = IB_CM_IDLE;
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}

static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
                               struct cm_id_private *cm_id_priv,
                               struct ib_cm_sidr_req_param *param)
{
        cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
                          cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
        sidr_req_msg->request_id = cm_id_priv->id.local_id;
        sidr_req_msg->pkey = cpu_to_be16(param->path->pkey);
        sidr_req_msg->service_id = param->service_id;

        if (param->private_data && param->private_data_len)
                memcpy(sidr_req_msg->private_data, param->private_data,
                       param->private_data_len);
}

int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
                        struct ib_cm_sidr_req_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if (!param->path || (param->private_data &&
             param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
        if (ret)
                goto out;

        cm_id->service_id = param->service_id;
        cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
        cm_id_priv->timeout_ms = param->timeout_ms;
        cm_id_priv->max_cm_retries = param->max_cm_retries;
        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;

        cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
                           param);
        msg->timeout_ms = cm_id_priv->timeout_ms;
        msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state == IB_CM_IDLE)
                ret = ib_post_send_mad(msg, NULL);
        else
                ret = -EINVAL;

        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                goto out;
        }
        cm_id->state = IB_CM_SIDR_REQ_SENT;
        cm_id_priv->msg = msg;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);

static void cm_format_sidr_req_event(struct cm_work *work,
                                     struct ib_cm_id *listen_id)
{
        struct cm_sidr_req_msg *sidr_req_msg;
        struct ib_cm_sidr_req_event_param *param;

        sidr_req_msg = (struct cm_sidr_req_msg *)
                                work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.sidr_req_rcvd;
        param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
        param->listen_id = listen_id;
        param->port = work->port->port_num;
        work->cm_event.private_data = &sidr_req_msg->private_data;
}

static int cm_sidr_req_handler(struct cm_work *work)
{
        struct ib_cm_id *cm_id;
        struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
        struct cm_sidr_req_msg *sidr_req_msg;
        struct ib_wc *wc;
        unsigned long flags;

        cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
        if (IS_ERR(cm_id))
                return PTR_ERR(cm_id);
        cm_id_priv = container_of(cm_id, struct cm_id_private, id);

        /* Record SGID/SLID and request ID for lookup. */
        sidr_req_msg = (struct cm_sidr_req_msg *)
                                work->mad_recv_wc->recv_buf.mad;
        wc = work->mad_recv_wc->wc;
        cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
        cm_id_priv->av.dgid.global.interface_id = 0;
        cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
                                work->mad_recv_wc->recv_buf.grh,
                                &cm_id_priv->av);
        cm_id_priv->id.remote_id = sidr_req_msg->request_id;
        cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
        cm_id_priv->tid = sidr_req_msg->hdr.tid;
        atomic_inc(&cm_id_priv->work_count);

        spin_lock_irqsave(&cm.lock, flags);
        cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
        if (cur_cm_id_priv) {
                spin_unlock_irqrestore(&cm.lock, flags);
                goto out; /* Duplicate message. */
        }
        cur_cm_id_priv = cm_find_listen(cm_id->device,
                                        sidr_req_msg->service_id,
                                        sidr_req_msg->private_data);
        if (!cur_cm_id_priv) {
                rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
                spin_unlock_irqrestore(&cm.lock, flags);
                /* todo: reply with no match */
                goto out; /* No match. */
        }
        atomic_inc(&cur_cm_id_priv->refcount);
        spin_unlock_irqrestore(&cm.lock, flags);

        cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
        cm_id_priv->id.context = cur_cm_id_priv->id.context;
        cm_id_priv->id.service_id = sidr_req_msg->service_id;
        cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

        cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
        cm_process_work(cm_id_priv, work);
        cm_deref_id(cur_cm_id_priv);
        return 0;
out:
        ib_destroy_cm_id(&cm_id_priv->id);
        return -EINVAL;
}

static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
                               struct cm_id_private *cm_id_priv,
                               struct ib_cm_sidr_rep_param *param)
{
        cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
                          cm_id_priv->tid);
        sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
        sidr_rep_msg->status = param->status;
        cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
        sidr_rep_msg->service_id = cm_id_priv->id.service_id;
        sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

        if (param->info && param->info_length)
                memcpy(sidr_rep_msg->info, param->info, param->info_length);

        if (param->private_data && param->private_data_len)
                memcpy(sidr_rep_msg->private_data, param->private_data,
                       param->private_data_len);
}

int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
                        struct ib_cm_sidr_rep_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
            (param->private_data &&
             param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
                ret = -EINVAL;
                goto error;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto error;

        cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
                           param);
        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }
        cm_id->state = IB_CM_IDLE;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

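        /*
         * With the reply posted, remove this ID from the remote SIDR
         * table; a retransmitted SIDR REQ will no longer match it as
         * a duplicate.
         */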
        spin_lock_irqsave(&cm.lock, flags);
        rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
        spin_unlock_irqrestore(&cm.lock, flags);
        return 0;

error:  spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);

static void cm_format_sidr_rep_event(struct cm_work *work)
{
        struct cm_sidr_rep_msg *sidr_rep_msg;
        struct ib_cm_sidr_rep_event_param *param;

        sidr_rep_msg = (struct cm_sidr_rep_msg *)
                                work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.sidr_rep_rcvd;
        param->status = sidr_rep_msg->status;
        param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
        param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
        param->info = &sidr_rep_msg->info;
        param->info_len = sidr_rep_msg->info_length;
        work->cm_event.private_data = &sidr_rep_msg->private_data;
}

static int cm_sidr_rep_handler(struct cm_work *work)
{
        struct cm_sidr_rep_msg *sidr_rep_msg;
        struct cm_id_private *cm_id_priv;
        unsigned long flags;

        sidr_rep_msg = (struct cm_sidr_rep_msg *)
                                work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
        if (!cm_id_priv)
                return -EINVAL; /* Unmatched reply. */

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }
        cm_id_priv->id.state = IB_CM_IDLE;
        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        cm_format_sidr_rep_event(work);
        cm_process_work(cm_id_priv, work);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}

static void cm_process_send_error(struct ib_mad_send_buf *msg,
                                  enum ib_wc_status wc_status)
{
        struct cm_id_private *cm_id_priv;
        struct ib_cm_event cm_event;
        enum ib_cm_state state;
        unsigned long flags;
        int ret;

        memset(&cm_event, 0, sizeof cm_event);
        cm_id_priv = msg->context[0];

        /* Discard old sends or ones without a response. */
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        state = (enum ib_cm_state) (unsigned long) msg->context[1];
        if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
                goto discard;

        switch (state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
                cm_reset_to_idle(cm_id_priv);
                cm_event.event = IB_CM_REQ_ERROR;
                break;
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                cm_reset_to_idle(cm_id_priv);
                cm_event.event = IB_CM_REP_ERROR;
                break;
        case IB_CM_DREQ_SENT:
                cm_enter_timewait(cm_id_priv);
                cm_event.event = IB_CM_DREQ_ERROR;
                break;
        case IB_CM_SIDR_REQ_SENT:
                cm_id_priv->id.state = IB_CM_IDLE;
                cm_event.event = IB_CM_SIDR_REQ_ERROR;
                break;
        default:
                goto discard;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        cm_event.param.send_status = wc_status;

        /* No other events can occur on the cm_id at this point. */
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
        cm_free_msg(msg);
        if (ret)
                ib_destroy_cm_id(&cm_id_priv->id);
        return;
discard:
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        cm_free_msg(msg);
}

static void cm_send_handler(struct ib_mad_agent *mad_agent,
                            struct ib_mad_send_wc *mad_send_wc)
{
        struct ib_mad_send_buf *msg = mad_send_wc->send_buf;

        switch (mad_send_wc->status) {
        case IB_WC_SUCCESS:
        case IB_WC_WR_FLUSH_ERR:
                cm_free_msg(msg);
                break;
        default:
                if (msg->context[0] && msg->context[1])
                        cm_process_send_error(msg, mad_send_wc->status);
                else
                        cm_free_msg(msg);
                break;
        }
}

static void cm_work_handler(void *data)
{
        struct cm_work *work = data;
        int ret;

        switch (work->cm_event.event) {
        case IB_CM_REQ_RECEIVED:
                ret = cm_req_handler(work);
                break;
        case IB_CM_MRA_RECEIVED:
                ret = cm_mra_handler(work);
                break;
        case IB_CM_REJ_RECEIVED:
                ret = cm_rej_handler(work);
                break;
        case IB_CM_REP_RECEIVED:
                ret = cm_rep_handler(work);
                break;
        case IB_CM_RTU_RECEIVED:
                ret = cm_rtu_handler(work);
                break;
        case IB_CM_USER_ESTABLISHED:
                ret = cm_establish_handler(work);
                break;
        case IB_CM_DREQ_RECEIVED:
                ret = cm_dreq_handler(work);
                break;
        case IB_CM_DREP_RECEIVED:
                ret = cm_drep_handler(work);
                break;
        case IB_CM_SIDR_REQ_RECEIVED:
                ret = cm_sidr_req_handler(work);
                break;
        case IB_CM_SIDR_REP_RECEIVED:
                ret = cm_sidr_rep_handler(work);
                break;
        case IB_CM_LAP_RECEIVED:
                ret = cm_lap_handler(work);
                break;
        case IB_CM_APR_RECEIVED:
                ret = cm_apr_handler(work);
                break;
        case IB_CM_TIMEWAIT_EXIT:
                ret = cm_timewait_handler(work);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        if (ret)
                cm_free_work(work);
}

int ib_cm_establish(struct ib_cm_id *cm_id)
{
        struct cm_id_private *cm_id_priv;
        struct cm_work *work;
        unsigned long flags;
        int ret = 0;

        work = kmalloc(sizeof *work, GFP_ATOMIC);
        if (!work)
                return -ENOMEM;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id->state) {
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                cm_id->state = IB_CM_ESTABLISHED;
                break;
        case IB_CM_ESTABLISHED:
                ret = -EISCONN;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret) {
                kfree(work);
                goto out;
        }

        /*
         * The CM worker thread may try to destroy the cm_id before it
         * can execute this work item.  To prevent potential deadlock,
         * we need to find the cm_id once we're in the context of the
         * worker thread, rather than holding a reference on it.
         */
        INIT_WORK(&work->work, cm_work_handler, work);
        work->local_id = cm_id->local_id;
        work->remote_id = cm_id->remote_id;
        work->mad_recv_wc = NULL;
        work->cm_event.event = IB_CM_USER_ESTABLISHED;
        queue_work(cm.wq, &work->work);
out:
        return ret;
}
EXPORT_SYMBOL(ib_cm_establish);

static void cm_recv_handler(struct ib_mad_agent *mad_agent,
                            struct ib_mad_recv_wc *mad_recv_wc)
{
        struct cm_work *work;
        enum ib_cm_event_type event;
        int paths = 0;

        switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
        case CM_REQ_ATTR_ID:
                paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
                                                    alt_local_lid != 0);
                event = IB_CM_REQ_RECEIVED;
                break;
        case CM_MRA_ATTR_ID:
                event = IB_CM_MRA_RECEIVED;
                break;
        case CM_REJ_ATTR_ID:
                event = IB_CM_REJ_RECEIVED;
                break;
        case CM_REP_ATTR_ID:
                event = IB_CM_REP_RECEIVED;
                break;
        case CM_RTU_ATTR_ID:
                event = IB_CM_RTU_RECEIVED;
                break;
        case CM_DREQ_ATTR_ID:
                event = IB_CM_DREQ_RECEIVED;
                break;
        case CM_DREP_ATTR_ID:
                event = IB_CM_DREP_RECEIVED;
                break;
        case CM_SIDR_REQ_ATTR_ID:
                event = IB_CM_SIDR_REQ_RECEIVED;
                break;
        case CM_SIDR_REP_ATTR_ID:
                event = IB_CM_SIDR_REP_RECEIVED;
                break;
        case CM_LAP_ATTR_ID:
                paths = 1;
                event = IB_CM_LAP_RECEIVED;
                break;
        case CM_APR_ATTR_ID:
                event = IB_CM_APR_RECEIVED;
                break;
        default:
                ib_free_recv_mad(mad_recv_wc);
                return;
        }

        work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
                       GFP_KERNEL);
        if (!work) {
                ib_free_recv_mad(mad_recv_wc);
                return;
        }

        INIT_WORK(&work->work, cm_work_handler, work);
        work->cm_event.event = event;
        work->mad_recv_wc = mad_recv_wc;
        work->port = (struct cm_port *)mad_agent->context;
        queue_work(cm.wq, &work->work);
}

static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
                                struct ib_qp_attr *qp_attr,
                                int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REQ_RCVD:
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
        case IB_CM_ESTABLISHED:
                *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
                                IB_QP_PKEY_INDEX | IB_QP_PORT;
                qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
                                           IB_ACCESS_REMOTE_WRITE;
                if (cm_id_priv->responder_resources)
                        qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ;
                qp_attr->pkey_index = cm_id_priv->av.pkey_index;
                qp_attr->port_num = cm_id_priv->av.port->port_num;
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}

static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
                               struct ib_qp_attr *qp_attr,
                               int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_RCVD:
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
        case IB_CM_ESTABLISHED:
                *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
                                IB_QP_DEST_QPN | IB_QP_RQ_PSN;
                qp_attr->ah_attr = cm_id_priv->av.ah_attr;
                qp_attr->path_mtu = cm_id_priv->path_mtu;
                qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
                qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
                if (cm_id_priv->qp_type == IB_QPT_RC) {
                        *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
                                         IB_QP_MIN_RNR_TIMER;
                        qp_attr->max_dest_rd_atomic =
                                        cm_id_priv->responder_resources;
                        qp_attr->min_rnr_timer = 0;
                }
                if (cm_id_priv->alt_av.ah_attr.dlid) {
                        *qp_attr_mask |= IB_QP_ALT_PATH;
                        qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
                        qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
                }
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}

static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
                               struct ib_qp_attr *qp_attr,
                               int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
        case IB_CM_ESTABLISHED:
                *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
                qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
                if (cm_id_priv->qp_type == IB_QPT_RC) {
                        *qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
                                         IB_QP_RNR_RETRY |
                                         IB_QP_MAX_QP_RD_ATOMIC;
                        qp_attr->timeout = cm_id_priv->local_ack_timeout;
                        qp_attr->retry_cnt = cm_id_priv->retry_count;
                        qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
                        qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
                }
                if (cm_id_priv->alt_av.ah_attr.dlid) {
                        *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
                        qp_attr->path_mig_state = IB_MIG_REARM;
                }
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}

int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
                       struct ib_qp_attr *qp_attr,
                       int *qp_attr_mask)
{
        struct cm_id_private *cm_id_priv;
        int ret;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        switch (qp_attr->qp_state) {
        case IB_QPS_INIT:
                ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
                break;
        case IB_QPS_RTR:
                ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
                break;
        case IB_QPS_RTS:
                ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);

static void cm_add_one(struct ib_device *device)
{
        struct cm_device *cm_dev;
        struct cm_port *port;
        struct ib_mad_reg_req reg_req = {
                .mgmt_class = IB_MGMT_CLASS_CM,
                .mgmt_class_version = IB_CM_CLASS_VERSION
        };
        struct ib_port_modify port_modify = {
                .set_port_cap_mask = IB_PORT_CM_SUP
        };
        unsigned long flags;
        int ret;
        u8 i;

        cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
                         device->phys_port_cnt, GFP_KERNEL);
        if (!cm_dev)
                return;

        cm_dev->device = device;
        cm_dev->ca_guid = device->node_guid;

        set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
        for (i = 1; i <= device->phys_port_cnt; i++) {
                port = &cm_dev->port[i-1];
                port->cm_dev = cm_dev;
                port->port_num = i;
                port->mad_agent = ib_register_mad_agent(device, i,
                                                        IB_QPT_GSI,
                                                        &reg_req,
                                                        0,
                                                        cm_send_handler,
                                                        cm_recv_handler,
                                                        port);
                if (IS_ERR(port->mad_agent))
                        goto error1;

                ret = ib_modify_port(device, i, 0, &port_modify);
                if (ret)
                        goto error2;
        }
        ib_set_client_data(device, &cm_client, cm_dev);

        write_lock_irqsave(&cm.device_lock, flags);
        list_add_tail(&cm_dev->list, &cm.device_list);
        write_unlock_irqrestore(&cm.device_lock, flags);
        return;

error2:
        ib_unregister_mad_agent(port->mad_agent);
error1:
        port_modify.set_port_cap_mask = 0;
        port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
        while (--i) {
                port = &cm_dev->port[i-1];
                ib_modify_port(device, port->port_num, 0, &port_modify);
                ib_unregister_mad_agent(port->mad_agent);
        }
        kfree(cm_dev);
}

static void cm_remove_one(struct ib_device *device)
{
        struct cm_device *cm_dev;
        struct cm_port *port;
        struct ib_port_modify port_modify = {
                .clr_port_cap_mask = IB_PORT_CM_SUP
        };
        unsigned long flags;
        int i;

        cm_dev = ib_get_client_data(device, &cm_client);
        if (!cm_dev)
                return;

        write_lock_irqsave(&cm.device_lock, flags);
        list_del(&cm_dev->list);
        write_unlock_irqrestore(&cm.device_lock, flags);

        for (i = 1; i <= device->phys_port_cnt; i++) {
                port = &cm_dev->port[i-1];
                ib_modify_port(device, port->port_num, 0, &port_modify);
                ib_unregister_mad_agent(port->mad_agent);
        }
        kfree(cm_dev);
}

static int __init ib_cm_init(void)
{
        int ret;

        memset(&cm, 0, sizeof cm);
        INIT_LIST_HEAD(&cm.device_list);
        rwlock_init(&cm.device_lock);
        spin_lock_init(&cm.lock);
        cm.listen_service_table = RB_ROOT;
        cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
        cm.remote_id_table = RB_ROOT;
        cm.remote_qp_table = RB_ROOT;
        cm.remote_sidr_table = RB_ROOT;
        idr_init(&cm.local_id_table);
        idr_pre_get(&cm.local_id_table, GFP_KERNEL);

        cm.wq = create_workqueue("ib_cm");
        if (!cm.wq)
                return -ENOMEM;

        ret = ib_register_client(&cm_client);
        if (ret)
                goto error;

        return 0;
error:
        destroy_workqueue(cm.wq);
        return ret;
}

static void __exit ib_cm_cleanup(void)
{
        destroy_workqueue(cm.wq);
        ib_unregister_client(&cm_client);
        idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);
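
/*
 * Usage sketch (illustrative only; the handler name below is this
 * example's own, not part of the module): a minimal consumer callback
 * for the disconnect events delivered by the handlers above.
 * Returning 0 keeps the cm_id alive; a non-zero return asks the CM to
 * destroy it, as cm_process_send_error() does for send failures.
 *
 *	static int example_cm_handler(struct ib_cm_id *cm_id,
 *				      struct ib_cm_event *event)
 *	{
 *		switch (event->event) {
 *		case IB_CM_DREQ_RECEIVED:
 *			ib_send_cm_drep(cm_id, NULL, 0);
 *			break;
 *		case IB_CM_DREP_RECEIVED:
 *		case IB_CM_DREQ_ERROR:
 *		case IB_CM_TIMEWAIT_EXIT:
 *			break;
 *		default:
 *			break;
 *		}
 *		return 0;
 *	}
 */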