/*
 * Copyright (c) 2004-2007 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
} cm;

/* Counter indexes ordered by attribute ID */
enum {
	CM_REQ_COUNTER,
	CM_MRA_COUNTER,
	CM_REJ_COUNTER,
	CM_REP_COUNTER,
	CM_RTU_COUNTER,
	CM_DREQ_COUNTER,
	CM_DREP_COUNTER,
	CM_SIDR_REQ_COUNTER,
	CM_SIDR_REP_COUNTER,
	CM_LAP_COUNTER,
	CM_APR_COUNTER,
	CM_ATTR_COUNT,
	CM_ATTR_ID_OFFSET = 0x0010,
};

enum {
	CM_XMIT,
	CM_XMIT_RETRIES,
	CM_RECV,
	CM_RECV_DUPLICATES,
	CM_COUNTER_GROUPS
};
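/*
 * Each port keeps a CM_COUNTER_GROUPS x CM_ATTR_COUNT matrix of
 * counters, indexed first by direction/disposition and then by
 * attribute.  For example, a duplicate incoming REQ is accounted in
 * counter_group[CM_RECV_DUPLICATES].counter[CM_REQ_COUNTER], as done
 * in cm_dup_req_handler() below.
 */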
static char const counter_group_names[CM_COUNTER_GROUPS]
				      [sizeof("cm_rx_duplicates")] = {
	"cm_tx_msgs", "cm_tx_retries",
	"cm_rx_msgs", "cm_rx_duplicates"
};

struct cm_counter_group {
	struct kobject obj;
	atomic_long_t counter[CM_ATTR_COUNT];
};

struct cm_counter_attribute {
	struct attribute attr;
	int index;
};

#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.index = _index \
}
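/*
 * For reference, CM_COUNTER_ATTR(req, CM_REQ_COUNTER) expands to a
 * definition equivalent to:
 *
 *	struct cm_counter_attribute cm_req_counter_attr = {
 *		.attr = { .name = "req", .mode = 0444 },
 *		.index = CM_REQ_COUNTER
 *	};
 */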
static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);

static struct attribute *cm_counter_default_attrs[] = {
	&cm_req_counter_attr.attr,
	&cm_mra_counter_attr.attr,
	&cm_rej_counter_attr.attr,
	&cm_rep_counter_attr.attr,
	&cm_rtu_counter_attr.attr,
	&cm_dreq_counter_attr.attr,
	&cm_drep_counter_attr.attr,
	&cm_sidr_req_counter_attr.attr,
	&cm_sidr_rep_counter_attr.attr,
	&cm_lap_counter_attr.attr,
	&cm_apr_counter_attr.attr,
	NULL
};

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	struct kobject port_obj;
	u8 port_num;
	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};

struct cm_device {
	struct list_head list;
	struct ib_device *ib_device;
	struct device *device;
	u8 ack_delay;
	int going_down;
	struct cm_port *port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 timeout;
};

struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};

struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	atomic_t refcount;
	/* Number of clients sharing this ib_cm_id.  Only valid for listeners.
	 * Protected by the cm.lock spinlock. */
	int listen_sharecount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;
	u8 target_ack_delay;

	struct list_head work_list;
	atomic_t work_count;
};

static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}

static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}
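/*
 * Note on message lifetime: cm_alloc_msg() takes a reference on the
 * owning cm_id_private and stashes it in context[0]; response MADs
 * from cm_alloc_response_msg() leave context[0] NULL.  cm_free_msg()
 * destroys the address handle, drops the reference if one was taken,
 * and returns the send buffer, so every allocated MAD must eventually
 * pass through cm_free_msg().
 */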
static void *cm_copy_private_data(const void *private_data,
				  u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				    struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
			   grh, &av->ah_attr);
}

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;
	struct net_device *ndev = ib_get_ndev_from_path(path);

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
					path->gid_type, ndev, &p, NULL)) {
			port = cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (ndev)
		dev_put(ndev);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
			     &av->ah_attr);
	av->timeout = path->packet_life_time + 1;

	return 0;
}

static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&cm.lock, flags);

	id = idr_alloc_cyclic(&cm.local_id_table, cm_id_priv, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&cm.lock, flags);
	idr_preload_end();

	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
	return id < 0 ? id : 0;
}

static void cm_free_id(__be32 local_id)
{
	spin_lock_irq(&cm.lock);
	idr_remove(&cm.local_id_table,
		   (__force int) (local_id ^ cm.random_id_operand));
	spin_unlock_irq(&cm.lock);
}

static struct cm_id_private *cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table,
			      (__force int) (local_id ^ cm.random_id_operand));
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}

static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	spin_lock_irq(&cm.lock);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irq(&cm.lock);

	return cm_id_priv;
}
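/*
 * Worked example of the ID obfuscation above: if the IDR hands out
 * id 5 and random_id_operand is 0x12345678, the local_id exposed on
 * the wire is 5 ^ 0x12345678 = 0x1234567d.  cm_free_id() and
 * cm_get_id() XOR with the same operand to recover the IDR index, so
 * remote nodes cannot trivially guess valid communication IDs.
 */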
/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
{
	return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
	return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
	return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
	return (__force u64) a > (__force u64) b;
}

static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device))
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_left;
		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_right;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}

static struct cm_id_private *cm_find_listen(struct ib_device *device,
					    __be64 service_id)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device))
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (be64_lt(service_id, cm_id_priv->id.service_id))
			node = node->rb_left;
		else if (be64_gt(service_id, cm_id_priv->id.service_id))
			node = node->rb_right;
		else
			node = node->rb_right;
	}
	return NULL;
}
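/*
 * Example of the masked match above: a listener registered with
 * service_id 0x1000 and service_mask 0xff00 matches an incoming REQ
 * for service 0x10ab, since (0xff00 & 0x10ab) == 0x1000.  A mask of
 * ~0 therefore requires an exact match.
 */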
static struct cm_timewait_info *cm_insert_remote_id(struct cm_timewait_info
						    *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_timewait_info *cm_find_remote_id(__be64 remote_ca_guid,
						  __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (be32_lt(remote_id, timewait_info->work.remote_id))
			node = node->rb_left;
		else if (be32_gt(remote_id, timewait_info->work.remote_id))
			node = node->rb_right;
		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_left;
		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}

static struct cm_timewait_info *cm_insert_remote_qpn(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}

static struct cm_id_private *cm_insert_remote_sidr(struct cm_id_private
						   *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_right;
		else {
			int cmp;

			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(ret);	/* propagate the cm_alloc_id() error */
}
EXPORT_SYMBOL(ib_create_cm_id);
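/*
 * Usage sketch (illustrative only, error handling abbreviated): a
 * consumer typically pairs creation and destruction as
 *
 *	struct ib_cm_id *id;
 *
 *	id = ib_create_cm_id(device, my_handler, my_context);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *	...
 *	ib_destroy_cm_id(id);
 *
 * where my_handler and my_context are supplied by the caller.
 */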
static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}

/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
	int ack_timeout = packet_life_time + 1;

	if (ack_timeout >= ca_ack_delay)
		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
	else
		ack_timeout = ca_ack_delay +
			      (ack_timeout >= (ca_ack_delay - 1));

	return min(31, ack_timeout);
}
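/*
 * Worked example: an IBA time of 16 encodes 4.096us * 2^16 ~= 268ms;
 * cm_convert_to_ms(16) approximates this as 1 << (16 - 8) = 256ms,
 * and values of 8 or less round up to 1ms.  For cm_ack_timeout(),
 * ca_ack_delay = 15 with packet_life_time = 14 gives a base timeout
 * of 15; since ca_ack_delay is within one unit of it, the result is
 * bumped to 16 (doubling the encoded timeout), capped at 31.
 */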
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}

static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;
	struct cm_device *cm_dev;

	cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
	if (!cm_dev)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);

	/* Check if the device started its remove_one */
	spin_lock_irq(&cm.lock);
	if (!cm_dev->going_down)
		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
				   msecs_to_jiffies(wait_time));
	spin_unlock_irq(&cm.lock);

	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		spin_unlock_irq(&cm_id_priv->lock);

		spin_lock_irq(&cm.lock);
		if (--cm_id_priv->listen_sharecount > 0) {
			/* The id is still shared. */
			cm_deref_id(cm_id_priv);
			spin_unlock_irq(&cm.lock);
			return;
		}
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		spin_lock_irq(&cm.lock);
		if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
			rb_erase(&cm_id_priv->sidr_id_node,
				 &cm.remote_sidr_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->id.device->node_guid,
			       sizeof cm_id_priv->id.device->node_guid,
			       NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irq(&cm_id_priv->lock);
		} else {
			spin_unlock_irq(&cm_id_priv->lock);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		}
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irq(&cm_id_priv->lock);
		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
			break;
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	}

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

/**
 * __ib_cm_listen - Initiates listening on the specified service ID for
 *   connection and service ID resolution requests.
 * @cm_id: Connection identifier associated with the listen request.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 * @service_mask: Mask applied to the service ID used to listen across a
 *   range of service IDs.  If set to 0, the service ID is matched
 *   exactly.  This parameter is ignored if %service_id is set to
 *   IB_CM_ASSIGN_SERVICE_ID.
 */
static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
			  __be64 service_mask)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	int ret = 0;

	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	cm_id->state = IB_CM_LISTEN;
	++cm_id_priv->listen_sharecount;

	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = ~cpu_to_be64(0);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		--cm_id_priv->listen_sharecount;
		ret = -EBUSY;
	}
	return ret;
}

int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm.lock, flags);
	ret = __ib_cm_listen(cm_id, service_id, service_mask);
	spin_unlock_irqrestore(&cm.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
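/*
 * Usage sketch (illustrative): listen for an exact service ID with
 *
 *	ret = ib_cm_listen(cm_id, cpu_to_be64(0x1000), 0);
 *
 * or across a range of IDs with
 *
 *	ret = ib_cm_listen(cm_id, cpu_to_be64(0x1000),
 *			   cpu_to_be64(~0xffULL));
 *
 * which matches any incoming service ID in 0x1000..0x10ff.
 */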
/**
 * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on
 *   the given service ID.
 *
 * If there's an existing ID listening on that same device and service ID,
 * return it.
 *
 * @device: Device associated with the cm_id.  All related communication will
 *   be associated with the specified device.
 * @cm_handler: Callback invoked to notify the user of CM events.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 *
 * Callers should call ib_destroy_cm_id when done with the listener ID.
 */
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
				     ib_cm_handler cm_handler,
				     __be64 service_id)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_id *cm_id;
	unsigned long flags;
	int err = 0;

	/* Create an ID in advance, since the creation may sleep */
	cm_id = ib_create_cm_id(device, cm_handler, NULL);
	if (IS_ERR(cm_id))
		return cm_id;

	spin_lock_irqsave(&cm.lock, flags);

	if (service_id == IB_CM_ASSIGN_SERVICE_ID)
		goto new_id;

	/* Find an existing ID */
	cm_id_priv = cm_find_listen(device, service_id);
	if (cm_id_priv) {
		if (cm_id->cm_handler != cm_handler || cm_id->context) {
			/* Sharing an ib_cm_id with different handlers is not
			 * supported */
			spin_unlock_irqrestore(&cm.lock, flags);
			ib_destroy_cm_id(cm_id); /* drop the pre-allocated ID */
			return ERR_PTR(-EINVAL);
		}
		atomic_inc(&cm_id_priv->refcount);
		++cm_id_priv->listen_sharecount;
		spin_unlock_irqrestore(&cm.lock, flags);

		ib_destroy_cm_id(cm_id);
		cm_id = &cm_id_priv->id;
		return cm_id;
	}

new_id:
	/* Use newly created ID */
	err = __ib_cm_listen(cm_id, service_id, 0);

	spin_unlock_irqrestore(&cm.lock, flags);

	if (err) {
		ib_destroy_cm_id(cm_id);
		return ERR_PTR(err);
	}
	return cm_id;
}
EXPORT_SYMBOL(ib_cm_insert_listen);

static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
			 (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}
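/*
 * Resulting TID layout: the upper 32 bits carry the MAD agent's
 * hi_tid, while the lower 32 bits are the local communication ID with
 * the message sequence number OR'ed into bits 31:30, giving each
 * class of outstanding message a distinct transaction ID.
 */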
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method = IB_MGMT_METHOD_SEND;
	hdr->attr_id = attr_id;
	hdr->tid = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	struct ib_sa_path_rec *pri_path = param->primary_path;
	struct ib_sa_path_rec *alt_path = param->alternate_path;

	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);

	if (param->qp_type != IB_QPT_XRC_INI) {
		cm_req_set_resp_res(req_msg, param->responder_resources);
		cm_req_set_retry_count(req_msg, param->retry_count);
		cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
		cm_req_set_srq(req_msg, param->srq);
	}

	if (pri_path->hop_limit <= 1) {
		req_msg->primary_local_lid = pri_path->slid;
		req_msg->primary_remote_lid = pri_path->dlid;
	} else {
		/* Work-around until there's a way to obtain remote LID info */
		req_msg->primary_local_lid = IB_LID_PERMISSIVE;
		req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
	}
	req_msg->primary_local_gid = pri_path->sgid;
	req_msg->primary_remote_gid = pri_path->dgid;
	cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
	req_msg->primary_traffic_class = pri_path->traffic_class;
	req_msg->primary_hop_limit = pri_path->hop_limit;
	cm_req_set_primary_sl(req_msg, pri_path->sl);
	cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
	cm_req_set_primary_local_ack_timeout(req_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       pri_path->packet_life_time));

	if (alt_path) {
		if (alt_path->hop_limit <= 1) {
			req_msg->alt_local_lid = alt_path->slid;
			req_msg->alt_remote_lid = alt_path->dlid;
		} else {
			req_msg->alt_local_lid = IB_LID_PERMISSIVE;
			req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
		}
		req_msg->alt_local_gid = alt_path->sgid;
		req_msg->alt_remote_gid = alt_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  alt_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
		req_msg->alt_traffic_class = alt_path->traffic_class;
		req_msg->alt_hop_limit = alt_path->hop_limit;
		cm_req_set_alt_sl(req_msg, alt_path->sl);
		cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
		cm_req_set_alt_local_ack_timeout(req_msg,
			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
				       alt_path->packet_life_time));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
	    param->qp_type != IB_QPT_XRC_INI)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}
int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
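/*
 * Example of the timeout computation above: with packet_life_time =
 * 16 (~256ms one way) and remote_cm_response_timeout = 20 (~4096ms),
 * timeout_ms = 2 * 256 + 4096 = 4608ms per attempt, so a REQ sent
 * with max_cm_retries = 15 may wait over a minute before failing.
 */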
static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information.  Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct ib_sa_path_rec *primary_path,
				     struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
	primary_path->service_id = req_msg->service_id;

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
		alt_path->service_id = req_msg->service_id;
	}
}
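/*
 * Note the deliberate swap above: the REQ carries the sender's view
 * of each path, so when building the receiver's path records the
 * remote (local_*) fields become our destination and the local
 * (remote_*) fields become our source.
 */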
static u16 cm_get_bth_pkey(struct cm_work *work)
{
	struct ib_device *ib_dev = work->port->cm_dev->ib_device;
	u8 port_num = work->port->port_num;
	u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
	u16 pkey;
	int ret;

	ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
	if (ret) {
		dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
				     port_num, pkey_index, ret);
		return 0;
	}

	return pkey;
}

static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->bth_pkey = cm_get_bth_pkey(work);
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
		cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
		cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}
static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irq(&cm_id_priv->lock);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REQ_COUNTER]);

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
}

static struct cm_id_private *cm_match_req(struct cm_work *work,
					  struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for possible duplicate REQ. */
	spin_lock_irq(&cm.lock);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irq(&cm.lock);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Check for stale connections. */
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		return NULL;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id);
	if (!listen_cm_id_priv) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto out;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irq(&cm.lock);
out:
	return listen_cm_id_priv;
}

/*
 * Work-around for inter-subnet connections.  If the LIDs are permissive,
 * we need to override the LID/SL data in the REQ with the LID information
 * in the work completion.
 */
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
	if (!cm_req_get_primary_subnet_local(req_msg)) {
		if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
			req_msg->primary_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_primary_sl(req_msg, wc->sl);
		}

		if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
			req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}

	if (!cm_req_get_alt_subnet_local(req_msg)) {
		if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
			req_msg->alt_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_alt_sl(req_msg, wc->sl);
		}

		if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
			req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}
}

static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		kfree(cm_id_priv->timewait_info);
		goto destroy;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = ~cpu_to_be64(0);

	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);

	memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN);
	ret = ib_get_cached_gid(work->port->cm_dev->ib_device,
				work->port->port_num,
				cm_id_priv->av.ah_attr.grh.sgid_index,
				&gid, &gid_attr);
	if (!ret) {
		if (gid_attr.ndev) {
			work->path[0].ifindex = gid_attr.ndev->ifindex;
			work->path[0].net = dev_net(gid_attr.ndev);
			dev_put(gid_attr.ndev);
		}
		work->path[0].gid_type = gid_attr.gid_type;
		ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	}
	if (ret) {
		int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
					    work->port->port_num, 0,
					    &work->path[0].sgid,
					    &gid_attr);
		if (!err && gid_attr.ndev) {
			work->path[0].ifindex = gid_attr.ndev->ifindex;
			work->path[0].net = dev_net(gid_attr.ndev);
			dev_put(gid_attr.ndev);
		}
		work->path[0].gid_type = gid_attr.gid_type;
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,
			       NULL, 0);
		goto rejected;
	}
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret) {
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);
			goto rejected;
		}
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->pkey = req_msg->pkey;
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

rejected:
	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
destroy:
	ib_destroy_cm_id(cm_id);
	return ret;
}

static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	cm_rep_set_target_ack_delay(rep_msg,
				    cm_id_priv->av.port->cm_dev->ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;

	if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
		rep_msg->initiator_depth = param->initiator_depth;
		cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
		cm_rep_set_srq(rep_msg, param->srq);
		cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	} else {
		cm_rep_set_srq(rep_msg, 1);
		cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
	}

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
1769 ret = ib_post_send_mad(msg, NULL); 1770 if (ret) { 1771 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1772 cm_free_msg(msg); 1773 return ret; 1774 } 1775 1776 cm_id->state = IB_CM_REP_SENT; 1777 cm_id_priv->msg = msg; 1778 cm_id_priv->initiator_depth = param->initiator_depth; 1779 cm_id_priv->responder_resources = param->responder_resources; 1780 cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg); 1781 cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF); 1782 1783 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1784 return ret; 1785 } 1786 EXPORT_SYMBOL(ib_send_cm_rep); 1787 1788 static void cm_format_rtu(struct cm_rtu_msg *rtu_msg, 1789 struct cm_id_private *cm_id_priv, 1790 const void *private_data, 1791 u8 private_data_len) 1792 { 1793 cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid); 1794 rtu_msg->local_comm_id = cm_id_priv->id.local_id; 1795 rtu_msg->remote_comm_id = cm_id_priv->id.remote_id; 1796 1797 if (private_data && private_data_len) 1798 memcpy(rtu_msg->private_data, private_data, private_data_len); 1799 } 1800 1801 int ib_send_cm_rtu(struct ib_cm_id *cm_id, 1802 const void *private_data, 1803 u8 private_data_len) 1804 { 1805 struct cm_id_private *cm_id_priv; 1806 struct ib_mad_send_buf *msg; 1807 unsigned long flags; 1808 void *data; 1809 int ret; 1810 1811 if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE) 1812 return -EINVAL; 1813 1814 data = cm_copy_private_data(private_data, private_data_len); 1815 if (IS_ERR(data)) 1816 return PTR_ERR(data); 1817 1818 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1819 spin_lock_irqsave(&cm_id_priv->lock, flags); 1820 if (cm_id->state != IB_CM_REP_RCVD && 1821 cm_id->state != IB_CM_MRA_REP_SENT) { 1822 ret = -EINVAL; 1823 goto error; 1824 } 1825 1826 ret = cm_alloc_msg(cm_id_priv, &msg); 1827 if (ret) 1828 goto error; 1829 1830 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv, 1831 private_data, private_data_len); 1832 1833 ret = ib_post_send_mad(msg, NULL); 1834 if (ret) { 1835 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1836 cm_free_msg(msg); 1837 kfree(data); 1838 return ret; 1839 } 1840 1841 cm_id->state = IB_CM_ESTABLISHED; 1842 cm_set_private_data(cm_id_priv, data, private_data_len); 1843 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1844 return 0; 1845 1846 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1847 kfree(data); 1848 return ret; 1849 } 1850 EXPORT_SYMBOL(ib_send_cm_rtu); 1851 1852 static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type) 1853 { 1854 struct cm_rep_msg *rep_msg; 1855 struct ib_cm_rep_event_param *param; 1856 1857 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; 1858 param = &work->cm_event.param.rep_rcvd; 1859 param->remote_ca_guid = rep_msg->local_ca_guid; 1860 param->remote_qkey = be32_to_cpu(rep_msg->local_qkey); 1861 param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type)); 1862 param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg)); 1863 param->responder_resources = rep_msg->initiator_depth; 1864 param->initiator_depth = rep_msg->resp_resources; 1865 param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg); 1866 param->failover_accepted = cm_rep_get_failover(rep_msg); 1867 param->flow_control = cm_rep_get_flow_ctrl(rep_msg); 1868 param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg); 1869 param->srq = cm_rep_get_srq(rep_msg); 1870 work->cm_event.private_data = &rep_msg->private_data; 1871 } 1872 1873 static void 
cm_dup_rep_handler(struct cm_work *work) 1874 { 1875 struct cm_id_private *cm_id_priv; 1876 struct cm_rep_msg *rep_msg; 1877 struct ib_mad_send_buf *msg = NULL; 1878 int ret; 1879 1880 rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad; 1881 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 1882 rep_msg->local_comm_id); 1883 if (!cm_id_priv) 1884 return; 1885 1886 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 1887 counter[CM_REP_COUNTER]); 1888 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); 1889 if (ret) 1890 goto deref; 1891 1892 spin_lock_irq(&cm_id_priv->lock); 1893 if (cm_id_priv->id.state == IB_CM_ESTABLISHED) 1894 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv, 1895 cm_id_priv->private_data, 1896 cm_id_priv->private_data_len); 1897 else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT) 1898 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 1899 CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout, 1900 cm_id_priv->private_data, 1901 cm_id_priv->private_data_len); 1902 else 1903 goto unlock; 1904 spin_unlock_irq(&cm_id_priv->lock); 1905 1906 ret = ib_post_send_mad(msg, NULL); 1907 if (ret) 1908 goto free; 1909 goto deref; 1910 1911 unlock: spin_unlock_irq(&cm_id_priv->lock); 1912 free: cm_free_msg(msg); 1913 deref: cm_deref_id(cm_id_priv); 1914 } 1915 1916 static int cm_rep_handler(struct cm_work *work) 1917 { 1918 struct cm_id_private *cm_id_priv; 1919 struct cm_rep_msg *rep_msg; 1920 int ret; 1921 1922 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; 1923 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0); 1924 if (!cm_id_priv) { 1925 cm_dup_rep_handler(work); 1926 return -EINVAL; 1927 } 1928 1929 cm_format_rep_event(work, cm_id_priv->qp_type); 1930 1931 spin_lock_irq(&cm_id_priv->lock); 1932 switch (cm_id_priv->id.state) { 1933 case IB_CM_REQ_SENT: 1934 case IB_CM_MRA_REQ_RCVD: 1935 break; 1936 default: 1937 spin_unlock_irq(&cm_id_priv->lock); 1938 ret = -EINVAL; 1939 goto error; 1940 } 1941 1942 cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id; 1943 cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid; 1944 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type); 1945 1946 spin_lock(&cm.lock); 1947 /* Check for duplicate REP. */ 1948 if (cm_insert_remote_id(cm_id_priv->timewait_info)) { 1949 spin_unlock(&cm.lock); 1950 spin_unlock_irq(&cm_id_priv->lock); 1951 ret = -EINVAL; 1952 goto error; 1953 } 1954 /* Check for a stale connection. 
*/ 1955 if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) { 1956 rb_erase(&cm_id_priv->timewait_info->remote_id_node, 1957 &cm.remote_id_table); 1958 cm_id_priv->timewait_info->inserted_remote_id = 0; 1959 spin_unlock(&cm.lock); 1960 spin_unlock_irq(&cm_id_priv->lock); 1961 cm_issue_rej(work->port, work->mad_recv_wc, 1962 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP, 1963 NULL, 0); 1964 ret = -EINVAL; 1965 goto error; 1966 } 1967 spin_unlock(&cm.lock); 1968 1969 cm_id_priv->id.state = IB_CM_REP_RCVD; 1970 cm_id_priv->id.remote_id = rep_msg->local_comm_id; 1971 cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type); 1972 cm_id_priv->initiator_depth = rep_msg->resp_resources; 1973 cm_id_priv->responder_resources = rep_msg->initiator_depth; 1974 cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg); 1975 cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg); 1976 cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg); 1977 cm_id_priv->av.timeout = 1978 cm_ack_timeout(cm_id_priv->target_ack_delay, 1979 cm_id_priv->av.timeout - 1); 1980 cm_id_priv->alt_av.timeout = 1981 cm_ack_timeout(cm_id_priv->target_ack_delay, 1982 cm_id_priv->alt_av.timeout - 1); 1983 1984 /* todo: handle peer_to_peer */ 1985 1986 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 1987 ret = atomic_inc_and_test(&cm_id_priv->work_count); 1988 if (!ret) 1989 list_add_tail(&work->list, &cm_id_priv->work_list); 1990 spin_unlock_irq(&cm_id_priv->lock); 1991 1992 if (ret) 1993 cm_process_work(cm_id_priv, work); 1994 else 1995 cm_deref_id(cm_id_priv); 1996 return 0; 1997 1998 error: 1999 cm_deref_id(cm_id_priv); 2000 return ret; 2001 } 2002 2003 static int cm_establish_handler(struct cm_work *work) 2004 { 2005 struct cm_id_private *cm_id_priv; 2006 int ret; 2007 2008 /* See comment in cm_establish about lookup. */ 2009 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id); 2010 if (!cm_id_priv) 2011 return -EINVAL; 2012 2013 spin_lock_irq(&cm_id_priv->lock); 2014 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) { 2015 spin_unlock_irq(&cm_id_priv->lock); 2016 goto out; 2017 } 2018 2019 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2020 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2021 if (!ret) 2022 list_add_tail(&work->list, &cm_id_priv->work_list); 2023 spin_unlock_irq(&cm_id_priv->lock); 2024 2025 if (ret) 2026 cm_process_work(cm_id_priv, work); 2027 else 2028 cm_deref_id(cm_id_priv); 2029 return 0; 2030 out: 2031 cm_deref_id(cm_id_priv); 2032 return -EINVAL; 2033 } 2034 2035 static int cm_rtu_handler(struct cm_work *work) 2036 { 2037 struct cm_id_private *cm_id_priv; 2038 struct cm_rtu_msg *rtu_msg; 2039 int ret; 2040 2041 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad; 2042 cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id, 2043 rtu_msg->local_comm_id); 2044 if (!cm_id_priv) 2045 return -EINVAL; 2046 2047 work->cm_event.private_data = &rtu_msg->private_data; 2048 2049 spin_lock_irq(&cm_id_priv->lock); 2050 if (cm_id_priv->id.state != IB_CM_REP_SENT && 2051 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) { 2052 spin_unlock_irq(&cm_id_priv->lock); 2053 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 
2054 counter[CM_RTU_COUNTER]); 2055 goto out; 2056 } 2057 cm_id_priv->id.state = IB_CM_ESTABLISHED; 2058 2059 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2060 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2061 if (!ret) 2062 list_add_tail(&work->list, &cm_id_priv->work_list); 2063 spin_unlock_irq(&cm_id_priv->lock); 2064 2065 if (ret) 2066 cm_process_work(cm_id_priv, work); 2067 else 2068 cm_deref_id(cm_id_priv); 2069 return 0; 2070 out: 2071 cm_deref_id(cm_id_priv); 2072 return -EINVAL; 2073 } 2074 2075 static void cm_format_dreq(struct cm_dreq_msg *dreq_msg, 2076 struct cm_id_private *cm_id_priv, 2077 const void *private_data, 2078 u8 private_data_len) 2079 { 2080 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID, 2081 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ)); 2082 dreq_msg->local_comm_id = cm_id_priv->id.local_id; 2083 dreq_msg->remote_comm_id = cm_id_priv->id.remote_id; 2084 cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn); 2085 2086 if (private_data && private_data_len) 2087 memcpy(dreq_msg->private_data, private_data, private_data_len); 2088 } 2089 2090 int ib_send_cm_dreq(struct ib_cm_id *cm_id, 2091 const void *private_data, 2092 u8 private_data_len) 2093 { 2094 struct cm_id_private *cm_id_priv; 2095 struct ib_mad_send_buf *msg; 2096 unsigned long flags; 2097 int ret; 2098 2099 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE) 2100 return -EINVAL; 2101 2102 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2103 spin_lock_irqsave(&cm_id_priv->lock, flags); 2104 if (cm_id->state != IB_CM_ESTABLISHED) { 2105 ret = -EINVAL; 2106 goto out; 2107 } 2108 2109 if (cm_id->lap_state == IB_CM_LAP_SENT || 2110 cm_id->lap_state == IB_CM_MRA_LAP_RCVD) 2111 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2112 2113 ret = cm_alloc_msg(cm_id_priv, &msg); 2114 if (ret) { 2115 cm_enter_timewait(cm_id_priv); 2116 goto out; 2117 } 2118 2119 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv, 2120 private_data, private_data_len); 2121 msg->timeout_ms = cm_id_priv->timeout_ms; 2122 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT; 2123 2124 ret = ib_post_send_mad(msg, NULL); 2125 if (ret) { 2126 cm_enter_timewait(cm_id_priv); 2127 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2128 cm_free_msg(msg); 2129 return ret; 2130 } 2131 2132 cm_id->state = IB_CM_DREQ_SENT; 2133 cm_id_priv->msg = msg; 2134 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2135 return ret; 2136 } 2137 EXPORT_SYMBOL(ib_send_cm_dreq); 2138 2139 static void cm_format_drep(struct cm_drep_msg *drep_msg, 2140 struct cm_id_private *cm_id_priv, 2141 const void *private_data, 2142 u8 private_data_len) 2143 { 2144 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid); 2145 drep_msg->local_comm_id = cm_id_priv->id.local_id; 2146 drep_msg->remote_comm_id = cm_id_priv->id.remote_id; 2147 2148 if (private_data && private_data_len) 2149 memcpy(drep_msg->private_data, private_data, private_data_len); 2150 } 2151 2152 int ib_send_cm_drep(struct ib_cm_id *cm_id, 2153 const void *private_data, 2154 u8 private_data_len) 2155 { 2156 struct cm_id_private *cm_id_priv; 2157 struct ib_mad_send_buf *msg; 2158 unsigned long flags; 2159 void *data; 2160 int ret; 2161 2162 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE) 2163 return -EINVAL; 2164 2165 data = cm_copy_private_data(private_data, private_data_len); 2166 if (IS_ERR(data)) 2167 return PTR_ERR(data); 2168 2169 cm_id_priv = container_of(cm_id, struct 
cm_id_private, id); 2170 spin_lock_irqsave(&cm_id_priv->lock, flags); 2171 if (cm_id->state != IB_CM_DREQ_RCVD) { 2172 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2173 kfree(data); 2174 return -EINVAL; 2175 } 2176 2177 cm_set_private_data(cm_id_priv, data, private_data_len); 2178 cm_enter_timewait(cm_id_priv); 2179 2180 ret = cm_alloc_msg(cm_id_priv, &msg); 2181 if (ret) 2182 goto out; 2183 2184 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, 2185 private_data, private_data_len); 2186 2187 ret = ib_post_send_mad(msg, NULL); 2188 if (ret) { 2189 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2190 cm_free_msg(msg); 2191 return ret; 2192 } 2193 2194 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2195 return ret; 2196 } 2197 EXPORT_SYMBOL(ib_send_cm_drep); 2198 2199 static int cm_issue_drep(struct cm_port *port, 2200 struct ib_mad_recv_wc *mad_recv_wc) 2201 { 2202 struct ib_mad_send_buf *msg = NULL; 2203 struct cm_dreq_msg *dreq_msg; 2204 struct cm_drep_msg *drep_msg; 2205 int ret; 2206 2207 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg); 2208 if (ret) 2209 return ret; 2210 2211 dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad; 2212 drep_msg = (struct cm_drep_msg *) msg->mad; 2213 2214 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid); 2215 drep_msg->remote_comm_id = dreq_msg->local_comm_id; 2216 drep_msg->local_comm_id = dreq_msg->remote_comm_id; 2217 2218 ret = ib_post_send_mad(msg, NULL); 2219 if (ret) 2220 cm_free_msg(msg); 2221 2222 return ret; 2223 } 2224 2225 static int cm_dreq_handler(struct cm_work *work) 2226 { 2227 struct cm_id_private *cm_id_priv; 2228 struct cm_dreq_msg *dreq_msg; 2229 struct ib_mad_send_buf *msg = NULL; 2230 int ret; 2231 2232 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad; 2233 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id, 2234 dreq_msg->local_comm_id); 2235 if (!cm_id_priv) { 2236 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 2237 counter[CM_DREQ_COUNTER]); 2238 cm_issue_drep(work->port, work->mad_recv_wc); 2239 return -EINVAL; 2240 } 2241 2242 work->cm_event.private_data = &dreq_msg->private_data; 2243 2244 spin_lock_irq(&cm_id_priv->lock); 2245 if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg)) 2246 goto unlock; 2247 2248 switch (cm_id_priv->id.state) { 2249 case IB_CM_REP_SENT: 2250 case IB_CM_DREQ_SENT: 2251 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2252 break; 2253 case IB_CM_ESTABLISHED: 2254 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT || 2255 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) 2256 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2257 break; 2258 case IB_CM_MRA_REP_RCVD: 2259 break; 2260 case IB_CM_TIMEWAIT: 2261 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 2262 counter[CM_DREQ_COUNTER]); 2263 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) 2264 goto unlock; 2265 2266 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, 2267 cm_id_priv->private_data, 2268 cm_id_priv->private_data_len); 2269 spin_unlock_irq(&cm_id_priv->lock); 2270 2271 if (ib_post_send_mad(msg, NULL)) 2272 cm_free_msg(msg); 2273 goto deref; 2274 case IB_CM_DREQ_RCVD: 2275 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 
2276 counter[CM_DREQ_COUNTER]); 2277 goto unlock; 2278 default: 2279 goto unlock; 2280 } 2281 cm_id_priv->id.state = IB_CM_DREQ_RCVD; 2282 cm_id_priv->tid = dreq_msg->hdr.tid; 2283 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2284 if (!ret) 2285 list_add_tail(&work->list, &cm_id_priv->work_list); 2286 spin_unlock_irq(&cm_id_priv->lock); 2287 2288 if (ret) 2289 cm_process_work(cm_id_priv, work); 2290 else 2291 cm_deref_id(cm_id_priv); 2292 return 0; 2293 2294 unlock: spin_unlock_irq(&cm_id_priv->lock); 2295 deref: cm_deref_id(cm_id_priv); 2296 return -EINVAL; 2297 } 2298 2299 static int cm_drep_handler(struct cm_work *work) 2300 { 2301 struct cm_id_private *cm_id_priv; 2302 struct cm_drep_msg *drep_msg; 2303 int ret; 2304 2305 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad; 2306 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id, 2307 drep_msg->local_comm_id); 2308 if (!cm_id_priv) 2309 return -EINVAL; 2310 2311 work->cm_event.private_data = &drep_msg->private_data; 2312 2313 spin_lock_irq(&cm_id_priv->lock); 2314 if (cm_id_priv->id.state != IB_CM_DREQ_SENT && 2315 cm_id_priv->id.state != IB_CM_DREQ_RCVD) { 2316 spin_unlock_irq(&cm_id_priv->lock); 2317 goto out; 2318 } 2319 cm_enter_timewait(cm_id_priv); 2320 2321 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2322 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2323 if (!ret) 2324 list_add_tail(&work->list, &cm_id_priv->work_list); 2325 spin_unlock_irq(&cm_id_priv->lock); 2326 2327 if (ret) 2328 cm_process_work(cm_id_priv, work); 2329 else 2330 cm_deref_id(cm_id_priv); 2331 return 0; 2332 out: 2333 cm_deref_id(cm_id_priv); 2334 return -EINVAL; 2335 } 2336 2337 int ib_send_cm_rej(struct ib_cm_id *cm_id, 2338 enum ib_cm_rej_reason reason, 2339 void *ari, 2340 u8 ari_length, 2341 const void *private_data, 2342 u8 private_data_len) 2343 { 2344 struct cm_id_private *cm_id_priv; 2345 struct ib_mad_send_buf *msg; 2346 unsigned long flags; 2347 int ret; 2348 2349 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) || 2350 (ari && ari_length > IB_CM_REJ_ARI_LENGTH)) 2351 return -EINVAL; 2352 2353 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2354 2355 spin_lock_irqsave(&cm_id_priv->lock, flags); 2356 switch (cm_id->state) { 2357 case IB_CM_REQ_SENT: 2358 case IB_CM_MRA_REQ_RCVD: 2359 case IB_CM_REQ_RCVD: 2360 case IB_CM_MRA_REQ_SENT: 2361 case IB_CM_REP_RCVD: 2362 case IB_CM_MRA_REP_SENT: 2363 ret = cm_alloc_msg(cm_id_priv, &msg); 2364 if (!ret) 2365 cm_format_rej((struct cm_rej_msg *) msg->mad, 2366 cm_id_priv, reason, ari, ari_length, 2367 private_data, private_data_len); 2368 2369 cm_reset_to_idle(cm_id_priv); 2370 break; 2371 case IB_CM_REP_SENT: 2372 case IB_CM_MRA_REP_RCVD: 2373 ret = cm_alloc_msg(cm_id_priv, &msg); 2374 if (!ret) 2375 cm_format_rej((struct cm_rej_msg *) msg->mad, 2376 cm_id_priv, reason, ari, ari_length, 2377 private_data, private_data_len); 2378 2379 cm_enter_timewait(cm_id_priv); 2380 break; 2381 default: 2382 ret = -EINVAL; 2383 goto out; 2384 } 2385 2386 if (ret) 2387 goto out; 2388 2389 ret = ib_post_send_mad(msg, NULL); 2390 if (ret) 2391 cm_free_msg(msg); 2392 2393 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2394 return ret; 2395 } 2396 EXPORT_SYMBOL(ib_send_cm_rej); 2397 2398 static void cm_format_rej_event(struct cm_work *work) 2399 { 2400 struct cm_rej_msg *rej_msg; 2401 struct ib_cm_rej_event_param *param; 2402 2403 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; 2404 param = &work->cm_event.param.rej_rcvd; 
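	/*
	 * The ARI bytes and reason code are handed to the consumer as-is;
	 * the reason also selects the state transition taken in
	 * cm_rej_handler() (IB_CM_REJ_STALE_CONN enters timewait instead
	 * of resetting to idle).
	 */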
2405 param->ari = rej_msg->ari; 2406 param->ari_length = cm_rej_get_reject_info_len(rej_msg); 2407 param->reason = __be16_to_cpu(rej_msg->reason); 2408 work->cm_event.private_data = &rej_msg->private_data; 2409 } 2410 2411 static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg) 2412 { 2413 struct cm_timewait_info *timewait_info; 2414 struct cm_id_private *cm_id_priv; 2415 __be32 remote_id; 2416 2417 remote_id = rej_msg->local_comm_id; 2418 2419 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) { 2420 spin_lock_irq(&cm.lock); 2421 timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari), 2422 remote_id); 2423 if (!timewait_info) { 2424 spin_unlock_irq(&cm.lock); 2425 return NULL; 2426 } 2427 cm_id_priv = idr_find(&cm.local_id_table, (__force int) 2428 (timewait_info->work.local_id ^ 2429 cm.random_id_operand)); 2430 if (cm_id_priv) { 2431 if (cm_id_priv->id.remote_id == remote_id) 2432 atomic_inc(&cm_id_priv->refcount); 2433 else 2434 cm_id_priv = NULL; 2435 } 2436 spin_unlock_irq(&cm.lock); 2437 } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ) 2438 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0); 2439 else 2440 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id); 2441 2442 return cm_id_priv; 2443 } 2444 2445 static int cm_rej_handler(struct cm_work *work) 2446 { 2447 struct cm_id_private *cm_id_priv; 2448 struct cm_rej_msg *rej_msg; 2449 int ret; 2450 2451 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; 2452 cm_id_priv = cm_acquire_rejected_id(rej_msg); 2453 if (!cm_id_priv) 2454 return -EINVAL; 2455 2456 cm_format_rej_event(work); 2457 2458 spin_lock_irq(&cm_id_priv->lock); 2459 switch (cm_id_priv->id.state) { 2460 case IB_CM_REQ_SENT: 2461 case IB_CM_MRA_REQ_RCVD: 2462 case IB_CM_REP_SENT: 2463 case IB_CM_MRA_REP_RCVD: 2464 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2465 /* fall through */ 2466 case IB_CM_REQ_RCVD: 2467 case IB_CM_MRA_REQ_SENT: 2468 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN) 2469 cm_enter_timewait(cm_id_priv); 2470 else 2471 cm_reset_to_idle(cm_id_priv); 2472 break; 2473 case IB_CM_DREQ_SENT: 2474 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2475 /* fall through */ 2476 case IB_CM_REP_RCVD: 2477 case IB_CM_MRA_REP_SENT: 2478 cm_enter_timewait(cm_id_priv); 2479 break; 2480 case IB_CM_ESTABLISHED: 2481 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT || 2482 cm_id_priv->id.lap_state == IB_CM_LAP_SENT) { 2483 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT) 2484 ib_cancel_mad(cm_id_priv->av.port->mad_agent, 2485 cm_id_priv->msg); 2486 cm_enter_timewait(cm_id_priv); 2487 break; 2488 } 2489 /* fall through */ 2490 default: 2491 spin_unlock_irq(&cm_id_priv->lock); 2492 ret = -EINVAL; 2493 goto out; 2494 } 2495 2496 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2497 if (!ret) 2498 list_add_tail(&work->list, &cm_id_priv->work_list); 2499 spin_unlock_irq(&cm_id_priv->lock); 2500 2501 if (ret) 2502 cm_process_work(cm_id_priv, work); 2503 else 2504 cm_deref_id(cm_id_priv); 2505 return 0; 2506 out: 2507 cm_deref_id(cm_id_priv); 2508 return -EINVAL; 2509 } 2510 2511 int ib_send_cm_mra(struct ib_cm_id *cm_id, 2512 u8 service_timeout, 2513 const void *private_data, 2514 u8 private_data_len) 2515 { 2516 struct cm_id_private *cm_id_priv; 2517 struct ib_mad_send_buf *msg; 2518 enum ib_cm_state cm_state; 2519 enum ib_cm_lap_state lap_state; 2520 enum cm_msg_response msg_response; 2521 void *data; 2522 unsigned long flags; 2523 int ret; 2524 
2525 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE) 2526 return -EINVAL; 2527 2528 data = cm_copy_private_data(private_data, private_data_len); 2529 if (IS_ERR(data)) 2530 return PTR_ERR(data); 2531 2532 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2533 2534 spin_lock_irqsave(&cm_id_priv->lock, flags); 2535 switch(cm_id_priv->id.state) { 2536 case IB_CM_REQ_RCVD: 2537 cm_state = IB_CM_MRA_REQ_SENT; 2538 lap_state = cm_id->lap_state; 2539 msg_response = CM_MSG_RESPONSE_REQ; 2540 break; 2541 case IB_CM_REP_RCVD: 2542 cm_state = IB_CM_MRA_REP_SENT; 2543 lap_state = cm_id->lap_state; 2544 msg_response = CM_MSG_RESPONSE_REP; 2545 break; 2546 case IB_CM_ESTABLISHED: 2547 if (cm_id->lap_state == IB_CM_LAP_RCVD) { 2548 cm_state = cm_id->state; 2549 lap_state = IB_CM_MRA_LAP_SENT; 2550 msg_response = CM_MSG_RESPONSE_OTHER; 2551 break; 2552 } 2553 default: 2554 ret = -EINVAL; 2555 goto error1; 2556 } 2557 2558 if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) { 2559 ret = cm_alloc_msg(cm_id_priv, &msg); 2560 if (ret) 2561 goto error1; 2562 2563 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2564 msg_response, service_timeout, 2565 private_data, private_data_len); 2566 ret = ib_post_send_mad(msg, NULL); 2567 if (ret) 2568 goto error2; 2569 } 2570 2571 cm_id->state = cm_state; 2572 cm_id->lap_state = lap_state; 2573 cm_id_priv->service_timeout = service_timeout; 2574 cm_set_private_data(cm_id_priv, data, private_data_len); 2575 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2576 return 0; 2577 2578 error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2579 kfree(data); 2580 return ret; 2581 2582 error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2583 kfree(data); 2584 cm_free_msg(msg); 2585 return ret; 2586 } 2587 EXPORT_SYMBOL(ib_send_cm_mra); 2588 2589 static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg) 2590 { 2591 switch (cm_mra_get_msg_mraed(mra_msg)) { 2592 case CM_MSG_RESPONSE_REQ: 2593 return cm_acquire_id(mra_msg->remote_comm_id, 0); 2594 case CM_MSG_RESPONSE_REP: 2595 case CM_MSG_RESPONSE_OTHER: 2596 return cm_acquire_id(mra_msg->remote_comm_id, 2597 mra_msg->local_comm_id); 2598 default: 2599 return NULL; 2600 } 2601 } 2602 2603 static int cm_mra_handler(struct cm_work *work) 2604 { 2605 struct cm_id_private *cm_id_priv; 2606 struct cm_mra_msg *mra_msg; 2607 int timeout, ret; 2608 2609 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad; 2610 cm_id_priv = cm_acquire_mraed_id(mra_msg); 2611 if (!cm_id_priv) 2612 return -EINVAL; 2613 2614 work->cm_event.private_data = &mra_msg->private_data; 2615 work->cm_event.param.mra_rcvd.service_timeout = 2616 cm_mra_get_service_timeout(mra_msg); 2617 timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) + 2618 cm_convert_to_ms(cm_id_priv->av.timeout); 2619 2620 spin_lock_irq(&cm_id_priv->lock); 2621 switch (cm_id_priv->id.state) { 2622 case IB_CM_REQ_SENT: 2623 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ || 2624 ib_modify_mad(cm_id_priv->av.port->mad_agent, 2625 cm_id_priv->msg, timeout)) 2626 goto out; 2627 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD; 2628 break; 2629 case IB_CM_REP_SENT: 2630 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP || 2631 ib_modify_mad(cm_id_priv->av.port->mad_agent, 2632 cm_id_priv->msg, timeout)) 2633 goto out; 2634 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD; 2635 break; 2636 case IB_CM_ESTABLISHED: 2637 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER || 2638 cm_id_priv->id.lap_state != 
IB_CM_LAP_SENT || 2639 ib_modify_mad(cm_id_priv->av.port->mad_agent, 2640 cm_id_priv->msg, timeout)) { 2641 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) 2642 atomic_long_inc(&work->port-> 2643 counter_group[CM_RECV_DUPLICATES]. 2644 counter[CM_MRA_COUNTER]); 2645 goto out; 2646 } 2647 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD; 2648 break; 2649 case IB_CM_MRA_REQ_RCVD: 2650 case IB_CM_MRA_REP_RCVD: 2651 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 2652 counter[CM_MRA_COUNTER]); 2653 /* fall through */ 2654 default: 2655 goto out; 2656 } 2657 2658 cm_id_priv->msg->context[1] = (void *) (unsigned long) 2659 cm_id_priv->id.state; 2660 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2661 if (!ret) 2662 list_add_tail(&work->list, &cm_id_priv->work_list); 2663 spin_unlock_irq(&cm_id_priv->lock); 2664 2665 if (ret) 2666 cm_process_work(cm_id_priv, work); 2667 else 2668 cm_deref_id(cm_id_priv); 2669 return 0; 2670 out: 2671 spin_unlock_irq(&cm_id_priv->lock); 2672 cm_deref_id(cm_id_priv); 2673 return -EINVAL; 2674 } 2675 2676 static void cm_format_lap(struct cm_lap_msg *lap_msg, 2677 struct cm_id_private *cm_id_priv, 2678 struct ib_sa_path_rec *alternate_path, 2679 const void *private_data, 2680 u8 private_data_len) 2681 { 2682 cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID, 2683 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP)); 2684 lap_msg->local_comm_id = cm_id_priv->id.local_id; 2685 lap_msg->remote_comm_id = cm_id_priv->id.remote_id; 2686 cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn); 2687 /* todo: need remote CM response timeout */ 2688 cm_lap_set_remote_resp_timeout(lap_msg, 0x1F); 2689 lap_msg->alt_local_lid = alternate_path->slid; 2690 lap_msg->alt_remote_lid = alternate_path->dlid; 2691 lap_msg->alt_local_gid = alternate_path->sgid; 2692 lap_msg->alt_remote_gid = alternate_path->dgid; 2693 cm_lap_set_flow_label(lap_msg, alternate_path->flow_label); 2694 cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class); 2695 lap_msg->alt_hop_limit = alternate_path->hop_limit; 2696 cm_lap_set_packet_rate(lap_msg, alternate_path->rate); 2697 cm_lap_set_sl(lap_msg, alternate_path->sl); 2698 cm_lap_set_subnet_local(lap_msg, 1); /* local only... 
*/ 2699 cm_lap_set_local_ack_timeout(lap_msg, 2700 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay, 2701 alternate_path->packet_life_time)); 2702 2703 if (private_data && private_data_len) 2704 memcpy(lap_msg->private_data, private_data, private_data_len); 2705 } 2706 2707 int ib_send_cm_lap(struct ib_cm_id *cm_id, 2708 struct ib_sa_path_rec *alternate_path, 2709 const void *private_data, 2710 u8 private_data_len) 2711 { 2712 struct cm_id_private *cm_id_priv; 2713 struct ib_mad_send_buf *msg; 2714 unsigned long flags; 2715 int ret; 2716 2717 if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE) 2718 return -EINVAL; 2719 2720 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2721 spin_lock_irqsave(&cm_id_priv->lock, flags); 2722 if (cm_id->state != IB_CM_ESTABLISHED || 2723 (cm_id->lap_state != IB_CM_LAP_UNINIT && 2724 cm_id->lap_state != IB_CM_LAP_IDLE)) { 2725 ret = -EINVAL; 2726 goto out; 2727 } 2728 2729 ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av); 2730 if (ret) 2731 goto out; 2732 cm_id_priv->alt_av.timeout = 2733 cm_ack_timeout(cm_id_priv->target_ack_delay, 2734 cm_id_priv->alt_av.timeout - 1); 2735 2736 ret = cm_alloc_msg(cm_id_priv, &msg); 2737 if (ret) 2738 goto out; 2739 2740 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv, 2741 alternate_path, private_data, private_data_len); 2742 msg->timeout_ms = cm_id_priv->timeout_ms; 2743 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED; 2744 2745 ret = ib_post_send_mad(msg, NULL); 2746 if (ret) { 2747 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2748 cm_free_msg(msg); 2749 return ret; 2750 } 2751 2752 cm_id->lap_state = IB_CM_LAP_SENT; 2753 cm_id_priv->msg = msg; 2754 2755 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2756 return ret; 2757 } 2758 EXPORT_SYMBOL(ib_send_cm_lap); 2759 2760 static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv, 2761 struct ib_sa_path_rec *path, 2762 struct cm_lap_msg *lap_msg) 2763 { 2764 memset(path, 0, sizeof *path); 2765 path->dgid = lap_msg->alt_local_gid; 2766 path->sgid = lap_msg->alt_remote_gid; 2767 path->dlid = lap_msg->alt_local_lid; 2768 path->slid = lap_msg->alt_remote_lid; 2769 path->flow_label = cm_lap_get_flow_label(lap_msg); 2770 path->hop_limit = lap_msg->alt_hop_limit; 2771 path->traffic_class = cm_lap_get_traffic_class(lap_msg); 2772 path->reversible = 1; 2773 path->pkey = cm_id_priv->pkey; 2774 path->sl = cm_lap_get_sl(lap_msg); 2775 path->mtu_selector = IB_SA_EQ; 2776 path->mtu = cm_id_priv->path_mtu; 2777 path->rate_selector = IB_SA_EQ; 2778 path->rate = cm_lap_get_packet_rate(lap_msg); 2779 path->packet_life_time_selector = IB_SA_EQ; 2780 path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg); 2781 path->packet_life_time -= (path->packet_life_time > 0); 2782 } 2783 2784 static int cm_lap_handler(struct cm_work *work) 2785 { 2786 struct cm_id_private *cm_id_priv; 2787 struct cm_lap_msg *lap_msg; 2788 struct ib_cm_lap_event_param *param; 2789 struct ib_mad_send_buf *msg = NULL; 2790 int ret; 2791 2792 /* todo: verify LAP request and send reject APR if invalid. 
*/ 2793 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad; 2794 cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id, 2795 lap_msg->local_comm_id); 2796 if (!cm_id_priv) 2797 return -EINVAL; 2798 2799 param = &work->cm_event.param.lap_rcvd; 2800 param->alternate_path = &work->path[0]; 2801 cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg); 2802 work->cm_event.private_data = &lap_msg->private_data; 2803 2804 spin_lock_irq(&cm_id_priv->lock); 2805 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) 2806 goto unlock; 2807 2808 switch (cm_id_priv->id.lap_state) { 2809 case IB_CM_LAP_UNINIT: 2810 case IB_CM_LAP_IDLE: 2811 break; 2812 case IB_CM_MRA_LAP_SENT: 2813 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 2814 counter[CM_LAP_COUNTER]); 2815 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) 2816 goto unlock; 2817 2818 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2819 CM_MSG_RESPONSE_OTHER, 2820 cm_id_priv->service_timeout, 2821 cm_id_priv->private_data, 2822 cm_id_priv->private_data_len); 2823 spin_unlock_irq(&cm_id_priv->lock); 2824 2825 if (ib_post_send_mad(msg, NULL)) 2826 cm_free_msg(msg); 2827 goto deref; 2828 case IB_CM_LAP_RCVD: 2829 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 2830 counter[CM_LAP_COUNTER]); 2831 goto unlock; 2832 default: 2833 goto unlock; 2834 } 2835 2836 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD; 2837 cm_id_priv->tid = lap_msg->hdr.tid; 2838 cm_init_av_for_response(work->port, work->mad_recv_wc->wc, 2839 work->mad_recv_wc->recv_buf.grh, 2840 &cm_id_priv->av); 2841 cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av); 2842 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2843 if (!ret) 2844 list_add_tail(&work->list, &cm_id_priv->work_list); 2845 spin_unlock_irq(&cm_id_priv->lock); 2846 2847 if (ret) 2848 cm_process_work(cm_id_priv, work); 2849 else 2850 cm_deref_id(cm_id_priv); 2851 return 0; 2852 2853 unlock: spin_unlock_irq(&cm_id_priv->lock); 2854 deref: cm_deref_id(cm_id_priv); 2855 return -EINVAL; 2856 } 2857 2858 static void cm_format_apr(struct cm_apr_msg *apr_msg, 2859 struct cm_id_private *cm_id_priv, 2860 enum ib_cm_apr_status status, 2861 void *info, 2862 u8 info_length, 2863 const void *private_data, 2864 u8 private_data_len) 2865 { 2866 cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid); 2867 apr_msg->local_comm_id = cm_id_priv->id.local_id; 2868 apr_msg->remote_comm_id = cm_id_priv->id.remote_id; 2869 apr_msg->ap_status = (u8) status; 2870 2871 if (info && info_length) { 2872 apr_msg->info_length = info_length; 2873 memcpy(apr_msg->info, info, info_length); 2874 } 2875 2876 if (private_data && private_data_len) 2877 memcpy(apr_msg->private_data, private_data, private_data_len); 2878 } 2879 2880 int ib_send_cm_apr(struct ib_cm_id *cm_id, 2881 enum ib_cm_apr_status status, 2882 void *info, 2883 u8 info_length, 2884 const void *private_data, 2885 u8 private_data_len) 2886 { 2887 struct cm_id_private *cm_id_priv; 2888 struct ib_mad_send_buf *msg; 2889 unsigned long flags; 2890 int ret; 2891 2892 if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) || 2893 (info && info_length > IB_CM_APR_INFO_LENGTH)) 2894 return -EINVAL; 2895 2896 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2897 spin_lock_irqsave(&cm_id_priv->lock, flags); 2898 if (cm_id->state != IB_CM_ESTABLISHED || 2899 (cm_id->lap_state != IB_CM_LAP_RCVD && 2900 cm_id->lap_state != IB_CM_MRA_LAP_SENT)) { 2901 ret = -EINVAL; 2902 goto out; 2903 } 2904 2905 
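	/*
	 * An APR is only valid while a received LAP is outstanding
	 * (IB_CM_LAP_RCVD or IB_CM_MRA_LAP_SENT, checked above); a
	 * successful send returns lap_state to IB_CM_LAP_IDLE below.
	 */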
ret = cm_alloc_msg(cm_id_priv, &msg); 2906 if (ret) 2907 goto out; 2908 2909 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status, 2910 info, info_length, private_data, private_data_len); 2911 ret = ib_post_send_mad(msg, NULL); 2912 if (ret) { 2913 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2914 cm_free_msg(msg); 2915 return ret; 2916 } 2917 2918 cm_id->lap_state = IB_CM_LAP_IDLE; 2919 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2920 return ret; 2921 } 2922 EXPORT_SYMBOL(ib_send_cm_apr); 2923 2924 static int cm_apr_handler(struct cm_work *work) 2925 { 2926 struct cm_id_private *cm_id_priv; 2927 struct cm_apr_msg *apr_msg; 2928 int ret; 2929 2930 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad; 2931 cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id, 2932 apr_msg->local_comm_id); 2933 if (!cm_id_priv) 2934 return -EINVAL; /* Unmatched reply. */ 2935 2936 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status; 2937 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info; 2938 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length; 2939 work->cm_event.private_data = &apr_msg->private_data; 2940 2941 spin_lock_irq(&cm_id_priv->lock); 2942 if (cm_id_priv->id.state != IB_CM_ESTABLISHED || 2943 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT && 2944 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) { 2945 spin_unlock_irq(&cm_id_priv->lock); 2946 goto out; 2947 } 2948 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE; 2949 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2950 cm_id_priv->msg = NULL; 2951 2952 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2953 if (!ret) 2954 list_add_tail(&work->list, &cm_id_priv->work_list); 2955 spin_unlock_irq(&cm_id_priv->lock); 2956 2957 if (ret) 2958 cm_process_work(cm_id_priv, work); 2959 else 2960 cm_deref_id(cm_id_priv); 2961 return 0; 2962 out: 2963 cm_deref_id(cm_id_priv); 2964 return -EINVAL; 2965 } 2966 2967 static int cm_timewait_handler(struct cm_work *work) 2968 { 2969 struct cm_timewait_info *timewait_info; 2970 struct cm_id_private *cm_id_priv; 2971 int ret; 2972 2973 timewait_info = (struct cm_timewait_info *)work; 2974 spin_lock_irq(&cm.lock); 2975 list_del(&timewait_info->list); 2976 spin_unlock_irq(&cm.lock); 2977 2978 cm_id_priv = cm_acquire_id(timewait_info->work.local_id, 2979 timewait_info->work.remote_id); 2980 if (!cm_id_priv) 2981 return -EINVAL; 2982 2983 spin_lock_irq(&cm_id_priv->lock); 2984 if (cm_id_priv->id.state != IB_CM_TIMEWAIT || 2985 cm_id_priv->remote_qpn != timewait_info->remote_qpn) { 2986 spin_unlock_irq(&cm_id_priv->lock); 2987 goto out; 2988 } 2989 cm_id_priv->id.state = IB_CM_IDLE; 2990 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2991 if (!ret) 2992 list_add_tail(&work->list, &cm_id_priv->work_list); 2993 spin_unlock_irq(&cm_id_priv->lock); 2994 2995 if (ret) 2996 cm_process_work(cm_id_priv, work); 2997 else 2998 cm_deref_id(cm_id_priv); 2999 return 0; 3000 out: 3001 cm_deref_id(cm_id_priv); 3002 return -EINVAL; 3003 } 3004 3005 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg, 3006 struct cm_id_private *cm_id_priv, 3007 struct ib_cm_sidr_req_param *param) 3008 { 3009 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID, 3010 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR)); 3011 sidr_req_msg->request_id = cm_id_priv->id.local_id; 3012 sidr_req_msg->pkey = param->path->pkey; 3013 sidr_req_msg->service_id = param->service_id; 3014 3015 if (param->private_data && param->private_data_len) 3016 memcpy(sidr_req_msg->private_data, 
param->private_data, 3017 param->private_data_len); 3018 } 3019 3020 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id, 3021 struct ib_cm_sidr_req_param *param) 3022 { 3023 struct cm_id_private *cm_id_priv; 3024 struct ib_mad_send_buf *msg; 3025 unsigned long flags; 3026 int ret; 3027 3028 if (!param->path || (param->private_data && 3029 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE)) 3030 return -EINVAL; 3031 3032 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3033 ret = cm_init_av_by_path(param->path, &cm_id_priv->av); 3034 if (ret) 3035 goto out; 3036 3037 cm_id->service_id = param->service_id; 3038 cm_id->service_mask = ~cpu_to_be64(0); 3039 cm_id_priv->timeout_ms = param->timeout_ms; 3040 cm_id_priv->max_cm_retries = param->max_cm_retries; 3041 ret = cm_alloc_msg(cm_id_priv, &msg); 3042 if (ret) 3043 goto out; 3044 3045 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv, 3046 param); 3047 msg->timeout_ms = cm_id_priv->timeout_ms; 3048 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT; 3049 3050 spin_lock_irqsave(&cm_id_priv->lock, flags); 3051 if (cm_id->state == IB_CM_IDLE) 3052 ret = ib_post_send_mad(msg, NULL); 3053 else 3054 ret = -EINVAL; 3055 3056 if (ret) { 3057 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3058 cm_free_msg(msg); 3059 goto out; 3060 } 3061 cm_id->state = IB_CM_SIDR_REQ_SENT; 3062 cm_id_priv->msg = msg; 3063 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3064 out: 3065 return ret; 3066 } 3067 EXPORT_SYMBOL(ib_send_cm_sidr_req); 3068 3069 static void cm_format_sidr_req_event(struct cm_work *work, 3070 struct ib_cm_id *listen_id) 3071 { 3072 struct cm_sidr_req_msg *sidr_req_msg; 3073 struct ib_cm_sidr_req_event_param *param; 3074 3075 sidr_req_msg = (struct cm_sidr_req_msg *) 3076 work->mad_recv_wc->recv_buf.mad; 3077 param = &work->cm_event.param.sidr_req_rcvd; 3078 param->pkey = __be16_to_cpu(sidr_req_msg->pkey); 3079 param->listen_id = listen_id; 3080 param->service_id = sidr_req_msg->service_id; 3081 param->bth_pkey = cm_get_bth_pkey(work); 3082 param->port = work->port->port_num; 3083 work->cm_event.private_data = &sidr_req_msg->private_data; 3084 } 3085 3086 static int cm_sidr_req_handler(struct cm_work *work) 3087 { 3088 struct ib_cm_id *cm_id; 3089 struct cm_id_private *cm_id_priv, *cur_cm_id_priv; 3090 struct cm_sidr_req_msg *sidr_req_msg; 3091 struct ib_wc *wc; 3092 3093 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL); 3094 if (IS_ERR(cm_id)) 3095 return PTR_ERR(cm_id); 3096 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3097 3098 /* Record SGID/SLID and request ID for lookup. */ 3099 sidr_req_msg = (struct cm_sidr_req_msg *) 3100 work->mad_recv_wc->recv_buf.mad; 3101 wc = work->mad_recv_wc->wc; 3102 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid); 3103 cm_id_priv->av.dgid.global.interface_id = 0; 3104 cm_init_av_for_response(work->port, work->mad_recv_wc->wc, 3105 work->mad_recv_wc->recv_buf.grh, 3106 &cm_id_priv->av); 3107 cm_id_priv->id.remote_id = sidr_req_msg->request_id; 3108 cm_id_priv->tid = sidr_req_msg->hdr.tid; 3109 atomic_inc(&cm_id_priv->work_count); 3110 3111 spin_lock_irq(&cm.lock); 3112 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); 3113 if (cur_cm_id_priv) { 3114 spin_unlock_irq(&cm.lock); 3115 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 3116 counter[CM_SIDR_REQ_COUNTER]); 3117 goto out; /* Duplicate message. 
*/ 3118 } 3119 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD; 3120 cur_cm_id_priv = cm_find_listen(cm_id->device, 3121 sidr_req_msg->service_id); 3122 if (!cur_cm_id_priv) { 3123 spin_unlock_irq(&cm.lock); 3124 cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED); 3125 goto out; /* No match. */ 3126 } 3127 atomic_inc(&cur_cm_id_priv->refcount); 3128 atomic_inc(&cm_id_priv->refcount); 3129 spin_unlock_irq(&cm.lock); 3130 3131 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler; 3132 cm_id_priv->id.context = cur_cm_id_priv->id.context; 3133 cm_id_priv->id.service_id = sidr_req_msg->service_id; 3134 cm_id_priv->id.service_mask = ~cpu_to_be64(0); 3135 3136 cm_format_sidr_req_event(work, &cur_cm_id_priv->id); 3137 cm_process_work(cm_id_priv, work); 3138 cm_deref_id(cur_cm_id_priv); 3139 return 0; 3140 out: 3141 ib_destroy_cm_id(&cm_id_priv->id); 3142 return -EINVAL; 3143 } 3144 3145 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg, 3146 struct cm_id_private *cm_id_priv, 3147 struct ib_cm_sidr_rep_param *param) 3148 { 3149 cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID, 3150 cm_id_priv->tid); 3151 sidr_rep_msg->request_id = cm_id_priv->id.remote_id; 3152 sidr_rep_msg->status = param->status; 3153 cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num)); 3154 sidr_rep_msg->service_id = cm_id_priv->id.service_id; 3155 sidr_rep_msg->qkey = cpu_to_be32(param->qkey); 3156 3157 if (param->info && param->info_length) 3158 memcpy(sidr_rep_msg->info, param->info, param->info_length); 3159 3160 if (param->private_data && param->private_data_len) 3161 memcpy(sidr_rep_msg->private_data, param->private_data, 3162 param->private_data_len); 3163 } 3164 3165 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, 3166 struct ib_cm_sidr_rep_param *param) 3167 { 3168 struct cm_id_private *cm_id_priv; 3169 struct ib_mad_send_buf *msg; 3170 unsigned long flags; 3171 int ret; 3172 3173 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) || 3174 (param->private_data && 3175 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE)) 3176 return -EINVAL; 3177 3178 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3179 spin_lock_irqsave(&cm_id_priv->lock, flags); 3180 if (cm_id->state != IB_CM_SIDR_REQ_RCVD) { 3181 ret = -EINVAL; 3182 goto error; 3183 } 3184 3185 ret = cm_alloc_msg(cm_id_priv, &msg); 3186 if (ret) 3187 goto error; 3188 3189 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv, 3190 param); 3191 ret = ib_post_send_mad(msg, NULL); 3192 if (ret) { 3193 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3194 cm_free_msg(msg); 3195 return ret; 3196 } 3197 cm_id->state = IB_CM_IDLE; 3198 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3199 3200 spin_lock_irqsave(&cm.lock, flags); 3201 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) { 3202 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); 3203 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node); 3204 } 3205 spin_unlock_irqrestore(&cm.lock, flags); 3206 return 0; 3207 3208 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3209 return ret; 3210 } 3211 EXPORT_SYMBOL(ib_send_cm_sidr_rep); 3212 3213 static void cm_format_sidr_rep_event(struct cm_work *work) 3214 { 3215 struct cm_sidr_rep_msg *sidr_rep_msg; 3216 struct ib_cm_sidr_rep_event_param *param; 3217 3218 sidr_rep_msg = (struct cm_sidr_rep_msg *) 3219 work->mad_recv_wc->recv_buf.mad; 3220 param = &work->cm_event.param.sidr_rep_rcvd; 3221 param->status = sidr_rep_msg->status; 3222 param->qkey = be32_to_cpu(sidr_rep_msg->qkey); 3223 
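	/*
	 * The QPN occupies 24 bits on the wire; the accessor below
	 * extracts it before the conversion to host order.
	 */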
param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg)); 3224 param->info = &sidr_rep_msg->info; 3225 param->info_len = sidr_rep_msg->info_length; 3226 work->cm_event.private_data = &sidr_rep_msg->private_data; 3227 } 3228 3229 static int cm_sidr_rep_handler(struct cm_work *work) 3230 { 3231 struct cm_sidr_rep_msg *sidr_rep_msg; 3232 struct cm_id_private *cm_id_priv; 3233 3234 sidr_rep_msg = (struct cm_sidr_rep_msg *) 3235 work->mad_recv_wc->recv_buf.mad; 3236 cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0); 3237 if (!cm_id_priv) 3238 return -EINVAL; /* Unmatched reply. */ 3239 3240 spin_lock_irq(&cm_id_priv->lock); 3241 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) { 3242 spin_unlock_irq(&cm_id_priv->lock); 3243 goto out; 3244 } 3245 cm_id_priv->id.state = IB_CM_IDLE; 3246 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 3247 spin_unlock_irq(&cm_id_priv->lock); 3248 3249 cm_format_sidr_rep_event(work); 3250 cm_process_work(cm_id_priv, work); 3251 return 0; 3252 out: 3253 cm_deref_id(cm_id_priv); 3254 return -EINVAL; 3255 } 3256 3257 static void cm_process_send_error(struct ib_mad_send_buf *msg, 3258 enum ib_wc_status wc_status) 3259 { 3260 struct cm_id_private *cm_id_priv; 3261 struct ib_cm_event cm_event; 3262 enum ib_cm_state state; 3263 int ret; 3264 3265 memset(&cm_event, 0, sizeof cm_event); 3266 cm_id_priv = msg->context[0]; 3267 3268 /* Discard old sends or ones without a response. */ 3269 spin_lock_irq(&cm_id_priv->lock); 3270 state = (enum ib_cm_state) (unsigned long) msg->context[1]; 3271 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state) 3272 goto discard; 3273 3274 switch (state) { 3275 case IB_CM_REQ_SENT: 3276 case IB_CM_MRA_REQ_RCVD: 3277 cm_reset_to_idle(cm_id_priv); 3278 cm_event.event = IB_CM_REQ_ERROR; 3279 break; 3280 case IB_CM_REP_SENT: 3281 case IB_CM_MRA_REP_RCVD: 3282 cm_reset_to_idle(cm_id_priv); 3283 cm_event.event = IB_CM_REP_ERROR; 3284 break; 3285 case IB_CM_DREQ_SENT: 3286 cm_enter_timewait(cm_id_priv); 3287 cm_event.event = IB_CM_DREQ_ERROR; 3288 break; 3289 case IB_CM_SIDR_REQ_SENT: 3290 cm_id_priv->id.state = IB_CM_IDLE; 3291 cm_event.event = IB_CM_SIDR_REQ_ERROR; 3292 break; 3293 default: 3294 goto discard; 3295 } 3296 spin_unlock_irq(&cm_id_priv->lock); 3297 cm_event.param.send_status = wc_status; 3298 3299 /* No other events can occur on the cm_id at this point. */ 3300 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event); 3301 cm_free_msg(msg); 3302 if (ret) 3303 ib_destroy_cm_id(&cm_id_priv->id); 3304 return; 3305 discard: 3306 spin_unlock_irq(&cm_id_priv->lock); 3307 cm_free_msg(msg); 3308 } 3309 3310 static void cm_send_handler(struct ib_mad_agent *mad_agent, 3311 struct ib_mad_send_wc *mad_send_wc) 3312 { 3313 struct ib_mad_send_buf *msg = mad_send_wc->send_buf; 3314 struct cm_port *port; 3315 u16 attr_index; 3316 3317 port = mad_agent->context; 3318 attr_index = be16_to_cpu(((struct ib_mad_hdr *) 3319 msg->mad)->attr_id) - CM_ATTR_ID_OFFSET; 3320 3321 /* 3322 * If the send was in response to a received message (context[0] is not 3323 * set to a cm_id), and is not a REJ, then it is a send that was 3324 * manually retried. 3325 */ 3326 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER)) 3327 msg->retries = 1; 3328 3329 atomic_long_add(1 + msg->retries, 3330 &port->counter_group[CM_XMIT].counter[attr_index]); 3331 if (msg->retries) 3332 atomic_long_add(msg->retries, 3333 &port->counter_group[CM_XMIT_RETRIES]. 
3334 counter[attr_index]); 3335 3336 switch (mad_send_wc->status) { 3337 case IB_WC_SUCCESS: 3338 case IB_WC_WR_FLUSH_ERR: 3339 cm_free_msg(msg); 3340 break; 3341 default: 3342 if (msg->context[0] && msg->context[1]) 3343 cm_process_send_error(msg, mad_send_wc->status); 3344 else 3345 cm_free_msg(msg); 3346 break; 3347 } 3348 } 3349 3350 static void cm_work_handler(struct work_struct *_work) 3351 { 3352 struct cm_work *work = container_of(_work, struct cm_work, work.work); 3353 int ret; 3354 3355 switch (work->cm_event.event) { 3356 case IB_CM_REQ_RECEIVED: 3357 ret = cm_req_handler(work); 3358 break; 3359 case IB_CM_MRA_RECEIVED: 3360 ret = cm_mra_handler(work); 3361 break; 3362 case IB_CM_REJ_RECEIVED: 3363 ret = cm_rej_handler(work); 3364 break; 3365 case IB_CM_REP_RECEIVED: 3366 ret = cm_rep_handler(work); 3367 break; 3368 case IB_CM_RTU_RECEIVED: 3369 ret = cm_rtu_handler(work); 3370 break; 3371 case IB_CM_USER_ESTABLISHED: 3372 ret = cm_establish_handler(work); 3373 break; 3374 case IB_CM_DREQ_RECEIVED: 3375 ret = cm_dreq_handler(work); 3376 break; 3377 case IB_CM_DREP_RECEIVED: 3378 ret = cm_drep_handler(work); 3379 break; 3380 case IB_CM_SIDR_REQ_RECEIVED: 3381 ret = cm_sidr_req_handler(work); 3382 break; 3383 case IB_CM_SIDR_REP_RECEIVED: 3384 ret = cm_sidr_rep_handler(work); 3385 break; 3386 case IB_CM_LAP_RECEIVED: 3387 ret = cm_lap_handler(work); 3388 break; 3389 case IB_CM_APR_RECEIVED: 3390 ret = cm_apr_handler(work); 3391 break; 3392 case IB_CM_TIMEWAIT_EXIT: 3393 ret = cm_timewait_handler(work); 3394 break; 3395 default: 3396 ret = -EINVAL; 3397 break; 3398 } 3399 if (ret) 3400 cm_free_work(work); 3401 } 3402 3403 static int cm_establish(struct ib_cm_id *cm_id) 3404 { 3405 struct cm_id_private *cm_id_priv; 3406 struct cm_work *work; 3407 unsigned long flags; 3408 int ret = 0; 3409 struct cm_device *cm_dev; 3410 3411 cm_dev = ib_get_client_data(cm_id->device, &cm_client); 3412 if (!cm_dev) 3413 return -ENODEV; 3414 3415 work = kmalloc(sizeof *work, GFP_ATOMIC); 3416 if (!work) 3417 return -ENOMEM; 3418 3419 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3420 spin_lock_irqsave(&cm_id_priv->lock, flags); 3421 switch (cm_id->state) 3422 { 3423 case IB_CM_REP_SENT: 3424 case IB_CM_MRA_REP_RCVD: 3425 cm_id->state = IB_CM_ESTABLISHED; 3426 break; 3427 case IB_CM_ESTABLISHED: 3428 ret = -EISCONN; 3429 break; 3430 default: 3431 ret = -EINVAL; 3432 break; 3433 } 3434 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3435 3436 if (ret) { 3437 kfree(work); 3438 goto out; 3439 } 3440 3441 /* 3442 * The CM worker thread may try to destroy the cm_id before it 3443 * can execute this work item. To prevent potential deadlock, 3444 * we need to find the cm_id once we're in the context of the 3445 * worker thread, rather than holding a reference on it. 
3446 */ 3447 INIT_DELAYED_WORK(&work->work, cm_work_handler); 3448 work->local_id = cm_id->local_id; 3449 work->remote_id = cm_id->remote_id; 3450 work->mad_recv_wc = NULL; 3451 work->cm_event.event = IB_CM_USER_ESTABLISHED; 3452 3453 /* Check if the device started its remove_one */ 3454 spin_lock_irq(&cm.lock); 3455 if (!cm_dev->going_down) { 3456 queue_delayed_work(cm.wq, &work->work, 0); 3457 } else { 3458 kfree(work); 3459 ret = -ENODEV; 3460 } 3461 spin_unlock_irq(&cm.lock); 3462 3463 out: 3464 return ret; 3465 } 3466 3467 static int cm_migrate(struct ib_cm_id *cm_id) 3468 { 3469 struct cm_id_private *cm_id_priv; 3470 unsigned long flags; 3471 int ret = 0; 3472 3473 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3474 spin_lock_irqsave(&cm_id_priv->lock, flags); 3475 if (cm_id->state == IB_CM_ESTABLISHED && 3476 (cm_id->lap_state == IB_CM_LAP_UNINIT || 3477 cm_id->lap_state == IB_CM_LAP_IDLE)) { 3478 cm_id->lap_state = IB_CM_LAP_IDLE; 3479 cm_id_priv->av = cm_id_priv->alt_av; 3480 } else 3481 ret = -EINVAL; 3482 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3483 3484 return ret; 3485 } 3486 3487 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event) 3488 { 3489 int ret; 3490 3491 switch (event) { 3492 case IB_EVENT_COMM_EST: 3493 ret = cm_establish(cm_id); 3494 break; 3495 case IB_EVENT_PATH_MIG: 3496 ret = cm_migrate(cm_id); 3497 break; 3498 default: 3499 ret = -EINVAL; 3500 } 3501 return ret; 3502 } 3503 EXPORT_SYMBOL(ib_cm_notify); 3504 3505 static void cm_recv_handler(struct ib_mad_agent *mad_agent, 3506 struct ib_mad_recv_wc *mad_recv_wc) 3507 { 3508 struct cm_port *port = mad_agent->context; 3509 struct cm_work *work; 3510 enum ib_cm_event_type event; 3511 u16 attr_id; 3512 int paths = 0; 3513 int going_down = 0; 3514 3515 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) { 3516 case CM_REQ_ATTR_ID: 3517 paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)-> 3518 alt_local_lid != 0); 3519 event = IB_CM_REQ_RECEIVED; 3520 break; 3521 case CM_MRA_ATTR_ID: 3522 event = IB_CM_MRA_RECEIVED; 3523 break; 3524 case CM_REJ_ATTR_ID: 3525 event = IB_CM_REJ_RECEIVED; 3526 break; 3527 case CM_REP_ATTR_ID: 3528 event = IB_CM_REP_RECEIVED; 3529 break; 3530 case CM_RTU_ATTR_ID: 3531 event = IB_CM_RTU_RECEIVED; 3532 break; 3533 case CM_DREQ_ATTR_ID: 3534 event = IB_CM_DREQ_RECEIVED; 3535 break; 3536 case CM_DREP_ATTR_ID: 3537 event = IB_CM_DREP_RECEIVED; 3538 break; 3539 case CM_SIDR_REQ_ATTR_ID: 3540 event = IB_CM_SIDR_REQ_RECEIVED; 3541 break; 3542 case CM_SIDR_REP_ATTR_ID: 3543 event = IB_CM_SIDR_REP_RECEIVED; 3544 break; 3545 case CM_LAP_ATTR_ID: 3546 paths = 1; 3547 event = IB_CM_LAP_RECEIVED; 3548 break; 3549 case CM_APR_ATTR_ID: 3550 event = IB_CM_APR_RECEIVED; 3551 break; 3552 default: 3553 ib_free_recv_mad(mad_recv_wc); 3554 return; 3555 } 3556 3557 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id); 3558 atomic_long_inc(&port->counter_group[CM_RECV]. 
3559 counter[attr_id - CM_ATTR_ID_OFFSET]); 3560 3561 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths, 3562 GFP_KERNEL); 3563 if (!work) { 3564 ib_free_recv_mad(mad_recv_wc); 3565 return; 3566 } 3567 3568 INIT_DELAYED_WORK(&work->work, cm_work_handler); 3569 work->cm_event.event = event; 3570 work->mad_recv_wc = mad_recv_wc; 3571 work->port = port; 3572 3573 /* Check if the device started its remove_one */ 3574 spin_lock_irq(&cm.lock); 3575 if (!port->cm_dev->going_down) 3576 queue_delayed_work(cm.wq, &work->work, 0); 3577 else 3578 going_down = 1; 3579 spin_unlock_irq(&cm.lock); 3580 3581 if (going_down) { 3582 kfree(work); 3583 ib_free_recv_mad(mad_recv_wc); 3584 } 3585 } 3586 3587 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, 3588 struct ib_qp_attr *qp_attr, 3589 int *qp_attr_mask) 3590 { 3591 unsigned long flags; 3592 int ret; 3593 3594 spin_lock_irqsave(&cm_id_priv->lock, flags); 3595 switch (cm_id_priv->id.state) { 3596 case IB_CM_REQ_SENT: 3597 case IB_CM_MRA_REQ_RCVD: 3598 case IB_CM_REQ_RCVD: 3599 case IB_CM_MRA_REQ_SENT: 3600 case IB_CM_REP_RCVD: 3601 case IB_CM_MRA_REP_SENT: 3602 case IB_CM_REP_SENT: 3603 case IB_CM_MRA_REP_RCVD: 3604 case IB_CM_ESTABLISHED: 3605 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | 3606 IB_QP_PKEY_INDEX | IB_QP_PORT; 3607 qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE; 3608 if (cm_id_priv->responder_resources) 3609 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ | 3610 IB_ACCESS_REMOTE_ATOMIC; 3611 qp_attr->pkey_index = cm_id_priv->av.pkey_index; 3612 qp_attr->port_num = cm_id_priv->av.port->port_num; 3613 ret = 0; 3614 break; 3615 default: 3616 ret = -EINVAL; 3617 break; 3618 } 3619 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3620 return ret; 3621 } 3622 3623 static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv, 3624 struct ib_qp_attr *qp_attr, 3625 int *qp_attr_mask) 3626 { 3627 unsigned long flags; 3628 int ret; 3629 3630 spin_lock_irqsave(&cm_id_priv->lock, flags); 3631 switch (cm_id_priv->id.state) { 3632 case IB_CM_REQ_RCVD: 3633 case IB_CM_MRA_REQ_SENT: 3634 case IB_CM_REP_RCVD: 3635 case IB_CM_MRA_REP_SENT: 3636 case IB_CM_REP_SENT: 3637 case IB_CM_MRA_REP_RCVD: 3638 case IB_CM_ESTABLISHED: 3639 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | 3640 IB_QP_DEST_QPN | IB_QP_RQ_PSN; 3641 qp_attr->ah_attr = cm_id_priv->av.ah_attr; 3642 qp_attr->path_mtu = cm_id_priv->path_mtu; 3643 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn); 3644 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn); 3645 if (cm_id_priv->qp_type == IB_QPT_RC || 3646 cm_id_priv->qp_type == IB_QPT_XRC_TGT) { 3647 *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC | 3648 IB_QP_MIN_RNR_TIMER; 3649 qp_attr->max_dest_rd_atomic = 3650 cm_id_priv->responder_resources; 3651 qp_attr->min_rnr_timer = 0; 3652 } 3653 if (cm_id_priv->alt_av.ah_attr.dlid) { 3654 *qp_attr_mask |= IB_QP_ALT_PATH; 3655 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num; 3656 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index; 3657 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout; 3658 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr; 3659 } 3660 ret = 0; 3661 break; 3662 default: 3663 ret = -EINVAL; 3664 break; 3665 } 3666 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3667 return ret; 3668 } 3669 3670 static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv, 3671 struct ib_qp_attr *qp_attr, 3672 int *qp_attr_mask) 3673 { 3674 unsigned long flags; 3675 int ret; 3676 3677 spin_lock_irqsave(&cm_id_priv->lock, flags); 
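	/*
	 * Two cases below: before an alternate path has been loaded
	 * (lap_state is IB_CM_LAP_UNINIT) the full SQ PSN/retry/timeout
	 * attributes are built; afterwards only the alternate path and
	 * IB_MIG_REARM need to be programmed.
	 */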
static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
			       char *buf)
{
	struct cm_counter_group *group;
	struct cm_counter_attribute *cm_attr;

	group = container_of(obj, struct cm_counter_group, obj);
	cm_attr = container_of(attr, struct cm_counter_attribute, attr);

	return sprintf(buf, "%ld\n",
		       atomic_long_read(&group->counter[cm_attr->index]));
}

static const struct sysfs_ops cm_counter_ops = {
	.show = cm_show_counter
};

static struct kobj_type cm_counter_obj_type = {
	.sysfs_ops = &cm_counter_ops,
	.default_attrs = cm_counter_default_attrs
};

static void cm_release_port_obj(struct kobject *obj)
{
	struct cm_port *cm_port;

	cm_port = container_of(obj, struct cm_port, port_obj);
	kfree(cm_port);
}

static struct kobj_type cm_port_obj_type = {
	.release = cm_release_port_obj
};

static char *cm_devnode(struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0666;
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

struct class cm_class = {
	.owner   = THIS_MODULE,
	.name    = "infiniband_cm",
	.devnode = cm_devnode,
};
EXPORT_SYMBOL(cm_class);
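
/*
 * cm_create_port_fs() below publishes the per-port counter groups under
 * the cm_class device, giving a sysfs tree of roughly the following
 * shape ("mlx4_0" and port "1" are just example names):
 *
 *	/sys/class/infiniband_cm/mlx4_0/1/cm_tx_msgs/req
 *	/sys/class/infiniband_cm/mlx4_0/1/cm_rx_duplicates/rep
 *	...
 *
 * i.e. one directory per group in counter_group_names, each holding one
 * read-only file per CM attribute (req, mra, rej, rep, ...).
 */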
= "infiniband_cm", 3799 .devnode = cm_devnode, 3800 }; 3801 EXPORT_SYMBOL(cm_class); 3802 3803 static int cm_create_port_fs(struct cm_port *port) 3804 { 3805 int i, ret; 3806 3807 ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type, 3808 &port->cm_dev->device->kobj, 3809 "%d", port->port_num); 3810 if (ret) { 3811 kfree(port); 3812 return ret; 3813 } 3814 3815 for (i = 0; i < CM_COUNTER_GROUPS; i++) { 3816 ret = kobject_init_and_add(&port->counter_group[i].obj, 3817 &cm_counter_obj_type, 3818 &port->port_obj, 3819 "%s", counter_group_names[i]); 3820 if (ret) 3821 goto error; 3822 } 3823 3824 return 0; 3825 3826 error: 3827 while (i--) 3828 kobject_put(&port->counter_group[i].obj); 3829 kobject_put(&port->port_obj); 3830 return ret; 3831 3832 } 3833 3834 static void cm_remove_port_fs(struct cm_port *port) 3835 { 3836 int i; 3837 3838 for (i = 0; i < CM_COUNTER_GROUPS; i++) 3839 kobject_put(&port->counter_group[i].obj); 3840 3841 kobject_put(&port->port_obj); 3842 } 3843 3844 static void cm_add_one(struct ib_device *ib_device) 3845 { 3846 struct cm_device *cm_dev; 3847 struct cm_port *port; 3848 struct ib_mad_reg_req reg_req = { 3849 .mgmt_class = IB_MGMT_CLASS_CM, 3850 .mgmt_class_version = IB_CM_CLASS_VERSION, 3851 }; 3852 struct ib_port_modify port_modify = { 3853 .set_port_cap_mask = IB_PORT_CM_SUP 3854 }; 3855 unsigned long flags; 3856 int ret; 3857 int count = 0; 3858 u8 i; 3859 3860 cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) * 3861 ib_device->phys_port_cnt, GFP_KERNEL); 3862 if (!cm_dev) 3863 return; 3864 3865 cm_dev->ib_device = ib_device; 3866 cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay; 3867 cm_dev->going_down = 0; 3868 cm_dev->device = device_create(&cm_class, &ib_device->dev, 3869 MKDEV(0, 0), NULL, 3870 "%s", ib_device->name); 3871 if (IS_ERR(cm_dev->device)) { 3872 kfree(cm_dev); 3873 return; 3874 } 3875 3876 set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask); 3877 for (i = 1; i <= ib_device->phys_port_cnt; i++) { 3878 if (!rdma_cap_ib_cm(ib_device, i)) 3879 continue; 3880 3881 port = kzalloc(sizeof *port, GFP_KERNEL); 3882 if (!port) 3883 goto error1; 3884 3885 cm_dev->port[i-1] = port; 3886 port->cm_dev = cm_dev; 3887 port->port_num = i; 3888 3889 ret = cm_create_port_fs(port); 3890 if (ret) 3891 goto error1; 3892 3893 port->mad_agent = ib_register_mad_agent(ib_device, i, 3894 IB_QPT_GSI, 3895 ®_req, 3896 0, 3897 cm_send_handler, 3898 cm_recv_handler, 3899 port, 3900 0); 3901 if (IS_ERR(port->mad_agent)) 3902 goto error2; 3903 3904 ret = ib_modify_port(ib_device, i, 0, &port_modify); 3905 if (ret) 3906 goto error3; 3907 3908 count++; 3909 } 3910 3911 if (!count) 3912 goto free; 3913 3914 ib_set_client_data(ib_device, &cm_client, cm_dev); 3915 3916 write_lock_irqsave(&cm.device_lock, flags); 3917 list_add_tail(&cm_dev->list, &cm.device_list); 3918 write_unlock_irqrestore(&cm.device_lock, flags); 3919 return; 3920 3921 error3: 3922 ib_unregister_mad_agent(port->mad_agent); 3923 error2: 3924 cm_remove_port_fs(port); 3925 error1: 3926 port_modify.set_port_cap_mask = 0; 3927 port_modify.clr_port_cap_mask = IB_PORT_CM_SUP; 3928 while (--i) { 3929 if (!rdma_cap_ib_cm(ib_device, i)) 3930 continue; 3931 3932 port = cm_dev->port[i-1]; 3933 ib_modify_port(ib_device, port->port_num, 0, &port_modify); 3934 ib_unregister_mad_agent(port->mad_agent); 3935 cm_remove_port_fs(port); 3936 } 3937 free: 3938 device_unregister(cm_dev->device); 3939 kfree(cm_dev); 3940 } 3941 3942 static void cm_remove_one(struct ib_device *ib_device, void *client_data) 3943 { 3944 struct 
static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct cm_device *cm_dev = client_data;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	spin_lock_irq(&cm.lock);
	cm_dev->going_down = 1;
	spin_unlock_irq(&cm.lock);

	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		/*
		 * Flush the workqueue after setting going_down to ensure
		 * that the receive handler cannot queue any new work;
		 * only then is it safe to unregister the MAD agent.
		 */
		flush_workqueue(cm.wq);
		ib_unregister_mad_agent(port->mad_agent);
		cm_remove_port_fs(port);
	}
	device_unregister(cm_dev->device);
	kfree(cm_dev);
}

static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	INIT_LIST_HEAD(&cm.timewait_list);

	ret = class_register(&cm_class);
	if (ret) {
		ret = -ENOMEM;
		goto error1;
	}

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq) {
		ret = -ENOMEM;
		goto error2;
	}

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error3;

	return 0;
error3:
	destroy_workqueue(cm.wq);
error2:
	class_unregister(&cm_class);
error1:
	idr_destroy(&cm.local_id_table);
	return ret;
}

static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	ib_unregister_client(&cm_client);
	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	class_unregister(&cm_class);
	idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);
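
/*
 * Note on ordering: ib_cm_init() registers the class and creates the
 * workqueue before registering the IB client, since cm_add_one() runs
 * from within ib_register_client() and relies on both.  ib_cm_cleanup()
 * reverses this: the client is unregistered first (invoking
 * cm_remove_one() for each device, which flushes cm.wq) before the
 * workqueue is destroyed, and any remaining timewait entries are freed
 * only once no queued work can still reference them.
 */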