1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2 /* 3 * Copyright (c) 2004-2007 Intel Corporation. All rights reserved. 4 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 5 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved. 6 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 7 * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved. 8 */ 9 10 #include <linux/completion.h> 11 #include <linux/dma-mapping.h> 12 #include <linux/device.h> 13 #include <linux/module.h> 14 #include <linux/err.h> 15 #include <linux/idr.h> 16 #include <linux/interrupt.h> 17 #include <linux/random.h> 18 #include <linux/rbtree.h> 19 #include <linux/spinlock.h> 20 #include <linux/slab.h> 21 #include <linux/sysfs.h> 22 #include <linux/workqueue.h> 23 #include <linux/kdev_t.h> 24 #include <linux/etherdevice.h> 25 26 #include <rdma/ib_cache.h> 27 #include <rdma/ib_cm.h> 28 #include <rdma/ib_sysfs.h> 29 #include "cm_msgs.h" 30 #include "core_priv.h" 31 #include "cm_trace.h" 32 33 MODULE_AUTHOR("Sean Hefty"); 34 MODULE_DESCRIPTION("InfiniBand CM"); 35 MODULE_LICENSE("Dual BSD/GPL"); 36 37 #define CM_DIRECT_RETRY_CTX ((void *) 1UL) 38 #define CM_MRA_SETTING 24 /* 4.096us * 2^24 = ~68.7 seconds */ 39 40 static const char * const ibcm_rej_reason_strs[] = { 41 [IB_CM_REJ_NO_QP] = "no QP", 42 [IB_CM_REJ_NO_EEC] = "no EEC", 43 [IB_CM_REJ_NO_RESOURCES] = "no resources", 44 [IB_CM_REJ_TIMEOUT] = "timeout", 45 [IB_CM_REJ_UNSUPPORTED] = "unsupported", 46 [IB_CM_REJ_INVALID_COMM_ID] = "invalid comm ID", 47 [IB_CM_REJ_INVALID_COMM_INSTANCE] = "invalid comm instance", 48 [IB_CM_REJ_INVALID_SERVICE_ID] = "invalid service ID", 49 [IB_CM_REJ_INVALID_TRANSPORT_TYPE] = "invalid transport type", 50 [IB_CM_REJ_STALE_CONN] = "stale conn", 51 [IB_CM_REJ_RDC_NOT_EXIST] = "RDC not exist", 52 [IB_CM_REJ_INVALID_GID] = "invalid GID", 53 [IB_CM_REJ_INVALID_LID] = "invalid LID", 54 [IB_CM_REJ_INVALID_SL] = "invalid SL", 55 [IB_CM_REJ_INVALID_TRAFFIC_CLASS] = "invalid traffic class", 56 [IB_CM_REJ_INVALID_HOP_LIMIT] = "invalid hop limit", 57 [IB_CM_REJ_INVALID_PACKET_RATE] = "invalid packet rate", 58 [IB_CM_REJ_INVALID_ALT_GID] = "invalid alt GID", 59 [IB_CM_REJ_INVALID_ALT_LID] = "invalid alt LID", 60 [IB_CM_REJ_INVALID_ALT_SL] = "invalid alt SL", 61 [IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS] = "invalid alt traffic class", 62 [IB_CM_REJ_INVALID_ALT_HOP_LIMIT] = "invalid alt hop limit", 63 [IB_CM_REJ_INVALID_ALT_PACKET_RATE] = "invalid alt packet rate", 64 [IB_CM_REJ_PORT_CM_REDIRECT] = "port CM redirect", 65 [IB_CM_REJ_PORT_REDIRECT] = "port redirect", 66 [IB_CM_REJ_INVALID_MTU] = "invalid MTU", 67 [IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources", 68 [IB_CM_REJ_CONSUMER_DEFINED] = "consumer defined", 69 [IB_CM_REJ_INVALID_RNR_RETRY] = "invalid RNR retry", 70 [IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID] = "duplicate local comm ID", 71 [IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version", 72 [IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label", 73 [IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label", 74 [IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED] = 75 "vendor option is not supported", 76 }; 77 78 const char *__attribute_const__ ibcm_reject_msg(int reason) 79 { 80 size_t index = reason; 81 82 if (index < ARRAY_SIZE(ibcm_rej_reason_strs) && 83 ibcm_rej_reason_strs[index]) 84 return ibcm_rej_reason_strs[index]; 85 else 86 return "unrecognized reason"; 87 } 88 EXPORT_SYMBOL(ibcm_reject_msg); 89 90 struct cm_id_private; 91 struct cm_work; 92 static int cm_add_one(struct ib_device *device); 93 static void cm_remove_one(struct ib_device *device, void *client_data); 94 static void cm_process_work(struct cm_id_private *cm_id_priv, 95 struct cm_work *work); 96 static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv, 97 struct ib_cm_sidr_rep_param *param); 98 static void cm_issue_dreq(struct cm_id_private *cm_id_priv); 99 static int cm_send_drep_locked(struct cm_id_private *cm_id_priv, 100 void *private_data, u8 private_data_len); 101 static int cm_send_rej_locked(struct cm_id_private *cm_id_priv, 102 enum ib_cm_rej_reason reason, void *ari, 103 u8 ari_length, const void *private_data, 104 u8 private_data_len); 105 106 static struct ib_client cm_client = { 107 .name = "cm", 108 .add = cm_add_one, 109 .remove = cm_remove_one 110 }; 111 112 static struct ib_cm { 113 spinlock_t lock; 114 struct list_head device_list; 115 rwlock_t device_lock; 116 struct rb_root listen_service_table; 117 u64 listen_service_id; 118 /* struct rb_root peer_service_table; todo: fix peer to peer */ 119 struct rb_root remote_qp_table; 120 struct rb_root remote_id_table; 121 struct rb_root remote_sidr_table; 122 struct xarray local_id_table; 123 u32 local_id_next; 124 __be32 random_id_operand; 125 struct list_head timewait_list; 126 struct workqueue_struct *wq; 127 } cm; 128 129 /* Counter indexes ordered by attribute ID */ 130 enum { 131 CM_REQ_COUNTER, 132 CM_MRA_COUNTER, 133 CM_REJ_COUNTER, 134 CM_REP_COUNTER, 135 CM_RTU_COUNTER, 136 CM_DREQ_COUNTER, 137 CM_DREP_COUNTER, 138 CM_SIDR_REQ_COUNTER, 139 CM_SIDR_REP_COUNTER, 140 CM_LAP_COUNTER, 141 CM_APR_COUNTER, 142 CM_ATTR_COUNT, 143 CM_ATTR_ID_OFFSET = 0x0010, 144 }; 145 146 enum { 147 CM_XMIT, 148 CM_XMIT_RETRIES, 149 CM_RECV, 150 CM_RECV_DUPLICATES, 151 CM_COUNTER_GROUPS 152 }; 153 154 struct cm_counter_attribute { 155 struct ib_port_attribute attr; 156 unsigned short group; 157 unsigned short index; 158 }; 159 160 struct cm_port { 161 struct cm_device *cm_dev; 162 struct ib_mad_agent *mad_agent; 163 struct ib_mad_agent *rep_agent; 164 u32 port_num; 165 atomic_long_t counters[CM_COUNTER_GROUPS][CM_ATTR_COUNT]; 166 }; 167 168 struct cm_device { 169 struct kref kref; 170 struct list_head list; 171 rwlock_t mad_agent_lock; 172 struct ib_device *ib_device; 173 u8 ack_delay; 174 int going_down; 175 struct cm_port *port[]; 176 }; 177 178 struct cm_av { 179 struct cm_port *port; 180 struct rdma_ah_attr ah_attr; 181 u16 dlid_datapath; 182 u16 pkey_index; 183 u8 timeout; 184 }; 185 186 struct cm_work { 187 struct delayed_work work; 188 struct list_head list; 189 struct cm_port *port; 190 struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */ 191 __be32 local_id; /* Established / timewait */ 192 __be32 remote_id; 193 struct ib_cm_event cm_event; 194 struct sa_path_rec path[]; 195 }; 196 197 struct cm_timewait_info { 198 struct cm_work work; 199 struct list_head list; 200 struct rb_node remote_qp_node; 201 struct rb_node remote_id_node; 202 __be64 remote_ca_guid; 203 __be32 remote_qpn; 204 u8 inserted_remote_qp; 205 u8 inserted_remote_id; 206 }; 207 208 struct cm_id_private { 209 struct ib_cm_id id; 210 211 struct rb_node service_node; 212 struct rb_node sidr_id_node; 213 u32 sidr_slid; 214 spinlock_t lock; /* Do not acquire inside cm.lock */ 215 struct completion comp; 216 refcount_t refcount; 217 /* Number of clients sharing this ib_cm_id. Only valid for listeners. 218 * Protected by the cm.lock spinlock. 219 */ 220 int listen_sharecount; 221 struct rcu_head rcu; 222 223 struct ib_mad_send_buf *msg; 224 struct cm_timewait_info *timewait_info; 225 /* todo: use alternate port on send failure */ 226 struct cm_av av; 227 struct cm_av alt_av; 228 229 void *private_data; 230 __be64 tid; 231 __be32 local_qpn; 232 __be32 remote_qpn; 233 enum ib_qp_type qp_type; 234 __be32 sq_psn; 235 __be32 rq_psn; 236 int timeout_ms; 237 enum ib_mtu path_mtu; 238 __be16 pkey; 239 u8 private_data_len; 240 u8 max_cm_retries; 241 u8 responder_resources; 242 u8 initiator_depth; 243 u8 retry_count; 244 u8 rnr_retry_count; 245 u8 target_ack_delay; 246 247 struct list_head work_list; 248 atomic_t work_count; 249 250 struct rdma_ucm_ece ece; 251 }; 252 253 static void cm_dev_release(struct kref *kref) 254 { 255 struct cm_device *cm_dev = container_of(kref, struct cm_device, kref); 256 u32 i; 257 258 rdma_for_each_port(cm_dev->ib_device, i) 259 kfree(cm_dev->port[i - 1]); 260 261 kfree(cm_dev); 262 } 263 264 static void cm_device_put(struct cm_device *cm_dev) 265 { 266 kref_put(&cm_dev->kref, cm_dev_release); 267 } 268 269 static void cm_work_handler(struct work_struct *work); 270 271 static inline void cm_deref_id(struct cm_id_private *cm_id_priv) 272 { 273 if (refcount_dec_and_test(&cm_id_priv->refcount)) 274 complete(&cm_id_priv->comp); 275 } 276 277 static struct ib_mad_send_buf * 278 cm_alloc_msg_agent(struct cm_id_private *cm_id_priv, bool rep_agent) 279 { 280 struct ib_mad_agent *mad_agent; 281 struct ib_mad_send_buf *m; 282 struct ib_ah *ah; 283 284 lockdep_assert_held(&cm_id_priv->lock); 285 286 if (!cm_id_priv->av.port) 287 return ERR_PTR(-EINVAL); 288 289 read_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); 290 mad_agent = rep_agent ? cm_id_priv->av.port->rep_agent : 291 cm_id_priv->av.port->mad_agent; 292 if (!mad_agent) { 293 m = ERR_PTR(-EINVAL); 294 goto out; 295 } 296 297 ah = rdma_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr, 0); 298 if (IS_ERR(ah)) { 299 m = ERR_CAST(ah); 300 goto out; 301 } 302 303 m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, 304 cm_id_priv->av.pkey_index, 305 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, 306 GFP_ATOMIC, 307 IB_MGMT_BASE_VERSION); 308 if (IS_ERR(m)) { 309 rdma_destroy_ah(ah, 0); 310 goto out; 311 } 312 313 m->ah = ah; 314 315 out: 316 read_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); 317 return m; 318 } 319 320 static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv) 321 { 322 return cm_alloc_msg_agent(cm_id_priv, false); 323 } 324 325 static void cm_free_msg(struct ib_mad_send_buf *msg) 326 { 327 if (msg->ah) 328 rdma_destroy_ah(msg->ah, 0); 329 ib_free_send_mad(msg); 330 } 331 332 static struct ib_mad_send_buf * 333 cm_alloc_priv_msg_rep(struct cm_id_private *cm_id_priv, enum ib_cm_state state, 334 bool rep_agent) 335 { 336 struct ib_mad_send_buf *msg; 337 338 lockdep_assert_held(&cm_id_priv->lock); 339 340 msg = cm_alloc_msg_agent(cm_id_priv, rep_agent); 341 if (IS_ERR(msg)) 342 return msg; 343 344 cm_id_priv->msg = msg; 345 refcount_inc(&cm_id_priv->refcount); 346 msg->context[0] = cm_id_priv; 347 msg->context[1] = (void *) (unsigned long) state; 348 349 msg->retries = cm_id_priv->max_cm_retries; 350 msg->timeout_ms = cm_id_priv->timeout_ms; 351 352 return msg; 353 } 354 355 static struct ib_mad_send_buf * 356 cm_alloc_priv_msg(struct cm_id_private *cm_id_priv, enum ib_cm_state state) 357 { 358 return cm_alloc_priv_msg_rep(cm_id_priv, state, false); 359 } 360 361 static void cm_free_priv_msg(struct ib_mad_send_buf *msg) 362 { 363 struct cm_id_private *cm_id_priv = msg->context[0]; 364 365 lockdep_assert_held(&cm_id_priv->lock); 366 367 if (!WARN_ON(cm_id_priv->msg != msg)) 368 cm_id_priv->msg = NULL; 369 370 if (msg->ah) 371 rdma_destroy_ah(msg->ah, 0); 372 cm_deref_id(cm_id_priv); 373 ib_free_send_mad(msg); 374 } 375 376 static struct ib_mad_send_buf * 377 cm_alloc_response_msg_no_ah(struct cm_port *port, 378 struct ib_mad_recv_wc *mad_recv_wc, 379 bool direct_retry) 380 { 381 struct ib_mad_send_buf *m; 382 383 m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index, 384 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, 385 GFP_ATOMIC, IB_MGMT_BASE_VERSION); 386 if (!IS_ERR(m)) 387 m->context[0] = direct_retry ? CM_DIRECT_RETRY_CTX : NULL; 388 389 return m; 390 } 391 392 static int cm_create_response_msg_ah(struct cm_port *port, 393 struct ib_mad_recv_wc *mad_recv_wc, 394 struct ib_mad_send_buf *msg) 395 { 396 struct ib_ah *ah; 397 398 ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc, 399 mad_recv_wc->recv_buf.grh, port->port_num); 400 if (IS_ERR(ah)) 401 return PTR_ERR(ah); 402 403 msg->ah = ah; 404 return 0; 405 } 406 407 static int cm_alloc_response_msg(struct cm_port *port, 408 struct ib_mad_recv_wc *mad_recv_wc, 409 bool direct_retry, 410 struct ib_mad_send_buf **msg) 411 { 412 struct ib_mad_send_buf *m; 413 int ret; 414 415 m = cm_alloc_response_msg_no_ah(port, mad_recv_wc, direct_retry); 416 if (IS_ERR(m)) 417 return PTR_ERR(m); 418 419 ret = cm_create_response_msg_ah(port, mad_recv_wc, m); 420 if (ret) { 421 ib_free_send_mad(m); 422 return ret; 423 } 424 425 *msg = m; 426 return 0; 427 } 428 429 static void *cm_copy_private_data(const void *private_data, u8 private_data_len) 430 { 431 void *data; 432 433 if (!private_data || !private_data_len) 434 return NULL; 435 436 data = kmemdup(private_data, private_data_len, GFP_KERNEL); 437 if (!data) 438 return ERR_PTR(-ENOMEM); 439 440 return data; 441 } 442 443 static void cm_set_private_data(struct cm_id_private *cm_id_priv, 444 void *private_data, u8 private_data_len) 445 { 446 if (cm_id_priv->private_data && cm_id_priv->private_data_len) 447 kfree(cm_id_priv->private_data); 448 449 cm_id_priv->private_data = private_data; 450 cm_id_priv->private_data_len = private_data_len; 451 } 452 453 static void cm_set_av_port(struct cm_av *av, struct cm_port *port) 454 { 455 struct cm_port *old_port = av->port; 456 457 if (old_port == port) 458 return; 459 460 av->port = port; 461 if (old_port) 462 cm_device_put(old_port->cm_dev); 463 if (port) 464 kref_get(&port->cm_dev->kref); 465 } 466 467 static void cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc, 468 struct rdma_ah_attr *ah_attr, struct cm_av *av) 469 { 470 cm_set_av_port(av, port); 471 av->pkey_index = wc->pkey_index; 472 rdma_move_ah_attr(&av->ah_attr, ah_attr); 473 } 474 475 static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc, 476 struct ib_grh *grh, struct cm_av *av) 477 { 478 cm_set_av_port(av, port); 479 av->pkey_index = wc->pkey_index; 480 return ib_init_ah_attr_from_wc(port->cm_dev->ib_device, 481 port->port_num, wc, 482 grh, &av->ah_attr); 483 } 484 485 static struct cm_port * 486 get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr) 487 { 488 struct cm_device *cm_dev; 489 struct cm_port *port = NULL; 490 unsigned long flags; 491 492 if (attr) { 493 read_lock_irqsave(&cm.device_lock, flags); 494 list_for_each_entry(cm_dev, &cm.device_list, list) { 495 if (cm_dev->ib_device == attr->device) { 496 port = cm_dev->port[attr->port_num - 1]; 497 break; 498 } 499 } 500 read_unlock_irqrestore(&cm.device_lock, flags); 501 } else { 502 /* SGID attribute can be NULL in following 503 * conditions. 504 * (a) Alternative path 505 * (b) IB link layer without GRH 506 * (c) LAP send messages 507 */ 508 read_lock_irqsave(&cm.device_lock, flags); 509 list_for_each_entry(cm_dev, &cm.device_list, list) { 510 attr = rdma_find_gid(cm_dev->ib_device, 511 &path->sgid, 512 sa_conv_pathrec_to_gid_type(path), 513 NULL); 514 if (!IS_ERR(attr)) { 515 port = cm_dev->port[attr->port_num - 1]; 516 break; 517 } 518 } 519 read_unlock_irqrestore(&cm.device_lock, flags); 520 if (port) 521 rdma_put_gid_attr(attr); 522 } 523 return port; 524 } 525 526 static int cm_init_av_by_path(struct sa_path_rec *path, 527 const struct ib_gid_attr *sgid_attr, 528 struct cm_av *av) 529 { 530 struct rdma_ah_attr new_ah_attr; 531 struct cm_device *cm_dev; 532 struct cm_port *port; 533 int ret; 534 535 port = get_cm_port_from_path(path, sgid_attr); 536 if (!port) 537 return -EINVAL; 538 cm_dev = port->cm_dev; 539 540 ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num, 541 be16_to_cpu(path->pkey), &av->pkey_index); 542 if (ret) 543 return ret; 544 545 cm_set_av_port(av, port); 546 547 /* 548 * av->ah_attr might be initialized based on wc or during 549 * request processing time which might have reference to sgid_attr. 550 * So initialize a new ah_attr on stack. 551 * If initialization fails, old ah_attr is used for sending any 552 * responses. If initialization is successful, than new ah_attr 553 * is used by overwriting the old one. So that right ah_attr 554 * can be used to return an error response. 555 */ 556 ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path, 557 &new_ah_attr, sgid_attr); 558 if (ret) 559 return ret; 560 561 av->timeout = path->packet_life_time + 1; 562 rdma_move_ah_attr(&av->ah_attr, &new_ah_attr); 563 return 0; 564 } 565 566 /* Move av created by cm_init_av_by_path(), so av.dgid is not moved */ 567 static void cm_move_av_from_path(struct cm_av *dest, struct cm_av *src) 568 { 569 cm_set_av_port(dest, src->port); 570 cm_set_av_port(src, NULL); 571 dest->pkey_index = src->pkey_index; 572 rdma_move_ah_attr(&dest->ah_attr, &src->ah_attr); 573 dest->timeout = src->timeout; 574 } 575 576 static void cm_destroy_av(struct cm_av *av) 577 { 578 rdma_destroy_ah_attr(&av->ah_attr); 579 cm_set_av_port(av, NULL); 580 } 581 582 static u32 cm_local_id(__be32 local_id) 583 { 584 return (__force u32) (local_id ^ cm.random_id_operand); 585 } 586 587 static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id) 588 { 589 struct cm_id_private *cm_id_priv; 590 591 rcu_read_lock(); 592 cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id)); 593 if (!cm_id_priv || cm_id_priv->id.remote_id != remote_id || 594 !refcount_inc_not_zero(&cm_id_priv->refcount)) 595 cm_id_priv = NULL; 596 rcu_read_unlock(); 597 598 return cm_id_priv; 599 } 600 601 /* 602 * Trivial helpers to strip endian annotation and compare; the 603 * endianness doesn't actually matter since we just need a stable 604 * order for the RB tree. 605 */ 606 static int be32_lt(__be32 a, __be32 b) 607 { 608 return (__force u32) a < (__force u32) b; 609 } 610 611 static int be32_gt(__be32 a, __be32 b) 612 { 613 return (__force u32) a > (__force u32) b; 614 } 615 616 static int be64_lt(__be64 a, __be64 b) 617 { 618 return (__force u64) a < (__force u64) b; 619 } 620 621 static int be64_gt(__be64 a, __be64 b) 622 { 623 return (__force u64) a > (__force u64) b; 624 } 625 626 /* 627 * Inserts a new cm_id_priv into the listen_service_table. Returns cm_id_priv 628 * if the new ID was inserted, NULL if it could not be inserted due to a 629 * collision, or the existing cm_id_priv ready for shared usage. 630 */ 631 static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv, 632 ib_cm_handler shared_handler) 633 { 634 struct rb_node **link = &cm.listen_service_table.rb_node; 635 struct rb_node *parent = NULL; 636 struct cm_id_private *cur_cm_id_priv; 637 __be64 service_id = cm_id_priv->id.service_id; 638 unsigned long flags; 639 640 spin_lock_irqsave(&cm.lock, flags); 641 while (*link) { 642 parent = *link; 643 cur_cm_id_priv = rb_entry(parent, struct cm_id_private, 644 service_node); 645 646 if (cm_id_priv->id.device < cur_cm_id_priv->id.device) 647 link = &(*link)->rb_left; 648 else if (cm_id_priv->id.device > cur_cm_id_priv->id.device) 649 link = &(*link)->rb_right; 650 else if (be64_lt(service_id, cur_cm_id_priv->id.service_id)) 651 link = &(*link)->rb_left; 652 else if (be64_gt(service_id, cur_cm_id_priv->id.service_id)) 653 link = &(*link)->rb_right; 654 else { 655 /* 656 * Sharing an ib_cm_id with different handlers is not 657 * supported 658 */ 659 if (cur_cm_id_priv->id.cm_handler != shared_handler || 660 cur_cm_id_priv->id.context || 661 WARN_ON(!cur_cm_id_priv->id.cm_handler)) { 662 spin_unlock_irqrestore(&cm.lock, flags); 663 return NULL; 664 } 665 refcount_inc(&cur_cm_id_priv->refcount); 666 cur_cm_id_priv->listen_sharecount++; 667 spin_unlock_irqrestore(&cm.lock, flags); 668 return cur_cm_id_priv; 669 } 670 } 671 cm_id_priv->listen_sharecount++; 672 rb_link_node(&cm_id_priv->service_node, parent, link); 673 rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table); 674 spin_unlock_irqrestore(&cm.lock, flags); 675 return cm_id_priv; 676 } 677 678 static struct cm_id_private *cm_find_listen(struct ib_device *device, 679 __be64 service_id) 680 { 681 struct rb_node *node = cm.listen_service_table.rb_node; 682 struct cm_id_private *cm_id_priv; 683 684 while (node) { 685 cm_id_priv = rb_entry(node, struct cm_id_private, service_node); 686 687 if (device < cm_id_priv->id.device) 688 node = node->rb_left; 689 else if (device > cm_id_priv->id.device) 690 node = node->rb_right; 691 else if (be64_lt(service_id, cm_id_priv->id.service_id)) 692 node = node->rb_left; 693 else if (be64_gt(service_id, cm_id_priv->id.service_id)) 694 node = node->rb_right; 695 else { 696 refcount_inc(&cm_id_priv->refcount); 697 return cm_id_priv; 698 } 699 } 700 return NULL; 701 } 702 703 static struct cm_timewait_info * 704 cm_insert_remote_id(struct cm_timewait_info *timewait_info) 705 { 706 struct rb_node **link = &cm.remote_id_table.rb_node; 707 struct rb_node *parent = NULL; 708 struct cm_timewait_info *cur_timewait_info; 709 __be64 remote_ca_guid = timewait_info->remote_ca_guid; 710 __be32 remote_id = timewait_info->work.remote_id; 711 712 while (*link) { 713 parent = *link; 714 cur_timewait_info = rb_entry(parent, struct cm_timewait_info, 715 remote_id_node); 716 if (be32_lt(remote_id, cur_timewait_info->work.remote_id)) 717 link = &(*link)->rb_left; 718 else if (be32_gt(remote_id, cur_timewait_info->work.remote_id)) 719 link = &(*link)->rb_right; 720 else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) 721 link = &(*link)->rb_left; 722 else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) 723 link = &(*link)->rb_right; 724 else 725 return cur_timewait_info; 726 } 727 timewait_info->inserted_remote_id = 1; 728 rb_link_node(&timewait_info->remote_id_node, parent, link); 729 rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table); 730 return NULL; 731 } 732 733 static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid, 734 __be32 remote_id) 735 { 736 struct rb_node *node = cm.remote_id_table.rb_node; 737 struct cm_timewait_info *timewait_info; 738 struct cm_id_private *res = NULL; 739 740 spin_lock_irq(&cm.lock); 741 while (node) { 742 timewait_info = rb_entry(node, struct cm_timewait_info, 743 remote_id_node); 744 if (be32_lt(remote_id, timewait_info->work.remote_id)) 745 node = node->rb_left; 746 else if (be32_gt(remote_id, timewait_info->work.remote_id)) 747 node = node->rb_right; 748 else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid)) 749 node = node->rb_left; 750 else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid)) 751 node = node->rb_right; 752 else { 753 res = cm_acquire_id(timewait_info->work.local_id, 754 timewait_info->work.remote_id); 755 break; 756 } 757 } 758 spin_unlock_irq(&cm.lock); 759 return res; 760 } 761 762 static struct cm_timewait_info * 763 cm_insert_remote_qpn(struct cm_timewait_info *timewait_info) 764 { 765 struct rb_node **link = &cm.remote_qp_table.rb_node; 766 struct rb_node *parent = NULL; 767 struct cm_timewait_info *cur_timewait_info; 768 __be64 remote_ca_guid = timewait_info->remote_ca_guid; 769 __be32 remote_qpn = timewait_info->remote_qpn; 770 771 while (*link) { 772 parent = *link; 773 cur_timewait_info = rb_entry(parent, struct cm_timewait_info, 774 remote_qp_node); 775 if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn)) 776 link = &(*link)->rb_left; 777 else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn)) 778 link = &(*link)->rb_right; 779 else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) 780 link = &(*link)->rb_left; 781 else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) 782 link = &(*link)->rb_right; 783 else 784 return cur_timewait_info; 785 } 786 timewait_info->inserted_remote_qp = 1; 787 rb_link_node(&timewait_info->remote_qp_node, parent, link); 788 rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table); 789 return NULL; 790 } 791 792 static struct cm_id_private * 793 cm_insert_remote_sidr(struct cm_id_private *cm_id_priv) 794 { 795 struct rb_node **link = &cm.remote_sidr_table.rb_node; 796 struct rb_node *parent = NULL; 797 struct cm_id_private *cur_cm_id_priv; 798 __be32 remote_id = cm_id_priv->id.remote_id; 799 800 while (*link) { 801 parent = *link; 802 cur_cm_id_priv = rb_entry(parent, struct cm_id_private, 803 sidr_id_node); 804 if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id)) 805 link = &(*link)->rb_left; 806 else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id)) 807 link = &(*link)->rb_right; 808 else { 809 if (cur_cm_id_priv->sidr_slid < cm_id_priv->sidr_slid) 810 link = &(*link)->rb_left; 811 else if (cur_cm_id_priv->sidr_slid > cm_id_priv->sidr_slid) 812 link = &(*link)->rb_right; 813 else 814 return cur_cm_id_priv; 815 } 816 } 817 rb_link_node(&cm_id_priv->sidr_id_node, parent, link); 818 rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); 819 return NULL; 820 } 821 822 static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device, 823 ib_cm_handler cm_handler, 824 void *context) 825 { 826 struct cm_id_private *cm_id_priv; 827 u32 id; 828 int ret; 829 830 cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL); 831 if (!cm_id_priv) 832 return ERR_PTR(-ENOMEM); 833 834 cm_id_priv->id.state = IB_CM_IDLE; 835 cm_id_priv->id.device = device; 836 cm_id_priv->id.cm_handler = cm_handler; 837 cm_id_priv->id.context = context; 838 cm_id_priv->id.remote_cm_qpn = 1; 839 840 RB_CLEAR_NODE(&cm_id_priv->service_node); 841 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node); 842 spin_lock_init(&cm_id_priv->lock); 843 init_completion(&cm_id_priv->comp); 844 INIT_LIST_HEAD(&cm_id_priv->work_list); 845 atomic_set(&cm_id_priv->work_count, -1); 846 refcount_set(&cm_id_priv->refcount, 1); 847 848 ret = xa_alloc_cyclic(&cm.local_id_table, &id, NULL, xa_limit_32b, 849 &cm.local_id_next, GFP_KERNEL); 850 if (ret < 0) 851 goto error; 852 cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand; 853 854 return cm_id_priv; 855 856 error: 857 kfree(cm_id_priv); 858 return ERR_PTR(ret); 859 } 860 861 /* 862 * Make the ID visible to the MAD handlers and other threads that use the 863 * xarray. 864 */ 865 static void cm_finalize_id(struct cm_id_private *cm_id_priv) 866 { 867 xa_store(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id), 868 cm_id_priv, GFP_ATOMIC); 869 } 870 871 struct ib_cm_id *ib_create_cm_id(struct ib_device *device, 872 ib_cm_handler cm_handler, 873 void *context) 874 { 875 struct cm_id_private *cm_id_priv; 876 877 cm_id_priv = cm_alloc_id_priv(device, cm_handler, context); 878 if (IS_ERR(cm_id_priv)) 879 return ERR_CAST(cm_id_priv); 880 881 cm_finalize_id(cm_id_priv); 882 return &cm_id_priv->id; 883 } 884 EXPORT_SYMBOL(ib_create_cm_id); 885 886 static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv) 887 { 888 struct cm_work *work; 889 890 if (list_empty(&cm_id_priv->work_list)) 891 return NULL; 892 893 work = list_entry(cm_id_priv->work_list.next, struct cm_work, list); 894 list_del(&work->list); 895 return work; 896 } 897 898 static void cm_free_work(struct cm_work *work) 899 { 900 if (work->mad_recv_wc) 901 ib_free_recv_mad(work->mad_recv_wc); 902 kfree(work); 903 } 904 905 static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv, 906 struct cm_work *work) 907 __releases(&cm_id_priv->lock) 908 { 909 bool immediate; 910 911 /* 912 * To deliver the event to the user callback we have the drop the 913 * spinlock, however, we need to ensure that the user callback is single 914 * threaded and receives events in the temporal order. If there are 915 * already events being processed then thread new events onto a list, 916 * the thread currently processing will pick them up. 917 */ 918 immediate = atomic_inc_and_test(&cm_id_priv->work_count); 919 if (!immediate) { 920 list_add_tail(&work->list, &cm_id_priv->work_list); 921 /* 922 * This routine always consumes incoming reference. Once queued 923 * to the work_list then a reference is held by the thread 924 * currently running cm_process_work() and this reference is not 925 * needed. 926 */ 927 cm_deref_id(cm_id_priv); 928 } 929 spin_unlock_irq(&cm_id_priv->lock); 930 931 if (immediate) 932 cm_process_work(cm_id_priv, work); 933 } 934 935 static inline int cm_convert_to_ms(int iba_time) 936 { 937 /* approximate conversion to ms from 4.096us x 2^iba_time */ 938 return 1 << max(iba_time - 8, 0); 939 } 940 941 /* 942 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time 943 * Because of how ack_timeout is stored, adding one doubles the timeout. 944 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and 945 * increment it (round up) only if the other is within 50%. 946 */ 947 static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time) 948 { 949 int ack_timeout = packet_life_time + 1; 950 951 if (ack_timeout >= ca_ack_delay) 952 ack_timeout += (ca_ack_delay >= (ack_timeout - 1)); 953 else 954 ack_timeout = ca_ack_delay + 955 (ack_timeout >= (ca_ack_delay - 1)); 956 957 return min(31, ack_timeout); 958 } 959 960 static void cm_remove_remote(struct cm_id_private *cm_id_priv) 961 { 962 struct cm_timewait_info *timewait_info = cm_id_priv->timewait_info; 963 964 if (timewait_info->inserted_remote_id) { 965 rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table); 966 timewait_info->inserted_remote_id = 0; 967 } 968 969 if (timewait_info->inserted_remote_qp) { 970 rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table); 971 timewait_info->inserted_remote_qp = 0; 972 } 973 } 974 975 static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id) 976 { 977 struct cm_timewait_info *timewait_info; 978 979 timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL); 980 if (!timewait_info) 981 return ERR_PTR(-ENOMEM); 982 983 timewait_info->work.local_id = local_id; 984 INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler); 985 timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT; 986 return timewait_info; 987 } 988 989 static void cm_enter_timewait(struct cm_id_private *cm_id_priv) 990 { 991 int wait_time; 992 unsigned long flags; 993 struct cm_device *cm_dev; 994 995 lockdep_assert_held(&cm_id_priv->lock); 996 997 cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client); 998 if (!cm_dev) 999 return; 1000 1001 spin_lock_irqsave(&cm.lock, flags); 1002 cm_remove_remote(cm_id_priv); 1003 list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list); 1004 spin_unlock_irqrestore(&cm.lock, flags); 1005 1006 /* 1007 * The cm_id could be destroyed by the user before we exit timewait. 1008 * To protect against this, we search for the cm_id after exiting 1009 * timewait before notifying the user that we've exited timewait. 1010 */ 1011 cm_id_priv->id.state = IB_CM_TIMEWAIT; 1012 wait_time = cm_convert_to_ms(cm_id_priv->av.timeout); 1013 1014 /* Check if the device started its remove_one */ 1015 spin_lock_irqsave(&cm.lock, flags); 1016 if (!cm_dev->going_down) 1017 queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work, 1018 msecs_to_jiffies(wait_time)); 1019 spin_unlock_irqrestore(&cm.lock, flags); 1020 1021 /* 1022 * The timewait_info is converted into a work and gets freed during 1023 * cm_free_work() in cm_timewait_handler(). 1024 */ 1025 BUILD_BUG_ON(offsetof(struct cm_timewait_info, work) != 0); 1026 cm_id_priv->timewait_info = NULL; 1027 } 1028 1029 static void cm_reset_to_idle(struct cm_id_private *cm_id_priv) 1030 { 1031 unsigned long flags; 1032 1033 lockdep_assert_held(&cm_id_priv->lock); 1034 1035 cm_id_priv->id.state = IB_CM_IDLE; 1036 if (cm_id_priv->timewait_info) { 1037 spin_lock_irqsave(&cm.lock, flags); 1038 cm_remove_remote(cm_id_priv); 1039 spin_unlock_irqrestore(&cm.lock, flags); 1040 kfree(cm_id_priv->timewait_info); 1041 cm_id_priv->timewait_info = NULL; 1042 } 1043 } 1044 1045 static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id, 1046 enum ib_cm_state old_state) 1047 { 1048 struct cm_id_private *cm_id_priv; 1049 1050 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1051 pr_err_ratelimited("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__, 1052 cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount)); 1053 } 1054 1055 static void cm_destroy_id(struct ib_cm_id *cm_id, int err) 1056 { 1057 struct cm_id_private *cm_id_priv; 1058 enum ib_cm_state old_state; 1059 unsigned long timeout; 1060 struct cm_work *work; 1061 int ret; 1062 1063 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1064 spin_lock_irq(&cm_id_priv->lock); 1065 old_state = cm_id->state; 1066 retest: 1067 switch (cm_id->state) { 1068 case IB_CM_LISTEN: 1069 spin_lock(&cm.lock); 1070 if (--cm_id_priv->listen_sharecount > 0) { 1071 /* The id is still shared. */ 1072 WARN_ON(refcount_read(&cm_id_priv->refcount) == 1); 1073 spin_unlock(&cm.lock); 1074 spin_unlock_irq(&cm_id_priv->lock); 1075 cm_deref_id(cm_id_priv); 1076 return; 1077 } 1078 cm_id->state = IB_CM_IDLE; 1079 rb_erase(&cm_id_priv->service_node, &cm.listen_service_table); 1080 RB_CLEAR_NODE(&cm_id_priv->service_node); 1081 spin_unlock(&cm.lock); 1082 break; 1083 case IB_CM_SIDR_REQ_SENT: 1084 cm_id->state = IB_CM_IDLE; 1085 ib_cancel_mad(cm_id_priv->msg); 1086 break; 1087 case IB_CM_SIDR_REQ_RCVD: 1088 cm_send_sidr_rep_locked(cm_id_priv, 1089 &(struct ib_cm_sidr_rep_param){ 1090 .status = IB_SIDR_REJECT }); 1091 /* cm_send_sidr_rep_locked will not move to IDLE if it fails */ 1092 cm_id->state = IB_CM_IDLE; 1093 break; 1094 case IB_CM_REQ_SENT: 1095 case IB_CM_MRA_REQ_RCVD: 1096 ib_cancel_mad(cm_id_priv->msg); 1097 cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT, 1098 &cm_id_priv->id.device->node_guid, 1099 sizeof(cm_id_priv->id.device->node_guid), 1100 NULL, 0); 1101 break; 1102 case IB_CM_REQ_RCVD: 1103 if (err == -ENOMEM) { 1104 /* Do not reject to allow future retries. */ 1105 cm_reset_to_idle(cm_id_priv); 1106 } else { 1107 cm_send_rej_locked(cm_id_priv, 1108 IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, 1109 NULL, 0); 1110 } 1111 break; 1112 case IB_CM_REP_SENT: 1113 case IB_CM_MRA_REP_RCVD: 1114 ib_cancel_mad(cm_id_priv->msg); 1115 cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL, 1116 0, NULL, 0); 1117 goto retest; 1118 case IB_CM_MRA_REQ_SENT: 1119 case IB_CM_REP_RCVD: 1120 case IB_CM_MRA_REP_SENT: 1121 cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL, 1122 0, NULL, 0); 1123 break; 1124 case IB_CM_ESTABLISHED: 1125 if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) { 1126 cm_id->state = IB_CM_IDLE; 1127 break; 1128 } 1129 cm_issue_dreq(cm_id_priv); 1130 cm_enter_timewait(cm_id_priv); 1131 goto retest; 1132 case IB_CM_DREQ_SENT: 1133 ib_cancel_mad(cm_id_priv->msg); 1134 cm_enter_timewait(cm_id_priv); 1135 goto retest; 1136 case IB_CM_DREQ_RCVD: 1137 cm_send_drep_locked(cm_id_priv, NULL, 0); 1138 WARN_ON(cm_id->state != IB_CM_TIMEWAIT); 1139 goto retest; 1140 case IB_CM_TIMEWAIT: 1141 /* 1142 * The cm_acquire_id in cm_timewait_handler will stop working 1143 * once we do xa_erase below, so just move to idle here for 1144 * consistency. 1145 */ 1146 cm_id->state = IB_CM_IDLE; 1147 break; 1148 case IB_CM_IDLE: 1149 break; 1150 } 1151 WARN_ON(cm_id->state != IB_CM_IDLE); 1152 1153 spin_lock(&cm.lock); 1154 /* Required for cleanup paths related cm_req_handler() */ 1155 if (cm_id_priv->timewait_info) { 1156 cm_remove_remote(cm_id_priv); 1157 kfree(cm_id_priv->timewait_info); 1158 cm_id_priv->timewait_info = NULL; 1159 } 1160 1161 WARN_ON(cm_id_priv->listen_sharecount); 1162 WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node)); 1163 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) 1164 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); 1165 spin_unlock(&cm.lock); 1166 spin_unlock_irq(&cm_id_priv->lock); 1167 1168 xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id)); 1169 cm_deref_id(cm_id_priv); 1170 timeout = msecs_to_jiffies((cm_id_priv->max_cm_retries * cm_id_priv->timeout_ms * 5) / 4); 1171 do { 1172 ret = wait_for_completion_timeout(&cm_id_priv->comp, timeout); 1173 if (!ret) /* timeout happened */ 1174 cm_destroy_id_wait_timeout(cm_id, old_state); 1175 } while (!ret); 1176 1177 while ((work = cm_dequeue_work(cm_id_priv)) != NULL) 1178 cm_free_work(work); 1179 1180 cm_destroy_av(&cm_id_priv->av); 1181 cm_destroy_av(&cm_id_priv->alt_av); 1182 kfree(cm_id_priv->private_data); 1183 kfree_rcu(cm_id_priv, rcu); 1184 } 1185 1186 void ib_destroy_cm_id(struct ib_cm_id *cm_id) 1187 { 1188 cm_destroy_id(cm_id, 0); 1189 } 1190 EXPORT_SYMBOL(ib_destroy_cm_id); 1191 1192 static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id) 1193 { 1194 if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID && 1195 (service_id != IB_CM_ASSIGN_SERVICE_ID)) 1196 return -EINVAL; 1197 1198 if (service_id == IB_CM_ASSIGN_SERVICE_ID) 1199 cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++); 1200 else 1201 cm_id_priv->id.service_id = service_id; 1202 1203 return 0; 1204 } 1205 1206 /** 1207 * ib_cm_listen - Initiates listening on the specified service ID for 1208 * connection and service ID resolution requests. 1209 * @cm_id: Connection identifier associated with the listen request. 1210 * @service_id: Service identifier matched against incoming connection 1211 * and service ID resolution requests. The service ID should be specified 1212 * network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will 1213 * assign a service ID to the caller. 1214 */ 1215 int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id) 1216 { 1217 struct cm_id_private *cm_id_priv = 1218 container_of(cm_id, struct cm_id_private, id); 1219 unsigned long flags; 1220 int ret; 1221 1222 spin_lock_irqsave(&cm_id_priv->lock, flags); 1223 if (cm_id_priv->id.state != IB_CM_IDLE) { 1224 ret = -EINVAL; 1225 goto out; 1226 } 1227 1228 ret = cm_init_listen(cm_id_priv, service_id); 1229 if (ret) 1230 goto out; 1231 1232 if (!cm_insert_listen(cm_id_priv, NULL)) { 1233 ret = -EBUSY; 1234 goto out; 1235 } 1236 1237 cm_id_priv->id.state = IB_CM_LISTEN; 1238 ret = 0; 1239 1240 out: 1241 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1242 return ret; 1243 } 1244 EXPORT_SYMBOL(ib_cm_listen); 1245 1246 /** 1247 * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on 1248 * the given service ID. 1249 * 1250 * If there's an existing ID listening on that same device and service ID, 1251 * return it. 1252 * 1253 * @device: Device associated with the cm_id. All related communication will 1254 * be associated with the specified device. 1255 * @cm_handler: Callback invoked to notify the user of CM events. 1256 * @service_id: Service identifier matched against incoming connection 1257 * and service ID resolution requests. The service ID should be specified 1258 * network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will 1259 * assign a service ID to the caller. 1260 * 1261 * Callers should call ib_destroy_cm_id when done with the listener ID. 1262 */ 1263 struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device, 1264 ib_cm_handler cm_handler, 1265 __be64 service_id) 1266 { 1267 struct cm_id_private *listen_id_priv; 1268 struct cm_id_private *cm_id_priv; 1269 int err = 0; 1270 1271 /* Create an ID in advance, since the creation may sleep */ 1272 cm_id_priv = cm_alloc_id_priv(device, cm_handler, NULL); 1273 if (IS_ERR(cm_id_priv)) 1274 return ERR_CAST(cm_id_priv); 1275 1276 err = cm_init_listen(cm_id_priv, service_id); 1277 if (err) { 1278 ib_destroy_cm_id(&cm_id_priv->id); 1279 return ERR_PTR(err); 1280 } 1281 1282 spin_lock_irq(&cm_id_priv->lock); 1283 listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler); 1284 if (listen_id_priv != cm_id_priv) { 1285 spin_unlock_irq(&cm_id_priv->lock); 1286 ib_destroy_cm_id(&cm_id_priv->id); 1287 if (!listen_id_priv) 1288 return ERR_PTR(-EINVAL); 1289 return &listen_id_priv->id; 1290 } 1291 cm_id_priv->id.state = IB_CM_LISTEN; 1292 spin_unlock_irq(&cm_id_priv->lock); 1293 1294 /* 1295 * A listen ID does not need to be in the xarray since it does not 1296 * receive mads, is not placed in the remote_id or remote_qpn rbtree, 1297 * and does not enter timewait. 1298 */ 1299 1300 return &cm_id_priv->id; 1301 } 1302 EXPORT_SYMBOL(ib_cm_insert_listen); 1303 1304 static __be64 cm_form_tid(struct cm_id_private *cm_id_priv) 1305 { 1306 u64 hi_tid = 0, low_tid; 1307 1308 lockdep_assert_held(&cm_id_priv->lock); 1309 1310 low_tid = (u64)cm_id_priv->id.local_id; 1311 if (!cm_id_priv->av.port) 1312 return cpu_to_be64(low_tid); 1313 1314 read_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); 1315 if (cm_id_priv->av.port->mad_agent) 1316 hi_tid = ((u64)cm_id_priv->av.port->mad_agent->hi_tid) << 32; 1317 read_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); 1318 return cpu_to_be64(hi_tid | low_tid); 1319 } 1320 1321 static void cm_format_mad_hdr(struct ib_mad_hdr *hdr, 1322 __be16 attr_id, __be64 tid) 1323 { 1324 hdr->base_version = IB_MGMT_BASE_VERSION; 1325 hdr->mgmt_class = IB_MGMT_CLASS_CM; 1326 hdr->class_version = IB_CM_CLASS_VERSION; 1327 hdr->method = IB_MGMT_METHOD_SEND; 1328 hdr->attr_id = attr_id; 1329 hdr->tid = tid; 1330 } 1331 1332 static void cm_format_mad_ece_hdr(struct ib_mad_hdr *hdr, __be16 attr_id, 1333 __be64 tid, u32 attr_mod) 1334 { 1335 cm_format_mad_hdr(hdr, attr_id, tid); 1336 hdr->attr_mod = cpu_to_be32(attr_mod); 1337 } 1338 1339 static void cm_format_req(struct cm_req_msg *req_msg, 1340 struct cm_id_private *cm_id_priv, 1341 struct ib_cm_req_param *param) 1342 { 1343 struct sa_path_rec *pri_path = param->primary_path; 1344 struct sa_path_rec *alt_path = param->alternate_path; 1345 bool pri_ext = false; 1346 __be16 lid; 1347 1348 if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA) 1349 pri_ext = opa_is_extended_lid(pri_path->opa.dlid, 1350 pri_path->opa.slid); 1351 1352 cm_format_mad_ece_hdr(&req_msg->hdr, CM_REQ_ATTR_ID, 1353 cm_form_tid(cm_id_priv), param->ece.attr_mod); 1354 1355 IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg, 1356 be32_to_cpu(cm_id_priv->id.local_id)); 1357 IBA_SET(CM_REQ_SERVICE_ID, req_msg, be64_to_cpu(param->service_id)); 1358 IBA_SET(CM_REQ_LOCAL_CA_GUID, req_msg, 1359 be64_to_cpu(cm_id_priv->id.device->node_guid)); 1360 IBA_SET(CM_REQ_LOCAL_QPN, req_msg, param->qp_num); 1361 IBA_SET(CM_REQ_INITIATOR_DEPTH, req_msg, param->initiator_depth); 1362 IBA_SET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg, 1363 param->remote_cm_response_timeout); 1364 cm_req_set_qp_type(req_msg, param->qp_type); 1365 IBA_SET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg, param->flow_control); 1366 IBA_SET(CM_REQ_STARTING_PSN, req_msg, param->starting_psn); 1367 IBA_SET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg, 1368 param->local_cm_response_timeout); 1369 IBA_SET(CM_REQ_PARTITION_KEY, req_msg, 1370 be16_to_cpu(param->primary_path->pkey)); 1371 IBA_SET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg, 1372 param->primary_path->mtu); 1373 IBA_SET(CM_REQ_MAX_CM_RETRIES, req_msg, param->max_cm_retries); 1374 1375 if (param->qp_type != IB_QPT_XRC_INI) { 1376 IBA_SET(CM_REQ_RESPONDER_RESOURCES, req_msg, 1377 param->responder_resources); 1378 IBA_SET(CM_REQ_RETRY_COUNT, req_msg, param->retry_count); 1379 IBA_SET(CM_REQ_RNR_RETRY_COUNT, req_msg, 1380 param->rnr_retry_count); 1381 IBA_SET(CM_REQ_SRQ, req_msg, param->srq); 1382 } 1383 1384 *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) = 1385 pri_path->sgid; 1386 *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) = 1387 pri_path->dgid; 1388 if (pri_ext) { 1389 IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) 1390 ->global.interface_id = 1391 OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid)); 1392 IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) 1393 ->global.interface_id = 1394 OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid)); 1395 } 1396 if (pri_path->hop_limit <= 1) { 1397 IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg, 1398 be16_to_cpu(pri_ext ? 0 : 1399 htons(ntohl(sa_path_get_slid( 1400 pri_path))))); 1401 IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg, 1402 be16_to_cpu(pri_ext ? 0 : 1403 htons(ntohl(sa_path_get_dlid( 1404 pri_path))))); 1405 } else { 1406 1407 if (param->primary_path_inbound) { 1408 lid = param->primary_path_inbound->ib.dlid; 1409 IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg, 1410 be16_to_cpu(lid)); 1411 } else 1412 IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg, 1413 be16_to_cpu(IB_LID_PERMISSIVE)); 1414 1415 /* Work-around until there's a way to obtain remote LID info */ 1416 IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg, 1417 be16_to_cpu(IB_LID_PERMISSIVE)); 1418 } 1419 IBA_SET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg, 1420 be32_to_cpu(pri_path->flow_label)); 1421 IBA_SET(CM_REQ_PRIMARY_PACKET_RATE, req_msg, pri_path->rate); 1422 IBA_SET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg, pri_path->traffic_class); 1423 IBA_SET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg, pri_path->hop_limit); 1424 IBA_SET(CM_REQ_PRIMARY_SL, req_msg, pri_path->sl); 1425 IBA_SET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg, 1426 (pri_path->hop_limit <= 1)); 1427 IBA_SET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg, 1428 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay, 1429 pri_path->packet_life_time)); 1430 1431 if (alt_path) { 1432 bool alt_ext = false; 1433 1434 if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA) 1435 alt_ext = opa_is_extended_lid(alt_path->opa.dlid, 1436 alt_path->opa.slid); 1437 1438 *IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg) = 1439 alt_path->sgid; 1440 *IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg) = 1441 alt_path->dgid; 1442 if (alt_ext) { 1443 IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, 1444 req_msg) 1445 ->global.interface_id = 1446 OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid)); 1447 IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, 1448 req_msg) 1449 ->global.interface_id = 1450 OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid)); 1451 } 1452 if (alt_path->hop_limit <= 1) { 1453 IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg, 1454 be16_to_cpu( 1455 alt_ext ? 0 : 1456 htons(ntohl(sa_path_get_slid( 1457 alt_path))))); 1458 IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg, 1459 be16_to_cpu( 1460 alt_ext ? 0 : 1461 htons(ntohl(sa_path_get_dlid( 1462 alt_path))))); 1463 } else { 1464 IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg, 1465 be16_to_cpu(IB_LID_PERMISSIVE)); 1466 IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg, 1467 be16_to_cpu(IB_LID_PERMISSIVE)); 1468 } 1469 IBA_SET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg, 1470 be32_to_cpu(alt_path->flow_label)); 1471 IBA_SET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg, alt_path->rate); 1472 IBA_SET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg, 1473 alt_path->traffic_class); 1474 IBA_SET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg, 1475 alt_path->hop_limit); 1476 IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, alt_path->sl); 1477 IBA_SET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg, 1478 (alt_path->hop_limit <= 1)); 1479 IBA_SET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg, 1480 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay, 1481 alt_path->packet_life_time)); 1482 } 1483 IBA_SET(CM_REQ_VENDOR_ID, req_msg, param->ece.vendor_id); 1484 1485 if (param->private_data && param->private_data_len) 1486 IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data, 1487 param->private_data_len); 1488 } 1489 1490 static int cm_validate_req_param(struct ib_cm_req_param *param) 1491 { 1492 if (!param->primary_path) 1493 return -EINVAL; 1494 1495 if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC && 1496 param->qp_type != IB_QPT_XRC_INI) 1497 return -EINVAL; 1498 1499 if (param->private_data && 1500 param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE) 1501 return -EINVAL; 1502 1503 if (param->alternate_path && 1504 (param->alternate_path->pkey != param->primary_path->pkey || 1505 param->alternate_path->mtu != param->primary_path->mtu)) 1506 return -EINVAL; 1507 1508 return 0; 1509 } 1510 1511 int ib_send_cm_req(struct ib_cm_id *cm_id, 1512 struct ib_cm_req_param *param) 1513 { 1514 struct cm_av av = {}, alt_av = {}; 1515 struct cm_id_private *cm_id_priv; 1516 struct ib_mad_send_buf *msg; 1517 struct cm_req_msg *req_msg; 1518 unsigned long flags; 1519 int ret; 1520 1521 ret = cm_validate_req_param(param); 1522 if (ret) 1523 return ret; 1524 1525 /* Verify that we're not in timewait. */ 1526 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 1527 spin_lock_irqsave(&cm_id_priv->lock, flags); 1528 if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) { 1529 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1530 return -EINVAL; 1531 } 1532 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1533 1534 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv-> 1535 id.local_id); 1536 if (IS_ERR(cm_id_priv->timewait_info)) { 1537 ret = PTR_ERR(cm_id_priv->timewait_info); 1538 cm_id_priv->timewait_info = NULL; 1539 return ret; 1540 } 1541 1542 ret = cm_init_av_by_path(param->primary_path, 1543 param->ppath_sgid_attr, &av); 1544 if (ret) 1545 return ret; 1546 if (param->alternate_path) { 1547 ret = cm_init_av_by_path(param->alternate_path, NULL, 1548 &alt_av); 1549 if (ret) { 1550 cm_destroy_av(&av); 1551 return ret; 1552 } 1553 } 1554 cm_id->service_id = param->service_id; 1555 cm_id_priv->timeout_ms = cm_convert_to_ms( 1556 param->primary_path->packet_life_time) * 2 + 1557 cm_convert_to_ms( 1558 param->remote_cm_response_timeout); 1559 cm_id_priv->max_cm_retries = param->max_cm_retries; 1560 cm_id_priv->initiator_depth = param->initiator_depth; 1561 cm_id_priv->responder_resources = param->responder_resources; 1562 cm_id_priv->retry_count = param->retry_count; 1563 cm_id_priv->path_mtu = param->primary_path->mtu; 1564 cm_id_priv->pkey = param->primary_path->pkey; 1565 cm_id_priv->qp_type = param->qp_type; 1566 1567 spin_lock_irqsave(&cm_id_priv->lock, flags); 1568 1569 cm_move_av_from_path(&cm_id_priv->av, &av); 1570 if (param->primary_path_outbound) 1571 cm_id_priv->av.dlid_datapath = 1572 be16_to_cpu(param->primary_path_outbound->ib.dlid); 1573 1574 if (param->alternate_path) 1575 cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av); 1576 1577 msg = cm_alloc_priv_msg(cm_id_priv, IB_CM_REQ_SENT); 1578 if (IS_ERR(msg)) { 1579 ret = PTR_ERR(msg); 1580 goto out_unlock; 1581 } 1582 1583 req_msg = (struct cm_req_msg *)msg->mad; 1584 cm_format_req(req_msg, cm_id_priv, param); 1585 cm_id_priv->tid = req_msg->hdr.tid; 1586 1587 cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg)); 1588 cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg)); 1589 1590 trace_icm_send_req(&cm_id_priv->id); 1591 ret = ib_post_send_mad(msg, NULL); 1592 if (ret) 1593 goto out_free; 1594 BUG_ON(cm_id->state != IB_CM_IDLE); 1595 cm_id->state = IB_CM_REQ_SENT; 1596 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1597 return 0; 1598 out_free: 1599 cm_free_priv_msg(msg); 1600 out_unlock: 1601 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1602 return ret; 1603 } 1604 EXPORT_SYMBOL(ib_send_cm_req); 1605 1606 static int cm_issue_rej(struct cm_port *port, 1607 struct ib_mad_recv_wc *mad_recv_wc, 1608 enum ib_cm_rej_reason reason, 1609 enum cm_msg_response msg_rejected, 1610 void *ari, u8 ari_length) 1611 { 1612 struct ib_mad_send_buf *msg = NULL; 1613 struct cm_rej_msg *rej_msg, *rcv_msg; 1614 int ret; 1615 1616 ret = cm_alloc_response_msg(port, mad_recv_wc, false, &msg); 1617 if (ret) 1618 return ret; 1619 1620 /* We just need common CM header information. Cast to any message. */ 1621 rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad; 1622 rej_msg = (struct cm_rej_msg *) msg->mad; 1623 1624 cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid); 1625 IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg, 1626 IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg)); 1627 IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, 1628 IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg)); 1629 IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, msg_rejected); 1630 IBA_SET(CM_REJ_REASON, rej_msg, reason); 1631 1632 if (ari && ari_length) { 1633 IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length); 1634 IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length); 1635 } 1636 1637 trace_icm_issue_rej( 1638 IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg), 1639 IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg)); 1640 ret = ib_post_send_mad(msg, NULL); 1641 if (ret) 1642 cm_free_msg(msg); 1643 1644 return ret; 1645 } 1646 1647 static bool cm_req_has_alt_path(struct cm_req_msg *req_msg) 1648 { 1649 return ((cpu_to_be16( 1650 IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg))) || 1651 (ib_is_opa_gid(IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, 1652 req_msg)))); 1653 } 1654 1655 static void cm_path_set_rec_type(struct ib_device *ib_device, u32 port_num, 1656 struct sa_path_rec *path, union ib_gid *gid) 1657 { 1658 if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num)) 1659 path->rec_type = SA_PATH_REC_TYPE_OPA; 1660 else 1661 path->rec_type = SA_PATH_REC_TYPE_IB; 1662 } 1663 1664 static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg, 1665 struct sa_path_rec *primary_path, 1666 struct sa_path_rec *alt_path, 1667 struct ib_wc *wc) 1668 { 1669 u32 lid; 1670 1671 if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) { 1672 sa_path_set_dlid(primary_path, wc->slid); 1673 sa_path_set_slid(primary_path, 1674 IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID, 1675 req_msg)); 1676 } else { 1677 lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR( 1678 CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg)); 1679 sa_path_set_dlid(primary_path, lid); 1680 1681 lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR( 1682 CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg)); 1683 sa_path_set_slid(primary_path, lid); 1684 } 1685 1686 if (!cm_req_has_alt_path(req_msg)) 1687 return; 1688 1689 if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) { 1690 sa_path_set_dlid(alt_path, 1691 IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, 1692 req_msg)); 1693 sa_path_set_slid(alt_path, 1694 IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, 1695 req_msg)); 1696 } else { 1697 lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR( 1698 CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg)); 1699 sa_path_set_dlid(alt_path, lid); 1700 1701 lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR( 1702 CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg)); 1703 sa_path_set_slid(alt_path, lid); 1704 } 1705 } 1706 1707 static void cm_format_paths_from_req(struct cm_req_msg *req_msg, 1708 struct sa_path_rec *primary_path, 1709 struct sa_path_rec *alt_path, 1710 struct ib_wc *wc) 1711 { 1712 primary_path->dgid = 1713 *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg); 1714 primary_path->sgid = 1715 *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg); 1716 primary_path->flow_label = 1717 cpu_to_be32(IBA_GET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg)); 1718 primary_path->hop_limit = IBA_GET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg); 1719 primary_path->traffic_class = 1720 IBA_GET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg); 1721 primary_path->reversible = 1; 1722 primary_path->pkey = 1723 cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg)); 1724 primary_path->sl = IBA_GET(CM_REQ_PRIMARY_SL, req_msg); 1725 primary_path->mtu_selector = IB_SA_EQ; 1726 primary_path->mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg); 1727 primary_path->rate_selector = IB_SA_EQ; 1728 primary_path->rate = IBA_GET(CM_REQ_PRIMARY_PACKET_RATE, req_msg); 1729 primary_path->packet_life_time_selector = IB_SA_EQ; 1730 primary_path->packet_life_time = 1731 IBA_GET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg); 1732 primary_path->packet_life_time -= (primary_path->packet_life_time > 0); 1733 primary_path->service_id = 1734 cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)); 1735 if (sa_path_is_roce(primary_path)) 1736 primary_path->roce.route_resolved = false; 1737 1738 if (cm_req_has_alt_path(req_msg)) { 1739 alt_path->dgid = *IBA_GET_MEM_PTR( 1740 CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg); 1741 alt_path->sgid = *IBA_GET_MEM_PTR( 1742 CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg); 1743 alt_path->flow_label = cpu_to_be32( 1744 IBA_GET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg)); 1745 alt_path->hop_limit = 1746 IBA_GET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg); 1747 alt_path->traffic_class = 1748 IBA_GET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg); 1749 alt_path->reversible = 1; 1750 alt_path->pkey = 1751 cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg)); 1752 alt_path->sl = IBA_GET(CM_REQ_ALTERNATE_SL, req_msg); 1753 alt_path->mtu_selector = IB_SA_EQ; 1754 alt_path->mtu = 1755 IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg); 1756 alt_path->rate_selector = IB_SA_EQ; 1757 alt_path->rate = IBA_GET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg); 1758 alt_path->packet_life_time_selector = IB_SA_EQ; 1759 alt_path->packet_life_time = 1760 IBA_GET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg); 1761 alt_path->packet_life_time -= (alt_path->packet_life_time > 0); 1762 alt_path->service_id = 1763 cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)); 1764 1765 if (sa_path_is_roce(alt_path)) 1766 alt_path->roce.route_resolved = false; 1767 } 1768 cm_format_path_lid_from_req(req_msg, primary_path, alt_path, wc); 1769 } 1770 1771 static u16 cm_get_bth_pkey(struct cm_work *work) 1772 { 1773 struct ib_device *ib_dev = work->port->cm_dev->ib_device; 1774 u32 port_num = work->port->port_num; 1775 u16 pkey_index = work->mad_recv_wc->wc->pkey_index; 1776 u16 pkey; 1777 int ret; 1778 1779 ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey); 1780 if (ret) { 1781 dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %u, pkey index %u). %d\n", 1782 port_num, pkey_index, ret); 1783 return 0; 1784 } 1785 1786 return pkey; 1787 } 1788 1789 /** 1790 * cm_opa_to_ib_sgid - Convert OPA SGID to IB SGID 1791 * ULPs (such as IPoIB) do not understand OPA GIDs and will 1792 * reject them as the local_gid will not match the sgid. Therefore, 1793 * change the pathrec's SGID to an IB SGID. 1794 * 1795 * @work: Work completion 1796 * @path: Path record 1797 */ 1798 static void cm_opa_to_ib_sgid(struct cm_work *work, 1799 struct sa_path_rec *path) 1800 { 1801 struct ib_device *dev = work->port->cm_dev->ib_device; 1802 u32 port_num = work->port->port_num; 1803 1804 if (rdma_cap_opa_ah(dev, port_num) && 1805 (ib_is_opa_gid(&path->sgid))) { 1806 union ib_gid sgid; 1807 1808 if (rdma_query_gid(dev, port_num, 0, &sgid)) { 1809 dev_warn(&dev->dev, 1810 "Error updating sgid in CM request\n"); 1811 return; 1812 } 1813 1814 path->sgid = sgid; 1815 } 1816 } 1817 1818 static void cm_format_req_event(struct cm_work *work, 1819 struct cm_id_private *cm_id_priv, 1820 struct ib_cm_id *listen_id) 1821 { 1822 struct cm_req_msg *req_msg; 1823 struct ib_cm_req_event_param *param; 1824 1825 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; 1826 param = &work->cm_event.param.req_rcvd; 1827 param->listen_id = listen_id; 1828 param->bth_pkey = cm_get_bth_pkey(work); 1829 param->port = cm_id_priv->av.port->port_num; 1830 param->primary_path = &work->path[0]; 1831 cm_opa_to_ib_sgid(work, param->primary_path); 1832 if (cm_req_has_alt_path(req_msg)) { 1833 param->alternate_path = &work->path[1]; 1834 cm_opa_to_ib_sgid(work, param->alternate_path); 1835 } else { 1836 param->alternate_path = NULL; 1837 } 1838 param->remote_ca_guid = 1839 cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg)); 1840 param->remote_qkey = IBA_GET(CM_REQ_LOCAL_Q_KEY, req_msg); 1841 param->remote_qpn = IBA_GET(CM_REQ_LOCAL_QPN, req_msg); 1842 param->qp_type = cm_req_get_qp_type(req_msg); 1843 param->starting_psn = IBA_GET(CM_REQ_STARTING_PSN, req_msg); 1844 param->responder_resources = IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg); 1845 param->initiator_depth = IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg); 1846 param->local_cm_response_timeout = 1847 IBA_GET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg); 1848 param->flow_control = IBA_GET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg); 1849 param->remote_cm_response_timeout = 1850 IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg); 1851 param->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg); 1852 param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg); 1853 param->srq = IBA_GET(CM_REQ_SRQ, req_msg); 1854 param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr; 1855 param->ece.vendor_id = IBA_GET(CM_REQ_VENDOR_ID, req_msg); 1856 param->ece.attr_mod = be32_to_cpu(req_msg->hdr.attr_mod); 1857 1858 work->cm_event.private_data = 1859 IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg); 1860 } 1861 1862 static void cm_process_work(struct cm_id_private *cm_id_priv, 1863 struct cm_work *work) 1864 { 1865 int ret; 1866 1867 /* We will typically only have the current event to report. */ 1868 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event); 1869 cm_free_work(work); 1870 1871 while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) { 1872 spin_lock_irq(&cm_id_priv->lock); 1873 work = cm_dequeue_work(cm_id_priv); 1874 spin_unlock_irq(&cm_id_priv->lock); 1875 if (!work) 1876 return; 1877 1878 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, 1879 &work->cm_event); 1880 cm_free_work(work); 1881 } 1882 cm_deref_id(cm_id_priv); 1883 if (ret) 1884 cm_destroy_id(&cm_id_priv->id, ret); 1885 } 1886 1887 static void cm_format_mra(struct cm_mra_msg *mra_msg, 1888 struct cm_id_private *cm_id_priv, 1889 enum cm_msg_response msg_mraed, 1890 const void *private_data, u8 private_data_len) 1891 { 1892 cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid); 1893 IBA_SET(CM_MRA_MESSAGE_MRAED, mra_msg, msg_mraed); 1894 IBA_SET(CM_MRA_LOCAL_COMM_ID, mra_msg, 1895 be32_to_cpu(cm_id_priv->id.local_id)); 1896 IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg, 1897 be32_to_cpu(cm_id_priv->id.remote_id)); 1898 IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, CM_MRA_SETTING); 1899 1900 if (private_data && private_data_len) 1901 IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data, 1902 private_data_len); 1903 } 1904 1905 static void cm_format_rej(struct cm_rej_msg *rej_msg, 1906 struct cm_id_private *cm_id_priv, 1907 enum ib_cm_rej_reason reason, void *ari, 1908 u8 ari_length, const void *private_data, 1909 u8 private_data_len, enum ib_cm_state state) 1910 { 1911 lockdep_assert_held(&cm_id_priv->lock); 1912 1913 cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid); 1914 IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg, 1915 be32_to_cpu(cm_id_priv->id.remote_id)); 1916 1917 switch (state) { 1918 case IB_CM_REQ_RCVD: 1919 IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0)); 1920 IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ); 1921 break; 1922 case IB_CM_MRA_REQ_SENT: 1923 IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, 1924 be32_to_cpu(cm_id_priv->id.local_id)); 1925 IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ); 1926 break; 1927 case IB_CM_REP_RCVD: 1928 case IB_CM_MRA_REP_SENT: 1929 IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, 1930 be32_to_cpu(cm_id_priv->id.local_id)); 1931 IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REP); 1932 break; 1933 default: 1934 IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, 1935 be32_to_cpu(cm_id_priv->id.local_id)); 1936 IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, 1937 CM_MSG_RESPONSE_OTHER); 1938 break; 1939 } 1940 1941 IBA_SET(CM_REJ_REASON, rej_msg, reason); 1942 if (ari && ari_length) { 1943 IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length); 1944 IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length); 1945 } 1946 1947 if (private_data && private_data_len) 1948 IBA_SET_MEM(CM_REJ_PRIVATE_DATA, rej_msg, private_data, 1949 private_data_len); 1950 } 1951 1952 static void cm_dup_req_handler(struct cm_work *work, 1953 struct cm_id_private *cm_id_priv) 1954 { 1955 struct ib_mad_send_buf *msg = NULL; 1956 int ret; 1957 1958 atomic_long_inc( 1959 &work->port->counters[CM_RECV_DUPLICATES][CM_REQ_COUNTER]); 1960 1961 /* Quick state check to discard duplicate REQs. */ 1962 spin_lock_irq(&cm_id_priv->lock); 1963 if (cm_id_priv->id.state == IB_CM_REQ_RCVD) { 1964 spin_unlock_irq(&cm_id_priv->lock); 1965 return; 1966 } 1967 spin_unlock_irq(&cm_id_priv->lock); 1968 1969 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, true, &msg); 1970 if (ret) 1971 return; 1972 1973 spin_lock_irq(&cm_id_priv->lock); 1974 switch (cm_id_priv->id.state) { 1975 case IB_CM_MRA_REQ_SENT: 1976 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 1977 CM_MSG_RESPONSE_REQ, 1978 cm_id_priv->private_data, 1979 cm_id_priv->private_data_len); 1980 break; 1981 case IB_CM_TIMEWAIT: 1982 cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, 1983 IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0, 1984 IB_CM_TIMEWAIT); 1985 break; 1986 default: 1987 goto unlock; 1988 } 1989 spin_unlock_irq(&cm_id_priv->lock); 1990 1991 trace_icm_send_dup_req(&cm_id_priv->id); 1992 ret = ib_post_send_mad(msg, NULL); 1993 if (ret) 1994 goto free; 1995 return; 1996 1997 unlock: spin_unlock_irq(&cm_id_priv->lock); 1998 free: cm_free_msg(msg); 1999 } 2000 2001 static struct cm_id_private *cm_match_req(struct cm_work *work, 2002 struct cm_id_private *cm_id_priv) 2003 { 2004 struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv; 2005 struct cm_timewait_info *timewait_info; 2006 struct cm_req_msg *req_msg; 2007 2008 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; 2009 2010 /* Check for possible duplicate REQ. */ 2011 spin_lock_irq(&cm.lock); 2012 timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info); 2013 if (timewait_info) { 2014 cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id, 2015 timewait_info->work.remote_id); 2016 spin_unlock_irq(&cm.lock); 2017 if (cur_cm_id_priv) { 2018 cm_dup_req_handler(work, cur_cm_id_priv); 2019 cm_deref_id(cur_cm_id_priv); 2020 } 2021 return NULL; 2022 } 2023 2024 /* Check for stale connections. */ 2025 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info); 2026 if (timewait_info) { 2027 cm_remove_remote(cm_id_priv); 2028 cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id, 2029 timewait_info->work.remote_id); 2030 2031 spin_unlock_irq(&cm.lock); 2032 cm_issue_rej(work->port, work->mad_recv_wc, 2033 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ, 2034 NULL, 0); 2035 if (cur_cm_id_priv) { 2036 ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0); 2037 cm_deref_id(cur_cm_id_priv); 2038 } 2039 return NULL; 2040 } 2041 2042 /* Find matching listen request. */ 2043 listen_cm_id_priv = cm_find_listen( 2044 cm_id_priv->id.device, 2045 cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg))); 2046 if (!listen_cm_id_priv) { 2047 cm_remove_remote(cm_id_priv); 2048 spin_unlock_irq(&cm.lock); 2049 cm_issue_rej(work->port, work->mad_recv_wc, 2050 IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ, 2051 NULL, 0); 2052 return NULL; 2053 } 2054 spin_unlock_irq(&cm.lock); 2055 return listen_cm_id_priv; 2056 } 2057 2058 /* 2059 * Work-around for inter-subnet connections. If the LIDs are permissive, 2060 * we need to override the LID/SL data in the REQ with the LID information 2061 * in the work completion. 2062 */ 2063 static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc) 2064 { 2065 if (!IBA_GET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg)) { 2066 if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID, 2067 req_msg)) == IB_LID_PERMISSIVE) { 2068 IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg, 2069 be16_to_cpu(ib_lid_be16(wc->slid))); 2070 IBA_SET(CM_REQ_PRIMARY_SL, req_msg, wc->sl); 2071 } 2072 2073 if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID, 2074 req_msg)) == IB_LID_PERMISSIVE) 2075 IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg, 2076 wc->dlid_path_bits); 2077 } 2078 2079 if (!IBA_GET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg)) { 2080 if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, 2081 req_msg)) == IB_LID_PERMISSIVE) { 2082 IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg, 2083 be16_to_cpu(ib_lid_be16(wc->slid))); 2084 IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, wc->sl); 2085 } 2086 2087 if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, 2088 req_msg)) == IB_LID_PERMISSIVE) 2089 IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg, 2090 wc->dlid_path_bits); 2091 } 2092 } 2093 2094 static int cm_req_handler(struct cm_work *work) 2095 { 2096 struct cm_id_private *cm_id_priv, *listen_cm_id_priv; 2097 struct cm_req_msg *req_msg; 2098 const struct ib_global_route *grh; 2099 const struct ib_gid_attr *gid_attr; 2100 int ret; 2101 2102 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; 2103 2104 cm_id_priv = 2105 cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL); 2106 if (IS_ERR(cm_id_priv)) 2107 return PTR_ERR(cm_id_priv); 2108 2109 cm_id_priv->id.remote_id = 2110 cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg)); 2111 cm_id_priv->id.service_id = 2112 cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)); 2113 cm_id_priv->tid = req_msg->hdr.tid; 2114 cm_id_priv->timeout_ms = cm_convert_to_ms( 2115 IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg)); 2116 cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg); 2117 cm_id_priv->remote_qpn = 2118 cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg)); 2119 cm_id_priv->initiator_depth = 2120 IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg); 2121 cm_id_priv->responder_resources = 2122 IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg); 2123 cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg); 2124 cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg)); 2125 cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg)); 2126 cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg); 2127 cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg); 2128 cm_id_priv->qp_type = cm_req_get_qp_type(req_msg); 2129 2130 ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc, 2131 work->mad_recv_wc->recv_buf.grh, 2132 &cm_id_priv->av); 2133 if (ret) 2134 goto destroy; 2135 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv-> 2136 id.local_id); 2137 if (IS_ERR(cm_id_priv->timewait_info)) { 2138 ret = PTR_ERR(cm_id_priv->timewait_info); 2139 cm_id_priv->timewait_info = NULL; 2140 goto destroy; 2141 } 2142 cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id; 2143 cm_id_priv->timewait_info->remote_ca_guid = 2144 cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg)); 2145 cm_id_priv->timewait_info->remote_qpn = cm_id_priv->remote_qpn; 2146 2147 /* 2148 * Note that the ID pointer is not in the xarray at this point, 2149 * so this set is only visible to the local thread. 2150 */ 2151 cm_id_priv->id.state = IB_CM_REQ_RCVD; 2152 2153 listen_cm_id_priv = cm_match_req(work, cm_id_priv); 2154 if (!listen_cm_id_priv) { 2155 trace_icm_no_listener_err(&cm_id_priv->id); 2156 cm_id_priv->id.state = IB_CM_IDLE; 2157 ret = -EINVAL; 2158 goto destroy; 2159 } 2160 2161 memset(&work->path[0], 0, sizeof(work->path[0])); 2162 if (cm_req_has_alt_path(req_msg)) 2163 memset(&work->path[1], 0, sizeof(work->path[1])); 2164 grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr); 2165 gid_attr = grh->sgid_attr; 2166 2167 if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) { 2168 work->path[0].rec_type = 2169 sa_conv_gid_to_pathrec_type(gid_attr->gid_type); 2170 } else { 2171 cm_process_routed_req(req_msg, work->mad_recv_wc->wc); 2172 cm_path_set_rec_type( 2173 work->port->cm_dev->ib_device, work->port->port_num, 2174 &work->path[0], 2175 IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, 2176 req_msg)); 2177 } 2178 if (cm_req_has_alt_path(req_msg)) 2179 work->path[1].rec_type = work->path[0].rec_type; 2180 cm_format_paths_from_req(req_msg, &work->path[0], 2181 &work->path[1], work->mad_recv_wc->wc); 2182 if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) 2183 sa_path_set_dmac(&work->path[0], 2184 cm_id_priv->av.ah_attr.roce.dmac); 2185 work->path[0].hop_limit = grh->hop_limit; 2186 2187 /* This destroy call is needed to pair with cm_init_av_for_response */ 2188 cm_destroy_av(&cm_id_priv->av); 2189 ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av); 2190 if (ret) { 2191 int err; 2192 2193 err = rdma_query_gid(work->port->cm_dev->ib_device, 2194 work->port->port_num, 0, 2195 &work->path[0].sgid); 2196 if (err) 2197 ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID, 2198 NULL, 0, NULL, 0); 2199 else 2200 ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID, 2201 &work->path[0].sgid, 2202 sizeof(work->path[0].sgid), 2203 NULL, 0); 2204 goto rejected; 2205 } 2206 if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_IB) 2207 cm_id_priv->av.dlid_datapath = 2208 IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg); 2209 2210 if (cm_req_has_alt_path(req_msg)) { 2211 ret = cm_init_av_by_path(&work->path[1], NULL, 2212 &cm_id_priv->alt_av); 2213 if (ret) { 2214 ib_send_cm_rej(&cm_id_priv->id, 2215 IB_CM_REJ_INVALID_ALT_GID, 2216 &work->path[0].sgid, 2217 sizeof(work->path[0].sgid), NULL, 0); 2218 goto rejected; 2219 } 2220 } 2221 2222 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; 2223 cm_id_priv->id.context = listen_cm_id_priv->id.context; 2224 cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id); 2225 2226 /* Now MAD handlers can see the new ID */ 2227 spin_lock_irq(&cm_id_priv->lock); 2228 cm_finalize_id(cm_id_priv); 2229 2230 /* Refcount belongs to the event, pairs with cm_process_work() */ 2231 refcount_inc(&cm_id_priv->refcount); 2232 cm_queue_work_unlock(cm_id_priv, work); 2233 /* 2234 * Since this ID was just created and was not made visible to other MAD 2235 * handlers until the cm_finalize_id() above we know that the 2236 * cm_process_work() will deliver the event and the listen_cm_id 2237 * embedded in the event can be derefed here. 2238 */ 2239 cm_deref_id(listen_cm_id_priv); 2240 return 0; 2241 2242 rejected: 2243 cm_deref_id(listen_cm_id_priv); 2244 destroy: 2245 ib_destroy_cm_id(&cm_id_priv->id); 2246 return ret; 2247 } 2248 2249 static void cm_format_rep(struct cm_rep_msg *rep_msg, 2250 struct cm_id_private *cm_id_priv, 2251 struct ib_cm_rep_param *param) 2252 { 2253 cm_format_mad_ece_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid, 2254 param->ece.attr_mod); 2255 IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg, 2256 be32_to_cpu(cm_id_priv->id.local_id)); 2257 IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg, 2258 be32_to_cpu(cm_id_priv->id.remote_id)); 2259 IBA_SET(CM_REP_STARTING_PSN, rep_msg, param->starting_psn); 2260 IBA_SET(CM_REP_RESPONDER_RESOURCES, rep_msg, 2261 param->responder_resources); 2262 IBA_SET(CM_REP_TARGET_ACK_DELAY, rep_msg, 2263 cm_id_priv->av.port->cm_dev->ack_delay); 2264 IBA_SET(CM_REP_FAILOVER_ACCEPTED, rep_msg, param->failover_accepted); 2265 IBA_SET(CM_REP_RNR_RETRY_COUNT, rep_msg, param->rnr_retry_count); 2266 IBA_SET(CM_REP_LOCAL_CA_GUID, rep_msg, 2267 be64_to_cpu(cm_id_priv->id.device->node_guid)); 2268 2269 if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) { 2270 IBA_SET(CM_REP_INITIATOR_DEPTH, rep_msg, 2271 param->initiator_depth); 2272 IBA_SET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg, 2273 param->flow_control); 2274 IBA_SET(CM_REP_SRQ, rep_msg, param->srq); 2275 IBA_SET(CM_REP_LOCAL_QPN, rep_msg, param->qp_num); 2276 } else { 2277 IBA_SET(CM_REP_SRQ, rep_msg, 1); 2278 IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num); 2279 } 2280 2281 IBA_SET(CM_REP_VENDOR_ID_L, rep_msg, param->ece.vendor_id); 2282 IBA_SET(CM_REP_VENDOR_ID_M, rep_msg, param->ece.vendor_id >> 8); 2283 IBA_SET(CM_REP_VENDOR_ID_H, rep_msg, param->ece.vendor_id >> 16); 2284 2285 if (param->private_data && param->private_data_len) 2286 IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data, 2287 param->private_data_len); 2288 } 2289 2290 int ib_send_cm_rep(struct ib_cm_id *cm_id, 2291 struct ib_cm_rep_param *param) 2292 { 2293 struct cm_id_private *cm_id_priv; 2294 struct ib_mad_send_buf *msg; 2295 struct cm_rep_msg *rep_msg; 2296 unsigned long flags; 2297 int ret; 2298 2299 if (param->private_data && 2300 param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE) 2301 return -EINVAL; 2302 2303 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2304 spin_lock_irqsave(&cm_id_priv->lock, flags); 2305 if (cm_id->state != IB_CM_REQ_RCVD && 2306 cm_id->state != IB_CM_MRA_REQ_SENT) { 2307 trace_icm_send_rep_err(cm_id_priv->id.local_id, cm_id->state); 2308 ret = -EINVAL; 2309 goto out; 2310 } 2311 2312 msg = cm_alloc_priv_msg_rep(cm_id_priv, IB_CM_REP_SENT, true); 2313 if (IS_ERR(msg)) { 2314 ret = PTR_ERR(msg); 2315 goto out; 2316 } 2317 2318 rep_msg = (struct cm_rep_msg *) msg->mad; 2319 cm_format_rep(rep_msg, cm_id_priv, param); 2320 2321 trace_icm_send_rep(cm_id); 2322 ret = ib_post_send_mad(msg, NULL); 2323 if (ret) 2324 goto out_free; 2325 2326 cm_id->state = IB_CM_REP_SENT; 2327 cm_id_priv->initiator_depth = param->initiator_depth; 2328 cm_id_priv->responder_resources = param->responder_resources; 2329 cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg)); 2330 WARN_ONCE(param->qp_num & 0xFF000000, 2331 "IBTA declares QPN to be 24 bits, but it is 0x%X\n", 2332 param->qp_num); 2333 cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF); 2334 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2335 return 0; 2336 2337 out_free: 2338 cm_free_priv_msg(msg); 2339 out: 2340 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2341 return ret; 2342 } 2343 EXPORT_SYMBOL(ib_send_cm_rep); 2344 2345 static void cm_format_rtu(struct cm_rtu_msg *rtu_msg, 2346 struct cm_id_private *cm_id_priv, 2347 const void *private_data, 2348 u8 private_data_len) 2349 { 2350 cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid); 2351 IBA_SET(CM_RTU_LOCAL_COMM_ID, rtu_msg, 2352 be32_to_cpu(cm_id_priv->id.local_id)); 2353 IBA_SET(CM_RTU_REMOTE_COMM_ID, rtu_msg, 2354 be32_to_cpu(cm_id_priv->id.remote_id)); 2355 2356 if (private_data && private_data_len) 2357 IBA_SET_MEM(CM_RTU_PRIVATE_DATA, rtu_msg, private_data, 2358 private_data_len); 2359 } 2360 2361 int ib_send_cm_rtu(struct ib_cm_id *cm_id, 2362 const void *private_data, 2363 u8 private_data_len) 2364 { 2365 struct cm_id_private *cm_id_priv; 2366 struct ib_mad_send_buf *msg; 2367 unsigned long flags; 2368 void *data; 2369 int ret; 2370 2371 if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE) 2372 return -EINVAL; 2373 2374 data = cm_copy_private_data(private_data, private_data_len); 2375 if (IS_ERR(data)) 2376 return PTR_ERR(data); 2377 2378 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 2379 spin_lock_irqsave(&cm_id_priv->lock, flags); 2380 if (cm_id->state != IB_CM_REP_RCVD && 2381 cm_id->state != IB_CM_MRA_REP_SENT) { 2382 trace_icm_send_cm_rtu_err(cm_id); 2383 ret = -EINVAL; 2384 goto error; 2385 } 2386 2387 msg = cm_alloc_msg(cm_id_priv); 2388 if (IS_ERR(msg)) { 2389 ret = PTR_ERR(msg); 2390 goto error; 2391 } 2392 2393 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv, 2394 private_data, private_data_len); 2395 2396 trace_icm_send_rtu(cm_id); 2397 ret = ib_post_send_mad(msg, NULL); 2398 if (ret) { 2399 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2400 cm_free_msg(msg); 2401 kfree(data); 2402 return ret; 2403 } 2404 2405 cm_id->state = IB_CM_ESTABLISHED; 2406 cm_set_private_data(cm_id_priv, data, private_data_len); 2407 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2408 return 0; 2409 2410 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2411 kfree(data); 2412 return ret; 2413 } 2414 EXPORT_SYMBOL(ib_send_cm_rtu); 2415 2416 static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type) 2417 { 2418 struct cm_rep_msg *rep_msg; 2419 struct ib_cm_rep_event_param *param; 2420 2421 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; 2422 param = &work->cm_event.param.rep_rcvd; 2423 param->remote_ca_guid = 2424 cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg)); 2425 param->remote_qkey = IBA_GET(CM_REP_LOCAL_Q_KEY, rep_msg); 2426 param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type)); 2427 param->starting_psn = IBA_GET(CM_REP_STARTING_PSN, rep_msg); 2428 param->responder_resources = IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg); 2429 param->initiator_depth = IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg); 2430 param->target_ack_delay = IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg); 2431 param->failover_accepted = IBA_GET(CM_REP_FAILOVER_ACCEPTED, rep_msg); 2432 param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg); 2433 param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg); 2434 param->srq = IBA_GET(CM_REP_SRQ, rep_msg); 2435 param->ece.vendor_id = IBA_GET(CM_REP_VENDOR_ID_H, rep_msg) << 16; 2436 param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_M, rep_msg) << 8; 2437 param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_L, rep_msg); 2438 param->ece.attr_mod = be32_to_cpu(rep_msg->hdr.attr_mod); 2439 2440 work->cm_event.private_data = 2441 IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg); 2442 } 2443 2444 static void cm_dup_rep_handler(struct cm_work *work) 2445 { 2446 struct cm_id_private *cm_id_priv; 2447 struct cm_rep_msg *rep_msg; 2448 struct ib_mad_send_buf *msg = NULL; 2449 int ret; 2450 2451 rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad; 2452 cm_id_priv = cm_acquire_id( 2453 cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 2454 cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg))); 2455 if (!cm_id_priv) 2456 return; 2457 2458 atomic_long_inc( 2459 &work->port->counters[CM_RECV_DUPLICATES][CM_REP_COUNTER]); 2460 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, true, &msg); 2461 if (ret) 2462 goto deref; 2463 2464 spin_lock_irq(&cm_id_priv->lock); 2465 if (cm_id_priv->id.state == IB_CM_ESTABLISHED) 2466 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv, 2467 cm_id_priv->private_data, 2468 cm_id_priv->private_data_len); 2469 else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT) 2470 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2471 CM_MSG_RESPONSE_REP, 2472 cm_id_priv->private_data, 2473 cm_id_priv->private_data_len); 2474 else 2475 goto unlock; 2476 spin_unlock_irq(&cm_id_priv->lock); 2477 2478 trace_icm_send_dup_rep(&cm_id_priv->id); 2479 ret = ib_post_send_mad(msg, NULL); 2480 if (ret) 2481 goto free; 2482 goto deref; 2483 2484 unlock: spin_unlock_irq(&cm_id_priv->lock); 2485 free: cm_free_msg(msg); 2486 deref: cm_deref_id(cm_id_priv); 2487 } 2488 2489 static int cm_rep_handler(struct cm_work *work) 2490 { 2491 struct cm_id_private *cm_id_priv; 2492 struct cm_rep_msg *rep_msg; 2493 int ret; 2494 struct cm_id_private *cur_cm_id_priv; 2495 struct cm_timewait_info *timewait_info; 2496 2497 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; 2498 cm_id_priv = cm_acquire_id( 2499 cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 0); 2500 if (!cm_id_priv) { 2501 cm_dup_rep_handler(work); 2502 trace_icm_remote_no_priv_err( 2503 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)); 2504 return -EINVAL; 2505 } 2506 2507 cm_format_rep_event(work, cm_id_priv->qp_type); 2508 2509 spin_lock_irq(&cm_id_priv->lock); 2510 switch (cm_id_priv->id.state) { 2511 case IB_CM_REQ_SENT: 2512 case IB_CM_MRA_REQ_RCVD: 2513 break; 2514 default: 2515 ret = -EINVAL; 2516 trace_icm_rep_unknown_err( 2517 IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg), 2518 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg), 2519 cm_id_priv->id.state); 2520 spin_unlock_irq(&cm_id_priv->lock); 2521 goto error; 2522 } 2523 2524 cm_id_priv->timewait_info->work.remote_id = 2525 cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)); 2526 cm_id_priv->timewait_info->remote_ca_guid = 2527 cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg)); 2528 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type); 2529 2530 spin_lock(&cm.lock); 2531 /* Check for duplicate REP. */ 2532 if (cm_insert_remote_id(cm_id_priv->timewait_info)) { 2533 spin_unlock(&cm.lock); 2534 spin_unlock_irq(&cm_id_priv->lock); 2535 ret = -EINVAL; 2536 trace_icm_insert_failed_err( 2537 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)); 2538 goto error; 2539 } 2540 /* Check for a stale connection. */ 2541 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info); 2542 if (timewait_info) { 2543 cm_remove_remote(cm_id_priv); 2544 cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id, 2545 timewait_info->work.remote_id); 2546 2547 spin_unlock(&cm.lock); 2548 spin_unlock_irq(&cm_id_priv->lock); 2549 cm_issue_rej(work->port, work->mad_recv_wc, 2550 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP, 2551 NULL, 0); 2552 ret = -EINVAL; 2553 trace_icm_staleconn_err( 2554 IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg), 2555 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)); 2556 2557 if (cur_cm_id_priv) { 2558 ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0); 2559 cm_deref_id(cur_cm_id_priv); 2560 } 2561 2562 goto error; 2563 } 2564 spin_unlock(&cm.lock); 2565 2566 cm_id_priv->id.state = IB_CM_REP_RCVD; 2567 cm_id_priv->id.remote_id = 2568 cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)); 2569 cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type); 2570 cm_id_priv->initiator_depth = 2571 IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg); 2572 cm_id_priv->responder_resources = 2573 IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg); 2574 cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg)); 2575 cm_id_priv->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg); 2576 cm_id_priv->target_ack_delay = 2577 IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg); 2578 cm_id_priv->av.timeout = 2579 cm_ack_timeout(cm_id_priv->target_ack_delay, 2580 cm_id_priv->av.timeout - 1); 2581 cm_id_priv->alt_av.timeout = 2582 cm_ack_timeout(cm_id_priv->target_ack_delay, 2583 cm_id_priv->alt_av.timeout - 1); 2584 2585 ib_cancel_mad(cm_id_priv->msg); 2586 cm_queue_work_unlock(cm_id_priv, work); 2587 return 0; 2588 2589 error: 2590 cm_deref_id(cm_id_priv); 2591 return ret; 2592 } 2593 2594 static int cm_establish_handler(struct cm_work *work) 2595 { 2596 struct cm_id_private *cm_id_priv; 2597 2598 /* See comment in cm_establish about lookup. */ 2599 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id); 2600 if (!cm_id_priv) 2601 return -EINVAL; 2602 2603 spin_lock_irq(&cm_id_priv->lock); 2604 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) { 2605 spin_unlock_irq(&cm_id_priv->lock); 2606 goto out; 2607 } 2608 2609 ib_cancel_mad(cm_id_priv->msg); 2610 cm_queue_work_unlock(cm_id_priv, work); 2611 return 0; 2612 out: 2613 cm_deref_id(cm_id_priv); 2614 return -EINVAL; 2615 } 2616 2617 static int cm_rtu_handler(struct cm_work *work) 2618 { 2619 struct cm_id_private *cm_id_priv; 2620 struct cm_rtu_msg *rtu_msg; 2621 2622 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad; 2623 cm_id_priv = cm_acquire_id( 2624 cpu_to_be32(IBA_GET(CM_RTU_REMOTE_COMM_ID, rtu_msg)), 2625 cpu_to_be32(IBA_GET(CM_RTU_LOCAL_COMM_ID, rtu_msg))); 2626 if (!cm_id_priv) 2627 return -EINVAL; 2628 2629 work->cm_event.private_data = 2630 IBA_GET_MEM_PTR(CM_RTU_PRIVATE_DATA, rtu_msg); 2631 2632 spin_lock_irq(&cm_id_priv->lock); 2633 if (cm_id_priv->id.state != IB_CM_REP_SENT && 2634 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) { 2635 spin_unlock_irq(&cm_id_priv->lock); 2636 atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] 2637 [CM_RTU_COUNTER]); 2638 goto out; 2639 } 2640 cm_id_priv->id.state = IB_CM_ESTABLISHED; 2641 2642 ib_cancel_mad(cm_id_priv->msg); 2643 cm_queue_work_unlock(cm_id_priv, work); 2644 return 0; 2645 out: 2646 cm_deref_id(cm_id_priv); 2647 return -EINVAL; 2648 } 2649 2650 static void cm_format_dreq(struct cm_dreq_msg *dreq_msg, 2651 struct cm_id_private *cm_id_priv, 2652 const void *private_data, 2653 u8 private_data_len) 2654 { 2655 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID, 2656 cm_form_tid(cm_id_priv)); 2657 IBA_SET(CM_DREQ_LOCAL_COMM_ID, dreq_msg, 2658 be32_to_cpu(cm_id_priv->id.local_id)); 2659 IBA_SET(CM_DREQ_REMOTE_COMM_ID, dreq_msg, 2660 be32_to_cpu(cm_id_priv->id.remote_id)); 2661 IBA_SET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg, 2662 be32_to_cpu(cm_id_priv->remote_qpn)); 2663 2664 if (private_data && private_data_len) 2665 IBA_SET_MEM(CM_DREQ_PRIVATE_DATA, dreq_msg, private_data, 2666 private_data_len); 2667 } 2668 2669 static void cm_issue_dreq(struct cm_id_private *cm_id_priv) 2670 { 2671 struct ib_mad_send_buf *msg; 2672 int ret; 2673 2674 lockdep_assert_held(&cm_id_priv->lock); 2675 2676 msg = cm_alloc_msg(cm_id_priv); 2677 if (IS_ERR(msg)) 2678 return; 2679 2680 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv, NULL, 0); 2681 2682 trace_icm_send_dreq(&cm_id_priv->id); 2683 ret = ib_post_send_mad(msg, NULL); 2684 if (ret) 2685 cm_free_msg(msg); 2686 } 2687 2688 int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data, 2689 u8 private_data_len) 2690 { 2691 struct cm_id_private *cm_id_priv = 2692 container_of(cm_id, struct cm_id_private, id); 2693 struct ib_mad_send_buf *msg; 2694 unsigned long flags; 2695 int ret; 2696 2697 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE) 2698 return -EINVAL; 2699 2700 spin_lock_irqsave(&cm_id_priv->lock, flags); 2701 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) { 2702 trace_icm_dreq_skipped(&cm_id_priv->id); 2703 ret = -EINVAL; 2704 goto unlock; 2705 } 2706 2707 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT || 2708 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) 2709 ib_cancel_mad(cm_id_priv->msg); 2710 2711 msg = cm_alloc_priv_msg(cm_id_priv, IB_CM_DREQ_SENT); 2712 if (IS_ERR(msg)) { 2713 cm_enter_timewait(cm_id_priv); 2714 ret = PTR_ERR(msg); 2715 goto unlock; 2716 } 2717 2718 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv, 2719 private_data, private_data_len); 2720 2721 trace_icm_send_dreq(&cm_id_priv->id); 2722 ret = ib_post_send_mad(msg, NULL); 2723 if (ret) { 2724 cm_enter_timewait(cm_id_priv); 2725 cm_free_priv_msg(msg); 2726 goto unlock; 2727 } 2728 2729 cm_id_priv->id.state = IB_CM_DREQ_SENT; 2730 unlock: 2731 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2732 return ret; 2733 } 2734 EXPORT_SYMBOL(ib_send_cm_dreq); 2735 2736 static void cm_format_drep(struct cm_drep_msg *drep_msg, 2737 struct cm_id_private *cm_id_priv, 2738 const void *private_data, 2739 u8 private_data_len) 2740 { 2741 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid); 2742 IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg, 2743 be32_to_cpu(cm_id_priv->id.local_id)); 2744 IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg, 2745 be32_to_cpu(cm_id_priv->id.remote_id)); 2746 2747 if (private_data && private_data_len) 2748 IBA_SET_MEM(CM_DREP_PRIVATE_DATA, drep_msg, private_data, 2749 private_data_len); 2750 } 2751 2752 static int cm_send_drep_locked(struct cm_id_private *cm_id_priv, 2753 void *private_data, u8 private_data_len) 2754 { 2755 struct ib_mad_send_buf *msg; 2756 int ret; 2757 2758 lockdep_assert_held(&cm_id_priv->lock); 2759 2760 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE) 2761 return -EINVAL; 2762 2763 if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) { 2764 trace_icm_send_drep_err(&cm_id_priv->id); 2765 kfree(private_data); 2766 return -EINVAL; 2767 } 2768 2769 cm_set_private_data(cm_id_priv, private_data, private_data_len); 2770 cm_enter_timewait(cm_id_priv); 2771 2772 msg = cm_alloc_msg(cm_id_priv); 2773 if (IS_ERR(msg)) 2774 return PTR_ERR(msg); 2775 2776 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, 2777 private_data, private_data_len); 2778 2779 trace_icm_send_drep(&cm_id_priv->id); 2780 ret = ib_post_send_mad(msg, NULL); 2781 if (ret) { 2782 cm_free_msg(msg); 2783 return ret; 2784 } 2785 return 0; 2786 } 2787 2788 int ib_send_cm_drep(struct ib_cm_id *cm_id, const void *private_data, 2789 u8 private_data_len) 2790 { 2791 struct cm_id_private *cm_id_priv = 2792 container_of(cm_id, struct cm_id_private, id); 2793 unsigned long flags; 2794 void *data; 2795 int ret; 2796 2797 data = cm_copy_private_data(private_data, private_data_len); 2798 if (IS_ERR(data)) 2799 return PTR_ERR(data); 2800 2801 spin_lock_irqsave(&cm_id_priv->lock, flags); 2802 ret = cm_send_drep_locked(cm_id_priv, data, private_data_len); 2803 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2804 return ret; 2805 } 2806 EXPORT_SYMBOL(ib_send_cm_drep); 2807 2808 static int cm_issue_drep(struct cm_port *port, 2809 struct ib_mad_recv_wc *mad_recv_wc) 2810 { 2811 struct ib_mad_send_buf *msg = NULL; 2812 struct cm_dreq_msg *dreq_msg; 2813 struct cm_drep_msg *drep_msg; 2814 int ret; 2815 2816 ret = cm_alloc_response_msg(port, mad_recv_wc, true, &msg); 2817 if (ret) 2818 return ret; 2819 2820 dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad; 2821 drep_msg = (struct cm_drep_msg *) msg->mad; 2822 2823 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid); 2824 IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg, 2825 IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg)); 2826 IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg, 2827 IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)); 2828 2829 trace_icm_issue_drep( 2830 IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg), 2831 IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)); 2832 ret = ib_post_send_mad(msg, NULL); 2833 if (ret) 2834 cm_free_msg(msg); 2835 2836 return ret; 2837 } 2838 2839 static int cm_dreq_handler(struct cm_work *work) 2840 { 2841 struct cm_id_private *cm_id_priv; 2842 struct cm_dreq_msg *dreq_msg; 2843 struct ib_mad_send_buf *msg = NULL; 2844 2845 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad; 2846 cm_id_priv = cm_acquire_id( 2847 cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)), 2848 cpu_to_be32(IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg))); 2849 if (!cm_id_priv) { 2850 atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] 2851 [CM_DREQ_COUNTER]); 2852 cm_issue_drep(work->port, work->mad_recv_wc); 2853 trace_icm_no_priv_err( 2854 IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg), 2855 IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)); 2856 return -EINVAL; 2857 } 2858 2859 work->cm_event.private_data = 2860 IBA_GET_MEM_PTR(CM_DREQ_PRIVATE_DATA, dreq_msg); 2861 2862 spin_lock_irq(&cm_id_priv->lock); 2863 if (cm_id_priv->local_qpn != 2864 cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg))) 2865 goto unlock; 2866 2867 switch (cm_id_priv->id.state) { 2868 case IB_CM_REP_SENT: 2869 case IB_CM_DREQ_SENT: 2870 case IB_CM_MRA_REP_RCVD: 2871 ib_cancel_mad(cm_id_priv->msg); 2872 break; 2873 case IB_CM_ESTABLISHED: 2874 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT || 2875 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) 2876 ib_cancel_mad(cm_id_priv->msg); 2877 break; 2878 case IB_CM_TIMEWAIT: 2879 atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] 2880 [CM_DREQ_COUNTER]); 2881 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc, 2882 true); 2883 if (IS_ERR(msg)) 2884 goto unlock; 2885 2886 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, 2887 cm_id_priv->private_data, 2888 cm_id_priv->private_data_len); 2889 spin_unlock_irq(&cm_id_priv->lock); 2890 2891 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) || 2892 ib_post_send_mad(msg, NULL)) 2893 cm_free_msg(msg); 2894 goto deref; 2895 case IB_CM_DREQ_RCVD: 2896 atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] 2897 [CM_DREQ_COUNTER]); 2898 goto unlock; 2899 default: 2900 trace_icm_dreq_unknown_err(&cm_id_priv->id); 2901 goto unlock; 2902 } 2903 cm_id_priv->id.state = IB_CM_DREQ_RCVD; 2904 cm_id_priv->tid = dreq_msg->hdr.tid; 2905 cm_queue_work_unlock(cm_id_priv, work); 2906 return 0; 2907 2908 unlock: spin_unlock_irq(&cm_id_priv->lock); 2909 deref: cm_deref_id(cm_id_priv); 2910 return -EINVAL; 2911 } 2912 2913 static int cm_drep_handler(struct cm_work *work) 2914 { 2915 struct cm_id_private *cm_id_priv; 2916 struct cm_drep_msg *drep_msg; 2917 2918 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad; 2919 cm_id_priv = cm_acquire_id( 2920 cpu_to_be32(IBA_GET(CM_DREP_REMOTE_COMM_ID, drep_msg)), 2921 cpu_to_be32(IBA_GET(CM_DREP_LOCAL_COMM_ID, drep_msg))); 2922 if (!cm_id_priv) 2923 return -EINVAL; 2924 2925 work->cm_event.private_data = 2926 IBA_GET_MEM_PTR(CM_DREP_PRIVATE_DATA, drep_msg); 2927 2928 spin_lock_irq(&cm_id_priv->lock); 2929 if (cm_id_priv->id.state != IB_CM_DREQ_SENT && 2930 cm_id_priv->id.state != IB_CM_DREQ_RCVD) { 2931 spin_unlock_irq(&cm_id_priv->lock); 2932 goto out; 2933 } 2934 cm_enter_timewait(cm_id_priv); 2935 2936 ib_cancel_mad(cm_id_priv->msg); 2937 cm_queue_work_unlock(cm_id_priv, work); 2938 return 0; 2939 out: 2940 cm_deref_id(cm_id_priv); 2941 return -EINVAL; 2942 } 2943 2944 static int cm_send_rej_locked(struct cm_id_private *cm_id_priv, 2945 enum ib_cm_rej_reason reason, void *ari, 2946 u8 ari_length, const void *private_data, 2947 u8 private_data_len) 2948 { 2949 enum ib_cm_state state = cm_id_priv->id.state; 2950 struct ib_mad_send_buf *msg; 2951 int ret; 2952 2953 lockdep_assert_held(&cm_id_priv->lock); 2954 2955 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) || 2956 (ari && ari_length > IB_CM_REJ_ARI_LENGTH)) 2957 return -EINVAL; 2958 2959 trace_icm_send_rej(&cm_id_priv->id, reason); 2960 2961 switch (state) { 2962 case IB_CM_REQ_SENT: 2963 case IB_CM_MRA_REQ_RCVD: 2964 case IB_CM_REQ_RCVD: 2965 case IB_CM_MRA_REQ_SENT: 2966 case IB_CM_REP_RCVD: 2967 case IB_CM_MRA_REP_SENT: 2968 cm_reset_to_idle(cm_id_priv); 2969 msg = cm_alloc_msg(cm_id_priv); 2970 if (IS_ERR(msg)) 2971 return PTR_ERR(msg); 2972 cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason, 2973 ari, ari_length, private_data, private_data_len, 2974 state); 2975 break; 2976 case IB_CM_REP_SENT: 2977 case IB_CM_MRA_REP_RCVD: 2978 cm_enter_timewait(cm_id_priv); 2979 msg = cm_alloc_msg(cm_id_priv); 2980 if (IS_ERR(msg)) 2981 return PTR_ERR(msg); 2982 cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason, 2983 ari, ari_length, private_data, private_data_len, 2984 state); 2985 break; 2986 default: 2987 trace_icm_send_unknown_rej_err(&cm_id_priv->id); 2988 return -EINVAL; 2989 } 2990 2991 ret = ib_post_send_mad(msg, NULL); 2992 if (ret) { 2993 cm_free_msg(msg); 2994 return ret; 2995 } 2996 2997 return 0; 2998 } 2999 3000 int ib_send_cm_rej(struct ib_cm_id *cm_id, enum ib_cm_rej_reason reason, 3001 void *ari, u8 ari_length, const void *private_data, 3002 u8 private_data_len) 3003 { 3004 struct cm_id_private *cm_id_priv = 3005 container_of(cm_id, struct cm_id_private, id); 3006 unsigned long flags; 3007 int ret; 3008 3009 spin_lock_irqsave(&cm_id_priv->lock, flags); 3010 ret = cm_send_rej_locked(cm_id_priv, reason, ari, ari_length, 3011 private_data, private_data_len); 3012 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3013 return ret; 3014 } 3015 EXPORT_SYMBOL(ib_send_cm_rej); 3016 3017 static void cm_format_rej_event(struct cm_work *work) 3018 { 3019 struct cm_rej_msg *rej_msg; 3020 struct ib_cm_rej_event_param *param; 3021 3022 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; 3023 param = &work->cm_event.param.rej_rcvd; 3024 param->ari = IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg); 3025 param->ari_length = IBA_GET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg); 3026 param->reason = IBA_GET(CM_REJ_REASON, rej_msg); 3027 work->cm_event.private_data = 3028 IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg); 3029 } 3030 3031 static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg) 3032 { 3033 struct cm_id_private *cm_id_priv; 3034 __be32 remote_id; 3035 3036 remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg)); 3037 3038 if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) { 3039 cm_id_priv = cm_find_remote_id( 3040 *((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)), 3041 remote_id); 3042 } else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) == 3043 CM_MSG_RESPONSE_REQ) 3044 cm_id_priv = cm_acquire_id( 3045 cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)), 3046 0); 3047 else 3048 cm_id_priv = cm_acquire_id( 3049 cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)), 3050 remote_id); 3051 3052 return cm_id_priv; 3053 } 3054 3055 static int cm_rej_handler(struct cm_work *work) 3056 { 3057 struct cm_id_private *cm_id_priv; 3058 struct cm_rej_msg *rej_msg; 3059 3060 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; 3061 cm_id_priv = cm_acquire_rejected_id(rej_msg); 3062 if (!cm_id_priv) 3063 return -EINVAL; 3064 3065 cm_format_rej_event(work); 3066 3067 spin_lock_irq(&cm_id_priv->lock); 3068 switch (cm_id_priv->id.state) { 3069 case IB_CM_REQ_SENT: 3070 case IB_CM_MRA_REQ_RCVD: 3071 case IB_CM_REP_SENT: 3072 case IB_CM_MRA_REP_RCVD: 3073 ib_cancel_mad(cm_id_priv->msg); 3074 fallthrough; 3075 case IB_CM_REQ_RCVD: 3076 case IB_CM_MRA_REQ_SENT: 3077 if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN) 3078 cm_enter_timewait(cm_id_priv); 3079 else 3080 cm_reset_to_idle(cm_id_priv); 3081 break; 3082 case IB_CM_DREQ_SENT: 3083 ib_cancel_mad(cm_id_priv->msg); 3084 fallthrough; 3085 case IB_CM_REP_RCVD: 3086 case IB_CM_MRA_REP_SENT: 3087 cm_enter_timewait(cm_id_priv); 3088 break; 3089 case IB_CM_ESTABLISHED: 3090 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT || 3091 cm_id_priv->id.lap_state == IB_CM_LAP_SENT) { 3092 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT) 3093 ib_cancel_mad(cm_id_priv->msg); 3094 cm_enter_timewait(cm_id_priv); 3095 break; 3096 } 3097 fallthrough; 3098 default: 3099 trace_icm_rej_unknown_err(&cm_id_priv->id); 3100 spin_unlock_irq(&cm_id_priv->lock); 3101 goto out; 3102 } 3103 3104 cm_queue_work_unlock(cm_id_priv, work); 3105 return 0; 3106 out: 3107 cm_deref_id(cm_id_priv); 3108 return -EINVAL; 3109 } 3110 3111 int ib_prepare_cm_mra(struct ib_cm_id *cm_id) 3112 { 3113 struct cm_id_private *cm_id_priv; 3114 enum ib_cm_state cm_state; 3115 enum ib_cm_lap_state lap_state; 3116 unsigned long flags; 3117 int ret = 0; 3118 3119 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3120 3121 spin_lock_irqsave(&cm_id_priv->lock, flags); 3122 switch (cm_id_priv->id.state) { 3123 case IB_CM_REQ_RCVD: 3124 cm_state = IB_CM_MRA_REQ_SENT; 3125 lap_state = cm_id->lap_state; 3126 break; 3127 case IB_CM_REP_RCVD: 3128 cm_state = IB_CM_MRA_REP_SENT; 3129 lap_state = cm_id->lap_state; 3130 break; 3131 case IB_CM_ESTABLISHED: 3132 if (cm_id->lap_state == IB_CM_LAP_RCVD) { 3133 cm_state = cm_id->state; 3134 lap_state = IB_CM_MRA_LAP_SENT; 3135 break; 3136 } 3137 fallthrough; 3138 default: 3139 trace_icm_prepare_mra_unknown_err(&cm_id_priv->id); 3140 ret = -EINVAL; 3141 goto error_unlock; 3142 } 3143 3144 cm_id->state = cm_state; 3145 cm_id->lap_state = lap_state; 3146 cm_set_private_data(cm_id_priv, NULL, 0); 3147 3148 error_unlock: 3149 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3150 return ret; 3151 } 3152 EXPORT_SYMBOL(ib_prepare_cm_mra); 3153 3154 static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg) 3155 { 3156 switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) { 3157 case CM_MSG_RESPONSE_REQ: 3158 return cm_acquire_id( 3159 cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)), 3160 0); 3161 case CM_MSG_RESPONSE_REP: 3162 case CM_MSG_RESPONSE_OTHER: 3163 return cm_acquire_id( 3164 cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)), 3165 cpu_to_be32(IBA_GET(CM_MRA_LOCAL_COMM_ID, mra_msg))); 3166 default: 3167 return NULL; 3168 } 3169 } 3170 3171 static int cm_mra_handler(struct cm_work *work) 3172 { 3173 struct cm_id_private *cm_id_priv; 3174 struct cm_mra_msg *mra_msg; 3175 int timeout; 3176 3177 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad; 3178 cm_id_priv = cm_acquire_mraed_id(mra_msg); 3179 if (!cm_id_priv) 3180 return -EINVAL; 3181 3182 work->cm_event.private_data = 3183 IBA_GET_MEM_PTR(CM_MRA_PRIVATE_DATA, mra_msg); 3184 work->cm_event.param.mra_rcvd.service_timeout = 3185 IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg); 3186 timeout = cm_convert_to_ms(IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg)) + 3187 cm_convert_to_ms(cm_id_priv->av.timeout); 3188 3189 spin_lock_irq(&cm_id_priv->lock); 3190 switch (cm_id_priv->id.state) { 3191 case IB_CM_REQ_SENT: 3192 if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) != 3193 CM_MSG_RESPONSE_REQ || 3194 ib_modify_mad(cm_id_priv->msg, timeout)) 3195 goto out; 3196 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD; 3197 break; 3198 case IB_CM_REP_SENT: 3199 if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) != 3200 CM_MSG_RESPONSE_REP || 3201 ib_modify_mad(cm_id_priv->msg, timeout)) 3202 goto out; 3203 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD; 3204 break; 3205 case IB_CM_ESTABLISHED: 3206 if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) != 3207 CM_MSG_RESPONSE_OTHER || 3208 cm_id_priv->id.lap_state != IB_CM_LAP_SENT || 3209 ib_modify_mad(cm_id_priv->msg, timeout)) { 3210 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) 3211 atomic_long_inc( 3212 &work->port->counters[CM_RECV_DUPLICATES] 3213 [CM_MRA_COUNTER]); 3214 goto out; 3215 } 3216 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD; 3217 break; 3218 case IB_CM_MRA_REQ_RCVD: 3219 case IB_CM_MRA_REP_RCVD: 3220 atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] 3221 [CM_MRA_COUNTER]); 3222 fallthrough; 3223 default: 3224 trace_icm_mra_unknown_err(&cm_id_priv->id); 3225 goto out; 3226 } 3227 3228 cm_id_priv->msg->context[1] = (void *) (unsigned long) 3229 cm_id_priv->id.state; 3230 cm_queue_work_unlock(cm_id_priv, work); 3231 return 0; 3232 out: 3233 spin_unlock_irq(&cm_id_priv->lock); 3234 cm_deref_id(cm_id_priv); 3235 return -EINVAL; 3236 } 3237 3238 static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg, 3239 struct sa_path_rec *path) 3240 { 3241 u32 lid; 3242 3243 if (path->rec_type != SA_PATH_REC_TYPE_OPA) { 3244 sa_path_set_dlid(path, IBA_GET(CM_LAP_ALTERNATE_LOCAL_PORT_LID, 3245 lap_msg)); 3246 sa_path_set_slid(path, IBA_GET(CM_LAP_ALTERNATE_REMOTE_PORT_LID, 3247 lap_msg)); 3248 } else { 3249 lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR( 3250 CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg)); 3251 sa_path_set_dlid(path, lid); 3252 3253 lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR( 3254 CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg)); 3255 sa_path_set_slid(path, lid); 3256 } 3257 } 3258 3259 static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv, 3260 struct sa_path_rec *path, 3261 struct cm_lap_msg *lap_msg) 3262 { 3263 path->dgid = *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg); 3264 path->sgid = 3265 *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg); 3266 path->flow_label = 3267 cpu_to_be32(IBA_GET(CM_LAP_ALTERNATE_FLOW_LABEL, lap_msg)); 3268 path->hop_limit = IBA_GET(CM_LAP_ALTERNATE_HOP_LIMIT, lap_msg); 3269 path->traffic_class = IBA_GET(CM_LAP_ALTERNATE_TRAFFIC_CLASS, lap_msg); 3270 path->reversible = 1; 3271 path->pkey = cm_id_priv->pkey; 3272 path->sl = IBA_GET(CM_LAP_ALTERNATE_SL, lap_msg); 3273 path->mtu_selector = IB_SA_EQ; 3274 path->mtu = cm_id_priv->path_mtu; 3275 path->rate_selector = IB_SA_EQ; 3276 path->rate = IBA_GET(CM_LAP_ALTERNATE_PACKET_RATE, lap_msg); 3277 path->packet_life_time_selector = IB_SA_EQ; 3278 path->packet_life_time = 3279 IBA_GET(CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT, lap_msg); 3280 path->packet_life_time -= (path->packet_life_time > 0); 3281 cm_format_path_lid_from_lap(lap_msg, path); 3282 } 3283 3284 static int cm_lap_handler(struct cm_work *work) 3285 { 3286 struct cm_id_private *cm_id_priv; 3287 struct cm_lap_msg *lap_msg; 3288 struct ib_cm_lap_event_param *param; 3289 struct ib_mad_send_buf *msg = NULL; 3290 struct rdma_ah_attr ah_attr; 3291 struct cm_av alt_av = {}; 3292 int ret; 3293 3294 /* Currently Alternate path messages are not supported for 3295 * RoCE link layer. 3296 */ 3297 if (rdma_protocol_roce(work->port->cm_dev->ib_device, 3298 work->port->port_num)) 3299 return -EINVAL; 3300 3301 /* todo: verify LAP request and send reject APR if invalid. */ 3302 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad; 3303 cm_id_priv = cm_acquire_id( 3304 cpu_to_be32(IBA_GET(CM_LAP_REMOTE_COMM_ID, lap_msg)), 3305 cpu_to_be32(IBA_GET(CM_LAP_LOCAL_COMM_ID, lap_msg))); 3306 if (!cm_id_priv) 3307 return -EINVAL; 3308 3309 param = &work->cm_event.param.lap_rcvd; 3310 memset(&work->path[0], 0, sizeof(work->path[1])); 3311 cm_path_set_rec_type(work->port->cm_dev->ib_device, 3312 work->port->port_num, &work->path[0], 3313 IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, 3314 lap_msg)); 3315 param->alternate_path = &work->path[0]; 3316 cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg); 3317 work->cm_event.private_data = 3318 IBA_GET_MEM_PTR(CM_LAP_PRIVATE_DATA, lap_msg); 3319 3320 ret = ib_init_ah_attr_from_wc(work->port->cm_dev->ib_device, 3321 work->port->port_num, 3322 work->mad_recv_wc->wc, 3323 work->mad_recv_wc->recv_buf.grh, 3324 &ah_attr); 3325 if (ret) 3326 goto deref; 3327 3328 ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av); 3329 if (ret) { 3330 rdma_destroy_ah_attr(&ah_attr); 3331 goto deref; 3332 } 3333 3334 spin_lock_irq(&cm_id_priv->lock); 3335 cm_init_av_for_lap(work->port, work->mad_recv_wc->wc, 3336 &ah_attr, &cm_id_priv->av); 3337 cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av); 3338 3339 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) 3340 goto unlock; 3341 3342 switch (cm_id_priv->id.lap_state) { 3343 case IB_CM_LAP_UNINIT: 3344 case IB_CM_LAP_IDLE: 3345 break; 3346 case IB_CM_MRA_LAP_SENT: 3347 atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] 3348 [CM_LAP_COUNTER]); 3349 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc, 3350 true); 3351 if (IS_ERR(msg)) 3352 goto unlock; 3353 3354 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 3355 CM_MSG_RESPONSE_OTHER, 3356 cm_id_priv->private_data, 3357 cm_id_priv->private_data_len); 3358 spin_unlock_irq(&cm_id_priv->lock); 3359 3360 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) || 3361 ib_post_send_mad(msg, NULL)) 3362 cm_free_msg(msg); 3363 goto deref; 3364 case IB_CM_LAP_RCVD: 3365 atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] 3366 [CM_LAP_COUNTER]); 3367 goto unlock; 3368 default: 3369 goto unlock; 3370 } 3371 3372 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD; 3373 cm_id_priv->tid = lap_msg->hdr.tid; 3374 cm_queue_work_unlock(cm_id_priv, work); 3375 return 0; 3376 3377 unlock: spin_unlock_irq(&cm_id_priv->lock); 3378 deref: cm_deref_id(cm_id_priv); 3379 return -EINVAL; 3380 } 3381 3382 static int cm_apr_handler(struct cm_work *work) 3383 { 3384 struct cm_id_private *cm_id_priv; 3385 struct cm_apr_msg *apr_msg; 3386 3387 /* Currently Alternate path messages are not supported for 3388 * RoCE link layer. 3389 */ 3390 if (rdma_protocol_roce(work->port->cm_dev->ib_device, 3391 work->port->port_num)) 3392 return -EINVAL; 3393 3394 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad; 3395 cm_id_priv = cm_acquire_id( 3396 cpu_to_be32(IBA_GET(CM_APR_REMOTE_COMM_ID, apr_msg)), 3397 cpu_to_be32(IBA_GET(CM_APR_LOCAL_COMM_ID, apr_msg))); 3398 if (!cm_id_priv) 3399 return -EINVAL; /* Unmatched reply. */ 3400 3401 work->cm_event.param.apr_rcvd.ap_status = 3402 IBA_GET(CM_APR_AR_STATUS, apr_msg); 3403 work->cm_event.param.apr_rcvd.apr_info = 3404 IBA_GET_MEM_PTR(CM_APR_ADDITIONAL_INFORMATION, apr_msg); 3405 work->cm_event.param.apr_rcvd.info_len = 3406 IBA_GET(CM_APR_ADDITIONAL_INFORMATION_LENGTH, apr_msg); 3407 work->cm_event.private_data = 3408 IBA_GET_MEM_PTR(CM_APR_PRIVATE_DATA, apr_msg); 3409 3410 spin_lock_irq(&cm_id_priv->lock); 3411 if (cm_id_priv->id.state != IB_CM_ESTABLISHED || 3412 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT && 3413 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) { 3414 spin_unlock_irq(&cm_id_priv->lock); 3415 goto out; 3416 } 3417 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE; 3418 ib_cancel_mad(cm_id_priv->msg); 3419 cm_queue_work_unlock(cm_id_priv, work); 3420 return 0; 3421 out: 3422 cm_deref_id(cm_id_priv); 3423 return -EINVAL; 3424 } 3425 3426 static int cm_timewait_handler(struct cm_work *work) 3427 { 3428 struct cm_timewait_info *timewait_info; 3429 struct cm_id_private *cm_id_priv; 3430 3431 timewait_info = container_of(work, struct cm_timewait_info, work); 3432 spin_lock_irq(&cm.lock); 3433 list_del(&timewait_info->list); 3434 spin_unlock_irq(&cm.lock); 3435 3436 cm_id_priv = cm_acquire_id(timewait_info->work.local_id, 3437 timewait_info->work.remote_id); 3438 if (!cm_id_priv) 3439 return -EINVAL; 3440 3441 spin_lock_irq(&cm_id_priv->lock); 3442 if (cm_id_priv->id.state != IB_CM_TIMEWAIT || 3443 cm_id_priv->remote_qpn != timewait_info->remote_qpn) { 3444 spin_unlock_irq(&cm_id_priv->lock); 3445 goto out; 3446 } 3447 cm_id_priv->id.state = IB_CM_IDLE; 3448 cm_queue_work_unlock(cm_id_priv, work); 3449 return 0; 3450 out: 3451 cm_deref_id(cm_id_priv); 3452 return -EINVAL; 3453 } 3454 3455 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg, 3456 struct cm_id_private *cm_id_priv, 3457 struct ib_cm_sidr_req_param *param) 3458 { 3459 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID, 3460 cm_form_tid(cm_id_priv)); 3461 IBA_SET(CM_SIDR_REQ_REQUESTID, sidr_req_msg, 3462 be32_to_cpu(cm_id_priv->id.local_id)); 3463 IBA_SET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg, 3464 be16_to_cpu(param->path->pkey)); 3465 IBA_SET(CM_SIDR_REQ_SERVICEID, sidr_req_msg, 3466 be64_to_cpu(param->service_id)); 3467 3468 if (param->private_data && param->private_data_len) 3469 IBA_SET_MEM(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg, 3470 param->private_data, param->private_data_len); 3471 } 3472 3473 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id, 3474 struct ib_cm_sidr_req_param *param) 3475 { 3476 struct cm_id_private *cm_id_priv; 3477 struct ib_mad_send_buf *msg; 3478 struct cm_av av = {}; 3479 unsigned long flags; 3480 int ret; 3481 3482 if (!param->path || (param->private_data && 3483 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE)) 3484 return -EINVAL; 3485 3486 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3487 ret = cm_init_av_by_path(param->path, param->sgid_attr, &av); 3488 if (ret) 3489 return ret; 3490 3491 spin_lock_irqsave(&cm_id_priv->lock, flags); 3492 cm_move_av_from_path(&cm_id_priv->av, &av); 3493 cm_id->service_id = param->service_id; 3494 cm_id_priv->timeout_ms = param->timeout_ms; 3495 cm_id_priv->max_cm_retries = param->max_cm_retries; 3496 if (cm_id->state != IB_CM_IDLE) { 3497 ret = -EINVAL; 3498 goto out_unlock; 3499 } 3500 3501 msg = cm_alloc_priv_msg(cm_id_priv, IB_CM_SIDR_REQ_SENT); 3502 if (IS_ERR(msg)) { 3503 ret = PTR_ERR(msg); 3504 goto out_unlock; 3505 } 3506 3507 cm_format_sidr_req((struct cm_sidr_req_msg *)msg->mad, cm_id_priv, 3508 param); 3509 3510 trace_icm_send_sidr_req(&cm_id_priv->id); 3511 ret = ib_post_send_mad(msg, NULL); 3512 if (ret) 3513 goto out_free; 3514 cm_id->state = IB_CM_SIDR_REQ_SENT; 3515 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3516 return 0; 3517 out_free: 3518 cm_free_priv_msg(msg); 3519 out_unlock: 3520 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3521 return ret; 3522 } 3523 EXPORT_SYMBOL(ib_send_cm_sidr_req); 3524 3525 static void cm_format_sidr_req_event(struct cm_work *work, 3526 const struct cm_id_private *rx_cm_id, 3527 struct ib_cm_id *listen_id) 3528 { 3529 struct cm_sidr_req_msg *sidr_req_msg; 3530 struct ib_cm_sidr_req_event_param *param; 3531 3532 sidr_req_msg = (struct cm_sidr_req_msg *) 3533 work->mad_recv_wc->recv_buf.mad; 3534 param = &work->cm_event.param.sidr_req_rcvd; 3535 param->pkey = IBA_GET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg); 3536 param->listen_id = listen_id; 3537 param->service_id = 3538 cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg)); 3539 param->bth_pkey = cm_get_bth_pkey(work); 3540 param->port = work->port->port_num; 3541 param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr; 3542 work->cm_event.private_data = 3543 IBA_GET_MEM_PTR(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg); 3544 } 3545 3546 static int cm_sidr_req_handler(struct cm_work *work) 3547 { 3548 struct cm_id_private *cm_id_priv, *listen_cm_id_priv; 3549 struct cm_sidr_req_msg *sidr_req_msg; 3550 struct ib_wc *wc; 3551 int ret; 3552 3553 cm_id_priv = 3554 cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL); 3555 if (IS_ERR(cm_id_priv)) 3556 return PTR_ERR(cm_id_priv); 3557 3558 /* Record SGID/SLID and request ID for lookup. */ 3559 sidr_req_msg = (struct cm_sidr_req_msg *) 3560 work->mad_recv_wc->recv_buf.mad; 3561 3562 cm_id_priv->id.remote_id = 3563 cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg)); 3564 cm_id_priv->id.service_id = 3565 cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg)); 3566 cm_id_priv->tid = sidr_req_msg->hdr.tid; 3567 3568 wc = work->mad_recv_wc->wc; 3569 cm_id_priv->sidr_slid = wc->slid; 3570 ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc, 3571 work->mad_recv_wc->recv_buf.grh, 3572 &cm_id_priv->av); 3573 if (ret) 3574 goto out; 3575 3576 spin_lock_irq(&cm.lock); 3577 listen_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); 3578 if (listen_cm_id_priv) { 3579 spin_unlock_irq(&cm.lock); 3580 atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] 3581 [CM_SIDR_REQ_COUNTER]); 3582 goto out; /* Duplicate message. */ 3583 } 3584 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD; 3585 listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device, 3586 cm_id_priv->id.service_id); 3587 if (!listen_cm_id_priv) { 3588 spin_unlock_irq(&cm.lock); 3589 ib_send_cm_sidr_rep(&cm_id_priv->id, 3590 &(struct ib_cm_sidr_rep_param){ 3591 .status = IB_SIDR_UNSUPPORTED }); 3592 goto out; /* No match. */ 3593 } 3594 spin_unlock_irq(&cm.lock); 3595 3596 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; 3597 cm_id_priv->id.context = listen_cm_id_priv->id.context; 3598 3599 /* 3600 * A SIDR ID does not need to be in the xarray since it does not receive 3601 * mads, is not placed in the remote_id or remote_qpn rbtree, and does 3602 * not enter timewait. 3603 */ 3604 3605 cm_format_sidr_req_event(work, cm_id_priv, &listen_cm_id_priv->id); 3606 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event); 3607 cm_free_work(work); 3608 /* 3609 * A pointer to the listen_cm_id is held in the event, so this deref 3610 * must be after the event is delivered above. 3611 */ 3612 cm_deref_id(listen_cm_id_priv); 3613 if (ret) 3614 cm_destroy_id(&cm_id_priv->id, ret); 3615 return 0; 3616 out: 3617 ib_destroy_cm_id(&cm_id_priv->id); 3618 return -EINVAL; 3619 } 3620 3621 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg, 3622 struct cm_id_private *cm_id_priv, 3623 struct ib_cm_sidr_rep_param *param) 3624 { 3625 cm_format_mad_ece_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID, 3626 cm_id_priv->tid, param->ece.attr_mod); 3627 IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg, 3628 be32_to_cpu(cm_id_priv->id.remote_id)); 3629 IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status); 3630 IBA_SET(CM_SIDR_REP_QPN, sidr_rep_msg, param->qp_num); 3631 IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg, 3632 be64_to_cpu(cm_id_priv->id.service_id)); 3633 IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey); 3634 IBA_SET(CM_SIDR_REP_VENDOR_ID_L, sidr_rep_msg, 3635 param->ece.vendor_id & 0xFF); 3636 IBA_SET(CM_SIDR_REP_VENDOR_ID_H, sidr_rep_msg, 3637 (param->ece.vendor_id >> 8) & 0xFF); 3638 3639 if (param->info && param->info_length) 3640 IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg, 3641 param->info, param->info_length); 3642 3643 if (param->private_data && param->private_data_len) 3644 IBA_SET_MEM(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg, 3645 param->private_data, param->private_data_len); 3646 } 3647 3648 static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv, 3649 struct ib_cm_sidr_rep_param *param) 3650 { 3651 struct ib_mad_send_buf *msg; 3652 unsigned long flags; 3653 int ret; 3654 3655 lockdep_assert_held(&cm_id_priv->lock); 3656 3657 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) || 3658 (param->private_data && 3659 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE)) 3660 return -EINVAL; 3661 3662 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD) 3663 return -EINVAL; 3664 3665 msg = cm_alloc_msg(cm_id_priv); 3666 if (IS_ERR(msg)) 3667 return PTR_ERR(msg); 3668 3669 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv, 3670 param); 3671 trace_icm_send_sidr_rep(&cm_id_priv->id); 3672 ret = ib_post_send_mad(msg, NULL); 3673 if (ret) { 3674 cm_free_msg(msg); 3675 return ret; 3676 } 3677 cm_id_priv->id.state = IB_CM_IDLE; 3678 spin_lock_irqsave(&cm.lock, flags); 3679 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) { 3680 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); 3681 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node); 3682 } 3683 spin_unlock_irqrestore(&cm.lock, flags); 3684 return 0; 3685 } 3686 3687 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, 3688 struct ib_cm_sidr_rep_param *param) 3689 { 3690 struct cm_id_private *cm_id_priv = 3691 container_of(cm_id, struct cm_id_private, id); 3692 unsigned long flags; 3693 int ret; 3694 3695 spin_lock_irqsave(&cm_id_priv->lock, flags); 3696 ret = cm_send_sidr_rep_locked(cm_id_priv, param); 3697 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3698 return ret; 3699 } 3700 EXPORT_SYMBOL(ib_send_cm_sidr_rep); 3701 3702 static void cm_format_sidr_rep_event(struct cm_work *work, 3703 const struct cm_id_private *cm_id_priv) 3704 { 3705 struct cm_sidr_rep_msg *sidr_rep_msg; 3706 struct ib_cm_sidr_rep_event_param *param; 3707 3708 sidr_rep_msg = (struct cm_sidr_rep_msg *) 3709 work->mad_recv_wc->recv_buf.mad; 3710 param = &work->cm_event.param.sidr_rep_rcvd; 3711 param->status = IBA_GET(CM_SIDR_REP_STATUS, sidr_rep_msg); 3712 param->qkey = IBA_GET(CM_SIDR_REP_Q_KEY, sidr_rep_msg); 3713 param->qpn = IBA_GET(CM_SIDR_REP_QPN, sidr_rep_msg); 3714 param->info = IBA_GET_MEM_PTR(CM_SIDR_REP_ADDITIONAL_INFORMATION, 3715 sidr_rep_msg); 3716 param->info_len = IBA_GET(CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH, 3717 sidr_rep_msg); 3718 param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr; 3719 work->cm_event.private_data = 3720 IBA_GET_MEM_PTR(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg); 3721 } 3722 3723 static int cm_sidr_rep_handler(struct cm_work *work) 3724 { 3725 struct cm_sidr_rep_msg *sidr_rep_msg; 3726 struct cm_id_private *cm_id_priv; 3727 3728 sidr_rep_msg = (struct cm_sidr_rep_msg *) 3729 work->mad_recv_wc->recv_buf.mad; 3730 cm_id_priv = cm_acquire_id( 3731 cpu_to_be32(IBA_GET(CM_SIDR_REP_REQUESTID, sidr_rep_msg)), 0); 3732 if (!cm_id_priv) 3733 return -EINVAL; /* Unmatched reply. */ 3734 3735 spin_lock_irq(&cm_id_priv->lock); 3736 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) { 3737 spin_unlock_irq(&cm_id_priv->lock); 3738 goto out; 3739 } 3740 cm_id_priv->id.state = IB_CM_IDLE; 3741 ib_cancel_mad(cm_id_priv->msg); 3742 spin_unlock_irq(&cm_id_priv->lock); 3743 3744 cm_format_sidr_rep_event(work, cm_id_priv); 3745 cm_process_work(cm_id_priv, work); 3746 return 0; 3747 out: 3748 cm_deref_id(cm_id_priv); 3749 return -EINVAL; 3750 } 3751 3752 static void cm_process_send_error(struct cm_id_private *cm_id_priv, 3753 struct ib_mad_send_buf *msg, 3754 enum ib_wc_status wc_status) 3755 { 3756 enum ib_cm_state state = (unsigned long) msg->context[1]; 3757 struct ib_cm_event cm_event = {}; 3758 int ret; 3759 3760 /* Discard old sends. */ 3761 spin_lock_irq(&cm_id_priv->lock); 3762 if (msg != cm_id_priv->msg) { 3763 spin_unlock_irq(&cm_id_priv->lock); 3764 cm_free_msg(msg); 3765 cm_deref_id(cm_id_priv); 3766 return; 3767 } 3768 cm_free_priv_msg(msg); 3769 3770 if (state != cm_id_priv->id.state || wc_status == IB_WC_SUCCESS || 3771 wc_status == IB_WC_WR_FLUSH_ERR) 3772 goto out_unlock; 3773 3774 trace_icm_mad_send_err(state, wc_status); 3775 switch (state) { 3776 case IB_CM_REQ_SENT: 3777 case IB_CM_MRA_REQ_RCVD: 3778 cm_reset_to_idle(cm_id_priv); 3779 cm_event.event = IB_CM_REQ_ERROR; 3780 break; 3781 case IB_CM_REP_SENT: 3782 case IB_CM_MRA_REP_RCVD: 3783 cm_reset_to_idle(cm_id_priv); 3784 cm_event.event = IB_CM_REP_ERROR; 3785 break; 3786 case IB_CM_DREQ_SENT: 3787 cm_enter_timewait(cm_id_priv); 3788 cm_event.event = IB_CM_DREQ_ERROR; 3789 break; 3790 case IB_CM_SIDR_REQ_SENT: 3791 cm_id_priv->id.state = IB_CM_IDLE; 3792 cm_event.event = IB_CM_SIDR_REQ_ERROR; 3793 break; 3794 default: 3795 goto out_unlock; 3796 } 3797 spin_unlock_irq(&cm_id_priv->lock); 3798 cm_event.param.send_status = wc_status; 3799 3800 /* No other events can occur on the cm_id at this point. */ 3801 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event); 3802 if (ret) 3803 ib_destroy_cm_id(&cm_id_priv->id); 3804 return; 3805 out_unlock: 3806 spin_unlock_irq(&cm_id_priv->lock); 3807 } 3808 3809 static void cm_send_handler(struct ib_mad_agent *mad_agent, 3810 struct ib_mad_send_wc *mad_send_wc) 3811 { 3812 struct ib_mad_send_buf *msg = mad_send_wc->send_buf; 3813 struct cm_id_private *cm_id_priv; 3814 struct cm_port *port; 3815 u16 attr_index; 3816 3817 port = mad_agent->context; 3818 attr_index = be16_to_cpu(((struct ib_mad_hdr *) 3819 msg->mad)->attr_id) - CM_ATTR_ID_OFFSET; 3820 3821 if (msg->context[0] == CM_DIRECT_RETRY_CTX) { 3822 msg->retries = 1; 3823 cm_id_priv = NULL; 3824 } else { 3825 cm_id_priv = msg->context[0]; 3826 } 3827 3828 atomic_long_add(1 + msg->retries, &port->counters[CM_XMIT][attr_index]); 3829 if (msg->retries) 3830 atomic_long_add(msg->retries, 3831 &port->counters[CM_XMIT_RETRIES][attr_index]); 3832 3833 if (cm_id_priv) 3834 cm_process_send_error(cm_id_priv, msg, mad_send_wc->status); 3835 else 3836 cm_free_msg(msg); 3837 } 3838 3839 static void cm_work_handler(struct work_struct *_work) 3840 { 3841 struct cm_work *work = container_of(_work, struct cm_work, work.work); 3842 int ret; 3843 3844 switch (work->cm_event.event) { 3845 case IB_CM_REQ_RECEIVED: 3846 ret = cm_req_handler(work); 3847 break; 3848 case IB_CM_MRA_RECEIVED: 3849 ret = cm_mra_handler(work); 3850 break; 3851 case IB_CM_REJ_RECEIVED: 3852 ret = cm_rej_handler(work); 3853 break; 3854 case IB_CM_REP_RECEIVED: 3855 ret = cm_rep_handler(work); 3856 break; 3857 case IB_CM_RTU_RECEIVED: 3858 ret = cm_rtu_handler(work); 3859 break; 3860 case IB_CM_USER_ESTABLISHED: 3861 ret = cm_establish_handler(work); 3862 break; 3863 case IB_CM_DREQ_RECEIVED: 3864 ret = cm_dreq_handler(work); 3865 break; 3866 case IB_CM_DREP_RECEIVED: 3867 ret = cm_drep_handler(work); 3868 break; 3869 case IB_CM_SIDR_REQ_RECEIVED: 3870 ret = cm_sidr_req_handler(work); 3871 break; 3872 case IB_CM_SIDR_REP_RECEIVED: 3873 ret = cm_sidr_rep_handler(work); 3874 break; 3875 case IB_CM_LAP_RECEIVED: 3876 ret = cm_lap_handler(work); 3877 break; 3878 case IB_CM_APR_RECEIVED: 3879 ret = cm_apr_handler(work); 3880 break; 3881 case IB_CM_TIMEWAIT_EXIT: 3882 ret = cm_timewait_handler(work); 3883 break; 3884 default: 3885 trace_icm_handler_err(work->cm_event.event); 3886 ret = -EINVAL; 3887 break; 3888 } 3889 if (ret) 3890 cm_free_work(work); 3891 } 3892 3893 static int cm_establish(struct ib_cm_id *cm_id) 3894 { 3895 struct cm_id_private *cm_id_priv; 3896 struct cm_work *work; 3897 unsigned long flags; 3898 int ret = 0; 3899 struct cm_device *cm_dev; 3900 3901 cm_dev = ib_get_client_data(cm_id->device, &cm_client); 3902 if (!cm_dev) 3903 return -ENODEV; 3904 3905 work = kmalloc(sizeof *work, GFP_ATOMIC); 3906 if (!work) 3907 return -ENOMEM; 3908 3909 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3910 spin_lock_irqsave(&cm_id_priv->lock, flags); 3911 switch (cm_id->state) { 3912 case IB_CM_REP_SENT: 3913 case IB_CM_MRA_REP_RCVD: 3914 cm_id->state = IB_CM_ESTABLISHED; 3915 break; 3916 case IB_CM_ESTABLISHED: 3917 ret = -EISCONN; 3918 break; 3919 default: 3920 trace_icm_establish_err(cm_id); 3921 ret = -EINVAL; 3922 break; 3923 } 3924 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3925 3926 if (ret) { 3927 kfree(work); 3928 goto out; 3929 } 3930 3931 /* 3932 * The CM worker thread may try to destroy the cm_id before it 3933 * can execute this work item. To prevent potential deadlock, 3934 * we need to find the cm_id once we're in the context of the 3935 * worker thread, rather than holding a reference on it. 3936 */ 3937 INIT_DELAYED_WORK(&work->work, cm_work_handler); 3938 work->local_id = cm_id->local_id; 3939 work->remote_id = cm_id->remote_id; 3940 work->mad_recv_wc = NULL; 3941 work->cm_event.event = IB_CM_USER_ESTABLISHED; 3942 3943 /* Check if the device started its remove_one */ 3944 spin_lock_irqsave(&cm.lock, flags); 3945 if (!cm_dev->going_down) { 3946 queue_delayed_work(cm.wq, &work->work, 0); 3947 } else { 3948 kfree(work); 3949 ret = -ENODEV; 3950 } 3951 spin_unlock_irqrestore(&cm.lock, flags); 3952 3953 out: 3954 return ret; 3955 } 3956 3957 static int cm_migrate(struct ib_cm_id *cm_id) 3958 { 3959 struct cm_id_private *cm_id_priv; 3960 unsigned long flags; 3961 int ret = 0; 3962 3963 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3964 spin_lock_irqsave(&cm_id_priv->lock, flags); 3965 if (cm_id->state == IB_CM_ESTABLISHED && 3966 (cm_id->lap_state == IB_CM_LAP_UNINIT || 3967 cm_id->lap_state == IB_CM_LAP_IDLE)) { 3968 cm_id->lap_state = IB_CM_LAP_IDLE; 3969 cm_id_priv->av = cm_id_priv->alt_av; 3970 } else 3971 ret = -EINVAL; 3972 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3973 3974 return ret; 3975 } 3976 3977 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event) 3978 { 3979 int ret; 3980 3981 switch (event) { 3982 case IB_EVENT_COMM_EST: 3983 ret = cm_establish(cm_id); 3984 break; 3985 case IB_EVENT_PATH_MIG: 3986 ret = cm_migrate(cm_id); 3987 break; 3988 default: 3989 ret = -EINVAL; 3990 } 3991 return ret; 3992 } 3993 EXPORT_SYMBOL(ib_cm_notify); 3994 3995 static void cm_recv_handler(struct ib_mad_agent *mad_agent, 3996 struct ib_mad_send_buf *send_buf, 3997 struct ib_mad_recv_wc *mad_recv_wc) 3998 { 3999 struct cm_port *port = mad_agent->context; 4000 struct cm_work *work; 4001 enum ib_cm_event_type event; 4002 bool alt_path = false; 4003 u16 attr_id; 4004 int paths = 0; 4005 int going_down = 0; 4006 4007 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) { 4008 case CM_REQ_ATTR_ID: 4009 alt_path = cm_req_has_alt_path((struct cm_req_msg *) 4010 mad_recv_wc->recv_buf.mad); 4011 paths = 1 + (alt_path != 0); 4012 event = IB_CM_REQ_RECEIVED; 4013 break; 4014 case CM_MRA_ATTR_ID: 4015 event = IB_CM_MRA_RECEIVED; 4016 break; 4017 case CM_REJ_ATTR_ID: 4018 event = IB_CM_REJ_RECEIVED; 4019 break; 4020 case CM_REP_ATTR_ID: 4021 event = IB_CM_REP_RECEIVED; 4022 break; 4023 case CM_RTU_ATTR_ID: 4024 event = IB_CM_RTU_RECEIVED; 4025 break; 4026 case CM_DREQ_ATTR_ID: 4027 event = IB_CM_DREQ_RECEIVED; 4028 break; 4029 case CM_DREP_ATTR_ID: 4030 event = IB_CM_DREP_RECEIVED; 4031 break; 4032 case CM_SIDR_REQ_ATTR_ID: 4033 event = IB_CM_SIDR_REQ_RECEIVED; 4034 break; 4035 case CM_SIDR_REP_ATTR_ID: 4036 event = IB_CM_SIDR_REP_RECEIVED; 4037 break; 4038 case CM_LAP_ATTR_ID: 4039 paths = 1; 4040 event = IB_CM_LAP_RECEIVED; 4041 break; 4042 case CM_APR_ATTR_ID: 4043 event = IB_CM_APR_RECEIVED; 4044 break; 4045 default: 4046 ib_free_recv_mad(mad_recv_wc); 4047 return; 4048 } 4049 4050 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id); 4051 atomic_long_inc(&port->counters[CM_RECV][attr_id - CM_ATTR_ID_OFFSET]); 4052 4053 work = kmalloc(struct_size(work, path, paths), GFP_KERNEL); 4054 if (!work) { 4055 ib_free_recv_mad(mad_recv_wc); 4056 return; 4057 } 4058 4059 INIT_DELAYED_WORK(&work->work, cm_work_handler); 4060 work->cm_event.event = event; 4061 work->mad_recv_wc = mad_recv_wc; 4062 work->port = port; 4063 4064 /* Check if the device started its remove_one */ 4065 spin_lock_irq(&cm.lock); 4066 if (!port->cm_dev->going_down) 4067 queue_delayed_work(cm.wq, &work->work, 0); 4068 else 4069 going_down = 1; 4070 spin_unlock_irq(&cm.lock); 4071 4072 if (going_down) { 4073 kfree(work); 4074 ib_free_recv_mad(mad_recv_wc); 4075 } 4076 } 4077 4078 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, 4079 struct ib_qp_attr *qp_attr, 4080 int *qp_attr_mask) 4081 { 4082 unsigned long flags; 4083 int ret; 4084 4085 spin_lock_irqsave(&cm_id_priv->lock, flags); 4086 switch (cm_id_priv->id.state) { 4087 case IB_CM_REQ_SENT: 4088 case IB_CM_MRA_REQ_RCVD: 4089 case IB_CM_REQ_RCVD: 4090 case IB_CM_MRA_REQ_SENT: 4091 case IB_CM_REP_RCVD: 4092 case IB_CM_MRA_REP_SENT: 4093 case IB_CM_REP_SENT: 4094 case IB_CM_MRA_REP_RCVD: 4095 case IB_CM_ESTABLISHED: 4096 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | 4097 IB_QP_PKEY_INDEX | IB_QP_PORT; 4098 qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE; 4099 if (cm_id_priv->responder_resources) { 4100 struct ib_device *ib_dev = cm_id_priv->id.device; 4101 u64 support_flush = ib_dev->attrs.device_cap_flags & 4102 (IB_DEVICE_FLUSH_GLOBAL | IB_DEVICE_FLUSH_PERSISTENT); 4103 u32 flushable = support_flush ? 4104 (IB_ACCESS_FLUSH_GLOBAL | 4105 IB_ACCESS_FLUSH_PERSISTENT) : 0; 4106 4107 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ | 4108 IB_ACCESS_REMOTE_ATOMIC | 4109 flushable; 4110 } 4111 qp_attr->pkey_index = cm_id_priv->av.pkey_index; 4112 if (cm_id_priv->av.port) 4113 qp_attr->port_num = cm_id_priv->av.port->port_num; 4114 ret = 0; 4115 break; 4116 default: 4117 trace_icm_qp_init_err(&cm_id_priv->id); 4118 ret = -EINVAL; 4119 break; 4120 } 4121 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 4122 return ret; 4123 } 4124 4125 static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv, 4126 struct ib_qp_attr *qp_attr, 4127 int *qp_attr_mask) 4128 { 4129 unsigned long flags; 4130 int ret; 4131 4132 spin_lock_irqsave(&cm_id_priv->lock, flags); 4133 switch (cm_id_priv->id.state) { 4134 case IB_CM_REQ_RCVD: 4135 case IB_CM_MRA_REQ_SENT: 4136 case IB_CM_REP_RCVD: 4137 case IB_CM_MRA_REP_SENT: 4138 case IB_CM_REP_SENT: 4139 case IB_CM_MRA_REP_RCVD: 4140 case IB_CM_ESTABLISHED: 4141 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | 4142 IB_QP_DEST_QPN | IB_QP_RQ_PSN; 4143 qp_attr->ah_attr = cm_id_priv->av.ah_attr; 4144 if ((qp_attr->ah_attr.type == RDMA_AH_ATTR_TYPE_IB) && 4145 cm_id_priv->av.dlid_datapath && 4146 (cm_id_priv->av.dlid_datapath != 0xffff)) 4147 qp_attr->ah_attr.ib.dlid = cm_id_priv->av.dlid_datapath; 4148 qp_attr->path_mtu = cm_id_priv->path_mtu; 4149 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn); 4150 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn); 4151 if (cm_id_priv->qp_type == IB_QPT_RC || 4152 cm_id_priv->qp_type == IB_QPT_XRC_TGT) { 4153 *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC | 4154 IB_QP_MIN_RNR_TIMER; 4155 qp_attr->max_dest_rd_atomic = 4156 cm_id_priv->responder_resources; 4157 qp_attr->min_rnr_timer = 0; 4158 } 4159 if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr) && 4160 cm_id_priv->alt_av.port) { 4161 *qp_attr_mask |= IB_QP_ALT_PATH; 4162 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num; 4163 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index; 4164 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout; 4165 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr; 4166 } 4167 ret = 0; 4168 break; 4169 default: 4170 trace_icm_qp_rtr_err(&cm_id_priv->id); 4171 ret = -EINVAL; 4172 break; 4173 } 4174 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 4175 return ret; 4176 } 4177 4178 static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv, 4179 struct ib_qp_attr *qp_attr, 4180 int *qp_attr_mask) 4181 { 4182 unsigned long flags; 4183 int ret; 4184 4185 spin_lock_irqsave(&cm_id_priv->lock, flags); 4186 switch (cm_id_priv->id.state) { 4187 /* Allow transition to RTS before sending REP */ 4188 case IB_CM_REQ_RCVD: 4189 case IB_CM_MRA_REQ_SENT: 4190 4191 case IB_CM_REP_RCVD: 4192 case IB_CM_MRA_REP_SENT: 4193 case IB_CM_REP_SENT: 4194 case IB_CM_MRA_REP_RCVD: 4195 case IB_CM_ESTABLISHED: 4196 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) { 4197 *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN; 4198 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn); 4199 switch (cm_id_priv->qp_type) { 4200 case IB_QPT_RC: 4201 case IB_QPT_XRC_INI: 4202 *qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY | 4203 IB_QP_MAX_QP_RD_ATOMIC; 4204 qp_attr->retry_cnt = cm_id_priv->retry_count; 4205 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count; 4206 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth; 4207 fallthrough; 4208 case IB_QPT_XRC_TGT: 4209 *qp_attr_mask |= IB_QP_TIMEOUT; 4210 qp_attr->timeout = cm_id_priv->av.timeout; 4211 break; 4212 default: 4213 break; 4214 } 4215 if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) { 4216 *qp_attr_mask |= IB_QP_PATH_MIG_STATE; 4217 qp_attr->path_mig_state = IB_MIG_REARM; 4218 } 4219 } else { 4220 *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE; 4221 if (cm_id_priv->alt_av.port) 4222 qp_attr->alt_port_num = 4223 cm_id_priv->alt_av.port->port_num; 4224 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index; 4225 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout; 4226 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr; 4227 qp_attr->path_mig_state = IB_MIG_REARM; 4228 } 4229 ret = 0; 4230 break; 4231 default: 4232 trace_icm_qp_rts_err(&cm_id_priv->id); 4233 ret = -EINVAL; 4234 break; 4235 } 4236 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 4237 return ret; 4238 } 4239 4240 int ib_cm_init_qp_attr(struct ib_cm_id *cm_id, 4241 struct ib_qp_attr *qp_attr, 4242 int *qp_attr_mask) 4243 { 4244 struct cm_id_private *cm_id_priv; 4245 int ret; 4246 4247 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 4248 switch (qp_attr->qp_state) { 4249 case IB_QPS_INIT: 4250 ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask); 4251 break; 4252 case IB_QPS_RTR: 4253 ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask); 4254 break; 4255 case IB_QPS_RTS: 4256 ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask); 4257 break; 4258 default: 4259 ret = -EINVAL; 4260 break; 4261 } 4262 return ret; 4263 } 4264 EXPORT_SYMBOL(ib_cm_init_qp_attr); 4265 4266 static ssize_t cm_show_counter(struct ib_device *ibdev, u32 port_num, 4267 struct ib_port_attribute *attr, char *buf) 4268 { 4269 struct cm_counter_attribute *cm_attr = 4270 container_of(attr, struct cm_counter_attribute, attr); 4271 struct cm_device *cm_dev = ib_get_client_data(ibdev, &cm_client); 4272 4273 if (WARN_ON(!cm_dev)) 4274 return -EINVAL; 4275 4276 return sysfs_emit( 4277 buf, "%ld\n", 4278 atomic_long_read( 4279 &cm_dev->port[port_num - 1] 4280 ->counters[cm_attr->group][cm_attr->index])); 4281 } 4282 4283 #define CM_COUNTER_ATTR(_name, _group, _index) \ 4284 { \ 4285 .attr = __ATTR(_name, 0444, cm_show_counter, NULL), \ 4286 .group = _group, .index = _index \ 4287 } 4288 4289 #define CM_COUNTER_GROUP(_group, _name) \ 4290 static struct cm_counter_attribute cm_counter_attr_##_group[] = { \ 4291 CM_COUNTER_ATTR(req, _group, CM_REQ_COUNTER), \ 4292 CM_COUNTER_ATTR(mra, _group, CM_MRA_COUNTER), \ 4293 CM_COUNTER_ATTR(rej, _group, CM_REJ_COUNTER), \ 4294 CM_COUNTER_ATTR(rep, _group, CM_REP_COUNTER), \ 4295 CM_COUNTER_ATTR(rtu, _group, CM_RTU_COUNTER), \ 4296 CM_COUNTER_ATTR(dreq, _group, CM_DREQ_COUNTER), \ 4297 CM_COUNTER_ATTR(drep, _group, CM_DREP_COUNTER), \ 4298 CM_COUNTER_ATTR(sidr_req, _group, CM_SIDR_REQ_COUNTER), \ 4299 CM_COUNTER_ATTR(sidr_rep, _group, CM_SIDR_REP_COUNTER), \ 4300 CM_COUNTER_ATTR(lap, _group, CM_LAP_COUNTER), \ 4301 CM_COUNTER_ATTR(apr, _group, CM_APR_COUNTER), \ 4302 }; \ 4303 static struct attribute *cm_counter_attrs_##_group[] = { \ 4304 &cm_counter_attr_##_group[0].attr.attr, \ 4305 &cm_counter_attr_##_group[1].attr.attr, \ 4306 &cm_counter_attr_##_group[2].attr.attr, \ 4307 &cm_counter_attr_##_group[3].attr.attr, \ 4308 &cm_counter_attr_##_group[4].attr.attr, \ 4309 &cm_counter_attr_##_group[5].attr.attr, \ 4310 &cm_counter_attr_##_group[6].attr.attr, \ 4311 &cm_counter_attr_##_group[7].attr.attr, \ 4312 &cm_counter_attr_##_group[8].attr.attr, \ 4313 &cm_counter_attr_##_group[9].attr.attr, \ 4314 &cm_counter_attr_##_group[10].attr.attr, \ 4315 NULL, \ 4316 }; \ 4317 static const struct attribute_group cm_counter_group_##_group = { \ 4318 .name = _name, \ 4319 .attrs = cm_counter_attrs_##_group, \ 4320 }; 4321 4322 CM_COUNTER_GROUP(CM_XMIT, "cm_tx_msgs") 4323 CM_COUNTER_GROUP(CM_XMIT_RETRIES, "cm_tx_retries") 4324 CM_COUNTER_GROUP(CM_RECV, "cm_rx_msgs") 4325 CM_COUNTER_GROUP(CM_RECV_DUPLICATES, "cm_rx_duplicates") 4326 4327 static const struct attribute_group *cm_counter_groups[] = { 4328 &cm_counter_group_CM_XMIT, 4329 &cm_counter_group_CM_XMIT_RETRIES, 4330 &cm_counter_group_CM_RECV, 4331 &cm_counter_group_CM_RECV_DUPLICATES, 4332 NULL, 4333 }; 4334 4335 static int cm_add_one(struct ib_device *ib_device) 4336 { 4337 struct cm_device *cm_dev; 4338 struct cm_port *port; 4339 struct ib_mad_reg_req reg_req = { 4340 .mgmt_class = IB_MGMT_CLASS_CM, 4341 .mgmt_class_version = IB_CM_CLASS_VERSION, 4342 }; 4343 struct ib_port_modify port_modify = { 4344 .set_port_cap_mask = IB_PORT_CM_SUP 4345 }; 4346 unsigned long flags; 4347 int ret; 4348 int count = 0; 4349 u32 i; 4350 4351 cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt), 4352 GFP_KERNEL); 4353 if (!cm_dev) 4354 return -ENOMEM; 4355 4356 kref_init(&cm_dev->kref); 4357 rwlock_init(&cm_dev->mad_agent_lock); 4358 cm_dev->ib_device = ib_device; 4359 cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay; 4360 cm_dev->going_down = 0; 4361 4362 ib_set_client_data(ib_device, &cm_client, cm_dev); 4363 4364 set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask); 4365 rdma_for_each_port (ib_device, i) { 4366 if (!rdma_cap_ib_cm(ib_device, i)) 4367 continue; 4368 4369 port = kzalloc(sizeof *port, GFP_KERNEL); 4370 if (!port) { 4371 ret = -ENOMEM; 4372 goto error1; 4373 } 4374 4375 cm_dev->port[i-1] = port; 4376 port->cm_dev = cm_dev; 4377 port->port_num = i; 4378 4379 ret = ib_port_register_client_groups(ib_device, i, 4380 cm_counter_groups); 4381 if (ret) 4382 goto error1; 4383 4384 port->mad_agent = ib_register_mad_agent(ib_device, i, 4385 IB_QPT_GSI, 4386 ®_req, 4387 0, 4388 cm_send_handler, 4389 cm_recv_handler, 4390 port, 4391 0); 4392 if (IS_ERR(port->mad_agent)) { 4393 ret = PTR_ERR(port->mad_agent); 4394 goto error2; 4395 } 4396 4397 port->rep_agent = ib_register_mad_agent(ib_device, i, 4398 IB_QPT_GSI, 4399 NULL, 4400 0, 4401 cm_send_handler, 4402 NULL, 4403 port, 4404 0); 4405 if (IS_ERR(port->rep_agent)) { 4406 ret = PTR_ERR(port->rep_agent); 4407 goto error3; 4408 } 4409 4410 ret = ib_modify_port(ib_device, i, 0, &port_modify); 4411 if (ret) 4412 goto error4; 4413 4414 count++; 4415 } 4416 4417 if (!count) { 4418 ret = -EOPNOTSUPP; 4419 goto free; 4420 } 4421 4422 write_lock_irqsave(&cm.device_lock, flags); 4423 list_add_tail(&cm_dev->list, &cm.device_list); 4424 write_unlock_irqrestore(&cm.device_lock, flags); 4425 return 0; 4426 4427 error4: 4428 ib_unregister_mad_agent(port->rep_agent); 4429 error3: 4430 ib_unregister_mad_agent(port->mad_agent); 4431 error2: 4432 ib_port_unregister_client_groups(ib_device, i, cm_counter_groups); 4433 error1: 4434 port_modify.set_port_cap_mask = 0; 4435 port_modify.clr_port_cap_mask = IB_PORT_CM_SUP; 4436 while (--i) { 4437 if (!rdma_cap_ib_cm(ib_device, i)) 4438 continue; 4439 4440 port = cm_dev->port[i-1]; 4441 ib_modify_port(ib_device, port->port_num, 0, &port_modify); 4442 ib_unregister_mad_agent(port->rep_agent); 4443 ib_unregister_mad_agent(port->mad_agent); 4444 ib_port_unregister_client_groups(ib_device, i, 4445 cm_counter_groups); 4446 } 4447 free: 4448 cm_device_put(cm_dev); 4449 return ret; 4450 } 4451 4452 static void cm_remove_one(struct ib_device *ib_device, void *client_data) 4453 { 4454 struct cm_device *cm_dev = client_data; 4455 struct cm_port *port; 4456 struct ib_port_modify port_modify = { 4457 .clr_port_cap_mask = IB_PORT_CM_SUP 4458 }; 4459 unsigned long flags; 4460 u32 i; 4461 4462 write_lock_irqsave(&cm.device_lock, flags); 4463 list_del(&cm_dev->list); 4464 write_unlock_irqrestore(&cm.device_lock, flags); 4465 4466 spin_lock_irq(&cm.lock); 4467 cm_dev->going_down = 1; 4468 spin_unlock_irq(&cm.lock); 4469 4470 rdma_for_each_port (ib_device, i) { 4471 struct ib_mad_agent *mad_agent; 4472 struct ib_mad_agent *rep_agent; 4473 4474 if (!rdma_cap_ib_cm(ib_device, i)) 4475 continue; 4476 4477 port = cm_dev->port[i-1]; 4478 mad_agent = port->mad_agent; 4479 rep_agent = port->rep_agent; 4480 ib_modify_port(ib_device, port->port_num, 0, &port_modify); 4481 /* 4482 * We flush the queue here after the going_down set, this 4483 * verify that no new works will be queued in the recv handler, 4484 * after that we can call the unregister_mad_agent 4485 */ 4486 flush_workqueue(cm.wq); 4487 /* 4488 * The above ensures no call paths from the work are running, 4489 * the remaining paths all take the mad_agent_lock. 4490 */ 4491 write_lock(&cm_dev->mad_agent_lock); 4492 port->mad_agent = NULL; 4493 port->rep_agent = NULL; 4494 write_unlock(&cm_dev->mad_agent_lock); 4495 ib_unregister_mad_agent(mad_agent); 4496 ib_unregister_mad_agent(rep_agent); 4497 ib_port_unregister_client_groups(ib_device, i, 4498 cm_counter_groups); 4499 } 4500 4501 cm_device_put(cm_dev); 4502 } 4503 4504 static int __init ib_cm_init(void) 4505 { 4506 int ret; 4507 4508 INIT_LIST_HEAD(&cm.device_list); 4509 rwlock_init(&cm.device_lock); 4510 spin_lock_init(&cm.lock); 4511 cm.listen_service_table = RB_ROOT; 4512 cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); 4513 cm.remote_id_table = RB_ROOT; 4514 cm.remote_qp_table = RB_ROOT; 4515 cm.remote_sidr_table = RB_ROOT; 4516 xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC); 4517 get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand); 4518 INIT_LIST_HEAD(&cm.timewait_list); 4519 4520 cm.wq = alloc_workqueue("ib_cm", WQ_PERCPU, 1); 4521 if (!cm.wq) { 4522 ret = -ENOMEM; 4523 goto error2; 4524 } 4525 4526 ret = ib_register_client(&cm_client); 4527 if (ret) 4528 goto error3; 4529 4530 return 0; 4531 error3: 4532 destroy_workqueue(cm.wq); 4533 error2: 4534 return ret; 4535 } 4536 4537 static void __exit ib_cm_cleanup(void) 4538 { 4539 struct cm_timewait_info *timewait_info, *tmp; 4540 4541 spin_lock_irq(&cm.lock); 4542 list_for_each_entry(timewait_info, &cm.timewait_list, list) 4543 cancel_delayed_work(&timewait_info->work.work); 4544 spin_unlock_irq(&cm.lock); 4545 4546 ib_unregister_client(&cm_client); 4547 destroy_workqueue(cm.wq); 4548 4549 list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) { 4550 list_del(&timewait_info->list); 4551 kfree(timewait_info); 4552 } 4553 4554 WARN_ON(!xa_empty(&cm.local_id_table)); 4555 } 4556 4557 module_init(ib_cm_init); 4558 module_exit(ib_cm_cleanup); 4559