/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>

#include <net/tcp.h>
#include <net/ipv6.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)

static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
static int next_port;

struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	struct completion	comp;
	atomic_t		refcount;
	struct list_head	id_list;
};
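
/*
 * States an rdma_cm_id moves through while binding, resolving an address
 * and route, and connecting.  Transitions are validated under the id's
 * spinlock.
 */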
enum cma_state {
	CMA_IDLE,
	CMA_ADDR_QUERY,
	CMA_ADDR_RESOLVED,
	CMA_ROUTE_QUERY,
	CMA_ROUTE_RESOLVED,
	CMA_CONNECT,
	CMA_DISCONNECT,
	CMA_ADDR_BOUND,
	CMA_LISTEN,
	CMA_DEVICE_REMOVAL,
	CMA_DESTROYING
};

struct rdma_bind_list {
	struct idr		*ps;
	struct hlist_head	owners;
	unsigned short		port;
};

/*
 * Device removal can occur at any time, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;
	struct list_head	list; /* listen_any_list or cma_device.list */
	struct list_head	listen_list; /* per device listens */
	struct cma_device	*cma_dev;
	struct list_head	mc_list;

	int			internal_id;
	enum cma_state		state;
	spinlock_t		lock;
	struct mutex		qp_mutex;

	struct completion	comp;
	atomic_t		refcount;
	struct mutex		handler_mutex;

	int			backlog;
	int			timeout_ms;
	struct ib_sa_query	*query;
	int			query_id;
	union {
		struct ib_cm_id	*ib;
		struct iw_cm_id	*iw;
	} cm_id;

	u32			seq_num;
	u32			qkey;
	u32			qp_num;
	u8			srq;
	u8			tos;
};

struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *ib;
	} multicast;
	struct list_head	list;
	void			*context;
	struct sockaddr_storage	addr;
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum cma_state		old_state;
	enum cma_state		new_state;
	struct rdma_cm_event	event;
};

struct cma_ndev_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct rdma_cm_event	event;
};

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__be32 pad[3];
		__be32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__be16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hh {
	u8 bsdh[16];
	u8 sdp_version; /* Major version: 7:4 */
	u8 ip_version;	/* IP version: 7:4 */
	u8 sdp_specific1[10];
	__be16 port;
	__be16 sdp_specific2;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hah {
	u8 bsdh[16];
	u8 sdp_version;
};

#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2

static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum cma_state comp, enum cma_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum cma_state cma_exch(struct rdma_id_private *id_priv,
			       enum cma_state exch)
{
	unsigned long flags;
	enum cma_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}

static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static inline u8 sdp_get_majv(u8 sdp_version)
{
	return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
	return hh->ip_version >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}

static inline int cma_is_ud_ps(enum rdma_port_space ps)
{
	return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
}
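
/*
 * Associate an id with an RDMA device, holding a device reference until
 * the id is detached.
 */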
static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
}

static int cma_set_qkey(struct rdma_id_private *id_priv)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	if (id_priv->qkey)
		return 0;

	switch (id_priv->id.ps) {
	case RDMA_PS_UDP:
		id_priv->qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
					     &rec);
		if (!ret)
			id_priv->qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}

static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid;
	int ret = -ENODEV;

	rdma_addr_get_sgid(dev_addr, &gid);
	list_for_each_entry(cma_dev, &dev_list, list) {
		ret = ib_find_cached_gid(cma_dev->device, &gid,
					 &id_priv->id.port_num, NULL);
		if (!ret) {
			cma_attach_to_dev(id_priv, cma_dev);
			break;
		}
	}
	return ret;
}

static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

static int cma_disable_callback(struct rdma_id_private *id_priv,
				enum cma_state state)
{
	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != state) {
		mutex_unlock(&id_priv->handler_mutex);
		return -EINVAL;
	}
	return 0;
}

static int cma_has_cm_dev(struct rdma_id_private *id_priv)
{
	return (id_priv->id.device && id_priv->cm_id.ib);
}

struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->state = CMA_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
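
/*
 * UD QPs are not connected through the CM exchange, so transition the QP
 * straight through INIT, RTR, and RTS here.
 */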
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}

int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	if (cma_is_ud_ps(id_priv->id.ps))
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);
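
/*
 * Move a user-allocated QP toward RTR/RTS on the user's behalf, filling
 * in attributes from the CM and the supplied connection parameters.
 */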
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  ib_addr_get_pkey(dev_addr),
				  &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (cma_is_ud_ps(id_priv->id.ps)) {
		ret = cma_set_qkey(id_priv);
		if (ret)
			return ret;

		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);
		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
		break;
	case RDMA_TRANSPORT_IWARP:
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
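
/*
 * Address classification helpers: a "wildcard" address is the zero
 * address or loopback, and a zero port selects any port.
 */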
static inline int cma_zero_addr(struct sockaddr *addr)
{
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		return ipv4_is_zeronet(
			((struct sockaddr_in *)addr)->sin_addr.s_addr);
	else {
		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
			ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
	}
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ipv4_is_loopback(
			((struct sockaddr_in *) addr)->sin_addr.s_addr);
	else
		return ipv6_addr_loopback(
			&((struct sockaddr_in6 *) addr)->sin6_addr);
}

static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static inline __be16 cma_port(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ((struct sockaddr_in *) addr)->sin_port;
	else
		return ((struct sockaddr_in6 *) addr)->sin6_port;
}

static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}

static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
			    u8 *ip_ver, __be16 *port,
			    union cma_ip_addr **src, union cma_ip_addr **dst)
{
	switch (ps) {
	case RDMA_PS_SDP:
		if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
		    SDP_MAJ_VERSION)
			return -EINVAL;

		*ip_ver = sdp_get_ip_ver(hdr);
		*port = ((struct sdp_hh *) hdr)->port;
		*src = &((struct sdp_hh *) hdr)->src_addr;
		*dst = &((struct sdp_hh *) hdr)->dst_addr;
		break;
	default:
		if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
			return -EINVAL;

		*ip_ver = cma_get_ip_ver(hdr);
		*port = ((struct cma_hdr *) hdr)->port;
		*src = &((struct cma_hdr *) hdr)->src_addr;
		*dst = &((struct cma_hdr *) hdr)->dst_addr;
		break;
	}

	if (*ip_ver != 4 && *ip_ver != 6)
		return -EINVAL;
	return 0;
}

static void cma_save_net_info(struct rdma_addr *addr,
			      struct rdma_addr *listen_addr,
			      u8 ip_ver, __be16 port,
			      union cma_ip_addr *src, union cma_ip_addr *dst)
{
	struct sockaddr_in *listen4, *ip4;
	struct sockaddr_in6 *listen6, *ip6;

	switch (ip_ver) {
	case 4:
		listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
		ip4 = (struct sockaddr_in *) &addr->src_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = dst->ip4.addr;
		ip4->sin_port = listen4->sin_port;

		ip4 = (struct sockaddr_in *) &addr->dst_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = src->ip4.addr;
		ip4->sin_port = port;
		break;
	case 6:
		listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
		ip6 = (struct sockaddr_in6 *) &addr->src_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = dst->ip6;
		ip6->sin6_port = listen6->sin6_port;

		ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = src->ip6;
		ip6->sin6_port = port;
		break;
	default:
		break;
	}
}

static inline int cma_user_data_offset(enum rdma_port_space ps)
{
	switch (ps) {
	case RDMA_PS_SDP:
		return 0;
	default:
		return sizeof(struct cma_hdr);
	}
}

static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}
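
/*
 * A wildcard listen spawns one internal listen per device.  Tear them
 * all down, dropping the lock around rdma_destroy_id() since it blocks.
 */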
static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum cma_state state)
{
	switch (state) {
	case CMA_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case CMA_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case CMA_LISTEN:
		if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
				&& !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}

static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		idr_remove(bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}

static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		ib_sa_free_multicast(mc->multicast.ib);
		kfree(mc);
	}
}

void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum cma_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, CMA_DESTROYING);
	cma_cancel_operation(id_priv, state);

	mutex_lock(&lock);
	if (id_priv->cma_dev) {
		mutex_unlock(&lock);
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_leave_mc_groups(id_priv);
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
	}
	mutex_unlock(&lock);

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);
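
/*
 * Active side handling of a received REP: bring the QP to RTS and send
 * an RTU.  On any failure, reject the connection and move the QP to the
 * error state.
 */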
static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(id_priv, NULL);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(id_priv, NULL);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}

static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
{
	if (id_priv->id.ps == RDMA_PS_SDP &&
	    sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
	    SDP_MAJ_VERSION)
		return -EINVAL;

	return 0;
}

static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}

static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, CMA_CONNECT)) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, CMA_DISCONNECT)))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		event.status = cma_verify_rep(id_priv, ib_event->private_data);
		if (event.status)
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
		else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
		break;
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
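
/*
 * Create the rdma_cm_id for an incoming connection request, taking the
 * addressing information from the private data header and the path
 * records from the REQ.
 */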
static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	union cma_ip_addr *src, *dst;
	__be16 port;
	u8 ip_ver;
	int ret;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto destroy_id;

	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	if (cma_any_addr((struct sockaddr *) &rt->addr.src_addr)) {
		rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
		rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
		ib_addr_set_pkey(&rt->addr.dev_addr, rt->path_rec[0].pkey);
	} else {
		ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr,
					&rt->addr.dev_addr);
		if (ret)
			goto destroy_id;
	}
	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;

destroy_id:
	rdma_destroy_id(id);
err:
	return NULL;
}

static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
					      struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	union cma_ip_addr *src, *dst;
	__be16 port;
	u8 ip_ver;
	int ret;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		return NULL;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
		ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
					&id->route.addr.dev_addr);
		if (ret)
			goto err;
	}

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}

static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}
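
/*
 * Handle a REQ or SIDR REQ on a listening id: build a child id, bind it
 * to a device, and pass a CONNECT_REQUEST event to the listener.  An MRA
 * is sent to hold off the CM timeout until the user accepts or rejects.
 */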
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int offset, ret;

	listen_id = cm_id->context;
	if (cma_disable_callback(listen_id, CMA_LISTEN))
		return -ECONNABORTED;

	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id->id.ps);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (cma_is_ud_ps(listen_id->id.ps)) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret)
		goto release_conn_id;

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (!ret) {
		/*
		 * Acquire mutex to prevent user executing rdma_destroy_id()
		 * while we're accessing the cm_id.
		 */
		mutex_lock(&lock);
		if (cma_comp(conn_id, CMA_CONNECT) &&
		    !cma_is_ud_ps(conn_id->id.ps))
			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
		mutex_unlock(&lock);
		mutex_unlock(&conn_id->handler_mutex);
		goto out;
	}

	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;

release_conn_id:
	cma_exch(conn_id, CMA_DESTROYING);
	mutex_unlock(&conn_id->handler_mutex);
	rdma_destroy_id(&conn_id->id);

out:
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}
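
/*
 * The IB service ID is the port space shifted left 16 bits plus the port
 * number; e.g. RDMA_PS_TCP (0x0106) with port 80 gives the service ID
 * 0x0000000001060050.
 */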
static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
	return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
}

static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
				 struct ib_cm_compare_data *compare)
{
	struct cma_hdr *cma_data, *cma_mask;
	struct sdp_hh *sdp_data, *sdp_mask;
	__be32 ip4_addr;
	struct in6_addr ip6_addr;

	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;
	sdp_data = (void *) compare->data;
	sdp_mask = (void *) compare->mask;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 4);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip4.addr = ip4_addr;
			sdp_mask->dst_addr.ip4.addr = htonl(~0);
		} else {
			cma_set_ip_ver(cma_data, 4);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip4.addr = ip4_addr;
			cma_mask->dst_addr.ip4.addr = htonl(~0);
		}
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 6);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip6 = ip6_addr;
			memset(&sdp_mask->dst_addr.ip6, 0xFF,
			       sizeof sdp_mask->dst_addr.ip6);
		} else {
			cma_set_ip_ver(cma_data, 6);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip6 = ip6_addr;
			memset(&cma_mask->dst_addr.ip6, 0xFF,
			       sizeof cma_mask->dst_addr.ip6);
		}
		break;
	default:
		break;
	}
}

static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	struct sockaddr_in *sin;
	int ret = 0;

	if (cma_disable_callback(id_priv, CMA_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
		*sin = iw_event->local_addr;
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
		*sin = iw_event->remote_addr;
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	default:
		BUG_ON(1);
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct sockaddr_in *sin;
	struct net_device *dev = NULL;
	struct rdma_cm_event event;
	int ret;
	struct ib_device_attr attr;

	listen_id = cm_id->context;
	if (cma_disable_callback(listen_id, CMA_LISTEN))
		return -ECONNABORTED;

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP);
	if (IS_ERR(new_cm_id)) {
		ret = -ENOMEM;
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	conn_id->state = CMA_CONNECT;

	dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
	if (!dev) {
		ret = -EADDRNOTAVAIL;
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}
	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
	*sin = iw_event->local_addr;
	sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
	*sin = iw_event->remote_addr;

	ret = ib_query_device(conn_id->id.device, &attr);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
	event.param.conn.responder_resources = attr.max_qp_rd_atom;
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, CMA_DESTROYING);
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(&conn_id->id);
		goto out;
	}

	mutex_unlock(&conn_id->handler_mutex);

out:
	if (dev)
		dev_put(dev);
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}
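
/*
 * Wildcard listens match on the service ID alone; listens on a specific
 * address add a private data comparison so only REQs carrying that
 * destination address are delivered.
 */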
static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct ib_cm_compare_data compare_data;
	struct sockaddr *addr;
	__be64 svc_id;
	int ret;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib))
		return PTR_ERR(id_priv->cm_id.ib);

	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
	svc_id = cma_get_service_id(id_priv->id.ps, addr);
	if (cma_any_addr(addr))
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
	else {
		cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
	}

	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	return ret;
}

static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct sockaddr_in *sin;

	id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
					    iw_conn_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.iw))
		return PTR_ERR(id_priv->cm_id.iw);

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	id_priv->cm_id.iw->local_addr = *sin;

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}

static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}

static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = CMA_ADDR_BOUND;
	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
	       ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));

	cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
	atomic_inc(&id_priv->refcount);
	dev_id_priv->internal_id = 1;

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
		       "listening on device %s\n", ret, cma_dev->device->name);
}

static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
		ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
		return -EINVAL;

	id_priv->backlog = backlog;
	if (id->device) {
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
			break;
		case RDMA_TRANSPORT_IWARP:
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
			break;
		default:
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);
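
/*
 * The type of service is carried into the IB path record QoS class when
 * an IPv4 route is resolved; IPv6 routes take the traffic class from the
 * flow info of the source address instead.
 */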
void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->tos = (u8) tos;
}
EXPORT_SYMBOL(rdma_set_service_type);

static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = CMA_ROUTE_QUERY;
		work->new_state = CMA_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
	}

	queue_work(cma_wq, &work->work);
}

static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_addr *addr = &id_priv->id.route.addr;
	struct ib_sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;

	memset(&path_rec, 0, sizeof path_rec);
	rdma_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
	rdma_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	path_rec.service_id = cma_get_service_id(id_priv->id.ps,
						 (struct sockaddr *) &addr->dst_addr);

	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	if (addr->src_addr.ss_family == AF_INET) {
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
	} else {
		sin6 = (struct sockaddr_in6 *) &addr->src_addr;
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
	}

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       GFP_KERNEL, cma_query_handler,
					       work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
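
/*
 * Deferred work: complete the queued state transition and report the
 * associated event from process context, destroying the id if the user's
 * handler says so.
 */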
static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static void cma_ndev_work_handler(struct work_struct *_work)
{
	struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state == CMA_DESTROYING ||
	    id_priv->state == CMA_DEVICE_REMOVAL)
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}

int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
	id->route.num_paths = num_paths;
	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);

static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}
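
/*
 * Resolve a route once the address is resolved.  IB issues an SA path
 * record query; iWARP routing is handled by the network stack, so the
 * completion event is simply queued.
 */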
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);

static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;
	struct ib_port_attr port_attr;
	union ib_gid gid;
	u16 pkey;
	int ret;
	u8 p;

	mutex_lock(&lock);
	if (list_empty(&dev_list)) {
		ret = -ENODEV;
		goto out;
	}
	list_for_each_entry(cma_dev, &dev_list, list)
		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE)
				goto port_found;

	p = 1;
	cma_dev = list_entry(dev_list.next, struct cma_device, list);

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	id_priv->id.route.addr.dev_addr.dev_type =
		(rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB) ?
		ARPHRD_INFINIBAND : ARPHRD_ETHER;

	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
out:
	mutex_unlock(&lock);
	return ret;
}
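
/*
 * Completion callback for rdma_resolve_ip(): bind the id to the device
 * matching the resolved address and report ADDR_RESOLVED or ADDR_ERROR.
 */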
static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	mutex_lock(&id_priv->handler_mutex);

	/*
	 * Grab mutex to block rdma_destroy_id() from removing the device while
	 * we're trying to acquire it.
	 */
	mutex_lock(&lock);
	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
		mutex_unlock(&lock);
		goto out;
	}

	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv);
	mutex_unlock(&lock);

	if (status) {
		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else {
		memcpy(&id_priv->id.route.addr.src_addr, src_addr,
		       ip_addr_size(src_addr));
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	}

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
}

static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	struct sockaddr *src, *dst;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	src = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
	if (cma_zero_addr(src)) {
		dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
		if ((src->sa_family = dst->sa_family) == AF_INET) {
			((struct sockaddr_in *) src)->sin_addr.s_addr =
				((struct sockaddr_in *) dst)->sin_addr.s_addr;
		} else {
			ipv6_addr_copy(&((struct sockaddr_in6 *) src)->sin6_addr,
				       &((struct sockaddr_in6 *) dst)->sin6_addr);
		}
	}

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ADDR_QUERY;
	work->new_state = CMA_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}

static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 struct sockaddr *dst_addr)
{
	if (!src_addr || !src_addr->sa_family) {
		src_addr = (struct sockaddr *) &id->route.addr.src_addr;
		if ((src_addr->sa_family = dst_addr->sa_family) == AF_INET6) {
			((struct sockaddr_in6 *) src_addr)->sin6_scope_id =
				((struct sockaddr_in6 *) dst_addr)->sin6_scope_id;
		}
	}
	return rdma_bind_addr(id, src_addr);
}
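
/*
 * Map a destination IP to an RDMA address.  A wildcard destination is
 * resolved over a loopback device; anything else goes through the
 * asynchronous address resolution service.
 */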
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
	if (cma_any_addr(dst_addr))
		ret = cma_resolve_loopback(id_priv);
	else
		ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr,
				      dst_addr, &id->route.addr.dev_addr,
				      timeout_ms, addr_handler, id_priv);
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);

static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr_in *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	sin->sin_port = htons(bind_list->port);
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}

static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
			  unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int port, ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	do {
		ret = idr_get_new_above(ps, bind_list, snum, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	if (port != snum) {
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}

static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list;
	int port, ret, low, high;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

retry:
	/* FIXME: add proper port randomization, like inet_csk_get_port */
	do {
		ret = idr_get_new_above(ps, bind_list, next_port, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	inet_get_local_port_range(&low, &high);
	if (port > high) {
		if (next_port != low) {
			idr_remove(ps, port);
			next_port = low;
			goto retry;
		}
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	if (port == high)
		next_port = low;
	else
		next_port = port + 1;

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}
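
/*
 * Bind to a specific port with socket-like rules: privileged ports need
 * CAP_NET_BIND_SERVICE, and a wildcard address cannot share a port with
 * a specific one.
 */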

static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_id_private *cur_id;
	struct sockaddr_in *sin, *cur_sin;
	struct rdma_bind_list *bind_list;
	struct hlist_node *node;
	unsigned short snum;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	snum = ntohs(sin->sin_port);
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = idr_find(ps, snum);
	if (!bind_list)
		return cma_alloc_port(ps, id_priv, snum);

	/*
	 * We don't support binding to any address if anyone is bound to
	 * a specific address on the same port.
	 */
	if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr))
		return -EADDRNOTAVAIL;

	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
		if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr))
			return -EADDRNOTAVAIL;

		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
		if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
			return -EADDRINUSE;
	}

	cma_bind_port(bind_list, id_priv);
	return 0;
}

static int cma_get_port(struct rdma_id_private *id_priv)
{
	struct idr *ps;
	int ret;

	switch (id_priv->id.ps) {
	case RDMA_PS_SDP:
		ps = &sdp_ps;
		break;
	case RDMA_PS_TCP:
		ps = &tcp_ps;
		break;
	case RDMA_PS_UDP:
		ps = &udp_ps;
		break;
	case RDMA_PS_IPOIB:
		ps = &ipoib_ps;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	mutex_lock(&lock);
	if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}

static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
			       struct sockaddr *addr)
{
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct sockaddr_in6 *sin6;

	if (addr->sa_family != AF_INET6)
		return 0;

	sin6 = (struct sockaddr_in6 *) addr;
	if ((ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
	    !sin6->sin6_scope_id)
		return -EINVAL;

	dev_addr->bound_dev_if = sin6->sin6_scope_id;
#endif
	return 0;
}

int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
		return -EINVAL;

	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
	if (ret)
		goto err1;

	if (!cma_any_addr(addr)) {
		ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		mutex_lock(&lock);
		ret = cma_acquire_dev(id_priv);
		mutex_unlock(&lock);
		if (ret)
			goto err1;
	}

	memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	if (id_priv->cma_dev) {
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
		mutex_unlock(&lock);
	}
err1:
	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);
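
/*
 * Usage sketch (illustrative only): binding a listener to a specific IPv4
 * address and port before calling rdma_listen().  "addr" and the port
 * value are caller-supplied placeholders.
 *
 *	struct sockaddr_in addr = {
 *		.sin_family = AF_INET,
 *		.sin_port = htons(7174),
 *		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
 *	};
 *	ret = rdma_bind_addr(id, (struct sockaddr *) &addr);
 *
 * Binding to port 0 picks an ephemeral port via cma_alloc_any_port();
 * ports below PROT_SOCK require CAP_NET_BIND_SERVICE, mirroring TCP.
 */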

static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
			  struct rdma_route *route)
{
	struct cma_hdr *cma_hdr;
	struct sdp_hh *sdp_hdr;

	if (route->addr.src_addr.ss_family == AF_INET) {
		struct sockaddr_in *src4, *dst4;

		src4 = (struct sockaddr_in *) &route->addr.src_addr;
		dst4 = (struct sockaddr_in *) &route->addr.dst_addr;

		switch (ps) {
		case RDMA_PS_SDP:
			sdp_hdr = hdr;
			if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
				return -EINVAL;
			sdp_set_ip_ver(sdp_hdr, 4);
			sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
			sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
			sdp_hdr->port = src4->sin_port;
			break;
		default:
			cma_hdr = hdr;
			cma_hdr->cma_version = CMA_VERSION;
			cma_set_ip_ver(cma_hdr, 4);
			cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
			cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
			cma_hdr->port = src4->sin_port;
			break;
		}
	} else {
		struct sockaddr_in6 *src6, *dst6;

		src6 = (struct sockaddr_in6 *) &route->addr.src_addr;
		dst6 = (struct sockaddr_in6 *) &route->addr.dst_addr;

		switch (ps) {
		case RDMA_PS_SDP:
			sdp_hdr = hdr;
			if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
				return -EINVAL;
			sdp_set_ip_ver(sdp_hdr, 6);
			sdp_hdr->src_addr.ip6 = src6->sin6_addr;
			sdp_hdr->dst_addr.ip6 = dst6->sin6_addr;
			sdp_hdr->port = src6->sin6_port;
			break;
		default:
			cma_hdr = hdr;
			cma_hdr->cma_version = CMA_VERSION;
			cma_set_ip_ver(cma_hdr, 6);
			cma_hdr->src_addr.ip6 = src6->sin6_addr;
			cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
			cma_hdr->port = src6->sin6_port;
			break;
		}
	}
	return 0;
}

static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
	int ret = 0;

	if (cma_disable_callback(id_priv, CMA_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_SIDR_REQ_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		event.param.ud.private_data = ib_event->private_data;
		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
		if (rep->status != IB_SIDR_SUCCESS) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = rep->status;
			break;
		}
		ret = cma_set_qkey(id_priv);
		if (ret) {
			event.event = RDMA_CM_EVENT_ADDR_ERROR;
			event.status = -EINVAL;
			break;
		}
		if (id_priv->qkey != rep->qkey) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = -EINVAL;
			break;
		}
		ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
				     id_priv->id.route.path_rec,
				     &event.param.ud.ah_attr);
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.status = 0;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
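
/*
 * Note on the SIDR exchange above (descriptive, not new behavior): for UD
 * port spaces the connection "handshake" is a single service-ID resolution
 * query rather than a connected-mode QP transition.  A successful reply
 * carries the remote QPN and Q_Key, which are surfaced to the consumer in
 * event.param.ud, along with address-handle attributes derived from the
 * resolved path, so the consumer can address datagrams directly.
 */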

static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct rdma_route *route;
	int ret;

	req.private_data_len = sizeof(struct cma_hdr) +
			       conn_param->private_data_len;
	req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!req.private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy((void *) req.private_data + sizeof(struct cma_hdr),
		       conn_param->private_data, conn_param->private_data_len);

	route = &id_priv->id.route;
	ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
	if (ret)
		goto out;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
					    cma_sidr_rep_handler, id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	req.path = route->path_rec;
	req.service_id = cma_get_service_id(id_priv->id.ps,
					    (struct sockaddr *) &route->addr.dst_addr);
	/* Convert the CM response timeout (4.096 us << t) to milliseconds:
	 * approximately 2^(t - 8) ms. */
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(req.private_data);
	return ret;
}

static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv->id.ps);
	req.private_data_len = offset + conn_param->private_data_len;
	private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	route = &id_priv->id.route;
	ret = cma_format_hdr(private_data, id_priv->id.ps, route);
	if (ret)
		goto out;
	req.private_data = private_data;

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = cma_get_service_id(id_priv->id.ps,
					    (struct sockaddr *) &route->addr.dst_addr);
	req.qp_num = id_priv->qp_num;
	req.qp_type = IB_QPT_RC;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = conn_param->retry_count;
	req.rnr_retry_count = conn_param->rnr_retry_count;
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id_priv->cm_id.ib)) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}
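
/*
 * Layout note (descriptive, for the non-SDP port spaces handled above): the
 * wire-level private data is the consumer's payload prefixed by the header
 * that cma_format_hdr() fills in:
 *
 *	[ CM header | consumer private_data ... ]
 *
 * which is why cma_resolve_ib_udp() copies the consumer bytes in at
 * sizeof(struct cma_hdr), and cma_connect_ib() at the port-space dependent
 * cma_user_data_offset().
 */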

static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	struct sockaddr_in *sin;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto out;
	}

	id_priv->cm_id.iw = cm_id;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	cm_id->local_addr = *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
	cm_id->remote_addr = *sin;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret && !IS_ERR(cm_id)) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}

int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_connect_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);
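
/*
 * Usage sketch (illustrative only; the field values are placeholders chosen
 * by the caller, not requirements of this file):
 *
 *	struct rdma_conn_param param = {
 *		.responder_resources = 1,
 *		.initiator_depth = 1,
 *		.retry_count = 7,
 *		.rnr_retry_count = 7,
 *	};
 *	ret = rdma_connect(id, &param);
 *
 * The consumer normally calls this from its RDMA_CM_EVENT_ROUTE_RESOLVED
 * handler and waits for RDMA_CM_EVENT_ESTABLISHED (or a rejection event).
 * For iWARP, initiator_depth and responder_resources map onto ORD and IRD
 * in cma_connect_iw() above, so the same structure serves both transports.
 */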

static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = conn_param->rnr_retry_count;
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}

static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}

static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;
	int ret;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		ret = cma_set_qkey(id_priv);
		if (ret)
			return ret;
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}

int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps)) {
			/* conn_param may legitimately be NULL here (see the
			 * guard above), so don't dereference it blindly. */
			if (conn_param)
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							conn_param->private_data,
							conn_param->private_data_len);
			else
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							NULL, 0);
		} else if (conn_param) {
			ret = cma_accept_ib(id_priv, conn_param);
		} else {
			ret = cma_rep_recv(id_priv);
		}
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_accept_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);

int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);

int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_reject);
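
/*
 * Usage sketch (illustrative only): a passive-side consumer typically
 * decides between the two calls above from its event handler, e.g.
 *
 *	case RDMA_CM_EVENT_CONNECT_REQUEST:
 *		if (acceptable(event->param.conn.private_data))
 *			ret = rdma_accept(id, &my_conn_param);
 *		else
 *			ret = rdma_reject(id, &reason, sizeof(reason));
 *		break;
 *
 * "acceptable", "my_conn_param", and "reason" are caller-defined
 * placeholders.
 */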

int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);

static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret;

	id_priv = mc->id_priv;
	if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
	    cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
		return 0;

	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 multicast->rec.mlid);
	mutex_unlock(&id_priv->qp_mutex);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}

static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
		    0xFF10A01B)) {
		/* The IPv6 address is already an SA-assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}
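
/*
 * Worked example for cma_set_mgid() (illustrative): joining an IPv4 group
 * such as 224.0.1.1 goes through ip_ib_mc_map(), which builds an
 * IPoIB-style hardware address in mc_map: four bytes of flags/QPN followed
 * by a 16-byte GID derived from the port's broadcast group and the low
 * bits of the IP group.  The GID portion starts at mc_map + 4, which is
 * why the MGID is copied from that offset.  mc_map[7] is a signature byte
 * within that GID; stamping it with 0x01 for RDMA_PS_UDP keeps RDMA CM's
 * UD multicast groups distinct from the groups IPoIB itself joins for the
 * same IP address.
 */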

static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	if (id_priv->id.ps == RDMA_PS_UDP)
		rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = 1;

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	if (IS_ERR(mc->multicast.ib))
		return PTR_ERR(mc->multicast.ib);

	return 0;
}

int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, ip_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_join_ib_multicast(id_priv, mc);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);

void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						mc->multicast.ib->rec.mlid);
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
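
/*
 * Usage sketch (illustrative only): a UD consumer joins after binding or
 * resolving an address, keys the join by the group sockaddr, and leaves
 * with the same sockaddr.  "grp" and "ctx" are caller-supplied
 * placeholders.
 *
 *	ret = rdma_join_multicast(id, (struct sockaddr *) &grp, ctx);
 *	... wait for RDMA_CM_EVENT_MULTICAST_JOIN, use event.param.ud ...
 *	rdma_leave_multicast(id, (struct sockaddr *) &grp);
 */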

static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_ndev_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
		       ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		atomic_inc(&id_priv->refcount);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}

static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ctx)
{
	struct net_device *ndev = (struct net_device *) ctx;
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (dev_net(ndev) != &init_net)
		return NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}

static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};

static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum cma_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
	if (state == CMA_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
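
/*
 * Contract note (descriptive): when RDMA_CM_EVENT_DEVICE_REMOVAL is
 * delivered above, a consumer that returns non-zero from its event handler
 * asks the CM to destroy the rdma_cm_id on its behalf (see
 * cma_process_remove() below); a consumer that returns 0 keeps ownership
 * and must eventually call rdma_destroy_id() itself once it has torn down
 * its QP and other per-id resources.
 */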

static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}

static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}

static int __init cma_init(void)
{
	int ret, low, high, remaining;

	/* Start the ephemeral-port scan at a random offset within the
	 * local port range. */
	get_random_bytes(&next_port, sizeof next_port);
	inet_get_local_port_range(&low, &high);
	remaining = (high - low) + 1;
	next_port = ((unsigned int) next_port % remaining) + low;

	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;
	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}

static void __exit cma_cleanup(void)
{
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&sdp_ps);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);