/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/igmp.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/route.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip_fib.h>
#include <net/ip6_route.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

#include "core_priv.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18

static const char * const cma_events[] = {
        [RDMA_CM_EVENT_ADDR_RESOLVED]    = "address resolved",
        [RDMA_CM_EVENT_ADDR_ERROR]       = "address error",
        [RDMA_CM_EVENT_ROUTE_RESOLVED]   = "route resolved",
        [RDMA_CM_EVENT_ROUTE_ERROR]      = "route error",
        [RDMA_CM_EVENT_CONNECT_REQUEST]  = "connect request",
        [RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
        [RDMA_CM_EVENT_CONNECT_ERROR]    = "connect error",
        [RDMA_CM_EVENT_UNREACHABLE]      = "unreachable",
        [RDMA_CM_EVENT_REJECTED]         = "rejected",
        [RDMA_CM_EVENT_ESTABLISHED]      = "established",
        [RDMA_CM_EVENT_DISCONNECTED]     = "disconnected",
        [RDMA_CM_EVENT_DEVICE_REMOVAL]   = "device removal",
        [RDMA_CM_EVENT_MULTICAST_JOIN]   = "multicast join",
        [RDMA_CM_EVENT_MULTICAST_ERROR]  = "multicast error",
        [RDMA_CM_EVENT_ADDR_CHANGE]      = "address change",
        [RDMA_CM_EVENT_TIMEWAIT_EXIT]    = "timewait exit",
};

const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
{
        size_t index = event;

        return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
                        cma_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(rdma_event_msg);

const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
                                                int reason)
{
        if (rdma_ib_or_roce(id->device, id->port_num))
                return ibcm_reject_msg(reason);

        if (rdma_protocol_iwarp(id->device, id->port_num))
                return iwcm_reject_msg(reason);

        WARN_ON_ONCE(1);
        return "unrecognized transport";
}
EXPORT_SYMBOL(rdma_reject_msg);

bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
{
        if (rdma_ib_or_roce(id->device, id->port_num))
                return reason == IB_CM_REJ_CONSUMER_DEFINED;

        if (rdma_protocol_iwarp(id->device, id->port_num))
                return reason == -ECONNREFUSED;

        WARN_ON_ONCE(1);
        return false;
}
EXPORT_SYMBOL(rdma_is_consumer_reject);

const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
                                      struct rdma_cm_event *ev, u8 *data_len)
{
        const void *p;

        if (rdma_is_consumer_reject(id, ev->status)) {
                *data_len = ev->param.conn.private_data_len;
                p = ev->param.conn.private_data;
        } else {
                *data_len = 0;
                p = NULL;
        }
        return p;
}
EXPORT_SYMBOL(rdma_consumer_reject_data);

static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cma_client = {
        .name   = "cma",
        .add    = cma_add_one,
        .remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static unsigned int cma_pernet_id;

struct cma_pernet {
        struct idr tcp_ps;
        struct idr udp_ps;
        struct idr ipoib_ps;
        struct idr ib_ps;
};

static struct cma_pernet *cma_pernet(struct net *net)
{
        return net_generic(net, cma_pernet_id);
}

static struct idr *cma_pernet_idr(struct net *net, enum rdma_port_space ps)
{
        struct cma_pernet *pernet = cma_pernet(net);

        switch (ps) {
        case RDMA_PS_TCP:
                return &pernet->tcp_ps;
        case RDMA_PS_UDP:
                return &pernet->udp_ps;
        case RDMA_PS_IPOIB:
                return &pernet->ipoib_ps;
        case RDMA_PS_IB:
                return &pernet->ib_ps;
        default:
                return NULL;
        }
}

struct cma_device {
        struct list_head list;
        struct ib_device *device;
        struct completion comp;
        atomic_t refcount;
        struct list_head id_list;
        enum ib_gid_type *default_gid_type;
};

struct rdma_bind_list {
        enum rdma_port_space ps;
        struct hlist_head owners;
        unsigned short port;
};

struct class_port_info_context {
        struct ib_class_port_info *class_port_info;
        struct ib_device *device;
        struct completion done;
        struct ib_sa_query *sa_query;
        u8 port_num;
};

static int cma_ps_alloc(struct net *net, enum rdma_port_space ps,
                        struct rdma_bind_list *bind_list, int snum)
{
        struct idr *idr = cma_pernet_idr(net, ps);

        return idr_alloc(idr, bind_list, snum, snum + 1, GFP_KERNEL);
}

static struct rdma_bind_list *cma_ps_find(struct net *net,
                                          enum rdma_port_space ps, int snum)
{
        struct idr *idr = cma_pernet_idr(net, ps);

        return idr_find(idr, snum);
}

static void cma_ps_remove(struct net *net, enum rdma_port_space ps, int snum)
{
        struct idr *idr = cma_pernet_idr(net, ps);

        idr_remove(idr, snum);
}
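
/*
 * Illustrative sketch (not used by this file; the values are examples only):
 * binding a listener to TCP port-space port 5000 in the init namespace
 * stores its rdma_bind_list in the per-net tcp_ps idr, where later lookups
 * and the final release find it again:
 *
 *      cma_ps_alloc(&init_net, RDMA_PS_TCP, bind_list, 5000);
 *      cma_ps_find(&init_net, RDMA_PS_TCP, 5000);      returns bind_list
 *      cma_ps_remove(&init_net, RDMA_PS_TCP, 5000);
 */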

enum {
        CMA_OPTION_AFONLY,
};

void cma_ref_dev(struct cma_device *cma_dev)
{
        atomic_inc(&cma_dev->refcount);
}

struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
                                             void *cookie)
{
        struct cma_device *cma_dev;
        struct cma_device *found_cma_dev = NULL;

        mutex_lock(&lock);

        list_for_each_entry(cma_dev, &dev_list, list)
                if (filter(cma_dev->device, cookie)) {
                        found_cma_dev = cma_dev;
                        break;
                }

        if (found_cma_dev)
                cma_ref_dev(found_cma_dev);
        mutex_unlock(&lock);
        return found_cma_dev;
}

int cma_get_default_gid_type(struct cma_device *cma_dev,
                             unsigned int port)
{
        if (port < rdma_start_port(cma_dev->device) ||
            port > rdma_end_port(cma_dev->device))
                return -EINVAL;

        return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)];
}

int cma_set_default_gid_type(struct cma_device *cma_dev,
                             unsigned int port,
                             enum ib_gid_type default_gid_type)
{
        unsigned long supported_gids;

        if (port < rdma_start_port(cma_dev->device) ||
            port > rdma_end_port(cma_dev->device))
                return -EINVAL;

        supported_gids = roce_gid_type_mask_support(cma_dev->device, port);

        if (!(supported_gids & 1 << default_gid_type))
                return -EINVAL;

        cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] =
                default_gid_type;

        return 0;
}
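
/*
 * Worked example (illustrative only): roce_gid_type_mask_support() returns
 * a bitmask indexed by enum ib_gid_type.  If a port reports 0x3, gid types
 * 0 and 1 may be chosen as the default; asking for a type whose bit is
 * clear fails the (supported_gids & 1 << default_gid_type) test above and
 * returns -EINVAL.
 */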

struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
{
        return cma_dev->device;
}

/*
 * Device removal can occur at any time, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
        struct rdma_cm_id id;

        struct rdma_bind_list *bind_list;
        struct hlist_node node;
        struct list_head list; /* listen_any_list or cma_device.list */
        struct list_head listen_list; /* per device listens */
        struct cma_device *cma_dev;
        struct list_head mc_list;

        int internal_id;
        enum rdma_cm_state state;
        spinlock_t lock;
        struct mutex qp_mutex;

        struct completion comp;
        atomic_t refcount;
        struct mutex handler_mutex;

        int backlog;
        int timeout_ms;
        struct ib_sa_query *query;
        int query_id;
        union {
                struct ib_cm_id *ib;
                struct iw_cm_id *iw;
        } cm_id;

        u32 seq_num;
        u32 qkey;
        u32 qp_num;
        pid_t owner;
        u32 options;
        u8 srq;
        u8 tos;
        u8 reuseaddr;
        u8 afonly;
        enum ib_gid_type gid_type;
};

struct cma_multicast {
        struct rdma_id_private *id_priv;
        union {
                struct ib_sa_multicast *ib;
        } multicast;
        struct list_head list;
        void *context;
        struct sockaddr_storage addr;
        struct kref mcref;
        bool igmp_joined;
        u8 join_state;
};

struct cma_work {
        struct work_struct work;
        struct rdma_id_private *id;
        enum rdma_cm_state old_state;
        enum rdma_cm_state new_state;
        struct rdma_cm_event event;
};

struct cma_ndev_work {
        struct work_struct work;
        struct rdma_id_private *id;
        struct rdma_cm_event event;
};

struct iboe_mcast_work {
        struct work_struct work;
        struct rdma_id_private *id;
        struct cma_multicast *mc;
};

union cma_ip_addr {
        struct in6_addr ip6;
        struct {
                __be32 pad[3];
                __be32 addr;
        } ip4;
};

struct cma_hdr {
        u8 cma_version;
        u8 ip_version;  /* IP version: 7:4 */
        __be16 port;
        union cma_ip_addr src_addr;
        union cma_ip_addr dst_addr;
};

#define CMA_VERSION 0x00

struct cma_req_info {
        struct ib_device *device;
        int port;
        union ib_gid local_gid;
        __be64 service_id;
        u16 pkey;
        bool has_gid:1;
};

static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&id_priv->lock, flags);
        ret = (id_priv->state == comp);
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
                         enum rdma_cm_state comp, enum rdma_cm_state exch)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&id_priv->lock, flags);
        if ((ret = (id_priv->state == comp)))
                id_priv->state = exch;
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return ret;
}

static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
                                   enum rdma_cm_state exch)
{
        unsigned long flags;
        enum rdma_cm_state old;

        spin_lock_irqsave(&id_priv->lock, flags);
        old = id_priv->state;
        id_priv->state = exch;
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return old;
}

static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
{
        return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
        hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}
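
/*
 * Worked example (illustrative only): the CMA header carries the IP version
 * in bits 7:4 of ip_version.  For an IPv4 private-data header,
 * cma_set_ip_ver(hdr, 4) leaves ip_version reading back as 0x40 and
 * cma_get_ip_ver(hdr) returning 4; cma_save_ip_info() below relies on
 * exactly these values (4 or 6) when parsing an incoming request.
 */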

static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
{
        struct in_device *in_dev = NULL;

        if (ndev) {
                rtnl_lock();
                in_dev = __in_dev_get_rtnl(ndev);
                if (in_dev) {
                        if (join)
                                ip_mc_inc_group(in_dev,
                                                *(__be32 *)(mgid->raw + 12));
                        else
                                ip_mc_dec_group(in_dev,
                                                *(__be32 *)(mgid->raw + 12));
                }
                rtnl_unlock();
        }
        return (in_dev) ? 0 : -ENODEV;
}

static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
                               struct cma_device *cma_dev)
{
        cma_ref_dev(cma_dev);
        id_priv->cma_dev = cma_dev;
        id_priv->gid_type = 0;
        id_priv->id.device = cma_dev->device;
        id_priv->id.route.addr.dev_addr.transport =
                rdma_node_get_transport(cma_dev->device->node_type);
        list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
                              struct cma_device *cma_dev)
{
        _cma_attach_to_dev(id_priv, cma_dev);
        id_priv->gid_type =
                cma_dev->default_gid_type[id_priv->id.port_num -
                                          rdma_start_port(cma_dev->device)];
}

void cma_deref_dev(struct cma_device *cma_dev)
{
        if (atomic_dec_and_test(&cma_dev->refcount))
                complete(&cma_dev->comp);
}

static inline void release_mc(struct kref *kref)
{
        struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);

        kfree(mc->multicast.ib);
        kfree(mc);
}

static void cma_release_dev(struct rdma_id_private *id_priv)
{
        mutex_lock(&lock);
        list_del(&id_priv->list);
        cma_deref_dev(id_priv->cma_dev);
        id_priv->cma_dev = NULL;
        mutex_unlock(&lock);
}

static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
{
        return (struct sockaddr *) &id_priv->id.route.addr.src_addr;
}

static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
{
        return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
}

static inline unsigned short cma_family(struct rdma_id_private *id_priv)
{
        return id_priv->id.route.addr.src_addr.ss_family;
}

static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
{
        struct ib_sa_mcmember_rec rec;
        int ret = 0;

        if (id_priv->qkey) {
                if (qkey && id_priv->qkey != qkey)
                        return -EINVAL;
                return 0;
        }

        if (qkey) {
                id_priv->qkey = qkey;
                return 0;
        }

        switch (id_priv->id.ps) {
        case RDMA_PS_UDP:
        case RDMA_PS_IB:
                id_priv->qkey = RDMA_UDP_QKEY;
                break;
        case RDMA_PS_IPOIB:
                ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
                ret = ib_sa_get_mcmember_rec(id_priv->id.device,
                                             id_priv->id.port_num, &rec.mgid,
                                             &rec);
                if (!ret)
                        id_priv->qkey = be32_to_cpu(rec.qkey);
                break;
        default:
                break;
        }
        return ret;
}

static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
        dev_addr->dev_type = ARPHRD_INFINIBAND;
        rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
        ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
}

static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
{
        int ret;

        if (addr->sa_family != AF_IB) {
                ret = rdma_translate_ip(addr, dev_addr, NULL);
        } else {
                cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
                ret = 0;
        }

        return ret;
}

static inline int cma_validate_port(struct ib_device *device, u8 port,
                                    enum ib_gid_type gid_type,
                                    union ib_gid *gid, int dev_type,
                                    int bound_if_index)
{
        int ret = -ENODEV;
        struct net_device *ndev = NULL;

        if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
                return ret;

        if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
                return ret;

        if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
                ndev = dev_get_by_index(&init_net, bound_if_index);
                if (ndev && ndev->flags & IFF_LOOPBACK) {
                        pr_info("detected loopback device\n");
                        dev_put(ndev);

                        if (!device->get_netdev)
                                return -EOPNOTSUPP;

                        ndev = device->get_netdev(device, port);
                        if (!ndev)
                                return -ENODEV;
                }
        } else {
                gid_type = IB_GID_TYPE_IB;
        }

        ret = ib_find_cached_gid_by_port(device, gid, gid_type, port,
                                         ndev, NULL);

        if (ndev)
                dev_put(ndev);

        return ret;
}

static int cma_acquire_dev(struct rdma_id_private *id_priv,
                           struct rdma_id_private *listen_id_priv)
{
        struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
        struct cma_device *cma_dev;
        union ib_gid gid, iboe_gid, *gidp;
        int ret = -ENODEV;
        u8 port;

        if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
            id_priv->id.ps == RDMA_PS_IPOIB)
                return -EINVAL;

        mutex_lock(&lock);
        rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
                    &iboe_gid);

        memcpy(&gid, dev_addr->src_dev_addr +
               rdma_addr_gid_offset(dev_addr), sizeof gid);

        if (listen_id_priv) {
                cma_dev = listen_id_priv->cma_dev;
                port = listen_id_priv->id.port_num;
                gidp = rdma_protocol_roce(cma_dev->device, port) ?
                       &iboe_gid : &gid;

                ret = cma_validate_port(cma_dev->device, port,
                                        rdma_protocol_ib(cma_dev->device, port) ?
                                        IB_GID_TYPE_IB :
                                        listen_id_priv->gid_type, gidp,
                                        dev_addr->dev_type,
                                        dev_addr->bound_dev_if);
                if (!ret) {
                        id_priv->id.port_num = port;
                        goto out;
                }
        }

        list_for_each_entry(cma_dev, &dev_list, list) {
                for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
                        if (listen_id_priv &&
                            listen_id_priv->cma_dev == cma_dev &&
                            listen_id_priv->id.port_num == port)
                                continue;

                        gidp = rdma_protocol_roce(cma_dev->device, port) ?
                               &iboe_gid : &gid;

                        ret = cma_validate_port(cma_dev->device, port,
                                                rdma_protocol_ib(cma_dev->device, port) ?
                                                IB_GID_TYPE_IB :
                                                cma_dev->default_gid_type[port - 1],
                                                gidp, dev_addr->dev_type,
                                                dev_addr->bound_dev_if);
                        if (!ret) {
                                id_priv->id.port_num = port;
                                goto out;
                        }
                }
        }

out:
        if (!ret)
                cma_attach_to_dev(id_priv, cma_dev);

        mutex_unlock(&lock);
        return ret;
}

/*
 * Select the source IB device and address to reach the destination IB address.
 */
static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
{
        struct cma_device *cma_dev, *cur_dev;
        struct sockaddr_ib *addr;
        union ib_gid gid, sgid, *dgid;
        u16 pkey, index;
        u8 p;
        int i;

        cma_dev = NULL;
        addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
        dgid = (union ib_gid *) &addr->sib_addr;
        pkey = ntohs(addr->sib_pkey);

        list_for_each_entry(cur_dev, &dev_list, list) {
                for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
                        if (!rdma_cap_af_ib(cur_dev->device, p))
                                continue;

                        if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
                                continue;

                        for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i,
                                                       &gid, NULL);
                             i++) {
                                if (!memcmp(&gid, dgid, sizeof(gid))) {
                                        cma_dev = cur_dev;
                                        sgid = gid;
                                        id_priv->id.port_num = p;
                                        goto found;
                                }

                                if (!cma_dev && (gid.global.subnet_prefix ==
                                                 dgid->global.subnet_prefix)) {
                                        cma_dev = cur_dev;
                                        sgid = gid;
                                        id_priv->id.port_num = p;
                                }
                        }
                }
        }

        if (!cma_dev)
                return -ENODEV;

found:
        cma_attach_to_dev(id_priv, cma_dev);
        addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
        memcpy(&addr->sib_addr, &sgid, sizeof sgid);
        cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
        return 0;
}

static void cma_deref_id(struct rdma_id_private *id_priv)
{
        if (atomic_dec_and_test(&id_priv->refcount))
                complete(&id_priv->comp);
}

struct rdma_cm_id *rdma_create_id(struct net *net,
                                  rdma_cm_event_handler event_handler,
                                  void *context, enum rdma_port_space ps,
                                  enum ib_qp_type qp_type)
{
        struct rdma_id_private *id_priv;

        id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
        if (!id_priv)
                return ERR_PTR(-ENOMEM);

        id_priv->owner = task_pid_nr(current);
        id_priv->state = RDMA_CM_IDLE;
        id_priv->id.context = context;
        id_priv->id.event_handler = event_handler;
        id_priv->id.ps = ps;
        id_priv->id.qp_type = qp_type;
        spin_lock_init(&id_priv->lock);
        mutex_init(&id_priv->qp_mutex);
        init_completion(&id_priv->comp);
        atomic_set(&id_priv->refcount, 1);
        mutex_init(&id_priv->handler_mutex);
        INIT_LIST_HEAD(&id_priv->listen_list);
        INIT_LIST_HEAD(&id_priv->mc_list);
        get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
        id_priv->id.route.addr.dev_addr.net = get_net(net);

        return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
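
/*
 * Typical consumer usage (illustrative sketch only; "my_handler" and the
 * error handling are assumptions, not part of this file): a ULP creates an
 * id in the idle state and must eventually destroy it, which also releases
 * the net namespace reference taken above.
 *
 *      id = rdma_create_id(&init_net, my_handler, ctx, RDMA_PS_TCP,
 *                          IB_QPT_RC);
 *      if (IS_ERR(id))
 *              return PTR_ERR(id);
 *      ...
 *      rdma_destroy_id(id);
 */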

static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        qp_attr.qp_state = IB_QPS_INIT;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret)
                return ret;

        qp_attr.qp_state = IB_QPS_RTR;
        ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
        if (ret)
                return ret;

        qp_attr.qp_state = IB_QPS_RTS;
        qp_attr.sq_psn = 0;
        ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

        return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        qp_attr.qp_state = IB_QPS_INIT;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}

int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
                   struct ib_qp_init_attr *qp_init_attr)
{
        struct rdma_id_private *id_priv;
        struct ib_qp *qp;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (id->device != pd->device)
                return -EINVAL;

        qp_init_attr->port_num = id->port_num;
        qp = ib_create_qp(pd, qp_init_attr);
        if (IS_ERR(qp))
                return PTR_ERR(qp);

        if (id->qp_type == IB_QPT_UD)
                ret = cma_init_ud_qp(id_priv, qp);
        else
                ret = cma_init_conn_qp(id_priv, qp);
        if (ret)
                goto err;

        id->qp = qp;
        id_priv->qp_num = qp->qp_num;
        id_priv->srq = (qp->srq != NULL);
        return 0;
err:
        ib_destroy_qp(qp);
        return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
        struct rdma_id_private *id_priv;

        id_priv = container_of(id, struct rdma_id_private, id);
        mutex_lock(&id_priv->qp_mutex);
        ib_destroy_qp(id_priv->id.qp);
        id_priv->id.qp = NULL;
        mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);

static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
                             struct rdma_conn_param *conn_param)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;
        union ib_gid sgid;

        mutex_lock(&id_priv->qp_mutex);
        if (!id_priv->id.qp) {
                ret = 0;
                goto out;
        }

        /* Need to update QP attributes from default values. */
        qp_attr.qp_state = IB_QPS_INIT;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                goto out;

        ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
        if (ret)
                goto out;

        qp_attr.qp_state = IB_QPS_RTR;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                goto out;

        ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
                           qp_attr.ah_attr.grh.sgid_index, &sgid, NULL);
        if (ret)
                goto out;

        BUG_ON(id_priv->cma_dev->device != id_priv->id.device);

        if (conn_param)
                qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
        ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
        mutex_unlock(&id_priv->qp_mutex);
        return ret;
}

static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
                             struct rdma_conn_param *conn_param)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        mutex_lock(&id_priv->qp_mutex);
        if (!id_priv->id.qp) {
                ret = 0;
                goto out;
        }

        qp_attr.qp_state = IB_QPS_RTS;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                goto out;

        if (conn_param)
                qp_attr.max_rd_atomic = conn_param->initiator_depth;
        ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
        mutex_unlock(&id_priv->qp_mutex);
        return ret;
}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
        struct ib_qp_attr qp_attr;
        int ret;

        mutex_lock(&id_priv->qp_mutex);
        if (!id_priv->id.qp) {
                ret = 0;
                goto out;
        }

        qp_attr.qp_state = IB_QPS_ERR;
        ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
        mutex_unlock(&id_priv->qp_mutex);
        return ret;
}
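
/*
 * Summary of the helpers above (descriptive note, for orientation only):
 * connected QPs are driven through INIT -> RTR -> RTS by cma_modify_qp_rtr()
 * and cma_modify_qp_rts(), with rdma_init_qp_attr() supplying the transport
 * specific attributes for each step; cma_modify_qp_err() moves the QP to the
 * error state during teardown.
 */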

static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
                               struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
        struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
        int ret;
        u16 pkey;

        if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
                pkey = 0xffff;
        else
                pkey = ib_addr_get_pkey(dev_addr);

        ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
                                  pkey, &qp_attr->pkey_index);
        if (ret)
                return ret;

        qp_attr->port_num = id_priv->id.port_num;
        *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

        if (id_priv->id.qp_type == IB_QPT_UD) {
                ret = cma_set_qkey(id_priv, 0);
                if (ret)
                        return ret;

                qp_attr->qkey = id_priv->qkey;
                *qp_attr_mask |= IB_QP_QKEY;
        } else {
                qp_attr->qp_access_flags = 0;
                *qp_attr_mask |= IB_QP_ACCESS_FLAGS;
        }
        return 0;
}

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
                      int *qp_attr_mask)
{
        struct rdma_id_private *id_priv;
        int ret = 0;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (rdma_cap_ib_cm(id->device, id->port_num)) {
                if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
                        ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
                else
                        ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
                                                 qp_attr_mask);

                if (qp_attr->qp_state == IB_QPS_RTR)
                        qp_attr->rq_psn = id_priv->seq_num;
        } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
                if (!id_priv->cm_id.iw) {
                        qp_attr->qp_access_flags = 0;
                        *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
                } else
                        ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
                                                 qp_attr_mask);
        } else
                ret = -ENOSYS;

        return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);

static inline int cma_zero_addr(struct sockaddr *addr)
{
        switch (addr->sa_family) {
        case AF_INET:
                return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
        case AF_INET6:
                return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr);
        case AF_IB:
                return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr);
        default:
                return 0;
        }
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
        switch (addr->sa_family) {
        case AF_INET:
                return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
        case AF_INET6:
                return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr);
        case AF_IB:
                return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr);
        default:
                return 0;
        }
}

static inline int cma_any_addr(struct sockaddr *addr)
{
        return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
{
        if (src->sa_family != dst->sa_family)
                return -1;

        switch (src->sa_family) {
        case AF_INET:
                return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
                       ((struct sockaddr_in *) dst)->sin_addr.s_addr;
        case AF_INET6:
                return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
                                     &((struct sockaddr_in6 *) dst)->sin6_addr);
        default:
                return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
                                   &((struct sockaddr_ib *) dst)->sib_addr);
        }
}

static __be16 cma_port(struct sockaddr *addr)
{
        struct sockaddr_ib *sib;

        switch (addr->sa_family) {
        case AF_INET:
                return ((struct sockaddr_in *) addr)->sin_port;
        case AF_INET6:
                return ((struct sockaddr_in6 *) addr)->sin6_port;
        case AF_IB:
                sib = (struct sockaddr_ib *) addr;
                return htons((u16) (be64_to_cpu(sib->sib_sid) &
                                    be64_to_cpu(sib->sib_sid_mask)));
        default:
                return 0;
        }
}

static inline int cma_any_port(struct sockaddr *addr)
{
        return !cma_port(addr);
}

static void cma_save_ib_info(struct sockaddr *src_addr,
                             struct sockaddr *dst_addr,
                             struct rdma_cm_id *listen_id,
                             struct ib_sa_path_rec *path)
{
        struct sockaddr_ib *listen_ib, *ib;

        listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
        if (src_addr) {
                ib = (struct sockaddr_ib *)src_addr;
                ib->sib_family = AF_IB;
                if (path) {
                        ib->sib_pkey = path->pkey;
                        ib->sib_flowinfo = path->flow_label;
                        memcpy(&ib->sib_addr, &path->sgid, 16);
                        ib->sib_sid = path->service_id;
                        ib->sib_scope_id = 0;
                } else {
                        ib->sib_pkey = listen_ib->sib_pkey;
                        ib->sib_flowinfo = listen_ib->sib_flowinfo;
                        ib->sib_addr = listen_ib->sib_addr;
                        ib->sib_sid = listen_ib->sib_sid;
                        ib->sib_scope_id = listen_ib->sib_scope_id;
                }
                ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
        }
        if (dst_addr) {
                ib = (struct sockaddr_ib *)dst_addr;
                ib->sib_family = AF_IB;
                if (path) {
                        ib->sib_pkey = path->pkey;
                        ib->sib_flowinfo = path->flow_label;
                        memcpy(&ib->sib_addr, &path->dgid, 16);
                }
        }
}

static void cma_save_ip4_info(struct sockaddr_in *src_addr,
                              struct sockaddr_in *dst_addr,
                              struct cma_hdr *hdr,
                              __be16 local_port)
{
        if (src_addr) {
                *src_addr = (struct sockaddr_in) {
                        .sin_family = AF_INET,
                        .sin_addr.s_addr = hdr->dst_addr.ip4.addr,
                        .sin_port = local_port,
                };
        }

        if (dst_addr) {
                *dst_addr = (struct sockaddr_in) {
                        .sin_family = AF_INET,
                        .sin_addr.s_addr = hdr->src_addr.ip4.addr,
                        .sin_port = hdr->port,
                };
        }
}

static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
                              struct sockaddr_in6 *dst_addr,
                              struct cma_hdr *hdr,
                              __be16 local_port)
{
        if (src_addr) {
                *src_addr = (struct sockaddr_in6) {
                        .sin6_family = AF_INET6,
                        .sin6_addr = hdr->dst_addr.ip6,
                        .sin6_port = local_port,
                };
        }

        if (dst_addr) {
                *dst_addr = (struct sockaddr_in6) {
                        .sin6_family = AF_INET6,
                        .sin6_addr = hdr->src_addr.ip6,
                        .sin6_port = hdr->port,
                };
        }
}

static u16 cma_port_from_service_id(__be64 service_id)
{
        return (u16)be64_to_cpu(service_id);
}

static int cma_save_ip_info(struct sockaddr *src_addr,
                            struct sockaddr *dst_addr,
                            struct ib_cm_event *ib_event,
                            __be64 service_id)
{
        struct cma_hdr *hdr;
        __be16 port;

        hdr = ib_event->private_data;
        if (hdr->cma_version != CMA_VERSION)
                return -EINVAL;

        port = htons(cma_port_from_service_id(service_id));

        switch (cma_get_ip_ver(hdr)) {
        case 4:
                cma_save_ip4_info((struct sockaddr_in *)src_addr,
                                  (struct sockaddr_in *)dst_addr, hdr, port);
                break;
        case 6:
                cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
                                  (struct sockaddr_in6 *)dst_addr, hdr, port);
                break;
        default:
                return -EAFNOSUPPORT;
        }

        return 0;
}

static int cma_save_net_info(struct sockaddr *src_addr,
                             struct sockaddr *dst_addr,
                             struct rdma_cm_id *listen_id,
                             struct ib_cm_event *ib_event,
                             sa_family_t sa_family, __be64 service_id)
{
        if (sa_family == AF_IB) {
                if (ib_event->event == IB_CM_REQ_RECEIVED)
                        cma_save_ib_info(src_addr, dst_addr, listen_id,
                                         ib_event->param.req_rcvd.primary_path);
                else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
                        cma_save_ib_info(src_addr, dst_addr, listen_id, NULL);
                return 0;
        }

        return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);
}

static int cma_save_req_info(const struct ib_cm_event *ib_event,
                             struct cma_req_info *req)
{
        const struct ib_cm_req_event_param *req_param =
                &ib_event->param.req_rcvd;
        const struct ib_cm_sidr_req_event_param *sidr_param =
                &ib_event->param.sidr_req_rcvd;

        switch (ib_event->event) {
        case IB_CM_REQ_RECEIVED:
                req->device = req_param->listen_id->device;
                req->port = req_param->port;
                memcpy(&req->local_gid, &req_param->primary_path->sgid,
                       sizeof(req->local_gid));
                req->has_gid = true;
                req->service_id = req_param->primary_path->service_id;
                req->pkey = be16_to_cpu(req_param->primary_path->pkey);
                if (req->pkey != req_param->bth_pkey)
                        pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
                                            "RDMA CMA: in the future this may cause the request to be dropped\n",
                                            req_param->bth_pkey, req->pkey);
                break;
        case IB_CM_SIDR_REQ_RECEIVED:
                req->device = sidr_param->listen_id->device;
                req->port = sidr_param->port;
                req->has_gid = false;
                req->service_id = sidr_param->service_id;
                req->pkey = sidr_param->pkey;
                if (req->pkey != sidr_param->bth_pkey)
                        pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
                                            "RDMA CMA: in the future this may cause the request to be dropped\n",
                                            sidr_param->bth_pkey, req->pkey);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static bool validate_ipv4_net_dev(struct net_device *net_dev,
                                  const struct sockaddr_in *dst_addr,
                                  const struct sockaddr_in *src_addr)
{
        __be32 daddr = dst_addr->sin_addr.s_addr,
               saddr = src_addr->sin_addr.s_addr;
        struct fib_result res;
        struct flowi4 fl4;
        int err;
        bool ret;

        if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
            ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) ||
            ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) ||
            ipv4_is_loopback(saddr))
                return false;

        memset(&fl4, 0, sizeof(fl4));
        fl4.flowi4_iif = net_dev->ifindex;
        fl4.daddr = daddr;
        fl4.saddr = saddr;

        rcu_read_lock();
        err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
        ret = err == 0 && FIB_RES_DEV(res) == net_dev;
        rcu_read_unlock();

        return ret;
}

static bool validate_ipv6_net_dev(struct net_device *net_dev,
                                  const struct sockaddr_in6 *dst_addr,
                                  const struct sockaddr_in6 *src_addr)
{
#if IS_ENABLED(CONFIG_IPV6)
        const int strict = ipv6_addr_type(&dst_addr->sin6_addr) &
                           IPV6_ADDR_LINKLOCAL;
        struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
                                         &src_addr->sin6_addr, net_dev->ifindex,
                                         strict);
        bool ret;

        if (!rt)
                return false;

        ret = rt->rt6i_idev->dev == net_dev;
        ip6_rt_put(rt);

        return ret;
#else
        return false;
#endif
}

static bool validate_net_dev(struct net_device *net_dev,
                             const struct sockaddr *daddr,
                             const struct sockaddr *saddr)
{
        const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr;
        const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr;
        const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
        const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr;

        switch (daddr->sa_family) {
        case AF_INET:
                return saddr->sa_family == AF_INET &&
                       validate_ipv4_net_dev(net_dev, daddr4, saddr4);

        case AF_INET6:
                return saddr->sa_family == AF_INET6 &&
                       validate_ipv6_net_dev(net_dev, daddr6, saddr6);

        default:
                return false;
        }
}

static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
                                          const struct cma_req_info *req)
{
        struct sockaddr_storage listen_addr_storage, src_addr_storage;
        struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage,
                        *src_addr = (struct sockaddr *)&src_addr_storage;
        struct net_device *net_dev;
        const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
        int err;

        err = cma_save_ip_info(listen_addr, src_addr, ib_event,
                               req->service_id);
        if (err)
                return ERR_PTR(err);

        net_dev = ib_get_net_dev_by_params(req->device, req->port, req->pkey,
                                           gid, listen_addr);
        if (!net_dev)
                return ERR_PTR(-ENODEV);

        if (!validate_net_dev(net_dev, listen_addr, src_addr)) {
                dev_put(net_dev);
                return ERR_PTR(-EHOSTUNREACH);
        }

        return net_dev;
}

static enum rdma_port_space rdma_ps_from_service_id(__be64 service_id)
{
        return (be64_to_cpu(service_id) >> 16) & 0xffff;
}

static bool cma_match_private_data(struct rdma_id_private *id_priv,
                                   const struct cma_hdr *hdr)
{
        struct sockaddr *addr = cma_src_addr(id_priv);
        __be32 ip4_addr;
        struct in6_addr ip6_addr;

        if (cma_any_addr(addr) && !id_priv->afonly)
                return true;

        switch (addr->sa_family) {
        case AF_INET:
                ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
                if (cma_get_ip_ver(hdr) != 4)
                        return false;
                if (!cma_any_addr(addr) &&
                    hdr->dst_addr.ip4.addr != ip4_addr)
                        return false;
                break;
        case AF_INET6:
                ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
                if (cma_get_ip_ver(hdr) != 6)
                        return false;
                if (!cma_any_addr(addr) &&
                    memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
                        return false;
                break;
        case AF_IB:
                return true;
        default:
                return false;
        }

        return true;
}

static bool cma_protocol_roce_dev_port(struct ib_device *device, int port_num)
{
        enum rdma_link_layer ll = rdma_port_get_link_layer(device, port_num);
        enum rdma_transport_type transport =
                rdma_node_get_transport(device->node_type);

        return ll == IB_LINK_LAYER_ETHERNET && transport == RDMA_TRANSPORT_IB;
}

static bool cma_protocol_roce(const struct rdma_cm_id *id)
{
        struct ib_device *device = id->device;
        const int port_num = id->port_num ?: rdma_start_port(device);

        return cma_protocol_roce_dev_port(device, port_num);
}

static bool cma_match_net_dev(const struct rdma_cm_id *id,
                              const struct net_device *net_dev,
                              u8 port_num)
{
        const struct rdma_addr *addr = &id->route.addr;

        if (!net_dev)
                /* This request is an AF_IB request or a RoCE request */
                return (!id->port_num || id->port_num == port_num) &&
                       (addr->src_addr.ss_family == AF_IB ||
                        cma_protocol_roce_dev_port(id->device, port_num));

        return !addr->dev_addr.bound_dev_if ||
               (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
                addr->dev_addr.bound_dev_if == net_dev->ifindex);
}

static struct rdma_id_private *cma_find_listener(
                const struct rdma_bind_list *bind_list,
                const struct ib_cm_id *cm_id,
                const struct ib_cm_event *ib_event,
                const struct cma_req_info *req,
                const struct net_device *net_dev)
{
        struct rdma_id_private *id_priv, *id_priv_dev;

        if (!bind_list)
                return ERR_PTR(-EINVAL);

        hlist_for_each_entry(id_priv, &bind_list->owners, node) {
                if (cma_match_private_data(id_priv, ib_event->private_data)) {
                        if (id_priv->id.device == cm_id->device &&
                            cma_match_net_dev(&id_priv->id, net_dev, req->port))
                                return id_priv;
                        list_for_each_entry(id_priv_dev,
                                            &id_priv->listen_list,
                                            listen_list) {
                                if (id_priv_dev->id.device == cm_id->device &&
                                    cma_match_net_dev(&id_priv_dev->id, net_dev, req->port))
                                        return id_priv_dev;
                        }
                }
        }

        return ERR_PTR(-EINVAL);
}

static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
                                                 struct ib_cm_event *ib_event,
                                                 struct net_device **net_dev)
{
        struct cma_req_info req;
        struct rdma_bind_list *bind_list;
        struct rdma_id_private *id_priv;
        int err;

        err = cma_save_req_info(ib_event, &req);
        if (err)
                return ERR_PTR(err);

        *net_dev = cma_get_net_dev(ib_event, &req);
        if (IS_ERR(*net_dev)) {
                if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
                        /* Assuming the protocol is AF_IB */
                        *net_dev = NULL;
                } else if (cma_protocol_roce_dev_port(req.device, req.port)) {
                        /* TODO find the net dev matching the request parameters
                         * through the RoCE GID table */
                        *net_dev = NULL;
                } else {
                        return ERR_CAST(*net_dev);
                }
        }

        bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
                                rdma_ps_from_service_id(req.service_id),
                                cma_port_from_service_id(req.service_id));
        id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
        if (IS_ERR(id_priv) && *net_dev) {
                dev_put(*net_dev);
                *net_dev = NULL;
        }

        return id_priv;
}

static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
{
        return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
}

static void cma_cancel_route(struct rdma_id_private *id_priv)
{
        if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
                if (id_priv->query)
                        ib_sa_cancel_query(id_priv->query_id, id_priv->query);
        }
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
        struct rdma_id_private *dev_id_priv;

        /*
         * Remove from listen_any_list to prevent added devices from spawning
         * additional listen requests.
         */
        mutex_lock(&lock);
        list_del(&id_priv->list);

        while (!list_empty(&id_priv->listen_list)) {
                dev_id_priv = list_entry(id_priv->listen_list.next,
                                         struct rdma_id_private, listen_list);
                /* sync with device removal to avoid duplicate destruction */
                list_del_init(&dev_id_priv->list);
                list_del(&dev_id_priv->listen_list);
                mutex_unlock(&lock);

                rdma_destroy_id(&dev_id_priv->id);
                mutex_lock(&lock);
        }
        mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
                                 enum rdma_cm_state state)
{
        switch (state) {
        case RDMA_CM_ADDR_QUERY:
                rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
                break;
        case RDMA_CM_ROUTE_QUERY:
                cma_cancel_route(id_priv);
                break;
        case RDMA_CM_LISTEN:
                if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
                        cma_cancel_listens(id_priv);
                break;
        default:
                break;
        }
}

static void cma_release_port(struct rdma_id_private *id_priv)
{
        struct rdma_bind_list *bind_list = id_priv->bind_list;
        struct net *net = id_priv->id.route.addr.dev_addr.net;

        if (!bind_list)
                return;

        mutex_lock(&lock);
        hlist_del(&id_priv->node);
        if (hlist_empty(&bind_list->owners)) {
                cma_ps_remove(net, bind_list->ps, bind_list->port);
                kfree(bind_list);
        }
        mutex_unlock(&lock);
}

static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
        struct cma_multicast *mc;

        while (!list_empty(&id_priv->mc_list)) {
                mc = container_of(id_priv->mc_list.next,
                                  struct cma_multicast, list);
                list_del(&mc->list);
                if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
                                      id_priv->id.port_num)) {
                        ib_sa_free_multicast(mc->multicast.ib);
                        kfree(mc);
                } else {
                        if (mc->igmp_joined) {
                                struct rdma_dev_addr *dev_addr =
                                        &id_priv->id.route.addr.dev_addr;
                                struct net_device *ndev = NULL;

                                if (dev_addr->bound_dev_if)
                                        ndev = dev_get_by_index(&init_net,
                                                                dev_addr->bound_dev_if);
                                if (ndev) {
                                        cma_igmp_send(ndev,
                                                      &mc->multicast.ib->rec.mgid,
                                                      false);
                                        dev_put(ndev);
                                }
                        }
                        kref_put(&mc->mcref, release_mc);
                }
        }
}

void rdma_destroy_id(struct rdma_cm_id *id)
{
        struct rdma_id_private *id_priv;
        enum rdma_cm_state state;

        id_priv = container_of(id, struct rdma_id_private, id);
        state = cma_exch(id_priv, RDMA_CM_DESTROYING);
        cma_cancel_operation(id_priv, state);

        /*
         * Wait for any active callback to finish.  New callbacks will find
         * the id_priv state set to destroying and abort.
         */
        mutex_lock(&id_priv->handler_mutex);
        mutex_unlock(&id_priv->handler_mutex);

        if (id_priv->cma_dev) {
                if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
                        if (id_priv->cm_id.ib)
                                ib_destroy_cm_id(id_priv->cm_id.ib);
                } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
                        if (id_priv->cm_id.iw)
                                iw_destroy_cm_id(id_priv->cm_id.iw);
                }
                cma_leave_mc_groups(id_priv);
                cma_release_dev(id_priv);
        }

        cma_release_port(id_priv);
        cma_deref_id(id_priv);
        wait_for_completion(&id_priv->comp);

        if (id_priv->internal_id)
                cma_deref_id(id_priv->id.context);

        kfree(id_priv->id.route.path_rec);
        put_net(id_priv->id.route.addr.dev_addr.net);
        kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);

static int cma_rep_recv(struct rdma_id_private *id_priv)
{
        int ret;

        ret = cma_modify_qp_rtr(id_priv, NULL);
        if (ret)
                goto reject;

        ret = cma_modify_qp_rts(id_priv, NULL);
        if (ret)
                goto reject;

        ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
        if (ret)
                goto reject;

        return 0;
reject:
        cma_modify_qp_err(id_priv);
        ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
                       NULL, 0, NULL, 0);
        return ret;
}

static void cma_set_rep_event_data(struct rdma_cm_event *event,
                                   struct ib_cm_rep_event_param *rep_data,
                                   void *private_data)
{
        event->param.conn.private_data = private_data;
        event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
        event->param.conn.responder_resources = rep_data->responder_resources;
        event->param.conn.initiator_depth = rep_data->initiator_depth;
        event->param.conn.flow_control = rep_data->flow_control;
        event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
        event->param.conn.srq = rep_data->srq;
        event->param.conn.qp_num = rep_data->remote_qpn;
}

static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
        struct rdma_id_private *id_priv = cm_id->context;
        struct rdma_cm_event event;
        int ret = 0;

        mutex_lock(&id_priv->handler_mutex);
        if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
             id_priv->state != RDMA_CM_CONNECT) ||
            (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
             id_priv->state != RDMA_CM_DISCONNECT))
                goto out;

        memset(&event, 0, sizeof event);
        switch (ib_event->event) {
        case IB_CM_REQ_ERROR:
        case IB_CM_REP_ERROR:
                event.event = RDMA_CM_EVENT_UNREACHABLE;
                event.status = -ETIMEDOUT;
                break;
        case IB_CM_REP_RECEIVED:
                if (id_priv->id.qp) {
                        event.status = cma_rep_recv(id_priv);
                        event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
                                                     RDMA_CM_EVENT_ESTABLISHED;
                } else {
                        event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
                }
                cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
                                       ib_event->private_data);
                break;
        case IB_CM_RTU_RECEIVED:
        case IB_CM_USER_ESTABLISHED:
                event.event = RDMA_CM_EVENT_ESTABLISHED;
                break;
        case IB_CM_DREQ_ERROR:
                event.status = -ETIMEDOUT; /* fall through */
        case IB_CM_DREQ_RECEIVED:
        case IB_CM_DREP_RECEIVED:
                if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
                                   RDMA_CM_DISCONNECT))
                        goto out;
                event.event = RDMA_CM_EVENT_DISCONNECTED;
                break;
        case IB_CM_TIMEWAIT_EXIT:
                event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
                break;
        case IB_CM_MRA_RECEIVED:
                /* ignore event */
                goto out;
        case IB_CM_REJ_RECEIVED:
                cma_modify_qp_err(id_priv);
                event.status = ib_event->param.rej_rcvd.reason;
                event.event = RDMA_CM_EVENT_REJECTED;
                event.param.conn.private_data = ib_event->private_data;
                event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
                break;
        default:
                pr_err("RDMA CMA: unexpected IB CM event: %d\n",
                       ib_event->event);
                goto out;
        }

        ret = id_priv->id.event_handler(&id_priv->id, &event);
        if (ret) {
                /* Destroy the CM ID by returning a non-zero value. */
                id_priv->cm_id.ib = NULL;
                cma_exch(id_priv, RDMA_CM_DESTROYING);
                mutex_unlock(&id_priv->handler_mutex);
                rdma_destroy_id(&id_priv->id);
                return ret;
        }
out:
        mutex_unlock(&id_priv->handler_mutex);
        return ret;
}

static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
                                               struct ib_cm_event *ib_event,
                                               struct net_device *net_dev)
{
        struct rdma_id_private *id_priv;
        struct rdma_cm_id *id;
        struct rdma_route *rt;
        const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
        const __be64 service_id =
                ib_event->param.req_rcvd.primary_path->service_id;
        int ret;

        id = rdma_create_id(listen_id->route.addr.dev_addr.net,
                            listen_id->event_handler, listen_id->context,
                            listen_id->ps, ib_event->param.req_rcvd.qp_type);
        if (IS_ERR(id))
                return NULL;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
                              (struct sockaddr *)&id->route.addr.dst_addr,
                              listen_id, ib_event, ss_family, service_id))
                goto err;

        rt = &id->route;
        rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
        rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
                               GFP_KERNEL);
        if (!rt->path_rec)
                goto err;

        rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
        if (rt->num_paths == 2)
                rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

        if (net_dev) {
                ret = rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL);
                if (ret)
                        goto err;
        } else {
                if (!cma_protocol_roce(listen_id) &&
                    cma_any_addr(cma_src_addr(id_priv))) {
                        rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
                        rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
                        ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
                } else if (!cma_any_addr(cma_src_addr(id_priv))) {
                        ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
                        if (ret)
                                goto err;
                }
        }
        rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);

        id_priv->state = RDMA_CM_CONNECT;
        return id_priv;

err:
        rdma_destroy_id(id);
        return NULL;
}

static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
                                              struct ib_cm_event *ib_event,
                                              struct net_device *net_dev)
{
        struct rdma_id_private *id_priv;
        struct rdma_cm_id *id;
        const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
        struct net *net = listen_id->route.addr.dev_addr.net;
        int ret;

        id = rdma_create_id(net, listen_id->event_handler, listen_id->context,
                            listen_id->ps, IB_QPT_UD);
        if (IS_ERR(id))
                return NULL;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
                              (struct sockaddr *)&id->route.addr.dst_addr,
                              listen_id, ib_event, ss_family,
                              ib_event->param.sidr_req_rcvd.service_id))
                goto err;

        if (net_dev) {
                ret = rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL);
                if (ret)
                        goto err;
        } else {
                if (!cma_any_addr(cma_src_addr(id_priv))) {
                        ret = cma_translate_addr(cma_src_addr(id_priv),
                                                 &id->route.addr.dev_addr);
                        if (ret)
                                goto err;
                }
        }

        id_priv->state = RDMA_CM_CONNECT;
        return id_priv;
err:
        rdma_destroy_id(id);
        return NULL;
}

static void cma_set_req_event_data(struct rdma_cm_event *event,
                                   struct ib_cm_req_event_param *req_data,
                                   void *private_data, int offset)
{
        event->param.conn.private_data = private_data + offset;
        event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
        event->param.conn.responder_resources = req_data->responder_resources;
        event->param.conn.initiator_depth = req_data->initiator_depth;
        event->param.conn.flow_control = req_data->flow_control;
        event->param.conn.retry_count = req_data->retry_count;
        event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
        event->param.conn.srq = req_data->srq;
        event->param.conn.qp_num = req_data->remote_qpn;
}

static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
{
        return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
                 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
                ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
                 (id->qp_type == IB_QPT_UD)) ||
                (!id->qp_type));
}

static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
        struct rdma_id_private *listen_id, *conn_id = NULL;
        struct rdma_cm_event event;
        struct net_device *net_dev;
        int offset, ret;

        listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
        if (IS_ERR(listen_id))
                return PTR_ERR(listen_id);

        if (!cma_check_req_qp_type(&listen_id->id, ib_event)) {
                ret = -EINVAL;
                goto net_dev_put;
        }

        mutex_lock(&listen_id->handler_mutex);
        if (listen_id->state != RDMA_CM_LISTEN) {
                ret = -ECONNABORTED;
                goto err1;
        }

        memset(&event, 0, sizeof event);
        offset = cma_user_data_offset(listen_id);
        event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
        if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
                conn_id = cma_new_udp_id(&listen_id->id, ib_event, net_dev);
                event.param.ud.private_data = ib_event->private_data + offset;
                event.param.ud.private_data_len =
                        IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
        } else {
                conn_id = cma_new_conn_id(&listen_id->id, ib_event, net_dev);
                cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
                                       ib_event->private_data, offset);
        }
        if (!conn_id) {
                ret = -ENOMEM;
                goto err1;
        }

        mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
        ret = cma_acquire_dev(conn_id, listen_id);
        if (ret)
                goto err2;

        conn_id->cm_id.ib = cm_id;
        cm_id->context = conn_id;
        cm_id->cm_handler = cma_ib_handler;

        /*
         * Protect against the user destroying conn_id from another thread
         * until we're done accessing it.
         */
        atomic_inc(&conn_id->refcount);
        ret = conn_id->id.event_handler(&conn_id->id, &event);
        if (ret)
                goto err3;
        /*
         * Acquire mutex to prevent user executing rdma_destroy_id()
         * while we're accessing the cm_id.
         */
        mutex_lock(&lock);
        if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
            (conn_id->id.qp_type != IB_QPT_UD))
                ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
        mutex_unlock(&lock);
        mutex_unlock(&conn_id->handler_mutex);
        mutex_unlock(&listen_id->handler_mutex);
        cma_deref_id(conn_id);
        if (net_dev)
                dev_put(net_dev);
        return 0;

err3:
        cma_deref_id(conn_id);
        /* Destroy the CM ID by returning a non-zero value. */
        conn_id->cm_id.ib = NULL;
err2:
        cma_exch(conn_id, RDMA_CM_DESTROYING);
        mutex_unlock(&conn_id->handler_mutex);
err1:
        mutex_unlock(&listen_id->handler_mutex);
        if (conn_id)
                rdma_destroy_id(&conn_id->id);

net_dev_put:
        if (net_dev)
                dev_put(net_dev);

        return ret;
}
*/ 1992 conn_id->cm_id.ib = NULL; 1993 err2: 1994 cma_exch(conn_id, RDMA_CM_DESTROYING); 1995 mutex_unlock(&conn_id->handler_mutex); 1996 err1: 1997 mutex_unlock(&listen_id->handler_mutex); 1998 if (conn_id) 1999 rdma_destroy_id(&conn_id->id); 2000 2001 net_dev_put: 2002 if (net_dev) 2003 dev_put(net_dev); 2004 2005 return ret; 2006 } 2007 2008 __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr) 2009 { 2010 if (addr->sa_family == AF_IB) 2011 return ((struct sockaddr_ib *) addr)->sib_sid; 2012 2013 return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr))); 2014 } 2015 EXPORT_SYMBOL(rdma_get_service_id); 2016 2017 static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) 2018 { 2019 struct rdma_id_private *id_priv = iw_id->context; 2020 struct rdma_cm_event event; 2021 int ret = 0; 2022 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; 2023 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; 2024 2025 mutex_lock(&id_priv->handler_mutex); 2026 if (id_priv->state != RDMA_CM_CONNECT) 2027 goto out; 2028 2029 memset(&event, 0, sizeof event); 2030 switch (iw_event->event) { 2031 case IW_CM_EVENT_CLOSE: 2032 event.event = RDMA_CM_EVENT_DISCONNECTED; 2033 break; 2034 case IW_CM_EVENT_CONNECT_REPLY: 2035 memcpy(cma_src_addr(id_priv), laddr, 2036 rdma_addr_size(laddr)); 2037 memcpy(cma_dst_addr(id_priv), raddr, 2038 rdma_addr_size(raddr)); 2039 switch (iw_event->status) { 2040 case 0: 2041 event.event = RDMA_CM_EVENT_ESTABLISHED; 2042 event.param.conn.initiator_depth = iw_event->ird; 2043 event.param.conn.responder_resources = iw_event->ord; 2044 break; 2045 case -ECONNRESET: 2046 case -ECONNREFUSED: 2047 event.event = RDMA_CM_EVENT_REJECTED; 2048 break; 2049 case -ETIMEDOUT: 2050 event.event = RDMA_CM_EVENT_UNREACHABLE; 2051 break; 2052 default: 2053 event.event = RDMA_CM_EVENT_CONNECT_ERROR; 2054 break; 2055 } 2056 break; 2057 case IW_CM_EVENT_ESTABLISHED: 2058 event.event = RDMA_CM_EVENT_ESTABLISHED; 2059 event.param.conn.initiator_depth = iw_event->ird; 2060 event.param.conn.responder_resources = iw_event->ord; 2061 break; 2062 default: 2063 BUG_ON(1); 2064 } 2065 2066 event.status = iw_event->status; 2067 event.param.conn.private_data = iw_event->private_data; 2068 event.param.conn.private_data_len = iw_event->private_data_len; 2069 ret = id_priv->id.event_handler(&id_priv->id, &event); 2070 if (ret) { 2071 /* Destroy the CM ID by returning a non-zero value. 
*/ 2072 id_priv->cm_id.iw = NULL; 2073 cma_exch(id_priv, RDMA_CM_DESTROYING); 2074 mutex_unlock(&id_priv->handler_mutex); 2075 rdma_destroy_id(&id_priv->id); 2076 return ret; 2077 } 2078 2079 out: 2080 mutex_unlock(&id_priv->handler_mutex); 2081 return ret; 2082 } 2083 2084 static int iw_conn_req_handler(struct iw_cm_id *cm_id, 2085 struct iw_cm_event *iw_event) 2086 { 2087 struct rdma_cm_id *new_cm_id; 2088 struct rdma_id_private *listen_id, *conn_id; 2089 struct rdma_cm_event event; 2090 int ret = -ECONNABORTED; 2091 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; 2092 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; 2093 2094 listen_id = cm_id->context; 2095 2096 mutex_lock(&listen_id->handler_mutex); 2097 if (listen_id->state != RDMA_CM_LISTEN) 2098 goto out; 2099 2100 /* Create a new RDMA id for the new IW CM ID */ 2101 new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net, 2102 listen_id->id.event_handler, 2103 listen_id->id.context, 2104 RDMA_PS_TCP, IB_QPT_RC); 2105 if (IS_ERR(new_cm_id)) { 2106 ret = -ENOMEM; 2107 goto out; 2108 } 2109 conn_id = container_of(new_cm_id, struct rdma_id_private, id); 2110 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); 2111 conn_id->state = RDMA_CM_CONNECT; 2112 2113 ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL); 2114 if (ret) { 2115 mutex_unlock(&conn_id->handler_mutex); 2116 rdma_destroy_id(new_cm_id); 2117 goto out; 2118 } 2119 2120 ret = cma_acquire_dev(conn_id, listen_id); 2121 if (ret) { 2122 mutex_unlock(&conn_id->handler_mutex); 2123 rdma_destroy_id(new_cm_id); 2124 goto out; 2125 } 2126 2127 conn_id->cm_id.iw = cm_id; 2128 cm_id->context = conn_id; 2129 cm_id->cm_handler = cma_iw_handler; 2130 2131 memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr)); 2132 memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr)); 2133 2134 memset(&event, 0, sizeof event); 2135 event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 2136 event.param.conn.private_data = iw_event->private_data; 2137 event.param.conn.private_data_len = iw_event->private_data_len; 2138 event.param.conn.initiator_depth = iw_event->ird; 2139 event.param.conn.responder_resources = iw_event->ord; 2140 2141 /* 2142 * Protect against the user destroying conn_id from another thread 2143 * until we're done accessing it. 
2144 */ 2145 atomic_inc(&conn_id->refcount); 2146 ret = conn_id->id.event_handler(&conn_id->id, &event); 2147 if (ret) { 2148 /* User wants to destroy the CM ID */ 2149 conn_id->cm_id.iw = NULL; 2150 cma_exch(conn_id, RDMA_CM_DESTROYING); 2151 mutex_unlock(&conn_id->handler_mutex); 2152 cma_deref_id(conn_id); 2153 rdma_destroy_id(&conn_id->id); 2154 goto out; 2155 } 2156 2157 mutex_unlock(&conn_id->handler_mutex); 2158 cma_deref_id(conn_id); 2159 2160 out: 2161 mutex_unlock(&listen_id->handler_mutex); 2162 return ret; 2163 } 2164 2165 static int cma_ib_listen(struct rdma_id_private *id_priv) 2166 { 2167 struct sockaddr *addr; 2168 struct ib_cm_id *id; 2169 __be64 svc_id; 2170 2171 addr = cma_src_addr(id_priv); 2172 svc_id = rdma_get_service_id(&id_priv->id, addr); 2173 id = ib_cm_insert_listen(id_priv->id.device, cma_req_handler, svc_id); 2174 if (IS_ERR(id)) 2175 return PTR_ERR(id); 2176 id_priv->cm_id.ib = id; 2177 2178 return 0; 2179 } 2180 2181 static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) 2182 { 2183 int ret; 2184 struct iw_cm_id *id; 2185 2186 id = iw_create_cm_id(id_priv->id.device, 2187 iw_conn_req_handler, 2188 id_priv); 2189 if (IS_ERR(id)) 2190 return PTR_ERR(id); 2191 2192 id->tos = id_priv->tos; 2193 id_priv->cm_id.iw = id; 2194 2195 memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv), 2196 rdma_addr_size(cma_src_addr(id_priv))); 2197 2198 ret = iw_cm_listen(id_priv->cm_id.iw, backlog); 2199 2200 if (ret) { 2201 iw_destroy_cm_id(id_priv->cm_id.iw); 2202 id_priv->cm_id.iw = NULL; 2203 } 2204 2205 return ret; 2206 } 2207 2208 static int cma_listen_handler(struct rdma_cm_id *id, 2209 struct rdma_cm_event *event) 2210 { 2211 struct rdma_id_private *id_priv = id->context; 2212 2213 id->context = id_priv->id.context; 2214 id->event_handler = id_priv->id.event_handler; 2215 return id_priv->id.event_handler(id, event); 2216 } 2217 2218 static void cma_listen_on_dev(struct rdma_id_private *id_priv, 2219 struct cma_device *cma_dev) 2220 { 2221 struct rdma_id_private *dev_id_priv; 2222 struct rdma_cm_id *id; 2223 struct net *net = id_priv->id.route.addr.dev_addr.net; 2224 int ret; 2225 2226 if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) 2227 return; 2228 2229 id = rdma_create_id(net, cma_listen_handler, id_priv, id_priv->id.ps, 2230 id_priv->id.qp_type); 2231 if (IS_ERR(id)) 2232 return; 2233 2234 dev_id_priv = container_of(id, struct rdma_id_private, id); 2235 2236 dev_id_priv->state = RDMA_CM_ADDR_BOUND; 2237 memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv), 2238 rdma_addr_size(cma_src_addr(id_priv))); 2239 2240 _cma_attach_to_dev(dev_id_priv, cma_dev); 2241 list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); 2242 atomic_inc(&id_priv->refcount); 2243 dev_id_priv->internal_id = 1; 2244 dev_id_priv->afonly = id_priv->afonly; 2245 2246 ret = rdma_listen(id, id_priv->backlog); 2247 if (ret) 2248 pr_warn("RDMA CMA: cma_listen_on_dev, error %d, listening on device %s\n", 2249 ret, cma_dev->device->name); 2250 } 2251 2252 static void cma_listen_on_all(struct rdma_id_private *id_priv) 2253 { 2254 struct cma_device *cma_dev; 2255 2256 mutex_lock(&lock); 2257 list_add_tail(&id_priv->list, &listen_any_list); 2258 list_for_each_entry(cma_dev, &dev_list, list) 2259 cma_listen_on_dev(id_priv, cma_dev); 2260 mutex_unlock(&lock); 2261 } 2262 2263 void rdma_set_service_type(struct rdma_cm_id *id, int tos) 2264 { 2265 struct rdma_id_private *id_priv; 2266 2267 id_priv = container_of(id, struct rdma_id_private, id); 2268 
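/*
 * A brief note on what this cached value is used for (all taken from the
 * surrounding code): the type of service is only stored here and applied
 * later when the route is resolved -- for IPv4 over IB it becomes the path
 * record QoS class, for RoCE it is mapped to a service level by
 * iboe_tos_to_sl(), and for iWARP it is copied to the iw_cm_id.
 */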
id_priv->tos = (u8) tos; 2269 } 2270 EXPORT_SYMBOL(rdma_set_service_type); 2271 2272 static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec, 2273 void *context) 2274 { 2275 struct cma_work *work = context; 2276 struct rdma_route *route; 2277 2278 route = &work->id->id.route; 2279 2280 if (!status) { 2281 route->num_paths = 1; 2282 *route->path_rec = *path_rec; 2283 } else { 2284 work->old_state = RDMA_CM_ROUTE_QUERY; 2285 work->new_state = RDMA_CM_ADDR_RESOLVED; 2286 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; 2287 work->event.status = status; 2288 } 2289 2290 queue_work(cma_wq, &work->work); 2291 } 2292 2293 static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, 2294 struct cma_work *work) 2295 { 2296 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 2297 struct ib_sa_path_rec path_rec; 2298 ib_sa_comp_mask comp_mask; 2299 struct sockaddr_in6 *sin6; 2300 struct sockaddr_ib *sib; 2301 2302 memset(&path_rec, 0, sizeof path_rec); 2303 rdma_addr_get_sgid(dev_addr, &path_rec.sgid); 2304 rdma_addr_get_dgid(dev_addr, &path_rec.dgid); 2305 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 2306 path_rec.numb_path = 1; 2307 path_rec.reversible = 1; 2308 path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); 2309 2310 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | 2311 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH | 2312 IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID; 2313 2314 switch (cma_family(id_priv)) { 2315 case AF_INET: 2316 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos); 2317 comp_mask |= IB_SA_PATH_REC_QOS_CLASS; 2318 break; 2319 case AF_INET6: 2320 sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); 2321 path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20); 2322 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; 2323 break; 2324 case AF_IB: 2325 sib = (struct sockaddr_ib *) cma_src_addr(id_priv); 2326 path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20); 2327 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; 2328 break; 2329 } 2330 2331 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device, 2332 id_priv->id.port_num, &path_rec, 2333 comp_mask, timeout_ms, 2334 GFP_KERNEL, cma_query_handler, 2335 work, &id_priv->query); 2336 2337 return (id_priv->query_id < 0) ? 
id_priv->query_id : 0; 2338 } 2339 2340 static void cma_work_handler(struct work_struct *_work) 2341 { 2342 struct cma_work *work = container_of(_work, struct cma_work, work); 2343 struct rdma_id_private *id_priv = work->id; 2344 int destroy = 0; 2345 2346 mutex_lock(&id_priv->handler_mutex); 2347 if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) 2348 goto out; 2349 2350 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { 2351 cma_exch(id_priv, RDMA_CM_DESTROYING); 2352 destroy = 1; 2353 } 2354 out: 2355 mutex_unlock(&id_priv->handler_mutex); 2356 cma_deref_id(id_priv); 2357 if (destroy) 2358 rdma_destroy_id(&id_priv->id); 2359 kfree(work); 2360 } 2361 2362 static void cma_ndev_work_handler(struct work_struct *_work) 2363 { 2364 struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work); 2365 struct rdma_id_private *id_priv = work->id; 2366 int destroy = 0; 2367 2368 mutex_lock(&id_priv->handler_mutex); 2369 if (id_priv->state == RDMA_CM_DESTROYING || 2370 id_priv->state == RDMA_CM_DEVICE_REMOVAL) 2371 goto out; 2372 2373 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { 2374 cma_exch(id_priv, RDMA_CM_DESTROYING); 2375 destroy = 1; 2376 } 2377 2378 out: 2379 mutex_unlock(&id_priv->handler_mutex); 2380 cma_deref_id(id_priv); 2381 if (destroy) 2382 rdma_destroy_id(&id_priv->id); 2383 kfree(work); 2384 } 2385 2386 static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms) 2387 { 2388 struct rdma_route *route = &id_priv->id.route; 2389 struct cma_work *work; 2390 int ret; 2391 2392 work = kzalloc(sizeof *work, GFP_KERNEL); 2393 if (!work) 2394 return -ENOMEM; 2395 2396 work->id = id_priv; 2397 INIT_WORK(&work->work, cma_work_handler); 2398 work->old_state = RDMA_CM_ROUTE_QUERY; 2399 work->new_state = RDMA_CM_ROUTE_RESOLVED; 2400 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 2401 2402 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); 2403 if (!route->path_rec) { 2404 ret = -ENOMEM; 2405 goto err1; 2406 } 2407 2408 ret = cma_query_ib_route(id_priv, timeout_ms, work); 2409 if (ret) 2410 goto err2; 2411 2412 return 0; 2413 err2: 2414 kfree(route->path_rec); 2415 route->path_rec = NULL; 2416 err1: 2417 kfree(work); 2418 return ret; 2419 } 2420 2421 int rdma_set_ib_paths(struct rdma_cm_id *id, 2422 struct ib_sa_path_rec *path_rec, int num_paths) 2423 { 2424 struct rdma_id_private *id_priv; 2425 int ret; 2426 2427 id_priv = container_of(id, struct rdma_id_private, id); 2428 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, 2429 RDMA_CM_ROUTE_RESOLVED)) 2430 return -EINVAL; 2431 2432 id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths, 2433 GFP_KERNEL); 2434 if (!id->route.path_rec) { 2435 ret = -ENOMEM; 2436 goto err; 2437 } 2438 2439 id->route.num_paths = num_paths; 2440 return 0; 2441 err: 2442 cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED); 2443 return ret; 2444 } 2445 EXPORT_SYMBOL(rdma_set_ib_paths); 2446 2447 static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms) 2448 { 2449 struct cma_work *work; 2450 2451 work = kzalloc(sizeof *work, GFP_KERNEL); 2452 if (!work) 2453 return -ENOMEM; 2454 2455 work->id = id_priv; 2456 INIT_WORK(&work->work, cma_work_handler); 2457 work->old_state = RDMA_CM_ROUTE_QUERY; 2458 work->new_state = RDMA_CM_ROUTE_RESOLVED; 2459 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 2460 queue_work(cma_wq, &work->work); 2461 return 0; 2462 } 2463 2464 static int iboe_tos_to_sl(struct net_device *ndev, int tos) 2465 { 
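	/*
	 * Summary of the mapping done below: rt_tos2priority() converts the
	 * IP ToS to a socket priority, which is then translated through the
	 * real device's prio-to-traffic-class map or, for VLAN devices, the
	 * egress QoS map.  Without either mapping the SL defaults to 0.
	 */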
2466 int prio; 2467 struct net_device *dev; 2468 2469 prio = rt_tos2priority(tos); 2470 dev = ndev->priv_flags & IFF_802_1Q_VLAN ? 2471 vlan_dev_real_dev(ndev) : ndev; 2472 2473 if (dev->num_tc) 2474 return netdev_get_prio_tc_map(dev, prio); 2475 2476 #if IS_ENABLED(CONFIG_VLAN_8021Q) 2477 if (ndev->priv_flags & IFF_802_1Q_VLAN) 2478 return (vlan_dev_get_egress_qos_mask(ndev, prio) & 2479 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; 2480 #endif 2481 return 0; 2482 } 2483 2484 static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type, 2485 unsigned long supported_gids, 2486 enum ib_gid_type default_gid) 2487 { 2488 if ((network_type == RDMA_NETWORK_IPV4 || 2489 network_type == RDMA_NETWORK_IPV6) && 2490 test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids)) 2491 return IB_GID_TYPE_ROCE_UDP_ENCAP; 2492 2493 return default_gid; 2494 } 2495 2496 static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) 2497 { 2498 struct rdma_route *route = &id_priv->id.route; 2499 struct rdma_addr *addr = &route->addr; 2500 struct cma_work *work; 2501 int ret; 2502 struct net_device *ndev = NULL; 2503 2504 2505 work = kzalloc(sizeof *work, GFP_KERNEL); 2506 if (!work) 2507 return -ENOMEM; 2508 2509 work->id = id_priv; 2510 INIT_WORK(&work->work, cma_work_handler); 2511 2512 route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL); 2513 if (!route->path_rec) { 2514 ret = -ENOMEM; 2515 goto err1; 2516 } 2517 2518 route->num_paths = 1; 2519 2520 if (addr->dev_addr.bound_dev_if) { 2521 unsigned long supported_gids; 2522 2523 ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if); 2524 if (!ndev) { 2525 ret = -ENODEV; 2526 goto err2; 2527 } 2528 2529 if (ndev->flags & IFF_LOOPBACK) { 2530 dev_put(ndev); 2531 if (!id_priv->id.device->get_netdev) { 2532 ret = -EOPNOTSUPP; 2533 goto err2; 2534 } 2535 2536 ndev = id_priv->id.device->get_netdev(id_priv->id.device, 2537 id_priv->id.port_num); 2538 if (!ndev) { 2539 ret = -ENODEV; 2540 goto err2; 2541 } 2542 } 2543 2544 route->path_rec->net = &init_net; 2545 route->path_rec->ifindex = ndev->ifindex; 2546 supported_gids = roce_gid_type_mask_support(id_priv->id.device, 2547 id_priv->id.port_num); 2548 route->path_rec->gid_type = 2549 cma_route_gid_type(addr->dev_addr.network, 2550 supported_gids, 2551 id_priv->gid_type); 2552 } 2553 if (!ndev) { 2554 ret = -ENODEV; 2555 goto err2; 2556 } 2557 2558 memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN); 2559 2560 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, 2561 &route->path_rec->sgid); 2562 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr, 2563 &route->path_rec->dgid); 2564 2565 /* Use the hint from IP Stack to select GID Type */ 2566 if (route->path_rec->gid_type < ib_network_to_gid_type(addr->dev_addr.network)) 2567 route->path_rec->gid_type = ib_network_to_gid_type(addr->dev_addr.network); 2568 if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB) 2569 /* TODO: get the hoplimit from the inet/inet6 device */ 2570 route->path_rec->hop_limit = addr->dev_addr.hoplimit; 2571 else 2572 route->path_rec->hop_limit = 1; 2573 route->path_rec->reversible = 1; 2574 route->path_rec->pkey = cpu_to_be16(0xffff); 2575 route->path_rec->mtu_selector = IB_SA_EQ; 2576 route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos); 2577 route->path_rec->mtu = iboe_get_mtu(ndev->mtu); 2578 route->path_rec->rate_selector = IB_SA_EQ; 2579 route->path_rec->rate = iboe_get_rate(ndev); 2580 dev_put(ndev); 2581 
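	/*
	 * No SA query is issued on RoCE: the remaining path record fields
	 * are filled in locally with fixed values, the netdev-derived MTU is
	 * validated, and the ROUTE_RESOLVED event is delivered from the
	 * workqueue below.
	 */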
route->path_rec->packet_life_time_selector = IB_SA_EQ; 2582 route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME; 2583 if (!route->path_rec->mtu) { 2584 ret = -EINVAL; 2585 goto err2; 2586 } 2587 2588 work->old_state = RDMA_CM_ROUTE_QUERY; 2589 work->new_state = RDMA_CM_ROUTE_RESOLVED; 2590 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 2591 work->event.status = 0; 2592 2593 queue_work(cma_wq, &work->work); 2594 2595 return 0; 2596 2597 err2: 2598 kfree(route->path_rec); 2599 route->path_rec = NULL; 2600 err1: 2601 kfree(work); 2602 return ret; 2603 } 2604 2605 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) 2606 { 2607 struct rdma_id_private *id_priv; 2608 int ret; 2609 2610 id_priv = container_of(id, struct rdma_id_private, id); 2611 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY)) 2612 return -EINVAL; 2613 2614 atomic_inc(&id_priv->refcount); 2615 if (rdma_cap_ib_sa(id->device, id->port_num)) 2616 ret = cma_resolve_ib_route(id_priv, timeout_ms); 2617 else if (rdma_protocol_roce(id->device, id->port_num)) 2618 ret = cma_resolve_iboe_route(id_priv); 2619 else if (rdma_protocol_iwarp(id->device, id->port_num)) 2620 ret = cma_resolve_iw_route(id_priv, timeout_ms); 2621 else 2622 ret = -ENOSYS; 2623 2624 if (ret) 2625 goto err; 2626 2627 return 0; 2628 err: 2629 cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED); 2630 cma_deref_id(id_priv); 2631 return ret; 2632 } 2633 EXPORT_SYMBOL(rdma_resolve_route); 2634 2635 static void cma_set_loopback(struct sockaddr *addr) 2636 { 2637 switch (addr->sa_family) { 2638 case AF_INET: 2639 ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK); 2640 break; 2641 case AF_INET6: 2642 ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr, 2643 0, 0, 0, htonl(1)); 2644 break; 2645 default: 2646 ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr, 2647 0, 0, 0, htonl(1)); 2648 break; 2649 } 2650 } 2651 2652 static int cma_bind_loopback(struct rdma_id_private *id_priv) 2653 { 2654 struct cma_device *cma_dev, *cur_dev; 2655 struct ib_port_attr port_attr; 2656 union ib_gid gid; 2657 u16 pkey; 2658 int ret; 2659 u8 p; 2660 2661 cma_dev = NULL; 2662 mutex_lock(&lock); 2663 list_for_each_entry(cur_dev, &dev_list, list) { 2664 if (cma_family(id_priv) == AF_IB && 2665 !rdma_cap_ib_cm(cur_dev->device, 1)) 2666 continue; 2667 2668 if (!cma_dev) 2669 cma_dev = cur_dev; 2670 2671 for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { 2672 if (!ib_query_port(cur_dev->device, p, &port_attr) && 2673 port_attr.state == IB_PORT_ACTIVE) { 2674 cma_dev = cur_dev; 2675 goto port_found; 2676 } 2677 } 2678 } 2679 2680 if (!cma_dev) { 2681 ret = -ENODEV; 2682 goto out; 2683 } 2684 2685 p = 1; 2686 2687 port_found: 2688 ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid, NULL); 2689 if (ret) 2690 goto out; 2691 2692 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey); 2693 if (ret) 2694 goto out; 2695 2696 id_priv->id.route.addr.dev_addr.dev_type = 2697 (rdma_protocol_ib(cma_dev->device, p)) ? 
2698 ARPHRD_INFINIBAND : ARPHRD_ETHER; 2699 2700 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); 2701 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); 2702 id_priv->id.port_num = p; 2703 cma_attach_to_dev(id_priv, cma_dev); 2704 cma_set_loopback(cma_src_addr(id_priv)); 2705 out: 2706 mutex_unlock(&lock); 2707 return ret; 2708 } 2709 2710 static void addr_handler(int status, struct sockaddr *src_addr, 2711 struct rdma_dev_addr *dev_addr, void *context) 2712 { 2713 struct rdma_id_private *id_priv = context; 2714 struct rdma_cm_event event; 2715 2716 memset(&event, 0, sizeof event); 2717 mutex_lock(&id_priv->handler_mutex); 2718 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, 2719 RDMA_CM_ADDR_RESOLVED)) 2720 goto out; 2721 2722 memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr)); 2723 if (!status && !id_priv->cma_dev) 2724 status = cma_acquire_dev(id_priv, NULL); 2725 2726 if (status) { 2727 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, 2728 RDMA_CM_ADDR_BOUND)) 2729 goto out; 2730 event.event = RDMA_CM_EVENT_ADDR_ERROR; 2731 event.status = status; 2732 } else 2733 event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 2734 2735 if (id_priv->id.event_handler(&id_priv->id, &event)) { 2736 cma_exch(id_priv, RDMA_CM_DESTROYING); 2737 mutex_unlock(&id_priv->handler_mutex); 2738 cma_deref_id(id_priv); 2739 rdma_destroy_id(&id_priv->id); 2740 return; 2741 } 2742 out: 2743 mutex_unlock(&id_priv->handler_mutex); 2744 cma_deref_id(id_priv); 2745 } 2746 2747 static int cma_resolve_loopback(struct rdma_id_private *id_priv) 2748 { 2749 struct cma_work *work; 2750 union ib_gid gid; 2751 int ret; 2752 2753 work = kzalloc(sizeof *work, GFP_KERNEL); 2754 if (!work) 2755 return -ENOMEM; 2756 2757 if (!id_priv->cma_dev) { 2758 ret = cma_bind_loopback(id_priv); 2759 if (ret) 2760 goto err; 2761 } 2762 2763 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); 2764 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); 2765 2766 work->id = id_priv; 2767 INIT_WORK(&work->work, cma_work_handler); 2768 work->old_state = RDMA_CM_ADDR_QUERY; 2769 work->new_state = RDMA_CM_ADDR_RESOLVED; 2770 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 2771 queue_work(cma_wq, &work->work); 2772 return 0; 2773 err: 2774 kfree(work); 2775 return ret; 2776 } 2777 2778 static int cma_resolve_ib_addr(struct rdma_id_private *id_priv) 2779 { 2780 struct cma_work *work; 2781 int ret; 2782 2783 work = kzalloc(sizeof *work, GFP_KERNEL); 2784 if (!work) 2785 return -ENOMEM; 2786 2787 if (!id_priv->cma_dev) { 2788 ret = cma_resolve_ib_dev(id_priv); 2789 if (ret) 2790 goto err; 2791 } 2792 2793 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) 2794 &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr)); 2795 2796 work->id = id_priv; 2797 INIT_WORK(&work->work, cma_work_handler); 2798 work->old_state = RDMA_CM_ADDR_QUERY; 2799 work->new_state = RDMA_CM_ADDR_RESOLVED; 2800 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 2801 queue_work(cma_wq, &work->work); 2802 return 0; 2803 err: 2804 kfree(work); 2805 return ret; 2806 } 2807 2808 static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 2809 struct sockaddr *dst_addr) 2810 { 2811 if (!src_addr || !src_addr->sa_family) { 2812 src_addr = (struct sockaddr *) &id->route.addr.src_addr; 2813 src_addr->sa_family = dst_addr->sa_family; 2814 if (dst_addr->sa_family == AF_INET6) { 2815 struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; 2816 struct sockaddr_in6 *dst_addr6 = (struct 
sockaddr_in6 *) dst_addr; 2817 src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; 2818 if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) 2819 id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id; 2820 } else if (dst_addr->sa_family == AF_IB) { 2821 ((struct sockaddr_ib *) src_addr)->sib_pkey = 2822 ((struct sockaddr_ib *) dst_addr)->sib_pkey; 2823 } 2824 } 2825 return rdma_bind_addr(id, src_addr); 2826 } 2827 2828 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 2829 struct sockaddr *dst_addr, int timeout_ms) 2830 { 2831 struct rdma_id_private *id_priv; 2832 int ret; 2833 2834 id_priv = container_of(id, struct rdma_id_private, id); 2835 if (id_priv->state == RDMA_CM_IDLE) { 2836 ret = cma_bind_addr(id, src_addr, dst_addr); 2837 if (ret) 2838 return ret; 2839 } 2840 2841 if (cma_family(id_priv) != dst_addr->sa_family) 2842 return -EINVAL; 2843 2844 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) 2845 return -EINVAL; 2846 2847 atomic_inc(&id_priv->refcount); 2848 memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); 2849 if (cma_any_addr(dst_addr)) { 2850 ret = cma_resolve_loopback(id_priv); 2851 } else { 2852 if (dst_addr->sa_family == AF_IB) { 2853 ret = cma_resolve_ib_addr(id_priv); 2854 } else { 2855 ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv), 2856 dst_addr, &id->route.addr.dev_addr, 2857 timeout_ms, addr_handler, id_priv); 2858 } 2859 } 2860 if (ret) 2861 goto err; 2862 2863 return 0; 2864 err: 2865 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); 2866 cma_deref_id(id_priv); 2867 return ret; 2868 } 2869 EXPORT_SYMBOL(rdma_resolve_addr); 2870 2871 int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse) 2872 { 2873 struct rdma_id_private *id_priv; 2874 unsigned long flags; 2875 int ret; 2876 2877 id_priv = container_of(id, struct rdma_id_private, id); 2878 spin_lock_irqsave(&id_priv->lock, flags); 2879 if (reuse || id_priv->state == RDMA_CM_IDLE) { 2880 id_priv->reuseaddr = reuse; 2881 ret = 0; 2882 } else { 2883 ret = -EINVAL; 2884 } 2885 spin_unlock_irqrestore(&id_priv->lock, flags); 2886 return ret; 2887 } 2888 EXPORT_SYMBOL(rdma_set_reuseaddr); 2889 2890 int rdma_set_afonly(struct rdma_cm_id *id, int afonly) 2891 { 2892 struct rdma_id_private *id_priv; 2893 unsigned long flags; 2894 int ret; 2895 2896 id_priv = container_of(id, struct rdma_id_private, id); 2897 spin_lock_irqsave(&id_priv->lock, flags); 2898 if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) { 2899 id_priv->options |= (1 << CMA_OPTION_AFONLY); 2900 id_priv->afonly = afonly; 2901 ret = 0; 2902 } else { 2903 ret = -EINVAL; 2904 } 2905 spin_unlock_irqrestore(&id_priv->lock, flags); 2906 return ret; 2907 } 2908 EXPORT_SYMBOL(rdma_set_afonly); 2909 2910 static void cma_bind_port(struct rdma_bind_list *bind_list, 2911 struct rdma_id_private *id_priv) 2912 { 2913 struct sockaddr *addr; 2914 struct sockaddr_ib *sib; 2915 u64 sid, mask; 2916 __be16 port; 2917 2918 addr = cma_src_addr(id_priv); 2919 port = htons(bind_list->port); 2920 2921 switch (addr->sa_family) { 2922 case AF_INET: 2923 ((struct sockaddr_in *) addr)->sin_port = port; 2924 break; 2925 case AF_INET6: 2926 ((struct sockaddr_in6 *) addr)->sin6_port = port; 2927 break; 2928 case AF_IB: 2929 sib = (struct sockaddr_ib *) addr; 2930 sid = be64_to_cpu(sib->sib_sid); 2931 mask = be64_to_cpu(sib->sib_sid_mask); 2932 sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port)); 2933 sib->sib_sid_mask = cpu_to_be64(~0ULL); 2934 
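		/*
		 * The bound port now occupies the low 16 bits of the AF_IB
		 * service ID, and the mask is widened so lookups match on
		 * the full SID.
		 */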
break; 2935 } 2936 id_priv->bind_list = bind_list; 2937 hlist_add_head(&id_priv->node, &bind_list->owners); 2938 } 2939 2940 static int cma_alloc_port(enum rdma_port_space ps, 2941 struct rdma_id_private *id_priv, unsigned short snum) 2942 { 2943 struct rdma_bind_list *bind_list; 2944 int ret; 2945 2946 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); 2947 if (!bind_list) 2948 return -ENOMEM; 2949 2950 ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list, 2951 snum); 2952 if (ret < 0) 2953 goto err; 2954 2955 bind_list->ps = ps; 2956 bind_list->port = (unsigned short)ret; 2957 cma_bind_port(bind_list, id_priv); 2958 return 0; 2959 err: 2960 kfree(bind_list); 2961 return ret == -ENOSPC ? -EADDRNOTAVAIL : ret; 2962 } 2963 2964 static int cma_alloc_any_port(enum rdma_port_space ps, 2965 struct rdma_id_private *id_priv) 2966 { 2967 static unsigned int last_used_port; 2968 int low, high, remaining; 2969 unsigned int rover; 2970 struct net *net = id_priv->id.route.addr.dev_addr.net; 2971 2972 inet_get_local_port_range(net, &low, &high); 2973 remaining = (high - low) + 1; 2974 rover = prandom_u32() % remaining + low; 2975 retry: 2976 if (last_used_port != rover && 2977 !cma_ps_find(net, ps, (unsigned short)rover)) { 2978 int ret = cma_alloc_port(ps, id_priv, rover); 2979 /* 2980 * Remember previously used port number in order to avoid 2981 * re-using same port immediately after it is closed. 2982 */ 2983 if (!ret) 2984 last_used_port = rover; 2985 if (ret != -EADDRNOTAVAIL) 2986 return ret; 2987 } 2988 if (--remaining) { 2989 rover++; 2990 if ((rover < low) || (rover > high)) 2991 rover = low; 2992 goto retry; 2993 } 2994 return -EADDRNOTAVAIL; 2995 } 2996 2997 /* 2998 * Check that the requested port is available. This is called when trying to 2999 * bind to a specific port, or when trying to listen on a bound port. In 3000 * the latter case, the provided id_priv may already be on the bind_list, but 3001 * we still need to check that it's okay to start listening. 
3002 */ 3003 static int cma_check_port(struct rdma_bind_list *bind_list, 3004 struct rdma_id_private *id_priv, uint8_t reuseaddr) 3005 { 3006 struct rdma_id_private *cur_id; 3007 struct sockaddr *addr, *cur_addr; 3008 3009 addr = cma_src_addr(id_priv); 3010 hlist_for_each_entry(cur_id, &bind_list->owners, node) { 3011 if (id_priv == cur_id) 3012 continue; 3013 3014 if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr && 3015 cur_id->reuseaddr) 3016 continue; 3017 3018 cur_addr = cma_src_addr(cur_id); 3019 if (id_priv->afonly && cur_id->afonly && 3020 (addr->sa_family != cur_addr->sa_family)) 3021 continue; 3022 3023 if (cma_any_addr(addr) || cma_any_addr(cur_addr)) 3024 return -EADDRNOTAVAIL; 3025 3026 if (!cma_addr_cmp(addr, cur_addr)) 3027 return -EADDRINUSE; 3028 } 3029 return 0; 3030 } 3031 3032 static int cma_use_port(enum rdma_port_space ps, 3033 struct rdma_id_private *id_priv) 3034 { 3035 struct rdma_bind_list *bind_list; 3036 unsigned short snum; 3037 int ret; 3038 3039 snum = ntohs(cma_port(cma_src_addr(id_priv))); 3040 if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) 3041 return -EACCES; 3042 3043 bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum); 3044 if (!bind_list) { 3045 ret = cma_alloc_port(ps, id_priv, snum); 3046 } else { 3047 ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); 3048 if (!ret) 3049 cma_bind_port(bind_list, id_priv); 3050 } 3051 return ret; 3052 } 3053 3054 static int cma_bind_listen(struct rdma_id_private *id_priv) 3055 { 3056 struct rdma_bind_list *bind_list = id_priv->bind_list; 3057 int ret = 0; 3058 3059 mutex_lock(&lock); 3060 if (bind_list->owners.first->next) 3061 ret = cma_check_port(bind_list, id_priv, 0); 3062 mutex_unlock(&lock); 3063 return ret; 3064 } 3065 3066 static enum rdma_port_space cma_select_inet_ps( 3067 struct rdma_id_private *id_priv) 3068 { 3069 switch (id_priv->id.ps) { 3070 case RDMA_PS_TCP: 3071 case RDMA_PS_UDP: 3072 case RDMA_PS_IPOIB: 3073 case RDMA_PS_IB: 3074 return id_priv->id.ps; 3075 default: 3076 3077 return 0; 3078 } 3079 } 3080 3081 static enum rdma_port_space cma_select_ib_ps(struct rdma_id_private *id_priv) 3082 { 3083 enum rdma_port_space ps = 0; 3084 struct sockaddr_ib *sib; 3085 u64 sid_ps, mask, sid; 3086 3087 sib = (struct sockaddr_ib *) cma_src_addr(id_priv); 3088 mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK; 3089 sid = be64_to_cpu(sib->sib_sid) & mask; 3090 3091 if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) { 3092 sid_ps = RDMA_IB_IP_PS_IB; 3093 ps = RDMA_PS_IB; 3094 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) && 3095 (sid == (RDMA_IB_IP_PS_TCP & mask))) { 3096 sid_ps = RDMA_IB_IP_PS_TCP; 3097 ps = RDMA_PS_TCP; 3098 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) && 3099 (sid == (RDMA_IB_IP_PS_UDP & mask))) { 3100 sid_ps = RDMA_IB_IP_PS_UDP; 3101 ps = RDMA_PS_UDP; 3102 } 3103 3104 if (ps) { 3105 sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib))); 3106 sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK | 3107 be64_to_cpu(sib->sib_sid_mask)); 3108 } 3109 return ps; 3110 } 3111 3112 static int cma_get_port(struct rdma_id_private *id_priv) 3113 { 3114 enum rdma_port_space ps; 3115 int ret; 3116 3117 if (cma_family(id_priv) != AF_IB) 3118 ps = cma_select_inet_ps(id_priv); 3119 else 3120 ps = cma_select_ib_ps(id_priv); 3121 if (!ps) 3122 return -EPROTONOSUPPORT; 3123 3124 mutex_lock(&lock); 3125 if (cma_any_port(cma_src_addr(id_priv))) 3126 ret = 
cma_alloc_any_port(ps, id_priv); 3127 else 3128 ret = cma_use_port(ps, id_priv); 3129 mutex_unlock(&lock); 3130 3131 return ret; 3132 } 3133 3134 static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, 3135 struct sockaddr *addr) 3136 { 3137 #if IS_ENABLED(CONFIG_IPV6) 3138 struct sockaddr_in6 *sin6; 3139 3140 if (addr->sa_family != AF_INET6) 3141 return 0; 3142 3143 sin6 = (struct sockaddr_in6 *) addr; 3144 3145 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)) 3146 return 0; 3147 3148 if (!sin6->sin6_scope_id) 3149 return -EINVAL; 3150 3151 dev_addr->bound_dev_if = sin6->sin6_scope_id; 3152 #endif 3153 return 0; 3154 } 3155 3156 int rdma_listen(struct rdma_cm_id *id, int backlog) 3157 { 3158 struct rdma_id_private *id_priv; 3159 int ret; 3160 3161 id_priv = container_of(id, struct rdma_id_private, id); 3162 if (id_priv->state == RDMA_CM_IDLE) { 3163 id->route.addr.src_addr.ss_family = AF_INET; 3164 ret = rdma_bind_addr(id, cma_src_addr(id_priv)); 3165 if (ret) 3166 return ret; 3167 } 3168 3169 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) 3170 return -EINVAL; 3171 3172 if (id_priv->reuseaddr) { 3173 ret = cma_bind_listen(id_priv); 3174 if (ret) 3175 goto err; 3176 } 3177 3178 id_priv->backlog = backlog; 3179 if (id->device) { 3180 if (rdma_cap_ib_cm(id->device, 1)) { 3181 ret = cma_ib_listen(id_priv); 3182 if (ret) 3183 goto err; 3184 } else if (rdma_cap_iw_cm(id->device, 1)) { 3185 ret = cma_iw_listen(id_priv, backlog); 3186 if (ret) 3187 goto err; 3188 } else { 3189 ret = -ENOSYS; 3190 goto err; 3191 } 3192 } else 3193 cma_listen_on_all(id_priv); 3194 3195 return 0; 3196 err: 3197 id_priv->backlog = 0; 3198 cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND); 3199 return ret; 3200 } 3201 EXPORT_SYMBOL(rdma_listen); 3202 3203 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) 3204 { 3205 struct rdma_id_private *id_priv; 3206 int ret; 3207 3208 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 && 3209 addr->sa_family != AF_IB) 3210 return -EAFNOSUPPORT; 3211 3212 id_priv = container_of(id, struct rdma_id_private, id); 3213 if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND)) 3214 return -EINVAL; 3215 3216 ret = cma_check_linklocal(&id->route.addr.dev_addr, addr); 3217 if (ret) 3218 goto err1; 3219 3220 memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr)); 3221 if (!cma_any_addr(addr)) { 3222 ret = cma_translate_addr(addr, &id->route.addr.dev_addr); 3223 if (ret) 3224 goto err1; 3225 3226 ret = cma_acquire_dev(id_priv, NULL); 3227 if (ret) 3228 goto err1; 3229 } 3230 3231 if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) { 3232 if (addr->sa_family == AF_INET) 3233 id_priv->afonly = 1; 3234 #if IS_ENABLED(CONFIG_IPV6) 3235 else if (addr->sa_family == AF_INET6) { 3236 struct net *net = id_priv->id.route.addr.dev_addr.net; 3237 3238 id_priv->afonly = net->ipv6.sysctl.bindv6only; 3239 } 3240 #endif 3241 } 3242 ret = cma_get_port(id_priv); 3243 if (ret) 3244 goto err2; 3245 3246 return 0; 3247 err2: 3248 if (id_priv->cma_dev) 3249 cma_release_dev(id_priv); 3250 err1: 3251 cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE); 3252 return ret; 3253 } 3254 EXPORT_SYMBOL(rdma_bind_addr); 3255 3256 static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv) 3257 { 3258 struct cma_hdr *cma_hdr; 3259 3260 cma_hdr = hdr; 3261 cma_hdr->cma_version = CMA_VERSION; 3262 if (cma_family(id_priv) == AF_INET) { 3263 struct sockaddr_in *src4, *dst4; 3264 3265 src4 = (struct sockaddr_in *) 
cma_src_addr(id_priv); 3266 dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv); 3267 3268 cma_set_ip_ver(cma_hdr, 4); 3269 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; 3270 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; 3271 cma_hdr->port = src4->sin_port; 3272 } else if (cma_family(id_priv) == AF_INET6) { 3273 struct sockaddr_in6 *src6, *dst6; 3274 3275 src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); 3276 dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv); 3277 3278 cma_set_ip_ver(cma_hdr, 6); 3279 cma_hdr->src_addr.ip6 = src6->sin6_addr; 3280 cma_hdr->dst_addr.ip6 = dst6->sin6_addr; 3281 cma_hdr->port = src6->sin6_port; 3282 } 3283 return 0; 3284 } 3285 3286 static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, 3287 struct ib_cm_event *ib_event) 3288 { 3289 struct rdma_id_private *id_priv = cm_id->context; 3290 struct rdma_cm_event event; 3291 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; 3292 int ret = 0; 3293 3294 mutex_lock(&id_priv->handler_mutex); 3295 if (id_priv->state != RDMA_CM_CONNECT) 3296 goto out; 3297 3298 memset(&event, 0, sizeof event); 3299 switch (ib_event->event) { 3300 case IB_CM_SIDR_REQ_ERROR: 3301 event.event = RDMA_CM_EVENT_UNREACHABLE; 3302 event.status = -ETIMEDOUT; 3303 break; 3304 case IB_CM_SIDR_REP_RECEIVED: 3305 event.param.ud.private_data = ib_event->private_data; 3306 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; 3307 if (rep->status != IB_SIDR_SUCCESS) { 3308 event.event = RDMA_CM_EVENT_UNREACHABLE; 3309 event.status = ib_event->param.sidr_rep_rcvd.status; 3310 break; 3311 } 3312 ret = cma_set_qkey(id_priv, rep->qkey); 3313 if (ret) { 3314 event.event = RDMA_CM_EVENT_ADDR_ERROR; 3315 event.status = ret; 3316 break; 3317 } 3318 ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num, 3319 id_priv->id.route.path_rec, 3320 &event.param.ud.ah_attr); 3321 event.param.ud.qp_num = rep->qpn; 3322 event.param.ud.qkey = rep->qkey; 3323 event.event = RDMA_CM_EVENT_ESTABLISHED; 3324 event.status = 0; 3325 break; 3326 default: 3327 pr_err("RDMA CMA: unexpected IB CM event: %d\n", 3328 ib_event->event); 3329 goto out; 3330 } 3331 3332 ret = id_priv->id.event_handler(&id_priv->id, &event); 3333 if (ret) { 3334 /* Destroy the CM ID by returning a non-zero value. 
*/ 3335 id_priv->cm_id.ib = NULL; 3336 cma_exch(id_priv, RDMA_CM_DESTROYING); 3337 mutex_unlock(&id_priv->handler_mutex); 3338 rdma_destroy_id(&id_priv->id); 3339 return ret; 3340 } 3341 out: 3342 mutex_unlock(&id_priv->handler_mutex); 3343 return ret; 3344 } 3345 3346 static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, 3347 struct rdma_conn_param *conn_param) 3348 { 3349 struct ib_cm_sidr_req_param req; 3350 struct ib_cm_id *id; 3351 void *private_data; 3352 int offset, ret; 3353 3354 memset(&req, 0, sizeof req); 3355 offset = cma_user_data_offset(id_priv); 3356 req.private_data_len = offset + conn_param->private_data_len; 3357 if (req.private_data_len < conn_param->private_data_len) 3358 return -EINVAL; 3359 3360 if (req.private_data_len) { 3361 private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 3362 if (!private_data) 3363 return -ENOMEM; 3364 } else { 3365 private_data = NULL; 3366 } 3367 3368 if (conn_param->private_data && conn_param->private_data_len) 3369 memcpy(private_data + offset, conn_param->private_data, 3370 conn_param->private_data_len); 3371 3372 if (private_data) { 3373 ret = cma_format_hdr(private_data, id_priv); 3374 if (ret) 3375 goto out; 3376 req.private_data = private_data; 3377 } 3378 3379 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler, 3380 id_priv); 3381 if (IS_ERR(id)) { 3382 ret = PTR_ERR(id); 3383 goto out; 3384 } 3385 id_priv->cm_id.ib = id; 3386 3387 req.path = id_priv->id.route.path_rec; 3388 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); 3389 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8); 3390 req.max_cm_retries = CMA_MAX_CM_RETRIES; 3391 3392 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); 3393 if (ret) { 3394 ib_destroy_cm_id(id_priv->cm_id.ib); 3395 id_priv->cm_id.ib = NULL; 3396 } 3397 out: 3398 kfree(private_data); 3399 return ret; 3400 } 3401 3402 static int cma_connect_ib(struct rdma_id_private *id_priv, 3403 struct rdma_conn_param *conn_param) 3404 { 3405 struct ib_cm_req_param req; 3406 struct rdma_route *route; 3407 void *private_data; 3408 struct ib_cm_id *id; 3409 int offset, ret; 3410 3411 memset(&req, 0, sizeof req); 3412 offset = cma_user_data_offset(id_priv); 3413 req.private_data_len = offset + conn_param->private_data_len; 3414 if (req.private_data_len < conn_param->private_data_len) 3415 return -EINVAL; 3416 3417 if (req.private_data_len) { 3418 private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 3419 if (!private_data) 3420 return -ENOMEM; 3421 } else { 3422 private_data = NULL; 3423 } 3424 3425 if (conn_param->private_data && conn_param->private_data_len) 3426 memcpy(private_data + offset, conn_param->private_data, 3427 conn_param->private_data_len); 3428 3429 id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv); 3430 if (IS_ERR(id)) { 3431 ret = PTR_ERR(id); 3432 goto out; 3433 } 3434 id_priv->cm_id.ib = id; 3435 3436 route = &id_priv->id.route; 3437 if (private_data) { 3438 ret = cma_format_hdr(private_data, id_priv); 3439 if (ret) 3440 goto out; 3441 req.private_data = private_data; 3442 } 3443 3444 req.primary_path = &route->path_rec[0]; 3445 if (route->num_paths == 2) 3446 req.alternate_path = &route->path_rec[1]; 3447 3448 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); 3449 req.qp_num = id_priv->qp_num; 3450 req.qp_type = id_priv->id.qp_type; 3451 req.starting_psn = id_priv->seq_num; 3452 req.responder_resources = conn_param->responder_resources; 3453 req.initiator_depth = conn_param->initiator_depth; 3454 
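	/*
	 * The rest of the REQ is taken from the caller's rdma_conn_param;
	 * the retry counts are clamped to the 3-bit IB CM maximum of 7, and
	 * the CM timeouts and retry limit use the CMA_* defaults.
	 */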
req.flow_control = conn_param->flow_control; 3455 req.retry_count = min_t(u8, 7, conn_param->retry_count); 3456 req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); 3457 req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; 3458 req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; 3459 req.max_cm_retries = CMA_MAX_CM_RETRIES; 3460 req.srq = id_priv->srq ? 1 : 0; 3461 3462 ret = ib_send_cm_req(id_priv->cm_id.ib, &req); 3463 out: 3464 if (ret && !IS_ERR(id)) { 3465 ib_destroy_cm_id(id); 3466 id_priv->cm_id.ib = NULL; 3467 } 3468 3469 kfree(private_data); 3470 return ret; 3471 } 3472 3473 static int cma_connect_iw(struct rdma_id_private *id_priv, 3474 struct rdma_conn_param *conn_param) 3475 { 3476 struct iw_cm_id *cm_id; 3477 int ret; 3478 struct iw_cm_conn_param iw_param; 3479 3480 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv); 3481 if (IS_ERR(cm_id)) 3482 return PTR_ERR(cm_id); 3483 3484 cm_id->tos = id_priv->tos; 3485 id_priv->cm_id.iw = cm_id; 3486 3487 memcpy(&cm_id->local_addr, cma_src_addr(id_priv), 3488 rdma_addr_size(cma_src_addr(id_priv))); 3489 memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv), 3490 rdma_addr_size(cma_dst_addr(id_priv))); 3491 3492 ret = cma_modify_qp_rtr(id_priv, conn_param); 3493 if (ret) 3494 goto out; 3495 3496 if (conn_param) { 3497 iw_param.ord = conn_param->initiator_depth; 3498 iw_param.ird = conn_param->responder_resources; 3499 iw_param.private_data = conn_param->private_data; 3500 iw_param.private_data_len = conn_param->private_data_len; 3501 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num; 3502 } else { 3503 memset(&iw_param, 0, sizeof iw_param); 3504 iw_param.qpn = id_priv->qp_num; 3505 } 3506 ret = iw_cm_connect(cm_id, &iw_param); 3507 out: 3508 if (ret) { 3509 iw_destroy_cm_id(cm_id); 3510 id_priv->cm_id.iw = NULL; 3511 } 3512 return ret; 3513 } 3514 3515 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 3516 { 3517 struct rdma_id_private *id_priv; 3518 int ret; 3519 3520 id_priv = container_of(id, struct rdma_id_private, id); 3521 if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) 3522 return -EINVAL; 3523 3524 if (!id->qp) { 3525 id_priv->qp_num = conn_param->qp_num; 3526 id_priv->srq = conn_param->srq; 3527 } 3528 3529 if (rdma_cap_ib_cm(id->device, id->port_num)) { 3530 if (id->qp_type == IB_QPT_UD) 3531 ret = cma_resolve_ib_udp(id_priv, conn_param); 3532 else 3533 ret = cma_connect_ib(id_priv, conn_param); 3534 } else if (rdma_cap_iw_cm(id->device, id->port_num)) 3535 ret = cma_connect_iw(id_priv, conn_param); 3536 else 3537 ret = -ENOSYS; 3538 if (ret) 3539 goto err; 3540 3541 return 0; 3542 err: 3543 cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED); 3544 return ret; 3545 } 3546 EXPORT_SYMBOL(rdma_connect); 3547 3548 static int cma_accept_ib(struct rdma_id_private *id_priv, 3549 struct rdma_conn_param *conn_param) 3550 { 3551 struct ib_cm_rep_param rep; 3552 int ret; 3553 3554 ret = cma_modify_qp_rtr(id_priv, conn_param); 3555 if (ret) 3556 goto out; 3557 3558 ret = cma_modify_qp_rts(id_priv, conn_param); 3559 if (ret) 3560 goto out; 3561 3562 memset(&rep, 0, sizeof rep); 3563 rep.qp_num = id_priv->qp_num; 3564 rep.starting_psn = id_priv->seq_num; 3565 rep.private_data = conn_param->private_data; 3566 rep.private_data_len = conn_param->private_data_len; 3567 rep.responder_resources = conn_param->responder_resources; 3568 rep.initiator_depth = conn_param->initiator_depth; 3569 rep.failover_accepted = 0; 3570 
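	/*
	 * At this point the QP has already been transitioned to RTR/RTS by
	 * the cma_modify_qp_*() calls above; the REP simply echoes the
	 * parameters the consumer accepted with, and rnr_retry_count is
	 * clamped to the IB CM maximum of 7.
	 */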
rep.flow_control = conn_param->flow_control; 3571 rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); 3572 rep.srq = id_priv->srq ? 1 : 0; 3573 3574 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); 3575 out: 3576 return ret; 3577 } 3578 3579 static int cma_accept_iw(struct rdma_id_private *id_priv, 3580 struct rdma_conn_param *conn_param) 3581 { 3582 struct iw_cm_conn_param iw_param; 3583 int ret; 3584 3585 ret = cma_modify_qp_rtr(id_priv, conn_param); 3586 if (ret) 3587 return ret; 3588 3589 iw_param.ord = conn_param->initiator_depth; 3590 iw_param.ird = conn_param->responder_resources; 3591 iw_param.private_data = conn_param->private_data; 3592 iw_param.private_data_len = conn_param->private_data_len; 3593 if (id_priv->id.qp) { 3594 iw_param.qpn = id_priv->qp_num; 3595 } else 3596 iw_param.qpn = conn_param->qp_num; 3597 3598 return iw_cm_accept(id_priv->cm_id.iw, &iw_param); 3599 } 3600 3601 static int cma_send_sidr_rep(struct rdma_id_private *id_priv, 3602 enum ib_cm_sidr_status status, u32 qkey, 3603 const void *private_data, int private_data_len) 3604 { 3605 struct ib_cm_sidr_rep_param rep; 3606 int ret; 3607 3608 memset(&rep, 0, sizeof rep); 3609 rep.status = status; 3610 if (status == IB_SIDR_SUCCESS) { 3611 ret = cma_set_qkey(id_priv, qkey); 3612 if (ret) 3613 return ret; 3614 rep.qp_num = id_priv->qp_num; 3615 rep.qkey = id_priv->qkey; 3616 } 3617 rep.private_data = private_data; 3618 rep.private_data_len = private_data_len; 3619 3620 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); 3621 } 3622 3623 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 3624 { 3625 struct rdma_id_private *id_priv; 3626 int ret; 3627 3628 id_priv = container_of(id, struct rdma_id_private, id); 3629 3630 id_priv->owner = task_pid_nr(current); 3631 3632 if (!cma_comp(id_priv, RDMA_CM_CONNECT)) 3633 return -EINVAL; 3634 3635 if (!id->qp && conn_param) { 3636 id_priv->qp_num = conn_param->qp_num; 3637 id_priv->srq = conn_param->srq; 3638 } 3639 3640 if (rdma_cap_ib_cm(id->device, id->port_num)) { 3641 if (id->qp_type == IB_QPT_UD) { 3642 if (conn_param) 3643 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 3644 conn_param->qkey, 3645 conn_param->private_data, 3646 conn_param->private_data_len); 3647 else 3648 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 3649 0, NULL, 0); 3650 } else { 3651 if (conn_param) 3652 ret = cma_accept_ib(id_priv, conn_param); 3653 else 3654 ret = cma_rep_recv(id_priv); 3655 } 3656 } else if (rdma_cap_iw_cm(id->device, id->port_num)) 3657 ret = cma_accept_iw(id_priv, conn_param); 3658 else 3659 ret = -ENOSYS; 3660 3661 if (ret) 3662 goto reject; 3663 3664 return 0; 3665 reject: 3666 cma_modify_qp_err(id_priv); 3667 rdma_reject(id, NULL, 0); 3668 return ret; 3669 } 3670 EXPORT_SYMBOL(rdma_accept); 3671 3672 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event) 3673 { 3674 struct rdma_id_private *id_priv; 3675 int ret; 3676 3677 id_priv = container_of(id, struct rdma_id_private, id); 3678 if (!id_priv->cm_id.ib) 3679 return -EINVAL; 3680 3681 switch (id->device->node_type) { 3682 case RDMA_NODE_IB_CA: 3683 ret = ib_cm_notify(id_priv->cm_id.ib, event); 3684 break; 3685 default: 3686 ret = 0; 3687 break; 3688 } 3689 return ret; 3690 } 3691 EXPORT_SYMBOL(rdma_notify); 3692 3693 int rdma_reject(struct rdma_cm_id *id, const void *private_data, 3694 u8 private_data_len) 3695 { 3696 struct rdma_id_private *id_priv; 3697 int ret; 3698 3699 id_priv = container_of(id, struct rdma_id_private, id); 3700 if (!id_priv->cm_id.ib) 3701 return 
-EINVAL; 3702 3703 if (rdma_cap_ib_cm(id->device, id->port_num)) { 3704 if (id->qp_type == IB_QPT_UD) 3705 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0, 3706 private_data, private_data_len); 3707 else 3708 ret = ib_send_cm_rej(id_priv->cm_id.ib, 3709 IB_CM_REJ_CONSUMER_DEFINED, NULL, 3710 0, private_data, private_data_len); 3711 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 3712 ret = iw_cm_reject(id_priv->cm_id.iw, 3713 private_data, private_data_len); 3714 } else 3715 ret = -ENOSYS; 3716 3717 return ret; 3718 } 3719 EXPORT_SYMBOL(rdma_reject); 3720 3721 int rdma_disconnect(struct rdma_cm_id *id) 3722 { 3723 struct rdma_id_private *id_priv; 3724 int ret; 3725 3726 id_priv = container_of(id, struct rdma_id_private, id); 3727 if (!id_priv->cm_id.ib) 3728 return -EINVAL; 3729 3730 if (rdma_cap_ib_cm(id->device, id->port_num)) { 3731 ret = cma_modify_qp_err(id_priv); 3732 if (ret) 3733 goto out; 3734 /* Initiate or respond to a disconnect. */ 3735 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) 3736 ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0); 3737 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 3738 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); 3739 } else 3740 ret = -EINVAL; 3741 3742 out: 3743 return ret; 3744 } 3745 EXPORT_SYMBOL(rdma_disconnect); 3746 3747 static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) 3748 { 3749 struct rdma_id_private *id_priv; 3750 struct cma_multicast *mc = multicast->context; 3751 struct rdma_cm_event event; 3752 int ret = 0; 3753 3754 id_priv = mc->id_priv; 3755 mutex_lock(&id_priv->handler_mutex); 3756 if (id_priv->state != RDMA_CM_ADDR_BOUND && 3757 id_priv->state != RDMA_CM_ADDR_RESOLVED) 3758 goto out; 3759 3760 if (!status) 3761 status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); 3762 mutex_lock(&id_priv->qp_mutex); 3763 if (!status && id_priv->id.qp) 3764 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, 3765 be16_to_cpu(multicast->rec.mlid)); 3766 mutex_unlock(&id_priv->qp_mutex); 3767 3768 memset(&event, 0, sizeof event); 3769 event.status = status; 3770 event.param.ud.private_data = mc->context; 3771 if (!status) { 3772 struct rdma_dev_addr *dev_addr = 3773 &id_priv->id.route.addr.dev_addr; 3774 struct net_device *ndev = 3775 dev_get_by_index(&init_net, dev_addr->bound_dev_if); 3776 enum ib_gid_type gid_type = 3777 id_priv->cma_dev->default_gid_type[id_priv->id.port_num - 3778 rdma_start_port(id_priv->cma_dev->device)]; 3779 3780 event.event = RDMA_CM_EVENT_MULTICAST_JOIN; 3781 ib_init_ah_from_mcmember(id_priv->id.device, 3782 id_priv->id.port_num, &multicast->rec, 3783 ndev, gid_type, 3784 &event.param.ud.ah_attr); 3785 event.param.ud.qp_num = 0xFFFFFF; 3786 event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey); 3787 if (ndev) 3788 dev_put(ndev); 3789 } else 3790 event.event = RDMA_CM_EVENT_MULTICAST_ERROR; 3791 3792 ret = id_priv->id.event_handler(&id_priv->id, &event); 3793 if (ret) { 3794 cma_exch(id_priv, RDMA_CM_DESTROYING); 3795 mutex_unlock(&id_priv->handler_mutex); 3796 rdma_destroy_id(&id_priv->id); 3797 return 0; 3798 } 3799 3800 out: 3801 mutex_unlock(&id_priv->handler_mutex); 3802 return 0; 3803 } 3804 3805 static void cma_set_mgid(struct rdma_id_private *id_priv, 3806 struct sockaddr *addr, union ib_gid *mgid) 3807 { 3808 unsigned char mc_map[MAX_ADDR_LEN]; 3809 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 3810 struct sockaddr_in *sin = (struct sockaddr_in *) addr; 3811 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr; 3812 3813 if 
(cma_any_addr(addr)) { 3814 memset(mgid, 0, sizeof *mgid); 3815 } else if ((addr->sa_family == AF_INET6) && 3816 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) == 3817 0xFF10A01B)) { 3818 /* IPv6 address is an SA assigned MGID. */ 3819 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); 3820 } else if (addr->sa_family == AF_IB) { 3821 memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid); 3822 } else if ((addr->sa_family == AF_INET6)) { 3823 ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map); 3824 if (id_priv->id.ps == RDMA_PS_UDP) 3825 mc_map[7] = 0x01; /* Use RDMA CM signature */ 3826 *mgid = *(union ib_gid *) (mc_map + 4); 3827 } else { 3828 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map); 3829 if (id_priv->id.ps == RDMA_PS_UDP) 3830 mc_map[7] = 0x01; /* Use RDMA CM signature */ 3831 *mgid = *(union ib_gid *) (mc_map + 4); 3832 } 3833 } 3834 3835 static void cma_query_sa_classport_info_cb(int status, 3836 struct ib_class_port_info *rec, 3837 void *context) 3838 { 3839 struct class_port_info_context *cb_ctx = context; 3840 3841 WARN_ON(!context); 3842 3843 if (status || !rec) { 3844 pr_debug("RDMA CM: %s port %u failed query ClassPortInfo status: %d\n", 3845 cb_ctx->device->name, cb_ctx->port_num, status); 3846 goto out; 3847 } 3848 3849 memcpy(cb_ctx->class_port_info, rec, sizeof(struct ib_class_port_info)); 3850 3851 out: 3852 complete(&cb_ctx->done); 3853 } 3854 3855 static int cma_query_sa_classport_info(struct ib_device *device, u8 port_num, 3856 struct ib_class_port_info *class_port_info) 3857 { 3858 struct class_port_info_context *cb_ctx; 3859 int ret; 3860 3861 cb_ctx = kmalloc(sizeof(*cb_ctx), GFP_KERNEL); 3862 if (!cb_ctx) 3863 return -ENOMEM; 3864 3865 cb_ctx->device = device; 3866 cb_ctx->class_port_info = class_port_info; 3867 cb_ctx->port_num = port_num; 3868 init_completion(&cb_ctx->done); 3869 3870 ret = ib_sa_classport_info_rec_query(&sa_client, device, port_num, 3871 CMA_QUERY_CLASSPORT_INFO_TIMEOUT, 3872 GFP_KERNEL, cma_query_sa_classport_info_cb, 3873 cb_ctx, &cb_ctx->sa_query); 3874 if (ret < 0) { 3875 pr_err("RDMA CM: %s port %u failed to send ClassPortInfo query, ret: %d\n", 3876 device->name, port_num, ret); 3877 goto out; 3878 } 3879 3880 wait_for_completion(&cb_ctx->done); 3881 3882 out: 3883 kfree(cb_ctx); 3884 return ret; 3885 } 3886 3887 static int cma_join_ib_multicast(struct rdma_id_private *id_priv, 3888 struct cma_multicast *mc) 3889 { 3890 struct ib_sa_mcmember_rec rec; 3891 struct ib_class_port_info class_port_info; 3892 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 3893 ib_sa_comp_mask comp_mask; 3894 int ret; 3895 3896 ib_addr_get_mgid(dev_addr, &rec.mgid); 3897 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, 3898 &rec.mgid, &rec); 3899 if (ret) 3900 return ret; 3901 3902 ret = cma_set_qkey(id_priv, 0); 3903 if (ret) 3904 return ret; 3905 3906 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); 3907 rec.qkey = cpu_to_be32(id_priv->qkey); 3908 rdma_addr_get_sgid(dev_addr, &rec.port_gid); 3909 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 3910 rec.join_state = mc->join_state; 3911 3912 if (rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) { 3913 ret = cma_query_sa_classport_info(id_priv->id.device, 3914 id_priv->id.port_num, 3915 &class_port_info); 3916 3917 if (ret) 3918 return ret; 3919 3920 if (!(ib_get_cpi_capmask2(&class_port_info) & 3921 IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT)) { 3922 pr_warn("RDMA CM: %s port %u Unable to multicast 
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
                                 struct cma_multicast *mc)
{
        struct ib_sa_mcmember_rec rec;
        struct ib_class_port_info class_port_info;
        struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
        ib_sa_comp_mask comp_mask;
        int ret;

        ib_addr_get_mgid(dev_addr, &rec.mgid);
        ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
                                     &rec.mgid, &rec);
        if (ret)
                return ret;

        ret = cma_set_qkey(id_priv, 0);
        if (ret)
                return ret;

        cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
        rec.qkey = cpu_to_be32(id_priv->qkey);
        rdma_addr_get_sgid(dev_addr, &rec.port_gid);
        rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
        rec.join_state = mc->join_state;

        if (rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) {
                ret = cma_query_sa_classport_info(id_priv->id.device,
                                                  id_priv->id.port_num,
                                                  &class_port_info);

                if (ret)
                        return ret;

                if (!(ib_get_cpi_capmask2(&class_port_info) &
                      IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT)) {
                        pr_warn("RDMA CM: %s port %u Unable to multicast join\n"
                                "RDMA CM: SM doesn't support Send Only Full Member option\n",
                                id_priv->id.device->name, id_priv->id.port_num);
                        return -EOPNOTSUPP;
                }
        }

        comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
                    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
                    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
                    IB_SA_MCMEMBER_REC_FLOW_LABEL |
                    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

        if (id_priv->id.ps == RDMA_PS_IPOIB)
                comp_mask |= IB_SA_MCMEMBER_REC_RATE |
                             IB_SA_MCMEMBER_REC_RATE_SELECTOR |
                             IB_SA_MCMEMBER_REC_MTU_SELECTOR |
                             IB_SA_MCMEMBER_REC_MTU |
                             IB_SA_MCMEMBER_REC_HOP_LIMIT;

        mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
                                                id_priv->id.port_num, &rec,
                                                comp_mask, GFP_KERNEL,
                                                cma_ib_mc_handler, mc);
        return PTR_ERR_OR_ZERO(mc->multicast.ib);
}

static void iboe_mcast_work_handler(struct work_struct *work)
{
        struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
        struct cma_multicast *mc = mw->mc;
        struct ib_sa_multicast *m = mc->multicast.ib;

        mc->multicast.ib->context = mc;
        cma_ib_mc_handler(0, m);
        kref_put(&mc->mcref, release_mc);
        kfree(mw);
}

static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
{
        struct sockaddr_in *sin = (struct sockaddr_in *)addr;
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

        if (cma_any_addr(addr)) {
                memset(mgid, 0, sizeof *mgid);
        } else if (addr->sa_family == AF_INET6) {
                memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
        } else {
                mgid->raw[0] = 0xff;
                mgid->raw[1] = 0x0e;
                mgid->raw[2] = 0;
                mgid->raw[3] = 0;
                mgid->raw[4] = 0;
                mgid->raw[5] = 0;
                mgid->raw[6] = 0;
                mgid->raw[7] = 0;
                mgid->raw[8] = 0;
                mgid->raw[9] = 0;
                mgid->raw[10] = 0xff;
                mgid->raw[11] = 0xff;
                *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
        }
}
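/*
 * RoCE multicast joins are completed locally rather than through an SA:
 * fill in an ib_sa_multicast record from the bound net_device (MGID, rate,
 * MTU), send an IGMP join for RoCE v2 (UDP encap) IPv4 groups unless this
 * is a send-only join, and report completion from a work item via
 * cma_ib_mc_handler().
 */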
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
                                   struct cma_multicast *mc)
{
        struct iboe_mcast_work *work;
        struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
        int err = 0;
        struct sockaddr *addr = (struct sockaddr *)&mc->addr;
        struct net_device *ndev = NULL;
        enum ib_gid_type gid_type;
        bool send_only;

        send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);

        if (cma_zero_addr((struct sockaddr *)&mc->addr))
                return -EINVAL;

        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (!work)
                return -ENOMEM;

        mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
        if (!mc->multicast.ib) {
                err = -ENOMEM;
                goto out1;
        }

        cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);

        mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
        if (id_priv->id.ps == RDMA_PS_UDP)
                mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);

        if (dev_addr->bound_dev_if)
                ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
        if (!ndev) {
                err = -ENODEV;
                goto out2;
        }
        mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
        mc->multicast.ib->rec.hop_limit = 1;
        mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);

        gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
                   rdma_start_port(id_priv->cma_dev->device)];
        if (addr->sa_family == AF_INET) {
                if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
                        mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
                        if (!send_only) {
                                err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
                                                    true);
                                if (!err)
                                        mc->igmp_joined = true;
                        }
                }
        } else {
                if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
                        err = -ENOTSUPP;
        }
        dev_put(ndev);
        if (err || !mc->multicast.ib->rec.mtu) {
                if (!err)
                        err = -EINVAL;
                goto out2;
        }
        rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
                    &mc->multicast.ib->rec.port_gid);
        work->id = id_priv;
        work->mc = mc;
        INIT_WORK(&work->work, iboe_mcast_work_handler);
        kref_get(&mc->mcref);
        queue_work(cma_wq, &work->work);

        return 0;

out2:
        kfree(mc->multicast.ib);
out1:
        kfree(work);
        return err;
}

int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
                        u8 join_state, void *context)
{
        struct rdma_id_private *id_priv;
        struct cma_multicast *mc;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
            !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
                return -EINVAL;

        mc = kmalloc(sizeof *mc, GFP_KERNEL);
        if (!mc)
                return -ENOMEM;

        memcpy(&mc->addr, addr, rdma_addr_size(addr));
        mc->context = context;
        mc->id_priv = id_priv;
        mc->igmp_joined = false;
        mc->join_state = join_state;
        spin_lock(&id_priv->lock);
        list_add(&mc->list, &id_priv->mc_list);
        spin_unlock(&id_priv->lock);

        if (rdma_protocol_roce(id->device, id->port_num)) {
                kref_init(&mc->mcref);
                ret = cma_iboe_join_multicast(id_priv, mc);
        } else if (rdma_cap_ib_mcast(id->device, id->port_num))
                ret = cma_join_ib_multicast(id_priv, mc);
        else
                ret = -ENOSYS;

        if (ret) {
                spin_lock_irq(&id_priv->lock);
                list_del(&mc->list);
                spin_unlock_irq(&id_priv->lock);
                kfree(mc);
        }
        return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);

void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
        struct rdma_id_private *id_priv;
        struct cma_multicast *mc;

        id_priv = container_of(id, struct rdma_id_private, id);
        spin_lock_irq(&id_priv->lock);
        list_for_each_entry(mc, &id_priv->mc_list, list) {
                if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
                        list_del(&mc->list);
                        spin_unlock_irq(&id_priv->lock);

                        if (id->qp)
                                ib_detach_mcast(id->qp,
                                                &mc->multicast.ib->rec.mgid,
                                                be16_to_cpu(mc->multicast.ib->rec.mlid));

                        BUG_ON(id_priv->cma_dev->device != id->device);

                        if (rdma_cap_ib_mcast(id->device, id->port_num)) {
                                ib_sa_free_multicast(mc->multicast.ib);
                                kfree(mc);
                        } else if (rdma_protocol_roce(id->device, id->port_num)) {
                                if (mc->igmp_joined) {
                                        struct rdma_dev_addr *dev_addr =
                                                &id->route.addr.dev_addr;
                                        struct net_device *ndev = NULL;

                                        if (dev_addr->bound_dev_if)
                                                ndev = dev_get_by_index(&init_net,
                                                                        dev_addr->bound_dev_if);
                                        if (ndev) {
                                                cma_igmp_send(ndev,
                                                              &mc->multicast.ib->rec.mgid,
                                                              false);
                                                dev_put(ndev);
                                        }
                                        mc->igmp_joined = false;
                                }
                                kref_put(&mc->mcref, release_mc);
                        }
                        return;
                }
        }
        spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
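/*
 * Bonding failover handling: if the bound net_device's hardware address no
 * longer matches the cached source address, queue a work item that reports
 * RDMA_CM_EVENT_ADDR_CHANGE to the id's event handler.
 */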
static int cma_netdev_change(struct net_device *ndev,
                             struct rdma_id_private *id_priv)
{
        struct rdma_dev_addr *dev_addr;
        struct cma_ndev_work *work;

        dev_addr = &id_priv->id.route.addr.dev_addr;

        if ((dev_addr->bound_dev_if == ndev->ifindex) &&
            (net_eq(dev_net(ndev), dev_addr->net)) &&
            memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
                pr_info("RDMA CM addr change for ndev %s used by id %p\n",
                        ndev->name, &id_priv->id);
                work = kzalloc(sizeof *work, GFP_KERNEL);
                if (!work)
                        return -ENOMEM;

                INIT_WORK(&work->work, cma_ndev_work_handler);
                work->id = id_priv;
                work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
                atomic_inc(&id_priv->refcount);
                queue_work(cma_wq, &work->work);
        }

        return 0;
}

static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
                               void *ptr)
{
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct cma_device *cma_dev;
        struct rdma_id_private *id_priv;
        int ret = NOTIFY_DONE;

        if (event != NETDEV_BONDING_FAILOVER)
                return NOTIFY_DONE;

        if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
                return NOTIFY_DONE;

        mutex_lock(&lock);
        list_for_each_entry(cma_dev, &dev_list, list)
                list_for_each_entry(id_priv, &cma_dev->id_list, list) {
                        ret = cma_netdev_change(ndev, id_priv);
                        if (ret)
                                goto out;
                }

out:
        mutex_unlock(&lock);
        return ret;
}

static struct notifier_block cma_nb = {
        .notifier_call = cma_netdev_callback
};

static void cma_add_one(struct ib_device *device)
{
        struct cma_device *cma_dev;
        struct rdma_id_private *id_priv;
        unsigned int i;
        unsigned long supported_gids = 0;

        cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
        if (!cma_dev)
                return;

        cma_dev->device = device;
        cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
                                            sizeof(*cma_dev->default_gid_type),
                                            GFP_KERNEL);
        if (!cma_dev->default_gid_type) {
                kfree(cma_dev);
                return;
        }
        for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
                supported_gids = roce_gid_type_mask_support(device, i);
                WARN_ON(!supported_gids);
                cma_dev->default_gid_type[i - rdma_start_port(device)] =
                        find_first_bit(&supported_gids, BITS_PER_LONG);
        }

        init_completion(&cma_dev->comp);
        atomic_set(&cma_dev->refcount, 1);
        INIT_LIST_HEAD(&cma_dev->id_list);
        ib_set_client_data(device, &cma_client, cma_dev);

        mutex_lock(&lock);
        list_add_tail(&cma_dev->list, &dev_list);
        list_for_each_entry(id_priv, &listen_any_list, list)
                cma_listen_on_dev(id_priv, cma_dev);
        mutex_unlock(&lock);
}
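/*
 * Device removal: move the id into the DEVICE_REMOVAL state, cancel any
 * outstanding operation, and report RDMA_CM_EVENT_DEVICE_REMOVAL to the
 * consumer.  A nonzero return tells the caller to destroy the id.
 */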
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
        struct rdma_cm_event event;
        enum rdma_cm_state state;
        int ret = 0;

        /* Record that we want to remove the device */
        state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
        if (state == RDMA_CM_DESTROYING)
                return 0;

        cma_cancel_operation(id_priv, state);
        mutex_lock(&id_priv->handler_mutex);

        /* Check for destruction from another callback. */
        if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
                goto out;

        memset(&event, 0, sizeof event);
        event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
        ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
        mutex_unlock(&id_priv->handler_mutex);
        return ret;
}

static void cma_process_remove(struct cma_device *cma_dev)
{
        struct rdma_id_private *id_priv;
        int ret;

        mutex_lock(&lock);
        while (!list_empty(&cma_dev->id_list)) {
                id_priv = list_entry(cma_dev->id_list.next,
                                     struct rdma_id_private, list);

                list_del(&id_priv->listen_list);
                list_del_init(&id_priv->list);
                atomic_inc(&id_priv->refcount);
                mutex_unlock(&lock);

                ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
                cma_deref_id(id_priv);
                if (ret)
                        rdma_destroy_id(&id_priv->id);

                mutex_lock(&lock);
        }
        mutex_unlock(&lock);

        cma_deref_dev(cma_dev);
        wait_for_completion(&cma_dev->comp);
}

static void cma_remove_one(struct ib_device *device, void *client_data)
{
        struct cma_device *cma_dev = client_data;

        if (!cma_dev)
                return;

        mutex_lock(&lock);
        list_del(&cma_dev->list);
        mutex_unlock(&lock);

        cma_process_remove(cma_dev);
        kfree(cma_dev->default_gid_type);
        kfree(cma_dev);
}
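/*
 * Netlink dump callback for RDMA_NL_RDMA_CM_ID_STATS.  cb->args[0] and
 * cb->args[1] record the device and id iteration position so that the
 * dump can resume where it left off on the next call.
 */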
static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct nlmsghdr *nlh;
        struct rdma_cm_id_stats *id_stats;
        struct rdma_id_private *id_priv;
        struct rdma_cm_id *id = NULL;
        struct cma_device *cma_dev;
        int i_dev = 0, i_id = 0;

        /*
         * We export all of the IDs as a sequence of messages.  Each
         * ID gets its own netlink message.
         */
        mutex_lock(&lock);

        list_for_each_entry(cma_dev, &dev_list, list) {
                if (i_dev < cb->args[0]) {
                        i_dev++;
                        continue;
                }

                i_id = 0;
                list_for_each_entry(id_priv, &cma_dev->id_list, list) {
                        if (i_id < cb->args[1]) {
                                i_id++;
                                continue;
                        }

                        id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
                                                sizeof *id_stats, RDMA_NL_RDMA_CM,
                                                RDMA_NL_RDMA_CM_ID_STATS,
                                                NLM_F_MULTI);
                        if (!id_stats)
                                goto out;

                        memset(id_stats, 0, sizeof *id_stats);
                        id = &id_priv->id;
                        id_stats->node_type = id->route.addr.dev_addr.dev_type;
                        id_stats->port_num = id->port_num;
                        id_stats->bound_dev_if =
                                id->route.addr.dev_addr.bound_dev_if;

                        if (ibnl_put_attr(skb, nlh,
                                          rdma_addr_size(cma_src_addr(id_priv)),
                                          cma_src_addr(id_priv),
                                          RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
                                goto out;
                        if (ibnl_put_attr(skb, nlh,
                                          rdma_addr_size(cma_src_addr(id_priv)),
                                          cma_dst_addr(id_priv),
                                          RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
                                goto out;

                        id_stats->pid = id_priv->owner;
                        id_stats->port_space = id->ps;
                        id_stats->cm_state = id_priv->state;
                        id_stats->qp_num = id_priv->qp_num;
                        id_stats->qp_type = id->qp_type;

                        i_id++;
                }

                cb->args[1] = 0;
                i_dev++;
        }

out:
        mutex_unlock(&lock);
        cb->args[0] = i_dev;
        cb->args[1] = i_id;

        return skb->len;
}

static const struct ibnl_client_cbs cma_cb_table[] = {
        [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
                                       .module = THIS_MODULE },
};

static int cma_init_net(struct net *net)
{
        struct cma_pernet *pernet = cma_pernet(net);

        idr_init(&pernet->tcp_ps);
        idr_init(&pernet->udp_ps);
        idr_init(&pernet->ipoib_ps);
        idr_init(&pernet->ib_ps);

        return 0;
}

static void cma_exit_net(struct net *net)
{
        struct cma_pernet *pernet = cma_pernet(net);

        idr_destroy(&pernet->tcp_ps);
        idr_destroy(&pernet->udp_ps);
        idr_destroy(&pernet->ipoib_ps);
        idr_destroy(&pernet->ib_ps);
}

static struct pernet_operations cma_pernet_operations = {
        .init = cma_init_net,
        .exit = cma_exit_net,
        .id = &cma_pernet_id,
        .size = sizeof(struct cma_pernet),
};

static int __init cma_init(void)
{
        int ret;

        cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
        if (!cma_wq)
                return -ENOMEM;

        ret = register_pernet_subsys(&cma_pernet_operations);
        if (ret)
                goto err_wq;

        ib_sa_register_client(&sa_client);
        rdma_addr_register_client(&addr_client);
        register_netdevice_notifier(&cma_nb);

        ret = ib_register_client(&cma_client);
        if (ret)
                goto err;

        if (ibnl_add_client(RDMA_NL_RDMA_CM, ARRAY_SIZE(cma_cb_table),
                            cma_cb_table))
                pr_warn("RDMA CMA: failed to add netlink callback\n");
        cma_configfs_init();

        return 0;

err:
        unregister_netdevice_notifier(&cma_nb);
        rdma_addr_unregister_client(&addr_client);
        ib_sa_unregister_client(&sa_client);
err_wq:
        destroy_workqueue(cma_wq);
        return ret;
}
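/* Module unload: tear everything down in the reverse order of cma_init(). */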
static void __exit cma_cleanup(void)
{
        cma_configfs_exit();
        ibnl_remove_client(RDMA_NL_RDMA_CM);
        ib_unregister_client(&cma_client);
        unregister_netdevice_notifier(&cma_nb);
        rdma_addr_unregister_client(&addr_client);
        ib_sa_unregister_client(&sa_client);
        unregister_pernet_subsys(&cma_pernet_operations);
        destroy_workqueue(cma_wq);
}

module_init(cma_init);
module_exit(cma_cleanup);