/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/igmp.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/route.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip_fib.h>
#include <net/ip6_route.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

#include "core_priv.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18

static const char * const cma_events[] = {
	[RDMA_CM_EVENT_ADDR_RESOLVED]	 = "address resolved",
	[RDMA_CM_EVENT_ADDR_ERROR]	 = "address error",
	[RDMA_CM_EVENT_ROUTE_RESOLVED]	 = "route resolved ",
	[RDMA_CM_EVENT_ROUTE_ERROR]	 = "route error",
	[RDMA_CM_EVENT_CONNECT_REQUEST]	 = "connect request",
	[RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
	[RDMA_CM_EVENT_CONNECT_ERROR]	 = "connect error",
	[RDMA_CM_EVENT_UNREACHABLE]	 = "unreachable",
	[RDMA_CM_EVENT_REJECTED]	 = "rejected",
	[RDMA_CM_EVENT_ESTABLISHED]	 = "established",
	[RDMA_CM_EVENT_DISCONNECTED]	 = "disconnected",
	[RDMA_CM_EVENT_DEVICE_REMOVAL]	 = "device removal",
	[RDMA_CM_EVENT_MULTICAST_JOIN]	 = "multicast join",
	[RDMA_CM_EVENT_MULTICAST_ERROR]	 = "multicast error",
	[RDMA_CM_EVENT_ADDR_CHANGE]	 = "address change",
	[RDMA_CM_EVENT_TIMEWAIT_EXIT]	 = "timewait exit",
};
change", 92 [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit", 93 }; 94 95 const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event) 96 { 97 size_t index = event; 98 99 return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ? 100 cma_events[index] : "unrecognized event"; 101 } 102 EXPORT_SYMBOL(rdma_event_msg); 103 104 const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id, 105 int reason) 106 { 107 if (rdma_ib_or_roce(id->device, id->port_num)) 108 return ibcm_reject_msg(reason); 109 110 if (rdma_protocol_iwarp(id->device, id->port_num)) 111 return iwcm_reject_msg(reason); 112 113 WARN_ON_ONCE(1); 114 return "unrecognized transport"; 115 } 116 EXPORT_SYMBOL(rdma_reject_msg); 117 118 bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason) 119 { 120 if (rdma_ib_or_roce(id->device, id->port_num)) 121 return reason == IB_CM_REJ_CONSUMER_DEFINED; 122 123 if (rdma_protocol_iwarp(id->device, id->port_num)) 124 return reason == -ECONNREFUSED; 125 126 WARN_ON_ONCE(1); 127 return false; 128 } 129 EXPORT_SYMBOL(rdma_is_consumer_reject); 130 131 const void *rdma_consumer_reject_data(struct rdma_cm_id *id, 132 struct rdma_cm_event *ev, u8 *data_len) 133 { 134 const void *p; 135 136 if (rdma_is_consumer_reject(id, ev->status)) { 137 *data_len = ev->param.conn.private_data_len; 138 p = ev->param.conn.private_data; 139 } else { 140 *data_len = 0; 141 p = NULL; 142 } 143 return p; 144 } 145 EXPORT_SYMBOL(rdma_consumer_reject_data); 146 147 static void cma_add_one(struct ib_device *device); 148 static void cma_remove_one(struct ib_device *device, void *client_data); 149 150 static struct ib_client cma_client = { 151 .name = "cma", 152 .add = cma_add_one, 153 .remove = cma_remove_one 154 }; 155 156 static struct ib_sa_client sa_client; 157 static struct rdma_addr_client addr_client; 158 static LIST_HEAD(dev_list); 159 static LIST_HEAD(listen_any_list); 160 static DEFINE_MUTEX(lock); 161 static struct workqueue_struct *cma_wq; 162 static unsigned int cma_pernet_id; 163 164 struct cma_pernet { 165 struct idr tcp_ps; 166 struct idr udp_ps; 167 struct idr ipoib_ps; 168 struct idr ib_ps; 169 }; 170 171 static struct cma_pernet *cma_pernet(struct net *net) 172 { 173 return net_generic(net, cma_pernet_id); 174 } 175 176 static struct idr *cma_pernet_idr(struct net *net, enum rdma_port_space ps) 177 { 178 struct cma_pernet *pernet = cma_pernet(net); 179 180 switch (ps) { 181 case RDMA_PS_TCP: 182 return &pernet->tcp_ps; 183 case RDMA_PS_UDP: 184 return &pernet->udp_ps; 185 case RDMA_PS_IPOIB: 186 return &pernet->ipoib_ps; 187 case RDMA_PS_IB: 188 return &pernet->ib_ps; 189 default: 190 return NULL; 191 } 192 } 193 194 struct cma_device { 195 struct list_head list; 196 struct ib_device *device; 197 struct completion comp; 198 atomic_t refcount; 199 struct list_head id_list; 200 enum ib_gid_type *default_gid_type; 201 }; 202 203 struct rdma_bind_list { 204 enum rdma_port_space ps; 205 struct hlist_head owners; 206 unsigned short port; 207 }; 208 209 struct class_port_info_context { 210 struct ib_class_port_info *class_port_info; 211 struct ib_device *device; 212 struct completion done; 213 struct ib_sa_query *sa_query; 214 u8 port_num; 215 }; 216 217 static int cma_ps_alloc(struct net *net, enum rdma_port_space ps, 218 struct rdma_bind_list *bind_list, int snum) 219 { 220 struct idr *idr = cma_pernet_idr(net, ps); 221 222 return idr_alloc(idr, bind_list, snum, snum + 1, GFP_KERNEL); 223 } 224 225 static struct rdma_bind_list *cma_ps_find(struct net *net, 226 enum 
static int cma_ps_alloc(struct net *net, enum rdma_port_space ps,
			struct rdma_bind_list *bind_list, int snum)
{
	struct idr *idr = cma_pernet_idr(net, ps);

	return idr_alloc(idr, bind_list, snum, snum + 1, GFP_KERNEL);
}

static struct rdma_bind_list *cma_ps_find(struct net *net,
					  enum rdma_port_space ps, int snum)
{
	struct idr *idr = cma_pernet_idr(net, ps);

	return idr_find(idr, snum);
}

static void cma_ps_remove(struct net *net, enum rdma_port_space ps, int snum)
{
	struct idr *idr = cma_pernet_idr(net, ps);

	idr_remove(idr, snum);
}

enum {
	CMA_OPTION_AFONLY,
};

void cma_ref_dev(struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
}

struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter	filter,
					     void		*cookie)
{
	struct cma_device *cma_dev;
	struct cma_device *found_cma_dev = NULL;

	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list)
		if (filter(cma_dev->device, cookie)) {
			found_cma_dev = cma_dev;
			break;
		}

	if (found_cma_dev)
		cma_ref_dev(found_cma_dev);
	mutex_unlock(&lock);
	return found_cma_dev;
}

int cma_get_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port)
{
	if (port < rdma_start_port(cma_dev->device) ||
	    port > rdma_end_port(cma_dev->device))
		return -EINVAL;

	return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)];
}

int cma_set_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port,
			     enum ib_gid_type default_gid_type)
{
	unsigned long supported_gids;

	if (port < rdma_start_port(cma_dev->device) ||
	    port > rdma_end_port(cma_dev->device))
		return -EINVAL;

	supported_gids = roce_gid_type_mask_support(cma_dev->device, port);

	if (!(supported_gids & 1 << default_gid_type))
		return -EINVAL;

	cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] =
		default_gid_type;

	return 0;
}

struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
{
	return cma_dev->device;
}

/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;
	struct list_head	list; /* listen_any_list or cma_device.list */
	struct list_head	listen_list; /* per device listens */
	struct cma_device	*cma_dev;
	struct list_head	mc_list;

	int			internal_id;
	enum rdma_cm_state	state;
	spinlock_t		lock;
	struct mutex		qp_mutex;

	struct completion	comp;
	atomic_t		refcount;
	struct mutex		handler_mutex;

	int			backlog;
	int			timeout_ms;
	struct ib_sa_query	*query;
	int			query_id;
	union {
		struct ib_cm_id	*ib;
		struct iw_cm_id	*iw;
	} cm_id;

	u32			seq_num;
	u32			qkey;
	u32			qp_num;
	pid_t			owner;
	u32			options;
	u8			srq;
	u8			tos;
	u8			reuseaddr;
	u8			afonly;
	enum ib_gid_type	gid_type;
};

struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *ib;
	} multicast;
	struct list_head	list;
	void			*context;
	struct sockaddr_storage	addr;
	struct kref		mcref;
	bool			igmp_joined;
	u8			join_state;
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum rdma_cm_state	old_state;
	enum rdma_cm_state	new_state;
	struct rdma_cm_event	event;
};

struct cma_ndev_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct rdma_cm_event	event;
};

struct iboe_mcast_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct cma_multicast	*mc;
};

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__be32 pad[3];
		__be32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__be16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

#define CMA_VERSION 0x00

struct cma_req_info {
	struct ib_device *device;
	int port;
	union ib_gid local_gid;
	__be64 service_id;
	u16 pkey;
	bool has_gid:1;
};

static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum rdma_cm_state comp, enum rdma_cm_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
				   enum rdma_cm_state exch)
{
	unsigned long flags;
	enum rdma_cm_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}

static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}
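/*
 * Issue an IGMP join or leave on @ndev for the IPv4 group carried in the
 * low 32 bits of the MGID.  Used for multicast ids whose membership is
 * tracked through the IP stack (mc->igmp_joined) rather than through ib_sa.
 */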
static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
{
	struct in_device *in_dev = NULL;

	if (ndev) {
		rtnl_lock();
		in_dev = __in_dev_get_rtnl(ndev);
		if (in_dev) {
			if (join)
				ip_mc_inc_group(in_dev,
						*(__be32 *)(mgid->raw + 12));
			else
				ip_mc_dec_group(in_dev,
						*(__be32 *)(mgid->raw + 12));
		}
		rtnl_unlock();
	}
	return (in_dev) ? 0 : -ENODEV;
}

static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
			       struct cma_device *cma_dev)
{
	cma_ref_dev(cma_dev);
	id_priv->cma_dev = cma_dev;
	id_priv->gid_type = 0;
	id_priv->id.device = cma_dev->device;
	id_priv->id.route.addr.dev_addr.transport =
		rdma_node_get_transport(cma_dev->device->node_type);
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	_cma_attach_to_dev(id_priv, cma_dev);
	id_priv->gid_type =
		cma_dev->default_gid_type[id_priv->id.port_num -
					  rdma_start_port(cma_dev->device)];
}

void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

static inline void release_mc(struct kref *kref)
{
	struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);

	kfree(mc->multicast.ib);
	kfree(mc);
}

static void cma_release_dev(struct rdma_id_private *id_priv)
{
	mutex_lock(&lock);
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
	mutex_unlock(&lock);
}

static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *) &id_priv->id.route.addr.src_addr;
}

static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
}

static inline unsigned short cma_family(struct rdma_id_private *id_priv)
{
	return id_priv->id.route.addr.src_addr.ss_family;
}

static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	if (id_priv->qkey) {
		if (qkey && id_priv->qkey != qkey)
			return -EINVAL;
		return 0;
	}

	if (qkey) {
		id_priv->qkey = qkey;
		return 0;
	}

	switch (id_priv->id.ps) {
	case RDMA_PS_UDP:
	case RDMA_PS_IB:
		id_priv->qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
					     &rec);
		if (!ret)
			id_priv->qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}

static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
	dev_addr->dev_type = ARPHRD_INFINIBAND;
	rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
	ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
}

static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
{
	int ret;

	if (addr->sa_family != AF_IB) {
		ret = rdma_translate_ip(addr, dev_addr, NULL);
	} else {
		cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
		ret = 0;
	}

	return ret;
}
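/*
 * Check that @device/@port can serve an id with the given link type and
 * source GID: IB ports require ARPHRD_INFINIBAND addresses and vice versa,
 * and for RoCE the GID table lookup is scoped to the bound (or backing)
 * netdevice.
 */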
static inline int cma_validate_port(struct ib_device *device, u8 port,
				    enum ib_gid_type gid_type,
				    union ib_gid *gid, int dev_type,
				    int bound_if_index)
{
	int ret = -ENODEV;
	struct net_device *ndev = NULL;

	if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
		return ret;

	if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
		return ret;

	if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
		ndev = dev_get_by_index(&init_net, bound_if_index);
		if (ndev && ndev->flags & IFF_LOOPBACK) {
			pr_info("detected loopback device\n");
			dev_put(ndev);

			if (!device->get_netdev)
				return -EOPNOTSUPP;

			ndev = device->get_netdev(device, port);
			if (!ndev)
				return -ENODEV;
		}
	} else {
		gid_type = IB_GID_TYPE_IB;
	}

	ret = ib_find_cached_gid_by_port(device, gid, gid_type, port,
					 ndev, NULL);

	if (ndev)
		dev_put(ndev);

	return ret;
}
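/*
 * Attach @id_priv to the cma_device/port whose GID table contains its
 * source GID, preferring the device and port the listen request arrived on.
 * Called without 'lock' held; it is taken internally.
 */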
static int cma_acquire_dev(struct rdma_id_private *id_priv,
			   struct rdma_id_private *listen_id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid, iboe_gid, *gidp;
	int ret = -ENODEV;
	u8 port;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	mutex_lock(&lock);
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &iboe_gid);

	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof gid);

	if (listen_id_priv) {
		cma_dev = listen_id_priv->cma_dev;
		port = listen_id_priv->id.port_num;
		gidp = rdma_protocol_roce(cma_dev->device, port) ?
		       &iboe_gid : &gid;

		ret = cma_validate_port(cma_dev->device, port,
					rdma_protocol_ib(cma_dev->device, port) ?
					IB_GID_TYPE_IB :
					listen_id_priv->gid_type, gidp,
					dev_addr->dev_type,
					dev_addr->bound_dev_if);
		if (!ret) {
			id_priv->id.port_num = port;
			goto out;
		}
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
			if (listen_id_priv &&
			    listen_id_priv->cma_dev == cma_dev &&
			    listen_id_priv->id.port_num == port)
				continue;

			gidp = rdma_protocol_roce(cma_dev->device, port) ?
			       &iboe_gid : &gid;

			ret = cma_validate_port(cma_dev->device, port,
						rdma_protocol_ib(cma_dev->device, port) ?
						IB_GID_TYPE_IB :
						cma_dev->default_gid_type[port - 1],
						gidp, dev_addr->dev_type,
						dev_addr->bound_dev_if);
			if (!ret) {
				id_priv->id.port_num = port;
				goto out;
			}
		}
	}

out:
	if (!ret)
		cma_attach_to_dev(id_priv, cma_dev);

	mutex_unlock(&lock);
	return ret;
}

/*
 * Select the source IB device and address to reach the destination IB address.
 */
static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	struct sockaddr_ib *addr;
	union ib_gid gid, sgid, *dgid;
	u16 pkey, index;
	u8 p;
	int i;

	cma_dev = NULL;
	addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
	dgid = (union ib_gid *) &addr->sib_addr;
	pkey = ntohs(addr->sib_pkey);

	list_for_each_entry(cur_dev, &dev_list, list) {
		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
			if (!rdma_cap_af_ib(cur_dev->device, p))
				continue;

			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
				continue;

			for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i,
						       &gid, NULL);
			     i++) {
				if (!memcmp(&gid, dgid, sizeof(gid))) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
					goto found;
				}

				if (!cma_dev && (gid.global.subnet_prefix ==
						 dgid->global.subnet_prefix)) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
				}
			}
		}
	}

	if (!cma_dev)
		return -ENODEV;

found:
	cma_attach_to_dev(id_priv, cma_dev);
	addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
	memcpy(&addr->sib_addr, &sgid, sizeof sgid);
	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
	return 0;
}

static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

struct rdma_cm_id *rdma_create_id(struct net *net,
				  rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps,
				  enum ib_qp_type qp_type)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->owner = task_pid_nr(current);
	id_priv->state = RDMA_CM_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	id_priv->id.qp_type = qp_type;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
	id_priv->id.route.addr.dev_addr.net = get_net(net);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
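/*
 * Note: a typical consumer pairs rdma_create_id() with rdma_resolve_addr(),
 * rdma_resolve_route(), rdma_create_qp() and rdma_connect(), and releases
 * everything with rdma_destroy_id().
 */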
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}

int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp_init_attr->port_num = id->port_num;
	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	if (id->qp_type == IB_QPT_UD)
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);

static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	union ib_gid sgid;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
			   qp_attr.ah_attr.grh.sgid_index, &sgid, NULL);
	if (ret)
		goto out;

	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);

	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;
	u16 pkey;

	if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
		pkey = 0xffff;
	else
		pkey = ib_addr_get_pkey(dev_addr);

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  pkey, &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (id_priv->id.qp_type == IB_QPT_UD) {
		ret = cma_set_qkey(id_priv, 0);
		if (ret)
			return ret;

		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);

		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
	} else
		ret = -ENOSYS;

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);

static inline int cma_zero_addr(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr);
	case AF_IB:
		return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr);
	default:
		return 0;
	}
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr);
	case AF_IB:
		return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr);
	default:
		return 0;
	}
}

static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
{
	if (src->sa_family != dst->sa_family)
		return -1;

	switch (src->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
		       ((struct sockaddr_in *) dst)->sin_addr.s_addr;
	case AF_INET6:
		return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
				     &((struct sockaddr_in6 *) dst)->sin6_addr);
	default:
		return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
				   &((struct sockaddr_ib *) dst)->sib_addr);
	}
}
static __be16 cma_port(struct sockaddr *addr)
{
	struct sockaddr_ib *sib;

	switch (addr->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) addr)->sin_port;
	case AF_INET6:
		return ((struct sockaddr_in6 *) addr)->sin6_port;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		return htons((u16) (be64_to_cpu(sib->sib_sid) &
				    be64_to_cpu(sib->sib_sid_mask)));
	default:
		return 0;
	}
}

static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}

static void cma_save_ib_info(struct sockaddr *src_addr,
			     struct sockaddr *dst_addr,
			     struct rdma_cm_id *listen_id,
			     struct ib_sa_path_rec *path)
{
	struct sockaddr_ib *listen_ib, *ib;

	listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
	if (src_addr) {
		ib = (struct sockaddr_ib *)src_addr;
		ib->sib_family = AF_IB;
		if (path) {
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->sgid, 16);
			ib->sib_sid = path->service_id;
			ib->sib_scope_id = 0;
		} else {
			ib->sib_pkey = listen_ib->sib_pkey;
			ib->sib_flowinfo = listen_ib->sib_flowinfo;
			ib->sib_addr = listen_ib->sib_addr;
			ib->sib_sid = listen_ib->sib_sid;
			ib->sib_scope_id = listen_ib->sib_scope_id;
		}
		ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
	}
	if (dst_addr) {
		ib = (struct sockaddr_ib *)dst_addr;
		ib->sib_family = AF_IB;
		if (path) {
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->dgid, 16);
		}
	}
}

static void cma_save_ip4_info(struct sockaddr_in *src_addr,
			      struct sockaddr_in *dst_addr,
			      struct cma_hdr *hdr,
			      __be16 local_port)
{
	if (src_addr) {
		*src_addr = (struct sockaddr_in) {
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->dst_addr.ip4.addr,
			.sin_port = local_port,
		};
	}

	if (dst_addr) {
		*dst_addr = (struct sockaddr_in) {
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->src_addr.ip4.addr,
			.sin_port = hdr->port,
		};
	}
}

static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
			      struct sockaddr_in6 *dst_addr,
			      struct cma_hdr *hdr,
			      __be16 local_port)
{
	if (src_addr) {
		*src_addr = (struct sockaddr_in6) {
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->dst_addr.ip6,
			.sin6_port = local_port,
		};
	}

	if (dst_addr) {
		*dst_addr = (struct sockaddr_in6) {
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->src_addr.ip6,
			.sin6_port = hdr->port,
		};
	}
}

static u16 cma_port_from_service_id(__be64 service_id)
{
	return (u16)be64_to_cpu(service_id);
}

static int cma_save_ip_info(struct sockaddr *src_addr,
			    struct sockaddr *dst_addr,
			    struct ib_cm_event *ib_event,
			    __be64 service_id)
{
	struct cma_hdr *hdr;
	__be16 port;

	hdr = ib_event->private_data;
	if (hdr->cma_version != CMA_VERSION)
		return -EINVAL;

	port = htons(cma_port_from_service_id(service_id));

	switch (cma_get_ip_ver(hdr)) {
	case 4:
		cma_save_ip4_info((struct sockaddr_in *)src_addr,
				  (struct sockaddr_in *)dst_addr, hdr, port);
		break;
	case 6:
		cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
				  (struct sockaddr_in6 *)dst_addr, hdr, port);
		break;
	default:
		return -EAFNOSUPPORT;
	}

	return 0;
}
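/*
 * Fill in the connection's source and destination addresses, either from
 * the IB path record / listener (AF_IB) or from the cma_hdr carried in the
 * CM private data (IPv4/IPv6).
 */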
static int cma_save_net_info(struct sockaddr *src_addr,
			     struct sockaddr *dst_addr,
			     struct rdma_cm_id *listen_id,
			     struct ib_cm_event *ib_event,
			     sa_family_t sa_family, __be64 service_id)
{
	if (sa_family == AF_IB) {
		if (ib_event->event == IB_CM_REQ_RECEIVED)
			cma_save_ib_info(src_addr, dst_addr, listen_id,
					 ib_event->param.req_rcvd.primary_path);
		else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
			cma_save_ib_info(src_addr, dst_addr, listen_id, NULL);
		return 0;
	}

	return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);
}

static int cma_save_req_info(const struct ib_cm_event *ib_event,
			     struct cma_req_info *req)
{
	const struct ib_cm_req_event_param *req_param =
		&ib_event->param.req_rcvd;
	const struct ib_cm_sidr_req_event_param *sidr_param =
		&ib_event->param.sidr_req_rcvd;

	switch (ib_event->event) {
	case IB_CM_REQ_RECEIVED:
		req->device	= req_param->listen_id->device;
		req->port	= req_param->port;
		memcpy(&req->local_gid, &req_param->primary_path->sgid,
		       sizeof(req->local_gid));
		req->has_gid	= true;
		req->service_id	= req_param->primary_path->service_id;
		req->pkey	= be16_to_cpu(req_param->primary_path->pkey);
		if (req->pkey != req_param->bth_pkey)
			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
					    "RDMA CMA: in the future this may cause the request to be dropped\n",
					    req_param->bth_pkey, req->pkey);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		req->device	= sidr_param->listen_id->device;
		req->port	= sidr_param->port;
		req->has_gid	= false;
		req->service_id	= sidr_param->service_id;
		req->pkey	= sidr_param->pkey;
		if (req->pkey != sidr_param->bth_pkey)
			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
					    "RDMA CMA: in the future this may cause the request to be dropped\n",
					    sidr_param->bth_pkey, req->pkey);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static bool validate_ipv4_net_dev(struct net_device *net_dev,
				  const struct sockaddr_in *dst_addr,
				  const struct sockaddr_in *src_addr)
{
	__be32 daddr = dst_addr->sin_addr.s_addr,
	       saddr = src_addr->sin_addr.s_addr;
	struct fib_result res;
	struct flowi4 fl4;
	int err;
	bool ret;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) ||
	    ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) ||
	    ipv4_is_loopback(saddr))
		return false;

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_iif = net_dev->ifindex;
	fl4.daddr = daddr;
	fl4.saddr = saddr;

	rcu_read_lock();
	err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
	ret = err == 0 && FIB_RES_DEV(res) == net_dev;
	rcu_read_unlock();

	return ret;
}

static bool validate_ipv6_net_dev(struct net_device *net_dev,
				  const struct sockaddr_in6 *dst_addr,
				  const struct sockaddr_in6 *src_addr)
{
#if IS_ENABLED(CONFIG_IPV6)
	const int strict = ipv6_addr_type(&dst_addr->sin6_addr) &
			   IPV6_ADDR_LINKLOCAL;
	struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
					 &src_addr->sin6_addr, net_dev->ifindex,
					 strict);
	bool ret;

	if (!rt)
		return false;

	ret = rt->rt6i_idev->dev == net_dev;
	ip6_rt_put(rt);

	return ret;
#else
	return false;
#endif
}
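/*
 * Verify that @net_dev is the device the kernel routing tables would use
 * to reach the peer that sent the request, for the request's address family.
 */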
static bool validate_net_dev(struct net_device *net_dev,
			     const struct sockaddr *daddr,
			     const struct sockaddr *saddr)
{
	const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr;
	const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr;
	const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
	const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr;

	switch (daddr->sa_family) {
	case AF_INET:
		return saddr->sa_family == AF_INET &&
		       validate_ipv4_net_dev(net_dev, daddr4, saddr4);

	case AF_INET6:
		return saddr->sa_family == AF_INET6 &&
		       validate_ipv6_net_dev(net_dev, daddr6, saddr6);

	default:
		return false;
	}
}

static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
					  const struct cma_req_info *req)
{
	struct sockaddr_storage listen_addr_storage, src_addr_storage;
	struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage,
			*src_addr = (struct sockaddr *)&src_addr_storage;
	struct net_device *net_dev;
	const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
	int err;

	err = cma_save_ip_info(listen_addr, src_addr, ib_event,
			       req->service_id);
	if (err)
		return ERR_PTR(err);

	net_dev = ib_get_net_dev_by_params(req->device, req->port, req->pkey,
					   gid, listen_addr);
	if (!net_dev)
		return ERR_PTR(-ENODEV);

	if (!validate_net_dev(net_dev, listen_addr, src_addr)) {
		dev_put(net_dev);
		return ERR_PTR(-EHOSTUNREACH);
	}

	return net_dev;
}

static enum rdma_port_space rdma_ps_from_service_id(__be64 service_id)
{
	return (be64_to_cpu(service_id) >> 16) & 0xffff;
}

static bool cma_match_private_data(struct rdma_id_private *id_priv,
				   const struct cma_hdr *hdr)
{
	struct sockaddr *addr = cma_src_addr(id_priv);
	__be32 ip4_addr;
	struct in6_addr ip6_addr;

	if (cma_any_addr(addr) && !id_priv->afonly)
		return true;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
		if (cma_get_ip_ver(hdr) != 4)
			return false;
		if (!cma_any_addr(addr) &&
		    hdr->dst_addr.ip4.addr != ip4_addr)
			return false;
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
		if (cma_get_ip_ver(hdr) != 6)
			return false;
		if (!cma_any_addr(addr) &&
		    memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
			return false;
		break;
	case AF_IB:
		return true;
	default:
		return false;
	}

	return true;
}

static bool cma_protocol_roce_dev_port(struct ib_device *device, int port_num)
{
	enum rdma_link_layer ll = rdma_port_get_link_layer(device, port_num);
	enum rdma_transport_type transport =
		rdma_node_get_transport(device->node_type);

	return ll == IB_LINK_LAYER_ETHERNET && transport == RDMA_TRANSPORT_IB;
}

static bool cma_protocol_roce(const struct rdma_cm_id *id)
{
	struct ib_device *device = id->device;
	const int port_num = id->port_num ?: rdma_start_port(device);

	return cma_protocol_roce_dev_port(device, port_num);
}
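/*
 * A listener matches a request when it is bound to the netdev the request
 * arrived on (or is not bound to any netdev); if no netdev could be
 * resolved, the request must be AF_IB or RoCE and the ports must agree.
 */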
static bool cma_match_net_dev(const struct rdma_cm_id *id,
			      const struct net_device *net_dev,
			      u8 port_num)
{
	const struct rdma_addr *addr = &id->route.addr;

	if (!net_dev)
		/* This request is an AF_IB request or a RoCE request */
		return (!id->port_num || id->port_num == port_num) &&
		       (addr->src_addr.ss_family == AF_IB ||
			cma_protocol_roce_dev_port(id->device, port_num));

	return !addr->dev_addr.bound_dev_if ||
	       (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
		addr->dev_addr.bound_dev_if == net_dev->ifindex);
}

static struct rdma_id_private *cma_find_listener(
		const struct rdma_bind_list *bind_list,
		const struct ib_cm_id *cm_id,
		const struct ib_cm_event *ib_event,
		const struct cma_req_info *req,
		const struct net_device *net_dev)
{
	struct rdma_id_private *id_priv, *id_priv_dev;

	if (!bind_list)
		return ERR_PTR(-EINVAL);

	hlist_for_each_entry(id_priv, &bind_list->owners, node) {
		if (cma_match_private_data(id_priv, ib_event->private_data)) {
			if (id_priv->id.device == cm_id->device &&
			    cma_match_net_dev(&id_priv->id, net_dev, req->port))
				return id_priv;
			list_for_each_entry(id_priv_dev,
					    &id_priv->listen_list,
					    listen_list) {
				if (id_priv_dev->id.device == cm_id->device &&
				    cma_match_net_dev(&id_priv_dev->id, net_dev, req->port))
					return id_priv_dev;
			}
		}
	}

	return ERR_PTR(-EINVAL);
}

static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
						 struct ib_cm_event *ib_event,
						 struct net_device **net_dev)
{
	struct cma_req_info req;
	struct rdma_bind_list *bind_list;
	struct rdma_id_private *id_priv;
	int err;

	err = cma_save_req_info(ib_event, &req);
	if (err)
		return ERR_PTR(err);

	*net_dev = cma_get_net_dev(ib_event, &req);
	if (IS_ERR(*net_dev)) {
		if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
			/* Assuming the protocol is AF_IB */
			*net_dev = NULL;
		} else if (cma_protocol_roce_dev_port(req.device, req.port)) {
			/* TODO find the net dev matching the request parameters
			 * through the RoCE GID table */
			*net_dev = NULL;
		} else {
			return ERR_CAST(*net_dev);
		}
	}

	bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
				rdma_ps_from_service_id(req.service_id),
				cma_port_from_service_id(req.service_id));
	id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
	if (IS_ERR(id_priv) && *net_dev) {
		dev_put(*net_dev);
		*net_dev = NULL;
	}

	return id_priv;
}

static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
{
	return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
}

static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
	}
}
static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum rdma_cm_state state)
{
	switch (state) {
	case RDMA_CM_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case RDMA_CM_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case RDMA_CM_LISTEN:
		if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}

static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;
	struct net *net = id_priv->id.route.addr.dev_addr.net;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		cma_ps_remove(net, bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}

static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
				      id_priv->id.port_num)) {
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
		} else {
			if (mc->igmp_joined) {
				struct rdma_dev_addr *dev_addr =
					&id_priv->id.route.addr.dev_addr;
				struct net_device *ndev = NULL;

				if (dev_addr->bound_dev_if)
					ndev = dev_get_by_index(&init_net,
								dev_addr->bound_dev_if);
				if (ndev) {
					cma_igmp_send(ndev,
						      &mc->multicast.ib->rec.mgid,
						      false);
					dev_put(ndev);
				}
			}
			kref_put(&mc->mcref, release_mc);
		}
	}
}
void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum rdma_cm_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, RDMA_CM_DESTROYING);
	cma_cancel_operation(id_priv, state);

	/*
	 * Wait for any active callback to finish.  New callbacks will find
	 * the id_priv state set to destroying and abort.
	 */
	mutex_lock(&id_priv->handler_mutex);
	mutex_unlock(&id_priv->handler_mutex);

	if (id_priv->cma_dev) {
		if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.ib)
				ib_destroy_cm_id(id_priv->cm_id.ib);
		} else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.iw)
				iw_destroy_cm_id(id_priv->cm_id.iw);
		}
		cma_leave_mc_groups(id_priv);
		cma_release_dev(id_priv);
	}

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);
	put_net(id_priv->id.route.addr.dev_addr.net);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);

static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(id_priv, NULL);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(id_priv, NULL);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}

static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}
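/*
 * IB CM callback used on the active and established side: translate IB CM
 * events into RDMA CM events and dispatch them to the consumer's handler.
 * A non-zero handler return value destroys the id here.
 */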
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	mutex_lock(&id_priv->handler_mutex);
	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
	     id_priv->state != RDMA_CM_CONNECT) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
	     id_priv->state != RDMA_CM_DISCONNECT))
		goto out;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		if (id_priv->id.qp) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else {
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		}
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
				   RDMA_CM_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
		break;
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event,
					       struct net_device *net_dev)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
	const __be64 service_id =
		      ib_event->param.req_rcvd.primary_path->service_id;
	int ret;

	id = rdma_create_id(listen_id->route.addr.dev_addr.net,
			    listen_id->event_handler, listen_id->context,
			    listen_id->ps, ib_event->param.req_rcvd.qp_type);
	if (IS_ERR(id))
		return NULL;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
			      (struct sockaddr *)&id->route.addr.dst_addr,
			      listen_id, ib_event, ss_family, service_id))
		goto err;

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto err;

	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	if (net_dev) {
		ret = rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL);
		if (ret)
			goto err;
	} else {
		if (!cma_protocol_roce(listen_id) &&
		    cma_any_addr(cma_src_addr(id_priv))) {
			rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
			rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
			ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
		} else if (!cma_any_addr(cma_src_addr(id_priv))) {
			ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
			if (ret)
				goto err;
		}
	}
	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);

	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;

err:
	rdma_destroy_id(id);
	return NULL;
}

static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
					      struct ib_cm_event *ib_event,
					      struct net_device *net_dev)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
	struct net *net = listen_id->route.addr.dev_addr.net;
	int ret;

	id = rdma_create_id(net, listen_id->event_handler, listen_id->context,
			    listen_id->ps, IB_QPT_UD);
	if (IS_ERR(id))
		return NULL;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
			      (struct sockaddr *)&id->route.addr.dst_addr,
			      listen_id, ib_event, ss_family,
			      ib_event->param.sidr_req_rcvd.service_id))
		goto err;

	if (net_dev) {
		ret = rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL);
		if (ret)
			goto err;
	} else {
		if (!cma_any_addr(cma_src_addr(id_priv))) {
			ret = cma_translate_addr(cma_src_addr(id_priv),
						 &id->route.addr.dev_addr);
			if (ret)
				goto err;
		}
	}

	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}

static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}

static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
{
	return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
		 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
		((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
		 (id->qp_type == IB_QPT_UD)) ||
		(!id->qp_type));
}
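/*
 * IB CM callback for incoming REQ / SIDR_REQ: find the matching listener,
 * build a new conn_id bound to the device the request arrived on, and
 * deliver RDMA_CM_EVENT_CONNECT_REQUEST to the listener's handler.
 */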
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id = NULL;
	struct rdma_cm_event event;
	struct net_device *net_dev;
	int offset, ret;

	listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
	if (IS_ERR(listen_id))
		return PTR_ERR(listen_id);

	if (!cma_check_req_qp_type(&listen_id->id, ib_event)) {
		ret = -EINVAL;
		goto net_dev_put;
	}

	mutex_lock(&listen_id->handler_mutex);
	if (listen_id->state != RDMA_CM_LISTEN) {
		ret = -ECONNABORTED;
		goto err1;
	}

	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event, net_dev);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_new_conn_id(&listen_id->id, ib_event, net_dev);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto err1;
	}

	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	ret = cma_acquire_dev(conn_id, listen_id);
	if (ret)
		goto err2;

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	/*
	 * Protect against the user destroying conn_id from another thread
	 * until we're done accessing it.
	 */
	atomic_inc(&conn_id->refcount);
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret)
		goto err3;
	/*
	 * Acquire mutex to prevent user executing rdma_destroy_id()
	 * while we're accessing the cm_id.
	 */
	mutex_lock(&lock);
	if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
	    (conn_id->id.qp_type != IB_QPT_UD))
		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
	mutex_unlock(&lock);
	mutex_unlock(&conn_id->handler_mutex);
	mutex_unlock(&listen_id->handler_mutex);
	cma_deref_id(conn_id);
	if (net_dev)
		dev_put(net_dev);
	return 0;

err3:
	cma_deref_id(conn_id);
	/* Destroy the CM ID by returning a non-zero value. */
*/ 1992 conn_id->cm_id.ib = NULL; 1993 err2: 1994 cma_exch(conn_id, RDMA_CM_DESTROYING); 1995 mutex_unlock(&conn_id->handler_mutex); 1996 err1: 1997 mutex_unlock(&listen_id->handler_mutex); 1998 if (conn_id) 1999 rdma_destroy_id(&conn_id->id); 2000 2001 net_dev_put: 2002 if (net_dev) 2003 dev_put(net_dev); 2004 2005 return ret; 2006 } 2007 2008 __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr) 2009 { 2010 if (addr->sa_family == AF_IB) 2011 return ((struct sockaddr_ib *) addr)->sib_sid; 2012 2013 return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr))); 2014 } 2015 EXPORT_SYMBOL(rdma_get_service_id); 2016 2017 static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) 2018 { 2019 struct rdma_id_private *id_priv = iw_id->context; 2020 struct rdma_cm_event event; 2021 int ret = 0; 2022 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; 2023 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; 2024 2025 mutex_lock(&id_priv->handler_mutex); 2026 if (id_priv->state != RDMA_CM_CONNECT) 2027 goto out; 2028 2029 memset(&event, 0, sizeof event); 2030 switch (iw_event->event) { 2031 case IW_CM_EVENT_CLOSE: 2032 event.event = RDMA_CM_EVENT_DISCONNECTED; 2033 break; 2034 case IW_CM_EVENT_CONNECT_REPLY: 2035 memcpy(cma_src_addr(id_priv), laddr, 2036 rdma_addr_size(laddr)); 2037 memcpy(cma_dst_addr(id_priv), raddr, 2038 rdma_addr_size(raddr)); 2039 switch (iw_event->status) { 2040 case 0: 2041 event.event = RDMA_CM_EVENT_ESTABLISHED; 2042 event.param.conn.initiator_depth = iw_event->ird; 2043 event.param.conn.responder_resources = iw_event->ord; 2044 break; 2045 case -ECONNRESET: 2046 case -ECONNREFUSED: 2047 event.event = RDMA_CM_EVENT_REJECTED; 2048 break; 2049 case -ETIMEDOUT: 2050 event.event = RDMA_CM_EVENT_UNREACHABLE; 2051 break; 2052 default: 2053 event.event = RDMA_CM_EVENT_CONNECT_ERROR; 2054 break; 2055 } 2056 break; 2057 case IW_CM_EVENT_ESTABLISHED: 2058 event.event = RDMA_CM_EVENT_ESTABLISHED; 2059 event.param.conn.initiator_depth = iw_event->ird; 2060 event.param.conn.responder_resources = iw_event->ord; 2061 break; 2062 default: 2063 BUG_ON(1); 2064 } 2065 2066 event.status = iw_event->status; 2067 event.param.conn.private_data = iw_event->private_data; 2068 event.param.conn.private_data_len = iw_event->private_data_len; 2069 ret = id_priv->id.event_handler(&id_priv->id, &event); 2070 if (ret) { 2071 /* Destroy the CM ID by returning a non-zero value. 
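 * The iw_cm layer frees the cm_id once this handler returns the
 * non-zero status, so clear our pointer to it first and let
 * rdma_destroy_id() below skip the iWARP CM teardown.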
*/ 2072 id_priv->cm_id.iw = NULL; 2073 cma_exch(id_priv, RDMA_CM_DESTROYING); 2074 mutex_unlock(&id_priv->handler_mutex); 2075 rdma_destroy_id(&id_priv->id); 2076 return ret; 2077 } 2078 2079 out: 2080 mutex_unlock(&id_priv->handler_mutex); 2081 return ret; 2082 } 2083 2084 static int iw_conn_req_handler(struct iw_cm_id *cm_id, 2085 struct iw_cm_event *iw_event) 2086 { 2087 struct rdma_cm_id *new_cm_id; 2088 struct rdma_id_private *listen_id, *conn_id; 2089 struct rdma_cm_event event; 2090 int ret = -ECONNABORTED; 2091 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; 2092 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; 2093 2094 listen_id = cm_id->context; 2095 2096 mutex_lock(&listen_id->handler_mutex); 2097 if (listen_id->state != RDMA_CM_LISTEN) 2098 goto out; 2099 2100 /* Create a new RDMA id for the new IW CM ID */ 2101 new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net, 2102 listen_id->id.event_handler, 2103 listen_id->id.context, 2104 RDMA_PS_TCP, IB_QPT_RC); 2105 if (IS_ERR(new_cm_id)) { 2106 ret = -ENOMEM; 2107 goto out; 2108 } 2109 conn_id = container_of(new_cm_id, struct rdma_id_private, id); 2110 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); 2111 conn_id->state = RDMA_CM_CONNECT; 2112 2113 ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL); 2114 if (ret) { 2115 mutex_unlock(&conn_id->handler_mutex); 2116 rdma_destroy_id(new_cm_id); 2117 goto out; 2118 } 2119 2120 ret = cma_acquire_dev(conn_id, listen_id); 2121 if (ret) { 2122 mutex_unlock(&conn_id->handler_mutex); 2123 rdma_destroy_id(new_cm_id); 2124 goto out; 2125 } 2126 2127 conn_id->cm_id.iw = cm_id; 2128 cm_id->context = conn_id; 2129 cm_id->cm_handler = cma_iw_handler; 2130 2131 memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr)); 2132 memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr)); 2133 2134 memset(&event, 0, sizeof event); 2135 event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 2136 event.param.conn.private_data = iw_event->private_data; 2137 event.param.conn.private_data_len = iw_event->private_data_len; 2138 event.param.conn.initiator_depth = iw_event->ird; 2139 event.param.conn.responder_resources = iw_event->ord; 2140 2141 /* 2142 * Protect against the user destroying conn_id from another thread 2143 * until we're done accessing it. 
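 * The reference taken just below pairs with the cma_deref_id() calls
 * further down, on both the success and the error path, so conn_id
 * stays valid for the duration of the upcall even if the consumer's
 * handler asks for the id to be destroyed.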
2144 */ 2145 atomic_inc(&conn_id->refcount); 2146 ret = conn_id->id.event_handler(&conn_id->id, &event); 2147 if (ret) { 2148 /* User wants to destroy the CM ID */ 2149 conn_id->cm_id.iw = NULL; 2150 cma_exch(conn_id, RDMA_CM_DESTROYING); 2151 mutex_unlock(&conn_id->handler_mutex); 2152 cma_deref_id(conn_id); 2153 rdma_destroy_id(&conn_id->id); 2154 goto out; 2155 } 2156 2157 mutex_unlock(&conn_id->handler_mutex); 2158 cma_deref_id(conn_id); 2159 2160 out: 2161 mutex_unlock(&listen_id->handler_mutex); 2162 return ret; 2163 } 2164 2165 static int cma_ib_listen(struct rdma_id_private *id_priv) 2166 { 2167 struct sockaddr *addr; 2168 struct ib_cm_id *id; 2169 __be64 svc_id; 2170 2171 addr = cma_src_addr(id_priv); 2172 svc_id = rdma_get_service_id(&id_priv->id, addr); 2173 id = ib_cm_insert_listen(id_priv->id.device, cma_req_handler, svc_id); 2174 if (IS_ERR(id)) 2175 return PTR_ERR(id); 2176 id_priv->cm_id.ib = id; 2177 2178 return 0; 2179 } 2180 2181 static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) 2182 { 2183 int ret; 2184 struct iw_cm_id *id; 2185 2186 id = iw_create_cm_id(id_priv->id.device, 2187 iw_conn_req_handler, 2188 id_priv); 2189 if (IS_ERR(id)) 2190 return PTR_ERR(id); 2191 2192 id->tos = id_priv->tos; 2193 id_priv->cm_id.iw = id; 2194 2195 memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv), 2196 rdma_addr_size(cma_src_addr(id_priv))); 2197 2198 ret = iw_cm_listen(id_priv->cm_id.iw, backlog); 2199 2200 if (ret) { 2201 iw_destroy_cm_id(id_priv->cm_id.iw); 2202 id_priv->cm_id.iw = NULL; 2203 } 2204 2205 return ret; 2206 } 2207 2208 static int cma_listen_handler(struct rdma_cm_id *id, 2209 struct rdma_cm_event *event) 2210 { 2211 struct rdma_id_private *id_priv = id->context; 2212 2213 id->context = id_priv->id.context; 2214 id->event_handler = id_priv->id.event_handler; 2215 return id_priv->id.event_handler(id, event); 2216 } 2217 2218 static void cma_listen_on_dev(struct rdma_id_private *id_priv, 2219 struct cma_device *cma_dev) 2220 { 2221 struct rdma_id_private *dev_id_priv; 2222 struct rdma_cm_id *id; 2223 struct net *net = id_priv->id.route.addr.dev_addr.net; 2224 int ret; 2225 2226 if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) 2227 return; 2228 2229 id = rdma_create_id(net, cma_listen_handler, id_priv, id_priv->id.ps, 2230 id_priv->id.qp_type); 2231 if (IS_ERR(id)) 2232 return; 2233 2234 dev_id_priv = container_of(id, struct rdma_id_private, id); 2235 2236 dev_id_priv->state = RDMA_CM_ADDR_BOUND; 2237 memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv), 2238 rdma_addr_size(cma_src_addr(id_priv))); 2239 2240 _cma_attach_to_dev(dev_id_priv, cma_dev); 2241 list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); 2242 atomic_inc(&id_priv->refcount); 2243 dev_id_priv->internal_id = 1; 2244 dev_id_priv->afonly = id_priv->afonly; 2245 2246 ret = rdma_listen(id, id_priv->backlog); 2247 if (ret) 2248 pr_warn("RDMA CMA: cma_listen_on_dev, error %d, listening on device %s\n", 2249 ret, cma_dev->device->name); 2250 } 2251 2252 static void cma_listen_on_all(struct rdma_id_private *id_priv) 2253 { 2254 struct cma_device *cma_dev; 2255 2256 mutex_lock(&lock); 2257 list_add_tail(&id_priv->list, &listen_any_list); 2258 list_for_each_entry(cma_dev, &dev_list, list) 2259 cma_listen_on_dev(id_priv, cma_dev); 2260 mutex_unlock(&lock); 2261 } 2262 2263 void rdma_set_service_type(struct rdma_cm_id *id, int tos) 2264 { 2265 struct rdma_id_private *id_priv; 2266 2267 id_priv = container_of(id, struct rdma_id_private, id); 2268 
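	/*
	 * The ToS value stored here feeds route resolution rather than the
	 * wire directly: it is copied to the iw_cm_id for iWARP listens and
	 * connects, mapped to an SL via iboe_tos_to_sl() for RoCE routes,
	 * and carried as the qos_class in the IPv4 IB path query.  A
	 * consumer would therefore normally set it after rdma_create_id()
	 * and before resolving a route or connecting; a minimal,
	 * hypothetical consumer-side sketch (names are illustrative only):
	 *
	 *	id = rdma_create_id(net, ev_handler, ctx, RDMA_PS_TCP, IB_QPT_RC);
	 *	rdma_set_service_type(id, tos);
	 *	rdma_resolve_addr(id, NULL, dst_addr, 2000);
	 */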
id_priv->tos = (u8) tos; 2269 } 2270 EXPORT_SYMBOL(rdma_set_service_type); 2271 2272 static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec, 2273 void *context) 2274 { 2275 struct cma_work *work = context; 2276 struct rdma_route *route; 2277 2278 route = &work->id->id.route; 2279 2280 if (!status) { 2281 route->num_paths = 1; 2282 *route->path_rec = *path_rec; 2283 } else { 2284 work->old_state = RDMA_CM_ROUTE_QUERY; 2285 work->new_state = RDMA_CM_ADDR_RESOLVED; 2286 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; 2287 work->event.status = status; 2288 } 2289 2290 queue_work(cma_wq, &work->work); 2291 } 2292 2293 static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, 2294 struct cma_work *work) 2295 { 2296 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 2297 struct ib_sa_path_rec path_rec; 2298 ib_sa_comp_mask comp_mask; 2299 struct sockaddr_in6 *sin6; 2300 struct sockaddr_ib *sib; 2301 2302 memset(&path_rec, 0, sizeof path_rec); 2303 rdma_addr_get_sgid(dev_addr, &path_rec.sgid); 2304 rdma_addr_get_dgid(dev_addr, &path_rec.dgid); 2305 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 2306 path_rec.numb_path = 1; 2307 path_rec.reversible = 1; 2308 path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); 2309 2310 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | 2311 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH | 2312 IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID; 2313 2314 switch (cma_family(id_priv)) { 2315 case AF_INET: 2316 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos); 2317 comp_mask |= IB_SA_PATH_REC_QOS_CLASS; 2318 break; 2319 case AF_INET6: 2320 sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); 2321 path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20); 2322 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; 2323 break; 2324 case AF_IB: 2325 sib = (struct sockaddr_ib *) cma_src_addr(id_priv); 2326 path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20); 2327 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; 2328 break; 2329 } 2330 2331 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device, 2332 id_priv->id.port_num, &path_rec, 2333 comp_mask, timeout_ms, 2334 GFP_KERNEL, cma_query_handler, 2335 work, &id_priv->query); 2336 2337 return (id_priv->query_id < 0) ? 
id_priv->query_id : 0; 2338 } 2339 2340 static void cma_work_handler(struct work_struct *_work) 2341 { 2342 struct cma_work *work = container_of(_work, struct cma_work, work); 2343 struct rdma_id_private *id_priv = work->id; 2344 int destroy = 0; 2345 2346 mutex_lock(&id_priv->handler_mutex); 2347 if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) 2348 goto out; 2349 2350 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { 2351 cma_exch(id_priv, RDMA_CM_DESTROYING); 2352 destroy = 1; 2353 } 2354 out: 2355 mutex_unlock(&id_priv->handler_mutex); 2356 cma_deref_id(id_priv); 2357 if (destroy) 2358 rdma_destroy_id(&id_priv->id); 2359 kfree(work); 2360 } 2361 2362 static void cma_ndev_work_handler(struct work_struct *_work) 2363 { 2364 struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work); 2365 struct rdma_id_private *id_priv = work->id; 2366 int destroy = 0; 2367 2368 mutex_lock(&id_priv->handler_mutex); 2369 if (id_priv->state == RDMA_CM_DESTROYING || 2370 id_priv->state == RDMA_CM_DEVICE_REMOVAL) 2371 goto out; 2372 2373 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { 2374 cma_exch(id_priv, RDMA_CM_DESTROYING); 2375 destroy = 1; 2376 } 2377 2378 out: 2379 mutex_unlock(&id_priv->handler_mutex); 2380 cma_deref_id(id_priv); 2381 if (destroy) 2382 rdma_destroy_id(&id_priv->id); 2383 kfree(work); 2384 } 2385 2386 static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms) 2387 { 2388 struct rdma_route *route = &id_priv->id.route; 2389 struct cma_work *work; 2390 int ret; 2391 2392 work = kzalloc(sizeof *work, GFP_KERNEL); 2393 if (!work) 2394 return -ENOMEM; 2395 2396 work->id = id_priv; 2397 INIT_WORK(&work->work, cma_work_handler); 2398 work->old_state = RDMA_CM_ROUTE_QUERY; 2399 work->new_state = RDMA_CM_ROUTE_RESOLVED; 2400 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 2401 2402 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); 2403 if (!route->path_rec) { 2404 ret = -ENOMEM; 2405 goto err1; 2406 } 2407 2408 ret = cma_query_ib_route(id_priv, timeout_ms, work); 2409 if (ret) 2410 goto err2; 2411 2412 return 0; 2413 err2: 2414 kfree(route->path_rec); 2415 route->path_rec = NULL; 2416 err1: 2417 kfree(work); 2418 return ret; 2419 } 2420 2421 int rdma_set_ib_paths(struct rdma_cm_id *id, 2422 struct ib_sa_path_rec *path_rec, int num_paths) 2423 { 2424 struct rdma_id_private *id_priv; 2425 int ret; 2426 2427 id_priv = container_of(id, struct rdma_id_private, id); 2428 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, 2429 RDMA_CM_ROUTE_RESOLVED)) 2430 return -EINVAL; 2431 2432 id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths, 2433 GFP_KERNEL); 2434 if (!id->route.path_rec) { 2435 ret = -ENOMEM; 2436 goto err; 2437 } 2438 2439 id->route.num_paths = num_paths; 2440 return 0; 2441 err: 2442 cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED); 2443 return ret; 2444 } 2445 EXPORT_SYMBOL(rdma_set_ib_paths); 2446 2447 static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms) 2448 { 2449 struct cma_work *work; 2450 2451 work = kzalloc(sizeof *work, GFP_KERNEL); 2452 if (!work) 2453 return -ENOMEM; 2454 2455 work->id = id_priv; 2456 INIT_WORK(&work->work, cma_work_handler); 2457 work->old_state = RDMA_CM_ROUTE_QUERY; 2458 work->new_state = RDMA_CM_ROUTE_RESOLVED; 2459 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 2460 queue_work(cma_wq, &work->work); 2461 return 0; 2462 } 2463 2464 static int iboe_tos_to_sl(struct net_device *ndev, int tos) 2465 { 
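	/*
	 * Map the IP ToS to an Ethernet priority: rt_tos2priority() yields
	 * the skb priority, which either indexes the real device's traffic
	 * class map (when the device is configured with traffic classes)
	 * or, for a VLAN device, is translated through the egress QoS map
	 * into the 802.1p priority bits.  Without either, fall back to 0.
	 */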
2466 int prio; 2467 struct net_device *dev; 2468 2469 prio = rt_tos2priority(tos); 2470 dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev; 2471 if (dev->num_tc) 2472 return netdev_get_prio_tc_map(dev, prio); 2473 2474 #if IS_ENABLED(CONFIG_VLAN_8021Q) 2475 if (is_vlan_dev(ndev)) 2476 return (vlan_dev_get_egress_qos_mask(ndev, prio) & 2477 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; 2478 #endif 2479 return 0; 2480 } 2481 2482 static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type, 2483 unsigned long supported_gids, 2484 enum ib_gid_type default_gid) 2485 { 2486 if ((network_type == RDMA_NETWORK_IPV4 || 2487 network_type == RDMA_NETWORK_IPV6) && 2488 test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids)) 2489 return IB_GID_TYPE_ROCE_UDP_ENCAP; 2490 2491 return default_gid; 2492 } 2493 2494 static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) 2495 { 2496 struct rdma_route *route = &id_priv->id.route; 2497 struct rdma_addr *addr = &route->addr; 2498 struct cma_work *work; 2499 int ret; 2500 struct net_device *ndev = NULL; 2501 2502 2503 work = kzalloc(sizeof *work, GFP_KERNEL); 2504 if (!work) 2505 return -ENOMEM; 2506 2507 work->id = id_priv; 2508 INIT_WORK(&work->work, cma_work_handler); 2509 2510 route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL); 2511 if (!route->path_rec) { 2512 ret = -ENOMEM; 2513 goto err1; 2514 } 2515 2516 route->num_paths = 1; 2517 2518 if (addr->dev_addr.bound_dev_if) { 2519 unsigned long supported_gids; 2520 2521 ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if); 2522 if (!ndev) { 2523 ret = -ENODEV; 2524 goto err2; 2525 } 2526 2527 if (ndev->flags & IFF_LOOPBACK) { 2528 dev_put(ndev); 2529 if (!id_priv->id.device->get_netdev) { 2530 ret = -EOPNOTSUPP; 2531 goto err2; 2532 } 2533 2534 ndev = id_priv->id.device->get_netdev(id_priv->id.device, 2535 id_priv->id.port_num); 2536 if (!ndev) { 2537 ret = -ENODEV; 2538 goto err2; 2539 } 2540 } 2541 2542 route->path_rec->net = &init_net; 2543 route->path_rec->ifindex = ndev->ifindex; 2544 supported_gids = roce_gid_type_mask_support(id_priv->id.device, 2545 id_priv->id.port_num); 2546 route->path_rec->gid_type = 2547 cma_route_gid_type(addr->dev_addr.network, 2548 supported_gids, 2549 id_priv->gid_type); 2550 } 2551 if (!ndev) { 2552 ret = -ENODEV; 2553 goto err2; 2554 } 2555 2556 memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN); 2557 2558 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, 2559 &route->path_rec->sgid); 2560 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr, 2561 &route->path_rec->dgid); 2562 2563 /* Use the hint from IP Stack to select GID Type */ 2564 if (route->path_rec->gid_type < ib_network_to_gid_type(addr->dev_addr.network)) 2565 route->path_rec->gid_type = ib_network_to_gid_type(addr->dev_addr.network); 2566 if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB) 2567 /* TODO: get the hoplimit from the inet/inet6 device */ 2568 route->path_rec->hop_limit = addr->dev_addr.hoplimit; 2569 else 2570 route->path_rec->hop_limit = 1; 2571 route->path_rec->reversible = 1; 2572 route->path_rec->pkey = cpu_to_be16(0xffff); 2573 route->path_rec->mtu_selector = IB_SA_EQ; 2574 route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos); 2575 route->path_rec->mtu = iboe_get_mtu(ndev->mtu); 2576 route->path_rec->rate_selector = IB_SA_EQ; 2577 route->path_rec->rate = iboe_get_rate(ndev); 2578 dev_put(ndev); 2579 route->path_rec->packet_life_time_selector = IB_SA_EQ; 2580 
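	/*
	 * There is no SA to query on RoCE, so the packet lifetime (like the
	 * rest of this path record) is filled in locally from a fixed
	 * module constant instead of a fabric-provided value.
	 */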
route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME; 2581 if (!route->path_rec->mtu) { 2582 ret = -EINVAL; 2583 goto err2; 2584 } 2585 2586 work->old_state = RDMA_CM_ROUTE_QUERY; 2587 work->new_state = RDMA_CM_ROUTE_RESOLVED; 2588 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 2589 work->event.status = 0; 2590 2591 queue_work(cma_wq, &work->work); 2592 2593 return 0; 2594 2595 err2: 2596 kfree(route->path_rec); 2597 route->path_rec = NULL; 2598 err1: 2599 kfree(work); 2600 return ret; 2601 } 2602 2603 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) 2604 { 2605 struct rdma_id_private *id_priv; 2606 int ret; 2607 2608 id_priv = container_of(id, struct rdma_id_private, id); 2609 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY)) 2610 return -EINVAL; 2611 2612 atomic_inc(&id_priv->refcount); 2613 if (rdma_cap_ib_sa(id->device, id->port_num)) 2614 ret = cma_resolve_ib_route(id_priv, timeout_ms); 2615 else if (rdma_protocol_roce(id->device, id->port_num)) 2616 ret = cma_resolve_iboe_route(id_priv); 2617 else if (rdma_protocol_iwarp(id->device, id->port_num)) 2618 ret = cma_resolve_iw_route(id_priv, timeout_ms); 2619 else 2620 ret = -ENOSYS; 2621 2622 if (ret) 2623 goto err; 2624 2625 return 0; 2626 err: 2627 cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED); 2628 cma_deref_id(id_priv); 2629 return ret; 2630 } 2631 EXPORT_SYMBOL(rdma_resolve_route); 2632 2633 static void cma_set_loopback(struct sockaddr *addr) 2634 { 2635 switch (addr->sa_family) { 2636 case AF_INET: 2637 ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK); 2638 break; 2639 case AF_INET6: 2640 ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr, 2641 0, 0, 0, htonl(1)); 2642 break; 2643 default: 2644 ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr, 2645 0, 0, 0, htonl(1)); 2646 break; 2647 } 2648 } 2649 2650 static int cma_bind_loopback(struct rdma_id_private *id_priv) 2651 { 2652 struct cma_device *cma_dev, *cur_dev; 2653 struct ib_port_attr port_attr; 2654 union ib_gid gid; 2655 u16 pkey; 2656 int ret; 2657 u8 p; 2658 2659 cma_dev = NULL; 2660 mutex_lock(&lock); 2661 list_for_each_entry(cur_dev, &dev_list, list) { 2662 if (cma_family(id_priv) == AF_IB && 2663 !rdma_cap_ib_cm(cur_dev->device, 1)) 2664 continue; 2665 2666 if (!cma_dev) 2667 cma_dev = cur_dev; 2668 2669 for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { 2670 if (!ib_query_port(cur_dev->device, p, &port_attr) && 2671 port_attr.state == IB_PORT_ACTIVE) { 2672 cma_dev = cur_dev; 2673 goto port_found; 2674 } 2675 } 2676 } 2677 2678 if (!cma_dev) { 2679 ret = -ENODEV; 2680 goto out; 2681 } 2682 2683 p = 1; 2684 2685 port_found: 2686 ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid, NULL); 2687 if (ret) 2688 goto out; 2689 2690 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey); 2691 if (ret) 2692 goto out; 2693 2694 id_priv->id.route.addr.dev_addr.dev_type = 2695 (rdma_protocol_ib(cma_dev->device, p)) ? 
2696 ARPHRD_INFINIBAND : ARPHRD_ETHER; 2697 2698 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); 2699 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); 2700 id_priv->id.port_num = p; 2701 cma_attach_to_dev(id_priv, cma_dev); 2702 cma_set_loopback(cma_src_addr(id_priv)); 2703 out: 2704 mutex_unlock(&lock); 2705 return ret; 2706 } 2707 2708 static void addr_handler(int status, struct sockaddr *src_addr, 2709 struct rdma_dev_addr *dev_addr, void *context) 2710 { 2711 struct rdma_id_private *id_priv = context; 2712 struct rdma_cm_event event; 2713 2714 memset(&event, 0, sizeof event); 2715 mutex_lock(&id_priv->handler_mutex); 2716 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, 2717 RDMA_CM_ADDR_RESOLVED)) 2718 goto out; 2719 2720 memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr)); 2721 if (!status && !id_priv->cma_dev) 2722 status = cma_acquire_dev(id_priv, NULL); 2723 2724 if (status) { 2725 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, 2726 RDMA_CM_ADDR_BOUND)) 2727 goto out; 2728 event.event = RDMA_CM_EVENT_ADDR_ERROR; 2729 event.status = status; 2730 } else 2731 event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 2732 2733 if (id_priv->id.event_handler(&id_priv->id, &event)) { 2734 cma_exch(id_priv, RDMA_CM_DESTROYING); 2735 mutex_unlock(&id_priv->handler_mutex); 2736 cma_deref_id(id_priv); 2737 rdma_destroy_id(&id_priv->id); 2738 return; 2739 } 2740 out: 2741 mutex_unlock(&id_priv->handler_mutex); 2742 cma_deref_id(id_priv); 2743 } 2744 2745 static int cma_resolve_loopback(struct rdma_id_private *id_priv) 2746 { 2747 struct cma_work *work; 2748 union ib_gid gid; 2749 int ret; 2750 2751 work = kzalloc(sizeof *work, GFP_KERNEL); 2752 if (!work) 2753 return -ENOMEM; 2754 2755 if (!id_priv->cma_dev) { 2756 ret = cma_bind_loopback(id_priv); 2757 if (ret) 2758 goto err; 2759 } 2760 2761 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); 2762 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); 2763 2764 work->id = id_priv; 2765 INIT_WORK(&work->work, cma_work_handler); 2766 work->old_state = RDMA_CM_ADDR_QUERY; 2767 work->new_state = RDMA_CM_ADDR_RESOLVED; 2768 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 2769 queue_work(cma_wq, &work->work); 2770 return 0; 2771 err: 2772 kfree(work); 2773 return ret; 2774 } 2775 2776 static int cma_resolve_ib_addr(struct rdma_id_private *id_priv) 2777 { 2778 struct cma_work *work; 2779 int ret; 2780 2781 work = kzalloc(sizeof *work, GFP_KERNEL); 2782 if (!work) 2783 return -ENOMEM; 2784 2785 if (!id_priv->cma_dev) { 2786 ret = cma_resolve_ib_dev(id_priv); 2787 if (ret) 2788 goto err; 2789 } 2790 2791 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) 2792 &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr)); 2793 2794 work->id = id_priv; 2795 INIT_WORK(&work->work, cma_work_handler); 2796 work->old_state = RDMA_CM_ADDR_QUERY; 2797 work->new_state = RDMA_CM_ADDR_RESOLVED; 2798 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 2799 queue_work(cma_wq, &work->work); 2800 return 0; 2801 err: 2802 kfree(work); 2803 return ret; 2804 } 2805 2806 static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 2807 struct sockaddr *dst_addr) 2808 { 2809 if (!src_addr || !src_addr->sa_family) { 2810 src_addr = (struct sockaddr *) &id->route.addr.src_addr; 2811 src_addr->sa_family = dst_addr->sa_family; 2812 if (IS_ENABLED(CONFIG_IPV6) && 2813 dst_addr->sa_family == AF_INET6) { 2814 struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; 2815 struct sockaddr_in6 
*dst_addr6 = (struct sockaddr_in6 *) dst_addr; 2816 src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; 2817 if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) 2818 id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id; 2819 } else if (dst_addr->sa_family == AF_IB) { 2820 ((struct sockaddr_ib *) src_addr)->sib_pkey = 2821 ((struct sockaddr_ib *) dst_addr)->sib_pkey; 2822 } 2823 } 2824 return rdma_bind_addr(id, src_addr); 2825 } 2826 2827 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 2828 struct sockaddr *dst_addr, int timeout_ms) 2829 { 2830 struct rdma_id_private *id_priv; 2831 int ret; 2832 2833 id_priv = container_of(id, struct rdma_id_private, id); 2834 if (id_priv->state == RDMA_CM_IDLE) { 2835 ret = cma_bind_addr(id, src_addr, dst_addr); 2836 if (ret) 2837 return ret; 2838 } 2839 2840 if (cma_family(id_priv) != dst_addr->sa_family) 2841 return -EINVAL; 2842 2843 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) 2844 return -EINVAL; 2845 2846 atomic_inc(&id_priv->refcount); 2847 memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); 2848 if (cma_any_addr(dst_addr)) { 2849 ret = cma_resolve_loopback(id_priv); 2850 } else { 2851 if (dst_addr->sa_family == AF_IB) { 2852 ret = cma_resolve_ib_addr(id_priv); 2853 } else { 2854 ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv), 2855 dst_addr, &id->route.addr.dev_addr, 2856 timeout_ms, addr_handler, id_priv); 2857 } 2858 } 2859 if (ret) 2860 goto err; 2861 2862 return 0; 2863 err: 2864 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); 2865 cma_deref_id(id_priv); 2866 return ret; 2867 } 2868 EXPORT_SYMBOL(rdma_resolve_addr); 2869 2870 int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse) 2871 { 2872 struct rdma_id_private *id_priv; 2873 unsigned long flags; 2874 int ret; 2875 2876 id_priv = container_of(id, struct rdma_id_private, id); 2877 spin_lock_irqsave(&id_priv->lock, flags); 2878 if (reuse || id_priv->state == RDMA_CM_IDLE) { 2879 id_priv->reuseaddr = reuse; 2880 ret = 0; 2881 } else { 2882 ret = -EINVAL; 2883 } 2884 spin_unlock_irqrestore(&id_priv->lock, flags); 2885 return ret; 2886 } 2887 EXPORT_SYMBOL(rdma_set_reuseaddr); 2888 2889 int rdma_set_afonly(struct rdma_cm_id *id, int afonly) 2890 { 2891 struct rdma_id_private *id_priv; 2892 unsigned long flags; 2893 int ret; 2894 2895 id_priv = container_of(id, struct rdma_id_private, id); 2896 spin_lock_irqsave(&id_priv->lock, flags); 2897 if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) { 2898 id_priv->options |= (1 << CMA_OPTION_AFONLY); 2899 id_priv->afonly = afonly; 2900 ret = 0; 2901 } else { 2902 ret = -EINVAL; 2903 } 2904 spin_unlock_irqrestore(&id_priv->lock, flags); 2905 return ret; 2906 } 2907 EXPORT_SYMBOL(rdma_set_afonly); 2908 2909 static void cma_bind_port(struct rdma_bind_list *bind_list, 2910 struct rdma_id_private *id_priv) 2911 { 2912 struct sockaddr *addr; 2913 struct sockaddr_ib *sib; 2914 u64 sid, mask; 2915 __be16 port; 2916 2917 addr = cma_src_addr(id_priv); 2918 port = htons(bind_list->port); 2919 2920 switch (addr->sa_family) { 2921 case AF_INET: 2922 ((struct sockaddr_in *) addr)->sin_port = port; 2923 break; 2924 case AF_INET6: 2925 ((struct sockaddr_in6 *) addr)->sin6_port = port; 2926 break; 2927 case AF_IB: 2928 sib = (struct sockaddr_ib *) addr; 2929 sid = be64_to_cpu(sib->sib_sid); 2930 mask = be64_to_cpu(sib->sib_sid_mask); 2931 sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port)); 2932 sib->sib_sid_mask = 
cpu_to_be64(~0ULL); 2933 break; 2934 } 2935 id_priv->bind_list = bind_list; 2936 hlist_add_head(&id_priv->node, &bind_list->owners); 2937 } 2938 2939 static int cma_alloc_port(enum rdma_port_space ps, 2940 struct rdma_id_private *id_priv, unsigned short snum) 2941 { 2942 struct rdma_bind_list *bind_list; 2943 int ret; 2944 2945 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); 2946 if (!bind_list) 2947 return -ENOMEM; 2948 2949 ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list, 2950 snum); 2951 if (ret < 0) 2952 goto err; 2953 2954 bind_list->ps = ps; 2955 bind_list->port = (unsigned short)ret; 2956 cma_bind_port(bind_list, id_priv); 2957 return 0; 2958 err: 2959 kfree(bind_list); 2960 return ret == -ENOSPC ? -EADDRNOTAVAIL : ret; 2961 } 2962 2963 static int cma_alloc_any_port(enum rdma_port_space ps, 2964 struct rdma_id_private *id_priv) 2965 { 2966 static unsigned int last_used_port; 2967 int low, high, remaining; 2968 unsigned int rover; 2969 struct net *net = id_priv->id.route.addr.dev_addr.net; 2970 2971 inet_get_local_port_range(net, &low, &high); 2972 remaining = (high - low) + 1; 2973 rover = prandom_u32() % remaining + low; 2974 retry: 2975 if (last_used_port != rover && 2976 !cma_ps_find(net, ps, (unsigned short)rover)) { 2977 int ret = cma_alloc_port(ps, id_priv, rover); 2978 /* 2979 * Remember previously used port number in order to avoid 2980 * re-using same port immediately after it is closed. 2981 */ 2982 if (!ret) 2983 last_used_port = rover; 2984 if (ret != -EADDRNOTAVAIL) 2985 return ret; 2986 } 2987 if (--remaining) { 2988 rover++; 2989 if ((rover < low) || (rover > high)) 2990 rover = low; 2991 goto retry; 2992 } 2993 return -EADDRNOTAVAIL; 2994 } 2995 2996 /* 2997 * Check that the requested port is available. This is called when trying to 2998 * bind to a specific port, or when trying to listen on a bound port. In 2999 * the latter case, the provided id_priv may already be on the bind_list, but 3000 * we still need to check that it's okay to start listening. 
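 * Callers hold the global cma mutex.  A conflict is tolerated when both
 * ids opted into address reuse and the existing id is not yet listening,
 * or when both ids are AF-only and bound to different address families;
 * a wildcard address conflicts with any other owner of the port, and an
 * exact address match is reported as -EADDRINUSE.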
3001 */ 3002 static int cma_check_port(struct rdma_bind_list *bind_list, 3003 struct rdma_id_private *id_priv, uint8_t reuseaddr) 3004 { 3005 struct rdma_id_private *cur_id; 3006 struct sockaddr *addr, *cur_addr; 3007 3008 addr = cma_src_addr(id_priv); 3009 hlist_for_each_entry(cur_id, &bind_list->owners, node) { 3010 if (id_priv == cur_id) 3011 continue; 3012 3013 if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr && 3014 cur_id->reuseaddr) 3015 continue; 3016 3017 cur_addr = cma_src_addr(cur_id); 3018 if (id_priv->afonly && cur_id->afonly && 3019 (addr->sa_family != cur_addr->sa_family)) 3020 continue; 3021 3022 if (cma_any_addr(addr) || cma_any_addr(cur_addr)) 3023 return -EADDRNOTAVAIL; 3024 3025 if (!cma_addr_cmp(addr, cur_addr)) 3026 return -EADDRINUSE; 3027 } 3028 return 0; 3029 } 3030 3031 static int cma_use_port(enum rdma_port_space ps, 3032 struct rdma_id_private *id_priv) 3033 { 3034 struct rdma_bind_list *bind_list; 3035 unsigned short snum; 3036 int ret; 3037 3038 snum = ntohs(cma_port(cma_src_addr(id_priv))); 3039 if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) 3040 return -EACCES; 3041 3042 bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum); 3043 if (!bind_list) { 3044 ret = cma_alloc_port(ps, id_priv, snum); 3045 } else { 3046 ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); 3047 if (!ret) 3048 cma_bind_port(bind_list, id_priv); 3049 } 3050 return ret; 3051 } 3052 3053 static int cma_bind_listen(struct rdma_id_private *id_priv) 3054 { 3055 struct rdma_bind_list *bind_list = id_priv->bind_list; 3056 int ret = 0; 3057 3058 mutex_lock(&lock); 3059 if (bind_list->owners.first->next) 3060 ret = cma_check_port(bind_list, id_priv, 0); 3061 mutex_unlock(&lock); 3062 return ret; 3063 } 3064 3065 static enum rdma_port_space cma_select_inet_ps( 3066 struct rdma_id_private *id_priv) 3067 { 3068 switch (id_priv->id.ps) { 3069 case RDMA_PS_TCP: 3070 case RDMA_PS_UDP: 3071 case RDMA_PS_IPOIB: 3072 case RDMA_PS_IB: 3073 return id_priv->id.ps; 3074 default: 3075 3076 return 0; 3077 } 3078 } 3079 3080 static enum rdma_port_space cma_select_ib_ps(struct rdma_id_private *id_priv) 3081 { 3082 enum rdma_port_space ps = 0; 3083 struct sockaddr_ib *sib; 3084 u64 sid_ps, mask, sid; 3085 3086 sib = (struct sockaddr_ib *) cma_src_addr(id_priv); 3087 mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK; 3088 sid = be64_to_cpu(sib->sib_sid) & mask; 3089 3090 if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) { 3091 sid_ps = RDMA_IB_IP_PS_IB; 3092 ps = RDMA_PS_IB; 3093 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) && 3094 (sid == (RDMA_IB_IP_PS_TCP & mask))) { 3095 sid_ps = RDMA_IB_IP_PS_TCP; 3096 ps = RDMA_PS_TCP; 3097 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) && 3098 (sid == (RDMA_IB_IP_PS_UDP & mask))) { 3099 sid_ps = RDMA_IB_IP_PS_UDP; 3100 ps = RDMA_PS_UDP; 3101 } 3102 3103 if (ps) { 3104 sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib))); 3105 sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK | 3106 be64_to_cpu(sib->sib_sid_mask)); 3107 } 3108 return ps; 3109 } 3110 3111 static int cma_get_port(struct rdma_id_private *id_priv) 3112 { 3113 enum rdma_port_space ps; 3114 int ret; 3115 3116 if (cma_family(id_priv) != AF_IB) 3117 ps = cma_select_inet_ps(id_priv); 3118 else 3119 ps = cma_select_ib_ps(id_priv); 3120 if (!ps) 3121 return -EPROTONOSUPPORT; 3122 3123 mutex_lock(&lock); 3124 if (cma_any_port(cma_src_addr(id_priv))) 3125 ret = 
cma_alloc_any_port(ps, id_priv); 3126 else 3127 ret = cma_use_port(ps, id_priv); 3128 mutex_unlock(&lock); 3129 3130 return ret; 3131 } 3132 3133 static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, 3134 struct sockaddr *addr) 3135 { 3136 #if IS_ENABLED(CONFIG_IPV6) 3137 struct sockaddr_in6 *sin6; 3138 3139 if (addr->sa_family != AF_INET6) 3140 return 0; 3141 3142 sin6 = (struct sockaddr_in6 *) addr; 3143 3144 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)) 3145 return 0; 3146 3147 if (!sin6->sin6_scope_id) 3148 return -EINVAL; 3149 3150 dev_addr->bound_dev_if = sin6->sin6_scope_id; 3151 #endif 3152 return 0; 3153 } 3154 3155 int rdma_listen(struct rdma_cm_id *id, int backlog) 3156 { 3157 struct rdma_id_private *id_priv; 3158 int ret; 3159 3160 id_priv = container_of(id, struct rdma_id_private, id); 3161 if (id_priv->state == RDMA_CM_IDLE) { 3162 id->route.addr.src_addr.ss_family = AF_INET; 3163 ret = rdma_bind_addr(id, cma_src_addr(id_priv)); 3164 if (ret) 3165 return ret; 3166 } 3167 3168 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) 3169 return -EINVAL; 3170 3171 if (id_priv->reuseaddr) { 3172 ret = cma_bind_listen(id_priv); 3173 if (ret) 3174 goto err; 3175 } 3176 3177 id_priv->backlog = backlog; 3178 if (id->device) { 3179 if (rdma_cap_ib_cm(id->device, 1)) { 3180 ret = cma_ib_listen(id_priv); 3181 if (ret) 3182 goto err; 3183 } else if (rdma_cap_iw_cm(id->device, 1)) { 3184 ret = cma_iw_listen(id_priv, backlog); 3185 if (ret) 3186 goto err; 3187 } else { 3188 ret = -ENOSYS; 3189 goto err; 3190 } 3191 } else 3192 cma_listen_on_all(id_priv); 3193 3194 return 0; 3195 err: 3196 id_priv->backlog = 0; 3197 cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND); 3198 return ret; 3199 } 3200 EXPORT_SYMBOL(rdma_listen); 3201 3202 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) 3203 { 3204 struct rdma_id_private *id_priv; 3205 int ret; 3206 3207 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 && 3208 addr->sa_family != AF_IB) 3209 return -EAFNOSUPPORT; 3210 3211 id_priv = container_of(id, struct rdma_id_private, id); 3212 if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND)) 3213 return -EINVAL; 3214 3215 ret = cma_check_linklocal(&id->route.addr.dev_addr, addr); 3216 if (ret) 3217 goto err1; 3218 3219 memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr)); 3220 if (!cma_any_addr(addr)) { 3221 ret = cma_translate_addr(addr, &id->route.addr.dev_addr); 3222 if (ret) 3223 goto err1; 3224 3225 ret = cma_acquire_dev(id_priv, NULL); 3226 if (ret) 3227 goto err1; 3228 } 3229 3230 if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) { 3231 if (addr->sa_family == AF_INET) 3232 id_priv->afonly = 1; 3233 #if IS_ENABLED(CONFIG_IPV6) 3234 else if (addr->sa_family == AF_INET6) { 3235 struct net *net = id_priv->id.route.addr.dev_addr.net; 3236 3237 id_priv->afonly = net->ipv6.sysctl.bindv6only; 3238 } 3239 #endif 3240 } 3241 ret = cma_get_port(id_priv); 3242 if (ret) 3243 goto err2; 3244 3245 return 0; 3246 err2: 3247 if (id_priv->cma_dev) 3248 cma_release_dev(id_priv); 3249 err1: 3250 cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE); 3251 return ret; 3252 } 3253 EXPORT_SYMBOL(rdma_bind_addr); 3254 3255 static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv) 3256 { 3257 struct cma_hdr *cma_hdr; 3258 3259 cma_hdr = hdr; 3260 cma_hdr->cma_version = CMA_VERSION; 3261 if (cma_family(id_priv) == AF_INET) { 3262 struct sockaddr_in *src4, *dst4; 3263 3264 src4 = (struct sockaddr_in *) 
cma_src_addr(id_priv); 3265 dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv); 3266 3267 cma_set_ip_ver(cma_hdr, 4); 3268 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; 3269 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; 3270 cma_hdr->port = src4->sin_port; 3271 } else if (cma_family(id_priv) == AF_INET6) { 3272 struct sockaddr_in6 *src6, *dst6; 3273 3274 src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); 3275 dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv); 3276 3277 cma_set_ip_ver(cma_hdr, 6); 3278 cma_hdr->src_addr.ip6 = src6->sin6_addr; 3279 cma_hdr->dst_addr.ip6 = dst6->sin6_addr; 3280 cma_hdr->port = src6->sin6_port; 3281 } 3282 return 0; 3283 } 3284 3285 static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, 3286 struct ib_cm_event *ib_event) 3287 { 3288 struct rdma_id_private *id_priv = cm_id->context; 3289 struct rdma_cm_event event; 3290 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; 3291 int ret = 0; 3292 3293 mutex_lock(&id_priv->handler_mutex); 3294 if (id_priv->state != RDMA_CM_CONNECT) 3295 goto out; 3296 3297 memset(&event, 0, sizeof event); 3298 switch (ib_event->event) { 3299 case IB_CM_SIDR_REQ_ERROR: 3300 event.event = RDMA_CM_EVENT_UNREACHABLE; 3301 event.status = -ETIMEDOUT; 3302 break; 3303 case IB_CM_SIDR_REP_RECEIVED: 3304 event.param.ud.private_data = ib_event->private_data; 3305 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; 3306 if (rep->status != IB_SIDR_SUCCESS) { 3307 event.event = RDMA_CM_EVENT_UNREACHABLE; 3308 event.status = ib_event->param.sidr_rep_rcvd.status; 3309 break; 3310 } 3311 ret = cma_set_qkey(id_priv, rep->qkey); 3312 if (ret) { 3313 event.event = RDMA_CM_EVENT_ADDR_ERROR; 3314 event.status = ret; 3315 break; 3316 } 3317 ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num, 3318 id_priv->id.route.path_rec, 3319 &event.param.ud.ah_attr); 3320 event.param.ud.qp_num = rep->qpn; 3321 event.param.ud.qkey = rep->qkey; 3322 event.event = RDMA_CM_EVENT_ESTABLISHED; 3323 event.status = 0; 3324 break; 3325 default: 3326 pr_err("RDMA CMA: unexpected IB CM event: %d\n", 3327 ib_event->event); 3328 goto out; 3329 } 3330 3331 ret = id_priv->id.event_handler(&id_priv->id, &event); 3332 if (ret) { 3333 /* Destroy the CM ID by returning a non-zero value. 
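 * The ib_cm layer destroys cm_id when this handler returns non-zero,
 * so drop our reference before rdma_destroy_id() runs and leave the
 * ib_cm_id teardown to the caller.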
*/ 3334 id_priv->cm_id.ib = NULL; 3335 cma_exch(id_priv, RDMA_CM_DESTROYING); 3336 mutex_unlock(&id_priv->handler_mutex); 3337 rdma_destroy_id(&id_priv->id); 3338 return ret; 3339 } 3340 out: 3341 mutex_unlock(&id_priv->handler_mutex); 3342 return ret; 3343 } 3344 3345 static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, 3346 struct rdma_conn_param *conn_param) 3347 { 3348 struct ib_cm_sidr_req_param req; 3349 struct ib_cm_id *id; 3350 void *private_data; 3351 int offset, ret; 3352 3353 memset(&req, 0, sizeof req); 3354 offset = cma_user_data_offset(id_priv); 3355 req.private_data_len = offset + conn_param->private_data_len; 3356 if (req.private_data_len < conn_param->private_data_len) 3357 return -EINVAL; 3358 3359 if (req.private_data_len) { 3360 private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 3361 if (!private_data) 3362 return -ENOMEM; 3363 } else { 3364 private_data = NULL; 3365 } 3366 3367 if (conn_param->private_data && conn_param->private_data_len) 3368 memcpy(private_data + offset, conn_param->private_data, 3369 conn_param->private_data_len); 3370 3371 if (private_data) { 3372 ret = cma_format_hdr(private_data, id_priv); 3373 if (ret) 3374 goto out; 3375 req.private_data = private_data; 3376 } 3377 3378 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler, 3379 id_priv); 3380 if (IS_ERR(id)) { 3381 ret = PTR_ERR(id); 3382 goto out; 3383 } 3384 id_priv->cm_id.ib = id; 3385 3386 req.path = id_priv->id.route.path_rec; 3387 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); 3388 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8); 3389 req.max_cm_retries = CMA_MAX_CM_RETRIES; 3390 3391 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); 3392 if (ret) { 3393 ib_destroy_cm_id(id_priv->cm_id.ib); 3394 id_priv->cm_id.ib = NULL; 3395 } 3396 out: 3397 kfree(private_data); 3398 return ret; 3399 } 3400 3401 static int cma_connect_ib(struct rdma_id_private *id_priv, 3402 struct rdma_conn_param *conn_param) 3403 { 3404 struct ib_cm_req_param req; 3405 struct rdma_route *route; 3406 void *private_data; 3407 struct ib_cm_id *id; 3408 int offset, ret; 3409 3410 memset(&req, 0, sizeof req); 3411 offset = cma_user_data_offset(id_priv); 3412 req.private_data_len = offset + conn_param->private_data_len; 3413 if (req.private_data_len < conn_param->private_data_len) 3414 return -EINVAL; 3415 3416 if (req.private_data_len) { 3417 private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 3418 if (!private_data) 3419 return -ENOMEM; 3420 } else { 3421 private_data = NULL; 3422 } 3423 3424 if (conn_param->private_data && conn_param->private_data_len) 3425 memcpy(private_data + offset, conn_param->private_data, 3426 conn_param->private_data_len); 3427 3428 id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv); 3429 if (IS_ERR(id)) { 3430 ret = PTR_ERR(id); 3431 goto out; 3432 } 3433 id_priv->cm_id.ib = id; 3434 3435 route = &id_priv->id.route; 3436 if (private_data) { 3437 ret = cma_format_hdr(private_data, id_priv); 3438 if (ret) 3439 goto out; 3440 req.private_data = private_data; 3441 } 3442 3443 req.primary_path = &route->path_rec[0]; 3444 if (route->num_paths == 2) 3445 req.alternate_path = &route->path_rec[1]; 3446 3447 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); 3448 req.qp_num = id_priv->qp_num; 3449 req.qp_type = id_priv->id.qp_type; 3450 req.starting_psn = id_priv->seq_num; 3451 req.responder_resources = conn_param->responder_resources; 3452 req.initiator_depth = conn_param->initiator_depth; 3453 
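	/*
	 * The remaining REQ fields clamp the consumer-supplied retry counts
	 * to the 3-bit maximum a CM REQ can carry and take the CM response
	 * timeout and retry limit from the module-wide defaults.
	 */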
req.flow_control = conn_param->flow_control; 3454 req.retry_count = min_t(u8, 7, conn_param->retry_count); 3455 req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); 3456 req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; 3457 req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; 3458 req.max_cm_retries = CMA_MAX_CM_RETRIES; 3459 req.srq = id_priv->srq ? 1 : 0; 3460 3461 ret = ib_send_cm_req(id_priv->cm_id.ib, &req); 3462 out: 3463 if (ret && !IS_ERR(id)) { 3464 ib_destroy_cm_id(id); 3465 id_priv->cm_id.ib = NULL; 3466 } 3467 3468 kfree(private_data); 3469 return ret; 3470 } 3471 3472 static int cma_connect_iw(struct rdma_id_private *id_priv, 3473 struct rdma_conn_param *conn_param) 3474 { 3475 struct iw_cm_id *cm_id; 3476 int ret; 3477 struct iw_cm_conn_param iw_param; 3478 3479 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv); 3480 if (IS_ERR(cm_id)) 3481 return PTR_ERR(cm_id); 3482 3483 cm_id->tos = id_priv->tos; 3484 id_priv->cm_id.iw = cm_id; 3485 3486 memcpy(&cm_id->local_addr, cma_src_addr(id_priv), 3487 rdma_addr_size(cma_src_addr(id_priv))); 3488 memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv), 3489 rdma_addr_size(cma_dst_addr(id_priv))); 3490 3491 ret = cma_modify_qp_rtr(id_priv, conn_param); 3492 if (ret) 3493 goto out; 3494 3495 if (conn_param) { 3496 iw_param.ord = conn_param->initiator_depth; 3497 iw_param.ird = conn_param->responder_resources; 3498 iw_param.private_data = conn_param->private_data; 3499 iw_param.private_data_len = conn_param->private_data_len; 3500 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num; 3501 } else { 3502 memset(&iw_param, 0, sizeof iw_param); 3503 iw_param.qpn = id_priv->qp_num; 3504 } 3505 ret = iw_cm_connect(cm_id, &iw_param); 3506 out: 3507 if (ret) { 3508 iw_destroy_cm_id(cm_id); 3509 id_priv->cm_id.iw = NULL; 3510 } 3511 return ret; 3512 } 3513 3514 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 3515 { 3516 struct rdma_id_private *id_priv; 3517 int ret; 3518 3519 id_priv = container_of(id, struct rdma_id_private, id); 3520 if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) 3521 return -EINVAL; 3522 3523 if (!id->qp) { 3524 id_priv->qp_num = conn_param->qp_num; 3525 id_priv->srq = conn_param->srq; 3526 } 3527 3528 if (rdma_cap_ib_cm(id->device, id->port_num)) { 3529 if (id->qp_type == IB_QPT_UD) 3530 ret = cma_resolve_ib_udp(id_priv, conn_param); 3531 else 3532 ret = cma_connect_ib(id_priv, conn_param); 3533 } else if (rdma_cap_iw_cm(id->device, id->port_num)) 3534 ret = cma_connect_iw(id_priv, conn_param); 3535 else 3536 ret = -ENOSYS; 3537 if (ret) 3538 goto err; 3539 3540 return 0; 3541 err: 3542 cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED); 3543 return ret; 3544 } 3545 EXPORT_SYMBOL(rdma_connect); 3546 3547 static int cma_accept_ib(struct rdma_id_private *id_priv, 3548 struct rdma_conn_param *conn_param) 3549 { 3550 struct ib_cm_rep_param rep; 3551 int ret; 3552 3553 ret = cma_modify_qp_rtr(id_priv, conn_param); 3554 if (ret) 3555 goto out; 3556 3557 ret = cma_modify_qp_rts(id_priv, conn_param); 3558 if (ret) 3559 goto out; 3560 3561 memset(&rep, 0, sizeof rep); 3562 rep.qp_num = id_priv->qp_num; 3563 rep.starting_psn = id_priv->seq_num; 3564 rep.private_data = conn_param->private_data; 3565 rep.private_data_len = conn_param->private_data_len; 3566 rep.responder_resources = conn_param->responder_resources; 3567 rep.initiator_depth = conn_param->initiator_depth; 3568 rep.failover_accepted = 0; 3569 
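	/*
	 * As on the active side, the RNR retry count is clamped to the
	 * 3-bit maximum, and the SRQ flag reflects whether the connection
	 * uses a shared receive queue.
	 */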
rep.flow_control = conn_param->flow_control; 3570 rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); 3571 rep.srq = id_priv->srq ? 1 : 0; 3572 3573 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); 3574 out: 3575 return ret; 3576 } 3577 3578 static int cma_accept_iw(struct rdma_id_private *id_priv, 3579 struct rdma_conn_param *conn_param) 3580 { 3581 struct iw_cm_conn_param iw_param; 3582 int ret; 3583 3584 ret = cma_modify_qp_rtr(id_priv, conn_param); 3585 if (ret) 3586 return ret; 3587 3588 iw_param.ord = conn_param->initiator_depth; 3589 iw_param.ird = conn_param->responder_resources; 3590 iw_param.private_data = conn_param->private_data; 3591 iw_param.private_data_len = conn_param->private_data_len; 3592 if (id_priv->id.qp) { 3593 iw_param.qpn = id_priv->qp_num; 3594 } else 3595 iw_param.qpn = conn_param->qp_num; 3596 3597 return iw_cm_accept(id_priv->cm_id.iw, &iw_param); 3598 } 3599 3600 static int cma_send_sidr_rep(struct rdma_id_private *id_priv, 3601 enum ib_cm_sidr_status status, u32 qkey, 3602 const void *private_data, int private_data_len) 3603 { 3604 struct ib_cm_sidr_rep_param rep; 3605 int ret; 3606 3607 memset(&rep, 0, sizeof rep); 3608 rep.status = status; 3609 if (status == IB_SIDR_SUCCESS) { 3610 ret = cma_set_qkey(id_priv, qkey); 3611 if (ret) 3612 return ret; 3613 rep.qp_num = id_priv->qp_num; 3614 rep.qkey = id_priv->qkey; 3615 } 3616 rep.private_data = private_data; 3617 rep.private_data_len = private_data_len; 3618 3619 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); 3620 } 3621 3622 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 3623 { 3624 struct rdma_id_private *id_priv; 3625 int ret; 3626 3627 id_priv = container_of(id, struct rdma_id_private, id); 3628 3629 id_priv->owner = task_pid_nr(current); 3630 3631 if (!cma_comp(id_priv, RDMA_CM_CONNECT)) 3632 return -EINVAL; 3633 3634 if (!id->qp && conn_param) { 3635 id_priv->qp_num = conn_param->qp_num; 3636 id_priv->srq = conn_param->srq; 3637 } 3638 3639 if (rdma_cap_ib_cm(id->device, id->port_num)) { 3640 if (id->qp_type == IB_QPT_UD) { 3641 if (conn_param) 3642 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 3643 conn_param->qkey, 3644 conn_param->private_data, 3645 conn_param->private_data_len); 3646 else 3647 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 3648 0, NULL, 0); 3649 } else { 3650 if (conn_param) 3651 ret = cma_accept_ib(id_priv, conn_param); 3652 else 3653 ret = cma_rep_recv(id_priv); 3654 } 3655 } else if (rdma_cap_iw_cm(id->device, id->port_num)) 3656 ret = cma_accept_iw(id_priv, conn_param); 3657 else 3658 ret = -ENOSYS; 3659 3660 if (ret) 3661 goto reject; 3662 3663 return 0; 3664 reject: 3665 cma_modify_qp_err(id_priv); 3666 rdma_reject(id, NULL, 0); 3667 return ret; 3668 } 3669 EXPORT_SYMBOL(rdma_accept); 3670 3671 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event) 3672 { 3673 struct rdma_id_private *id_priv; 3674 int ret; 3675 3676 id_priv = container_of(id, struct rdma_id_private, id); 3677 if (!id_priv->cm_id.ib) 3678 return -EINVAL; 3679 3680 switch (id->device->node_type) { 3681 case RDMA_NODE_IB_CA: 3682 ret = ib_cm_notify(id_priv->cm_id.ib, event); 3683 break; 3684 default: 3685 ret = 0; 3686 break; 3687 } 3688 return ret; 3689 } 3690 EXPORT_SYMBOL(rdma_notify); 3691 3692 int rdma_reject(struct rdma_cm_id *id, const void *private_data, 3693 u8 private_data_len) 3694 { 3695 struct rdma_id_private *id_priv; 3696 int ret; 3697 3698 id_priv = container_of(id, struct rdma_id_private, id); 3699 if (!id_priv->cm_id.ib) 3700 return 
-EINVAL; 3701 3702 if (rdma_cap_ib_cm(id->device, id->port_num)) { 3703 if (id->qp_type == IB_QPT_UD) 3704 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0, 3705 private_data, private_data_len); 3706 else 3707 ret = ib_send_cm_rej(id_priv->cm_id.ib, 3708 IB_CM_REJ_CONSUMER_DEFINED, NULL, 3709 0, private_data, private_data_len); 3710 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 3711 ret = iw_cm_reject(id_priv->cm_id.iw, 3712 private_data, private_data_len); 3713 } else 3714 ret = -ENOSYS; 3715 3716 return ret; 3717 } 3718 EXPORT_SYMBOL(rdma_reject); 3719 3720 int rdma_disconnect(struct rdma_cm_id *id) 3721 { 3722 struct rdma_id_private *id_priv; 3723 int ret; 3724 3725 id_priv = container_of(id, struct rdma_id_private, id); 3726 if (!id_priv->cm_id.ib) 3727 return -EINVAL; 3728 3729 if (rdma_cap_ib_cm(id->device, id->port_num)) { 3730 ret = cma_modify_qp_err(id_priv); 3731 if (ret) 3732 goto out; 3733 /* Initiate or respond to a disconnect. */ 3734 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) 3735 ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0); 3736 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 3737 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); 3738 } else 3739 ret = -EINVAL; 3740 3741 out: 3742 return ret; 3743 } 3744 EXPORT_SYMBOL(rdma_disconnect); 3745 3746 static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) 3747 { 3748 struct rdma_id_private *id_priv; 3749 struct cma_multicast *mc = multicast->context; 3750 struct rdma_cm_event event; 3751 int ret = 0; 3752 3753 id_priv = mc->id_priv; 3754 mutex_lock(&id_priv->handler_mutex); 3755 if (id_priv->state != RDMA_CM_ADDR_BOUND && 3756 id_priv->state != RDMA_CM_ADDR_RESOLVED) 3757 goto out; 3758 3759 if (!status) 3760 status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); 3761 mutex_lock(&id_priv->qp_mutex); 3762 if (!status && id_priv->id.qp) 3763 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, 3764 be16_to_cpu(multicast->rec.mlid)); 3765 mutex_unlock(&id_priv->qp_mutex); 3766 3767 memset(&event, 0, sizeof event); 3768 event.status = status; 3769 event.param.ud.private_data = mc->context; 3770 if (!status) { 3771 struct rdma_dev_addr *dev_addr = 3772 &id_priv->id.route.addr.dev_addr; 3773 struct net_device *ndev = 3774 dev_get_by_index(&init_net, dev_addr->bound_dev_if); 3775 enum ib_gid_type gid_type = 3776 id_priv->cma_dev->default_gid_type[id_priv->id.port_num - 3777 rdma_start_port(id_priv->cma_dev->device)]; 3778 3779 event.event = RDMA_CM_EVENT_MULTICAST_JOIN; 3780 ib_init_ah_from_mcmember(id_priv->id.device, 3781 id_priv->id.port_num, &multicast->rec, 3782 ndev, gid_type, 3783 &event.param.ud.ah_attr); 3784 event.param.ud.qp_num = 0xFFFFFF; 3785 event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey); 3786 if (ndev) 3787 dev_put(ndev); 3788 } else 3789 event.event = RDMA_CM_EVENT_MULTICAST_ERROR; 3790 3791 ret = id_priv->id.event_handler(&id_priv->id, &event); 3792 if (ret) { 3793 cma_exch(id_priv, RDMA_CM_DESTROYING); 3794 mutex_unlock(&id_priv->handler_mutex); 3795 rdma_destroy_id(&id_priv->id); 3796 return 0; 3797 } 3798 3799 out: 3800 mutex_unlock(&id_priv->handler_mutex); 3801 return 0; 3802 } 3803 3804 static void cma_set_mgid(struct rdma_id_private *id_priv, 3805 struct sockaddr *addr, union ib_gid *mgid) 3806 { 3807 unsigned char mc_map[MAX_ADDR_LEN]; 3808 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 3809 struct sockaddr_in *sin = (struct sockaddr_in *) addr; 3810 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr; 3811 3812 if 
(cma_any_addr(addr)) { 3813 memset(mgid, 0, sizeof *mgid); 3814 } else if ((addr->sa_family == AF_INET6) && 3815 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) == 3816 0xFF10A01B)) { 3817 /* IPv6 address is an SA assigned MGID. */ 3818 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); 3819 } else if (addr->sa_family == AF_IB) { 3820 memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid); 3821 } else if ((addr->sa_family == AF_INET6)) { 3822 ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map); 3823 if (id_priv->id.ps == RDMA_PS_UDP) 3824 mc_map[7] = 0x01; /* Use RDMA CM signature */ 3825 *mgid = *(union ib_gid *) (mc_map + 4); 3826 } else { 3827 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map); 3828 if (id_priv->id.ps == RDMA_PS_UDP) 3829 mc_map[7] = 0x01; /* Use RDMA CM signature */ 3830 *mgid = *(union ib_gid *) (mc_map + 4); 3831 } 3832 } 3833 3834 static void cma_query_sa_classport_info_cb(int status, 3835 struct ib_class_port_info *rec, 3836 void *context) 3837 { 3838 struct class_port_info_context *cb_ctx = context; 3839 3840 WARN_ON(!context); 3841 3842 if (status || !rec) { 3843 pr_debug("RDMA CM: %s port %u failed query ClassPortInfo status: %d\n", 3844 cb_ctx->device->name, cb_ctx->port_num, status); 3845 goto out; 3846 } 3847 3848 memcpy(cb_ctx->class_port_info, rec, sizeof(struct ib_class_port_info)); 3849 3850 out: 3851 complete(&cb_ctx->done); 3852 } 3853 3854 static int cma_query_sa_classport_info(struct ib_device *device, u8 port_num, 3855 struct ib_class_port_info *class_port_info) 3856 { 3857 struct class_port_info_context *cb_ctx; 3858 int ret; 3859 3860 cb_ctx = kmalloc(sizeof(*cb_ctx), GFP_KERNEL); 3861 if (!cb_ctx) 3862 return -ENOMEM; 3863 3864 cb_ctx->device = device; 3865 cb_ctx->class_port_info = class_port_info; 3866 cb_ctx->port_num = port_num; 3867 init_completion(&cb_ctx->done); 3868 3869 ret = ib_sa_classport_info_rec_query(&sa_client, device, port_num, 3870 CMA_QUERY_CLASSPORT_INFO_TIMEOUT, 3871 GFP_KERNEL, cma_query_sa_classport_info_cb, 3872 cb_ctx, &cb_ctx->sa_query); 3873 if (ret < 0) { 3874 pr_err("RDMA CM: %s port %u failed to send ClassPortInfo query, ret: %d\n", 3875 device->name, port_num, ret); 3876 goto out; 3877 } 3878 3879 wait_for_completion(&cb_ctx->done); 3880 3881 out: 3882 kfree(cb_ctx); 3883 return ret; 3884 } 3885 3886 static int cma_join_ib_multicast(struct rdma_id_private *id_priv, 3887 struct cma_multicast *mc) 3888 { 3889 struct ib_sa_mcmember_rec rec; 3890 struct ib_class_port_info class_port_info; 3891 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 3892 ib_sa_comp_mask comp_mask; 3893 int ret; 3894 3895 ib_addr_get_mgid(dev_addr, &rec.mgid); 3896 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, 3897 &rec.mgid, &rec); 3898 if (ret) 3899 return ret; 3900 3901 ret = cma_set_qkey(id_priv, 0); 3902 if (ret) 3903 return ret; 3904 3905 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); 3906 rec.qkey = cpu_to_be32(id_priv->qkey); 3907 rdma_addr_get_sgid(dev_addr, &rec.port_gid); 3908 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 3909 rec.join_state = mc->join_state; 3910 3911 if (rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) { 3912 ret = cma_query_sa_classport_info(id_priv->id.device, 3913 id_priv->id.port_num, 3914 &class_port_info); 3915 3916 if (ret) 3917 return ret; 3918 3919 if (!(ib_get_cpi_capmask2(&class_port_info) & 3920 IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT)) { 3921 pr_warn("RDMA CM: %s port %u Unable to multicast 
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct ib_class_port_info class_port_info;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	ret = cma_set_qkey(id_priv, 0);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	rec.qkey = cpu_to_be32(id_priv->qkey);
	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = mc->join_state;

	if (rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) {
		ret = cma_query_sa_classport_info(id_priv->id.device,
						  id_priv->id.port_num,
						  &class_port_info);

		if (ret)
			return ret;

		if (!(ib_get_cpi_capmask2(&class_port_info) &
		      IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT)) {
			pr_warn("RDMA CM: %s port %u Unable to multicast join\n"
				"RDMA CM: SM doesn't support Send Only Full Member option\n",
				id_priv->id.device->name, id_priv->id.port_num);
			return -EOPNOTSUPP;
		}
	}

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU |
			     IB_SA_MCMEMBER_REC_HOP_LIMIT;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	return PTR_ERR_OR_ZERO(mc->multicast.ib);
}

static void iboe_mcast_work_handler(struct work_struct *work)
{
	struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
	struct cma_multicast *mc = mw->mc;
	struct ib_sa_multicast *m = mc->multicast.ib;

	mc->multicast.ib->context = mc;
	cma_ib_mc_handler(0, m);
	kref_put(&mc->mcref, release_mc);
	kfree(mw);
}

static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		mgid->raw[0] = 0xff;
		mgid->raw[1] = 0x0e;
		mgid->raw[2] = 0;
		mgid->raw[3] = 0;
		mgid->raw[4] = 0;
		mgid->raw[5] = 0;
		mgid->raw[6] = 0;
		mgid->raw[7] = 0;
		mgid->raw[8] = 0;
		mgid->raw[9] = 0;
		mgid->raw[10] = 0xff;
		mgid->raw[11] = 0xff;
		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
	}
}

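/*
 * RoCE (IBoE) has no subnet administrator, so the join is completed
 * locally: MGID, rate and MTU are derived from the bound net_device, an
 * IGMP join is issued for RoCE v2 (UDP encap) unless the join is
 * send-only, and completion is reported through cma_ib_mc_handler from a
 * work item.
 */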
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{
	struct iboe_mcast_work *work;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int err = 0;
	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
	struct net_device *ndev = NULL;
	enum ib_gid_type gid_type;
	bool send_only;

	send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);

	if (cma_zero_addr((struct sockaddr *)&mc->addr))
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
	if (!mc->multicast.ib) {
		err = -ENOMEM;
		goto out1;
	}

	cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);

	mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
	if (id_priv->id.ps == RDMA_PS_UDP)
		mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);

	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
	if (!ndev) {
		err = -ENODEV;
		goto out2;
	}
	mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
	mc->multicast.ib->rec.hop_limit = 1;
	mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);

	gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
		   rdma_start_port(id_priv->cma_dev->device)];
	if (addr->sa_family == AF_INET) {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
			if (!send_only) {
				err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
						    true);
				if (!err)
					mc->igmp_joined = true;
			}
		}
	} else {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
			err = -ENOTSUPP;
	}
	dev_put(ndev);
	if (err || !mc->multicast.ib->rec.mtu) {
		if (!err)
			err = -EINVAL;
		goto out2;
	}
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &mc->multicast.ib->rec.port_gid);
	work->id = id_priv;
	work->mc = mc;
	INIT_WORK(&work->work, iboe_mcast_work_handler);
	kref_get(&mc->mcref);
	queue_work(cma_wq, &work->work);

	return 0;

out2:
	kfree(mc->multicast.ib);
out1:
	kfree(work);
	return err;
}

int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			u8 join_state, void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, rdma_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;
	mc->igmp_joined = false;
	mc->join_state = join_state;
	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	if (rdma_protocol_roce(id->device, id->port_num)) {
		kref_init(&mc->mcref);
		ret = cma_iboe_join_multicast(id_priv, mc);
	} else if (rdma_cap_ib_mcast(id->device, id->port_num))
		ret = cma_join_ib_multicast(id_priv, mc);
	else
		ret = -ENOSYS;

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);

void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						be16_to_cpu(mc->multicast.ib->rec.mlid));

			BUG_ON(id_priv->cma_dev->device != id->device);

			if (rdma_cap_ib_mcast(id->device, id->port_num)) {
				ib_sa_free_multicast(mc->multicast.ib);
				kfree(mc);
			} else if (rdma_protocol_roce(id->device, id->port_num)) {
				if (mc->igmp_joined) {
					struct rdma_dev_addr *dev_addr =
						&id->route.addr.dev_addr;
					struct net_device *ndev = NULL;

					if (dev_addr->bound_dev_if)
						ndev = dev_get_by_index(&init_net,
									dev_addr->bound_dev_if);
					if (ndev) {
						cma_igmp_send(ndev,
							      &mc->multicast.ib->rec.mgid,
							      false);
						dev_put(ndev);
					}
					mc->igmp_joined = false;
				}
				kref_put(&mc->mcref, release_mc);
			}
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);

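/*
 * Sketch of how a kernel consumer typically drives the multicast API above
 * (illustrative only: the handler name, variables and error handling are
 * assumptions, not code from this file).  The id must be address-bound or
 * resolved first, and the join completion arrives through the cm event
 * handler:
 *
 *	static int my_handler(struct rdma_cm_id *id, struct rdma_cm_event *ev)
 *	{
 *		if (ev->event == RDMA_CM_EVENT_MULTICAST_JOIN) {
 *			// build an address handle from ev->param.ud.ah_attr
 *			// and post sends to the group using ev->param.ud.qkey
 *		}
 *		return 0;
 *	}
 *
 *	rdma_join_multicast(id, (struct sockaddr *)&mcast_addr, join_state,
 *			    my_ctx);
 *	...
 *	rdma_leave_multicast(id, (struct sockaddr *)&mcast_addr);
 *
 * join_state selects the membership type; BIT(SENDONLY_FULLMEMBER_JOIN) is
 * the send-only variant handled specially above.
 */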
static int cma_netdev_change(struct net_device *ndev,
			     struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_ndev_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
	    (net_eq(dev_net(ndev), dev_addr->net)) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		pr_info("RDMA CM addr change for ndev %s used by id %p\n",
			ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		atomic_inc(&id_priv->refcount);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}

static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}

static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};

static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	unsigned int i;
	unsigned long supported_gids = 0;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;
	cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
					    sizeof(*cma_dev->default_gid_type),
					    GFP_KERNEL);
	if (!cma_dev->default_gid_type) {
		kfree(cma_dev);
		return;
	}
	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		supported_gids = roce_gid_type_mask_support(device, i);
		WARN_ON(!supported_gids);
		cma_dev->default_gid_type[i - rdma_start_port(device)] =
			find_first_bit(&supported_gids, BITS_PER_LONG);
	}

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

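/*
 * Device removal: every id bound to the departing device is notified with
 * RDMA_CM_EVENT_DEVICE_REMOVAL; ids whose handler returns non-zero (and
 * internally created listen ids) are destroyed on their behalf by
 * cma_process_remove().
 */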
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum rdma_cm_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
	if (state == RDMA_CM_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}

static void cma_remove_one(struct ib_device *device, void *client_data)
{
	struct cma_device *cma_dev = client_data;

	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev->default_gid_type);
	kfree(cma_dev);
}

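/*
 * Netlink dump callback for RDMA_NL_RDMA_CM_ID_STATS: one message is
 * emitted per cma id, and cb->args[] records how far the previous pass
 * got so an interrupted dump can resume.
 */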
static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct rdma_cm_id_stats *id_stats;
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id = NULL;
	struct cma_device *cma_dev;
	int i_dev = 0, i_id = 0;

	/*
	 * We export all of the IDs as a sequence of messages.  Each
	 * ID gets its own netlink message.
	 */
	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list) {
		if (i_dev < cb->args[0]) {
			i_dev++;
			continue;
		}

		i_id = 0;
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			if (i_id < cb->args[1]) {
				i_id++;
				continue;
			}

			id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
						sizeof *id_stats, RDMA_NL_RDMA_CM,
						RDMA_NL_RDMA_CM_ID_STATS,
						NLM_F_MULTI);
			if (!id_stats)
				goto out;

			memset(id_stats, 0, sizeof *id_stats);
			id = &id_priv->id;
			id_stats->node_type = id->route.addr.dev_addr.dev_type;
			id_stats->port_num = id->port_num;
			id_stats->bound_dev_if =
				id->route.addr.dev_addr.bound_dev_if;

			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_src_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
				goto out;
			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_dst_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
				goto out;

			id_stats->pid = id_priv->owner;
			id_stats->port_space = id->ps;
			id_stats->cm_state = id_priv->state;
			id_stats->qp_num = id_priv->qp_num;
			id_stats->qp_type = id->qp_type;

			i_id++;
		}

		cb->args[1] = 0;
		i_dev++;
	}

out:
	mutex_unlock(&lock);
	cb->args[0] = i_dev;
	cb->args[1] = i_id;

	return skb->len;
}

static const struct ibnl_client_cbs cma_cb_table[] = {
	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
				       .module = THIS_MODULE },
};

static int cma_init_net(struct net *net)
{
	struct cma_pernet *pernet = cma_pernet(net);

	idr_init(&pernet->tcp_ps);
	idr_init(&pernet->udp_ps);
	idr_init(&pernet->ipoib_ps);
	idr_init(&pernet->ib_ps);

	return 0;
}

static void cma_exit_net(struct net *net)
{
	struct cma_pernet *pernet = cma_pernet(net);

	idr_destroy(&pernet->tcp_ps);
	idr_destroy(&pernet->udp_ps);
	idr_destroy(&pernet->ipoib_ps);
	idr_destroy(&pernet->ib_ps);
}

static struct pernet_operations cma_pernet_operations = {
	.init = cma_init_net,
	.exit = cma_exit_net,
	.id = &cma_pernet_id,
	.size = sizeof(struct cma_pernet),
};

static int __init cma_init(void)
{
	int ret;

	cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
	if (!cma_wq)
		return -ENOMEM;

	ret = register_pernet_subsys(&cma_pernet_operations);
	if (ret)
		goto err_wq;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;

	if (ibnl_add_client(RDMA_NL_RDMA_CM, ARRAY_SIZE(cma_cb_table),
			    cma_cb_table))
		pr_warn("RDMA CMA: failed to add netlink callback\n");
	cma_configfs_init();

	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
err_wq:
	destroy_workqueue(cma_wq);
	return ret;
}

static void __exit cma_cleanup(void)
{
	cma_configfs_exit();
	ibnl_remove_client(RDMA_NL_RDMA_CM);
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	unregister_pernet_subsys(&cma_pernet_operations);
	destroy_workqueue(cma_wq);
}

module_init(cma_init);
module_exit(cma_cleanup);