124f52149SLeon Romanovsky // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2e51060f0SSean Hefty /* 3e51060f0SSean Hefty * Copyright (c) 2005 Voltaire Inc. All rights reserved. 4e51060f0SSean Hefty * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved. 524f52149SLeon Romanovsky * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved. 6e51060f0SSean Hefty * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. 7e51060f0SSean Hefty */ 8e51060f0SSean Hefty 9e51060f0SSean Hefty #include <linux/completion.h> 10e51060f0SSean Hefty #include <linux/in.h> 11e51060f0SSean Hefty #include <linux/in6.h> 12e51060f0SSean Hefty #include <linux/mutex.h> 13e51060f0SSean Hefty #include <linux/random.h> 14fc008bdbSPatrisious Haddad #include <linux/rbtree.h> 15bee3c3c9SMoni Shoua #include <linux/igmp.h> 1663826753SMatthew Wilcox #include <linux/xarray.h> 1707ebafbaSTom Tucker #include <linux/inetdevice.h> 185a0e3ad6STejun Heo #include <linux/slab.h> 19e4dd23d7SPaul Gortmaker #include <linux/module.h> 20366cddb4SAmir Vadai #include <net/route.h> 21e51060f0SSean Hefty 224be74b42SHaggai Eran #include <net/net_namespace.h> 234be74b42SHaggai Eran #include <net/netns/generic.h> 24925d046eSPatrisious Haddad #include <net/netevent.h> 25e51060f0SSean Hefty #include <net/tcp.h> 261f5175adSAleksey Senin #include <net/ipv6.h> 27f887f2acSHaggai Eran #include <net/ip_fib.h> 28f887f2acSHaggai Eran #include <net/ip6_route.h> 29e51060f0SSean Hefty 30e51060f0SSean Hefty #include <rdma/rdma_cm.h> 31e51060f0SSean Hefty #include <rdma/rdma_cm_ib.h> 32753f618aSNir Muchtar #include <rdma/rdma_netlink.h> 332e2d190cSSean Hefty #include <rdma/ib.h> 34e51060f0SSean Hefty #include <rdma/ib_cache.h> 35e51060f0SSean Hefty #include <rdma/ib_cm.h> 36e51060f0SSean Hefty #include <rdma/ib_sa.h> 3707ebafbaSTom Tucker #include <rdma/iw_cm.h> 38e51060f0SSean Hefty 39218a773fSMatan Barak #include "core_priv.h" 40a3b641afSSteve Wise #include "cma_priv.h" 41ed999f82SChuck Lever 
#include "cma_trace.h" 42218a773fSMatan Barak 43e51060f0SSean Hefty MODULE_AUTHOR("Sean Hefty"); 44e51060f0SSean Hefty MODULE_DESCRIPTION("Generic RDMA CM Agent"); 45e51060f0SSean Hefty MODULE_LICENSE("Dual BSD/GPL"); 46e51060f0SSean Hefty 47e51060f0SSean Hefty #define CMA_CM_RESPONSE_TIMEOUT 20 48d5bb7599SMichael S. Tsirkin #define CMA_MAX_CM_RETRIES 15 49dcb3f974SSean Hefty #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24) 503c86aa70SEli Cohen #define CMA_IBOE_PACKET_LIFETIME 18 515ab2d89bSLeon Romanovsky #define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP 52e51060f0SSean Hefty 532b1b5b60SSagi Grimberg static const char * const cma_events[] = { 542b1b5b60SSagi Grimberg [RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved", 552b1b5b60SSagi Grimberg [RDMA_CM_EVENT_ADDR_ERROR] = "address error", 562b1b5b60SSagi Grimberg [RDMA_CM_EVENT_ROUTE_RESOLVED] = "route resolved ", 572b1b5b60SSagi Grimberg [RDMA_CM_EVENT_ROUTE_ERROR] = "route error", 582b1b5b60SSagi Grimberg [RDMA_CM_EVENT_CONNECT_REQUEST] = "connect request", 592b1b5b60SSagi Grimberg [RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response", 602b1b5b60SSagi Grimberg [RDMA_CM_EVENT_CONNECT_ERROR] = "connect error", 612b1b5b60SSagi Grimberg [RDMA_CM_EVENT_UNREACHABLE] = "unreachable", 622b1b5b60SSagi Grimberg [RDMA_CM_EVENT_REJECTED] = "rejected", 632b1b5b60SSagi Grimberg [RDMA_CM_EVENT_ESTABLISHED] = "established", 642b1b5b60SSagi Grimberg [RDMA_CM_EVENT_DISCONNECTED] = "disconnected", 652b1b5b60SSagi Grimberg [RDMA_CM_EVENT_DEVICE_REMOVAL] = "device removal", 662b1b5b60SSagi Grimberg [RDMA_CM_EVENT_MULTICAST_JOIN] = "multicast join", 672b1b5b60SSagi Grimberg [RDMA_CM_EVENT_MULTICAST_ERROR] = "multicast error", 682b1b5b60SSagi Grimberg [RDMA_CM_EVENT_ADDR_CHANGE] = "address change", 692b1b5b60SSagi Grimberg [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit", 702b1b5b60SSagi Grimberg }; 712b1b5b60SSagi Grimberg 72d9e410ebSMaor Gottlieb static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid 
*mgid, 73d9e410ebSMaor Gottlieb enum ib_gid_type gid_type); 74b5de0c60SJason Gunthorpe 75db7489e0SBart Van Assche const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event) 762b1b5b60SSagi Grimberg { 772b1b5b60SSagi Grimberg size_t index = event; 782b1b5b60SSagi Grimberg 792b1b5b60SSagi Grimberg return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ? 802b1b5b60SSagi Grimberg cma_events[index] : "unrecognized event"; 812b1b5b60SSagi Grimberg } 822b1b5b60SSagi Grimberg EXPORT_SYMBOL(rdma_event_msg); 832b1b5b60SSagi Grimberg 8477a5db13SSteve Wise const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id, 8577a5db13SSteve Wise int reason) 8677a5db13SSteve Wise { 8777a5db13SSteve Wise if (rdma_ib_or_roce(id->device, id->port_num)) 8877a5db13SSteve Wise return ibcm_reject_msg(reason); 8977a5db13SSteve Wise 9077a5db13SSteve Wise if (rdma_protocol_iwarp(id->device, id->port_num)) 9177a5db13SSteve Wise return iwcm_reject_msg(reason); 9277a5db13SSteve Wise 9377a5db13SSteve Wise WARN_ON_ONCE(1); 9477a5db13SSteve Wise return "unrecognized transport"; 9577a5db13SSteve Wise } 9677a5db13SSteve Wise EXPORT_SYMBOL(rdma_reject_msg); 9777a5db13SSteve Wise 98dd302ee4SLeon Romanovsky /** 99dd302ee4SLeon Romanovsky * rdma_is_consumer_reject - return true if the consumer rejected the connect 100dd302ee4SLeon Romanovsky * request. 101dd302ee4SLeon Romanovsky * @id: Communication identifier that received the REJECT event. 102dd302ee4SLeon Romanovsky * @reason: Value returned in the REJECT event status field. 
103dd302ee4SLeon Romanovsky */ 104dd302ee4SLeon Romanovsky static bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason) 1055042a73dSSteve Wise { 1065042a73dSSteve Wise if (rdma_ib_or_roce(id->device, id->port_num)) 1075042a73dSSteve Wise return reason == IB_CM_REJ_CONSUMER_DEFINED; 1085042a73dSSteve Wise 1095042a73dSSteve Wise if (rdma_protocol_iwarp(id->device, id->port_num)) 1105042a73dSSteve Wise return reason == -ECONNREFUSED; 1115042a73dSSteve Wise 1125042a73dSSteve Wise WARN_ON_ONCE(1); 1135042a73dSSteve Wise return false; 1145042a73dSSteve Wise } 1155042a73dSSteve Wise 1165f244104SSteve Wise const void *rdma_consumer_reject_data(struct rdma_cm_id *id, 1175f244104SSteve Wise struct rdma_cm_event *ev, u8 *data_len) 1185f244104SSteve Wise { 1195f244104SSteve Wise const void *p; 1205f244104SSteve Wise 1215f244104SSteve Wise if (rdma_is_consumer_reject(id, ev->status)) { 1225f244104SSteve Wise *data_len = ev->param.conn.private_data_len; 1235f244104SSteve Wise p = ev->param.conn.private_data; 1245f244104SSteve Wise } else { 1255f244104SSteve Wise *data_len = 0; 1265f244104SSteve Wise p = NULL; 1275f244104SSteve Wise } 1285f244104SSteve Wise return p; 1295f244104SSteve Wise } 1305f244104SSteve Wise EXPORT_SYMBOL(rdma_consumer_reject_data); 1315f244104SSteve Wise 132fbdb0a91SSteve Wise /** 133fbdb0a91SSteve Wise * rdma_iw_cm_id() - return the iw_cm_id pointer for this cm_id. 
134fbdb0a91SSteve Wise * @id: Communication Identifier 135fbdb0a91SSteve Wise */ 136fbdb0a91SSteve Wise struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id) 137fbdb0a91SSteve Wise { 138fbdb0a91SSteve Wise struct rdma_id_private *id_priv; 139fbdb0a91SSteve Wise 140fbdb0a91SSteve Wise id_priv = container_of(id, struct rdma_id_private, id); 141fbdb0a91SSteve Wise if (id->device->node_type == RDMA_NODE_RNIC) 142fbdb0a91SSteve Wise return id_priv->cm_id.iw; 143fbdb0a91SSteve Wise return NULL; 144fbdb0a91SSteve Wise } 145fbdb0a91SSteve Wise EXPORT_SYMBOL(rdma_iw_cm_id); 146fbdb0a91SSteve Wise 147fbdb0a91SSteve Wise /** 148fbdb0a91SSteve Wise * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack. 149fbdb0a91SSteve Wise * @res: rdma resource tracking entry pointer 150fbdb0a91SSteve Wise */ 151fbdb0a91SSteve Wise struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res) 152fbdb0a91SSteve Wise { 153fbdb0a91SSteve Wise struct rdma_id_private *id_priv = 154fbdb0a91SSteve Wise container_of(res, struct rdma_id_private, res); 155fbdb0a91SSteve Wise 156fbdb0a91SSteve Wise return &id_priv->id; 157fbdb0a91SSteve Wise } 158fbdb0a91SSteve Wise EXPORT_SYMBOL(rdma_res_to_id); 159fbdb0a91SSteve Wise 16011a0ae4cSJason Gunthorpe static int cma_add_one(struct ib_device *device); 1617c1eb45aSHaggai Eran static void cma_remove_one(struct ib_device *device, void *client_data); 162e51060f0SSean Hefty 163e51060f0SSean Hefty static struct ib_client cma_client = { 164e51060f0SSean Hefty .name = "cma", 165e51060f0SSean Hefty .add = cma_add_one, 166e51060f0SSean Hefty .remove = cma_remove_one 167e51060f0SSean Hefty }; 168e51060f0SSean Hefty 169c1a0b23bSMichael S. 
Tsirkin static struct ib_sa_client sa_client; 170e51060f0SSean Hefty static LIST_HEAD(dev_list); 171e51060f0SSean Hefty static LIST_HEAD(listen_any_list); 172e51060f0SSean Hefty static DEFINE_MUTEX(lock); 173fc008bdbSPatrisious Haddad static struct rb_root id_table = RB_ROOT; 174fc008bdbSPatrisious Haddad /* Serialize operations of id_table tree */ 175fc008bdbSPatrisious Haddad static DEFINE_SPINLOCK(id_table_lock); 176e51060f0SSean Hefty static struct workqueue_struct *cma_wq; 177c7d03a00SAlexey Dobriyan static unsigned int cma_pernet_id; 178e51060f0SSean Hefty 1794be74b42SHaggai Eran struct cma_pernet { 18063826753SMatthew Wilcox struct xarray tcp_ps; 18163826753SMatthew Wilcox struct xarray udp_ps; 18263826753SMatthew Wilcox struct xarray ipoib_ps; 18363826753SMatthew Wilcox struct xarray ib_ps; 1844be74b42SHaggai Eran }; 1854be74b42SHaggai Eran 1864be74b42SHaggai Eran static struct cma_pernet *cma_pernet(struct net *net) 187aac978e1SHaggai Eran { 1884be74b42SHaggai Eran return net_generic(net, cma_pernet_id); 1894be74b42SHaggai Eran } 1904be74b42SHaggai Eran 19163826753SMatthew Wilcox static 19263826753SMatthew Wilcox struct xarray *cma_pernet_xa(struct net *net, enum rdma_ucm_port_space ps) 1934be74b42SHaggai Eran { 1944be74b42SHaggai Eran struct cma_pernet *pernet = cma_pernet(net); 1954be74b42SHaggai Eran 196aac978e1SHaggai Eran switch (ps) { 197aac978e1SHaggai Eran case RDMA_PS_TCP: 1984be74b42SHaggai Eran return &pernet->tcp_ps; 199aac978e1SHaggai Eran case RDMA_PS_UDP: 2004be74b42SHaggai Eran return &pernet->udp_ps; 201aac978e1SHaggai Eran case RDMA_PS_IPOIB: 2024be74b42SHaggai Eran return &pernet->ipoib_ps; 203aac978e1SHaggai Eran case RDMA_PS_IB: 2044be74b42SHaggai Eran return &pernet->ib_ps; 205aac978e1SHaggai Eran default: 206aac978e1SHaggai Eran return NULL; 207aac978e1SHaggai Eran } 208aac978e1SHaggai Eran } 209aac978e1SHaggai Eran 210fc008bdbSPatrisious Haddad struct id_table_entry { 211fc008bdbSPatrisious Haddad struct list_head id_list; 
212fc008bdbSPatrisious Haddad struct rb_node rb_node; 213fc008bdbSPatrisious Haddad }; 214fc008bdbSPatrisious Haddad 215e51060f0SSean Hefty struct cma_device { 216e51060f0SSean Hefty struct list_head list; 217e51060f0SSean Hefty struct ib_device *device; 218e51060f0SSean Hefty struct completion comp; 219be439912SParav Pandit refcount_t refcount; 220e51060f0SSean Hefty struct list_head id_list; 221045959dbSMatan Barak enum ib_gid_type *default_gid_type; 22289052d78SMajd Dibbiny u8 *default_roce_tos; 223e51060f0SSean Hefty }; 224e51060f0SSean Hefty 225e51060f0SSean Hefty struct rdma_bind_list { 2262253fc0cSSteve Wise enum rdma_ucm_port_space ps; 227e51060f0SSean Hefty struct hlist_head owners; 228e51060f0SSean Hefty unsigned short port; 229e51060f0SSean Hefty }; 230e51060f0SSean Hefty 2312253fc0cSSteve Wise static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps, 232aac978e1SHaggai Eran struct rdma_bind_list *bind_list, int snum) 233aac978e1SHaggai Eran { 23463826753SMatthew Wilcox struct xarray *xa = cma_pernet_xa(net, ps); 235aac978e1SHaggai Eran 23663826753SMatthew Wilcox return xa_insert(xa, snum, bind_list, GFP_KERNEL); 237aac978e1SHaggai Eran } 238aac978e1SHaggai Eran 2394be74b42SHaggai Eran static struct rdma_bind_list *cma_ps_find(struct net *net, 2402253fc0cSSteve Wise enum rdma_ucm_port_space ps, int snum) 241aac978e1SHaggai Eran { 24263826753SMatthew Wilcox struct xarray *xa = cma_pernet_xa(net, ps); 243aac978e1SHaggai Eran 24463826753SMatthew Wilcox return xa_load(xa, snum); 245aac978e1SHaggai Eran } 246aac978e1SHaggai Eran 2472253fc0cSSteve Wise static void cma_ps_remove(struct net *net, enum rdma_ucm_port_space ps, 2482253fc0cSSteve Wise int snum) 249aac978e1SHaggai Eran { 25063826753SMatthew Wilcox struct xarray *xa = cma_pernet_xa(net, ps); 251aac978e1SHaggai Eran 25263826753SMatthew Wilcox xa_erase(xa, snum); 253aac978e1SHaggai Eran } 254aac978e1SHaggai Eran 25568602120SSean Hefty enum { 25668602120SSean Hefty CMA_OPTION_AFONLY, 
25768602120SSean Hefty }; 25868602120SSean Hefty 2595ff8c8faSParav Pandit void cma_dev_get(struct cma_device *cma_dev) 260218a773fSMatan Barak { 261be439912SParav Pandit refcount_inc(&cma_dev->refcount); 262218a773fSMatan Barak } 263218a773fSMatan Barak 2645ff8c8faSParav Pandit void cma_dev_put(struct cma_device *cma_dev) 2655ff8c8faSParav Pandit { 266be439912SParav Pandit if (refcount_dec_and_test(&cma_dev->refcount)) 2675ff8c8faSParav Pandit complete(&cma_dev->comp); 2685ff8c8faSParav Pandit } 2695ff8c8faSParav Pandit 270045959dbSMatan Barak struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter, 271045959dbSMatan Barak void *cookie) 272045959dbSMatan Barak { 273045959dbSMatan Barak struct cma_device *cma_dev; 274045959dbSMatan Barak struct cma_device *found_cma_dev = NULL; 275045959dbSMatan Barak 276045959dbSMatan Barak mutex_lock(&lock); 277045959dbSMatan Barak 278045959dbSMatan Barak list_for_each_entry(cma_dev, &dev_list, list) 279045959dbSMatan Barak if (filter(cma_dev->device, cookie)) { 280045959dbSMatan Barak found_cma_dev = cma_dev; 281045959dbSMatan Barak break; 282045959dbSMatan Barak } 283045959dbSMatan Barak 284045959dbSMatan Barak if (found_cma_dev) 2855ff8c8faSParav Pandit cma_dev_get(found_cma_dev); 286045959dbSMatan Barak mutex_unlock(&lock); 287045959dbSMatan Barak return found_cma_dev; 288045959dbSMatan Barak } 289045959dbSMatan Barak 290045959dbSMatan Barak int cma_get_default_gid_type(struct cma_device *cma_dev, 2911fb7f897SMark Bloch u32 port) 292045959dbSMatan Barak { 29324dc831bSYuval Shaia if (!rdma_is_port_valid(cma_dev->device, port)) 294045959dbSMatan Barak return -EINVAL; 295045959dbSMatan Barak 296045959dbSMatan Barak return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)]; 297045959dbSMatan Barak } 298045959dbSMatan Barak 299045959dbSMatan Barak int cma_set_default_gid_type(struct cma_device *cma_dev, 3001fb7f897SMark Bloch u32 port, 301045959dbSMatan Barak enum ib_gid_type default_gid_type) 
302045959dbSMatan Barak { 303045959dbSMatan Barak unsigned long supported_gids; 304045959dbSMatan Barak 30524dc831bSYuval Shaia if (!rdma_is_port_valid(cma_dev->device, port)) 306045959dbSMatan Barak return -EINVAL; 307045959dbSMatan Barak 3081c15b4f2SAvihai Horon if (default_gid_type == IB_GID_TYPE_IB && 3091c15b4f2SAvihai Horon rdma_protocol_roce_eth_encap(cma_dev->device, port)) 3101c15b4f2SAvihai Horon default_gid_type = IB_GID_TYPE_ROCE; 3111c15b4f2SAvihai Horon 312045959dbSMatan Barak supported_gids = roce_gid_type_mask_support(cma_dev->device, port); 313045959dbSMatan Barak 314045959dbSMatan Barak if (!(supported_gids & 1 << default_gid_type)) 315045959dbSMatan Barak return -EINVAL; 316045959dbSMatan Barak 317045959dbSMatan Barak cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] = 318045959dbSMatan Barak default_gid_type; 319045959dbSMatan Barak 320045959dbSMatan Barak return 0; 321045959dbSMatan Barak } 322045959dbSMatan Barak 3231fb7f897SMark Bloch int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port) 32489052d78SMajd Dibbiny { 32589052d78SMajd Dibbiny if (!rdma_is_port_valid(cma_dev->device, port)) 32689052d78SMajd Dibbiny return -EINVAL; 32789052d78SMajd Dibbiny 32889052d78SMajd Dibbiny return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)]; 32989052d78SMajd Dibbiny } 33089052d78SMajd Dibbiny 3311fb7f897SMark Bloch int cma_set_default_roce_tos(struct cma_device *cma_dev, u32 port, 33289052d78SMajd Dibbiny u8 default_roce_tos) 33389052d78SMajd Dibbiny { 33489052d78SMajd Dibbiny if (!rdma_is_port_valid(cma_dev->device, port)) 33589052d78SMajd Dibbiny return -EINVAL; 33689052d78SMajd Dibbiny 33789052d78SMajd Dibbiny cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)] = 33889052d78SMajd Dibbiny default_roce_tos; 33989052d78SMajd Dibbiny 34089052d78SMajd Dibbiny return 0; 34189052d78SMajd Dibbiny } 342045959dbSMatan Barak struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev) 
343045959dbSMatan Barak { 344045959dbSMatan Barak return cma_dev->device; 345045959dbSMatan Barak } 346045959dbSMatan Barak 347e51060f0SSean Hefty /* 348e51060f0SSean Hefty * Device removal can occur at anytime, so we need extra handling to 349e51060f0SSean Hefty * serialize notifying the user of device removal with other callbacks. 350e51060f0SSean Hefty * We do this by disabling removal notification while a callback is in process, 351e51060f0SSean Hefty * and reporting it after the callback completes. 352e51060f0SSean Hefty */ 353e51060f0SSean Hefty 354c8f6a362SSean Hefty struct cma_multicast { 355c8f6a362SSean Hefty struct rdma_id_private *id_priv; 356fe454dc3SAvihai Horon union { 357b5de0c60SJason Gunthorpe struct ib_sa_multicast *sa_mc; 358fe454dc3SAvihai Horon struct { 359fe454dc3SAvihai Horon struct work_struct work; 360fe454dc3SAvihai Horon struct rdma_cm_event event; 361fe454dc3SAvihai Horon } iboe_join; 362fe454dc3SAvihai Horon }; 363c8f6a362SSean Hefty struct list_head list; 364c8f6a362SSean Hefty void *context; 3653f446754SRoland Dreier struct sockaddr_storage addr; 366ab15c95aSAlex Vesker u8 join_state; 367c8f6a362SSean Hefty }; 368c8f6a362SSean Hefty 369e51060f0SSean Hefty struct cma_work { 370e51060f0SSean Hefty struct work_struct work; 371e51060f0SSean Hefty struct rdma_id_private *id; 372550e5ca7SNir Muchtar enum rdma_cm_state old_state; 373550e5ca7SNir Muchtar enum rdma_cm_state new_state; 374e51060f0SSean Hefty struct rdma_cm_event event; 375e51060f0SSean Hefty }; 376e51060f0SSean Hefty 377e51060f0SSean Hefty union cma_ip_addr { 378e51060f0SSean Hefty struct in6_addr ip6; 379e51060f0SSean Hefty struct { 3801b90c137SAl Viro __be32 pad[3]; 3811b90c137SAl Viro __be32 addr; 382e51060f0SSean Hefty } ip4; 383e51060f0SSean Hefty }; 384e51060f0SSean Hefty 385e51060f0SSean Hefty struct cma_hdr { 386e51060f0SSean Hefty u8 cma_version; 387e51060f0SSean Hefty u8 ip_version; /* IP version: 7:4 */ 3881b90c137SAl Viro __be16 port; 389e51060f0SSean Hefty union 
cma_ip_addr src_addr; 390e51060f0SSean Hefty union cma_ip_addr dst_addr; 391e51060f0SSean Hefty }; 392e51060f0SSean Hefty 393e51060f0SSean Hefty #define CMA_VERSION 0x00 394e51060f0SSean Hefty 3954c21b5bcSHaggai Eran struct cma_req_info { 3962918c1a9SParav Pandit struct sockaddr_storage listen_addr_storage; 3972918c1a9SParav Pandit struct sockaddr_storage src_addr_storage; 3984c21b5bcSHaggai Eran struct ib_device *device; 3994c21b5bcSHaggai Eran union ib_gid local_gid; 4004c21b5bcSHaggai Eran __be64 service_id; 40105e0b86cSParav Pandit int port; 40205e0b86cSParav Pandit bool has_gid; 4034c21b5bcSHaggai Eran u16 pkey; 4044c21b5bcSHaggai Eran }; 4054c21b5bcSHaggai Eran 406e51060f0SSean Hefty static int cma_comp_exch(struct rdma_id_private *id_priv, 407550e5ca7SNir Muchtar enum rdma_cm_state comp, enum rdma_cm_state exch) 408e51060f0SSean Hefty { 409e51060f0SSean Hefty unsigned long flags; 410e51060f0SSean Hefty int ret; 411e51060f0SSean Hefty 4122a7cec53SJason Gunthorpe /* 4132a7cec53SJason Gunthorpe * The FSM uses a funny double locking where state is protected by both 4142a7cec53SJason Gunthorpe * the handler_mutex and the spinlock. State is not allowed to change 415071ba4ccSJason Gunthorpe * to/from a handler_mutex protected value without also holding 4162a7cec53SJason Gunthorpe * handler_mutex. 
4172a7cec53SJason Gunthorpe */ 418071ba4ccSJason Gunthorpe if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT) 4192a7cec53SJason Gunthorpe lockdep_assert_held(&id_priv->handler_mutex); 4202a7cec53SJason Gunthorpe 421e51060f0SSean Hefty spin_lock_irqsave(&id_priv->lock, flags); 422e51060f0SSean Hefty if ((ret = (id_priv->state == comp))) 423e51060f0SSean Hefty id_priv->state = exch; 424e51060f0SSean Hefty spin_unlock_irqrestore(&id_priv->lock, flags); 425e51060f0SSean Hefty return ret; 426e51060f0SSean Hefty } 427e51060f0SSean Hefty 4284c21b5bcSHaggai Eran static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr) 429e51060f0SSean Hefty { 430e51060f0SSean Hefty return hdr->ip_version >> 4; 431e51060f0SSean Hefty } 432e51060f0SSean Hefty 433fc008bdbSPatrisious Haddad static void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver) 434e51060f0SSean Hefty { 435e51060f0SSean Hefty hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF); 436e51060f0SSean Hefty } 437e51060f0SSean Hefty 438fc008bdbSPatrisious Haddad static struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv) 439fc008bdbSPatrisious Haddad { 440fc008bdbSPatrisious Haddad return (struct sockaddr *)&id_priv->id.route.addr.src_addr; 441fc008bdbSPatrisious Haddad } 442fc008bdbSPatrisious Haddad 443fc008bdbSPatrisious Haddad static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv) 444fc008bdbSPatrisious Haddad { 445fc008bdbSPatrisious Haddad return (struct sockaddr *)&id_priv->id.route.addr.dst_addr; 446fc008bdbSPatrisious Haddad } 447fc008bdbSPatrisious Haddad 448bee3c3c9SMoni Shoua static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join) 449bee3c3c9SMoni Shoua { 450bee3c3c9SMoni Shoua struct in_device *in_dev = NULL; 451bee3c3c9SMoni Shoua 452bee3c3c9SMoni Shoua if (ndev) { 453bee3c3c9SMoni Shoua rtnl_lock(); 454bee3c3c9SMoni Shoua in_dev = __in_dev_get_rtnl(ndev); 455bee3c3c9SMoni Shoua if (in_dev) { 456bee3c3c9SMoni Shoua if (join) 457bee3c3c9SMoni Shoua 
ip_mc_inc_group(in_dev, 458bee3c3c9SMoni Shoua *(__be32 *)(mgid->raw + 12)); 459bee3c3c9SMoni Shoua else 460bee3c3c9SMoni Shoua ip_mc_dec_group(in_dev, 461bee3c3c9SMoni Shoua *(__be32 *)(mgid->raw + 12)); 462bee3c3c9SMoni Shoua } 463bee3c3c9SMoni Shoua rtnl_unlock(); 464bee3c3c9SMoni Shoua } 465bee3c3c9SMoni Shoua return (in_dev) ? 0 : -ENODEV; 466bee3c3c9SMoni Shoua } 467bee3c3c9SMoni Shoua 468fc008bdbSPatrisious Haddad static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa, 469fc008bdbSPatrisious Haddad struct id_table_entry *entry_b) 470fc008bdbSPatrisious Haddad { 471fc008bdbSPatrisious Haddad struct rdma_id_private *id_priv = list_first_entry( 472fc008bdbSPatrisious Haddad &entry_b->id_list, struct rdma_id_private, id_list_entry); 473fc008bdbSPatrisious Haddad int ifindex_b = id_priv->id.route.addr.dev_addr.bound_dev_if; 474fc008bdbSPatrisious Haddad struct sockaddr *sb = cma_dst_addr(id_priv); 475fc008bdbSPatrisious Haddad 476fc008bdbSPatrisious Haddad if (ifindex_a != ifindex_b) 477fc008bdbSPatrisious Haddad return (ifindex_a > ifindex_b) ? 
1 : -1; 478fc008bdbSPatrisious Haddad 479fc008bdbSPatrisious Haddad if (sa->sa_family != sb->sa_family) 480fc008bdbSPatrisious Haddad return sa->sa_family - sb->sa_family; 481fc008bdbSPatrisious Haddad 482fc008bdbSPatrisious Haddad if (sa->sa_family == AF_INET) 483fc008bdbSPatrisious Haddad return memcmp((char *)&((struct sockaddr_in *)sa)->sin_addr, 484fc008bdbSPatrisious Haddad (char *)&((struct sockaddr_in *)sb)->sin_addr, 485fc008bdbSPatrisious Haddad sizeof(((struct sockaddr_in *)sa)->sin_addr)); 486fc008bdbSPatrisious Haddad 487fc008bdbSPatrisious Haddad return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr, 488fc008bdbSPatrisious Haddad &((struct sockaddr_in6 *)sb)->sin6_addr); 489fc008bdbSPatrisious Haddad } 490fc008bdbSPatrisious Haddad 491fc008bdbSPatrisious Haddad static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv) 492fc008bdbSPatrisious Haddad { 493fc008bdbSPatrisious Haddad struct rb_node **new, *parent = NULL; 494fc008bdbSPatrisious Haddad struct id_table_entry *this, *node; 495fc008bdbSPatrisious Haddad unsigned long flags; 496fc008bdbSPatrisious Haddad int result; 497fc008bdbSPatrisious Haddad 498fc008bdbSPatrisious Haddad node = kzalloc(sizeof(*node), GFP_KERNEL); 499fc008bdbSPatrisious Haddad if (!node) 500fc008bdbSPatrisious Haddad return -ENOMEM; 501fc008bdbSPatrisious Haddad 502fc008bdbSPatrisious Haddad spin_lock_irqsave(&id_table_lock, flags); 503fc008bdbSPatrisious Haddad new = &id_table.rb_node; 504fc008bdbSPatrisious Haddad while (*new) { 505fc008bdbSPatrisious Haddad this = container_of(*new, struct id_table_entry, rb_node); 506fc008bdbSPatrisious Haddad result = compare_netdev_and_ip( 507fc008bdbSPatrisious Haddad node_id_priv->id.route.addr.dev_addr.bound_dev_if, 508fc008bdbSPatrisious Haddad cma_dst_addr(node_id_priv), this); 509fc008bdbSPatrisious Haddad 510fc008bdbSPatrisious Haddad parent = *new; 511fc008bdbSPatrisious Haddad if (result < 0) 512fc008bdbSPatrisious Haddad new = &((*new)->rb_left); 
513fc008bdbSPatrisious Haddad else if (result > 0) 514fc008bdbSPatrisious Haddad new = &((*new)->rb_right); 515fc008bdbSPatrisious Haddad else { 516fc008bdbSPatrisious Haddad list_add_tail(&node_id_priv->id_list_entry, 517fc008bdbSPatrisious Haddad &this->id_list); 518fc008bdbSPatrisious Haddad kfree(node); 519fc008bdbSPatrisious Haddad goto unlock; 520fc008bdbSPatrisious Haddad } 521fc008bdbSPatrisious Haddad } 522fc008bdbSPatrisious Haddad 523fc008bdbSPatrisious Haddad INIT_LIST_HEAD(&node->id_list); 524fc008bdbSPatrisious Haddad list_add_tail(&node_id_priv->id_list_entry, &node->id_list); 525fc008bdbSPatrisious Haddad 526fc008bdbSPatrisious Haddad rb_link_node(&node->rb_node, parent, new); 527fc008bdbSPatrisious Haddad rb_insert_color(&node->rb_node, &id_table); 528fc008bdbSPatrisious Haddad 529fc008bdbSPatrisious Haddad unlock: 530fc008bdbSPatrisious Haddad spin_unlock_irqrestore(&id_table_lock, flags); 531fc008bdbSPatrisious Haddad return 0; 532fc008bdbSPatrisious Haddad } 533fc008bdbSPatrisious Haddad 534fc008bdbSPatrisious Haddad static struct id_table_entry * 535fc008bdbSPatrisious Haddad node_from_ndev_ip(struct rb_root *root, int ifindex, struct sockaddr *sa) 536fc008bdbSPatrisious Haddad { 537fc008bdbSPatrisious Haddad struct rb_node *node = root->rb_node; 538fc008bdbSPatrisious Haddad struct id_table_entry *data; 539fc008bdbSPatrisious Haddad int result; 540fc008bdbSPatrisious Haddad 541fc008bdbSPatrisious Haddad while (node) { 542fc008bdbSPatrisious Haddad data = container_of(node, struct id_table_entry, rb_node); 543fc008bdbSPatrisious Haddad result = compare_netdev_and_ip(ifindex, sa, data); 544fc008bdbSPatrisious Haddad if (result < 0) 545fc008bdbSPatrisious Haddad node = node->rb_left; 546fc008bdbSPatrisious Haddad else if (result > 0) 547fc008bdbSPatrisious Haddad node = node->rb_right; 548fc008bdbSPatrisious Haddad else 549fc008bdbSPatrisious Haddad return data; 550fc008bdbSPatrisious Haddad } 551fc008bdbSPatrisious Haddad 552fc008bdbSPatrisious 
Haddad return NULL; 553fc008bdbSPatrisious Haddad } 554fc008bdbSPatrisious Haddad 555fc008bdbSPatrisious Haddad static void cma_remove_id_from_tree(struct rdma_id_private *id_priv) 556fc008bdbSPatrisious Haddad { 557fc008bdbSPatrisious Haddad struct id_table_entry *data; 558fc008bdbSPatrisious Haddad unsigned long flags; 559fc008bdbSPatrisious Haddad 560fc008bdbSPatrisious Haddad spin_lock_irqsave(&id_table_lock, flags); 561fc008bdbSPatrisious Haddad if (list_empty(&id_priv->id_list_entry)) 562fc008bdbSPatrisious Haddad goto out; 563fc008bdbSPatrisious Haddad 564fc008bdbSPatrisious Haddad data = node_from_ndev_ip(&id_table, 565fc008bdbSPatrisious Haddad id_priv->id.route.addr.dev_addr.bound_dev_if, 566fc008bdbSPatrisious Haddad cma_dst_addr(id_priv)); 567fc008bdbSPatrisious Haddad if (!data) 568fc008bdbSPatrisious Haddad goto out; 569fc008bdbSPatrisious Haddad 570fc008bdbSPatrisious Haddad list_del_init(&id_priv->id_list_entry); 571fc008bdbSPatrisious Haddad if (list_empty(&data->id_list)) { 572fc008bdbSPatrisious Haddad rb_erase(&data->rb_node, &id_table); 573fc008bdbSPatrisious Haddad kfree(data); 574fc008bdbSPatrisious Haddad } 575fc008bdbSPatrisious Haddad out: 576fc008bdbSPatrisious Haddad spin_unlock_irqrestore(&id_table_lock, flags); 577fc008bdbSPatrisious Haddad } 578fc008bdbSPatrisious Haddad 579045959dbSMatan Barak static void _cma_attach_to_dev(struct rdma_id_private *id_priv, 580e51060f0SSean Hefty struct cma_device *cma_dev) 581e51060f0SSean Hefty { 5825ff8c8faSParav Pandit cma_dev_get(cma_dev); 583e51060f0SSean Hefty id_priv->cma_dev = cma_dev; 584e51060f0SSean Hefty id_priv->id.device = cma_dev->device; 5853c86aa70SEli Cohen id_priv->id.route.addr.dev_addr.transport = 5863c86aa70SEli Cohen rdma_node_get_transport(cma_dev->device->node_type); 58799cfddb8SJason Gunthorpe list_add_tail(&id_priv->device_item, &cma_dev->id_list); 588b09c4d70SLeon Romanovsky 589278f74b3SChuck Lever trace_cm_id_attach(id_priv, cma_dev->device); 590e51060f0SSean Hefty } 
591e51060f0SSean Hefty 592045959dbSMatan Barak static void cma_attach_to_dev(struct rdma_id_private *id_priv, 593045959dbSMatan Barak struct cma_device *cma_dev) 594045959dbSMatan Barak { 595045959dbSMatan Barak _cma_attach_to_dev(id_priv, cma_dev); 596045959dbSMatan Barak id_priv->gid_type = 597045959dbSMatan Barak cma_dev->default_gid_type[id_priv->id.port_num - 598045959dbSMatan Barak rdma_start_port(cma_dev->device)]; 599045959dbSMatan Barak } 600045959dbSMatan Barak 601a396d43aSSean Hefty static void cma_release_dev(struct rdma_id_private *id_priv) 602e51060f0SSean Hefty { 603a396d43aSSean Hefty mutex_lock(&lock); 60499cfddb8SJason Gunthorpe list_del_init(&id_priv->device_item); 6055ff8c8faSParav Pandit cma_dev_put(id_priv->cma_dev); 606e51060f0SSean Hefty id_priv->cma_dev = NULL; 607889d916bSShay Drory id_priv->id.device = NULL; 608e246b7c0SLeon Romanovsky if (id_priv->id.route.addr.dev_addr.sgid_attr) { 609e246b7c0SLeon Romanovsky rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr); 610e246b7c0SLeon Romanovsky id_priv->id.route.addr.dev_addr.sgid_attr = NULL; 611e246b7c0SLeon Romanovsky } 612a396d43aSSean Hefty mutex_unlock(&lock); 613e51060f0SSean Hefty } 614e51060f0SSean Hefty 615f4753834SSean Hefty static inline unsigned short cma_family(struct rdma_id_private *id_priv) 616f4753834SSean Hefty { 617f4753834SSean Hefty return id_priv->id.route.addr.src_addr.ss_family; 618f4753834SSean Hefty } 619f4753834SSean Hefty 6205c438135SSean Hefty static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey) 621c8f6a362SSean Hefty { 622c8f6a362SSean Hefty struct ib_sa_mcmember_rec rec; 623c8f6a362SSean Hefty int ret = 0; 624c8f6a362SSean Hefty 6255c438135SSean Hefty if (id_priv->qkey) { 6265c438135SSean Hefty if (qkey && id_priv->qkey != qkey) 6275c438135SSean Hefty return -EINVAL; 628d2ca39f2SYossi Etigin return 0; 6295c438135SSean Hefty } 6305c438135SSean Hefty 6315c438135SSean Hefty if (qkey) { 6325c438135SSean Hefty id_priv->qkey = qkey; 
6335c438135SSean Hefty return 0; 6345c438135SSean Hefty } 635d2ca39f2SYossi Etigin 636d2ca39f2SYossi Etigin switch (id_priv->id.ps) { 637c8f6a362SSean Hefty case RDMA_PS_UDP: 6385c438135SSean Hefty case RDMA_PS_IB: 639d2ca39f2SYossi Etigin id_priv->qkey = RDMA_UDP_QKEY; 640c8f6a362SSean Hefty break; 641c8f6a362SSean Hefty case RDMA_PS_IPOIB: 642d2ca39f2SYossi Etigin ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid); 643d2ca39f2SYossi Etigin ret = ib_sa_get_mcmember_rec(id_priv->id.device, 644d2ca39f2SYossi Etigin id_priv->id.port_num, &rec.mgid, 645d2ca39f2SYossi Etigin &rec); 646d2ca39f2SYossi Etigin if (!ret) 647d2ca39f2SYossi Etigin id_priv->qkey = be32_to_cpu(rec.qkey); 648c8f6a362SSean Hefty break; 649c8f6a362SSean Hefty default: 650c8f6a362SSean Hefty break; 651c8f6a362SSean Hefty } 652c8f6a362SSean Hefty return ret; 653c8f6a362SSean Hefty } 654c8f6a362SSean Hefty 655680f920aSSean Hefty static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr) 656680f920aSSean Hefty { 657680f920aSSean Hefty dev_addr->dev_type = ARPHRD_INFINIBAND; 658680f920aSSean Hefty rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr); 659680f920aSSean Hefty ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey)); 660680f920aSSean Hefty } 661680f920aSSean Hefty 662680f920aSSean Hefty static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) 663680f920aSSean Hefty { 664680f920aSSean Hefty int ret; 665680f920aSSean Hefty 666680f920aSSean Hefty if (addr->sa_family != AF_IB) { 667575c7e58SParav Pandit ret = rdma_translate_ip(addr, dev_addr); 668680f920aSSean Hefty } else { 669680f920aSSean Hefty cma_translate_ib((struct sockaddr_ib *) addr, dev_addr); 670680f920aSSean Hefty ret = 0; 671680f920aSSean Hefty } 672680f920aSSean Hefty 673680f920aSSean Hefty return ret; 674680f920aSSean Hefty } 675680f920aSSean Hefty 6764ed13a5fSParav Pandit static const struct ib_gid_attr * 6771fb7f897SMark Bloch cma_validate_port(struct ib_device 
*device, u32 port, 678045959dbSMatan Barak enum ib_gid_type gid_type, 6792493a57bSParav Pandit union ib_gid *gid, 6802493a57bSParav Pandit struct rdma_id_private *id_priv) 6817c11147dSMichael Wang { 6822493a57bSParav Pandit struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 6832493a57bSParav Pandit int bound_if_index = dev_addr->bound_dev_if; 6844ed13a5fSParav Pandit const struct ib_gid_attr *sgid_attr; 6852493a57bSParav Pandit int dev_type = dev_addr->dev_type; 686abae1b71SMatan Barak struct net_device *ndev = NULL; 6877c11147dSMichael Wang 68841c61401SParav Pandit if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net)) 68941c61401SParav Pandit return ERR_PTR(-ENODEV); 69041c61401SParav Pandit 6917c11147dSMichael Wang if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port)) 6924ed13a5fSParav Pandit return ERR_PTR(-ENODEV); 6937c11147dSMichael Wang 6947c11147dSMichael Wang if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port)) 6954ed13a5fSParav Pandit return ERR_PTR(-ENODEV); 6967c11147dSMichael Wang 69700db63c1SParav Pandit if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) { 69866c74d74SParav Pandit ndev = dev_get_by_index(dev_addr->net, bound_if_index); 69900db63c1SParav Pandit if (!ndev) 7004ed13a5fSParav Pandit return ERR_PTR(-ENODEV); 70100db63c1SParav Pandit } else { 702045959dbSMatan Barak gid_type = IB_GID_TYPE_IB; 70300db63c1SParav Pandit } 704abae1b71SMatan Barak 7054ed13a5fSParav Pandit sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev); 706abae1b71SMatan Barak if (ndev) 707abae1b71SMatan Barak dev_put(ndev); 7084ed13a5fSParav Pandit return sgid_attr; 7094ed13a5fSParav Pandit } 7107c11147dSMichael Wang 7114ed13a5fSParav Pandit static void cma_bind_sgid_attr(struct rdma_id_private *id_priv, 7124ed13a5fSParav Pandit const struct ib_gid_attr *sgid_attr) 7134ed13a5fSParav Pandit { 7144ed13a5fSParav Pandit WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr); 
7154ed13a5fSParav Pandit id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr; 7167c11147dSMichael Wang } 7177c11147dSMichael Wang 718ff11c6cdSParav Pandit /** 719ff11c6cdSParav Pandit * cma_acquire_dev_by_src_ip - Acquire cma device, port, gid attribute 720ff11c6cdSParav Pandit * based on source ip address. 721ff11c6cdSParav Pandit * @id_priv: cm_id which should be bound to cma device 722ff11c6cdSParav Pandit * 723ff11c6cdSParav Pandit * cma_acquire_dev_by_src_ip() binds cm id to cma device, port and GID attribute 724ff11c6cdSParav Pandit * based on source IP address. It returns 0 on success or error code otherwise. 725ff11c6cdSParav Pandit * It is applicable to active and passive side cm_id. 726ff11c6cdSParav Pandit */ 727ff11c6cdSParav Pandit static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv) 728ff11c6cdSParav Pandit { 729ff11c6cdSParav Pandit struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 730ff11c6cdSParav Pandit const struct ib_gid_attr *sgid_attr; 731ff11c6cdSParav Pandit union ib_gid gid, iboe_gid, *gidp; 732ff11c6cdSParav Pandit struct cma_device *cma_dev; 733ff11c6cdSParav Pandit enum ib_gid_type gid_type; 734ff11c6cdSParav Pandit int ret = -ENODEV; 7351fb7f897SMark Bloch u32 port; 736ff11c6cdSParav Pandit 737ff11c6cdSParav Pandit if (dev_addr->dev_type != ARPHRD_INFINIBAND && 738ff11c6cdSParav Pandit id_priv->id.ps == RDMA_PS_IPOIB) 739ff11c6cdSParav Pandit return -EINVAL; 740ff11c6cdSParav Pandit 741ff11c6cdSParav Pandit rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, 742ff11c6cdSParav Pandit &iboe_gid); 743ff11c6cdSParav Pandit 744ff11c6cdSParav Pandit memcpy(&gid, dev_addr->src_dev_addr + 745ff11c6cdSParav Pandit rdma_addr_gid_offset(dev_addr), sizeof(gid)); 746ff11c6cdSParav Pandit 747ff11c6cdSParav Pandit mutex_lock(&lock); 748ff11c6cdSParav Pandit list_for_each_entry(cma_dev, &dev_list, list) { 749ea1075edSJason Gunthorpe rdma_for_each_port (cma_dev->device, port) { 750ff11c6cdSParav Pandit 
gidp = rdma_protocol_roce(cma_dev->device, port) ? 751ff11c6cdSParav Pandit &iboe_gid : &gid; 752ff11c6cdSParav Pandit gid_type = cma_dev->default_gid_type[port - 1]; 753ff11c6cdSParav Pandit sgid_attr = cma_validate_port(cma_dev->device, port, 754ff11c6cdSParav Pandit gid_type, gidp, id_priv); 755ff11c6cdSParav Pandit if (!IS_ERR(sgid_attr)) { 756ff11c6cdSParav Pandit id_priv->id.port_num = port; 757ff11c6cdSParav Pandit cma_bind_sgid_attr(id_priv, sgid_attr); 758ff11c6cdSParav Pandit cma_attach_to_dev(id_priv, cma_dev); 759ff11c6cdSParav Pandit ret = 0; 760ff11c6cdSParav Pandit goto out; 761ff11c6cdSParav Pandit } 762ff11c6cdSParav Pandit } 763ff11c6cdSParav Pandit } 764ff11c6cdSParav Pandit out: 765ff11c6cdSParav Pandit mutex_unlock(&lock); 766ff11c6cdSParav Pandit return ret; 767ff11c6cdSParav Pandit } 768ff11c6cdSParav Pandit 76941ab1cb7SParav Pandit /** 77041ab1cb7SParav Pandit * cma_ib_acquire_dev - Acquire cma device, port and SGID attribute 77141ab1cb7SParav Pandit * @id_priv: cm id to bind to cma device 77241ab1cb7SParav Pandit * @listen_id_priv: listener cm id to match against 77341ab1cb7SParav Pandit * @req: Pointer to req structure containaining incoming 77441ab1cb7SParav Pandit * request information 77541ab1cb7SParav Pandit * cma_ib_acquire_dev() acquires cma device, port and SGID attribute when 77641ab1cb7SParav Pandit * rdma device matches for listen_id and incoming request. It also verifies 77741ab1cb7SParav Pandit * that a GID table entry is present for the source address. 77841ab1cb7SParav Pandit * Returns 0 on success, or returns error code otherwise. 
77941ab1cb7SParav Pandit */ 78041ab1cb7SParav Pandit static int cma_ib_acquire_dev(struct rdma_id_private *id_priv, 78141ab1cb7SParav Pandit const struct rdma_id_private *listen_id_priv, 78241ab1cb7SParav Pandit struct cma_req_info *req) 78341ab1cb7SParav Pandit { 78441ab1cb7SParav Pandit struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 78541ab1cb7SParav Pandit const struct ib_gid_attr *sgid_attr; 78641ab1cb7SParav Pandit enum ib_gid_type gid_type; 78741ab1cb7SParav Pandit union ib_gid gid; 78841ab1cb7SParav Pandit 78941ab1cb7SParav Pandit if (dev_addr->dev_type != ARPHRD_INFINIBAND && 79041ab1cb7SParav Pandit id_priv->id.ps == RDMA_PS_IPOIB) 79141ab1cb7SParav Pandit return -EINVAL; 79241ab1cb7SParav Pandit 79341ab1cb7SParav Pandit if (rdma_protocol_roce(req->device, req->port)) 79441ab1cb7SParav Pandit rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, 79541ab1cb7SParav Pandit &gid); 79641ab1cb7SParav Pandit else 79741ab1cb7SParav Pandit memcpy(&gid, dev_addr->src_dev_addr + 79841ab1cb7SParav Pandit rdma_addr_gid_offset(dev_addr), sizeof(gid)); 79941ab1cb7SParav Pandit 80041ab1cb7SParav Pandit gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1]; 80141ab1cb7SParav Pandit sgid_attr = cma_validate_port(req->device, req->port, 80241ab1cb7SParav Pandit gid_type, &gid, id_priv); 80341ab1cb7SParav Pandit if (IS_ERR(sgid_attr)) 80441ab1cb7SParav Pandit return PTR_ERR(sgid_attr); 80541ab1cb7SParav Pandit 80641ab1cb7SParav Pandit id_priv->id.port_num = req->port; 80741ab1cb7SParav Pandit cma_bind_sgid_attr(id_priv, sgid_attr); 80841ab1cb7SParav Pandit /* Need to acquire lock to protect against reader 80941ab1cb7SParav Pandit * of cma_dev->id_list such as cma_netdev_callback() and 81041ab1cb7SParav Pandit * cma_process_remove(). 
81141ab1cb7SParav Pandit */ 81241ab1cb7SParav Pandit mutex_lock(&lock); 81341ab1cb7SParav Pandit cma_attach_to_dev(id_priv, listen_id_priv->cma_dev); 81441ab1cb7SParav Pandit mutex_unlock(&lock); 815cb5cd0eaSShay Drory rdma_restrack_add(&id_priv->res); 81641ab1cb7SParav Pandit return 0; 81741ab1cb7SParav Pandit } 81841ab1cb7SParav Pandit 81941ab1cb7SParav Pandit static int cma_iw_acquire_dev(struct rdma_id_private *id_priv, 820e7ff98aeSParav Pandit const struct rdma_id_private *listen_id_priv) 821e51060f0SSean Hefty { 822c8f6a362SSean Hefty struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 8234ed13a5fSParav Pandit const struct ib_gid_attr *sgid_attr; 824e51060f0SSean Hefty struct cma_device *cma_dev; 8254ed13a5fSParav Pandit enum ib_gid_type gid_type; 826e51060f0SSean Hefty int ret = -ENODEV; 82741ab1cb7SParav Pandit union ib_gid gid; 8281fb7f897SMark Bloch u32 port; 829e51060f0SSean Hefty 8307c11147dSMichael Wang if (dev_addr->dev_type != ARPHRD_INFINIBAND && 8312efdd6a0SMoni Shoua id_priv->id.ps == RDMA_PS_IPOIB) 8322efdd6a0SMoni Shoua return -EINVAL; 8332efdd6a0SMoni Shoua 8343c86aa70SEli Cohen memcpy(&gid, dev_addr->src_dev_addr + 83541ab1cb7SParav Pandit rdma_addr_gid_offset(dev_addr), sizeof(gid)); 83641ab1cb7SParav Pandit 83741ab1cb7SParav Pandit mutex_lock(&lock); 8387c11147dSMichael Wang 839be9130ccSDoug Ledford cma_dev = listen_id_priv->cma_dev; 840be9130ccSDoug Ledford port = listen_id_priv->id.port_num; 84179d684f0SParav Pandit gid_type = listen_id_priv->gid_type; 8424ed13a5fSParav Pandit sgid_attr = cma_validate_port(cma_dev->device, port, 84341ab1cb7SParav Pandit gid_type, &gid, id_priv); 8444ed13a5fSParav Pandit if (!IS_ERR(sgid_attr)) { 8457c11147dSMichael Wang id_priv->id.port_num = port; 8464ed13a5fSParav Pandit cma_bind_sgid_attr(id_priv, sgid_attr); 8474ed13a5fSParav Pandit ret = 0; 848be9130ccSDoug Ledford goto out; 849be9130ccSDoug Ledford } 8507c11147dSMichael Wang 851e51060f0SSean Hefty list_for_each_entry(cma_dev, 
&dev_list, list) { 852cc055dd3SParav Pandit rdma_for_each_port (cma_dev->device, port) { 853ff11c6cdSParav Pandit if (listen_id_priv->cma_dev == cma_dev && 854be9130ccSDoug Ledford listen_id_priv->id.port_num == port) 855be9130ccSDoug Ledford continue; 8563c86aa70SEli Cohen 85779d684f0SParav Pandit gid_type = cma_dev->default_gid_type[port - 1]; 8584ed13a5fSParav Pandit sgid_attr = cma_validate_port(cma_dev->device, port, 85941ab1cb7SParav Pandit gid_type, &gid, id_priv); 8604ed13a5fSParav Pandit if (!IS_ERR(sgid_attr)) { 8617c11147dSMichael Wang id_priv->id.port_num = port; 8624ed13a5fSParav Pandit cma_bind_sgid_attr(id_priv, sgid_attr); 8634ed13a5fSParav Pandit ret = 0; 8643c86aa70SEli Cohen goto out; 86563f05be2Sshefty } 866e51060f0SSean Hefty } 867e51060f0SSean Hefty } 8683c86aa70SEli Cohen 8693c86aa70SEli Cohen out: 870cb5cd0eaSShay Drory if (!ret) { 8713c86aa70SEli Cohen cma_attach_to_dev(id_priv, cma_dev); 872cb5cd0eaSShay Drory rdma_restrack_add(&id_priv->res); 873cb5cd0eaSShay Drory } 8743c86aa70SEli Cohen 875a396d43aSSean Hefty mutex_unlock(&lock); 876e51060f0SSean Hefty return ret; 877e51060f0SSean Hefty } 878e51060f0SSean Hefty 879f17df3b0SSean Hefty /* 880f17df3b0SSean Hefty * Select the source IB device and address to reach the destination IB address. 
881f17df3b0SSean Hefty */ 882f17df3b0SSean Hefty static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) 883f17df3b0SSean Hefty { 884f17df3b0SSean Hefty struct cma_device *cma_dev, *cur_dev; 885f17df3b0SSean Hefty struct sockaddr_ib *addr; 886f17df3b0SSean Hefty union ib_gid gid, sgid, *dgid; 887cc055dd3SParav Pandit unsigned int p; 888f17df3b0SSean Hefty u16 pkey, index; 88993b1f29dSJack Wang enum ib_port_state port_state; 89020679094SAvihai Horon int ret; 891f17df3b0SSean Hefty int i; 892f17df3b0SSean Hefty 893f17df3b0SSean Hefty cma_dev = NULL; 894f17df3b0SSean Hefty addr = (struct sockaddr_ib *) cma_dst_addr(id_priv); 895f17df3b0SSean Hefty dgid = (union ib_gid *) &addr->sib_addr; 896f17df3b0SSean Hefty pkey = ntohs(addr->sib_pkey); 897f17df3b0SSean Hefty 898954a8e3aSParav Pandit mutex_lock(&lock); 899f17df3b0SSean Hefty list_for_each_entry(cur_dev, &dev_list, list) { 900cc055dd3SParav Pandit rdma_for_each_port (cur_dev->device, p) { 90130a74ef4SMichael Wang if (!rdma_cap_af_ib(cur_dev->device, p)) 902f17df3b0SSean Hefty continue; 903f17df3b0SSean Hefty 904f17df3b0SSean Hefty if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index)) 905f17df3b0SSean Hefty continue; 906f17df3b0SSean Hefty 90793b1f29dSJack Wang if (ib_get_cached_port_state(cur_dev->device, p, &port_state)) 90893b1f29dSJack Wang continue; 90920679094SAvihai Horon 91020679094SAvihai Horon for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len; 91120679094SAvihai Horon ++i) { 91220679094SAvihai Horon ret = rdma_query_gid(cur_dev->device, p, i, 91320679094SAvihai Horon &gid); 91420679094SAvihai Horon if (ret) 91520679094SAvihai Horon continue; 91620679094SAvihai Horon 917f17df3b0SSean Hefty if (!memcmp(&gid, dgid, sizeof(gid))) { 918f17df3b0SSean Hefty cma_dev = cur_dev; 919f17df3b0SSean Hefty sgid = gid; 9208fb488d7SPaul Bolle id_priv->id.port_num = p; 921f17df3b0SSean Hefty goto found; 922f17df3b0SSean Hefty } 923f17df3b0SSean Hefty 924f17df3b0SSean Hefty if (!cma_dev && 
(gid.global.subnet_prefix == 92593b1f29dSJack Wang dgid->global.subnet_prefix) && 92693b1f29dSJack Wang port_state == IB_PORT_ACTIVE) { 927f17df3b0SSean Hefty cma_dev = cur_dev; 928f17df3b0SSean Hefty sgid = gid; 9298fb488d7SPaul Bolle id_priv->id.port_num = p; 930954a8e3aSParav Pandit goto found; 931f17df3b0SSean Hefty } 932f17df3b0SSean Hefty } 933f17df3b0SSean Hefty } 934f17df3b0SSean Hefty } 935954a8e3aSParav Pandit mutex_unlock(&lock); 936f17df3b0SSean Hefty return -ENODEV; 937f17df3b0SSean Hefty 938f17df3b0SSean Hefty found: 939f17df3b0SSean Hefty cma_attach_to_dev(id_priv, cma_dev); 940cb5cd0eaSShay Drory rdma_restrack_add(&id_priv->res); 941954a8e3aSParav Pandit mutex_unlock(&lock); 942f17df3b0SSean Hefty addr = (struct sockaddr_ib *)cma_src_addr(id_priv); 943954a8e3aSParav Pandit memcpy(&addr->sib_addr, &sgid, sizeof(sgid)); 944f17df3b0SSean Hefty cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); 945f17df3b0SSean Hefty return 0; 946f17df3b0SSean Hefty } 947f17df3b0SSean Hefty 948e368d23fSParav Pandit static void cma_id_get(struct rdma_id_private *id_priv) 949e368d23fSParav Pandit { 95043fb5892SParav Pandit refcount_inc(&id_priv->refcount); 951e368d23fSParav Pandit } 952e368d23fSParav Pandit 953e368d23fSParav Pandit static void cma_id_put(struct rdma_id_private *id_priv) 954e51060f0SSean Hefty { 95543fb5892SParav Pandit if (refcount_dec_and_test(&id_priv->refcount)) 956e51060f0SSean Hefty complete(&id_priv->comp); 957e51060f0SSean Hefty } 958e51060f0SSean Hefty 959b09c4d70SLeon Romanovsky static struct rdma_id_private * 960b09c4d70SLeon Romanovsky __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler, 9612253fc0cSSteve Wise void *context, enum rdma_ucm_port_space ps, 962b09c4d70SLeon Romanovsky enum ib_qp_type qp_type, const struct rdma_id_private *parent) 963e51060f0SSean Hefty { 964e51060f0SSean Hefty struct rdma_id_private *id_priv; 965e51060f0SSean Hefty 966e51060f0SSean Hefty id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL); 
967e51060f0SSean Hefty if (!id_priv) 968e51060f0SSean Hefty return ERR_PTR(-ENOMEM); 969e51060f0SSean Hefty 970550e5ca7SNir Muchtar id_priv->state = RDMA_CM_IDLE; 971e51060f0SSean Hefty id_priv->id.context = context; 972e51060f0SSean Hefty id_priv->id.event_handler = event_handler; 973e51060f0SSean Hefty id_priv->id.ps = ps; 974b26f9b99SSean Hefty id_priv->id.qp_type = qp_type; 97589052d78SMajd Dibbiny id_priv->tos_set = false; 9762c1619edSDanit Goldberg id_priv->timeout_set = false; 9773aeffc46SHåkon Bugge id_priv->min_rnr_timer_set = false; 97879d684f0SParav Pandit id_priv->gid_type = IB_GID_TYPE_IB; 979e51060f0SSean Hefty spin_lock_init(&id_priv->lock); 980c5483388SSean Hefty mutex_init(&id_priv->qp_mutex); 981e51060f0SSean Hefty init_completion(&id_priv->comp); 98243fb5892SParav Pandit refcount_set(&id_priv->refcount, 1); 983de910bd9SOr Gerlitz mutex_init(&id_priv->handler_mutex); 98499cfddb8SJason Gunthorpe INIT_LIST_HEAD(&id_priv->device_item); 985fc008bdbSPatrisious Haddad INIT_LIST_HEAD(&id_priv->id_list_entry); 986e51060f0SSean Hefty INIT_LIST_HEAD(&id_priv->listen_list); 987c8f6a362SSean Hefty INIT_LIST_HEAD(&id_priv->mc_list); 988e51060f0SSean Hefty get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num); 989fa20105eSGuy Shapiro id_priv->id.route.addr.dev_addr.net = get_net(net); 99023a9cd2aSMoni Shoua id_priv->seq_num &= 0x00ffffff; 991e51060f0SSean Hefty 99213ef5539SLeon Romanovsky rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID); 993b09c4d70SLeon Romanovsky if (parent) 994b09c4d70SLeon Romanovsky rdma_restrack_parent_name(&id_priv->res, &parent->res); 99513ef5539SLeon Romanovsky 996b09c4d70SLeon Romanovsky return id_priv; 997e51060f0SSean Hefty } 998b09c4d70SLeon Romanovsky 999b09c4d70SLeon Romanovsky struct rdma_cm_id * 1000b09c4d70SLeon Romanovsky __rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler, 1001b09c4d70SLeon Romanovsky void *context, enum rdma_ucm_port_space ps, 1002b09c4d70SLeon Romanovsky enum ib_qp_type 
qp_type, const char *caller) 1003b09c4d70SLeon Romanovsky { 1004b09c4d70SLeon Romanovsky struct rdma_id_private *ret; 1005b09c4d70SLeon Romanovsky 1006b09c4d70SLeon Romanovsky ret = __rdma_create_id(net, event_handler, context, ps, qp_type, NULL); 1007b09c4d70SLeon Romanovsky if (IS_ERR(ret)) 1008b09c4d70SLeon Romanovsky return ERR_CAST(ret); 1009b09c4d70SLeon Romanovsky 1010b09c4d70SLeon Romanovsky rdma_restrack_set_name(&ret->res, caller); 1011b09c4d70SLeon Romanovsky return &ret->id; 1012b09c4d70SLeon Romanovsky } 1013b09c4d70SLeon Romanovsky EXPORT_SYMBOL(__rdma_create_kernel_id); 1014b09c4d70SLeon Romanovsky 1015b09c4d70SLeon Romanovsky struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler, 1016b09c4d70SLeon Romanovsky void *context, 1017b09c4d70SLeon Romanovsky enum rdma_ucm_port_space ps, 1018b09c4d70SLeon Romanovsky enum ib_qp_type qp_type) 1019b09c4d70SLeon Romanovsky { 1020b09c4d70SLeon Romanovsky struct rdma_id_private *ret; 1021b09c4d70SLeon Romanovsky 1022b09c4d70SLeon Romanovsky ret = __rdma_create_id(current->nsproxy->net_ns, event_handler, context, 1023b09c4d70SLeon Romanovsky ps, qp_type, NULL); 1024b09c4d70SLeon Romanovsky if (IS_ERR(ret)) 1025b09c4d70SLeon Romanovsky return ERR_CAST(ret); 1026b09c4d70SLeon Romanovsky 1027b09c4d70SLeon Romanovsky rdma_restrack_set_name(&ret->res, NULL); 1028b09c4d70SLeon Romanovsky return &ret->id; 1029b09c4d70SLeon Romanovsky } 1030b09c4d70SLeon Romanovsky EXPORT_SYMBOL(rdma_create_user_id); 1031e51060f0SSean Hefty 1032c8f6a362SSean Hefty static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) 1033e51060f0SSean Hefty { 1034e51060f0SSean Hefty struct ib_qp_attr qp_attr; 1035c8f6a362SSean Hefty int qp_attr_mask, ret; 1036e51060f0SSean Hefty 1037c8f6a362SSean Hefty qp_attr.qp_state = IB_QPS_INIT; 1038c8f6a362SSean Hefty ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 1039e51060f0SSean Hefty if (ret) 1040e51060f0SSean Hefty return ret; 1041e51060f0SSean Hefty 
1042c8f6a362SSean Hefty ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); 1043c8f6a362SSean Hefty if (ret) 1044c8f6a362SSean Hefty return ret; 1045c8f6a362SSean Hefty 1046c8f6a362SSean Hefty qp_attr.qp_state = IB_QPS_RTR; 1047c8f6a362SSean Hefty ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE); 1048c8f6a362SSean Hefty if (ret) 1049c8f6a362SSean Hefty return ret; 1050c8f6a362SSean Hefty 1051c8f6a362SSean Hefty qp_attr.qp_state = IB_QPS_RTS; 1052c8f6a362SSean Hefty qp_attr.sq_psn = 0; 1053c8f6a362SSean Hefty ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN); 1054c8f6a362SSean Hefty 1055c8f6a362SSean Hefty return ret; 1056e51060f0SSean Hefty } 1057e51060f0SSean Hefty 1058db4657afSMike Marciniszyn static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) 1059db4657afSMike Marciniszyn { 1060db4657afSMike Marciniszyn struct ib_qp_attr qp_attr; 1061db4657afSMike Marciniszyn int qp_attr_mask, ret; 1062db4657afSMike Marciniszyn 1063db4657afSMike Marciniszyn qp_attr.qp_state = IB_QPS_INIT; 1064db4657afSMike Marciniszyn ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 1065db4657afSMike Marciniszyn if (ret) 1066db4657afSMike Marciniszyn return ret; 1067db4657afSMike Marciniszyn 1068db4657afSMike Marciniszyn return ib_modify_qp(qp, &qp_attr, qp_attr_mask); 1069db4657afSMike Marciniszyn } 1070db4657afSMike Marciniszyn 1071e51060f0SSean Hefty int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd, 1072e51060f0SSean Hefty struct ib_qp_init_attr *qp_init_attr) 1073e51060f0SSean Hefty { 1074e51060f0SSean Hefty struct rdma_id_private *id_priv; 1075e51060f0SSean Hefty struct ib_qp *qp; 1076db4657afSMike Marciniszyn int ret; 1077e51060f0SSean Hefty 1078e51060f0SSean Hefty id_priv = container_of(id, struct rdma_id_private, id); 1079ed999f82SChuck Lever if (id->device != pd->device) { 1080ed999f82SChuck Lever ret = -EINVAL; 1081ed999f82SChuck Lever goto out_err; 1082ed999f82SChuck Lever } 1083e51060f0SSean Hefty 10840691a286SChristoph Hellwig 
qp_init_attr->port_num = id->port_num; 1085e51060f0SSean Hefty qp = ib_create_qp(pd, qp_init_attr); 1086ed999f82SChuck Lever if (IS_ERR(qp)) { 1087ed999f82SChuck Lever ret = PTR_ERR(qp); 1088ed999f82SChuck Lever goto out_err; 1089ed999f82SChuck Lever } 1090e51060f0SSean Hefty 1091b26f9b99SSean Hefty if (id->qp_type == IB_QPT_UD) 1092c8f6a362SSean Hefty ret = cma_init_ud_qp(id_priv, qp); 1093db4657afSMike Marciniszyn else 1094db4657afSMike Marciniszyn ret = cma_init_conn_qp(id_priv, qp); 1095e51060f0SSean Hefty if (ret) 1096ed999f82SChuck Lever goto out_destroy; 1097e51060f0SSean Hefty 1098e51060f0SSean Hefty id->qp = qp; 1099e51060f0SSean Hefty id_priv->qp_num = qp->qp_num; 1100e51060f0SSean Hefty id_priv->srq = (qp->srq != NULL); 1101ed999f82SChuck Lever trace_cm_qp_create(id_priv, pd, qp_init_attr, 0); 1102e51060f0SSean Hefty return 0; 1103ed999f82SChuck Lever out_destroy: 1104e51060f0SSean Hefty ib_destroy_qp(qp); 1105ed999f82SChuck Lever out_err: 1106ed999f82SChuck Lever trace_cm_qp_create(id_priv, pd, qp_init_attr, ret); 1107e51060f0SSean Hefty return ret; 1108e51060f0SSean Hefty } 1109e51060f0SSean Hefty EXPORT_SYMBOL(rdma_create_qp); 1110e51060f0SSean Hefty 1111e51060f0SSean Hefty void rdma_destroy_qp(struct rdma_cm_id *id) 1112e51060f0SSean Hefty { 1113c5483388SSean Hefty struct rdma_id_private *id_priv; 1114c5483388SSean Hefty 1115c5483388SSean Hefty id_priv = container_of(id, struct rdma_id_private, id); 1116ed999f82SChuck Lever trace_cm_qp_destroy(id_priv); 1117c5483388SSean Hefty mutex_lock(&id_priv->qp_mutex); 1118c5483388SSean Hefty ib_destroy_qp(id_priv->id.qp); 1119c5483388SSean Hefty id_priv->id.qp = NULL; 1120c5483388SSean Hefty mutex_unlock(&id_priv->qp_mutex); 1121e51060f0SSean Hefty } 1122e51060f0SSean Hefty EXPORT_SYMBOL(rdma_destroy_qp); 1123e51060f0SSean Hefty 11245851bb89SSean Hefty static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, 11255851bb89SSean Hefty struct rdma_conn_param *conn_param) 1126e51060f0SSean Hefty { 
1127e51060f0SSean Hefty struct ib_qp_attr qp_attr; 1128e51060f0SSean Hefty int qp_attr_mask, ret; 1129e51060f0SSean Hefty 1130c5483388SSean Hefty mutex_lock(&id_priv->qp_mutex); 1131c5483388SSean Hefty if (!id_priv->id.qp) { 1132c5483388SSean Hefty ret = 0; 1133c5483388SSean Hefty goto out; 1134c5483388SSean Hefty } 1135e51060f0SSean Hefty 1136e51060f0SSean Hefty /* Need to update QP attributes from default values. */ 1137e51060f0SSean Hefty qp_attr.qp_state = IB_QPS_INIT; 1138c5483388SSean Hefty ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 1139e51060f0SSean Hefty if (ret) 1140c5483388SSean Hefty goto out; 1141e51060f0SSean Hefty 1142c5483388SSean Hefty ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); 1143e51060f0SSean Hefty if (ret) 1144c5483388SSean Hefty goto out; 1145e51060f0SSean Hefty 1146e51060f0SSean Hefty qp_attr.qp_state = IB_QPS_RTR; 1147c5483388SSean Hefty ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 1148e51060f0SSean Hefty if (ret) 1149c5483388SSean Hefty goto out; 1150e51060f0SSean Hefty 1151fef60902SMichael Wang BUG_ON(id_priv->cma_dev->device != id_priv->id.device); 1152fef60902SMichael Wang 11535851bb89SSean Hefty if (conn_param) 11545851bb89SSean Hefty qp_attr.max_dest_rd_atomic = conn_param->responder_resources; 1155c5483388SSean Hefty ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); 1156c5483388SSean Hefty out: 1157c5483388SSean Hefty mutex_unlock(&id_priv->qp_mutex); 1158c5483388SSean Hefty return ret; 1159e51060f0SSean Hefty } 1160e51060f0SSean Hefty 11615851bb89SSean Hefty static int cma_modify_qp_rts(struct rdma_id_private *id_priv, 11625851bb89SSean Hefty struct rdma_conn_param *conn_param) 1163e51060f0SSean Hefty { 1164e51060f0SSean Hefty struct ib_qp_attr qp_attr; 1165e51060f0SSean Hefty int qp_attr_mask, ret; 1166e51060f0SSean Hefty 1167c5483388SSean Hefty mutex_lock(&id_priv->qp_mutex); 1168c5483388SSean Hefty if (!id_priv->id.qp) { 1169c5483388SSean Hefty ret = 0; 
1170c5483388SSean Hefty goto out; 1171e51060f0SSean Hefty } 1172e51060f0SSean Hefty 1173c5483388SSean Hefty qp_attr.qp_state = IB_QPS_RTS; 1174c5483388SSean Hefty ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 1175c5483388SSean Hefty if (ret) 1176c5483388SSean Hefty goto out; 1177c5483388SSean Hefty 11785851bb89SSean Hefty if (conn_param) 11795851bb89SSean Hefty qp_attr.max_rd_atomic = conn_param->initiator_depth; 1180c5483388SSean Hefty ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); 1181c5483388SSean Hefty out: 1182c5483388SSean Hefty mutex_unlock(&id_priv->qp_mutex); 1183c5483388SSean Hefty return ret; 1184c5483388SSean Hefty } 1185c5483388SSean Hefty 1186c5483388SSean Hefty static int cma_modify_qp_err(struct rdma_id_private *id_priv) 1187e51060f0SSean Hefty { 1188e51060f0SSean Hefty struct ib_qp_attr qp_attr; 1189c5483388SSean Hefty int ret; 1190e51060f0SSean Hefty 1191c5483388SSean Hefty mutex_lock(&id_priv->qp_mutex); 1192c5483388SSean Hefty if (!id_priv->id.qp) { 1193c5483388SSean Hefty ret = 0; 1194c5483388SSean Hefty goto out; 1195c5483388SSean Hefty } 1196e51060f0SSean Hefty 1197e51060f0SSean Hefty qp_attr.qp_state = IB_QPS_ERR; 1198c5483388SSean Hefty ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE); 1199c5483388SSean Hefty out: 1200c5483388SSean Hefty mutex_unlock(&id_priv->qp_mutex); 1201c5483388SSean Hefty return ret; 1202e51060f0SSean Hefty } 1203e51060f0SSean Hefty 1204c8f6a362SSean Hefty static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv, 1205c8f6a362SSean Hefty struct ib_qp_attr *qp_attr, int *qp_attr_mask) 1206c8f6a362SSean Hefty { 1207c8f6a362SSean Hefty struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 1208c8f6a362SSean Hefty int ret; 12093c86aa70SEli Cohen u16 pkey; 12103c86aa70SEli Cohen 1211227128fcSMichael Wang if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num)) 12123c86aa70SEli Cohen pkey = 0xffff; 1213fef60902SMichael Wang else 1214fef60902SMichael Wang pkey = 
ib_addr_get_pkey(dev_addr); 1215c8f6a362SSean Hefty 1216c8f6a362SSean Hefty ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num, 12173c86aa70SEli Cohen pkey, &qp_attr->pkey_index); 1218c8f6a362SSean Hefty if (ret) 1219c8f6a362SSean Hefty return ret; 1220c8f6a362SSean Hefty 1221c8f6a362SSean Hefty qp_attr->port_num = id_priv->id.port_num; 1222c8f6a362SSean Hefty *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; 1223c8f6a362SSean Hefty 1224b26f9b99SSean Hefty if (id_priv->id.qp_type == IB_QPT_UD) { 12255c438135SSean Hefty ret = cma_set_qkey(id_priv, 0); 1226d2ca39f2SYossi Etigin if (ret) 1227d2ca39f2SYossi Etigin return ret; 1228d2ca39f2SYossi Etigin 1229c8f6a362SSean Hefty qp_attr->qkey = id_priv->qkey; 1230c8f6a362SSean Hefty *qp_attr_mask |= IB_QP_QKEY; 1231c8f6a362SSean Hefty } else { 1232c8f6a362SSean Hefty qp_attr->qp_access_flags = 0; 1233c8f6a362SSean Hefty *qp_attr_mask |= IB_QP_ACCESS_FLAGS; 1234c8f6a362SSean Hefty } 1235c8f6a362SSean Hefty return 0; 1236c8f6a362SSean Hefty } 1237c8f6a362SSean Hefty 1238e51060f0SSean Hefty int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, 1239e51060f0SSean Hefty int *qp_attr_mask) 1240e51060f0SSean Hefty { 1241e51060f0SSean Hefty struct rdma_id_private *id_priv; 1242c8f6a362SSean Hefty int ret = 0; 1243e51060f0SSean Hefty 1244e51060f0SSean Hefty id_priv = container_of(id, struct rdma_id_private, id); 124572219ceaSMichael Wang if (rdma_cap_ib_cm(id->device, id->port_num)) { 1246b26f9b99SSean Hefty if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD)) 1247c8f6a362SSean Hefty ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); 1248c8f6a362SSean Hefty else 1249e51060f0SSean Hefty ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, 1250e51060f0SSean Hefty qp_attr_mask); 1251dd5f03beSMatan Barak 1252e51060f0SSean Hefty if (qp_attr->qp_state == IB_QPS_RTR) 1253e51060f0SSean Hefty qp_attr->rq_psn = id_priv->seq_num; 125404215330SMichael Wang } else if 
(rdma_cap_iw_cm(id->device, id->port_num)) { 1255c8f6a362SSean Hefty if (!id_priv->cm_id.iw) { 12568f076531SDotan Barak qp_attr->qp_access_flags = 0; 1257c8f6a362SSean Hefty *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS; 1258c8f6a362SSean Hefty } else 125907ebafbaSTom Tucker ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, 126007ebafbaSTom Tucker qp_attr_mask); 1261a62ab66bSIsmail, Mustafa qp_attr->port_num = id_priv->id.port_num; 1262a62ab66bSIsmail, Mustafa *qp_attr_mask |= IB_QP_PORT; 1263b6eb7011SWenpeng Liang } else { 1264e51060f0SSean Hefty ret = -ENOSYS; 1265b6eb7011SWenpeng Liang } 1266e51060f0SSean Hefty 12672c1619edSDanit Goldberg if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set) 12682c1619edSDanit Goldberg qp_attr->timeout = id_priv->timeout; 12692c1619edSDanit Goldberg 12703aeffc46SHåkon Bugge if ((*qp_attr_mask & IB_QP_MIN_RNR_TIMER) && id_priv->min_rnr_timer_set) 12713aeffc46SHåkon Bugge qp_attr->min_rnr_timer = id_priv->min_rnr_timer; 12723aeffc46SHåkon Bugge 1273e51060f0SSean Hefty return ret; 1274e51060f0SSean Hefty } 1275e51060f0SSean Hefty EXPORT_SYMBOL(rdma_init_qp_attr); 1276e51060f0SSean Hefty 1277ca3a8aceSParav Pandit static inline bool cma_zero_addr(const struct sockaddr *addr) 1278e51060f0SSean Hefty { 12792e2d190cSSean Hefty switch (addr->sa_family) { 12802e2d190cSSean Hefty case AF_INET: 12812e2d190cSSean Hefty return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr); 12822e2d190cSSean Hefty case AF_INET6: 12832e2d190cSSean Hefty return ipv6_addr_any(&((struct sockaddr_in6 *)addr)->sin6_addr); 12842e2d190cSSean Hefty case AF_IB: 12852e2d190cSSean Hefty return ib_addr_any(&((struct sockaddr_ib *)addr)->sib_addr); 12862e2d190cSSean Hefty default: 1287ca3a8aceSParav Pandit return false; 1288e51060f0SSean Hefty } 1289e51060f0SSean Hefty } 1290e51060f0SSean Hefty 1291ca3a8aceSParav Pandit static inline bool cma_loopback_addr(const struct sockaddr *addr) 1292e51060f0SSean Hefty { 12932e2d190cSSean Hefty switch 
(addr->sa_family) {
	case AF_INET:
		return ipv4_is_loopback(
			((struct sockaddr_in *)addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_loopback(
			&((struct sockaddr_in6 *)addr)->sin6_addr);
	case AF_IB:
		return ib_addr_loopback(
			&((struct sockaddr_ib *)addr)->sib_addr);
	default:
		return false;
	}
}

/* True if @addr is a wildcard (zero) or loopback address for its family. */
static inline bool cma_any_addr(const struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

/*
 * Compare two socket addresses of the same family.
 * Returns 0 on match, non-zero otherwise (not a total ordering).
 * Any family other than AF_INET/AF_INET6 is treated as AF_IB.
 */
static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst)
{
	if (src->sa_family != dst->sa_family)
		return -1;

	switch (src->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *)src)->sin_addr.s_addr !=
		       ((struct sockaddr_in *)dst)->sin_addr.s_addr;
	case AF_INET6: {
		struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src;
		struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst;
		bool link_local;

		if (ipv6_addr_cmp(&src_addr6->sin6_addr,
				  &dst_addr6->sin6_addr))
			return 1;
		link_local = ipv6_addr_type(&dst_addr6->sin6_addr) &
			     IPV6_ADDR_LINKLOCAL;
		/* Link local must match their scope_ids */
		return link_local ? (src_addr6->sin6_scope_id !=
				     dst_addr6->sin6_scope_id) :
				    0;
	}

	default:
		return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
				   &((struct sockaddr_ib *) dst)->sib_addr);
	}
}

/*
 * Extract the port from a socket address, in network byte order.
 * For AF_IB the port lives in the low bits of the service ID, selected
 * by the SID mask.  Returns 0 for unknown families.
 */
static __be16 cma_port(const struct sockaddr *addr)
{
	struct sockaddr_ib *sib;

	switch (addr->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) addr)->sin_port;
	case AF_INET6:
		return ((struct sockaddr_in6 *) addr)->sin6_port;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		return htons((u16) (be64_to_cpu(sib->sib_sid) &
				    be64_to_cpu(sib->sib_sid_mask)));
	default:
		return 0;
	}
}

/* True if @addr carries no port (i.e. a wildcard port). */
static inline int cma_any_port(const struct sockaddr *addr)
{
	return !cma_port(addr);
}

/*
 * Fill in AF_IB source/destination addresses for a new connection.
 * When @path is NULL (SIDR requests), the source is copied from the
 * listening endpoint instead of the path record.  Either output pointer
 * may be NULL to skip that side.
 */
static void cma_save_ib_info(struct sockaddr *src_addr,
			     struct sockaddr *dst_addr,
			     const struct rdma_cm_id *listen_id,
			     const struct sa_path_rec *path)
{
	struct sockaddr_ib *listen_ib, *ib;

	listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
	if (src_addr) {
		ib = (struct sockaddr_ib *)src_addr;
		ib->sib_family = AF_IB;
		if (path) {
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->sgid, 16);
			ib->sib_sid = path->service_id;
			ib->sib_scope_id = 0;
		} else {
			ib->sib_pkey = listen_ib->sib_pkey;
			ib->sib_flowinfo = listen_ib->sib_flowinfo;
			ib->sib_addr = listen_ib->sib_addr;
			ib->sib_sid = listen_ib->sib_sid;
			ib->sib_scope_id = listen_ib->sib_scope_id;
		}
		/* Full SID match: the accepted connection is fully bound */
		ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
	}
	if (dst_addr) {
		ib = (struct sockaddr_ib *)dst_addr;
		ib->sib_family = AF_IB;
		if (path) {
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->dgid, 16);
		}
	}
}

/*
 * Unpack IPv4 source/destination from a CMA private-data header.
 * Note the swap: the header's dst_addr is our local (source) address
 * because the header was built by the remote peer.
 */
static void cma_save_ip4_info(struct sockaddr_in *src_addr,
			      struct sockaddr_in *dst_addr,
			      struct cma_hdr *hdr,
			      __be16 local_port)
{
	if (src_addr) {
		*src_addr = (struct sockaddr_in) {
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->dst_addr.ip4.addr,
			.sin_port = local_port,
		};
	}

	if (dst_addr) {
		*dst_addr = (struct sockaddr_in) {
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->src_addr.ip4.addr,
			.sin_port = hdr->port,
		};
	}
}

/* IPv6 counterpart of cma_save_ip4_info(); same src/dst swap applies. */
static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
			      struct sockaddr_in6 *dst_addr,
			      struct cma_hdr *hdr,
			      __be16 local_port)
{
	if (src_addr) {
		*src_addr = (struct sockaddr_in6) {
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->dst_addr.ip6,
			.sin6_port = local_port,
		};
	}

	if (dst_addr) {
		*dst_addr = (struct sockaddr_in6) {
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->src_addr.ip6,
			.sin6_port = hdr->port,
		};
	}
}

/* The port is encoded in the low 16 bits of the IB service ID. */
static u16 cma_port_from_service_id(__be64 service_id)
{
	return (u16)be64_to_cpu(service_id);
}

/*
 * Decode the CMA header carried in the CM event's private data and fill
 * in the IP source/destination addresses.  Returns -EINVAL on a version
 * mismatch and -EAFNOSUPPORT for an unknown IP version (used by callers
 * to detect AF_IB requests, which carry no CMA header).
 */
static int cma_save_ip_info(struct sockaddr *src_addr,
			    struct sockaddr *dst_addr,
			    const struct ib_cm_event *ib_event,
			    __be64 service_id)
{
	struct cma_hdr *hdr;
	__be16 port;

	hdr = ib_event->private_data;
	if (hdr->cma_version != CMA_VERSION)
		return -EINVAL;

	port = htons(cma_port_from_service_id(service_id));

	switch (cma_get_ip_ver(hdr)) {
	case 4:
		cma_save_ip4_info((struct sockaddr_in *)src_addr,
				  (struct sockaddr_in *)dst_addr, hdr, port);
		break;
	case 6:
		cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
				  (struct sockaddr_in6 *)dst_addr, hdr, port);
		break;
	default:
		return -EAFNOSUPPORT;
	}

	return 0;
}

/*
 * Dispatch address extraction for an incoming CM event: AF_IB requests
 * are decoded from the path record / listening id, everything else from
 * the CMA private-data header.
 */
static int cma_save_net_info(struct sockaddr *src_addr,
			     struct sockaddr *dst_addr,
			     const struct rdma_cm_id *listen_id,
			     const struct ib_cm_event *ib_event,
			     sa_family_t sa_family, __be64 service_id)
{
	if (sa_family == AF_IB) {
		if (ib_event->event == IB_CM_REQ_RECEIVED)
			cma_save_ib_info(src_addr, dst_addr, listen_id,
					 ib_event->param.req_rcvd.primary_path);
		else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
			cma_save_ib_info(src_addr, dst_addr, listen_id, NULL);
		return 0;
	}

	return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);
}

/*
 * Capture the device/port/GID/P_Key/service-id details of an incoming
 * REQ or SIDR_REQ into @req for later listener lookup.  A mismatch
 * between the BTH P_Key and the path/payload P_Key is only warned about
 * today (see the ratelimited messages below).
 */
static int cma_save_req_info(const struct ib_cm_event *ib_event,
			     struct cma_req_info *req)
{
	const struct ib_cm_req_event_param *req_param =
		&ib_event->param.req_rcvd;
	const struct ib_cm_sidr_req_event_param *sidr_param =
		&ib_event->param.sidr_req_rcvd;

	switch (ib_event->event) {
	case IB_CM_REQ_RECEIVED:
		req->device	= req_param->listen_id->device;
		req->port	= req_param->port;
		memcpy(&req->local_gid, &req_param->primary_path->sgid,
		       sizeof(req->local_gid));
		req->has_gid	= true;
		req->service_id = req_param->primary_path->service_id;
		req->pkey	= be16_to_cpu(req_param->primary_path->pkey);
		if (req->pkey != req_param->bth_pkey)
			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
					    "RDMA CMA: in the future this may cause the request to be dropped\n",
					    req_param->bth_pkey, req->pkey);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		req->device	= sidr_param->listen_id->device;
		req->port	= sidr_param->port;
		req->has_gid	= false;
		req->service_id	= sidr_param->service_id;
		req->pkey	= sidr_param->pkey;
		if (req->pkey != sidr_param->bth_pkey)
			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
					    "RDMA CMA: in the future this may cause the request to be dropped\n",
					    sidr_param->bth_pkey, req->pkey);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Check that @net_dev is the device the kernel FIB would actually route
 * @dst_addr -> @src_addr traffic through.  Multicast/broadcast/zeronet/
 * loopback endpoints are rejected up front.
 */
static bool validate_ipv4_net_dev(struct net_device *net_dev,
				  const struct sockaddr_in *dst_addr,
				  const struct sockaddr_in *src_addr)
{
	__be32 daddr = dst_addr->sin_addr.s_addr,
	       saddr = src_addr->sin_addr.s_addr;
	struct fib_result res;
	struct flowi4 fl4;
	int err;
	bool ret;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) ||
	    ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) ||
	    ipv4_is_loopback(saddr))
		return false;

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = net_dev->ifindex;
	fl4.daddr = daddr;
	fl4.saddr = saddr;

	rcu_read_lock();
	err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
	ret = err == 0 && FIB_RES_DEV(res) == net_dev;
	rcu_read_unlock();

	return ret;
}

/*
 * IPv6 counterpart of validate_ipv4_net_dev(); strict matching is
 * requested for link-local destinations.  Always false when IPv6 is
 * compiled out.
 */
static bool validate_ipv6_net_dev(struct net_device *net_dev,
				  const struct sockaddr_in6 *dst_addr,
				  const struct sockaddr_in6 *src_addr)
{
#if IS_ENABLED(CONFIG_IPV6)
	const int strict = ipv6_addr_type(&dst_addr->sin6_addr) &
			   IPV6_ADDR_LINKLOCAL;
	struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
					 &src_addr->sin6_addr, net_dev->ifindex,
					 NULL, strict);
	bool ret;

	if (!rt)
		return false;

	ret = rt->rt6i_idev->dev == net_dev;
	ip6_rt_put(rt);

	return ret;
#else
	return false;
#endif
}

/*
 * Family-dispatching wrapper for the two validators above.  Also
 * requires both addresses to be of the same family.
 */
static bool validate_net_dev(struct net_device *net_dev,
			     const struct sockaddr *daddr,
			     const struct sockaddr *saddr)
{
	const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr;
	const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr;
	const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
	const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr;

	switch (daddr->sa_family) {
	case AF_INET:
		return saddr->sa_family == AF_INET &&
		       validate_ipv4_net_dev(net_dev, daddr4, saddr4);

	case AF_INET6:
		return saddr->sa_family == AF_INET6 &&
		       validate_ipv6_net_dev(net_dev, daddr6, saddr6);

	default:
		return false;
	}
}

/*
 * Resolve the netdevice backing the SGID of an incoming RoCE CM event.
 * Returns a held reference (caller must dev_put()) or NULL.
 */
static struct net_device *
roce_get_net_dev_by_cm_event(const struct ib_cm_event *ib_event)
{
	const struct ib_gid_attr *sgid_attr = NULL;
	struct net_device *ndev;

	if (ib_event->event == IB_CM_REQ_RECEIVED)
		sgid_attr = ib_event->param.req_rcvd.ppath_sgid_attr;
	else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
		sgid_attr = ib_event->param.sidr_req_rcvd.sgid_attr;

	if (!sgid_attr)
		return NULL;

	rcu_read_lock();
	ndev = rdma_read_gid_attr_ndev_rcu(sgid_attr);
	if (IS_ERR(ndev))
		ndev = NULL;
	else
		dev_hold(ndev);
	rcu_read_unlock();
	return ndev;
}

/*
 * Decode the request's IP addresses into @req's storage and find the
 * netdevice the request arrived on.  Returns a held netdevice, or an
 * ERR_PTR (-EAFNOSUPPORT propagated from cma_save_ip_info() signals an
 * AF_IB request to the caller).
 */
static struct net_device *cma_get_net_dev(const struct ib_cm_event *ib_event,
					  struct cma_req_info *req)
{
	struct sockaddr *listen_addr =
			(struct sockaddr *)&req->listen_addr_storage;
	struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage;
	struct net_device *net_dev;
	const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
	int err;

	err = cma_save_ip_info(listen_addr, src_addr, ib_event,
			       req->service_id);
	if (err)
		return ERR_PTR(err);

	if (rdma_protocol_roce(req->device, req->port))
		net_dev = roce_get_net_dev_by_cm_event(ib_event);
	else
		net_dev = ib_get_net_dev_by_params(req->device, req->port,
						   req->pkey,
						   gid, listen_addr);
	if (!net_dev)
		return ERR_PTR(-ENODEV);

	return net_dev;
}

/* The RDMA port space is encoded in bits 16..31 of the service ID. */
static enum rdma_ucm_port_space rdma_ps_from_service_id(__be64 service_id)
{
	return (be64_to_cpu(service_id) >> 16) & 0xffff;
}

/*
 * Does the destination address in the request's CMA header match the
 * address @id_priv is bound to?  Wildcard listeners match anything
 * (unless restricted to their own family via afonly); AF_IB listeners
 * carry no IP header to compare.
 */
static bool cma_match_private_data(struct rdma_id_private *id_priv,
				   const struct cma_hdr *hdr)
{
	struct sockaddr *addr = cma_src_addr(id_priv);
	__be32 ip4_addr;
	struct in6_addr ip6_addr;

	if (cma_any_addr(addr) && !id_priv->afonly)
		return true;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
		if (cma_get_ip_ver(hdr) != 4)
			return false;
		if (!cma_any_addr(addr) &&
		    hdr->dst_addr.ip4.addr != ip4_addr)
			return false;
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
		if (cma_get_ip_ver(hdr) != 6)
			return false;
		if (!cma_any_addr(addr) &&
		    memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
			return false;
		break;
	case AF_IB:
		return true;
	default:
		return false;
	}

	return true;
}

/* True if @id is bound to (or defaults to) a RoCE port. */
static bool cma_protocol_roce(const struct rdma_cm_id *id)
{
	struct ib_device *device = id->device;
	const u32 port_num = id->port_num ?: rdma_start_port(device);

	return rdma_protocol_roce(device, port_num);
}

static bool cma_is_req_ipv6_ll(const struct cma_req_info *req)
{
	const struct sockaddr *daddr =
			(const struct sockaddr *)&req->listen_addr_storage;
	const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;

	/* Returns true if the req is for IPv6 link local */
	return (daddr->sa_family == AF_INET6 &&
		(ipv6_addr_type(&daddr6->sin6_addr) & IPV6_ADDR_LINKLOCAL));
}

/*
 * Decide whether a listener @id can service a request that arrived on
 * @net_dev.  A NULL @net_dev means an AF_IB request, which only needs a
 * port match.  Netdevice identity only matters for IPv6 link-local
 * requests, where scope is significant.
 */
static bool cma_match_net_dev(const struct rdma_cm_id *id,
			      const struct net_device *net_dev,
			      const struct cma_req_info *req)
{
	const struct rdma_addr *addr = &id->route.addr;

	if (!net_dev)
		/* This request is an AF_IB request */
		return (!id->port_num || id->port_num == req->port) &&
		       (addr->src_addr.ss_family == AF_IB);

	/*
	 * If the request is not for IPv6 link local, allow matching
	 * request to any netdevice of the one or multiport rdma device.
	 */
	if (!cma_is_req_ipv6_ll(req))
		return true;
	/*
	 * Net namespaces must match, and if the listener is listening
	 * on a specific netdevice then the netdevice must match as well.
	 * The "!! == ==" expression below is true either when the
	 * listener is unbound (bound_dev_if == 0) or when it is bound to
	 * exactly this netdevice's ifindex.
	 */
	if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
	    (!!addr->dev_addr.bound_dev_if ==
	     (addr->dev_addr.bound_dev_if == net_dev->ifindex)))
		return true;
	else
		return false;
}

/*
 * Walk a bind list's owners (and their per-device children) looking for
 * a listener whose bound address, device and netdevice all match the
 * incoming request.  Caller must hold the global cma 'lock'.
 */
static struct rdma_id_private *cma_find_listener(
		const struct rdma_bind_list *bind_list,
		const struct ib_cm_id *cm_id,
		const struct ib_cm_event *ib_event,
		const struct cma_req_info *req,
		const struct net_device *net_dev)
{
	struct rdma_id_private *id_priv, *id_priv_dev;

	lockdep_assert_held(&lock);

	if (!bind_list)
		return ERR_PTR(-EINVAL);

	hlist_for_each_entry(id_priv, &bind_list->owners, node) {
		if (cma_match_private_data(id_priv, ib_event->private_data)) {
			if (id_priv->id.device == cm_id->device &&
			    cma_match_net_dev(&id_priv->id, net_dev, req))
				return id_priv;
			list_for_each_entry(id_priv_dev,
					    &id_priv->listen_list,
					    listen_item) {
				if (id_priv_dev->id.device == cm_id->device &&
				    cma_match_net_dev(&id_priv_dev->id,
						      net_dev, req))
					return id_priv_dev;
			}
		}
	}

	return ERR_PTR(-EINVAL);
}

/*
 * Map an incoming IB CM event to the rdma_cm listener that should
 * handle it.  On success *net_dev holds a reference to the (optional)
 * ingress netdevice; it is released and NULLed on failure.
 */
static struct rdma_id_private *
cma_ib_id_from_event(struct ib_cm_id *cm_id,
		     const struct ib_cm_event *ib_event,
		     struct cma_req_info *req,
		     struct net_device **net_dev)
{
	struct rdma_bind_list *bind_list;
	struct rdma_id_private *id_priv;
	int err;

	err = cma_save_req_info(ib_event, req);
	if (err)
		return ERR_PTR(err);

	*net_dev = cma_get_net_dev(ib_event, req);
	if (IS_ERR(*net_dev)) {
		if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
			/* Assuming the protocol is AF_IB */
			*net_dev = NULL;
		} else {
			return ERR_CAST(*net_dev);
		}
	}

	mutex_lock(&lock);
	/*
	 * Net namespace might be getting deleted while route lookup,
	 * cm_id lookup is in progress. Therefore, perform netdevice
	 * validation, cm_id lookup under rcu lock.
	 * RCU lock along with netdevice state check, synchronizes with
	 * netdevice migrating to different net namespace and also avoids
	 * case where net namespace doesn't get deleted while lookup is in
	 * progress.
	 * If the device state is not IFF_UP, its properties such as ifindex
	 * and nd_net cannot be trusted to remain valid without rcu lock.
	 * net/core/dev.c change_net_namespace() ensures to synchronize with
	 * ongoing operations on net device after device is closed using
	 * synchronize_net().
	 */
	rcu_read_lock();
	if (*net_dev) {
		/*
		 * If netdevice is down, it is likely that it is administratively
		 * down or it might be migrating to different namespace.
		 * In that case avoid further processing, as the net namespace
		 * or ifindex may change.
		 */
		if (((*net_dev)->flags & IFF_UP) == 0) {
			id_priv = ERR_PTR(-EHOSTUNREACH);
			goto err;
		}

		if (!validate_net_dev(*net_dev,
				      (struct sockaddr *)&req->src_addr_storage,
				      (struct sockaddr *)&req->listen_addr_storage)) {
			id_priv = ERR_PTR(-EHOSTUNREACH);
			goto err;
		}
	}

	bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
				rdma_ps_from_service_id(req->service_id),
				cma_port_from_service_id(req->service_id));
	id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
err:
	rcu_read_unlock();
	mutex_unlock(&lock);
	if (IS_ERR(id_priv) && *net_dev) {
		dev_put(*net_dev);
		*net_dev = NULL;
	}
	return id_priv;
}

/* AF_IB carries no CMA header in the private data; IP families do. */
static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv)
{
	return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
}

/* Cancel an outstanding SA path query, if the transport uses the SA. */
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
	}
}

/*
 * Tear down all per-device child listeners of a wildcard listener.
 * The global 'lock' is dropped around rdma_destroy_id() because that
 * call can itself take 'lock'; each child is unlinked first so the
 * loop makes progress across the unlock window.
 */
static void _cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	lockdep_assert_held(&lock);

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	list_del_init(&id_priv->listen_any_item);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv =
			list_first_entry(&id_priv->listen_list,
					 struct rdma_id_private, listen_item);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->device_item);
		list_del_init(&dev_id_priv->listen_item);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
}

/* Locked wrapper around _cma_cancel_listens(). */
static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	mutex_lock(&lock);
	_cma_cancel_listens(id_priv);
	mutex_unlock(&lock);
}

/*
 * Abort whatever asynchronous operation is pending for the cm_id's
 * current @state before it is destroyed.
 */
static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum rdma_cm_state state)
{
	switch (state) {
	case RDMA_CM_ADDR_QUERY:
		/*
		 * We can avoid doing the rdma_addr_cancel() based on state,
		 * only RDMA_CM_ADDR_QUERY has a work that could still execute.
		 * Notice that the addr_handler work could still be exiting
		 * outside this state, however due to the interaction with the
		 * handler_mutex the work is guaranteed not to touch id_priv
		 * during exit.
		 */
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case RDMA_CM_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case RDMA_CM_LISTEN:
		if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}

/*
 * Drop the id from its port bind list; free the list (and the port
 * reservation) when this was the last owner.
 */
static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;
	struct net *net = id_priv->id.route.addr.dev_addr.net;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		cma_ps_remove(net, bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}

/*
 * Release one multicast join: free the SA multicast handle (IB) and,
 * for RoCE full-member joins, send an IGMP/MLD leave on the bound
 * netdevice before cancelling the deferred join work.
 */
static void destroy_mc(struct rdma_id_private *id_priv,
		       struct cma_multicast *mc)
{
	bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);

	if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
		ib_sa_free_multicast(mc->sa_mc);

	if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
		struct rdma_dev_addr *dev_addr =
			&id_priv->id.route.addr.dev_addr;
		struct net_device *ndev = NULL;

		if (dev_addr->bound_dev_if)
			ndev = dev_get_by_index(dev_addr->net,
						dev_addr->bound_dev_if);
		if (ndev && !send_only) {
			enum ib_gid_type gid_type;
			union ib_gid mgid;

			gid_type = id_priv->cma_dev->default_gid_type
					   [id_priv->id.port_num -
					    rdma_start_port(
						    id_priv->cma_dev->device)];
			cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid,
					  gid_type);
			cma_igmp_send(ndev, &mgid, false);
		}
		/* dev_put() tolerates a NULL ndev */
		dev_put(ndev);

		cancel_work_sync(&mc->iboe_join.work);
	}
	kfree(mc);
}

/* Leave and free every multicast group joined through this id. */
static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = list_first_entry(&id_priv->mc_list, struct cma_multicast,
				      list);
		list_del(&mc->list);
		destroy_mc(id_priv, mc);
	}
}

/*
 * Final teardown of an rdma_cm id: cancel pending work for @state,
 * unhook from restrack and the listen tree, destroy the underlying
 * IB/iWarp cm_id, leave multicast groups, release device/port
 * resources, then wait for all references to drain before freeing.
 */
static void _destroy_id(struct rdma_id_private *id_priv,
			enum rdma_cm_state state)
{
	cma_cancel_operation(id_priv, state);

	rdma_restrack_del(&id_priv->res);
	cma_remove_id_from_tree(id_priv);
	if (id_priv->cma_dev) {
		if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.ib)
				ib_destroy_cm_id(id_priv->cm_id.ib);
		} else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.iw)
				iw_destroy_cm_id(id_priv->cm_id.iw);
		}
		cma_leave_mc_groups(id_priv);
		cma_release_dev(id_priv);
	}

	cma_release_port(id_priv);
	cma_id_put(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_id_put(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv->id.route.path_rec_inbound);
	kfree(id_priv->id.route.path_rec_outbound);

	put_net(id_priv->id.route.addr.dev_addr.net);
	kfree(id_priv);
}

/*
 * destroy an ID from within the handler_mutex. This ensures that no other
 * handlers can start running concurrently.
 */
static void destroy_id_handler_unlock(struct rdma_id_private *id_priv)
	__releases(&idprv->handler_mutex)
{
	enum rdma_cm_state state;
	unsigned long flags;

	trace_cm_id_destroy(id_priv);

	/*
	 * Setting the state to destroyed under the handler mutex provides a
	 * fence against calling handler callbacks. If this is invoked due to
	 * the failure of a handler callback then it guarantees that no future
	 * handlers will be called.
2053f6a9d47aSJason Gunthorpe */ 2054f6a9d47aSJason Gunthorpe lockdep_assert_held(&id_priv->handler_mutex); 2055f6a9d47aSJason Gunthorpe spin_lock_irqsave(&id_priv->lock, flags); 2056f6a9d47aSJason Gunthorpe state = id_priv->state; 2057f6a9d47aSJason Gunthorpe id_priv->state = RDMA_CM_DESTROYING; 2058f6a9d47aSJason Gunthorpe spin_unlock_irqrestore(&id_priv->lock, flags); 2059f6a9d47aSJason Gunthorpe mutex_unlock(&id_priv->handler_mutex); 2060f6a9d47aSJason Gunthorpe _destroy_id(id_priv, state); 2061f6a9d47aSJason Gunthorpe } 2062f6a9d47aSJason Gunthorpe 2063f6a9d47aSJason Gunthorpe void rdma_destroy_id(struct rdma_cm_id *id) 2064f6a9d47aSJason Gunthorpe { 2065f6a9d47aSJason Gunthorpe struct rdma_id_private *id_priv = 2066f6a9d47aSJason Gunthorpe container_of(id, struct rdma_id_private, id); 2067f6a9d47aSJason Gunthorpe 2068f6a9d47aSJason Gunthorpe mutex_lock(&id_priv->handler_mutex); 2069f6a9d47aSJason Gunthorpe destroy_id_handler_unlock(id_priv); 2070f6a9d47aSJason Gunthorpe } 2071e51060f0SSean Hefty EXPORT_SYMBOL(rdma_destroy_id); 2072e51060f0SSean Hefty 2073e51060f0SSean Hefty static int cma_rep_recv(struct rdma_id_private *id_priv) 2074e51060f0SSean Hefty { 2075e51060f0SSean Hefty int ret; 2076e51060f0SSean Hefty 20775851bb89SSean Hefty ret = cma_modify_qp_rtr(id_priv, NULL); 2078e51060f0SSean Hefty if (ret) 2079e51060f0SSean Hefty goto reject; 2080e51060f0SSean Hefty 20815851bb89SSean Hefty ret = cma_modify_qp_rts(id_priv, NULL); 2082e51060f0SSean Hefty if (ret) 2083e51060f0SSean Hefty goto reject; 2084e51060f0SSean Hefty 2085ed999f82SChuck Lever trace_cm_send_rtu(id_priv); 2086e51060f0SSean Hefty ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0); 2087e51060f0SSean Hefty if (ret) 2088e51060f0SSean Hefty goto reject; 2089e51060f0SSean Hefty 2090e51060f0SSean Hefty return 0; 2091e51060f0SSean Hefty reject: 2092498683c6SMoni Shoua pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. 
status %d\n", ret); 2093c5483388SSean Hefty cma_modify_qp_err(id_priv); 2094ed999f82SChuck Lever trace_cm_send_rej(id_priv); 2095e51060f0SSean Hefty ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, 2096e51060f0SSean Hefty NULL, 0, NULL, 0); 2097e51060f0SSean Hefty return ret; 2098e51060f0SSean Hefty } 2099e51060f0SSean Hefty 2100a1b1b61fSSean Hefty static void cma_set_rep_event_data(struct rdma_cm_event *event, 2101e7ff98aeSParav Pandit const struct ib_cm_rep_event_param *rep_data, 2102a1b1b61fSSean Hefty void *private_data) 2103a1b1b61fSSean Hefty { 2104a1b1b61fSSean Hefty event->param.conn.private_data = private_data; 2105a1b1b61fSSean Hefty event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE; 2106a1b1b61fSSean Hefty event->param.conn.responder_resources = rep_data->responder_resources; 2107a1b1b61fSSean Hefty event->param.conn.initiator_depth = rep_data->initiator_depth; 2108a1b1b61fSSean Hefty event->param.conn.flow_control = rep_data->flow_control; 2109a1b1b61fSSean Hefty event->param.conn.rnr_retry_count = rep_data->rnr_retry_count; 2110a1b1b61fSSean Hefty event->param.conn.srq = rep_data->srq; 2111a1b1b61fSSean Hefty event->param.conn.qp_num = rep_data->remote_qpn; 2112a20652e1SLeon Romanovsky 2113a20652e1SLeon Romanovsky event->ece.vendor_id = rep_data->ece.vendor_id; 2114a20652e1SLeon Romanovsky event->ece.attr_mod = rep_data->ece.attr_mod; 2115a1b1b61fSSean Hefty } 2116a1b1b61fSSean Hefty 2117ed999f82SChuck Lever static int cma_cm_event_handler(struct rdma_id_private *id_priv, 2118ed999f82SChuck Lever struct rdma_cm_event *event) 2119ed999f82SChuck Lever { 2120ed999f82SChuck Lever int ret; 2121ed999f82SChuck Lever 21223647a28dSJason Gunthorpe lockdep_assert_held(&id_priv->handler_mutex); 21233647a28dSJason Gunthorpe 2124ed999f82SChuck Lever trace_cm_event_handler(id_priv, event); 2125ed999f82SChuck Lever ret = id_priv->id.event_handler(&id_priv->id, event); 2126ed999f82SChuck Lever trace_cm_event_done(id_priv, event, ret); 
2127ed999f82SChuck Lever return ret; 2128ed999f82SChuck Lever } 2129ed999f82SChuck Lever 2130e7ff98aeSParav Pandit static int cma_ib_handler(struct ib_cm_id *cm_id, 2131e7ff98aeSParav Pandit const struct ib_cm_event *ib_event) 2132e51060f0SSean Hefty { 2133e51060f0SSean Hefty struct rdma_id_private *id_priv = cm_id->context; 21347582df82SParav Pandit struct rdma_cm_event event = {}; 21352a7cec53SJason Gunthorpe enum rdma_cm_state state; 2136f6a9d47aSJason Gunthorpe int ret; 2137e51060f0SSean Hefty 213837e07cdaSBart Van Assche mutex_lock(&id_priv->handler_mutex); 21392a7cec53SJason Gunthorpe state = READ_ONCE(id_priv->state); 214038ca83a5SAmir Vadai if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && 21412a7cec53SJason Gunthorpe state != RDMA_CM_CONNECT) || 214238ca83a5SAmir Vadai (ib_event->event == IB_CM_TIMEWAIT_EXIT && 21432a7cec53SJason Gunthorpe state != RDMA_CM_DISCONNECT)) 214437e07cdaSBart Van Assche goto out; 2145e51060f0SSean Hefty 2146e51060f0SSean Hefty switch (ib_event->event) { 2147e51060f0SSean Hefty case IB_CM_REQ_ERROR: 2148e51060f0SSean Hefty case IB_CM_REP_ERROR: 2149a1b1b61fSSean Hefty event.event = RDMA_CM_EVENT_UNREACHABLE; 2150a1b1b61fSSean Hefty event.status = -ETIMEDOUT; 2151e51060f0SSean Hefty break; 2152e51060f0SSean Hefty case IB_CM_REP_RECEIVED: 21532a7cec53SJason Gunthorpe if (state == RDMA_CM_CONNECT && 2154ed999f82SChuck Lever (id_priv->id.qp_type != IB_QPT_UD)) { 2155ed999f82SChuck Lever trace_cm_send_mra(id_priv); 215661c0ddbeSMoni Shoua ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); 2157ed999f82SChuck Lever } 215801602f11SSean Hefty if (id_priv->id.qp) { 2159a1b1b61fSSean Hefty event.status = cma_rep_recv(id_priv); 2160a1b1b61fSSean Hefty event.event = event.status ? 
RDMA_CM_EVENT_CONNECT_ERROR : 2161e51060f0SSean Hefty RDMA_CM_EVENT_ESTABLISHED; 216201602f11SSean Hefty } else { 2163a1b1b61fSSean Hefty event.event = RDMA_CM_EVENT_CONNECT_RESPONSE; 216401602f11SSean Hefty } 2165a1b1b61fSSean Hefty cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd, 2166a1b1b61fSSean Hefty ib_event->private_data); 2167e51060f0SSean Hefty break; 2168e51060f0SSean Hefty case IB_CM_RTU_RECEIVED: 21690fe313b0SSean Hefty case IB_CM_USER_ESTABLISHED: 21700fe313b0SSean Hefty event.event = RDMA_CM_EVENT_ESTABLISHED; 2171e51060f0SSean Hefty break; 2172e51060f0SSean Hefty case IB_CM_DREQ_ERROR: 2173df561f66SGustavo A. R. Silva event.status = -ETIMEDOUT; 2174df561f66SGustavo A. R. Silva fallthrough; 2175e51060f0SSean Hefty case IB_CM_DREQ_RECEIVED: 2176e51060f0SSean Hefty case IB_CM_DREP_RECEIVED: 2177550e5ca7SNir Muchtar if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT, 2178550e5ca7SNir Muchtar RDMA_CM_DISCONNECT)) 2179e51060f0SSean Hefty goto out; 2180a1b1b61fSSean Hefty event.event = RDMA_CM_EVENT_DISCONNECTED; 2181e51060f0SSean Hefty break; 2182e51060f0SSean Hefty case IB_CM_TIMEWAIT_EXIT: 218338ca83a5SAmir Vadai event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT; 218438ca83a5SAmir Vadai break; 2185e51060f0SSean Hefty case IB_CM_MRA_RECEIVED: 2186e51060f0SSean Hefty /* ignore event */ 2187e51060f0SSean Hefty goto out; 2188e51060f0SSean Hefty case IB_CM_REJ_RECEIVED: 2189498683c6SMoni Shoua pr_debug_ratelimited("RDMA CM: REJECTED: %s\n", rdma_reject_msg(&id_priv->id, 2190498683c6SMoni Shoua ib_event->param.rej_rcvd.reason)); 2191c5483388SSean Hefty cma_modify_qp_err(id_priv); 2192a1b1b61fSSean Hefty event.status = ib_event->param.rej_rcvd.reason; 2193a1b1b61fSSean Hefty event.event = RDMA_CM_EVENT_REJECTED; 2194a1b1b61fSSean Hefty event.param.conn.private_data = ib_event->private_data; 2195a1b1b61fSSean Hefty event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; 2196e51060f0SSean Hefty break; 2197e51060f0SSean Hefty default: 2198aba25a3eSParav Pandit 
pr_err("RDMA CMA: unexpected IB CM event: %d\n", 2199e51060f0SSean Hefty ib_event->event); 2200e51060f0SSean Hefty goto out; 2201e51060f0SSean Hefty } 2202e51060f0SSean Hefty 2203ed999f82SChuck Lever ret = cma_cm_event_handler(id_priv, &event); 2204e51060f0SSean Hefty if (ret) { 2205e51060f0SSean Hefty /* Destroy the CM ID by returning a non-zero value. */ 2206e51060f0SSean Hefty id_priv->cm_id.ib = NULL; 2207f6a9d47aSJason Gunthorpe destroy_id_handler_unlock(id_priv); 2208e51060f0SSean Hefty return ret; 2209e51060f0SSean Hefty } 2210e51060f0SSean Hefty out: 2211de910bd9SOr Gerlitz mutex_unlock(&id_priv->handler_mutex); 2212f6a9d47aSJason Gunthorpe return 0; 2213e51060f0SSean Hefty } 2214e51060f0SSean Hefty 2215e7ff98aeSParav Pandit static struct rdma_id_private * 221685463316SParav Pandit cma_ib_new_conn_id(const struct rdma_cm_id *listen_id, 2217e7ff98aeSParav Pandit const struct ib_cm_event *ib_event, 22180b3ca768SHaggai Eran struct net_device *net_dev) 2219e51060f0SSean Hefty { 222000313983SSteve Wise struct rdma_id_private *listen_id_priv; 2221e51060f0SSean Hefty struct rdma_id_private *id_priv; 2222e51060f0SSean Hefty struct rdma_cm_id *id; 2223e51060f0SSean Hefty struct rdma_route *rt; 22240c505f70SHaggai Eran const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; 22259fdca4daSDasaratharaman Chandramouli struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path; 2226d3957b86SMajd Dibbiny const __be64 service_id = 2227d3957b86SMajd Dibbiny ib_event->param.req_rcvd.primary_path->service_id; 222864c5e613SOr Gerlitz int ret; 2229e51060f0SSean Hefty 223000313983SSteve Wise listen_id_priv = container_of(listen_id, struct rdma_id_private, id); 2231b09c4d70SLeon Romanovsky id_priv = __rdma_create_id(listen_id->route.addr.dev_addr.net, 2232fa20105eSGuy Shapiro listen_id->event_handler, listen_id->context, 2233b09c4d70SLeon Romanovsky listen_id->ps, 2234b09c4d70SLeon Romanovsky ib_event->param.req_rcvd.qp_type, 2235b09c4d70SLeon Romanovsky 
listen_id_priv); 2236b09c4d70SLeon Romanovsky if (IS_ERR(id_priv)) 22370c9361fcSJack Morgenstein return NULL; 22383f168d2bSKrishna Kumar 2239b09c4d70SLeon Romanovsky id = &id_priv->id; 22400c505f70SHaggai Eran if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, 22410c505f70SHaggai Eran (struct sockaddr *)&id->route.addr.dst_addr, 22420c505f70SHaggai Eran listen_id, ib_event, ss_family, service_id)) 2243fbaa1a6dSSean Hefty goto err; 22443f168d2bSKrishna Kumar 22453f168d2bSKrishna Kumar rt = &id->route; 2246bf9a9928SMark Zhang rt->num_pri_alt_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1; 2247bf9a9928SMark Zhang rt->path_rec = kmalloc_array(rt->num_pri_alt_paths, 2248bf9a9928SMark Zhang sizeof(*rt->path_rec), GFP_KERNEL); 22493f168d2bSKrishna Kumar if (!rt->path_rec) 22500c9361fcSJack Morgenstein goto err; 22513f168d2bSKrishna Kumar 22529fdca4daSDasaratharaman Chandramouli rt->path_rec[0] = *path; 2253bf9a9928SMark Zhang if (rt->num_pri_alt_paths == 2) 2254e51060f0SSean Hefty rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path; 2255e51060f0SSean Hefty 22560b3ca768SHaggai Eran if (net_dev) { 225777addc52SParav Pandit rdma_copy_src_l2_addr(&rt->addr.dev_addr, net_dev); 22580b3ca768SHaggai Eran } else { 2259b8cab5daSHaggai Eran if (!cma_protocol_roce(listen_id) && 2260b8cab5daSHaggai Eran cma_any_addr(cma_src_addr(id_priv))) { 2261b8cab5daSHaggai Eran rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND; 2262b8cab5daSHaggai Eran rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid); 2263b8cab5daSHaggai Eran ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey)); 2264b8cab5daSHaggai Eran } else if (!cma_any_addr(cma_src_addr(id_priv))) { 2265b8cab5daSHaggai Eran ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr); 2266b8cab5daSHaggai Eran if (ret) 2267b8cab5daSHaggai Eran goto err; 2268b8cab5daSHaggai Eran } 22696f8372b6SSean Hefty } 22706f8372b6SSean Hefty rdma_addr_set_dgid(&rt->addr.dev_addr, 
&rt->path_rec[0].dgid); 2271e51060f0SSean Hefty 2272550e5ca7SNir Muchtar id_priv->state = RDMA_CM_CONNECT; 2273e51060f0SSean Hefty return id_priv; 22743f168d2bSKrishna Kumar 22753f168d2bSKrishna Kumar err: 22760c9361fcSJack Morgenstein rdma_destroy_id(id); 2277e51060f0SSean Hefty return NULL; 2278e51060f0SSean Hefty } 2279e51060f0SSean Hefty 2280e7ff98aeSParav Pandit static struct rdma_id_private * 228185463316SParav Pandit cma_ib_new_udp_id(const struct rdma_cm_id *listen_id, 2282e7ff98aeSParav Pandit const struct ib_cm_event *ib_event, 22830b3ca768SHaggai Eran struct net_device *net_dev) 2284628e5f6dSSean Hefty { 2285e7ff98aeSParav Pandit const struct rdma_id_private *listen_id_priv; 2286628e5f6dSSean Hefty struct rdma_id_private *id_priv; 2287628e5f6dSSean Hefty struct rdma_cm_id *id; 22880c505f70SHaggai Eran const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; 2289fa20105eSGuy Shapiro struct net *net = listen_id->route.addr.dev_addr.net; 2290628e5f6dSSean Hefty int ret; 2291628e5f6dSSean Hefty 229200313983SSteve Wise listen_id_priv = container_of(listen_id, struct rdma_id_private, id); 2293b09c4d70SLeon Romanovsky id_priv = __rdma_create_id(net, listen_id->event_handler, 2294b09c4d70SLeon Romanovsky listen_id->context, listen_id->ps, IB_QPT_UD, 2295b09c4d70SLeon Romanovsky listen_id_priv); 2296b09c4d70SLeon Romanovsky if (IS_ERR(id_priv)) 2297628e5f6dSSean Hefty return NULL; 2298628e5f6dSSean Hefty 2299b09c4d70SLeon Romanovsky id = &id_priv->id; 23000c505f70SHaggai Eran if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, 23010c505f70SHaggai Eran (struct sockaddr *)&id->route.addr.dst_addr, 23020c505f70SHaggai Eran listen_id, ib_event, ss_family, 23030c505f70SHaggai Eran ib_event->param.sidr_req_rcvd.service_id)) 2304628e5f6dSSean Hefty goto err; 2305628e5f6dSSean Hefty 23060b3ca768SHaggai Eran if (net_dev) { 230777addc52SParav Pandit rdma_copy_src_l2_addr(&id->route.addr.dev_addr, net_dev); 23080b3ca768SHaggai Eran } else { 
2309b8cab5daSHaggai Eran if (!cma_any_addr(cma_src_addr(id_priv))) { 2310b8cab5daSHaggai Eran ret = cma_translate_addr(cma_src_addr(id_priv), 23110b3ca768SHaggai Eran &id->route.addr.dev_addr); 2312b8cab5daSHaggai Eran if (ret) 2313b8cab5daSHaggai Eran goto err; 2314b8cab5daSHaggai Eran } 23156f8372b6SSean Hefty } 2316628e5f6dSSean Hefty 2317550e5ca7SNir Muchtar id_priv->state = RDMA_CM_CONNECT; 2318628e5f6dSSean Hefty return id_priv; 2319628e5f6dSSean Hefty err: 2320628e5f6dSSean Hefty rdma_destroy_id(id); 2321628e5f6dSSean Hefty return NULL; 2322628e5f6dSSean Hefty } 2323628e5f6dSSean Hefty 2324a1b1b61fSSean Hefty static void cma_set_req_event_data(struct rdma_cm_event *event, 2325e7ff98aeSParav Pandit const struct ib_cm_req_event_param *req_data, 2326a1b1b61fSSean Hefty void *private_data, int offset) 2327a1b1b61fSSean Hefty { 2328a1b1b61fSSean Hefty event->param.conn.private_data = private_data + offset; 2329a1b1b61fSSean Hefty event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset; 2330a1b1b61fSSean Hefty event->param.conn.responder_resources = req_data->responder_resources; 2331a1b1b61fSSean Hefty event->param.conn.initiator_depth = req_data->initiator_depth; 2332a1b1b61fSSean Hefty event->param.conn.flow_control = req_data->flow_control; 2333a1b1b61fSSean Hefty event->param.conn.retry_count = req_data->retry_count; 2334a1b1b61fSSean Hefty event->param.conn.rnr_retry_count = req_data->rnr_retry_count; 2335a1b1b61fSSean Hefty event->param.conn.srq = req_data->srq; 2336a1b1b61fSSean Hefty event->param.conn.qp_num = req_data->remote_qpn; 2337a20652e1SLeon Romanovsky 2338a20652e1SLeon Romanovsky event->ece.vendor_id = req_data->ece.vendor_id; 2339a20652e1SLeon Romanovsky event->ece.attr_mod = req_data->ece.attr_mod; 2340a1b1b61fSSean Hefty } 2341a1b1b61fSSean Hefty 234285463316SParav Pandit static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id, 2343e7ff98aeSParav Pandit const struct ib_cm_event *ib_event) 23449595480cSHefty, Sean { 
23454dd81e89SSean Hefty return (((ib_event->event == IB_CM_REQ_RECEIVED) && 23469595480cSHefty, Sean (ib_event->param.req_rcvd.qp_type == id->qp_type)) || 23479595480cSHefty, Sean ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) && 23489595480cSHefty, Sean (id->qp_type == IB_QPT_UD)) || 23499595480cSHefty, Sean (!id->qp_type)); 23509595480cSHefty, Sean } 23519595480cSHefty, Sean 235285463316SParav Pandit static int cma_ib_req_handler(struct ib_cm_id *cm_id, 2353e7ff98aeSParav Pandit const struct ib_cm_event *ib_event) 2354e51060f0SSean Hefty { 235537e07cdaSBart Van Assche struct rdma_id_private *listen_id, *conn_id = NULL; 23567582df82SParav Pandit struct rdma_cm_event event = {}; 235741ab1cb7SParav Pandit struct cma_req_info req = {}; 23580b3ca768SHaggai Eran struct net_device *net_dev; 2359c0b64f58SBart Van Assche u8 offset; 2360c0b64f58SBart Van Assche int ret; 2361e51060f0SSean Hefty 236241ab1cb7SParav Pandit listen_id = cma_ib_id_from_event(cm_id, ib_event, &req, &net_dev); 23634c21b5bcSHaggai Eran if (IS_ERR(listen_id)) 23644c21b5bcSHaggai Eran return PTR_ERR(listen_id); 23654c21b5bcSHaggai Eran 2366ed999f82SChuck Lever trace_cm_req_handler(listen_id, ib_event->event); 236785463316SParav Pandit if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) { 23680b3ca768SHaggai Eran ret = -EINVAL; 23690b3ca768SHaggai Eran goto net_dev_put; 23700b3ca768SHaggai Eran } 23719595480cSHefty, Sean 237237e07cdaSBart Van Assche mutex_lock(&listen_id->handler_mutex); 2373d490ee52SJason Gunthorpe if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) { 23740b3ca768SHaggai Eran ret = -ECONNABORTED; 2375f6a9d47aSJason Gunthorpe goto err_unlock; 23760b3ca768SHaggai Eran } 2377e51060f0SSean Hefty 2378e8160e15SSean Hefty offset = cma_user_data_offset(listen_id); 2379628e5f6dSSean Hefty event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 23809595480cSHefty, Sean if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) { 238185463316SParav Pandit conn_id = cma_ib_new_udp_id(&listen_id->id, ib_event, 
net_dev); 2382628e5f6dSSean Hefty event.param.ud.private_data = ib_event->private_data + offset; 2383628e5f6dSSean Hefty event.param.ud.private_data_len = 2384628e5f6dSSean Hefty IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset; 2385628e5f6dSSean Hefty } else { 238685463316SParav Pandit conn_id = cma_ib_new_conn_id(&listen_id->id, ib_event, net_dev); 2387628e5f6dSSean Hefty cma_set_req_event_data(&event, &ib_event->param.req_rcvd, 2388628e5f6dSSean Hefty ib_event->private_data, offset); 2389628e5f6dSSean Hefty } 2390e51060f0SSean Hefty if (!conn_id) { 2391e51060f0SSean Hefty ret = -ENOMEM; 2392f6a9d47aSJason Gunthorpe goto err_unlock; 2393e51060f0SSean Hefty } 2394e51060f0SSean Hefty 2395de910bd9SOr Gerlitz mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); 239641ab1cb7SParav Pandit ret = cma_ib_acquire_dev(conn_id, listen_id, &req); 2397f6a9d47aSJason Gunthorpe if (ret) { 2398f6a9d47aSJason Gunthorpe destroy_id_handler_unlock(conn_id); 2399f6a9d47aSJason Gunthorpe goto err_unlock; 2400f6a9d47aSJason Gunthorpe } 2401e51060f0SSean Hefty 2402e51060f0SSean Hefty conn_id->cm_id.ib = cm_id; 2403e51060f0SSean Hefty cm_id->context = conn_id; 2404e51060f0SSean Hefty cm_id->cm_handler = cma_ib_handler; 2405e51060f0SSean Hefty 2406ed999f82SChuck Lever ret = cma_cm_event_handler(conn_id, &event); 2407f6a9d47aSJason Gunthorpe if (ret) { 2408f6a9d47aSJason Gunthorpe /* Destroy the CM ID by returning a non-zero value. 
*/ 2409f6a9d47aSJason Gunthorpe conn_id->cm_id.ib = NULL; 2410f6a9d47aSJason Gunthorpe mutex_unlock(&listen_id->handler_mutex); 2411f6a9d47aSJason Gunthorpe destroy_id_handler_unlock(conn_id); 2412f6a9d47aSJason Gunthorpe goto net_dev_put; 2413f6a9d47aSJason Gunthorpe } 2414f6a9d47aSJason Gunthorpe 24152a7cec53SJason Gunthorpe if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT && 24162a7cec53SJason Gunthorpe conn_id->id.qp_type != IB_QPT_UD) { 2417ed999f82SChuck Lever trace_cm_send_mra(cm_id->context); 2418ead595aeSSean Hefty ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); 2419ed999f82SChuck Lever } 2420de910bd9SOr Gerlitz mutex_unlock(&conn_id->handler_mutex); 2421a1a733f6SKrishna Kumar 2422f6a9d47aSJason Gunthorpe err_unlock: 2423de910bd9SOr Gerlitz mutex_unlock(&listen_id->handler_mutex); 24240b3ca768SHaggai Eran 24250b3ca768SHaggai Eran net_dev_put: 24260b3ca768SHaggai Eran if (net_dev) 24270b3ca768SHaggai Eran dev_put(net_dev); 24280b3ca768SHaggai Eran 2429e51060f0SSean Hefty return ret; 2430e51060f0SSean Hefty } 2431e51060f0SSean Hefty 2432cf53936fSSean Hefty __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr) 2433e51060f0SSean Hefty { 2434496ce3ceSSean Hefty if (addr->sa_family == AF_IB) 2435496ce3ceSSean Hefty return ((struct sockaddr_ib *) addr)->sib_sid; 2436496ce3ceSSean Hefty 2437cf53936fSSean Hefty return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr))); 2438e51060f0SSean Hefty } 2439cf53936fSSean Hefty EXPORT_SYMBOL(rdma_get_service_id); 2440e51060f0SSean Hefty 2441411460acSParav Pandit void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid, 2442411460acSParav Pandit union ib_gid *dgid) 2443411460acSParav Pandit { 2444411460acSParav Pandit struct rdma_addr *addr = &cm_id->route.addr; 2445411460acSParav Pandit 2446411460acSParav Pandit if (!cm_id->device) { 2447411460acSParav Pandit if (sgid) 2448411460acSParav Pandit memset(sgid, 0, sizeof(*sgid)); 2449411460acSParav Pandit if (dgid) 2450411460acSParav 
Pandit memset(dgid, 0, sizeof(*dgid)); 2451411460acSParav Pandit return; 2452411460acSParav Pandit } 2453411460acSParav Pandit 2454411460acSParav Pandit if (rdma_protocol_roce(cm_id->device, cm_id->port_num)) { 2455411460acSParav Pandit if (sgid) 2456411460acSParav Pandit rdma_ip2gid((struct sockaddr *)&addr->src_addr, sgid); 2457411460acSParav Pandit if (dgid) 2458411460acSParav Pandit rdma_ip2gid((struct sockaddr *)&addr->dst_addr, dgid); 2459411460acSParav Pandit } else { 2460411460acSParav Pandit if (sgid) 2461411460acSParav Pandit rdma_addr_get_sgid(&addr->dev_addr, sgid); 2462411460acSParav Pandit if (dgid) 2463411460acSParav Pandit rdma_addr_get_dgid(&addr->dev_addr, dgid); 2464411460acSParav Pandit } 2465411460acSParav Pandit } 2466411460acSParav Pandit EXPORT_SYMBOL(rdma_read_gids); 2467411460acSParav Pandit 246807ebafbaSTom Tucker static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) 246907ebafbaSTom Tucker { 247007ebafbaSTom Tucker struct rdma_id_private *id_priv = iw_id->context; 24717582df82SParav Pandit struct rdma_cm_event event = {}; 247207ebafbaSTom Tucker int ret = 0; 247324d44a39SSteve Wise struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; 247424d44a39SSteve Wise struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; 247507ebafbaSTom Tucker 247637e07cdaSBart Van Assche mutex_lock(&id_priv->handler_mutex); 24772a7cec53SJason Gunthorpe if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) 247837e07cdaSBart Van Assche goto out; 247907ebafbaSTom Tucker 248007ebafbaSTom Tucker switch (iw_event->event) { 248107ebafbaSTom Tucker case IW_CM_EVENT_CLOSE: 2482a1b1b61fSSean Hefty event.event = RDMA_CM_EVENT_DISCONNECTED; 248307ebafbaSTom Tucker break; 248407ebafbaSTom Tucker case IW_CM_EVENT_CONNECT_REPLY: 248524d44a39SSteve Wise memcpy(cma_src_addr(id_priv), laddr, 248624d44a39SSteve Wise rdma_addr_size(laddr)); 248724d44a39SSteve Wise memcpy(cma_dst_addr(id_priv), raddr, 248824d44a39SSteve Wise 
rdma_addr_size(raddr)); 2489881a045fSSteve Wise switch (iw_event->status) { 2490881a045fSSteve Wise case 0: 2491a1b1b61fSSean Hefty event.event = RDMA_CM_EVENT_ESTABLISHED; 24923ebeebc3SKumar Sanghvi event.param.conn.initiator_depth = iw_event->ird; 24933ebeebc3SKumar Sanghvi event.param.conn.responder_resources = iw_event->ord; 249407ebafbaSTom Tucker break; 2495881a045fSSteve Wise case -ECONNRESET: 2496881a045fSSteve Wise case -ECONNREFUSED: 2497881a045fSSteve Wise event.event = RDMA_CM_EVENT_REJECTED; 2498881a045fSSteve Wise break; 2499881a045fSSteve Wise case -ETIMEDOUT: 2500881a045fSSteve Wise event.event = RDMA_CM_EVENT_UNREACHABLE; 2501881a045fSSteve Wise break; 2502881a045fSSteve Wise default: 2503881a045fSSteve Wise event.event = RDMA_CM_EVENT_CONNECT_ERROR; 2504881a045fSSteve Wise break; 2505881a045fSSteve Wise } 2506881a045fSSteve Wise break; 250707ebafbaSTom Tucker case IW_CM_EVENT_ESTABLISHED: 2508a1b1b61fSSean Hefty event.event = RDMA_CM_EVENT_ESTABLISHED; 25093ebeebc3SKumar Sanghvi event.param.conn.initiator_depth = iw_event->ird; 25103ebeebc3SKumar Sanghvi event.param.conn.responder_resources = iw_event->ord; 251107ebafbaSTom Tucker break; 251207ebafbaSTom Tucker default: 2513671a6cc2SLeon Romanovsky goto out; 251407ebafbaSTom Tucker } 251507ebafbaSTom Tucker 2516a1b1b61fSSean Hefty event.status = iw_event->status; 2517a1b1b61fSSean Hefty event.param.conn.private_data = iw_event->private_data; 2518a1b1b61fSSean Hefty event.param.conn.private_data_len = iw_event->private_data_len; 2519ed999f82SChuck Lever ret = cma_cm_event_handler(id_priv, &event); 252007ebafbaSTom Tucker if (ret) { 252107ebafbaSTom Tucker /* Destroy the CM ID by returning a non-zero value. 
 */
	id_priv->cm_id.iw = NULL;
	destroy_id_handler_unlock(id_priv);
	return ret;
}

out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

/*
 * Handle an incoming iWARP connection request on a listening ID: create a
 * child rdma_cm ID, bind it to a device, and deliver
 * RDMA_CM_EVENT_CONNECT_REQUEST to the listener's event handler.
 */
static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event = {};
	int ret = -ECONNABORTED;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;

	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	event.param.conn.initiator_depth = iw_event->ird;
	event.param.conn.responder_resources = iw_event->ord;

	listen_id = cm_id->context;

	mutex_lock(&listen_id->handler_mutex);
	/* The listener may already be shutting down; abort if so. */
	if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN)
		goto out;

	/* Create a new RDMA id for the new IW CM ID */
	conn_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net,
				   listen_id->id.event_handler,
				   listen_id->id.context, RDMA_PS_TCP,
				   IB_QPT_RC, listen_id);
	if (IS_ERR(conn_id)) {
		ret = -ENOMEM;
		goto out;
	}
	/* Nested class: the listener's handler_mutex is already held. */
	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	conn_id->state = RDMA_CM_CONNECT;

	ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
	if (ret) {
		mutex_unlock(&listen_id->handler_mutex);
		destroy_id_handler_unlock(conn_id);
		return ret;
	}

	ret = cma_iw_acquire_dev(conn_id, listen_id);
	if (ret) {
		mutex_unlock(&listen_id->handler_mutex);
		destroy_id_handler_unlock(conn_id);
		return ret;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
	memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));

	ret = cma_cm_event_handler(conn_id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		mutex_unlock(&listen_id->handler_mutex);
		destroy_id_handler_unlock(conn_id);
		return ret;
	}

	mutex_unlock(&conn_id->handler_mutex);

out:
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}

/*
 * Start listening for IB CM connection requests on the service ID derived
 * from the bound source address.
 */
static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct sockaddr *addr;
	struct ib_cm_id *id;
	__be64 svc_id;

	addr = cma_src_addr(id_priv);
	svc_id = rdma_get_service_id(&id_priv->id, addr);
	id = ib_cm_insert_listen(id_priv->id.device,
				 cma_ib_req_handler, svc_id);
	if (IS_ERR(id))
		return PTR_ERR(id);
	id_priv->cm_id.ib = id;

	return 0;
}

/*
 * Create an iWARP CM listen endpoint mirroring this ID's settings and
 * start listening with the given backlog.
 */
static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct iw_cm_id *id;

	id = iw_create_cm_id(id_priv->id.device,
			     iw_conn_req_handler,
			     id_priv);
	if (IS_ERR(id))
		return PTR_ERR(id);

	/* qp_mutex guards the tos/tos_set pair against concurrent updates. */
	mutex_lock(&id_priv->qp_mutex);
	id->tos = id_priv->tos;
	id->tos_set = id_priv->tos_set;
	mutex_unlock(&id_priv->qp_mutex);
	id->afonly = id_priv->afonly;
	id_priv->cm_id.iw = id;

	memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));

	ret = iw_cm_listen(id_priv->cm_id.iw,
			   backlog);

	if (ret) {
		/* Listen failed; tear down the half-constructed iw id. */
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}

/*
 * Event handler installed on per-device child listeners; forwards events
 * to the owning (wildcard) listen ID's handler with its original context.
 */
static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	/* Listening IDs are always destroyed on removal */
	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
		return -1;

	/* Present the event as if it arrived on the parent listen ID. */
	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	trace_cm_event_handler(id_priv, event);
	return id_priv->id.event_handler(id, event);
}

/*
 * Create and start a per-device child listener mirroring @id_priv.  A
 * failed child cannot be destroyed while "lock" is held, so it is handed
 * back through @to_destroy for the caller to destroy after unlocking.
 */
static int cma_listen_on_dev(struct rdma_id_private *id_priv,
			     struct cma_device *cma_dev,
			     struct rdma_id_private **to_destroy)
{
	struct rdma_id_private *dev_id_priv;
	struct net *net = id_priv->id.route.addr.dev_addr.net;
	int ret;

	lockdep_assert_held(&lock);

	*to_destroy = NULL;
	/* AF_IB listens require IB CM support on the device. */
	if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
		return 0;

	dev_id_priv =
		__rdma_create_id(net, cma_listen_handler, id_priv,
				 id_priv->id.ps, id_priv->id.qp_type, id_priv);
	if (IS_ERR(dev_id_priv))
		return PTR_ERR(dev_id_priv);

	dev_id_priv->state = RDMA_CM_ADDR_BOUND;
	memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));

	_cma_attach_to_dev(dev_id_priv, cma_dev);
	rdma_restrack_add(&dev_id_priv->res);
	/* The child holds a reference on the parent until destroyed. */
	cma_id_get(id_priv);
	dev_id_priv->internal_id = 1;
	dev_id_priv->afonly = id_priv->afonly;
	/* qp_mutex keeps the copied tos/tos_set pair consistent. */
	mutex_lock(&id_priv->qp_mutex);
	dev_id_priv->tos_set = id_priv->tos_set;
	dev_id_priv->tos = id_priv->tos;
	mutex_unlock(&id_priv->qp_mutex);

	ret = rdma_listen(&dev_id_priv->id, id_priv->backlog);
	if (ret)
		goto err_listen;
	list_add_tail(&dev_id_priv->listen_item, &id_priv->listen_list);
	return 0;
err_listen:
	/* Caller must destroy this after releasing lock */
	*to_destroy = dev_id_priv;
	dev_warn(&cma_dev->device->dev, "RDMA CMA: %s, error %d\n", __func__, ret);
	return ret;
}

/*
 * Listen on every currently registered RDMA device by creating one
 * child listener per device; the wildcard ID is tracked on
 * listen_any_list so hot-plugged devices pick it up too.
 */
static int cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *to_destroy;
	struct cma_device *cma_dev;
	int ret;

	mutex_lock(&lock);
	list_add_tail(&id_priv->listen_any_item, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list) {
		ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
		if (ret) {
			/* Prevent racing with cma_process_remove() */
			if (to_destroy)
				list_del_init(&to_destroy->device_item);
			goto err_listen;
		}
	}
	mutex_unlock(&lock);
	return 0;

err_listen:
	_cma_cancel_listens(id_priv);
	mutex_unlock(&lock);
	/* Destruction must happen outside "lock" (see cma_listen_on_dev). */
	if (to_destroy)
		rdma_destroy_id(&to_destroy->id);
	return ret;
}

/*
 * Set the type of service (ToS) to use for connections on this ID; taken
 * under qp_mutex so readers see a consistent tos/tos_set pair.
 */
void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	id_priv->tos = (u8) tos;
	id_priv->tos_set = true;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_set_service_type);

/**
 * rdma_set_ack_timeout() - Set the ack timeout of QP associated
 * with a connection identifier.
 * @id: Communication identifier whose QP the timeout applies to.
 * @timeout: Ack timeout to set a QP, expressed as 4.096 * 2^(timeout) usec.
 *
 * This function should be called before rdma_connect() on active side,
 * and on passive side before rdma_accept(). It is applicable to primary
 * path only. The timeout will affect the local side of the QP, it is not
 * negotiated with remote side and zero disables the timer. In case it is
 * set before rdma_resolve_route, the value will also be used to determine
 * PacketLifeTime for RoCE.
 *
 * Return: 0 for success
 */
int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
{
	struct rdma_id_private *id_priv;

	/* Only connected (RC / XRC initiator) QPs carry an ACK timeout. */
	if (id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_INI)
		return -EINVAL;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	id_priv->timeout = timeout;
	id_priv->timeout_set = true;
	mutex_unlock(&id_priv->qp_mutex);

	return 0;
}
EXPORT_SYMBOL(rdma_set_ack_timeout);

/**
 * rdma_set_min_rnr_timer() - Set the minimum RNR Retry timer of the
 * QP associated with a connection identifier.
 * @id: Communication identifier whose QP the timer applies to.
 * @min_rnr_timer: 5-bit value encoded as Table 45: "Encoding for RNR NAK
 * Timer Field" in the IBTA specification.
 *
 * This function should be called before rdma_connect() on active
 * side, and on passive side before rdma_accept(). The timer value
 * will be associated with the local QP. When it receives a send it is
 * not ready to handle, typically if the receive queue is empty, an RNR
 * Retry NAK is returned to the requester with the min_rnr_timer
 * encoded. The requester will then wait at least the time specified
 * in the NAK before retrying. The default is zero, which translates
 * to a minimum RNR Timer value of 655 ms.
 *
 * Return: 0 for success
 */
int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer)
{
	struct rdma_id_private *id_priv;

	/* It is a five-bit value */
	if (min_rnr_timer & 0xe0)
		return -EINVAL;

	/* Only QPs with a receive responder (RC / XRC target) use RNR NAKs. */
	if (WARN_ON(id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_TGT))
		return -EINVAL;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	id_priv->min_rnr_timer = min_rnr_timer;
	id_priv->min_rnr_timer_set = true;
	mutex_unlock(&id_priv->qp_mutex);

	return 0;
}
EXPORT_SYMBOL(rdma_set_min_rnr_timer);

/*
 * Store @path_rec as the route's inbound path record, allocating the
 * record on first use.  Allocation failure is silently ignored; the
 * inbound record is optional.
 */
static void route_set_path_rec_inbound(struct cma_work *work,
				       struct sa_path_rec *path_rec)
{
	struct rdma_route *route = &work->id->id.route;

	if (!route->path_rec_inbound) {
		route->path_rec_inbound =
			kzalloc(sizeof(*route->path_rec_inbound), GFP_KERNEL);
		if (!route->path_rec_inbound)
			return;
	}

	*route->path_rec_inbound = *path_rec;
}

/*
 * Store @path_rec as the route's outbound path record, allocating the
 * record on first use.  Allocation failure is silently ignored; the
 * outbound record is optional.
 */
static void route_set_path_rec_outbound(struct cma_work *work,
					struct sa_path_rec *path_rec)
{
	struct rdma_route *route = &work->id->id.route;

	if (!route->path_rec_outbound) {
		route->path_rec_outbound =
			kzalloc(sizeof(*route->path_rec_outbound), GFP_KERNEL);
		if (!route->path_rec_outbound)
			return;
	}

	*route->path_rec_outbound = *path_rec;
}

/*
 * Completion callback for the SA path record query issued by
 * cma_query_ib_route().  Sorts the returned records into the primary,
 * inbound and outbound slots, then queues the deferred work that reports
 * ROUTE_RESOLVED (or ROUTE_ERROR on failure) to the user.
 */
static void cma_query_handler(int status, struct sa_path_rec *path_rec,
			      int num_prs, void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;
	int i;

	route = &work->id->id.route;

	if (status)
		goto fail;

	for (i = 0; i < num_prs; i++) {
		/* Unflagged records (or GMP) are the primary path. */
		if (!path_rec[i].flags || (path_rec[i].flags & IB_PATH_GMP))
			*route->path_rec = path_rec[i];
		else if (path_rec[i].flags & IB_PATH_INBOUND)
			route_set_path_rec_inbound(work, &path_rec[i]);
		else if (path_rec[i].flags & IB_PATH_OUTBOUND)
			route_set_path_rec_outbound(work, &path_rec[i]);
	}
	if (!route->path_rec) {
		status = -EINVAL;
		goto fail;
	}

	route->num_pri_alt_paths = 1;
	queue_work(cma_wq, &work->work);
	return;

fail:
	/* Rewind the state machine and report the error asynchronously. */
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
	work->event.status = status;
	pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n",
			     status);
	queue_work(cma_wq, &work->work);
}

/*
 * Issue an asynchronous SA path record query for this ID; the result is
 * delivered to cma_query_handler().
 */
static int cma_query_ib_route(struct rdma_id_private *id_priv,
			      unsigned long timeout_ms, struct cma_work *work)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;
	struct sockaddr_ib *sib;

	memset(&path_rec, 0, sizeof path_rec);

	if (rdma_cap_opa_ah(id_priv->id.device, id_priv->id.port_num))
		path_rec.rec_type = SA_PATH_REC_TYPE_OPA;
	else
		path_rec.rec_type = SA_PATH_REC_TYPE_IB;
	rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
	rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	path_rec.service_id = rdma_get_service_id(&id_priv->id,
						  cma_dst_addr(id_priv));

	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	/* The QoS/traffic-class hint comes from a family-specific field. */
	switch (cma_family(id_priv)) {
	case AF_INET:
		path_rec.qos_class = cpu_to_be16((u16)
	       id_priv->query_id : 0;
}

/*
 * Deferred work that delivers a RoCE (iboe) multicast join event; skipped
 * if the ID is already being torn down.
 */
static void cma_iboe_join_work_handler(struct work_struct *work)
{
	struct cma_multicast *mc =
		container_of(work, struct cma_multicast, iboe_join.work);
	struct rdma_cm_event *event = &mc->iboe_join.event;
	struct rdma_id_private *id_priv = mc->id_priv;
	int ret;

	mutex_lock(&id_priv->handler_mutex);
	if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
	    READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
		goto out_unlock;

	ret = cma_cm_event_handler(id_priv, event);
	WARN_ON(ret);

out_unlock:
	mutex_unlock(&id_priv->handler_mutex);
	if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN)
		rdma_destroy_ah_attr(&event->param.ud.ah_attr);
}

/*
 * Generic deferred-work handler for cma_work items: performs the queued
 * state transition (if any) and delivers the queued event to the user.
 * Consumes the reference taken when the work was queued and frees @_work.
 */
static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;

	mutex_lock(&id_priv->handler_mutex);
	if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
	    READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
		goto out_unlock;
	if (work->old_state != 0 || work->new_state != 0) {
		if (!cma_comp_exch(id_priv, work->old_state,
				   work->new_state))
			goto out_unlock;
	}

	if (cma_cm_event_handler(id_priv, &work->event)) {
		/* Non-zero return means the user wants the ID destroyed. */
		cma_id_put(id_priv);
		destroy_id_handler_unlock(id_priv);
		goto out_free;
	}

out_unlock:
	mutex_unlock(&id_priv->handler_mutex);
	cma_id_put(id_priv);
out_free:
	if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN)
		rdma_destroy_ah_attr(&work->event.param.ud.ah_attr);
	kfree(work);
}

/* Prepare @work to report the ROUTE_QUERY -> ROUTE_RESOLVED transition. */
static void cma_init_resolve_route_work(struct cma_work *work,
					struct rdma_id_private *id_priv)
{
	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
}

/* Prepare and queue @work to report ADDR_QUERY -> ADDR_RESOLVED. */
static void enqueue_resolve_addr_work(struct cma_work *work,
				      struct rdma_id_private *id_priv)
{
	/* Balances with cma_id_put() in cma_work_handler */
	cma_id_get(id_priv);

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ADDR_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;

	queue_work(cma_wq, &work->work);
}

/*
 * Resolve an IB route by querying the SA for path records; completion is
 * reported asynchronously through cma_work_handler().
 */
static int cma_resolve_ib_route(struct rdma_id_private *id_priv,
				unsigned long timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	cma_init_resolve_route_work(work, id_priv);

	/* Reuse a previously allocated primary path record if present. */
	if (!route->path_rec)
		route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}

/*
 * Pick the GID type for a route: prefer RoCE v2 (UDP encap) when the port
 * supports it and the address resolved to an IPv4/IPv6 network.
 */
static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
					   unsigned long supported_gids,
					   enum ib_gid_type default_gid)
{
	if ((network_type == RDMA_NETWORK_IPV4 ||
	     network_type == RDMA_NETWORK_IPV6) &&
	    test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
		return IB_GID_TYPE_ROCE_UDP_ENCAP;

	return default_gid;
}

/*
 * cma_iboe_set_path_rec_l2_fields() is helper function which sets
 * path record type based on GID type.
 * It also sets up other L2 fields which includes destination mac address
 * netdev ifindex, of the path record.
 * It returns the netdev of the bound interface for this path record entry.
 */
static struct net_device *
cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv)
{
	struct rdma_route *route = &id_priv->id.route;
	enum ib_gid_type gid_type = IB_GID_TYPE_ROCE;
	struct rdma_addr *addr = &route->addr;
	unsigned long supported_gids;
	struct net_device *ndev;

	if (!addr->dev_addr.bound_dev_if)
		return NULL;

	/* Caller is responsible for the dev_put() on the returned netdev. */
	ndev = dev_get_by_index(addr->dev_addr.net,
				addr->dev_addr.bound_dev_if);
	if (!ndev)
		return NULL;

	supported_gids = roce_gid_type_mask_support(id_priv->id.device,
						    id_priv->id.port_num);
	gid_type = cma_route_gid_type(addr->dev_addr.network,
				      supported_gids,
				      id_priv->gid_type);
	/* Use the hint from IP Stack to select GID Type */
	if (gid_type < ib_network_to_gid_type(addr->dev_addr.network))
		gid_type = ib_network_to_gid_type(addr->dev_addr.network);
	route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type);

	route->path_rec->roce.route_resolved = true;
	sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr);
	return ndev;
}

/*
 * Install a caller-supplied IB path record on @id instead of querying the
 * SA, transitioning ADDR_RESOLVED -> ROUTE_RESOLVED.  The state change is
 * rolled back on failure.
 */
int rdma_set_ib_path(struct rdma_cm_id *id,
		     struct sa_path_rec *path_rec)
{
	struct rdma_id_private *id_priv;
	struct net_device *ndev;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
			   RDMA_CM_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmemdup(path_rec, sizeof(*path_rec),
				     GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	/* For RoCE the L2 fields must be filled in from the bound netdev. */
	if (rdma_protocol_roce(id->device, id->port_num)) {
		ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
		if (!ndev) {
			ret = -ENODEV;
			goto err_free;
		}
		dev_put(ndev);
	}

	id->route.num_pri_alt_paths = 1;
	return 0;

err_free:
	kfree(id->route.path_rec);
	id->route.path_rec = NULL;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_path);

/*
 * iWARP routes are resolved by the IP stack; just queue the work that
 * reports ROUTE_RESOLVED.
 */
static int cma_resolve_iw_route(struct rdma_id_private *id_priv)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	cma_init_resolve_route_work(work, id_priv);
	queue_work(cma_wq, &work->work);
	return 0;
}

/*
 * Map a priority to a traffic class for a VLAN netdev, preferring the
 * real device's prio->tc map when it defines traffic classes and falling
 * back to the VLAN egress QoS mask otherwise.
 */
static int get_vlan_ndev_tc(struct net_device *vlan_ndev, int prio)
{
	struct net_device *dev;

	dev = vlan_dev_real_dev(vlan_ndev);
	if (dev->num_tc)
		return netdev_get_prio_tc_map(dev, prio);

	return (vlan_dev_get_egress_qos_mask(vlan_ndev, prio) &
		VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}

/* Carries prio->tc lookup state through netdev_walk_all_lower_dev_rcu(). */
struct iboe_prio_tc_map {
	int input_prio;
	int output_tc;
	bool found;
};

static int get_lower_vlan_dev_tc(struct net_device *dev,
				 struct netdev_nested_priv *priv)
{
	struct iboe_prio_tc_map *map = (struct iboe_prio_tc_map *)priv->data;

	if (is_vlan_dev(dev))
		map->output_tc = get_vlan_ndev_tc(dev, map->input_prio);
	else if (dev->num_tc)
		map->output_tc = netdev_get_prio_tc_map(dev, map->input_prio);
	else
		map->output_tc = 0;
	/* We are interested only in first level VLAN device, so always
	 * return 1 to stop iterating over next level devices.
	 */
	map->found = true;
	return 1;
}

/*
 * Translate an IP TOS value into a traffic class / IB SL for RoCE using
 * the netdev's priority-to-traffic-class mapping (looking through a
 * first-level VLAN device when needed).
 */
static int iboe_tos_to_sl(struct net_device *ndev, int tos)
{
	struct iboe_prio_tc_map prio_tc_map = {};
	int prio = rt_tos2priority(tos);
	struct netdev_nested_priv priv;

	/* If VLAN device, get it directly from the VLAN netdev */
	if (is_vlan_dev(ndev))
		return get_vlan_ndev_tc(ndev, prio);

	prio_tc_map.input_prio = prio;
	priv.data = (void *)&prio_tc_map;
	rcu_read_lock();
	netdev_walk_all_lower_dev_rcu(ndev,
				      get_lower_vlan_dev_tc,
				      &priv);
	rcu_read_unlock();
	/* If map is found from lower device, use it; Otherwise
	 * continue with the current netdevice to get priority to tc map.
	 */
	if (prio_tc_map.found)
		return prio_tc_map.output_tc;
	else if (ndev->num_tc)
		return netdev_get_prio_tc_map(ndev, prio);
	else
		return 0;
}

/*
 * Derive a RoCE v2 UDP flow label: use the IPv6 flow label when present,
 * otherwise hash the source/destination ports into the flow label space.
 */
static __be32 cma_get_roce_udp_flow_label(struct rdma_id_private *id_priv)
{
	struct sockaddr_in6 *addr6;
	u16 dport, sport;
	u32 hash, fl;

	addr6 = (struct sockaddr_in6 *)cma_src_addr(id_priv);
	fl = be32_to_cpu(addr6->sin6_flowinfo) & IB_GRH_FLOWLABEL_MASK;
	if ((cma_family(id_priv) != AF_INET6) || !fl) {
		dport = be16_to_cpu(cma_port(cma_dst_addr(id_priv)));
		sport = be16_to_cpu(cma_port(cma_src_addr(id_priv)));
		hash = (u32)sport * 31 + dport;
		fl = hash & IB_GRH_FLOWLABEL_MASK;
	}

	return cpu_to_be32(fl);
}

/*
 * Build the route (path record) for a RoCE (iboe) connection from the
 * already-resolved L2/L3 addressing; completion is reported via
 * cma_work_handler().
 */
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
	struct rdma_route *route = &id_priv->id.route;
	struct rdma_addr *addr = &route->addr;
	struct cma_work *work;
	int ret;
	struct net_device *ndev;

	u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
					rdma_start_port(id_priv->cma_dev->device)];
	u8 tos;

	mutex_lock(&id_priv->qp_mutex);
3252ca0c448dSHåkon Bugge tos = id_priv->tos_set ? id_priv->tos : default_roce_tos; 3253ca0c448dSHåkon Bugge mutex_unlock(&id_priv->qp_mutex); 32543c86aa70SEli Cohen 32553c86aa70SEli Cohen work = kzalloc(sizeof *work, GFP_KERNEL); 32563c86aa70SEli Cohen if (!work) 32573c86aa70SEli Cohen return -ENOMEM; 32583c86aa70SEli Cohen 32593c86aa70SEli Cohen route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL); 32603c86aa70SEli Cohen if (!route->path_rec) { 32613c86aa70SEli Cohen ret = -ENOMEM; 32623c86aa70SEli Cohen goto err1; 32633c86aa70SEli Cohen } 32643c86aa70SEli Cohen 3265bf9a9928SMark Zhang route->num_pri_alt_paths = 1; 32663c86aa70SEli Cohen 32679327c7afSParav Pandit ndev = cma_iboe_set_path_rec_l2_fields(id_priv); 326823d70503SWei Yongjun if (!ndev) { 326923d70503SWei Yongjun ret = -ENODEV; 327023d70503SWei Yongjun goto err2; 327123d70503SWei Yongjun } 327220029832SMatan Barak 32737b85627bSMoni Shoua rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, 32747b85627bSMoni Shoua &route->path_rec->sgid); 32757b85627bSMoni Shoua rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr, 32767b85627bSMoni Shoua &route->path_rec->dgid); 3277af7bd463SEli Cohen 3278c3efe750SMatan Barak if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB) 3279c865f246SSomnath Kotur /* TODO: get the hoplimit from the inet/inet6 device */ 3280c3efe750SMatan Barak route->path_rec->hop_limit = addr->dev_addr.hoplimit; 3281c3efe750SMatan Barak else 3282af7bd463SEli Cohen route->path_rec->hop_limit = 1; 3283af7bd463SEli Cohen route->path_rec->reversible = 1; 3284af7bd463SEli Cohen route->path_rec->pkey = cpu_to_be16(0xffff); 3285af7bd463SEli Cohen route->path_rec->mtu_selector = IB_SA_EQ; 328689052d78SMajd Dibbiny route->path_rec->sl = iboe_tos_to_sl(ndev, tos); 328789052d78SMajd Dibbiny route->path_rec->traffic_class = tos; 32883c86aa70SEli Cohen route->path_rec->mtu = iboe_get_mtu(ndev->mtu); 32893c86aa70SEli Cohen 
route->path_rec->rate_selector = IB_SA_EQ; 32903c86aa70SEli Cohen route->path_rec->rate = iboe_get_rate(ndev); 32913c86aa70SEli Cohen dev_put(ndev); 32923c86aa70SEli Cohen route->path_rec->packet_life_time_selector = IB_SA_EQ; 3293e1ee1e62SDag Moxnes /* In case ACK timeout is set, use this value to calculate 3294e1ee1e62SDag Moxnes * PacketLifeTime. As per IBTA 12.7.34, 3295e1ee1e62SDag Moxnes * local ACK timeout = (2 * PacketLifeTime + Local CA’s ACK delay). 3296e1ee1e62SDag Moxnes * Assuming a negligible local ACK delay, we can use 3297e1ee1e62SDag Moxnes * PacketLifeTime = local ACK timeout/2 3298e1ee1e62SDag Moxnes * as a reasonable approximation for RoCE networks. 3299e1ee1e62SDag Moxnes */ 3300ca0c448dSHåkon Bugge mutex_lock(&id_priv->qp_mutex); 3301e84045eaSHåkon Bugge if (id_priv->timeout_set && id_priv->timeout) 3302e84045eaSHåkon Bugge route->path_rec->packet_life_time = id_priv->timeout - 1; 3303e84045eaSHåkon Bugge else 3304e84045eaSHåkon Bugge route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME; 3305ca0c448dSHåkon Bugge mutex_unlock(&id_priv->qp_mutex); 3306e1ee1e62SDag Moxnes 33073c86aa70SEli Cohen if (!route->path_rec->mtu) { 33083c86aa70SEli Cohen ret = -EINVAL; 33093c86aa70SEli Cohen goto err2; 33103c86aa70SEli Cohen } 33113c86aa70SEli Cohen 3312f6653405SMark Zhang if (rdma_protocol_roce_udp_encap(id_priv->id.device, 3313f6653405SMark Zhang id_priv->id.port_num)) 3314f6653405SMark Zhang route->path_rec->flow_label = 3315f6653405SMark Zhang cma_get_roce_udp_flow_label(id_priv); 3316f6653405SMark Zhang 3317981b5a23SParav Pandit cma_init_resolve_route_work(work, id_priv); 33183c86aa70SEli Cohen queue_work(cma_wq, &work->work); 33193c86aa70SEli Cohen 33203c86aa70SEli Cohen return 0; 33213c86aa70SEli Cohen 33223c86aa70SEli Cohen err2: 33233c86aa70SEli Cohen kfree(route->path_rec); 33243c86aa70SEli Cohen route->path_rec = NULL; 3325bf9a9928SMark Zhang route->num_pri_alt_paths = 0; 33263c86aa70SEli Cohen err1: 33273c86aa70SEli Cohen kfree(work); 
33283c86aa70SEli Cohen return ret; 33293c86aa70SEli Cohen } 33303c86aa70SEli Cohen 3331dbace111SLeon Romanovsky int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms) 3332e51060f0SSean Hefty { 3333e51060f0SSean Hefty struct rdma_id_private *id_priv; 3334e51060f0SSean Hefty int ret; 3335e51060f0SSean Hefty 33365f5a6509SHåkon Bugge if (!timeout_ms) 33375f5a6509SHåkon Bugge return -EINVAL; 33385f5a6509SHåkon Bugge 3339e51060f0SSean Hefty id_priv = container_of(id, struct rdma_id_private, id); 3340550e5ca7SNir Muchtar if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY)) 3341e51060f0SSean Hefty return -EINVAL; 3342e51060f0SSean Hefty 3343e368d23fSParav Pandit cma_id_get(id_priv); 3344fe53ba2fSMichael Wang if (rdma_cap_ib_sa(id->device, id->port_num)) 3345e51060f0SSean Hefty ret = cma_resolve_ib_route(id_priv, timeout_ms); 3346fc008bdbSPatrisious Haddad else if (rdma_protocol_roce(id->device, id->port_num)) { 33473c86aa70SEli Cohen ret = cma_resolve_iboe_route(id_priv); 3348fc008bdbSPatrisious Haddad if (!ret) 3349fc008bdbSPatrisious Haddad cma_add_id_to_tree(id_priv); 3350fc008bdbSPatrisious Haddad } 3351c72f2189SMichael Wang else if (rdma_protocol_iwarp(id->device, id->port_num)) 3352d6f91252SLeon Romanovsky ret = cma_resolve_iw_route(id_priv); 3353c72f2189SMichael Wang else 3354e51060f0SSean Hefty ret = -ENOSYS; 3355c72f2189SMichael Wang 3356e51060f0SSean Hefty if (ret) 3357e51060f0SSean Hefty goto err; 3358e51060f0SSean Hefty 3359e51060f0SSean Hefty return 0; 3360e51060f0SSean Hefty err: 3361550e5ca7SNir Muchtar cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED); 3362e368d23fSParav Pandit cma_id_put(id_priv); 3363e51060f0SSean Hefty return ret; 3364e51060f0SSean Hefty } 3365e51060f0SSean Hefty EXPORT_SYMBOL(rdma_resolve_route); 3366e51060f0SSean Hefty 33676a3e362dSSean Hefty static void cma_set_loopback(struct sockaddr *addr) 33686a3e362dSSean Hefty { 33696a3e362dSSean Hefty switch (addr->sa_family) { 
33706a3e362dSSean Hefty case AF_INET: 33716a3e362dSSean Hefty ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK); 33726a3e362dSSean Hefty break; 33736a3e362dSSean Hefty case AF_INET6: 33746a3e362dSSean Hefty ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr, 33756a3e362dSSean Hefty 0, 0, 0, htonl(1)); 33766a3e362dSSean Hefty break; 33776a3e362dSSean Hefty default: 33786a3e362dSSean Hefty ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr, 33796a3e362dSSean Hefty 0, 0, 0, htonl(1)); 33806a3e362dSSean Hefty break; 33816a3e362dSSean Hefty } 33826a3e362dSSean Hefty } 33836a3e362dSSean Hefty 3384e51060f0SSean Hefty static int cma_bind_loopback(struct rdma_id_private *id_priv) 3385e51060f0SSean Hefty { 3386b0569e40SSean Hefty struct cma_device *cma_dev, *cur_dev; 3387f0ee3404SMichael S. Tsirkin union ib_gid gid; 3388102c5ce0SJack Wang enum ib_port_state port_state; 3389cc055dd3SParav Pandit unsigned int p; 3390e51060f0SSean Hefty u16 pkey; 3391e51060f0SSean Hefty int ret; 3392e51060f0SSean Hefty 3393b0569e40SSean Hefty cma_dev = NULL; 3394e51060f0SSean Hefty mutex_lock(&lock); 3395b0569e40SSean Hefty list_for_each_entry(cur_dev, &dev_list, list) { 3396b0569e40SSean Hefty if (cma_family(id_priv) == AF_IB && 339772219ceaSMichael Wang !rdma_cap_ib_cm(cur_dev->device, 1)) 3398b0569e40SSean Hefty continue; 3399b0569e40SSean Hefty 3400b0569e40SSean Hefty if (!cma_dev) 3401b0569e40SSean Hefty cma_dev = cur_dev; 3402b0569e40SSean Hefty 3403cc055dd3SParav Pandit rdma_for_each_port (cur_dev->device, p) { 3404102c5ce0SJack Wang if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) && 3405102c5ce0SJack Wang port_state == IB_PORT_ACTIVE) { 3406b0569e40SSean Hefty cma_dev = cur_dev; 3407b0569e40SSean Hefty goto port_found; 3408b0569e40SSean Hefty } 3409b0569e40SSean Hefty } 3410b0569e40SSean Hefty } 3411b0569e40SSean Hefty 3412b0569e40SSean Hefty if (!cma_dev) { 3413e82153b5SKrishna Kumar ret = -ENODEV; 3414e82153b5SKrishna Kumar goto out; 
3415e82153b5SKrishna Kumar } 3416e51060f0SSean Hefty 3417e51060f0SSean Hefty p = 1; 3418e51060f0SSean Hefty 3419e51060f0SSean Hefty port_found: 34201dfce294SParav Pandit ret = rdma_query_gid(cma_dev->device, p, 0, &gid); 3421e51060f0SSean Hefty if (ret) 3422e51060f0SSean Hefty goto out; 3423e51060f0SSean Hefty 3424e51060f0SSean Hefty ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey); 3425e51060f0SSean Hefty if (ret) 3426e51060f0SSean Hefty goto out; 3427e51060f0SSean Hefty 34286f8372b6SSean Hefty id_priv->id.route.addr.dev_addr.dev_type = 342921655afcSMichael Wang (rdma_protocol_ib(cma_dev->device, p)) ? 34306f8372b6SSean Hefty ARPHRD_INFINIBAND : ARPHRD_ETHER; 34316f8372b6SSean Hefty 34326f8372b6SSean Hefty rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); 3433e51060f0SSean Hefty ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); 3434e51060f0SSean Hefty id_priv->id.port_num = p; 3435e51060f0SSean Hefty cma_attach_to_dev(id_priv, cma_dev); 3436cb5cd0eaSShay Drory rdma_restrack_add(&id_priv->res); 3437f4753834SSean Hefty cma_set_loopback(cma_src_addr(id_priv)); 3438e51060f0SSean Hefty out: 3439e51060f0SSean Hefty mutex_unlock(&lock); 3440e51060f0SSean Hefty return ret; 3441e51060f0SSean Hefty } 3442e51060f0SSean Hefty 3443e51060f0SSean Hefty static void addr_handler(int status, struct sockaddr *src_addr, 3444e51060f0SSean Hefty struct rdma_dev_addr *dev_addr, void *context) 3445e51060f0SSean Hefty { 3446e51060f0SSean Hefty struct rdma_id_private *id_priv = context; 34477582df82SParav Pandit struct rdma_cm_event event = {}; 34485fc01fb8SMyungho Jung struct sockaddr *addr; 34495fc01fb8SMyungho Jung struct sockaddr_storage old_addr; 3450e51060f0SSean Hefty 3451de910bd9SOr Gerlitz mutex_lock(&id_priv->handler_mutex); 3452550e5ca7SNir Muchtar if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, 3453550e5ca7SNir Muchtar RDMA_CM_ADDR_RESOLVED)) 345461a73c70SSean Hefty goto out; 345561a73c70SSean Hefty 34565fc01fb8SMyungho Jung /* 34575fc01fb8SMyungho Jung 
* Store the previous src address, so that if we fail to acquire 34585fc01fb8SMyungho Jung * matching rdma device, old address can be restored back, which helps 34595fc01fb8SMyungho Jung * to cancel the cma listen operation correctly. 34605fc01fb8SMyungho Jung */ 34615fc01fb8SMyungho Jung addr = cma_src_addr(id_priv); 34625fc01fb8SMyungho Jung memcpy(&old_addr, addr, rdma_addr_size(addr)); 34635fc01fb8SMyungho Jung memcpy(addr, src_addr, rdma_addr_size(src_addr)); 3464498683c6SMoni Shoua if (!status && !id_priv->cma_dev) { 3465ff11c6cdSParav Pandit status = cma_acquire_dev_by_src_ip(id_priv); 3466498683c6SMoni Shoua if (status) 3467498683c6SMoni Shoua pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n", 3468498683c6SMoni Shoua status); 3469cb5cd0eaSShay Drory rdma_restrack_add(&id_priv->res); 3470a6e4d254SHåkon Bugge } else if (status) { 3471498683c6SMoni Shoua pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status); 3472498683c6SMoni Shoua } 3473e51060f0SSean Hefty 3474e51060f0SSean Hefty if (status) { 34755fc01fb8SMyungho Jung memcpy(addr, &old_addr, 34765fc01fb8SMyungho Jung rdma_addr_size((struct sockaddr *)&old_addr)); 3477550e5ca7SNir Muchtar if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, 3478550e5ca7SNir Muchtar RDMA_CM_ADDR_BOUND)) 3479e51060f0SSean Hefty goto out; 3480a1b1b61fSSean Hefty event.event = RDMA_CM_EVENT_ADDR_ERROR; 3481a1b1b61fSSean Hefty event.status = status; 34827b85627bSMoni Shoua } else 3483a1b1b61fSSean Hefty event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 3484e51060f0SSean Hefty 3485ed999f82SChuck Lever if (cma_cm_event_handler(id_priv, &event)) { 3486f6a9d47aSJason Gunthorpe destroy_id_handler_unlock(id_priv); 3487e51060f0SSean Hefty return; 3488e51060f0SSean Hefty } 3489e51060f0SSean Hefty out: 3490de910bd9SOr Gerlitz mutex_unlock(&id_priv->handler_mutex); 3491e51060f0SSean Hefty } 3492e51060f0SSean Hefty 3493e51060f0SSean Hefty static int cma_resolve_loopback(struct 
rdma_id_private *id_priv) 3494e51060f0SSean Hefty { 3495e51060f0SSean Hefty struct cma_work *work; 3496f0ee3404SMichael S. Tsirkin union ib_gid gid; 3497e51060f0SSean Hefty int ret; 3498e51060f0SSean Hefty 3499e51060f0SSean Hefty work = kzalloc(sizeof *work, GFP_KERNEL); 3500e51060f0SSean Hefty if (!work) 3501e51060f0SSean Hefty return -ENOMEM; 3502e51060f0SSean Hefty 3503e51060f0SSean Hefty if (!id_priv->cma_dev) { 3504e51060f0SSean Hefty ret = cma_bind_loopback(id_priv); 3505e51060f0SSean Hefty if (ret) 3506e51060f0SSean Hefty goto err; 3507e51060f0SSean Hefty } 3508e51060f0SSean Hefty 35096f8372b6SSean Hefty rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); 35106f8372b6SSean Hefty rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); 3511e51060f0SSean Hefty 3512081ea519SParav Pandit enqueue_resolve_addr_work(work, id_priv); 3513e51060f0SSean Hefty return 0; 3514e51060f0SSean Hefty err: 3515e51060f0SSean Hefty kfree(work); 3516e51060f0SSean Hefty return ret; 3517e51060f0SSean Hefty } 3518e51060f0SSean Hefty 3519f17df3b0SSean Hefty static int cma_resolve_ib_addr(struct rdma_id_private *id_priv) 3520f17df3b0SSean Hefty { 3521f17df3b0SSean Hefty struct cma_work *work; 3522f17df3b0SSean Hefty int ret; 3523f17df3b0SSean Hefty 3524f17df3b0SSean Hefty work = kzalloc(sizeof *work, GFP_KERNEL); 3525f17df3b0SSean Hefty if (!work) 3526f17df3b0SSean Hefty return -ENOMEM; 3527f17df3b0SSean Hefty 3528f17df3b0SSean Hefty if (!id_priv->cma_dev) { 3529f17df3b0SSean Hefty ret = cma_resolve_ib_dev(id_priv); 3530f17df3b0SSean Hefty if (ret) 3531f17df3b0SSean Hefty goto err; 3532f17df3b0SSean Hefty } 3533f17df3b0SSean Hefty 3534f17df3b0SSean Hefty rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) 3535f17df3b0SSean Hefty &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr)); 3536f17df3b0SSean Hefty 3537081ea519SParav Pandit enqueue_resolve_addr_work(work, id_priv); 3538f17df3b0SSean Hefty return 0; 3539f17df3b0SSean Hefty err: 
3540f17df3b0SSean Hefty kfree(work); 3541f17df3b0SSean Hefty return ret; 3542f17df3b0SSean Hefty } 3543f17df3b0SSean Hefty 3544e51060f0SSean Hefty static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 35452df7dba8SParav Pandit const struct sockaddr *dst_addr) 3546e51060f0SSean Hefty { 354722e9f710SJason Gunthorpe struct sockaddr_storage zero_sock = {}; 354822e9f710SJason Gunthorpe 354922e9f710SJason Gunthorpe if (src_addr && src_addr->sa_family) 355022e9f710SJason Gunthorpe return rdma_bind_addr(id, src_addr); 355122e9f710SJason Gunthorpe 355222e9f710SJason Gunthorpe /* 355322e9f710SJason Gunthorpe * When the src_addr is not specified, automatically supply an any addr 355422e9f710SJason Gunthorpe */ 355522e9f710SJason Gunthorpe zero_sock.ss_family = dst_addr->sa_family; 355622e9f710SJason Gunthorpe if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) { 355722e9f710SJason Gunthorpe struct sockaddr_in6 *src_addr6 = 355822e9f710SJason Gunthorpe (struct sockaddr_in6 *)&zero_sock; 355922e9f710SJason Gunthorpe struct sockaddr_in6 *dst_addr6 = 356022e9f710SJason Gunthorpe (struct sockaddr_in6 *)dst_addr; 356122e9f710SJason Gunthorpe 35626c26a771SSpencer Baugh src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; 35636c26a771SSpencer Baugh if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) 356422e9f710SJason Gunthorpe id->route.addr.dev_addr.bound_dev_if = 356522e9f710SJason Gunthorpe dst_addr6->sin6_scope_id; 3566f17df3b0SSean Hefty } else if (dst_addr->sa_family == AF_IB) { 356722e9f710SJason Gunthorpe ((struct sockaddr_ib *)&zero_sock)->sib_pkey = 3568f17df3b0SSean Hefty ((struct sockaddr_ib *)dst_addr)->sib_pkey; 3569d14714dfSSean Hefty } 357022e9f710SJason Gunthorpe return rdma_bind_addr(id, (struct sockaddr *)&zero_sock); 3571e51060f0SSean Hefty } 3572e51060f0SSean Hefty 3573732d41c5SJason Gunthorpe /* 3574732d41c5SJason Gunthorpe * If required, resolve the source address for bind and leave the id_priv in 
3575732d41c5SJason Gunthorpe * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior 3576732d41c5SJason Gunthorpe * calls made by ULP, a previously bound ID will not be re-bound and src_addr is 3577732d41c5SJason Gunthorpe * ignored. 3578732d41c5SJason Gunthorpe */ 3579732d41c5SJason Gunthorpe static int resolve_prepare_src(struct rdma_id_private *id_priv, 3580732d41c5SJason Gunthorpe struct sockaddr *src_addr, 3581732d41c5SJason Gunthorpe const struct sockaddr *dst_addr) 3582e51060f0SSean Hefty { 3583e51060f0SSean Hefty int ret; 3584e51060f0SSean Hefty 3585e4103312SParav Pandit memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); 3586732d41c5SJason Gunthorpe if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) { 3587732d41c5SJason Gunthorpe /* For a well behaved ULP state will be RDMA_CM_IDLE */ 3588732d41c5SJason Gunthorpe ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr); 3589732d41c5SJason Gunthorpe if (ret) 3590732d41c5SJason Gunthorpe goto err_dst; 3591732d41c5SJason Gunthorpe if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, 3592732d41c5SJason Gunthorpe RDMA_CM_ADDR_QUERY))) { 3593732d41c5SJason Gunthorpe ret = -EINVAL; 3594732d41c5SJason Gunthorpe goto err_dst; 3595e51060f0SSean Hefty } 3596e4103312SParav Pandit } 3597e51060f0SSean Hefty 3598e4103312SParav Pandit if (cma_family(id_priv) != dst_addr->sa_family) { 3599732d41c5SJason Gunthorpe ret = -EINVAL; 3600732d41c5SJason Gunthorpe goto err_state; 3601732d41c5SJason Gunthorpe } 3602732d41c5SJason Gunthorpe return 0; 3603732d41c5SJason Gunthorpe 3604732d41c5SJason Gunthorpe err_state: 3605732d41c5SJason Gunthorpe cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); 3606732d41c5SJason Gunthorpe err_dst: 3607e4103312SParav Pandit memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr)); 3608732d41c5SJason Gunthorpe return ret; 3609e4103312SParav Pandit } 36104ae7152eSSean Hefty 3611732d41c5SJason Gunthorpe int rdma_resolve_addr(struct 
rdma_cm_id *id, struct sockaddr *src_addr, 3612732d41c5SJason Gunthorpe const struct sockaddr *dst_addr, unsigned long timeout_ms) 3613732d41c5SJason Gunthorpe { 3614732d41c5SJason Gunthorpe struct rdma_id_private *id_priv = 3615732d41c5SJason Gunthorpe container_of(id, struct rdma_id_private, id); 3616732d41c5SJason Gunthorpe int ret; 3617732d41c5SJason Gunthorpe 3618732d41c5SJason Gunthorpe ret = resolve_prepare_src(id_priv, src_addr, dst_addr); 3619732d41c5SJason Gunthorpe if (ret) 3620732d41c5SJason Gunthorpe return ret; 3621e51060f0SSean Hefty 3622f17df3b0SSean Hefty if (cma_any_addr(dst_addr)) { 3623e51060f0SSean Hefty ret = cma_resolve_loopback(id_priv); 3624f17df3b0SSean Hefty } else { 3625f17df3b0SSean Hefty if (dst_addr->sa_family == AF_IB) { 3626f17df3b0SSean Hefty ret = cma_resolve_ib_addr(id_priv); 3627f17df3b0SSean Hefty } else { 3628305d568bSJason Gunthorpe /* 3629305d568bSJason Gunthorpe * The FSM can return back to RDMA_CM_ADDR_BOUND after 3630305d568bSJason Gunthorpe * rdma_resolve_ip() is called, eg through the error 3631305d568bSJason Gunthorpe * path in addr_handler(). If this happens the existing 3632305d568bSJason Gunthorpe * request must be canceled before issuing a new one. 3633305d568bSJason Gunthorpe * Since canceling a request is a bit slow and this 3634305d568bSJason Gunthorpe * oddball path is rare, keep track once a request has 3635305d568bSJason Gunthorpe * been issued. The track turns out to be a permanent 3636305d568bSJason Gunthorpe * state since this is the only cancel as it is 3637305d568bSJason Gunthorpe * immediately before rdma_resolve_ip(). 
3638305d568bSJason Gunthorpe */ 3639305d568bSJason Gunthorpe if (id_priv->used_resolve_ip) 3640305d568bSJason Gunthorpe rdma_addr_cancel(&id->route.addr.dev_addr); 3641305d568bSJason Gunthorpe else 3642305d568bSJason Gunthorpe id_priv->used_resolve_ip = 1; 36430e9d2c19SParav Pandit ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr, 36440e9d2c19SParav Pandit &id->route.addr.dev_addr, 36450e9d2c19SParav Pandit timeout_ms, addr_handler, 36460e9d2c19SParav Pandit false, id_priv); 3647f17df3b0SSean Hefty } 3648f17df3b0SSean Hefty } 3649e51060f0SSean Hefty if (ret) 3650e51060f0SSean Hefty goto err; 3651e51060f0SSean Hefty 3652e51060f0SSean Hefty return 0; 3653e51060f0SSean Hefty err: 3654550e5ca7SNir Muchtar cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); 3655e51060f0SSean Hefty return ret; 3656e51060f0SSean Hefty } 3657e51060f0SSean Hefty EXPORT_SYMBOL(rdma_resolve_addr); 3658e51060f0SSean Hefty 3659a9bb7912SHefty, Sean int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse) 3660a9bb7912SHefty, Sean { 3661a9bb7912SHefty, Sean struct rdma_id_private *id_priv; 3662a9bb7912SHefty, Sean unsigned long flags; 3663a9bb7912SHefty, Sean int ret; 3664a9bb7912SHefty, Sean 3665a9bb7912SHefty, Sean id_priv = container_of(id, struct rdma_id_private, id); 3666a9bb7912SHefty, Sean spin_lock_irqsave(&id_priv->lock, flags); 3667d490ee52SJason Gunthorpe if ((reuse && id_priv->state != RDMA_CM_LISTEN) || 3668d490ee52SJason Gunthorpe id_priv->state == RDMA_CM_IDLE) { 3669a9bb7912SHefty, Sean id_priv->reuseaddr = reuse; 3670a9bb7912SHefty, Sean ret = 0; 3671a9bb7912SHefty, Sean } else { 3672a9bb7912SHefty, Sean ret = -EINVAL; 3673a9bb7912SHefty, Sean } 3674a9bb7912SHefty, Sean spin_unlock_irqrestore(&id_priv->lock, flags); 3675a9bb7912SHefty, Sean return ret; 3676a9bb7912SHefty, Sean } 3677a9bb7912SHefty, Sean EXPORT_SYMBOL(rdma_set_reuseaddr); 3678a9bb7912SHefty, Sean 367968602120SSean Hefty int rdma_set_afonly(struct rdma_cm_id *id, int afonly) 368068602120SSean Hefty 
{ 368168602120SSean Hefty struct rdma_id_private *id_priv; 368268602120SSean Hefty unsigned long flags; 368368602120SSean Hefty int ret; 368468602120SSean Hefty 368568602120SSean Hefty id_priv = container_of(id, struct rdma_id_private, id); 368668602120SSean Hefty spin_lock_irqsave(&id_priv->lock, flags); 368768602120SSean Hefty if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) { 368868602120SSean Hefty id_priv->options |= (1 << CMA_OPTION_AFONLY); 368968602120SSean Hefty id_priv->afonly = afonly; 369068602120SSean Hefty ret = 0; 369168602120SSean Hefty } else { 369268602120SSean Hefty ret = -EINVAL; 369368602120SSean Hefty } 369468602120SSean Hefty spin_unlock_irqrestore(&id_priv->lock, flags); 369568602120SSean Hefty return ret; 369668602120SSean Hefty } 369768602120SSean Hefty EXPORT_SYMBOL(rdma_set_afonly); 369868602120SSean Hefty 3699e51060f0SSean Hefty static void cma_bind_port(struct rdma_bind_list *bind_list, 3700e51060f0SSean Hefty struct rdma_id_private *id_priv) 3701e51060f0SSean Hefty { 370258afdcb7SSean Hefty struct sockaddr *addr; 370358afdcb7SSean Hefty struct sockaddr_ib *sib; 370458afdcb7SSean Hefty u64 sid, mask; 370558afdcb7SSean Hefty __be16 port; 3706e51060f0SSean Hefty 3707730c8912SMark Zhang lockdep_assert_held(&lock); 3708730c8912SMark Zhang 3709f4753834SSean Hefty addr = cma_src_addr(id_priv); 371058afdcb7SSean Hefty port = htons(bind_list->port); 371158afdcb7SSean Hefty 371258afdcb7SSean Hefty switch (addr->sa_family) { 371358afdcb7SSean Hefty case AF_INET: 371458afdcb7SSean Hefty ((struct sockaddr_in *) addr)->sin_port = port; 371558afdcb7SSean Hefty break; 371658afdcb7SSean Hefty case AF_INET6: 371758afdcb7SSean Hefty ((struct sockaddr_in6 *) addr)->sin6_port = port; 371858afdcb7SSean Hefty break; 371958afdcb7SSean Hefty case AF_IB: 372058afdcb7SSean Hefty sib = (struct sockaddr_ib *) addr; 372158afdcb7SSean Hefty sid = be64_to_cpu(sib->sib_sid); 372258afdcb7SSean Hefty mask = be64_to_cpu(sib->sib_sid_mask); 
372358afdcb7SSean Hefty sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port)); 372458afdcb7SSean Hefty sib->sib_sid_mask = cpu_to_be64(~0ULL); 372558afdcb7SSean Hefty break; 372658afdcb7SSean Hefty } 3727e51060f0SSean Hefty id_priv->bind_list = bind_list; 3728e51060f0SSean Hefty hlist_add_head(&id_priv->node, &bind_list->owners); 3729e51060f0SSean Hefty } 3730e51060f0SSean Hefty 37312253fc0cSSteve Wise static int cma_alloc_port(enum rdma_ucm_port_space ps, 3732aac978e1SHaggai Eran struct rdma_id_private *id_priv, unsigned short snum) 3733e51060f0SSean Hefty { 3734e51060f0SSean Hefty struct rdma_bind_list *bind_list; 37353b069c5dSTejun Heo int ret; 3736e51060f0SSean Hefty 3737730c8912SMark Zhang lockdep_assert_held(&lock); 3738730c8912SMark Zhang 3739cb164b8cSSean Hefty bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); 3740e51060f0SSean Hefty if (!bind_list) 3741e51060f0SSean Hefty return -ENOMEM; 3742e51060f0SSean Hefty 3743fa20105eSGuy Shapiro ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list, 3744fa20105eSGuy Shapiro snum); 37453b069c5dSTejun Heo if (ret < 0) 37463b069c5dSTejun Heo goto err; 3747e51060f0SSean Hefty 3748e51060f0SSean Hefty bind_list->ps = ps; 3749061ccb52SLeon Romanovsky bind_list->port = snum; 3750e51060f0SSean Hefty cma_bind_port(bind_list, id_priv); 3751e51060f0SSean Hefty return 0; 37523b069c5dSTejun Heo err: 3753aedec080SSean Hefty kfree(bind_list); 37543b069c5dSTejun Heo return ret == -ENOSPC ? 
-EADDRNOTAVAIL : ret; 3755aedec080SSean Hefty } 3756aedec080SSean Hefty 375719b752a1SMoni Shoua static int cma_port_is_unique(struct rdma_bind_list *bind_list, 375819b752a1SMoni Shoua struct rdma_id_private *id_priv) 375919b752a1SMoni Shoua { 376019b752a1SMoni Shoua struct rdma_id_private *cur_id; 376119b752a1SMoni Shoua struct sockaddr *daddr = cma_dst_addr(id_priv); 376219b752a1SMoni Shoua struct sockaddr *saddr = cma_src_addr(id_priv); 376319b752a1SMoni Shoua __be16 dport = cma_port(daddr); 376419b752a1SMoni Shoua 3765730c8912SMark Zhang lockdep_assert_held(&lock); 3766730c8912SMark Zhang 376719b752a1SMoni Shoua hlist_for_each_entry(cur_id, &bind_list->owners, node) { 376819b752a1SMoni Shoua struct sockaddr *cur_daddr = cma_dst_addr(cur_id); 376919b752a1SMoni Shoua struct sockaddr *cur_saddr = cma_src_addr(cur_id); 377019b752a1SMoni Shoua __be16 cur_dport = cma_port(cur_daddr); 377119b752a1SMoni Shoua 377219b752a1SMoni Shoua if (id_priv == cur_id) 377319b752a1SMoni Shoua continue; 377419b752a1SMoni Shoua 377519b752a1SMoni Shoua /* different dest port -> unique */ 37769dea9a2fSTatyana Nikolova if (!cma_any_port(daddr) && 37779dea9a2fSTatyana Nikolova !cma_any_port(cur_daddr) && 377819b752a1SMoni Shoua (dport != cur_dport)) 377919b752a1SMoni Shoua continue; 378019b752a1SMoni Shoua 378119b752a1SMoni Shoua /* different src address -> unique */ 378219b752a1SMoni Shoua if (!cma_any_addr(saddr) && 378319b752a1SMoni Shoua !cma_any_addr(cur_saddr) && 378419b752a1SMoni Shoua cma_addr_cmp(saddr, cur_saddr)) 378519b752a1SMoni Shoua continue; 378619b752a1SMoni Shoua 378719b752a1SMoni Shoua /* different dst address -> unique */ 37889dea9a2fSTatyana Nikolova if (!cma_any_addr(daddr) && 37899dea9a2fSTatyana Nikolova !cma_any_addr(cur_daddr) && 379019b752a1SMoni Shoua cma_addr_cmp(daddr, cur_daddr)) 379119b752a1SMoni Shoua continue; 379219b752a1SMoni Shoua 379319b752a1SMoni Shoua return -EADDRNOTAVAIL; 379419b752a1SMoni Shoua } 379519b752a1SMoni Shoua return 0; 379619b752a1SMoni 
Shoua } 379719b752a1SMoni Shoua 37982253fc0cSSteve Wise static int cma_alloc_any_port(enum rdma_ucm_port_space ps, 3799aac978e1SHaggai Eran struct rdma_id_private *id_priv) 3800aedec080SSean Hefty { 38015d7220e8STetsuo Handa static unsigned int last_used_port; 38025d7220e8STetsuo Handa int low, high, remaining; 38035d7220e8STetsuo Handa unsigned int rover; 3804fa20105eSGuy Shapiro struct net *net = id_priv->id.route.addr.dev_addr.net; 3805aedec080SSean Hefty 3806730c8912SMark Zhang lockdep_assert_held(&lock); 3807730c8912SMark Zhang 3808fa20105eSGuy Shapiro inet_get_local_port_range(net, &low, &high); 38095d7220e8STetsuo Handa remaining = (high - low) + 1; 3810*e8a533cbSJason A. Donenfeld rover = get_random_u32_inclusive(low, remaining + low - 1); 38115d7220e8STetsuo Handa retry: 381219b752a1SMoni Shoua if (last_used_port != rover) { 381319b752a1SMoni Shoua struct rdma_bind_list *bind_list; 381419b752a1SMoni Shoua int ret; 381519b752a1SMoni Shoua 381619b752a1SMoni Shoua bind_list = cma_ps_find(net, ps, (unsigned short)rover); 381719b752a1SMoni Shoua 381819b752a1SMoni Shoua if (!bind_list) { 381919b752a1SMoni Shoua ret = cma_alloc_port(ps, id_priv, rover); 382019b752a1SMoni Shoua } else { 382119b752a1SMoni Shoua ret = cma_port_is_unique(bind_list, id_priv); 382219b752a1SMoni Shoua if (!ret) 382319b752a1SMoni Shoua cma_bind_port(bind_list, id_priv); 382419b752a1SMoni Shoua } 38255d7220e8STetsuo Handa /* 38265d7220e8STetsuo Handa * Remember previously used port number in order to avoid 38275d7220e8STetsuo Handa * re-using same port immediately after it is closed. 
38285d7220e8STetsuo Handa */ 38295d7220e8STetsuo Handa if (!ret) 38305d7220e8STetsuo Handa last_used_port = rover; 38315d7220e8STetsuo Handa if (ret != -EADDRNOTAVAIL) 38325d7220e8STetsuo Handa return ret; 38335d7220e8STetsuo Handa } 38345d7220e8STetsuo Handa if (--remaining) { 38355d7220e8STetsuo Handa rover++; 38365d7220e8STetsuo Handa if ((rover < low) || (rover > high)) 38375d7220e8STetsuo Handa rover = low; 3838aedec080SSean Hefty goto retry; 3839aedec080SSean Hefty } 38405d7220e8STetsuo Handa return -EADDRNOTAVAIL; 3841e51060f0SSean Hefty } 3842e51060f0SSean Hefty 3843a9bb7912SHefty, Sean /* 3844a9bb7912SHefty, Sean * Check that the requested port is available. This is called when trying to 3845a9bb7912SHefty, Sean * bind to a specific port, or when trying to listen on a bound port. In 3846a9bb7912SHefty, Sean * the latter case, the provided id_priv may already be on the bind_list, but 3847a9bb7912SHefty, Sean * we still need to check that it's okay to start listening. 3848a9bb7912SHefty, Sean */ 3849a9bb7912SHefty, Sean static int cma_check_port(struct rdma_bind_list *bind_list, 3850a9bb7912SHefty, Sean struct rdma_id_private *id_priv, uint8_t reuseaddr) 3851e51060f0SSean Hefty { 3852e51060f0SSean Hefty struct rdma_id_private *cur_id; 385343b752daSHefty, Sean struct sockaddr *addr, *cur_addr; 3854e51060f0SSean Hefty 3855730c8912SMark Zhang lockdep_assert_held(&lock); 3856730c8912SMark Zhang 3857f4753834SSean Hefty addr = cma_src_addr(id_priv); 3858b67bfe0dSSasha Levin hlist_for_each_entry(cur_id, &bind_list->owners, node) { 3859a9bb7912SHefty, Sean if (id_priv == cur_id) 3860a9bb7912SHefty, Sean continue; 3861a9bb7912SHefty, Sean 3862d490ee52SJason Gunthorpe if (reuseaddr && cur_id->reuseaddr) 38635b0ec991SSean Hefty continue; 38645b0ec991SSean Hefty 3865f4753834SSean Hefty cur_addr = cma_src_addr(cur_id); 38665b0ec991SSean Hefty if (id_priv->afonly && cur_id->afonly && 38675b0ec991SSean Hefty (addr->sa_family != cur_addr->sa_family)) 38685b0ec991SSean Hefty 
continue; 38695b0ec991SSean Hefty 38705b0ec991SSean Hefty if (cma_any_addr(addr) || cma_any_addr(cur_addr)) 3871e51060f0SSean Hefty return -EADDRNOTAVAIL; 3872e51060f0SSean Hefty 387343b752daSHefty, Sean if (!cma_addr_cmp(addr, cur_addr)) 3874e51060f0SSean Hefty return -EADDRINUSE; 3875e51060f0SSean Hefty } 3876e51060f0SSean Hefty return 0; 3877e51060f0SSean Hefty } 3878e51060f0SSean Hefty 38792253fc0cSSteve Wise static int cma_use_port(enum rdma_ucm_port_space ps, 3880aac978e1SHaggai Eran struct rdma_id_private *id_priv) 3881a9bb7912SHefty, Sean { 3882a9bb7912SHefty, Sean struct rdma_bind_list *bind_list; 3883a9bb7912SHefty, Sean unsigned short snum; 3884a9bb7912SHefty, Sean int ret; 3885a9bb7912SHefty, Sean 3886730c8912SMark Zhang lockdep_assert_held(&lock); 3887730c8912SMark Zhang 3888f4753834SSean Hefty snum = ntohs(cma_port(cma_src_addr(id_priv))); 3889a9bb7912SHefty, Sean if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) 3890a9bb7912SHefty, Sean return -EACCES; 3891a9bb7912SHefty, Sean 3892fa20105eSGuy Shapiro bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum); 3893a9bb7912SHefty, Sean if (!bind_list) { 3894a9bb7912SHefty, Sean ret = cma_alloc_port(ps, id_priv, snum); 3895a9bb7912SHefty, Sean } else { 3896a9bb7912SHefty, Sean ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); 3897a9bb7912SHefty, Sean if (!ret) 3898a9bb7912SHefty, Sean cma_bind_port(bind_list, id_priv); 3899a9bb7912SHefty, Sean } 3900a9bb7912SHefty, Sean return ret; 3901a9bb7912SHefty, Sean } 3902a9bb7912SHefty, Sean 39032253fc0cSSteve Wise static enum rdma_ucm_port_space 39042253fc0cSSteve Wise cma_select_inet_ps(struct rdma_id_private *id_priv) 390558afdcb7SSean Hefty { 390658afdcb7SSean Hefty switch (id_priv->id.ps) { 390758afdcb7SSean Hefty case RDMA_PS_TCP: 390858afdcb7SSean Hefty case RDMA_PS_UDP: 390958afdcb7SSean Hefty case RDMA_PS_IPOIB: 391058afdcb7SSean Hefty case RDMA_PS_IB: 3911aac978e1SHaggai Eran return id_priv->id.ps; 391258afdcb7SSean Hefty 
default: 3913aac978e1SHaggai Eran 3914aac978e1SHaggai Eran return 0; 391558afdcb7SSean Hefty } 391658afdcb7SSean Hefty } 391758afdcb7SSean Hefty 39182253fc0cSSteve Wise static enum rdma_ucm_port_space 39192253fc0cSSteve Wise cma_select_ib_ps(struct rdma_id_private *id_priv) 392058afdcb7SSean Hefty { 39212253fc0cSSteve Wise enum rdma_ucm_port_space ps = 0; 392258afdcb7SSean Hefty struct sockaddr_ib *sib; 392358afdcb7SSean Hefty u64 sid_ps, mask, sid; 392458afdcb7SSean Hefty 3925f4753834SSean Hefty sib = (struct sockaddr_ib *) cma_src_addr(id_priv); 392658afdcb7SSean Hefty mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK; 392758afdcb7SSean Hefty sid = be64_to_cpu(sib->sib_sid) & mask; 392858afdcb7SSean Hefty 392958afdcb7SSean Hefty if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) { 393058afdcb7SSean Hefty sid_ps = RDMA_IB_IP_PS_IB; 3931aac978e1SHaggai Eran ps = RDMA_PS_IB; 393258afdcb7SSean Hefty } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) && 393358afdcb7SSean Hefty (sid == (RDMA_IB_IP_PS_TCP & mask))) { 393458afdcb7SSean Hefty sid_ps = RDMA_IB_IP_PS_TCP; 3935aac978e1SHaggai Eran ps = RDMA_PS_TCP; 393658afdcb7SSean Hefty } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) && 393758afdcb7SSean Hefty (sid == (RDMA_IB_IP_PS_UDP & mask))) { 393858afdcb7SSean Hefty sid_ps = RDMA_IB_IP_PS_UDP; 3939aac978e1SHaggai Eran ps = RDMA_PS_UDP; 394058afdcb7SSean Hefty } 394158afdcb7SSean Hefty 394258afdcb7SSean Hefty if (ps) { 394358afdcb7SSean Hefty sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib))); 394458afdcb7SSean Hefty sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK | 394558afdcb7SSean Hefty be64_to_cpu(sib->sib_sid_mask)); 394658afdcb7SSean Hefty } 394758afdcb7SSean Hefty return ps; 394858afdcb7SSean Hefty } 394958afdcb7SSean Hefty 3950e51060f0SSean Hefty static int cma_get_port(struct rdma_id_private *id_priv) 3951e51060f0SSean Hefty { 
39522253fc0cSSteve Wise enum rdma_ucm_port_space ps; 3953e51060f0SSean Hefty int ret; 3954e51060f0SSean Hefty 3955f4753834SSean Hefty if (cma_family(id_priv) != AF_IB) 395658afdcb7SSean Hefty ps = cma_select_inet_ps(id_priv); 395758afdcb7SSean Hefty else 395858afdcb7SSean Hefty ps = cma_select_ib_ps(id_priv); 395958afdcb7SSean Hefty if (!ps) 3960e51060f0SSean Hefty return -EPROTONOSUPPORT; 3961e51060f0SSean Hefty 3962e51060f0SSean Hefty mutex_lock(&lock); 3963f4753834SSean Hefty if (cma_any_port(cma_src_addr(id_priv))) 3964aedec080SSean Hefty ret = cma_alloc_any_port(ps, id_priv); 3965e51060f0SSean Hefty else 3966e51060f0SSean Hefty ret = cma_use_port(ps, id_priv); 3967e51060f0SSean Hefty mutex_unlock(&lock); 3968e51060f0SSean Hefty 3969e51060f0SSean Hefty return ret; 3970e51060f0SSean Hefty } 3971e51060f0SSean Hefty 3972d14714dfSSean Hefty static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, 3973d14714dfSSean Hefty struct sockaddr *addr) 3974d14714dfSSean Hefty { 3975d90f9b35SRoland Dreier #if IS_ENABLED(CONFIG_IPV6) 3976d14714dfSSean Hefty struct sockaddr_in6 *sin6; 3977d14714dfSSean Hefty 3978d14714dfSSean Hefty if (addr->sa_family != AF_INET6) 3979d14714dfSSean Hefty return 0; 3980d14714dfSSean Hefty 3981d14714dfSSean Hefty sin6 = (struct sockaddr_in6 *) addr; 39825462edddSSomnath Kotur 39835462edddSSomnath Kotur if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)) 39845462edddSSomnath Kotur return 0; 39855462edddSSomnath Kotur 39865462edddSSomnath Kotur if (!sin6->sin6_scope_id) 3987d14714dfSSean Hefty return -EINVAL; 3988d14714dfSSean Hefty 3989d14714dfSSean Hefty dev_addr->bound_dev_if = sin6->sin6_scope_id; 3990d14714dfSSean Hefty #endif 3991d14714dfSSean Hefty return 0; 3992d14714dfSSean Hefty } 3993d14714dfSSean Hefty 3994a9bb7912SHefty, Sean int rdma_listen(struct rdma_cm_id *id, int backlog) 3995a9bb7912SHefty, Sean { 3996732d41c5SJason Gunthorpe struct rdma_id_private *id_priv = 3997732d41c5SJason Gunthorpe container_of(id, struct 
rdma_id_private, id); 3998a9bb7912SHefty, Sean int ret; 3999a9bb7912SHefty, Sean 4000732d41c5SJason Gunthorpe if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) { 4001bc0bdc5aSJason Gunthorpe struct sockaddr_in any_in = { 4002bc0bdc5aSJason Gunthorpe .sin_family = AF_INET, 4003bc0bdc5aSJason Gunthorpe .sin_addr.s_addr = htonl(INADDR_ANY), 4004bc0bdc5aSJason Gunthorpe }; 4005bc0bdc5aSJason Gunthorpe 4006732d41c5SJason Gunthorpe /* For a well behaved ULP state will be RDMA_CM_IDLE */ 4007bc0bdc5aSJason Gunthorpe ret = rdma_bind_addr(id, (struct sockaddr *)&any_in); 4008a9bb7912SHefty, Sean if (ret) 4009a9bb7912SHefty, Sean return ret; 4010732d41c5SJason Gunthorpe if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, 4011732d41c5SJason Gunthorpe RDMA_CM_LISTEN))) 4012a9bb7912SHefty, Sean return -EINVAL; 4013a9bb7912SHefty, Sean } 4014a9bb7912SHefty, Sean 4015d490ee52SJason Gunthorpe /* 4016d490ee52SJason Gunthorpe * Once the ID reaches RDMA_CM_LISTEN it is not allowed to be reusable 4017d490ee52SJason Gunthorpe * any more, and has to be unique in the bind list. 
4018d490ee52SJason Gunthorpe */ 4019a9bb7912SHefty, Sean if (id_priv->reuseaddr) { 4020d490ee52SJason Gunthorpe mutex_lock(&lock); 4021d490ee52SJason Gunthorpe ret = cma_check_port(id_priv->bind_list, id_priv, 0); 4022d490ee52SJason Gunthorpe if (!ret) 4023d490ee52SJason Gunthorpe id_priv->reuseaddr = 0; 4024d490ee52SJason Gunthorpe mutex_unlock(&lock); 4025a9bb7912SHefty, Sean if (ret) 4026a9bb7912SHefty, Sean goto err; 4027a9bb7912SHefty, Sean } 4028a9bb7912SHefty, Sean 4029a9bb7912SHefty, Sean id_priv->backlog = backlog; 4030889d916bSShay Drory if (id_priv->cma_dev) { 403172219ceaSMichael Wang if (rdma_cap_ib_cm(id->device, 1)) { 4032a9bb7912SHefty, Sean ret = cma_ib_listen(id_priv); 4033a9bb7912SHefty, Sean if (ret) 4034a9bb7912SHefty, Sean goto err; 403504215330SMichael Wang } else if (rdma_cap_iw_cm(id->device, 1)) { 4036a9bb7912SHefty, Sean ret = cma_iw_listen(id_priv, backlog); 4037a9bb7912SHefty, Sean if (ret) 4038a9bb7912SHefty, Sean goto err; 403921655afcSMichael Wang } else { 4040a9bb7912SHefty, Sean ret = -ENOSYS; 4041a9bb7912SHefty, Sean goto err; 4042a9bb7912SHefty, Sean } 4043c80a0c52SLeon Romanovsky } else { 4044c80a0c52SLeon Romanovsky ret = cma_listen_on_all(id_priv); 4045c80a0c52SLeon Romanovsky if (ret) 4046c80a0c52SLeon Romanovsky goto err; 4047c80a0c52SLeon Romanovsky } 4048a9bb7912SHefty, Sean 4049a9bb7912SHefty, Sean return 0; 4050a9bb7912SHefty, Sean err: 4051a9bb7912SHefty, Sean id_priv->backlog = 0; 4052d490ee52SJason Gunthorpe /* 4053d490ee52SJason Gunthorpe * All the failure paths that lead here will not allow the req_handler's 4054d490ee52SJason Gunthorpe * to have run. 
4055d490ee52SJason Gunthorpe */ 4056550e5ca7SNir Muchtar cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND); 4057a9bb7912SHefty, Sean return ret; 4058a9bb7912SHefty, Sean } 4059a9bb7912SHefty, Sean EXPORT_SYMBOL(rdma_listen); 4060a9bb7912SHefty, Sean 4061e51060f0SSean Hefty int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) 4062e51060f0SSean Hefty { 4063e51060f0SSean Hefty struct rdma_id_private *id_priv; 4064e51060f0SSean Hefty int ret; 40656df6b4a9SMoni Shoua struct sockaddr *daddr; 4066e51060f0SSean Hefty 4067680f920aSSean Hefty if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 && 4068680f920aSSean Hefty addr->sa_family != AF_IB) 4069e51060f0SSean Hefty return -EAFNOSUPPORT; 4070e51060f0SSean Hefty 4071e51060f0SSean Hefty id_priv = container_of(id, struct rdma_id_private, id); 4072550e5ca7SNir Muchtar if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND)) 4073e51060f0SSean Hefty return -EINVAL; 4074e51060f0SSean Hefty 4075d14714dfSSean Hefty ret = cma_check_linklocal(&id->route.addr.dev_addr, addr); 4076d14714dfSSean Hefty if (ret) 4077d14714dfSSean Hefty goto err1; 4078d14714dfSSean Hefty 40797b85627bSMoni Shoua memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr)); 40808523c048SSean Hefty if (!cma_any_addr(addr)) { 4081680f920aSSean Hefty ret = cma_translate_addr(addr, &id->route.addr.dev_addr); 4082255d0c14SKrishna Kumar if (ret) 4083255d0c14SKrishna Kumar goto err1; 4084255d0c14SKrishna Kumar 4085ff11c6cdSParav Pandit ret = cma_acquire_dev_by_src_ip(id_priv); 4086e51060f0SSean Hefty if (ret) 4087255d0c14SKrishna Kumar goto err1; 4088e51060f0SSean Hefty } 4089e51060f0SSean Hefty 409068602120SSean Hefty if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) { 40915b0ec991SSean Hefty if (addr->sa_family == AF_INET) 40925b0ec991SSean Hefty id_priv->afonly = 1; 40935b0ec991SSean Hefty #if IS_ENABLED(CONFIG_IPV6) 4094fa20105eSGuy Shapiro else if (addr->sa_family == AF_INET6) { 4095fa20105eSGuy Shapiro struct net *net = 
id_priv->id.route.addr.dev_addr.net; 4096fa20105eSGuy Shapiro 4097fa20105eSGuy Shapiro id_priv->afonly = net->ipv6.sysctl.bindv6only; 4098fa20105eSGuy Shapiro } 40995b0ec991SSean Hefty #endif 410068602120SSean Hefty } 41019dea9a2fSTatyana Nikolova daddr = cma_dst_addr(id_priv); 41029dea9a2fSTatyana Nikolova daddr->sa_family = addr->sa_family; 41039dea9a2fSTatyana Nikolova 4104e51060f0SSean Hefty ret = cma_get_port(id_priv); 4105e51060f0SSean Hefty if (ret) 4106255d0c14SKrishna Kumar goto err2; 4107e51060f0SSean Hefty 4108cb5cd0eaSShay Drory if (!cma_any_addr(addr)) 4109cb5cd0eaSShay Drory rdma_restrack_add(&id_priv->res); 4110e51060f0SSean Hefty return 0; 4111255d0c14SKrishna Kumar err2: 4112ed7a01fdSLeon Romanovsky if (id_priv->cma_dev) 4113a396d43aSSean Hefty cma_release_dev(id_priv); 4114255d0c14SKrishna Kumar err1: 4115550e5ca7SNir Muchtar cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE); 4116e51060f0SSean Hefty return ret; 4117e51060f0SSean Hefty } 4118e51060f0SSean Hefty EXPORT_SYMBOL(rdma_bind_addr); 4119e51060f0SSean Hefty 4120f4753834SSean Hefty static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv) 4121e51060f0SSean Hefty { 4122e51060f0SSean Hefty struct cma_hdr *cma_hdr; 4123e51060f0SSean Hefty 412401602f11SSean Hefty cma_hdr = hdr; 412501602f11SSean Hefty cma_hdr->cma_version = CMA_VERSION; 4126f4753834SSean Hefty if (cma_family(id_priv) == AF_INET) { 41271f5175adSAleksey Senin struct sockaddr_in *src4, *dst4; 41281f5175adSAleksey Senin 4129f4753834SSean Hefty src4 = (struct sockaddr_in *) cma_src_addr(id_priv); 4130f4753834SSean Hefty dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv); 4131e51060f0SSean Hefty 4132e51060f0SSean Hefty cma_set_ip_ver(cma_hdr, 4); 4133e51060f0SSean Hefty cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; 4134e51060f0SSean Hefty cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; 4135e51060f0SSean Hefty cma_hdr->port = src4->sin_port; 4136e8160e15SSean Hefty } else if (cma_family(id_priv) == 
AF_INET6) { 41371f5175adSAleksey Senin struct sockaddr_in6 *src6, *dst6; 41381f5175adSAleksey Senin 4139f4753834SSean Hefty src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); 4140f4753834SSean Hefty dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv); 41411f5175adSAleksey Senin 41421f5175adSAleksey Senin cma_set_ip_ver(cma_hdr, 6); 41431f5175adSAleksey Senin cma_hdr->src_addr.ip6 = src6->sin6_addr; 41441f5175adSAleksey Senin cma_hdr->dst_addr.ip6 = dst6->sin6_addr; 41451f5175adSAleksey Senin cma_hdr->port = src6->sin6_port; 41461f5175adSAleksey Senin } 4147e51060f0SSean Hefty return 0; 4148e51060f0SSean Hefty } 4149e51060f0SSean Hefty 4150628e5f6dSSean Hefty static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, 4151e7ff98aeSParav Pandit const struct ib_cm_event *ib_event) 4152628e5f6dSSean Hefty { 4153628e5f6dSSean Hefty struct rdma_id_private *id_priv = cm_id->context; 41547582df82SParav Pandit struct rdma_cm_event event = {}; 4155e7ff98aeSParav Pandit const struct ib_cm_sidr_rep_event_param *rep = 4156e7ff98aeSParav Pandit &ib_event->param.sidr_rep_rcvd; 4157f6a9d47aSJason Gunthorpe int ret; 4158628e5f6dSSean Hefty 415937e07cdaSBart Van Assche mutex_lock(&id_priv->handler_mutex); 41602a7cec53SJason Gunthorpe if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) 416137e07cdaSBart Van Assche goto out; 4162628e5f6dSSean Hefty 4163628e5f6dSSean Hefty switch (ib_event->event) { 4164628e5f6dSSean Hefty case IB_CM_SIDR_REQ_ERROR: 4165628e5f6dSSean Hefty event.event = RDMA_CM_EVENT_UNREACHABLE; 4166628e5f6dSSean Hefty event.status = -ETIMEDOUT; 4167628e5f6dSSean Hefty break; 4168628e5f6dSSean Hefty case IB_CM_SIDR_REP_RECEIVED: 4169628e5f6dSSean Hefty event.param.ud.private_data = ib_event->private_data; 4170628e5f6dSSean Hefty event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; 4171628e5f6dSSean Hefty if (rep->status != IB_SIDR_SUCCESS) { 4172628e5f6dSSean Hefty event.event = RDMA_CM_EVENT_UNREACHABLE; 4173628e5f6dSSean Hefty event.status = 
ib_event->param.sidr_rep_rcvd.status; 4174498683c6SMoni Shoua pr_debug_ratelimited("RDMA CM: UNREACHABLE: bad SIDR reply. status %d\n", 4175498683c6SMoni Shoua event.status); 4176628e5f6dSSean Hefty break; 4177628e5f6dSSean Hefty } 41785c438135SSean Hefty ret = cma_set_qkey(id_priv, rep->qkey); 4179d2ca39f2SYossi Etigin if (ret) { 4180498683c6SMoni Shoua pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to set qkey. status %d\n", ret); 4181d2ca39f2SYossi Etigin event.event = RDMA_CM_EVENT_ADDR_ERROR; 41825c438135SSean Hefty event.status = ret; 4183628e5f6dSSean Hefty break; 4184628e5f6dSSean Hefty } 41854ad6a024SParav Pandit ib_init_ah_attr_from_path(id_priv->id.device, 41864ad6a024SParav Pandit id_priv->id.port_num, 4187628e5f6dSSean Hefty id_priv->id.route.path_rec, 418839839107SParav Pandit &event.param.ud.ah_attr, 418939839107SParav Pandit rep->sgid_attr); 4190628e5f6dSSean Hefty event.param.ud.qp_num = rep->qpn; 4191628e5f6dSSean Hefty event.param.ud.qkey = rep->qkey; 4192628e5f6dSSean Hefty event.event = RDMA_CM_EVENT_ESTABLISHED; 4193628e5f6dSSean Hefty event.status = 0; 4194628e5f6dSSean Hefty break; 4195628e5f6dSSean Hefty default: 4196aba25a3eSParav Pandit pr_err("RDMA CMA: unexpected IB CM event: %d\n", 4197628e5f6dSSean Hefty ib_event->event); 4198628e5f6dSSean Hefty goto out; 4199628e5f6dSSean Hefty } 4200628e5f6dSSean Hefty 4201ed999f82SChuck Lever ret = cma_cm_event_handler(id_priv, &event); 4202aa74f487SParav Pandit 4203aa74f487SParav Pandit rdma_destroy_ah_attr(&event.param.ud.ah_attr); 4204628e5f6dSSean Hefty if (ret) { 4205628e5f6dSSean Hefty /* Destroy the CM ID by returning a non-zero value. 
*/ 4206628e5f6dSSean Hefty id_priv->cm_id.ib = NULL; 4207f6a9d47aSJason Gunthorpe destroy_id_handler_unlock(id_priv); 4208628e5f6dSSean Hefty return ret; 4209628e5f6dSSean Hefty } 4210628e5f6dSSean Hefty out: 4211de910bd9SOr Gerlitz mutex_unlock(&id_priv->handler_mutex); 4212f6a9d47aSJason Gunthorpe return 0; 4213628e5f6dSSean Hefty } 4214628e5f6dSSean Hefty 4215628e5f6dSSean Hefty static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, 4216628e5f6dSSean Hefty struct rdma_conn_param *conn_param) 4217628e5f6dSSean Hefty { 4218628e5f6dSSean Hefty struct ib_cm_sidr_req_param req; 42190c9361fcSJack Morgenstein struct ib_cm_id *id; 4220e511d1aeSSean Hefty void *private_data; 4221c0b64f58SBart Van Assche u8 offset; 4222c0b64f58SBart Van Assche int ret; 4223628e5f6dSSean Hefty 4224e511d1aeSSean Hefty memset(&req, 0, sizeof req); 4225e8160e15SSean Hefty offset = cma_user_data_offset(id_priv); 42268d0d2b0fSHåkon Bugge if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len)) 422704ded167SSean Hefty return -EINVAL; 422804ded167SSean Hefty 4229e8160e15SSean Hefty if (req.private_data_len) { 4230e511d1aeSSean Hefty private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 4231e511d1aeSSean Hefty if (!private_data) 4232628e5f6dSSean Hefty return -ENOMEM; 4233e8160e15SSean Hefty } else { 4234e511d1aeSSean Hefty private_data = NULL; 4235e8160e15SSean Hefty } 4236628e5f6dSSean Hefty 4237628e5f6dSSean Hefty if (conn_param->private_data && conn_param->private_data_len) 4238e511d1aeSSean Hefty memcpy(private_data + offset, conn_param->private_data, 4239e511d1aeSSean Hefty conn_param->private_data_len); 4240628e5f6dSSean Hefty 4241e511d1aeSSean Hefty if (private_data) { 4242e511d1aeSSean Hefty ret = cma_format_hdr(private_data, id_priv); 4243628e5f6dSSean Hefty if (ret) 4244628e5f6dSSean Hefty goto out; 4245e511d1aeSSean Hefty req.private_data = private_data; 4246e8160e15SSean Hefty } 4247628e5f6dSSean Hefty 42480c9361fcSJack Morgenstein id = 
ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler, 42490c9361fcSJack Morgenstein id_priv); 42500c9361fcSJack Morgenstein if (IS_ERR(id)) { 42510c9361fcSJack Morgenstein ret = PTR_ERR(id); 4252628e5f6dSSean Hefty goto out; 4253628e5f6dSSean Hefty } 42540c9361fcSJack Morgenstein id_priv->cm_id.ib = id; 4255628e5f6dSSean Hefty 4256f4753834SSean Hefty req.path = id_priv->id.route.path_rec; 4257815d456eSParav Pandit req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr; 4258cf53936fSSean Hefty req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); 4259628e5f6dSSean Hefty req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8); 4260628e5f6dSSean Hefty req.max_cm_retries = CMA_MAX_CM_RETRIES; 4261628e5f6dSSean Hefty 4262ed999f82SChuck Lever trace_cm_send_sidr_req(id_priv); 4263628e5f6dSSean Hefty ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); 4264628e5f6dSSean Hefty if (ret) { 4265628e5f6dSSean Hefty ib_destroy_cm_id(id_priv->cm_id.ib); 4266628e5f6dSSean Hefty id_priv->cm_id.ib = NULL; 4267628e5f6dSSean Hefty } 4268628e5f6dSSean Hefty out: 4269e511d1aeSSean Hefty kfree(private_data); 4270628e5f6dSSean Hefty return ret; 4271628e5f6dSSean Hefty } 4272628e5f6dSSean Hefty 4273e51060f0SSean Hefty static int cma_connect_ib(struct rdma_id_private *id_priv, 4274e51060f0SSean Hefty struct rdma_conn_param *conn_param) 4275e51060f0SSean Hefty { 4276e51060f0SSean Hefty struct ib_cm_req_param req; 4277e51060f0SSean Hefty struct rdma_route *route; 4278e51060f0SSean Hefty void *private_data; 42790c9361fcSJack Morgenstein struct ib_cm_id *id; 4280c0b64f58SBart Van Assche u8 offset; 4281c0b64f58SBart Van Assche int ret; 4282e51060f0SSean Hefty 4283e51060f0SSean Hefty memset(&req, 0, sizeof req); 4284e8160e15SSean Hefty offset = cma_user_data_offset(id_priv); 42858d0d2b0fSHåkon Bugge if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len)) 428604ded167SSean Hefty return -EINVAL; 428704ded167SSean Hefty 4288e8160e15SSean Hefty if 
(req.private_data_len) { 4289e51060f0SSean Hefty private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 4290e51060f0SSean Hefty if (!private_data) 4291e51060f0SSean Hefty return -ENOMEM; 4292e8160e15SSean Hefty } else { 4293e8160e15SSean Hefty private_data = NULL; 4294e8160e15SSean Hefty } 4295e51060f0SSean Hefty 4296e51060f0SSean Hefty if (conn_param->private_data && conn_param->private_data_len) 4297e51060f0SSean Hefty memcpy(private_data + offset, conn_param->private_data, 4298e51060f0SSean Hefty conn_param->private_data_len); 4299e51060f0SSean Hefty 43000c9361fcSJack Morgenstein id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv); 43010c9361fcSJack Morgenstein if (IS_ERR(id)) { 43020c9361fcSJack Morgenstein ret = PTR_ERR(id); 4303e51060f0SSean Hefty goto out; 4304e51060f0SSean Hefty } 43050c9361fcSJack Morgenstein id_priv->cm_id.ib = id; 4306e51060f0SSean Hefty 4307e51060f0SSean Hefty route = &id_priv->id.route; 4308e8160e15SSean Hefty if (private_data) { 4309f4753834SSean Hefty ret = cma_format_hdr(private_data, id_priv); 4310e51060f0SSean Hefty if (ret) 4311e51060f0SSean Hefty goto out; 4312e51060f0SSean Hefty req.private_data = private_data; 4313e8160e15SSean Hefty } 4314e51060f0SSean Hefty 4315e51060f0SSean Hefty req.primary_path = &route->path_rec[0]; 4316eb8336dbSMark Zhang req.primary_path_inbound = route->path_rec_inbound; 4317eb8336dbSMark Zhang req.primary_path_outbound = route->path_rec_outbound; 4318bf9a9928SMark Zhang if (route->num_pri_alt_paths == 2) 4319e51060f0SSean Hefty req.alternate_path = &route->path_rec[1]; 4320e51060f0SSean Hefty 4321815d456eSParav Pandit req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr; 4322815d456eSParav Pandit /* Alternate path SGID attribute currently unsupported */ 4323cf53936fSSean Hefty req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); 4324e51060f0SSean Hefty req.qp_num = id_priv->qp_num; 432518c441a6SSean Hefty req.qp_type = id_priv->id.qp_type; 
4326e51060f0SSean Hefty req.starting_psn = id_priv->seq_num; 4327e51060f0SSean Hefty req.responder_resources = conn_param->responder_resources; 4328e51060f0SSean Hefty req.initiator_depth = conn_param->initiator_depth; 4329e51060f0SSean Hefty req.flow_control = conn_param->flow_control; 43304ede178aSSean Hefty req.retry_count = min_t(u8, 7, conn_param->retry_count); 43314ede178aSSean Hefty req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); 4332e51060f0SSean Hefty req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; 4333e51060f0SSean Hefty req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; 4334e51060f0SSean Hefty req.max_cm_retries = CMA_MAX_CM_RETRIES; 4335e51060f0SSean Hefty req.srq = id_priv->srq ? 1 : 0; 4336a20652e1SLeon Romanovsky req.ece.vendor_id = id_priv->ece.vendor_id; 4337a20652e1SLeon Romanovsky req.ece.attr_mod = id_priv->ece.attr_mod; 4338e51060f0SSean Hefty 4339ed999f82SChuck Lever trace_cm_send_req(id_priv); 4340e51060f0SSean Hefty ret = ib_send_cm_req(id_priv->cm_id.ib, &req); 4341e51060f0SSean Hefty out: 43420c9361fcSJack Morgenstein if (ret && !IS_ERR(id)) { 43430c9361fcSJack Morgenstein ib_destroy_cm_id(id); 4344675a027cSKrishna Kumar id_priv->cm_id.ib = NULL; 4345675a027cSKrishna Kumar } 4346675a027cSKrishna Kumar 4347e51060f0SSean Hefty kfree(private_data); 4348e51060f0SSean Hefty return ret; 4349e51060f0SSean Hefty } 4350e51060f0SSean Hefty 435107ebafbaSTom Tucker static int cma_connect_iw(struct rdma_id_private *id_priv, 435207ebafbaSTom Tucker struct rdma_conn_param *conn_param) 435307ebafbaSTom Tucker { 435407ebafbaSTom Tucker struct iw_cm_id *cm_id; 435507ebafbaSTom Tucker int ret; 435607ebafbaSTom Tucker struct iw_cm_conn_param iw_param; 435707ebafbaSTom Tucker 435807ebafbaSTom Tucker cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv); 43590c9361fcSJack Morgenstein if (IS_ERR(cm_id)) 43600c9361fcSJack Morgenstein return PTR_ERR(cm_id); 436107ebafbaSTom Tucker 4362ca0c448dSHåkon Bugge 
mutex_lock(&id_priv->qp_mutex); 436368cdba06SSteve Wise cm_id->tos = id_priv->tos; 4364926ba19bSSteve Wise cm_id->tos_set = id_priv->tos_set; 4365ca0c448dSHåkon Bugge mutex_unlock(&id_priv->qp_mutex); 4366ca0c448dSHåkon Bugge 436707ebafbaSTom Tucker id_priv->cm_id.iw = cm_id; 436807ebafbaSTom Tucker 436924d44a39SSteve Wise memcpy(&cm_id->local_addr, cma_src_addr(id_priv), 437024d44a39SSteve Wise rdma_addr_size(cma_src_addr(id_priv))); 437124d44a39SSteve Wise memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv), 437224d44a39SSteve Wise rdma_addr_size(cma_dst_addr(id_priv))); 437307ebafbaSTom Tucker 43745851bb89SSean Hefty ret = cma_modify_qp_rtr(id_priv, conn_param); 4375675a027cSKrishna Kumar if (ret) 4376675a027cSKrishna Kumar goto out; 437707ebafbaSTom Tucker 4378f45ee80eSHefty, Sean if (conn_param) { 437907ebafbaSTom Tucker iw_param.ord = conn_param->initiator_depth; 438007ebafbaSTom Tucker iw_param.ird = conn_param->responder_resources; 438107ebafbaSTom Tucker iw_param.private_data = conn_param->private_data; 438207ebafbaSTom Tucker iw_param.private_data_len = conn_param->private_data_len; 4383f45ee80eSHefty, Sean iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num; 4384f45ee80eSHefty, Sean } else { 4385f45ee80eSHefty, Sean memset(&iw_param, 0, sizeof iw_param); 438607ebafbaSTom Tucker iw_param.qpn = id_priv->qp_num; 4387f45ee80eSHefty, Sean } 438807ebafbaSTom Tucker ret = iw_cm_connect(cm_id, &iw_param); 438907ebafbaSTom Tucker out: 43900c9361fcSJack Morgenstein if (ret) { 4391675a027cSKrishna Kumar iw_destroy_cm_id(cm_id); 4392675a027cSKrishna Kumar id_priv->cm_id.iw = NULL; 4393675a027cSKrishna Kumar } 439407ebafbaSTom Tucker return ret; 439507ebafbaSTom Tucker } 439607ebafbaSTom Tucker 4397071ba4ccSJason Gunthorpe /** 4398071ba4ccSJason Gunthorpe * rdma_connect_locked - Initiate an active connection request. 4399071ba4ccSJason Gunthorpe * @id: Connection identifier to connect. 
4400071ba4ccSJason Gunthorpe * @conn_param: Connection information used for connected QPs. 4401071ba4ccSJason Gunthorpe * 4402071ba4ccSJason Gunthorpe * Same as rdma_connect() but can only be called from the 4403071ba4ccSJason Gunthorpe * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback. 4404071ba4ccSJason Gunthorpe */ 4405071ba4ccSJason Gunthorpe int rdma_connect_locked(struct rdma_cm_id *id, 4406071ba4ccSJason Gunthorpe struct rdma_conn_param *conn_param) 4407e51060f0SSean Hefty { 44082a7cec53SJason Gunthorpe struct rdma_id_private *id_priv = 44092a7cec53SJason Gunthorpe container_of(id, struct rdma_id_private, id); 4410e51060f0SSean Hefty int ret; 4411e51060f0SSean Hefty 4412071ba4ccSJason Gunthorpe if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) 4413071ba4ccSJason Gunthorpe return -EINVAL; 4414e51060f0SSean Hefty 4415e51060f0SSean Hefty if (!id->qp) { 4416e51060f0SSean Hefty id_priv->qp_num = conn_param->qp_num; 4417e51060f0SSean Hefty id_priv->srq = conn_param->srq; 4418e51060f0SSean Hefty } 4419e51060f0SSean Hefty 442072219ceaSMichael Wang if (rdma_cap_ib_cm(id->device, id->port_num)) { 4421b26f9b99SSean Hefty if (id->qp_type == IB_QPT_UD) 4422628e5f6dSSean Hefty ret = cma_resolve_ib_udp(id_priv, conn_param); 4423628e5f6dSSean Hefty else 4424e51060f0SSean Hefty ret = cma_connect_ib(id_priv, conn_param); 4425b6eb7011SWenpeng Liang } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 442607ebafbaSTom Tucker ret = cma_connect_iw(id_priv, conn_param); 4427b6eb7011SWenpeng Liang } else { 4428e51060f0SSean Hefty ret = -ENOSYS; 4429b6eb7011SWenpeng Liang } 4430e51060f0SSean Hefty if (ret) 44312a7cec53SJason Gunthorpe goto err_state; 4432e51060f0SSean Hefty return 0; 44332a7cec53SJason Gunthorpe err_state: 4434550e5ca7SNir Muchtar cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED); 4435071ba4ccSJason Gunthorpe return ret; 4436071ba4ccSJason Gunthorpe } 4437071ba4ccSJason Gunthorpe EXPORT_SYMBOL(rdma_connect_locked); 
4438071ba4ccSJason Gunthorpe 4439071ba4ccSJason Gunthorpe /** 4440071ba4ccSJason Gunthorpe * rdma_connect - Initiate an active connection request. 4441071ba4ccSJason Gunthorpe * @id: Connection identifier to connect. 4442071ba4ccSJason Gunthorpe * @conn_param: Connection information used for connected QPs. 4443071ba4ccSJason Gunthorpe * 4444071ba4ccSJason Gunthorpe * Users must have resolved a route for the rdma_cm_id to connect with by having 4445071ba4ccSJason Gunthorpe * called rdma_resolve_route before calling this routine. 4446071ba4ccSJason Gunthorpe * 4447071ba4ccSJason Gunthorpe * This call will either connect to a remote QP or obtain remote QP information 4448071ba4ccSJason Gunthorpe * for unconnected rdma_cm_id's. The actual operation is based on the 4449071ba4ccSJason Gunthorpe * rdma_cm_id's port space. 4450071ba4ccSJason Gunthorpe */ 4451071ba4ccSJason Gunthorpe int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 4452071ba4ccSJason Gunthorpe { 4453071ba4ccSJason Gunthorpe struct rdma_id_private *id_priv = 4454071ba4ccSJason Gunthorpe container_of(id, struct rdma_id_private, id); 4455071ba4ccSJason Gunthorpe int ret; 4456071ba4ccSJason Gunthorpe 4457071ba4ccSJason Gunthorpe mutex_lock(&id_priv->handler_mutex); 4458071ba4ccSJason Gunthorpe ret = rdma_connect_locked(id, conn_param); 44592a7cec53SJason Gunthorpe mutex_unlock(&id_priv->handler_mutex); 4460e51060f0SSean Hefty return ret; 4461e51060f0SSean Hefty } 4462e51060f0SSean Hefty EXPORT_SYMBOL(rdma_connect); 4463e51060f0SSean Hefty 446434e2ab57SLeon Romanovsky /** 446534e2ab57SLeon Romanovsky * rdma_connect_ece - Initiate an active connection request with ECE data. 446634e2ab57SLeon Romanovsky * @id: Connection identifier to connect. 446734e2ab57SLeon Romanovsky * @conn_param: Connection information used for connected QPs. 446834e2ab57SLeon Romanovsky * @ece: ECE parameters 446934e2ab57SLeon Romanovsky * 447034e2ab57SLeon Romanovsky * See rdma_connect() explanation. 
447134e2ab57SLeon Romanovsky */ 447234e2ab57SLeon Romanovsky int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, 447334e2ab57SLeon Romanovsky struct rdma_ucm_ece *ece) 447434e2ab57SLeon Romanovsky { 447534e2ab57SLeon Romanovsky struct rdma_id_private *id_priv = 447634e2ab57SLeon Romanovsky container_of(id, struct rdma_id_private, id); 447734e2ab57SLeon Romanovsky 447834e2ab57SLeon Romanovsky id_priv->ece.vendor_id = ece->vendor_id; 447934e2ab57SLeon Romanovsky id_priv->ece.attr_mod = ece->attr_mod; 448034e2ab57SLeon Romanovsky 448134e2ab57SLeon Romanovsky return rdma_connect(id, conn_param); 448234e2ab57SLeon Romanovsky } 448334e2ab57SLeon Romanovsky EXPORT_SYMBOL(rdma_connect_ece); 448434e2ab57SLeon Romanovsky 4485e51060f0SSean Hefty static int cma_accept_ib(struct rdma_id_private *id_priv, 4486e51060f0SSean Hefty struct rdma_conn_param *conn_param) 4487e51060f0SSean Hefty { 4488e51060f0SSean Hefty struct ib_cm_rep_param rep; 44895851bb89SSean Hefty int ret; 4490e51060f0SSean Hefty 44915851bb89SSean Hefty ret = cma_modify_qp_rtr(id_priv, conn_param); 4492e51060f0SSean Hefty if (ret) 44930fe313b0SSean Hefty goto out; 44940fe313b0SSean Hefty 44955851bb89SSean Hefty ret = cma_modify_qp_rts(id_priv, conn_param); 44960fe313b0SSean Hefty if (ret) 44970fe313b0SSean Hefty goto out; 44980fe313b0SSean Hefty 4499e51060f0SSean Hefty memset(&rep, 0, sizeof rep); 4500e51060f0SSean Hefty rep.qp_num = id_priv->qp_num; 4501e51060f0SSean Hefty rep.starting_psn = id_priv->seq_num; 4502e51060f0SSean Hefty rep.private_data = conn_param->private_data; 4503e51060f0SSean Hefty rep.private_data_len = conn_param->private_data_len; 4504e51060f0SSean Hefty rep.responder_resources = conn_param->responder_resources; 4505e51060f0SSean Hefty rep.initiator_depth = conn_param->initiator_depth; 4506e51060f0SSean Hefty rep.failover_accepted = 0; 4507e51060f0SSean Hefty rep.flow_control = conn_param->flow_control; 45084ede178aSSean Hefty rep.rnr_retry_count = min_t(u8, 7, 
conn_param->rnr_retry_count); 4509e51060f0SSean Hefty rep.srq = id_priv->srq ? 1 : 0; 45100cb15372SLeon Romanovsky rep.ece.vendor_id = id_priv->ece.vendor_id; 45110cb15372SLeon Romanovsky rep.ece.attr_mod = id_priv->ece.attr_mod; 4512e51060f0SSean Hefty 4513ed999f82SChuck Lever trace_cm_send_rep(id_priv); 45140fe313b0SSean Hefty ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); 45150fe313b0SSean Hefty out: 45160fe313b0SSean Hefty return ret; 4517e51060f0SSean Hefty } 4518e51060f0SSean Hefty 451907ebafbaSTom Tucker static int cma_accept_iw(struct rdma_id_private *id_priv, 452007ebafbaSTom Tucker struct rdma_conn_param *conn_param) 452107ebafbaSTom Tucker { 452207ebafbaSTom Tucker struct iw_cm_conn_param iw_param; 452307ebafbaSTom Tucker int ret; 452407ebafbaSTom Tucker 4525f2625f7dSSteve Wise if (!conn_param) 4526f2625f7dSSteve Wise return -EINVAL; 4527f2625f7dSSteve Wise 45285851bb89SSean Hefty ret = cma_modify_qp_rtr(id_priv, conn_param); 452907ebafbaSTom Tucker if (ret) 453007ebafbaSTom Tucker return ret; 453107ebafbaSTom Tucker 453207ebafbaSTom Tucker iw_param.ord = conn_param->initiator_depth; 453307ebafbaSTom Tucker iw_param.ird = conn_param->responder_resources; 453407ebafbaSTom Tucker iw_param.private_data = conn_param->private_data; 453507ebafbaSTom Tucker iw_param.private_data_len = conn_param->private_data_len; 4536b6eb7011SWenpeng Liang if (id_priv->id.qp) 453707ebafbaSTom Tucker iw_param.qpn = id_priv->qp_num; 4538b6eb7011SWenpeng Liang else 453907ebafbaSTom Tucker iw_param.qpn = conn_param->qp_num; 454007ebafbaSTom Tucker 454107ebafbaSTom Tucker return iw_cm_accept(id_priv->cm_id.iw, &iw_param); 454207ebafbaSTom Tucker } 454307ebafbaSTom Tucker 4544628e5f6dSSean Hefty static int cma_send_sidr_rep(struct rdma_id_private *id_priv, 45455c438135SSean Hefty enum ib_cm_sidr_status status, u32 qkey, 4546628e5f6dSSean Hefty const void *private_data, int private_data_len) 4547628e5f6dSSean Hefty { 4548628e5f6dSSean Hefty struct ib_cm_sidr_rep_param rep; 
	int ret;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		/* The qkey must be established before it can be reported back */
		ret = cma_set_qkey(id_priv, qkey);
		if (ret)
			return ret;
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;

		rep.ece.vendor_id = id_priv->ece.vendor_id;
		rep.ece.attr_mod = id_priv->ece.attr_mod;
	}

	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	trace_cm_send_sidr_rep(id_priv);
	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}

/**
 * rdma_accept - Called to accept a connection request or response.
 * @id: Connection identifier associated with the request.
 * @conn_param: Information needed to establish the connection. This must be
 *   provided if accepting a connection request. If accepting a connection
 *   response, this parameter must be NULL.
 *
 * Typically, this routine is only called by the listener to accept a connection
 * request. It must also be called on the active side of a connection if the
 * user is performing their own QP transitions.
 *
 * In the case of error, a reject message is sent to the remote side and the
 * state of the qp associated with the id is modified to error, such that any
 * previously posted receive buffers would be flushed.
 *
 * This function is for use by kernel ULPs and must be called from under the
 * handler callback.
 */
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	int ret;

	/* Callers must hold the handler lock (see function comment above) */
	lockdep_assert_held(&id_priv->handler_mutex);

	if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
		return -EINVAL;

	/* No QP attached to the id: remember the caller's QP number and SRQ
	 * setting so they can be placed in the reply.
	 */
	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD) {
			/* UD connections complete via a SIDR reply */
			if (conn_param)
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							conn_param->qkey,
							conn_param->private_data,
							conn_param->private_data_len);
			else
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							0, NULL, 0);
		} else {
			/* With conn_param this accepts a request (send REP);
			 * without it, it accepts a response (process REP).
			 */
			if (conn_param)
				ret = cma_accept_ib(id_priv, conn_param);
			else
				ret = cma_rep_recv(id_priv);
		}
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = cma_accept_iw(id_priv, conn_param);
	} else {
		ret = -ENOSYS;
	}
	if (ret)
		goto reject;

	return 0;
reject:
	/* Move the QP to error so posted receives are flushed, then tell the
	 * remote side the accept failed.
	 */
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);

/*
 * Same as rdma_accept(), but first records the ECE parameters to be carried
 * in the reply.
 */
int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
		    struct rdma_ucm_ece *ece)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);

	id_priv->ece.vendor_id = ece->vendor_id;
	id_priv->ece.attr_mod = ece->attr_mod;

	return rdma_accept(id, conn_param);
}
EXPORT_SYMBOL(rdma_accept_ece);

/*
 * Take the id's handler_mutex, satisfying the locking requirement of
 * rdma_accept() for callers outside the event handler callback.
 */
void rdma_lock_handler(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);

	mutex_lock(&id_priv->handler_mutex);
}
EXPORT_SYMBOL(rdma_lock_handler);

/* Release the handler_mutex taken by rdma_lock_handler() */
void rdma_unlock_handler(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);

	mutex_unlock(&id_priv->handler_mutex);
}
EXPORT_SYMBOL(rdma_unlock_handler);

/*
 * Forward an IB event notification to the CM. Only IB CA nodes have a CM to
 * notify; for any other node type the call is a successful no-op.
 */
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);

/*
 * Reject a connection request. For IB UD ids this is a SIDR reject; for IB
 * RC ids a CM REJ with the given reason code; for iWarp an iw_cm reject.
 * The optional private data is delivered to the remote peer.
 */
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len, u8 reason)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD) {
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
						private_data, private_data_len);
		} else {
			trace_cm_send_rej(id_priv);
			ret = ib_send_cm_rej(id_priv->cm_id.ib, reason, NULL, 0,
					     private_data, private_data_len);
		}
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
	} else {
		ret = -ENOSYS;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_reject);

/*
 * Tear down an established connection. The QP is first moved to the error
 * state so outstanding receives are flushed, then a disconnect exchange is
 * performed (DREQ/DREP for IB CM, iw_cm_disconnect for iWarp).
 */
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect.
		 */
		trace_cm_disconnect(id_priv);
		/* If sending DREQ fails the remote side may have initiated the
		 * disconnect already, so answer with a DREP instead.
		 */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) {
			if (!ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0))
				trace_cm_sent_drep(id_priv);
		} else {
			trace_cm_sent_dreq(id_priv);
		}
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
	} else
		ret = -EINVAL;

out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);

/*
 * Fill in a MULTICAST_JOIN (or MULTICAST_ERROR) event from a completed SA
 * multicast join. On success the UD parameters (AH attributes, qp_num, qkey)
 * are populated from the mcmember record; any failure along the way turns the
 * event into RDMA_CM_EVENT_MULTICAST_ERROR instead.
 */
static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
			      struct ib_sa_multicast *multicast,
			      struct rdma_cm_event *event,
			      struct cma_multicast *mc)
{
	struct rdma_dev_addr *dev_addr;
	enum ib_gid_type gid_type;
	struct net_device *ndev;

	if (!status)
		status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
	else
		pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
				     status);

	event->status = status;
	event->param.ud.private_data = mc->context;
	if (status) {
		event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
		return;
	}

	dev_addr = &id_priv->id.route.addr.dev_addr;
	ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
	gid_type =
		id_priv->cma_dev
			->default_gid_type[id_priv->id.port_num -
					   rdma_start_port(
						   id_priv->cma_dev->device)];

	event->event = RDMA_CM_EVENT_MULTICAST_JOIN;
	if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num,
				     &multicast->rec, ndev, gid_type,
				     &event->param.ud.ah_attr)) {
		event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
		goto out;
	}

	/* Multicast sends always target QPN 0xFFFFFF */
	event->param.ud.qp_num = 0xFFFFFF;
	event->param.ud.qkey = be32_to_cpu(multicast->rec.qkey);

out:
	/* Drop the reference taken by dev_get_by_index() above */
	if (ndev)
		dev_put(ndev);
}

/*
 * SA multicast join completion callback: build the corresponding cm event
 * and deliver it to the ULP under the handler lock.
 */
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct cma_multicast *mc = multicast->context;
	struct rdma_id_private *id_priv = mc->id_priv;
	struct rdma_cm_event
	event = {};
	int ret = 0;

	mutex_lock(&id_priv->handler_mutex);
	/* Don't deliver events to an id that is already being torn down */
	if (READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL ||
	    READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING)
		goto out;

	cma_make_mc_event(status, id_priv, multicast, &event, mc);
	ret = cma_cm_event_handler(id_priv, &event);
	/* Release the AH attribute reference created by cma_make_mc_event() */
	rdma_destroy_ah_attr(&event.param.ud.ah_attr);
	WARN_ON(ret);

out:
	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}

/*
 * Derive the multicast GID for @addr. Wildcard addresses map to the zero
 * MGID; SA-assigned IPv6 MGIDs and AF_IB addresses are copied verbatim;
 * other IPv4/IPv6 addresses are converted through the standard IP-to-IB
 * multicast mapping (with the RDMA CM signature byte for RDMA_PS_UDP).
 */
static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
		    0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_IB) {
		memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}

/*
 * Join an IB multicast group through the SA: build the mcmember record from
 * the resolved device address and issue an asynchronous ib_sa_join_multicast()
 * whose completion is handled by cma_ib_mc_handler().
 */
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	ret = cma_set_qkey(id_priv, 0);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	rec.qkey =
	cpu_to_be32(id_priv->qkey);
	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = mc->join_state;

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	/* IPoIB additionally constrains rate, MTU and hop limit in the join */
	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU |
			     IB_SA_MCMEMBER_REC_HOP_LIMIT;

	mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device,
					 id_priv->id.port_num, &rec, comp_mask,
					 GFP_KERNEL, cma_ib_mc_handler, mc);
	return PTR_ERR_OR_ZERO(mc->sa_mc);
}

/*
 * Build the multicast GID for a RoCE (IBoE) join. Wildcards map to the zero
 * MGID and IPv6 addresses are copied verbatim; IPv4 addresses are embedded in
 * a synthetic MGID whose leading bytes depend on the GID type (zeroed for
 * RoCEv2/UDP encapsulation, 0xff0e otherwise).
 */
static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
			      enum ib_gid_type gid_type)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		mgid->raw[0] =
			(gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0xff;
		mgid->raw[1] =
			(gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0x0e;
		mgid->raw[2] = 0;
		mgid->raw[3] = 0;
		mgid->raw[4] = 0;
		mgid->raw[5] = 0;
		mgid->raw[6] = 0;
		mgid->raw[7] = 0;
		mgid->raw[8] = 0;
		mgid->raw[9] = 0;
		mgid->raw[10] = 0xff;
		mgid->raw[11] = 0xff;
		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
	}
}

/*
 * Join a multicast group on a RoCE device. There is no SA to consult, so the
 * mcmember record is synthesized locally from the bound net_device, and the
 * join event is delivered through a queued work item rather than an SA
 * callback.
 */
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int err = 0;
	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
	struct net_device *ndev = NULL;
	struct ib_sa_multicast ib;
	enum ib_gid_type gid_type;
	bool send_only;

	send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);

	if (cma_zero_addr(addr))
		return -EINVAL;

	gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
		   rdma_start_port(id_priv->cma_dev->device)];
	cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);

	ib.rec.pkey = cpu_to_be16(0xffff);
	if
	    (id_priv->id.ps == RDMA_PS_UDP)
		ib.rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);

	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
	if (!ndev)
		return -ENODEV;

	/* Fill in the link parameters the SA would normally have provided */
	ib.rec.rate = iboe_get_rate(ndev);
	ib.rec.hop_limit = 1;
	ib.rec.mtu = iboe_get_mtu(ndev->mtu);

	if (addr->sa_family == AF_INET) {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
			/* Full-member RoCEv2 joins require an IGMP join too */
			if (!send_only) {
				err = cma_igmp_send(ndev, &ib.rec.mgid,
						    true);
			}
		}
	} else {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
			err = -ENOTSUPP;
	}
	dev_put(ndev);
	if (err || !ib.rec.mtu)
		return err ?: -EINVAL;

	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &ib.rec.port_gid);
	/* Deliver the join event asynchronously from the cma workqueue */
	INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler);
	cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc);
	queue_work(cma_wq, &mc->iboe_join.work);
	return 0;
}

/*
 * Join the multicast group identified by @addr on the id's device, using the
 * RoCE or IB join path as appropriate. On success the new membership is
 * tracked on id_priv->mc_list; completion is reported to @context through a
 * MULTICAST_JOIN/MULTICAST_ERROR event.
 */
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			u8 join_state, void *context)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	struct cma_multicast *mc;
	int ret;

	/* Not supported for kernel QPs */
	if (WARN_ON(id->qp))
		return -EINVAL;

	/* ULP is calling this wrong. */
	if (!id->device || (READ_ONCE(id_priv->state) != RDMA_CM_ADDR_BOUND &&
			    READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED))
		return -EINVAL;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, rdma_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;
	mc->join_state = join_state;

	if (rdma_protocol_roce(id->device, id->port_num)) {
		ret = cma_iboe_join_multicast(id_priv, mc);
		if (ret)
			goto out_err;
	} else if (rdma_cap_ib_mcast(id->device, id->port_num)) {
		ret = cma_join_ib_multicast(id_priv, mc);
		if (ret)
			goto out_err;
	} else {
		ret = -ENOSYS;
		goto out_err;
	}

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	return 0;
out_err:
	kfree(mc);
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);

/*
 * Leave the multicast group matching @addr: unlink the first matching entry
 * from mc_list under the lock, then destroy it outside the lock.
 */
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0)
			continue;
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);

		WARN_ON(id_priv->cma_dev->device != id->device);
		destroy_mc(id_priv, mc);
		return;
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);

/*
 * React to a netdev change for one id: if @ndev is the device the id is bound
 * to and its hardware address no longer matches the id's cached source
 * address, queue an ADDR_CHANGE event for the ULP.
 */
static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
	    (net_eq(dev_net(ndev), dev_addr->net)) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		pr_info("RDMA CM addr change for ndev %s used by id %p\n",
			ndev->name,
			&id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		/* Reference dropped by the work handler when the event is done */
		cma_id_get(id_priv);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}

/*
 * Netdevice notifier: on a bonding failover of a bond master, walk every
 * cma id on every device and queue ADDR_CHANGE events for the ids affected
 * by the MAC address change.
 */
static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!netif_is_bond_master(ndev))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, device_item) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}

/*
 * Work handler for neighbour-change netevents: report the id as UNREACHABLE
 * to the ULP. If the handler asks for destruction, the id is torn down here;
 * in both paths the reference taken when the work was queued is dropped.
 */
static void cma_netevent_work_handler(struct work_struct *_work)
{
	struct rdma_id_private *id_priv =
		container_of(_work, struct rdma_id_private, id.net_work);
	struct rdma_cm_event event = {};

	mutex_lock(&id_priv->handler_mutex);

	/* The id is already being destroyed - nothing to report */
	if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
	    READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
		goto out_unlock;

	event.event = RDMA_CM_EVENT_UNREACHABLE;
	event.status = -ETIMEDOUT;

	if (cma_cm_event_handler(id_priv, &event)) {
		/* destroy_id_handler_unlock() releases handler_mutex */
		__acquire(&id_priv->handler_mutex);
		id_priv->cm_id.ib = NULL;
		cma_id_put(id_priv);
		destroy_id_handler_unlock(id_priv);
		return;
	}

out_unlock:
	mutex_unlock(&id_priv->handler_mutex);
	cma_id_put(id_priv);
}

/* Netevent notifier: only NETEVENT_NEIGH_UPDATE events are of interest */
static int cma_netevent_callback(struct notifier_block *self,
				 unsigned long event, void *ctx)
{
	struct id_table_entry *ips_node = NULL;
	struct rdma_id_private *current_id;
	struct neighbour *neigh = ctx;
	unsigned long flags;

	if (event != NETEVENT_NEIGH_UPDATE)
		return NOTIFY_DONE;
5140925d046eSPatrisious Haddad spin_lock_irqsave(&id_table_lock, flags); 5141925d046eSPatrisious Haddad if (neigh->tbl->family == AF_INET6) { 5142925d046eSPatrisious Haddad struct sockaddr_in6 neigh_sock_6; 5143925d046eSPatrisious Haddad 5144925d046eSPatrisious Haddad neigh_sock_6.sin6_family = AF_INET6; 5145925d046eSPatrisious Haddad neigh_sock_6.sin6_addr = *(struct in6_addr *)neigh->primary_key; 5146925d046eSPatrisious Haddad ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex, 5147925d046eSPatrisious Haddad (struct sockaddr *)&neigh_sock_6); 5148925d046eSPatrisious Haddad } else if (neigh->tbl->family == AF_INET) { 5149925d046eSPatrisious Haddad struct sockaddr_in neigh_sock_4; 5150925d046eSPatrisious Haddad 5151925d046eSPatrisious Haddad neigh_sock_4.sin_family = AF_INET; 5152925d046eSPatrisious Haddad neigh_sock_4.sin_addr.s_addr = *(__be32 *)(neigh->primary_key); 5153925d046eSPatrisious Haddad ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex, 5154925d046eSPatrisious Haddad (struct sockaddr *)&neigh_sock_4); 5155925d046eSPatrisious Haddad } else 5156925d046eSPatrisious Haddad goto out; 5157925d046eSPatrisious Haddad 5158925d046eSPatrisious Haddad if (!ips_node) 5159925d046eSPatrisious Haddad goto out; 5160925d046eSPatrisious Haddad 5161925d046eSPatrisious Haddad list_for_each_entry(current_id, &ips_node->id_list, id_list_entry) { 5162925d046eSPatrisious Haddad if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr, 5163925d046eSPatrisious Haddad neigh->ha, ETH_ALEN)) 5164925d046eSPatrisious Haddad continue; 5165925d046eSPatrisious Haddad INIT_WORK(¤t_id->id.net_work, cma_netevent_work_handler); 5166925d046eSPatrisious Haddad cma_id_get(current_id); 5167925d046eSPatrisious Haddad queue_work(cma_wq, ¤t_id->id.net_work); 5168925d046eSPatrisious Haddad } 5169925d046eSPatrisious Haddad out: 5170925d046eSPatrisious Haddad spin_unlock_irqrestore(&id_table_lock, flags); 5171925d046eSPatrisious Haddad return NOTIFY_DONE; 5172925d046eSPatrisious 
Haddad } 5173925d046eSPatrisious Haddad 5174dd5bdff8SOr Gerlitz static struct notifier_block cma_nb = { 5175dd5bdff8SOr Gerlitz .notifier_call = cma_netdev_callback 5176dd5bdff8SOr Gerlitz }; 5177dd5bdff8SOr Gerlitz 5178925d046eSPatrisious Haddad static struct notifier_block cma_netevent_cb = { 5179925d046eSPatrisious Haddad .notifier_call = cma_netevent_callback 5180925d046eSPatrisious Haddad }; 5181925d046eSPatrisious Haddad 51823647a28dSJason Gunthorpe static void cma_send_device_removal_put(struct rdma_id_private *id_priv) 5183e51060f0SSean Hefty { 51843647a28dSJason Gunthorpe struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL }; 5185550e5ca7SNir Muchtar enum rdma_cm_state state; 51863647a28dSJason Gunthorpe unsigned long flags; 5187e51060f0SSean Hefty 5188de910bd9SOr Gerlitz mutex_lock(&id_priv->handler_mutex); 51893647a28dSJason Gunthorpe /* Record that we want to remove the device */ 51903647a28dSJason Gunthorpe spin_lock_irqsave(&id_priv->lock, flags); 51913647a28dSJason Gunthorpe state = id_priv->state; 51923647a28dSJason Gunthorpe if (state == RDMA_CM_DESTROYING || state == RDMA_CM_DEVICE_REMOVAL) { 51933647a28dSJason Gunthorpe spin_unlock_irqrestore(&id_priv->lock, flags); 5194de910bd9SOr Gerlitz mutex_unlock(&id_priv->handler_mutex); 51953647a28dSJason Gunthorpe cma_id_put(id_priv); 51963647a28dSJason Gunthorpe return; 51973647a28dSJason Gunthorpe } 51983647a28dSJason Gunthorpe id_priv->state = RDMA_CM_DEVICE_REMOVAL; 51993647a28dSJason Gunthorpe spin_unlock_irqrestore(&id_priv->lock, flags); 52003647a28dSJason Gunthorpe 52013647a28dSJason Gunthorpe if (cma_cm_event_handler(id_priv, &event)) { 52023647a28dSJason Gunthorpe /* 52033647a28dSJason Gunthorpe * At this point the ULP promises it won't call 52043647a28dSJason Gunthorpe * rdma_destroy_id() concurrently 52053647a28dSJason Gunthorpe */ 52063647a28dSJason Gunthorpe cma_id_put(id_priv); 52073647a28dSJason Gunthorpe mutex_unlock(&id_priv->handler_mutex); 5208f6a9d47aSJason Gunthorpe 
trace_cm_id_destroy(id_priv); 5209f6a9d47aSJason Gunthorpe _destroy_id(id_priv, state); 52103647a28dSJason Gunthorpe return; 52113647a28dSJason Gunthorpe } 52123647a28dSJason Gunthorpe mutex_unlock(&id_priv->handler_mutex); 52133647a28dSJason Gunthorpe 52143647a28dSJason Gunthorpe /* 52153647a28dSJason Gunthorpe * If this races with destroy then the thread that first assigns state 52163647a28dSJason Gunthorpe * to a destroying does the cancel. 52173647a28dSJason Gunthorpe */ 52183647a28dSJason Gunthorpe cma_cancel_operation(id_priv, state); 52193647a28dSJason Gunthorpe cma_id_put(id_priv); 5220e51060f0SSean Hefty } 5221e51060f0SSean Hefty 5222e51060f0SSean Hefty static void cma_process_remove(struct cma_device *cma_dev) 5223e51060f0SSean Hefty { 5224e51060f0SSean Hefty mutex_lock(&lock); 5225e51060f0SSean Hefty while (!list_empty(&cma_dev->id_list)) { 52263647a28dSJason Gunthorpe struct rdma_id_private *id_priv = list_first_entry( 522799cfddb8SJason Gunthorpe &cma_dev->id_list, struct rdma_id_private, device_item); 5228e51060f0SSean Hefty 522999cfddb8SJason Gunthorpe list_del_init(&id_priv->listen_item); 523099cfddb8SJason Gunthorpe list_del_init(&id_priv->device_item); 5231e368d23fSParav Pandit cma_id_get(id_priv); 5232e51060f0SSean Hefty mutex_unlock(&lock); 5233e51060f0SSean Hefty 52343647a28dSJason Gunthorpe cma_send_device_removal_put(id_priv); 5235e51060f0SSean Hefty 5236e51060f0SSean Hefty mutex_lock(&lock); 5237e51060f0SSean Hefty } 5238e51060f0SSean Hefty mutex_unlock(&lock); 5239e51060f0SSean Hefty 52405ff8c8faSParav Pandit cma_dev_put(cma_dev); 5241e51060f0SSean Hefty wait_for_completion(&cma_dev->comp); 5242e51060f0SSean Hefty } 5243e51060f0SSean Hefty 52444d51c3d9SParav Pandit static bool cma_supported(struct ib_device *device) 52454d51c3d9SParav Pandit { 52464d51c3d9SParav Pandit u32 i; 52474d51c3d9SParav Pandit 52484d51c3d9SParav Pandit rdma_for_each_port(device, i) { 52494d51c3d9SParav Pandit if (rdma_cap_ib_cm(device, i) || rdma_cap_iw_cm(device, 
i)) 52504d51c3d9SParav Pandit return true; 52514d51c3d9SParav Pandit } 52524d51c3d9SParav Pandit return false; 52534d51c3d9SParav Pandit } 52544d51c3d9SParav Pandit 5255c80a0c52SLeon Romanovsky static int cma_add_one(struct ib_device *device) 5256c80a0c52SLeon Romanovsky { 5257dd37d2f5SJason Gunthorpe struct rdma_id_private *to_destroy; 5258c80a0c52SLeon Romanovsky struct cma_device *cma_dev; 5259c80a0c52SLeon Romanovsky struct rdma_id_private *id_priv; 5260c80a0c52SLeon Romanovsky unsigned long supported_gids = 0; 5261c80a0c52SLeon Romanovsky int ret; 52621fb7f897SMark Bloch u32 i; 5263c80a0c52SLeon Romanovsky 52644d51c3d9SParav Pandit if (!cma_supported(device)) 52654d51c3d9SParav Pandit return -EOPNOTSUPP; 52664d51c3d9SParav Pandit 5267c80a0c52SLeon Romanovsky cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL); 5268c80a0c52SLeon Romanovsky if (!cma_dev) 5269c80a0c52SLeon Romanovsky return -ENOMEM; 5270c80a0c52SLeon Romanovsky 5271c80a0c52SLeon Romanovsky cma_dev->device = device; 5272c80a0c52SLeon Romanovsky cma_dev->default_gid_type = kcalloc(device->phys_port_cnt, 5273c80a0c52SLeon Romanovsky sizeof(*cma_dev->default_gid_type), 5274c80a0c52SLeon Romanovsky GFP_KERNEL); 5275c80a0c52SLeon Romanovsky if (!cma_dev->default_gid_type) { 5276c80a0c52SLeon Romanovsky ret = -ENOMEM; 5277c80a0c52SLeon Romanovsky goto free_cma_dev; 5278c80a0c52SLeon Romanovsky } 5279c80a0c52SLeon Romanovsky 5280c80a0c52SLeon Romanovsky cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt, 5281c80a0c52SLeon Romanovsky sizeof(*cma_dev->default_roce_tos), 5282c80a0c52SLeon Romanovsky GFP_KERNEL); 5283c80a0c52SLeon Romanovsky if (!cma_dev->default_roce_tos) { 5284c80a0c52SLeon Romanovsky ret = -ENOMEM; 5285c80a0c52SLeon Romanovsky goto free_gid_type; 5286c80a0c52SLeon Romanovsky } 5287c80a0c52SLeon Romanovsky 5288c80a0c52SLeon Romanovsky rdma_for_each_port (device, i) { 5289c80a0c52SLeon Romanovsky supported_gids = roce_gid_type_mask_support(device, i); 5290c80a0c52SLeon Romanovsky 
WARN_ON(!supported_gids); 5291c80a0c52SLeon Romanovsky if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE)) 5292c80a0c52SLeon Romanovsky cma_dev->default_gid_type[i - rdma_start_port(device)] = 5293c80a0c52SLeon Romanovsky CMA_PREFERRED_ROCE_GID_TYPE; 5294c80a0c52SLeon Romanovsky else 5295c80a0c52SLeon Romanovsky cma_dev->default_gid_type[i - rdma_start_port(device)] = 5296c80a0c52SLeon Romanovsky find_first_bit(&supported_gids, BITS_PER_LONG); 5297c80a0c52SLeon Romanovsky cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0; 5298c80a0c52SLeon Romanovsky } 5299c80a0c52SLeon Romanovsky 5300c80a0c52SLeon Romanovsky init_completion(&cma_dev->comp); 5301c80a0c52SLeon Romanovsky refcount_set(&cma_dev->refcount, 1); 5302c80a0c52SLeon Romanovsky INIT_LIST_HEAD(&cma_dev->id_list); 5303c80a0c52SLeon Romanovsky ib_set_client_data(device, &cma_client, cma_dev); 5304c80a0c52SLeon Romanovsky 5305c80a0c52SLeon Romanovsky mutex_lock(&lock); 5306c80a0c52SLeon Romanovsky list_add_tail(&cma_dev->list, &dev_list); 530799cfddb8SJason Gunthorpe list_for_each_entry(id_priv, &listen_any_list, listen_any_item) { 5308dd37d2f5SJason Gunthorpe ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy); 5309c80a0c52SLeon Romanovsky if (ret) 5310c80a0c52SLeon Romanovsky goto free_listen; 5311c80a0c52SLeon Romanovsky } 5312c80a0c52SLeon Romanovsky mutex_unlock(&lock); 5313c80a0c52SLeon Romanovsky 5314c80a0c52SLeon Romanovsky trace_cm_add_one(device); 5315c80a0c52SLeon Romanovsky return 0; 5316c80a0c52SLeon Romanovsky 5317c80a0c52SLeon Romanovsky free_listen: 5318c80a0c52SLeon Romanovsky list_del(&cma_dev->list); 5319c80a0c52SLeon Romanovsky mutex_unlock(&lock); 5320c80a0c52SLeon Romanovsky 5321dd37d2f5SJason Gunthorpe /* cma_process_remove() will delete to_destroy */ 5322c80a0c52SLeon Romanovsky cma_process_remove(cma_dev); 5323c80a0c52SLeon Romanovsky kfree(cma_dev->default_roce_tos); 5324c80a0c52SLeon Romanovsky free_gid_type: 5325c80a0c52SLeon Romanovsky 
kfree(cma_dev->default_gid_type); 5326c80a0c52SLeon Romanovsky 5327c80a0c52SLeon Romanovsky free_cma_dev: 5328c80a0c52SLeon Romanovsky kfree(cma_dev); 5329c80a0c52SLeon Romanovsky return ret; 5330c80a0c52SLeon Romanovsky } 5331c80a0c52SLeon Romanovsky 53327c1eb45aSHaggai Eran static void cma_remove_one(struct ib_device *device, void *client_data) 5333e51060f0SSean Hefty { 53347c1eb45aSHaggai Eran struct cma_device *cma_dev = client_data; 5335e51060f0SSean Hefty 5336ed999f82SChuck Lever trace_cm_remove_one(device); 5337ed999f82SChuck Lever 5338e51060f0SSean Hefty mutex_lock(&lock); 5339e51060f0SSean Hefty list_del(&cma_dev->list); 5340e51060f0SSean Hefty mutex_unlock(&lock); 5341e51060f0SSean Hefty 5342e51060f0SSean Hefty cma_process_remove(cma_dev); 534389052d78SMajd Dibbiny kfree(cma_dev->default_roce_tos); 5344045959dbSMatan Barak kfree(cma_dev->default_gid_type); 5345e51060f0SSean Hefty kfree(cma_dev); 5346e51060f0SSean Hefty } 5347e51060f0SSean Hefty 53484be74b42SHaggai Eran static int cma_init_net(struct net *net) 53494be74b42SHaggai Eran { 53504be74b42SHaggai Eran struct cma_pernet *pernet = cma_pernet(net); 53514be74b42SHaggai Eran 535263826753SMatthew Wilcox xa_init(&pernet->tcp_ps); 535363826753SMatthew Wilcox xa_init(&pernet->udp_ps); 535463826753SMatthew Wilcox xa_init(&pernet->ipoib_ps); 535563826753SMatthew Wilcox xa_init(&pernet->ib_ps); 53564be74b42SHaggai Eran 53574be74b42SHaggai Eran return 0; 53584be74b42SHaggai Eran } 53594be74b42SHaggai Eran 53604be74b42SHaggai Eran static void cma_exit_net(struct net *net) 53614be74b42SHaggai Eran { 53624be74b42SHaggai Eran struct cma_pernet *pernet = cma_pernet(net); 53634be74b42SHaggai Eran 536463826753SMatthew Wilcox WARN_ON(!xa_empty(&pernet->tcp_ps)); 536563826753SMatthew Wilcox WARN_ON(!xa_empty(&pernet->udp_ps)); 536663826753SMatthew Wilcox WARN_ON(!xa_empty(&pernet->ipoib_ps)); 536763826753SMatthew Wilcox WARN_ON(!xa_empty(&pernet->ib_ps)); 53684be74b42SHaggai Eran } 53694be74b42SHaggai Eran 
static struct pernet_operations cma_pernet_operations = {
	.init = cma_init_net,
	.exit = cma_exit_net,
	.id = &cma_pernet_id,
	.size = sizeof(struct cma_pernet),
};

/*
 * cma_init - module init.
 *
 * Registration order: workqueue, pernet subsystem, SA client, netdev and
 * netevent notifiers, ib_client, configfs.  The error labels unwind in
 * exactly the reverse order; cma_cleanup() below must stay the mirror image.
 */
static int __init cma_init(void)
{
	int ret;

	/*
	 * There is a rare lock ordering dependency in cma_netdev_callback()
	 * that only happens when bonding is enabled. Teach lockdep that rtnl
	 * must never be nested under lock so it can find these without having
	 * to test with bonding.
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		rtnl_lock();
		mutex_lock(&lock);
		mutex_unlock(&lock);
		rtnl_unlock();
	}

	/* Ordered queue: at most one cma work item runs at a time. */
	cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
	if (!cma_wq)
		return -ENOMEM;

	ret = register_pernet_subsys(&cma_pernet_operations);
	if (ret)
		goto err_wq;

	ib_sa_register_client(&sa_client);
	register_netdevice_notifier(&cma_nb);
	register_netevent_notifier(&cma_netevent_cb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;

	ret = cma_configfs_init();
	if (ret)
		goto err_ib;

	return 0;

err_ib:
	ib_unregister_client(&cma_client);
err:
	unregister_netevent_notifier(&cma_netevent_cb);
	unregister_netdevice_notifier(&cma_nb);
	ib_sa_unregister_client(&sa_client);
	unregister_pernet_subsys(&cma_pernet_operations);
err_wq:
	destroy_workqueue(cma_wq);
	return ret;
}

/* Module exit: tear down everything cma_init() set up, in reverse order. */
static void __exit cma_cleanup(void)
{
	cma_configfs_exit();
	ib_unregister_client(&cma_client);
	unregister_netevent_notifier(&cma_netevent_cb);
	unregister_netdevice_notifier(&cma_nb);
	ib_sa_unregister_client(&sa_client);
	unregister_pernet_subsys(&cma_pernet_operations);
	destroy_workqueue(cma_wq);
}

module_init(cma_init);
module_exit(cma_cleanup);