xref: /linux/net/rds/ib.c (revision ad1d7dc0d79d3dd2c5d2931b13edbd4fe33e5fac)
1ec16227eSAndy Grover /*
2ec16227eSAndy Grover  * Copyright (c) 2006 Oracle.  All rights reserved.
3ec16227eSAndy Grover  *
4ec16227eSAndy Grover  * This software is available to you under a choice of one of two
5ec16227eSAndy Grover  * licenses.  You may choose to be licensed under the terms of the GNU
6ec16227eSAndy Grover  * General Public License (GPL) Version 2, available from the file
7ec16227eSAndy Grover  * COPYING in the main directory of this source tree, or the
8ec16227eSAndy Grover  * OpenIB.org BSD license below:
9ec16227eSAndy Grover  *
10ec16227eSAndy Grover  *     Redistribution and use in source and binary forms, with or
11ec16227eSAndy Grover  *     without modification, are permitted provided that the following
12ec16227eSAndy Grover  *     conditions are met:
13ec16227eSAndy Grover  *
14ec16227eSAndy Grover  *      - Redistributions of source code must retain the above
15ec16227eSAndy Grover  *        copyright notice, this list of conditions and the following
16ec16227eSAndy Grover  *        disclaimer.
17ec16227eSAndy Grover  *
18ec16227eSAndy Grover  *      - Redistributions in binary form must reproduce the above
19ec16227eSAndy Grover  *        copyright notice, this list of conditions and the following
20ec16227eSAndy Grover  *        disclaimer in the documentation and/or other materials
21ec16227eSAndy Grover  *        provided with the distribution.
22ec16227eSAndy Grover  *
23ec16227eSAndy Grover  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24ec16227eSAndy Grover  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25ec16227eSAndy Grover  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26ec16227eSAndy Grover  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27ec16227eSAndy Grover  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28ec16227eSAndy Grover  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29ec16227eSAndy Grover  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30ec16227eSAndy Grover  * SOFTWARE.
31ec16227eSAndy Grover  *
32ec16227eSAndy Grover  */
33ec16227eSAndy Grover #include <linux/kernel.h>
34ec16227eSAndy Grover #include <linux/in.h>
35ec16227eSAndy Grover #include <linux/if.h>
36ec16227eSAndy Grover #include <linux/netdevice.h>
37ec16227eSAndy Grover #include <linux/inetdevice.h>
38ec16227eSAndy Grover #include <linux/if_arp.h>
39ec16227eSAndy Grover #include <linux/delay.h>
405a0e3ad6STejun Heo #include <linux/slab.h>
413a9a231dSPaul Gortmaker #include <linux/module.h>
42ec16227eSAndy Grover 
43ec16227eSAndy Grover #include "rds.h"
44ec16227eSAndy Grover #include "ib.h"
45ec16227eSAndy Grover 
46ff51bf84Sstephen hemminger static unsigned int fmr_pool_size = RDS_FMR_POOL_SIZE;
47ec16227eSAndy Grover unsigned int fmr_message_size = RDS_FMR_SIZE + 1; /* +1 allows for unaligned MRs */
483ba23adeSAndy Grover unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT;
49ec16227eSAndy Grover 
50ec16227eSAndy Grover module_param(fmr_pool_size, int, 0444);
51ec16227eSAndy Grover MODULE_PARM_DESC(fmr_pool_size, " Max number of fmr per HCA");
52ec16227eSAndy Grover module_param(fmr_message_size, int, 0444);
53ec16227eSAndy Grover MODULE_PARM_DESC(fmr_message_size, " Max size of a RDMA transfer");
543ba23adeSAndy Grover module_param(rds_ib_retry_count, int, 0444);
553ba23adeSAndy Grover MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error");
56ec16227eSAndy Grover 
/*
 * we have a clumsy combination of RCU and a rwsem protecting this list
 * because it is used both in the get_mr fast path and while blocking in
 * the FMR flushing path.
 */
DECLARE_RWSEM(rds_ib_devices_lock);
struct list_head rds_ib_devices;	/* every rds_ib_device; RCU-readable list */

/* NOTE: if also grabbing ibdev lock, grab this first */
DEFINE_SPINLOCK(ib_nodev_conns_lock);
LIST_HEAD(ib_nodev_conns);		/* connections not yet bound to a device */
68ec16227eSAndy Grover 
69ff51bf84Sstephen hemminger static void rds_ib_nodev_connect(void)
70fc19de38SZach Brown {
71fc19de38SZach Brown 	struct rds_ib_connection *ic;
72fc19de38SZach Brown 
73fc19de38SZach Brown 	spin_lock(&ib_nodev_conns_lock);
74fc19de38SZach Brown 	list_for_each_entry(ic, &ib_nodev_conns, ib_node)
75fc19de38SZach Brown 		rds_conn_connect_if_down(ic->conn);
76fc19de38SZach Brown 	spin_unlock(&ib_nodev_conns_lock);
77fc19de38SZach Brown }
78fc19de38SZach Brown 
79ff51bf84Sstephen hemminger static void rds_ib_dev_shutdown(struct rds_ib_device *rds_ibdev)
80fc19de38SZach Brown {
81fc19de38SZach Brown 	struct rds_ib_connection *ic;
82fc19de38SZach Brown 	unsigned long flags;
83fc19de38SZach Brown 
84fc19de38SZach Brown 	spin_lock_irqsave(&rds_ibdev->spinlock, flags);
85fc19de38SZach Brown 	list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node)
86fc19de38SZach Brown 		rds_conn_drop(ic->conn);
87fc19de38SZach Brown 	spin_unlock_irqrestore(&rds_ibdev->spinlock, flags);
88fc19de38SZach Brown }
89fc19de38SZach Brown 
/*
 * rds_ib_destroy_mr_pool() blocks on a few things and mrs drop references
 * from interrupt context so we push freeing off into a work struct in krdsd.
 */
static void rds_ib_dev_free(struct work_struct *work)
{
	struct rds_ib_ipaddr *i_ipaddr, *i_next;
	struct rds_ib_device *rds_ibdev = container_of(work,
					struct rds_ib_device, free_work);

	/* Tear down in dependency order: the MR pool uses the DMA MR and
	 * the PD, so it must be destroyed first. */
	if (rds_ibdev->mr_pool)
		rds_ib_destroy_mr_pool(rds_ibdev->mr_pool);
	if (rds_ibdev->mr)
		ib_dereg_mr(rds_ibdev->mr);
	if (rds_ibdev->pd)
		ib_dealloc_pd(rds_ibdev->pd);

	/* Last reference is gone, so nobody else can touch ipaddr_list. */
	list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) {
		list_del(&i_ipaddr->list);
		kfree(i_ipaddr);
	}

	kfree(rds_ibdev);
}
1143e0249f9SZach Brown 
/*
 * Drop a reference on @rds_ibdev.  The final put defers the actual
 * teardown to krdsd (rds_ib_dev_free) because freeing can block and
 * puts may happen from interrupt context.
 */
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev)
{
	BUG_ON(atomic_read(&rds_ibdev->refcount) <= 0);
	if (atomic_dec_and_test(&rds_ibdev->refcount))
		queue_work(rds_wq, &rds_ibdev->free_work);
}
1213e0249f9SZach Brown 
/*
 * IB client ->add callback: build the per-HCA RDS state (PD, DMA MR,
 * FMR pool), publish it on rds_ib_devices and in the device's
 * client_data, then kick deviceless connections that may now bind here.
 */
static void rds_ib_add_one(struct ib_device *device)
{
	struct rds_ib_device *rds_ibdev;
	struct ib_device_attr *dev_attr;

	/* Only handle IB (no iWARP) devices */
	if (device->node_type != RDMA_NODE_IB_CA)
		return;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		rdsdebug("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	/* Allocate on the HCA's NUMA node to keep fast-path data local. */
	rds_ibdev = kzalloc_node(sizeof(struct rds_ib_device), GFP_KERNEL,
				 ibdev_to_node(device));
	if (!rds_ibdev)
		goto free_attr;

	spin_lock_init(&rds_ibdev->spinlock);
	/* One reference held by this function; dropped at put_dev below. */
	atomic_set(&rds_ibdev->refcount, 1);
	INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);

	/* Clamp our usage to what the HCA advertises. */
	rds_ibdev->max_wrs = dev_attr->max_qp_wr;
	rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE);

	/* 0 means "no limit reported"; fall back to 32 remaps per FMR. */
	rds_ibdev->fmr_max_remaps = dev_attr->max_map_per_fmr?: 32;
	rds_ibdev->max_fmrs = dev_attr->max_fmr ?
			min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) :
			fmr_pool_size;

	rds_ibdev->max_initiator_depth = dev_attr->max_qp_init_rd_atom;
	rds_ibdev->max_responder_resources = dev_attr->max_qp_rd_atom;

	rds_ibdev->dev = device;
	rds_ibdev->pd = ib_alloc_pd(device);
	if (IS_ERR(rds_ibdev->pd)) {
		/* NULL the field so rds_ib_dev_free() skips it. */
		rds_ibdev->pd = NULL;
		goto put_dev;
	}

	rds_ibdev->mr = ib_get_dma_mr(rds_ibdev->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(rds_ibdev->mr)) {
		rds_ibdev->mr = NULL;
		goto put_dev;
	}

	rds_ibdev->mr_pool = rds_ib_create_mr_pool(rds_ibdev);
	if (IS_ERR(rds_ibdev->mr_pool)) {
		rds_ibdev->mr_pool = NULL;
		goto put_dev;
	}

	INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
	INIT_LIST_HEAD(&rds_ibdev->conn_list);

	/* A reference for membership on the global rds_ib_devices list... */
	down_write(&rds_ib_devices_lock);
	list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
	up_write(&rds_ib_devices_lock);
	atomic_inc(&rds_ibdev->refcount);

	/* ...and one for the client_data pointer. */
	ib_set_client_data(device, &rds_ib_client, rds_ibdev);
	atomic_inc(&rds_ibdev->refcount);

	rds_ib_nodev_connect();

put_dev:
	/* Success falls through here too: this drops only our initial
	 * reference.  On the error paths it is the last put and frees
	 * everything via rds_ib_dev_free(). */
	rds_ib_dev_put(rds_ibdev);
free_attr:
	kfree(dev_attr);
}
197ec16227eSAndy Grover 
/*
 * New connections use this to find the device to associate with the
 * connection.  It's not in the fast path so we're not concerned about the
 * performance of the IB call.  (As of this writing, it uses an interrupt
 * blocking spinlock to serialize walking a per-device list of all registered
 * clients.)
 *
 * RCU is used to handle incoming connections racing with device teardown.
 * Rather than use a lock to serialize removal from the client_data and
 * getting a new reference, we use an RCU grace period.  The destruction
 * path removes the device from client_data and then waits for all RCU
 * readers to finish.
 *
 * A new connection can get NULL from this if its arriving on a
 * device that is in the process of being removed.
 *
 * On success the caller holds a reference and must rds_ib_dev_put() it.
 */
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device)
{
	struct rds_ib_device *rds_ibdev;

	rcu_read_lock();
	rds_ibdev = ib_get_client_data(device, &rds_ib_client);
	/* Taking the reference inside the read section pairs with the
	 * synchronize_rcu() in rds_ib_remove_one(). */
	if (rds_ibdev)
		atomic_inc(&rds_ibdev->refcount);
	rcu_read_unlock();
	return rds_ibdev;
}
2253e0249f9SZach Brown 
/*
 * The IB stack is letting us know that a device is going away.  This can
 * happen if the underlying HCA driver is removed or if PCI hotplug is removing
 * the pci function, for example.
 *
 * This can be called at any time and can be racing with any other RDS path.
 */
static void rds_ib_remove_one(struct ib_device *device)
{
	struct rds_ib_device *rds_ibdev;

	rds_ibdev = ib_get_client_data(device, &rds_ib_client);
	if (!rds_ibdev)
		return;

	/* Drop every connection bound to this device. */
	rds_ib_dev_shutdown(rds_ibdev);

	/* stop connection attempts from getting a reference to this device. */
	ib_set_client_data(device, &rds_ib_client, NULL);

	down_write(&rds_ib_devices_lock);
	list_del_rcu(&rds_ibdev->list);
	up_write(&rds_ib_devices_lock);

	/*
	 * This synchronize rcu is waiting for readers of both the ib
	 * client data and the devices list to finish before we drop
	 * both of those references.
	 */
	synchronize_rcu();
	/* One put for the client_data reference, one for the list
	 * reference; the initial reference may still be held by
	 * connections until they release it. */
	rds_ib_dev_put(rds_ibdev);
	rds_ib_dev_put(rds_ibdev);
}
259ec16227eSAndy Grover 
/* Registered with the IB core; ->add/->remove run on HCA arrival/removal. */
struct ib_client rds_ib_client = {
	.name   = "rds_ib",
	.add    = rds_ib_add_one,
	.remove = rds_ib_remove_one
};
265ec16227eSAndy Grover 
/*
 * rds_for_each_conn_info() visitor: fill @buffer (one
 * rds_info_rdma_connection) for @conn.  Returns 0 to skip non-IB
 * connections, 1 when a record was produced.
 */
static int rds_ib_conn_info_visitor(struct rds_connection *conn,
				    void *buffer)
{
	struct rds_info_rdma_connection *iinfo = buffer;
	struct rds_ib_connection *ic;

	/* We will only ever look at IB transports */
	if (conn->c_trans != &rds_ib_transport)
		return 0;

	iinfo->src_addr = conn->c_laddr;
	iinfo->dst_addr = conn->c_faddr;

	/* GIDs and limits are only meaningful once the connection is up;
	 * report zeros otherwise. */
	memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
	memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
	if (rds_conn_state(conn) == RDS_CONN_UP) {
		struct rds_ib_device *rds_ibdev;
		struct rdma_dev_addr *dev_addr;

		ic = conn->c_transport_data;
		dev_addr = &ic->i_cm_id->route.addr.dev_addr;

		rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
		rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);

		rds_ibdev = ic->rds_ibdev;
		iinfo->max_send_wr = ic->i_send_ring.w_nr;
		iinfo->max_recv_wr = ic->i_recv_ring.w_nr;
		iinfo->max_send_sge = rds_ibdev->max_sge;
		rds_ib_get_mr_info(rds_ibdev, iinfo);
	}
	return 1;
}
299ec16227eSAndy Grover 
/*
 * rds-info handler for RDS_INFO_IB_CONNECTIONS: emit one
 * rds_info_rdma_connection record per IB connection.
 */
static void rds_ib_ic_info(struct socket *sock, unsigned int len,
			   struct rds_info_iterator *iter,
			   struct rds_info_lengths *lens)
{
	rds_for_each_conn_info(sock, len, iter, lens,
				rds_ib_conn_info_visitor,
				sizeof(struct rds_info_rdma_connection));
}
308ec16227eSAndy Grover 
309ec16227eSAndy Grover 
/*
 * Early RDS/IB was built to only bind to an address if there is an IPoIB
 * device with that address set.
 *
 * If it were me, I'd advocate for something more flexible.  Sending and
 * receiving should be device-agnostic.  Transports would try and maintain
 * connections between peers who have messages queued.  Userspace would be
 * allowed to influence which paths have priority.  We could call userspace
 * asserting this policy "routing".
 *
 * Returns 0 if @addr is bindable on an IB device, -EADDRNOTAVAIL (or the
 * rdma_create_id error) otherwise.  NOTE(review): @net is unused here —
 * the bind is not namespace-aware; confirm whether that is intended.
 */
static int rds_ib_laddr_check(struct net *net, __be32 addr)
{
	int ret;
	struct rdma_cm_id *cm_id;
	struct sockaddr_in sin;

	/* Create a CMA ID and try to bind it. This catches both
	 * IB and iWARP capable NICs.
	 */
	cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = addr;

	/* rdma_bind_addr will only succeed for IB & iWARP devices */
	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
	/* due to this, we will claim to support iWARP devices unless we
	   check node_type. */
	if (ret || !cm_id->device ||
	    cm_id->device->node_type != RDMA_NODE_IB_CA)
		ret = -EADDRNOTAVAIL;

	rdsdebug("addr %pI4 ret %d node type %d\n",
		&addr, ret,
		cm_id->device ? cm_id->device->node_type : -1);

	rdma_destroy_id(cm_id);

	return ret;
}
353ec16227eSAndy Grover 
/*
 * Unregister from the IB core and drain krdsd so that any queued
 * rds_ib_dev_free() work has run before module teardown continues.
 */
static void rds_ib_unregister_client(void)
{
	ib_unregister_client(&rds_ib_client);
	/* wait for rds_ib_dev_free() to complete */
	flush_workqueue(rds_wq);
}
36024fa163aSZach Brown 
/*
 * Module/transport teardown, reverse order of rds_ib_init(): stop new
 * info requests, unregister from the IB core (flushing device frees),
 * then tear down connections, sysctl, recv caches, the transport and
 * finally the FMR infrastructure.
 */
void rds_ib_exit(void)
{
	rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
	rds_ib_unregister_client();
	rds_ib_destroy_nodev_conns();
	rds_ib_sysctl_exit();
	rds_ib_recv_exit();
	rds_trans_unregister(&rds_ib_transport);
	rds_ib_fmr_exit();
}
371ec16227eSAndy Grover 
/* The RDS transport ops for InfiniBand, registered in rds_ib_init(). */
struct rds_transport rds_ib_transport = {
	.laddr_check		= rds_ib_laddr_check,
	.xmit_complete		= rds_ib_xmit_complete,
	.xmit			= rds_ib_xmit,
	.xmit_rdma		= rds_ib_xmit_rdma,
	.xmit_atomic		= rds_ib_xmit_atomic,
	.recv			= rds_ib_recv,
	.conn_alloc		= rds_ib_conn_alloc,
	.conn_free		= rds_ib_conn_free,
	.conn_connect		= rds_ib_conn_connect,
	.conn_shutdown		= rds_ib_conn_shutdown,
	.inc_copy_to_user	= rds_ib_inc_copy_to_user,
	.inc_free		= rds_ib_inc_free,
	.cm_initiate_connect	= rds_ib_cm_initiate_connect,
	.cm_handle_connect	= rds_ib_cm_handle_connect,
	.cm_connect_complete	= rds_ib_cm_connect_complete,
	.stats_info_copy	= rds_ib_stats_info_copy,
	.exit			= rds_ib_exit,
	.get_mr			= rds_ib_get_mr,
	.sync_mr		= rds_ib_sync_mr,
	.free_mr		= rds_ib_free_mr,
	.flush_mrs		= rds_ib_flush_mrs,
	.t_owner		= THIS_MODULE,
	.t_name			= "infiniband",
	.t_type			= RDS_TRANS_IB
};
398ec16227eSAndy Grover 
/*
 * Transport init: bring up FMR state, register with the IB core, then
 * sysctl, recv caches and the RDS transport.  Unwinds via the goto
 * chain in reverse order on failure; returns 0 on success or a negative
 * errno.
 */
int rds_ib_init(void)
{
	int ret;

	INIT_LIST_HEAD(&rds_ib_devices);

	ret = rds_ib_fmr_init();
	if (ret)
		goto out;

	/* Registering triggers rds_ib_add_one() for existing HCAs. */
	ret = ib_register_client(&rds_ib_client);
	if (ret)
		goto out_fmr_exit;

	ret = rds_ib_sysctl_init();
	if (ret)
		goto out_ibreg;

	ret = rds_ib_recv_init();
	if (ret)
		goto out_sysctl;

	ret = rds_trans_register(&rds_ib_transport);
	if (ret)
		goto out_recv;

	rds_info_register_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);

	goto out;

out_recv:
	rds_ib_recv_exit();
out_sysctl:
	rds_ib_sysctl_exit();
out_ibreg:
	rds_ib_unregister_client();
out_fmr_exit:
	rds_ib_fmr_exit();
out:
	return ret;
}
440ec16227eSAndy Grover 
441ec16227eSAndy Grover MODULE_LICENSE("GPL");
442ec16227eSAndy Grover 
443