xref: /linux/net/sunrpc/xprtrdma/transport.c (revision 46557bef3f3834ac33031c7be27d39d90d507442)
/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * transport.c
 *
 * This file contains the top-level implementation of an RPC RDMA
 * transport.
 *
 * Naming convention: functions beginning with xprt_ are part of the
 * transport switch. All others are RPC RDMA internal.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/seq_file.h>

#include "xprt_rdma.h"

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

MODULE_LICENSE("Dual BSD/GPL");

MODULE_DESCRIPTION("RPC/RDMA Transport for Linux kernel NFS");
MODULE_AUTHOR("Network Appliance, Inc.");

/*
 * tunables
 */

static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;
static unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE;
static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE;
static unsigned int xprt_rdma_inline_write_padding;
static unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR;
int xprt_rdma_pad_optimize = 0;

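/*
 * When RPC_DEBUG is enabled, the tunables above are exported as
 * sysctls under /proc/sys/sunrpc/ via the tables below, which
 * xprt_rdma_init() registers with register_sysctl_table().
 */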
#ifdef RPC_DEBUG

static unsigned int min_slot_table_size = RPCRDMA_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPCRDMA_MAX_SLOT_TABLE;
static unsigned int zero;
static unsigned int max_padding = PAGE_SIZE;
static unsigned int min_memreg = RPCRDMA_BOUNCEBUFFERS;
static unsigned int max_memreg = RPCRDMA_LAST - 1;

static struct ctl_table_header *sunrpc_table_header;

static ctl_table xr_tunables_table[] = {
	{
		.ctl_name       = CTL_UNNUMBERED,
		.procname	= "rdma_slot_table_entries",
		.data		= &xprt_rdma_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.ctl_name       = CTL_UNNUMBERED,
		.procname	= "rdma_max_inline_read",
		.data		= &xprt_rdma_max_inline_read,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
		.strategy	= &sysctl_intvec,
	},
	{
		.ctl_name       = CTL_UNNUMBERED,
		.procname	= "rdma_max_inline_write",
		.data		= &xprt_rdma_max_inline_write,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
		.strategy	= &sysctl_intvec,
	},
	{
		.ctl_name       = CTL_UNNUMBERED,
		.procname	= "rdma_inline_write_padding",
		.data		= &xprt_rdma_inline_write_padding,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
		.extra2		= &max_padding,
	},
	{
		.ctl_name       = CTL_UNNUMBERED,
		.procname	= "rdma_memreg_strategy",
		.data		= &xprt_rdma_memreg_strategy,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &min_memreg,
		.extra2		= &max_memreg,
	},
	{
		.ctl_name       = CTL_UNNUMBERED,
		.procname	= "rdma_pad_optimize",
		.data		= &xprt_rdma_pad_optimize,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name = 0,
	},
};

static ctl_table sunrpc_table[] = {
	{
		.ctl_name	= CTL_SUNRPC,
		.procname	= "sunrpc",
		.mode		= 0555,
		.child		= xr_tunables_table
	},
	{
		.ctl_name = 0,
	},
};

#endif

static struct rpc_xprt_ops xprt_rdma_procs;	/* forward reference */

static void
xprt_rdma_format_addresses(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = (struct sockaddr *)
					&rpcx_to_rdmad(xprt).addr;
	struct sockaddr_in *sin = (struct sockaddr_in *)sap;
	char buf[64];

	(void)rpc_ntop(sap, buf, sizeof(buf));
	xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);

	(void)snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";

	(void)snprintf(buf, sizeof(buf), "%02x%02x%02x%02x",
				NIPQUAD(sin->sin_addr.s_addr));
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

	(void)snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);

	/* netid */
	xprt->address_strings[RPC_DISPLAY_NETID] = "rdma";
}

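/*
 * Note: the PROTO and NETID strings above are compile-time constants,
 * so the loop below must skip those slots and kfree() only the
 * kstrdup()'d entries.
 */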
static void
xprt_rdma_free_addresses(struct rpc_xprt *xprt)
{
	unsigned int i;

	for (i = 0; i < RPC_DISPLAY_MAX; i++)
		switch (i) {
		case RPC_DISPLAY_PROTO:
		case RPC_DISPLAY_NETID:
			continue;
		default:
			kfree(xprt->address_strings[i]);
		}
}

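/*
 * Worker for the delayed connect path. Scheduled from
 * xprt_rdma_connect(), it (re)establishes the RDMA connection; on
 * failure it wakes any pending tasks with the error before clearing
 * the connecting flag.
 */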
static void
xprt_rdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_xprt *r_xprt =
		container_of(work, struct rpcrdma_xprt, rdma_connect.work);
	struct rpc_xprt *xprt = &r_xprt->xprt;
	int rc = 0;

	if (!xprt->shutdown) {
		xprt_clear_connected(xprt);

		dprintk("RPC:       %s: %sconnect\n", __func__,
				r_xprt->rx_ep.rep_connected != 0 ? "re" : "");
		rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia);
		if (rc)
			goto out;
	}
	goto out_clear;

out:
	xprt_wake_pending_tasks(xprt, rc);

out_clear:
	dprintk("RPC:       %s: exit\n", __func__);
	xprt_clear_connecting(xprt);
}

/*
 * xprt_rdma_destroy
 *
 * Destroy the xprt.
 * Free all memory associated with the object, including its own.
 * NOTE: none of the *destroy methods free memory for their top-level
 * objects, even though they may have allocated it (they do free
 * private memory). It's up to the caller to handle it. In this
 * case (RDMA transport), all structure memory is inlined with the
 * struct rpcrdma_xprt.
 */
static void
xprt_rdma_destroy(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	int rc;

	dprintk("RPC:       %s: called\n", __func__);

	cancel_delayed_work(&r_xprt->rdma_connect);
	flush_scheduled_work();

	xprt_clear_connected(xprt);

	rpcrdma_buffer_destroy(&r_xprt->rx_buf);
	rc = rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
	if (rc)
		dprintk("RPC:       %s: rpcrdma_ep_destroy returned %i\n",
			__func__, rc);
	rpcrdma_ia_close(&r_xprt->rx_ia);

	xprt_rdma_free_addresses(xprt);

	kfree(xprt->slot);
	xprt->slot = NULL;
	kfree(xprt);

	dprintk("RPC:       %s: returning\n", __func__);

	module_put(THIS_MODULE);
}

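/*
 * A single fixed 60-second timeout; to_retries is implicitly zero in
 * this initializer, so requests are not retransmitted.
 */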
static const struct rpc_timeout xprt_rdma_default_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
};

/**
 * xprt_setup_rdma - Set up transport to use RDMA
 * @args: rpc transport arguments
 */
static struct rpc_xprt *
xprt_setup_rdma(struct xprt_create *args)
{
	struct rpcrdma_create_data_internal cdata;
	struct rpc_xprt *xprt;
	struct rpcrdma_xprt *new_xprt;
	struct rpcrdma_ep *new_ep;
	struct sockaddr_in *sin;
	int rc;

	if (args->addrlen > sizeof(xprt->addr)) {
		dprintk("RPC:       %s: address too large\n", __func__);
		return ERR_PTR(-EBADF);
	}

	xprt = kzalloc(sizeof(struct rpcrdma_xprt), GFP_KERNEL);
	if (xprt == NULL) {
		dprintk("RPC:       %s: couldn't allocate rpcrdma_xprt\n",
			__func__);
		return ERR_PTR(-ENOMEM);
	}

	xprt->max_reqs = xprt_rdma_slot_table_entries;
	xprt->slot = kcalloc(xprt->max_reqs,
				sizeof(struct rpc_rqst), GFP_KERNEL);
	if (xprt->slot == NULL) {
		dprintk("RPC:       %s: couldn't allocate %d slots\n",
			__func__, xprt->max_reqs);
		kfree(xprt);
		return ERR_PTR(-ENOMEM);
	}

	/* 60 second timeout, no retries */
	xprt->timeout = &xprt_rdma_default_timeout;
	xprt->bind_timeout = (60U * HZ);
	xprt->connect_timeout = (60U * HZ);
	xprt->reestablish_timeout = (5U * HZ);
	xprt->idle_timeout = (5U * 60 * HZ);

	xprt->resvport = 0;		/* privileged port not needed */
	xprt->tsh_size = 0;		/* RPC-RDMA handles framing */
	xprt->max_payload = RPCRDMA_MAX_DATA_SEGS * PAGE_SIZE;
	xprt->ops = &xprt_rdma_procs;

	/*
	 * Set up RDMA-specific connect data.
	 */

	/* Put server RDMA address in local cdata */
	memcpy(&cdata.addr, args->dstaddr, args->addrlen);

	/* Ensure xprt->addr holds a valid server TCP (not RDMA)
	 * address, for any side protocols that peek at it */
	xprt->prot = IPPROTO_TCP;
	xprt->addrlen = args->addrlen;
	memcpy(&xprt->addr, &cdata.addr, xprt->addrlen);

	sin = (struct sockaddr_in *)&cdata.addr;
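	/* A nonzero destination port means the service address is
	 * already fully resolved; mark the transport bound so the
	 * rpcbind step is skipped. */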
	if (ntohs(sin->sin_port) != 0)
		xprt_set_bound(xprt);

	dprintk("RPC:       %s: %pI4:%u\n",
		__func__, &sin->sin_addr.s_addr, ntohs(sin->sin_port));

	/* Set max requests */
	cdata.max_requests = xprt->max_reqs;

	/* Set some length limits */
	cdata.rsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA write max */
	cdata.wsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA read max */

	cdata.inline_wsize = xprt_rdma_max_inline_write;
	if (cdata.inline_wsize > cdata.wsize)
		cdata.inline_wsize = cdata.wsize;

	cdata.inline_rsize = xprt_rdma_max_inline_read;
	if (cdata.inline_rsize > cdata.rsize)
		cdata.inline_rsize = cdata.rsize;

	cdata.padding = xprt_rdma_inline_write_padding;

	/*
	 * Create new transport instance, which includes initialized
	 *  o ia
	 *  o endpoint
	 *  o buffers
	 */

	new_xprt = rpcx_to_rdmax(xprt);

	rc = rpcrdma_ia_open(new_xprt, (struct sockaddr *) &cdata.addr,
				xprt_rdma_memreg_strategy);
	if (rc)
		goto out1;

	/*
	 * initialize and create ep
	 */
	new_xprt->rx_data = cdata;
	new_ep = &new_xprt->rx_ep;
	new_ep->rep_remote_addr = cdata.addr;

	rc = rpcrdma_ep_create(&new_xprt->rx_ep,
				&new_xprt->rx_ia, &new_xprt->rx_data);
	if (rc)
		goto out2;

	/*
	 * Allocate pre-registered send and receive buffers for headers and
	 * any inline data. Also specify any padding which will be provided
	 * from a preregistered zero buffer.
	 */
	rc = rpcrdma_buffer_create(&new_xprt->rx_buf, new_ep, &new_xprt->rx_ia,
				&new_xprt->rx_data);
	if (rc)
		goto out3;

	/*
	 * Register a callback for connection events. This is necessary because
	 * connection loss notification is async. We also catch connection loss
	 * when reaping receives.
	 */
	INIT_DELAYED_WORK(&new_xprt->rdma_connect, xprt_rdma_connect_worker);
	new_ep->rep_func = rpcrdma_conn_func;
	new_ep->rep_xprt = xprt;

	xprt_rdma_format_addresses(xprt);

	if (!try_module_get(THIS_MODULE))
		goto out4;

	return xprt;

out4:
	xprt_rdma_free_addresses(xprt);
	rc = -EINVAL;
out3:
	(void) rpcrdma_ep_destroy(new_ep, &new_xprt->rx_ia);
out2:
	rpcrdma_ia_close(&new_xprt->rx_ia);
out1:
	kfree(xprt->slot);
	kfree(xprt);
	return ERR_PTR(rc);
}

/*
 * Close a connection, during shutdown or timeout/reconnect
 */
static void
xprt_rdma_close(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	dprintk("RPC:       %s: closing\n", __func__);
	if (r_xprt->rx_ep.rep_connected > 0)
		xprt->reestablish_timeout = 0;
	xprt_disconnect_done(xprt);
	(void) rpcrdma_ep_disconnect(&r_xprt->rx_ep, &r_xprt->rx_ia);
}

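/*
 * Update the port in both the generic xprt->addr and the RDMA-private
 * copy of the server address, so the two stay in sync.
 */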
static void
xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
{
	struct sockaddr_in *sap;

	sap = (struct sockaddr_in *)&xprt->addr;
	sap->sin_port = htons(port);
	sap = (struct sockaddr_in *)&rpcx_to_rdmad(xprt).addr;
	sap->sin_port = htons(port);
	dprintk("RPC:       %s: %u\n", __func__, port);
}

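/*
 * Kick off a connect or reconnect. Reconnects are delayed by
 * xprt->reestablish_timeout, which doubles after each attempt and is
 * clamped to the range of 5 to 30 seconds below.
 */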
static void
xprt_rdma_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)task->tk_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	if (!xprt_test_and_set_connecting(xprt)) {
		if (r_xprt->rx_ep.rep_connected != 0) {
			/* Reconnect */
			schedule_delayed_work(&r_xprt->rdma_connect,
				xprt->reestablish_timeout);
			xprt->reestablish_timeout <<= 1;
			if (xprt->reestablish_timeout > (30 * HZ))
				xprt->reestablish_timeout = (30 * HZ);
			else if (xprt->reestablish_timeout < (5 * HZ))
				xprt->reestablish_timeout = (5 * HZ);
		} else {
			schedule_delayed_work(&r_xprt->rdma_connect, 0);
			if (!RPC_IS_ASYNC(task))
				flush_scheduled_work();
		}
	}
}

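/*
 * Reservation is gated by the congestion window. Scale the window to
 * the credit count most recently advertised by the server, so that at
 * most that many requests are outstanding on this transport.
 */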
static int
xprt_rdma_reserve_xprt(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	int credits = atomic_read(&r_xprt->rx_buf.rb_credits);

	/* Capture the congestion window scale factor on first use;
	 * xprt->cwnd == RPC_CWNDSCALE at init time, but it can be
	 * sampled only after transport setup. */
	if (r_xprt->rx_buf.rb_cwndscale == 0UL) {
		r_xprt->rx_buf.rb_cwndscale = xprt->cwnd;
		dprintk("RPC:       %s: cwndscale %lu\n", __func__,
			r_xprt->rx_buf.rb_cwndscale);
		BUG_ON(r_xprt->rx_buf.rb_cwndscale <= 0);
	}
	xprt->cwnd = credits * r_xprt->rx_buf.rb_cwndscale;
	return xprt_reserve_xprt_cong(task);
}

/*
 * The RDMA allocate/free functions need the task structure as a place
 * to hide the struct rpcrdma_req, which is necessary for the actual send/recv
 * sequence. For this reason, the recv buffers are attached to send
 * buffers for portions of the RPC. Note that the RPC layer allocates
 * both send and receive buffers in the same call. We may register
 * the receive buffer portion when using reply chunks.
 */
static void *
xprt_rdma_allocate(struct rpc_task *task, size_t size)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpcrdma_req *req, *nreq;

	req = rpcrdma_buffer_get(&rpcx_to_rdmax(xprt)->rx_buf);
	BUG_ON(NULL == req);

	if (size > req->rl_size) {
		dprintk("RPC:       %s: size %zd too large for buffer[%zd]: "
			"prog %d vers %d proc %d\n",
			__func__, size, req->rl_size,
			task->tk_client->cl_prog, task->tk_client->cl_vers,
			task->tk_msg.rpc_proc->p_proc);
		/*
		 * Outgoing length shortage. Our inline write max must have
		 * been configured to perform direct i/o.
		 *
		 * This is therefore a large metadata operation, and the
		 * allocate call was made on the maximum possible message,
		 * e.g. containing long filename(s) or symlink data. In
		 * fact, while these metadata operations *might* carry
		 * large outgoing payloads, they rarely *do*. However, we
		 * have to commit to the request here, so reallocate and
		 * register it now. The data path will never require this
		 * reallocation.
		 *
		 * If the allocation or registration fails, the RPC framework
		 * will (doggedly) retry.
		 */
		if (rpcx_to_rdmax(xprt)->rx_ia.ri_memreg_strategy ==
				RPCRDMA_BOUNCEBUFFERS) {
			/* forced to "pure inline" */
			dprintk("RPC:       %s: too much data (%zd) for inline "
					"(r/w max %d/%d)\n", __func__, size,
					rpcx_to_rdmad(xprt).inline_rsize,
					rpcx_to_rdmad(xprt).inline_wsize);
			size = req->rl_size;
			rpc_exit(task, -EIO);		/* fail the operation */
			rpcx_to_rdmax(xprt)->rx_stats.failed_marshal_count++;
			goto out;
		}
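		/* Swapper tasks cannot sleep for memory; all other
		 * callers must avoid recursing into the filesystem. */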
		if (task->tk_flags & RPC_TASK_SWAPPER)
			nreq = kmalloc(sizeof *req + size, GFP_ATOMIC);
		else
			nreq = kmalloc(sizeof *req + size, GFP_NOFS);
		if (nreq == NULL)
			goto outfail;

		if (rpcrdma_register_internal(&rpcx_to_rdmax(xprt)->rx_ia,
				nreq->rl_base, size + sizeof(struct rpcrdma_req)
				- offsetof(struct rpcrdma_req, rl_base),
				&nreq->rl_handle, &nreq->rl_iov)) {
			kfree(nreq);
			goto outfail;
		}
		rpcx_to_rdmax(xprt)->rx_stats.hardway_register_count += size;
		nreq->rl_size = size;
		nreq->rl_niovs = 0;
		nreq->rl_nchunks = 0;
		nreq->rl_buffer = (struct rpcrdma_buffer *)req;
		nreq->rl_reply = req->rl_reply;
		memcpy(nreq->rl_segments,
			req->rl_segments, sizeof nreq->rl_segments);
		/* flag the swap with an unused field */
		nreq->rl_iov.length = 0;
		req->rl_reply = NULL;
		req = nreq;
	}
	dprintk("RPC:       %s: size %zd, request 0x%p\n", __func__, size, req);
out:
	req->rl_connect_cookie = 0;	/* our reserved value */
	return req->rl_xdr_buf;

outfail:
	rpcrdma_buffer_put(req);
	rpcx_to_rdmax(xprt)->rx_stats.failed_marshal_count++;
	return NULL;
}

/*
 * This function returns all RDMA resources to the pool.
 */
static void
xprt_rdma_free(void *buffer)
{
	struct rpcrdma_req *req;
	struct rpcrdma_xprt *r_xprt;
	struct rpcrdma_rep *rep;
	int i;

	if (buffer == NULL)
		return;

	req = container_of(buffer, struct rpcrdma_req, rl_xdr_buf[0]);
	if (req->rl_iov.length == 0) {	/* see allocate above */
		r_xprt = container_of(((struct rpcrdma_req *) req->rl_buffer)->rl_buffer,
				      struct rpcrdma_xprt, rx_buf);
	} else
		r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf);
	rep = req->rl_reply;

	dprintk("RPC:       %s: called on 0x%p%s\n",
		__func__, rep, (rep && rep->rr_func) ? " (with waiter)" : "");

	/*
	 * Finish the deregistration. When using mw bind, this was
	 * begun in rpcrdma_reply_handler(). In all other modes, we
	 * do it here, in thread context. The process is considered
	 * complete when the rr_func vector becomes NULL - this
	 * was put in place during rpcrdma_reply_handler() - the wait
	 * call below will not block if the dereg is "done". If
	 * interrupted, our framework will clean up.
	 */
	for (i = 0; req->rl_nchunks;) {
		--req->rl_nchunks;
		i += rpcrdma_deregister_external(
			&req->rl_segments[i], r_xprt, NULL);
	}

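	/* A nonzero return here means we were signalled before the
	 * unbind completed; abandon the callback rather than block. */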
	if (rep && wait_event_interruptible(rep->rr_unbind, !rep->rr_func)) {
		rep->rr_func = NULL;	/* abandon the callback */
		req->rl_reply = NULL;
	}

	if (req->rl_iov.length == 0) {	/* see allocate above */
		struct rpcrdma_req *oreq = (struct rpcrdma_req *)req->rl_buffer;
		oreq->rl_reply = req->rl_reply;
		(void) rpcrdma_deregister_internal(&r_xprt->rx_ia,
						   req->rl_handle,
						   &req->rl_iov);
		kfree(req);
		req = oreq;
	}

	/* Put back request+reply buffers */
	rpcrdma_buffer_put(req);
}

/*
 * send_request invokes the meat of RPC RDMA. It must do the following:
 *  1.  Marshal the RPC request into an RPC RDMA request, which means
 *	putting a header in front of data, and creating IOVs for RDMA
 *	from those in the request.
 *  2.  In marshaling, detect opportunities for RDMA, and use them.
 *  3.  Post a recv message to set up asynchronous completion, then send
 *	the request (rpcrdma_ep_post).
 *  4.  No partial sends are possible in the RPC-RDMA protocol (as in UDP).
 */

static int
xprt_rdma_send_request(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	/* marshal the send itself */
	if (req->rl_niovs == 0 && rpcrdma_marshal_req(rqst) != 0) {
		r_xprt->rx_stats.failed_marshal_count++;
		dprintk("RPC:       %s: rpcrdma_marshal_req failed\n",
			__func__);
		return -EIO;
	}

	if (req->rl_reply == NULL)	/* e.g. reconnection */
		rpcrdma_recv_buffer_get(req);

	if (req->rl_reply) {
		req->rl_reply->rr_func = rpcrdma_reply_handler;
		/* this need only be done once, but... */
		req->rl_reply->rr_xprt = xprt;
	}

	/* A retransmit on the same connection would consume another
	 * credit without a fresh grant from the server, so suppress it
	 * by forcing a reconnect instead */
	if (req->rl_connect_cookie == xprt->connect_cookie)
		goto drop_connection;
	req->rl_connect_cookie = xprt->connect_cookie;

	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
		goto drop_connection;

	task->tk_bytes_sent += rqst->rq_snd_buf.len;
	rqst->rq_bytes_sent = 0;
	return 0;

drop_connection:
	xprt_disconnect_done(xprt);
	return -ENOTCONN;	/* implies disconnect */
}

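/*
 * The "xprt: rdma ..." line below is this transport's contribution to
 * mountstats-style output: ten generic xprt counters (the leading 0
 * stands in for a local port) followed by the RDMA-specific counters.
 */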
static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	long idle_time = 0;

	if (xprt_connected(xprt))
		idle_time = (long)(jiffies - xprt->last_used) / HZ;

	seq_printf(seq,
	  "\txprt:\trdma %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu "
	  "%lu %lu %lu %Lu %Lu %Lu %Lu %lu %lu %lu\n",

	   0,	/* need a local port? */
	   xprt->stat.bind_count,
	   xprt->stat.connect_count,
	   xprt->stat.connect_time,
	   idle_time,
	   xprt->stat.sends,
	   xprt->stat.recvs,
	   xprt->stat.bad_xids,
	   xprt->stat.req_u,
	   xprt->stat.bklog_u,

	   r_xprt->rx_stats.read_chunk_count,
	   r_xprt->rx_stats.write_chunk_count,
	   r_xprt->rx_stats.reply_chunk_count,
	   r_xprt->rx_stats.total_rdma_request,
	   r_xprt->rx_stats.total_rdma_reply,
	   r_xprt->rx_stats.pullup_copy_count,
	   r_xprt->rx_stats.fixup_copy_count,
	   r_xprt->rx_stats.hardway_register_count,
	   r_xprt->rx_stats.failed_marshal_count,
	   r_xprt->rx_stats.bad_reply_count);
}

/*
 * Plumbing for rpc transport switch and kernel module
 */

static struct rpc_xprt_ops xprt_rdma_procs = {
	.reserve_xprt		= xprt_rdma_reserve_xprt,
	.release_xprt		= xprt_release_xprt_cong, /* sunrpc/xprt.c */
	.release_request	= xprt_release_rqst_cong,       /* ditto */
	.set_retrans_timeout	= xprt_set_retrans_timeout_def, /* ditto */
	.rpcbind		= rpcb_getport_async,	/* sunrpc/rpcb_clnt.c */
	.set_port		= xprt_rdma_set_port,
	.connect		= xprt_rdma_connect,
	.buf_alloc		= xprt_rdma_allocate,
	.buf_free		= xprt_rdma_free,
	.send_request		= xprt_rdma_send_request,
	.close			= xprt_rdma_close,
	.destroy		= xprt_rdma_destroy,
	.print_stats		= xprt_rdma_print_stats
};

static struct xprt_class xprt_rdma = {
	.list			= LIST_HEAD_INIT(xprt_rdma.list),
	.name			= "rdma",
	.owner			= THIS_MODULE,
	.ident			= XPRT_TRANSPORT_RDMA,
	.setup			= xprt_setup_rdma,
};

static void __exit xprt_rdma_cleanup(void)
{
	int rc;

	dprintk(KERN_INFO "RPCRDMA Module Removed, deregister RPC RDMA transport\n");
#ifdef RPC_DEBUG
	if (sunrpc_table_header) {
		unregister_sysctl_table(sunrpc_table_header);
		sunrpc_table_header = NULL;
	}
#endif
	rc = xprt_unregister_transport(&xprt_rdma);
	if (rc)
		dprintk("RPC:       %s: xprt_unregister returned %i\n",
			__func__, rc);
}

static int __init xprt_rdma_init(void)
{
	int rc;

	rc = xprt_register_transport(&xprt_rdma);

	if (rc)
		return rc;

	dprintk(KERN_INFO "RPCRDMA Module Init, register RPC RDMA transport\n");

	dprintk(KERN_INFO "Defaults:\n");
	dprintk(KERN_INFO "\tSlots %d\n"
		"\tMaxInlineRead %d\n\tMaxInlineWrite %d\n",
		xprt_rdma_slot_table_entries,
		xprt_rdma_max_inline_read, xprt_rdma_max_inline_write);
	dprintk(KERN_INFO "\tPadding %d\n\tMemreg %d\n",
		xprt_rdma_inline_write_padding, xprt_rdma_memreg_strategy);

#ifdef RPC_DEBUG
	if (!sunrpc_table_header)
		sunrpc_table_header = register_sysctl_table(sunrpc_table);
#endif
	return 0;
}

module_init(xprt_rdma_init);
module_exit(xprt_rdma_cleanup);
813