1 /* 2 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the BSD-type 8 * license below: 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 14 * Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 17 * Redistributions in binary form must reproduce the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer in the documentation and/or other materials provided 20 * with the distribution. 21 * 22 * Neither the name of the Network Appliance, Inc. nor the names of 23 * its contributors may be used to endorse or promote products 24 * derived from this software without specific prior written 25 * permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 38 */ 39 40 /* 41 * transport.c 42 * 43 * This file contains the top-level implementation of an RPC RDMA 44 * transport. 45 * 46 * Naming convention: functions beginning with xprt_ are part of the 47 * transport switch. All others are RPC RDMA internal. 48 */ 49 50 #include <linux/module.h> 51 #include <linux/init.h> 52 #include <linux/slab.h> 53 #include <linux/seq_file.h> 54 55 #include "xprt_rdma.h" 56 57 #ifdef RPC_DEBUG 58 # define RPCDBG_FACILITY RPCDBG_TRANS 59 #endif 60 61 MODULE_LICENSE("Dual BSD/GPL"); 62 63 MODULE_DESCRIPTION("RPC/RDMA Transport for Linux kernel NFS"); 64 MODULE_AUTHOR("Network Appliance, Inc."); 65 66 /* 67 * tunables 68 */ 69 70 static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE; 71 static unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE; 72 static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE; 73 static unsigned int xprt_rdma_inline_write_padding; 74 static unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR; 75 int xprt_rdma_pad_optimize = 0; 76 77 #ifdef RPC_DEBUG 78 79 static unsigned int min_slot_table_size = RPCRDMA_MIN_SLOT_TABLE; 80 static unsigned int max_slot_table_size = RPCRDMA_MAX_SLOT_TABLE; 81 static unsigned int zero; 82 static unsigned int max_padding = PAGE_SIZE; 83 static unsigned int min_memreg = RPCRDMA_BOUNCEBUFFERS; 84 static unsigned int max_memreg = RPCRDMA_LAST - 1; 85 86 static struct ctl_table_header *sunrpc_table_header; 87 88 static ctl_table xr_tunables_table[] = { 89 { 90 .procname = "rdma_slot_table_entries", 91 .data = &xprt_rdma_slot_table_entries, 92 .maxlen = sizeof(unsigned int), 93 .mode = 0644, 94 .proc_handler = proc_dointvec_minmax, 95 .extra1 = &min_slot_table_size, 96 .extra2 = &max_slot_table_size 97 }, 98 { 99 .procname = "rdma_max_inline_read", 100 .data = &xprt_rdma_max_inline_read, 101 .maxlen = sizeof(unsigned int), 102 .mode = 0644, 103 .proc_handler = proc_dointvec, 104 }, 105 { 106 .procname = "rdma_max_inline_write", 107 .data = &xprt_rdma_max_inline_write, 108 .maxlen = sizeof(unsigned int), 109 .mode = 0644, 110 .proc_handler = proc_dointvec, 111 }, 112 { 113 .procname = "rdma_inline_write_padding", 114 .data = &xprt_rdma_inline_write_padding, 115 .maxlen = sizeof(unsigned int), 116 .mode = 0644, 117 .proc_handler = proc_dointvec_minmax, 118 .extra1 = &zero, 119 .extra2 = &max_padding, 120 }, 121 { 122 .procname = "rdma_memreg_strategy", 123 .data = &xprt_rdma_memreg_strategy, 124 .maxlen = sizeof(unsigned int), 125 .mode = 0644, 126 .proc_handler = proc_dointvec_minmax, 127 .extra1 = &min_memreg, 128 .extra2 = &max_memreg, 129 }, 130 { 131 .procname = "rdma_pad_optimize", 132 .data = &xprt_rdma_pad_optimize, 133 .maxlen = sizeof(unsigned int), 134 .mode = 0644, 135 .proc_handler = proc_dointvec, 136 }, 137 { }, 138 }; 139 140 static ctl_table sunrpc_table[] = { 141 { 142 .procname = "sunrpc", 143 .mode = 0555, 144 .child = xr_tunables_table 145 }, 146 { }, 147 }; 148 149 #endif 150 151 static struct rpc_xprt_ops xprt_rdma_procs; /* forward reference */ 152 153 static void 154 xprt_rdma_format_addresses(struct rpc_xprt *xprt) 155 { 156 struct sockaddr *sap = (struct sockaddr *) 157 &rpcx_to_rdmad(xprt).addr; 158 struct sockaddr_in *sin = (struct sockaddr_in *)sap; 159 char buf[64]; 160 161 (void)rpc_ntop(sap, buf, sizeof(buf)); 162 xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL); 163 164 snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap)); 165 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL); 166 167 xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma"; 168 169 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr)); 170 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL); 171 172 snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap)); 173 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL); 174 175 /* netid */ 176 xprt->address_strings[RPC_DISPLAY_NETID] = "rdma"; 177 } 178 179 static void 180 xprt_rdma_free_addresses(struct rpc_xprt *xprt) 181 { 182 unsigned int i; 183 184 for (i = 0; i < RPC_DISPLAY_MAX; i++) 185 switch (i) { 186 case RPC_DISPLAY_PROTO: 187 case RPC_DISPLAY_NETID: 188 continue; 189 default: 190 kfree(xprt->address_strings[i]); 191 } 192 } 193 194 static void 195 xprt_rdma_connect_worker(struct work_struct *work) 196 { 197 struct rpcrdma_xprt *r_xprt = 198 container_of(work, struct rpcrdma_xprt, rdma_connect.work); 199 struct rpc_xprt *xprt = &r_xprt->xprt; 200 int rc = 0; 201 202 if (!xprt->shutdown) { 203 current->flags |= PF_FSTRANS; 204 xprt_clear_connected(xprt); 205 206 dprintk("RPC: %s: %sconnect\n", __func__, 207 r_xprt->rx_ep.rep_connected != 0 ? "re" : ""); 208 rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia); 209 if (rc) 210 goto out; 211 } 212 goto out_clear; 213 214 out: 215 xprt_wake_pending_tasks(xprt, rc); 216 out_clear: 217 dprintk("RPC: %s: exit\n", __func__); 218 xprt_clear_connecting(xprt); 219 current->flags &= ~PF_FSTRANS; 220 } 221 222 /* 223 * xprt_rdma_destroy 224 * 225 * Destroy the xprt. 226 * Free all memory associated with the object, including its own. 227 * NOTE: none of the *destroy methods free memory for their top-level 228 * objects, even though they may have allocated it (they do free 229 * private memory). It's up to the caller to handle it. In this 230 * case (RDMA transport), all structure memory is inlined with the 231 * struct rpcrdma_xprt. 232 */ 233 static void 234 xprt_rdma_destroy(struct rpc_xprt *xprt) 235 { 236 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 237 int rc; 238 239 dprintk("RPC: %s: called\n", __func__); 240 241 cancel_delayed_work_sync(&r_xprt->rdma_connect); 242 243 xprt_clear_connected(xprt); 244 245 rpcrdma_buffer_destroy(&r_xprt->rx_buf); 246 rc = rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia); 247 if (rc) 248 dprintk("RPC: %s: rpcrdma_ep_destroy returned %i\n", 249 __func__, rc); 250 rpcrdma_ia_close(&r_xprt->rx_ia); 251 252 xprt_rdma_free_addresses(xprt); 253 254 xprt_free(xprt); 255 256 dprintk("RPC: %s: returning\n", __func__); 257 258 module_put(THIS_MODULE); 259 } 260 261 static const struct rpc_timeout xprt_rdma_default_timeout = { 262 .to_initval = 60 * HZ, 263 .to_maxval = 60 * HZ, 264 }; 265 266 /** 267 * xprt_setup_rdma - Set up transport to use RDMA 268 * 269 * @args: rpc transport arguments 270 */ 271 static struct rpc_xprt * 272 xprt_setup_rdma(struct xprt_create *args) 273 { 274 struct rpcrdma_create_data_internal cdata; 275 struct rpc_xprt *xprt; 276 struct rpcrdma_xprt *new_xprt; 277 struct rpcrdma_ep *new_ep; 278 struct sockaddr_in *sin; 279 int rc; 280 281 if (args->addrlen > sizeof(xprt->addr)) { 282 dprintk("RPC: %s: address too large\n", __func__); 283 return ERR_PTR(-EBADF); 284 } 285 286 xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), 287 xprt_rdma_slot_table_entries, 288 xprt_rdma_slot_table_entries); 289 if (xprt == NULL) { 290 dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n", 291 __func__); 292 return ERR_PTR(-ENOMEM); 293 } 294 295 /* 60 second timeout, no retries */ 296 xprt->timeout = &xprt_rdma_default_timeout; 297 xprt->bind_timeout = (60U * HZ); 298 xprt->reestablish_timeout = (5U * HZ); 299 xprt->idle_timeout = (5U * 60 * HZ); 300 301 xprt->resvport = 0; /* privileged port not needed */ 302 xprt->tsh_size = 0; /* RPC-RDMA handles framing */ 303 xprt->max_payload = RPCRDMA_MAX_DATA_SEGS * PAGE_SIZE; 304 xprt->ops = &xprt_rdma_procs; 305 306 /* 307 * Set up RDMA-specific connect data. 308 */ 309 310 /* Put server RDMA address in local cdata */ 311 memcpy(&cdata.addr, args->dstaddr, args->addrlen); 312 313 /* Ensure xprt->addr holds valid server TCP (not RDMA) 314 * address, for any side protocols which peek at it */ 315 xprt->prot = IPPROTO_TCP; 316 xprt->addrlen = args->addrlen; 317 memcpy(&xprt->addr, &cdata.addr, xprt->addrlen); 318 319 sin = (struct sockaddr_in *)&cdata.addr; 320 if (ntohs(sin->sin_port) != 0) 321 xprt_set_bound(xprt); 322 323 dprintk("RPC: %s: %pI4:%u\n", 324 __func__, &sin->sin_addr.s_addr, ntohs(sin->sin_port)); 325 326 /* Set max requests */ 327 cdata.max_requests = xprt->max_reqs; 328 329 /* Set some length limits */ 330 cdata.rsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA write max */ 331 cdata.wsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA read max */ 332 333 cdata.inline_wsize = xprt_rdma_max_inline_write; 334 if (cdata.inline_wsize > cdata.wsize) 335 cdata.inline_wsize = cdata.wsize; 336 337 cdata.inline_rsize = xprt_rdma_max_inline_read; 338 if (cdata.inline_rsize > cdata.rsize) 339 cdata.inline_rsize = cdata.rsize; 340 341 cdata.padding = xprt_rdma_inline_write_padding; 342 343 /* 344 * Create new transport instance, which includes initialized 345 * o ia 346 * o endpoint 347 * o buffers 348 */ 349 350 new_xprt = rpcx_to_rdmax(xprt); 351 352 rc = rpcrdma_ia_open(new_xprt, (struct sockaddr *) &cdata.addr, 353 xprt_rdma_memreg_strategy); 354 if (rc) 355 goto out1; 356 357 /* 358 * initialize and create ep 359 */ 360 new_xprt->rx_data = cdata; 361 new_ep = &new_xprt->rx_ep; 362 new_ep->rep_remote_addr = cdata.addr; 363 364 rc = rpcrdma_ep_create(&new_xprt->rx_ep, 365 &new_xprt->rx_ia, &new_xprt->rx_data); 366 if (rc) 367 goto out2; 368 369 /* 370 * Allocate pre-registered send and receive buffers for headers and 371 * any inline data. Also specify any padding which will be provided 372 * from a preregistered zero buffer. 373 */ 374 rc = rpcrdma_buffer_create(&new_xprt->rx_buf, new_ep, &new_xprt->rx_ia, 375 &new_xprt->rx_data); 376 if (rc) 377 goto out3; 378 379 /* 380 * Register a callback for connection events. This is necessary because 381 * connection loss notification is async. We also catch connection loss 382 * when reaping receives. 383 */ 384 INIT_DELAYED_WORK(&new_xprt->rdma_connect, xprt_rdma_connect_worker); 385 new_ep->rep_func = rpcrdma_conn_func; 386 new_ep->rep_xprt = xprt; 387 388 xprt_rdma_format_addresses(xprt); 389 390 if (!try_module_get(THIS_MODULE)) 391 goto out4; 392 393 return xprt; 394 395 out4: 396 xprt_rdma_free_addresses(xprt); 397 rc = -EINVAL; 398 out3: 399 (void) rpcrdma_ep_destroy(new_ep, &new_xprt->rx_ia); 400 out2: 401 rpcrdma_ia_close(&new_xprt->rx_ia); 402 out1: 403 xprt_free(xprt); 404 return ERR_PTR(rc); 405 } 406 407 /* 408 * Close a connection, during shutdown or timeout/reconnect 409 */ 410 static void 411 xprt_rdma_close(struct rpc_xprt *xprt) 412 { 413 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 414 415 dprintk("RPC: %s: closing\n", __func__); 416 if (r_xprt->rx_ep.rep_connected > 0) 417 xprt->reestablish_timeout = 0; 418 xprt_disconnect_done(xprt); 419 (void) rpcrdma_ep_disconnect(&r_xprt->rx_ep, &r_xprt->rx_ia); 420 } 421 422 static void 423 xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port) 424 { 425 struct sockaddr_in *sap; 426 427 sap = (struct sockaddr_in *)&xprt->addr; 428 sap->sin_port = htons(port); 429 sap = (struct sockaddr_in *)&rpcx_to_rdmad(xprt).addr; 430 sap->sin_port = htons(port); 431 dprintk("RPC: %s: %u\n", __func__, port); 432 } 433 434 static void 435 xprt_rdma_connect(struct rpc_task *task) 436 { 437 struct rpc_xprt *xprt = (struct rpc_xprt *)task->tk_xprt; 438 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 439 440 if (r_xprt->rx_ep.rep_connected != 0) { 441 /* Reconnect */ 442 schedule_delayed_work(&r_xprt->rdma_connect, 443 xprt->reestablish_timeout); 444 xprt->reestablish_timeout <<= 1; 445 if (xprt->reestablish_timeout > (30 * HZ)) 446 xprt->reestablish_timeout = (30 * HZ); 447 else if (xprt->reestablish_timeout < (5 * HZ)) 448 xprt->reestablish_timeout = (5 * HZ); 449 } else { 450 schedule_delayed_work(&r_xprt->rdma_connect, 0); 451 if (!RPC_IS_ASYNC(task)) 452 flush_delayed_work(&r_xprt->rdma_connect); 453 } 454 } 455 456 static int 457 xprt_rdma_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task) 458 { 459 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 460 int credits = atomic_read(&r_xprt->rx_buf.rb_credits); 461 462 /* == RPC_CWNDSCALE @ init, but *after* setup */ 463 if (r_xprt->rx_buf.rb_cwndscale == 0UL) { 464 r_xprt->rx_buf.rb_cwndscale = xprt->cwnd; 465 dprintk("RPC: %s: cwndscale %lu\n", __func__, 466 r_xprt->rx_buf.rb_cwndscale); 467 BUG_ON(r_xprt->rx_buf.rb_cwndscale <= 0); 468 } 469 xprt->cwnd = credits * r_xprt->rx_buf.rb_cwndscale; 470 return xprt_reserve_xprt_cong(xprt, task); 471 } 472 473 /* 474 * The RDMA allocate/free functions need the task structure as a place 475 * to hide the struct rpcrdma_req, which is necessary for the actual send/recv 476 * sequence. For this reason, the recv buffers are attached to send 477 * buffers for portions of the RPC. Note that the RPC layer allocates 478 * both send and receive buffers in the same call. We may register 479 * the receive buffer portion when using reply chunks. 480 */ 481 static void * 482 xprt_rdma_allocate(struct rpc_task *task, size_t size) 483 { 484 struct rpc_xprt *xprt = task->tk_xprt; 485 struct rpcrdma_req *req, *nreq; 486 487 req = rpcrdma_buffer_get(&rpcx_to_rdmax(xprt)->rx_buf); 488 BUG_ON(NULL == req); 489 490 if (size > req->rl_size) { 491 dprintk("RPC: %s: size %zd too large for buffer[%zd]: " 492 "prog %d vers %d proc %d\n", 493 __func__, size, req->rl_size, 494 task->tk_client->cl_prog, task->tk_client->cl_vers, 495 task->tk_msg.rpc_proc->p_proc); 496 /* 497 * Outgoing length shortage. Our inline write max must have 498 * been configured to perform direct i/o. 499 * 500 * This is therefore a large metadata operation, and the 501 * allocate call was made on the maximum possible message, 502 * e.g. containing long filename(s) or symlink data. In 503 * fact, while these metadata operations *might* carry 504 * large outgoing payloads, they rarely *do*. However, we 505 * have to commit to the request here, so reallocate and 506 * register it now. The data path will never require this 507 * reallocation. 508 * 509 * If the allocation or registration fails, the RPC framework 510 * will (doggedly) retry. 511 */ 512 if (rpcx_to_rdmax(xprt)->rx_ia.ri_memreg_strategy == 513 RPCRDMA_BOUNCEBUFFERS) { 514 /* forced to "pure inline" */ 515 dprintk("RPC: %s: too much data (%zd) for inline " 516 "(r/w max %d/%d)\n", __func__, size, 517 rpcx_to_rdmad(xprt).inline_rsize, 518 rpcx_to_rdmad(xprt).inline_wsize); 519 size = req->rl_size; 520 rpc_exit(task, -EIO); /* fail the operation */ 521 rpcx_to_rdmax(xprt)->rx_stats.failed_marshal_count++; 522 goto out; 523 } 524 if (task->tk_flags & RPC_TASK_SWAPPER) 525 nreq = kmalloc(sizeof *req + size, GFP_ATOMIC); 526 else 527 nreq = kmalloc(sizeof *req + size, GFP_NOFS); 528 if (nreq == NULL) 529 goto outfail; 530 531 if (rpcrdma_register_internal(&rpcx_to_rdmax(xprt)->rx_ia, 532 nreq->rl_base, size + sizeof(struct rpcrdma_req) 533 - offsetof(struct rpcrdma_req, rl_base), 534 &nreq->rl_handle, &nreq->rl_iov)) { 535 kfree(nreq); 536 goto outfail; 537 } 538 rpcx_to_rdmax(xprt)->rx_stats.hardway_register_count += size; 539 nreq->rl_size = size; 540 nreq->rl_niovs = 0; 541 nreq->rl_nchunks = 0; 542 nreq->rl_buffer = (struct rpcrdma_buffer *)req; 543 nreq->rl_reply = req->rl_reply; 544 memcpy(nreq->rl_segments, 545 req->rl_segments, sizeof nreq->rl_segments); 546 /* flag the swap with an unused field */ 547 nreq->rl_iov.length = 0; 548 req->rl_reply = NULL; 549 req = nreq; 550 } 551 dprintk("RPC: %s: size %zd, request 0x%p\n", __func__, size, req); 552 out: 553 req->rl_connect_cookie = 0; /* our reserved value */ 554 return req->rl_xdr_buf; 555 556 outfail: 557 rpcrdma_buffer_put(req); 558 rpcx_to_rdmax(xprt)->rx_stats.failed_marshal_count++; 559 return NULL; 560 } 561 562 /* 563 * This function returns all RDMA resources to the pool. 564 */ 565 static void 566 xprt_rdma_free(void *buffer) 567 { 568 struct rpcrdma_req *req; 569 struct rpcrdma_xprt *r_xprt; 570 struct rpcrdma_rep *rep; 571 int i; 572 573 if (buffer == NULL) 574 return; 575 576 req = container_of(buffer, struct rpcrdma_req, rl_xdr_buf[0]); 577 if (req->rl_iov.length == 0) { /* see allocate above */ 578 r_xprt = container_of(((struct rpcrdma_req *) req->rl_buffer)->rl_buffer, 579 struct rpcrdma_xprt, rx_buf); 580 } else 581 r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf); 582 rep = req->rl_reply; 583 584 dprintk("RPC: %s: called on 0x%p%s\n", 585 __func__, rep, (rep && rep->rr_func) ? " (with waiter)" : ""); 586 587 /* 588 * Finish the deregistration. When using mw bind, this was 589 * begun in rpcrdma_reply_handler(). In all other modes, we 590 * do it here, in thread context. The process is considered 591 * complete when the rr_func vector becomes NULL - this 592 * was put in place during rpcrdma_reply_handler() - the wait 593 * call below will not block if the dereg is "done". If 594 * interrupted, our framework will clean up. 595 */ 596 for (i = 0; req->rl_nchunks;) { 597 --req->rl_nchunks; 598 i += rpcrdma_deregister_external( 599 &req->rl_segments[i], r_xprt, NULL); 600 } 601 602 if (rep && wait_event_interruptible(rep->rr_unbind, !rep->rr_func)) { 603 rep->rr_func = NULL; /* abandon the callback */ 604 req->rl_reply = NULL; 605 } 606 607 if (req->rl_iov.length == 0) { /* see allocate above */ 608 struct rpcrdma_req *oreq = (struct rpcrdma_req *)req->rl_buffer; 609 oreq->rl_reply = req->rl_reply; 610 (void) rpcrdma_deregister_internal(&r_xprt->rx_ia, 611 req->rl_handle, 612 &req->rl_iov); 613 kfree(req); 614 req = oreq; 615 } 616 617 /* Put back request+reply buffers */ 618 rpcrdma_buffer_put(req); 619 } 620 621 /* 622 * send_request invokes the meat of RPC RDMA. It must do the following: 623 * 1. Marshal the RPC request into an RPC RDMA request, which means 624 * putting a header in front of data, and creating IOVs for RDMA 625 * from those in the request. 626 * 2. In marshaling, detect opportunities for RDMA, and use them. 627 * 3. Post a recv message to set up asynch completion, then send 628 * the request (rpcrdma_ep_post). 629 * 4. No partial sends are possible in the RPC-RDMA protocol (as in UDP). 630 */ 631 632 static int 633 xprt_rdma_send_request(struct rpc_task *task) 634 { 635 struct rpc_rqst *rqst = task->tk_rqstp; 636 struct rpc_xprt *xprt = task->tk_xprt; 637 struct rpcrdma_req *req = rpcr_to_rdmar(rqst); 638 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 639 640 /* marshal the send itself */ 641 if (req->rl_niovs == 0 && rpcrdma_marshal_req(rqst) != 0) { 642 r_xprt->rx_stats.failed_marshal_count++; 643 dprintk("RPC: %s: rpcrdma_marshal_req failed\n", 644 __func__); 645 return -EIO; 646 } 647 648 if (req->rl_reply == NULL) /* e.g. reconnection */ 649 rpcrdma_recv_buffer_get(req); 650 651 if (req->rl_reply) { 652 req->rl_reply->rr_func = rpcrdma_reply_handler; 653 /* this need only be done once, but... */ 654 req->rl_reply->rr_xprt = xprt; 655 } 656 657 /* Must suppress retransmit to maintain credits */ 658 if (req->rl_connect_cookie == xprt->connect_cookie) 659 goto drop_connection; 660 req->rl_connect_cookie = xprt->connect_cookie; 661 662 if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req)) 663 goto drop_connection; 664 665 rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len; 666 rqst->rq_bytes_sent = 0; 667 return 0; 668 669 drop_connection: 670 xprt_disconnect_done(xprt); 671 return -ENOTCONN; /* implies disconnect */ 672 } 673 674 static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 675 { 676 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 677 long idle_time = 0; 678 679 if (xprt_connected(xprt)) 680 idle_time = (long)(jiffies - xprt->last_used) / HZ; 681 682 seq_printf(seq, 683 "\txprt:\trdma %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu " 684 "%lu %lu %lu %Lu %Lu %Lu %Lu %lu %lu %lu\n", 685 686 0, /* need a local port? */ 687 xprt->stat.bind_count, 688 xprt->stat.connect_count, 689 xprt->stat.connect_time, 690 idle_time, 691 xprt->stat.sends, 692 xprt->stat.recvs, 693 xprt->stat.bad_xids, 694 xprt->stat.req_u, 695 xprt->stat.bklog_u, 696 697 r_xprt->rx_stats.read_chunk_count, 698 r_xprt->rx_stats.write_chunk_count, 699 r_xprt->rx_stats.reply_chunk_count, 700 r_xprt->rx_stats.total_rdma_request, 701 r_xprt->rx_stats.total_rdma_reply, 702 r_xprt->rx_stats.pullup_copy_count, 703 r_xprt->rx_stats.fixup_copy_count, 704 r_xprt->rx_stats.hardway_register_count, 705 r_xprt->rx_stats.failed_marshal_count, 706 r_xprt->rx_stats.bad_reply_count); 707 } 708 709 /* 710 * Plumbing for rpc transport switch and kernel module 711 */ 712 713 static struct rpc_xprt_ops xprt_rdma_procs = { 714 .reserve_xprt = xprt_rdma_reserve_xprt, 715 .release_xprt = xprt_release_xprt_cong, /* sunrpc/xprt.c */ 716 .release_request = xprt_release_rqst_cong, /* ditto */ 717 .set_retrans_timeout = xprt_set_retrans_timeout_def, /* ditto */ 718 .rpcbind = rpcb_getport_async, /* sunrpc/rpcb_clnt.c */ 719 .set_port = xprt_rdma_set_port, 720 .connect = xprt_rdma_connect, 721 .buf_alloc = xprt_rdma_allocate, 722 .buf_free = xprt_rdma_free, 723 .send_request = xprt_rdma_send_request, 724 .close = xprt_rdma_close, 725 .destroy = xprt_rdma_destroy, 726 .print_stats = xprt_rdma_print_stats 727 }; 728 729 static struct xprt_class xprt_rdma = { 730 .list = LIST_HEAD_INIT(xprt_rdma.list), 731 .name = "rdma", 732 .owner = THIS_MODULE, 733 .ident = XPRT_TRANSPORT_RDMA, 734 .setup = xprt_setup_rdma, 735 }; 736 737 static void __exit xprt_rdma_cleanup(void) 738 { 739 int rc; 740 741 dprintk(KERN_INFO "RPCRDMA Module Removed, deregister RPC RDMA transport\n"); 742 #ifdef RPC_DEBUG 743 if (sunrpc_table_header) { 744 unregister_sysctl_table(sunrpc_table_header); 745 sunrpc_table_header = NULL; 746 } 747 #endif 748 rc = xprt_unregister_transport(&xprt_rdma); 749 if (rc) 750 dprintk("RPC: %s: xprt_unregister returned %i\n", 751 __func__, rc); 752 } 753 754 static int __init xprt_rdma_init(void) 755 { 756 int rc; 757 758 rc = xprt_register_transport(&xprt_rdma); 759 760 if (rc) 761 return rc; 762 763 dprintk(KERN_INFO "RPCRDMA Module Init, register RPC RDMA transport\n"); 764 765 dprintk(KERN_INFO "Defaults:\n"); 766 dprintk(KERN_INFO "\tSlots %d\n" 767 "\tMaxInlineRead %d\n\tMaxInlineWrite %d\n", 768 xprt_rdma_slot_table_entries, 769 xprt_rdma_max_inline_read, xprt_rdma_max_inline_write); 770 dprintk(KERN_INFO "\tPadding %d\n\tMemreg %d\n", 771 xprt_rdma_inline_write_padding, xprt_rdma_memreg_strategy); 772 773 #ifdef RPC_DEBUG 774 if (!sunrpc_table_header) 775 sunrpc_table_header = register_sysctl_table(sunrpc_table); 776 #endif 777 return 0; 778 } 779 780 module_init(xprt_rdma_init); 781 module_exit(xprt_rdma_cleanup); 782