1 /* 2 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the BSD-type 8 * license below: 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 14 * Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 17 * Redistributions in binary form must reproduce the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer in the documentation and/or other materials provided 20 * with the distribution. 21 * 22 * Neither the name of the Network Appliance, Inc. nor the names of 23 * its contributors may be used to endorse or promote products 24 * derived from this software without specific prior written 25 * permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 38 */ 39 40 /* 41 * transport.c 42 * 43 * This file contains the top-level implementation of an RPC RDMA 44 * transport. 45 * 46 * Naming convention: functions beginning with xprt_ are part of the 47 * transport switch. All others are RPC RDMA internal. 48 */ 49 50 #include <linux/module.h> 51 #include <linux/init.h> 52 #include <linux/slab.h> 53 #include <linux/seq_file.h> 54 #include <linux/sunrpc/addr.h> 55 56 #include "xprt_rdma.h" 57 58 #ifdef RPC_DEBUG 59 # define RPCDBG_FACILITY RPCDBG_TRANS 60 #endif 61 62 MODULE_LICENSE("Dual BSD/GPL"); 63 64 MODULE_DESCRIPTION("RPC/RDMA Transport for Linux kernel NFS"); 65 MODULE_AUTHOR("Network Appliance, Inc."); 66 67 /* 68 * tunables 69 */ 70 71 static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE; 72 static unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE; 73 static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE; 74 static unsigned int xprt_rdma_inline_write_padding; 75 static unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR; 76 int xprt_rdma_pad_optimize = 0; 77 78 #ifdef RPC_DEBUG 79 80 static unsigned int min_slot_table_size = RPCRDMA_MIN_SLOT_TABLE; 81 static unsigned int max_slot_table_size = RPCRDMA_MAX_SLOT_TABLE; 82 static unsigned int zero; 83 static unsigned int max_padding = PAGE_SIZE; 84 static unsigned int min_memreg = RPCRDMA_BOUNCEBUFFERS; 85 static unsigned int max_memreg = RPCRDMA_LAST - 1; 86 87 static struct ctl_table_header *sunrpc_table_header; 88 89 static struct ctl_table xr_tunables_table[] = { 90 { 91 .procname = "rdma_slot_table_entries", 92 .data = &xprt_rdma_slot_table_entries, 93 .maxlen = sizeof(unsigned int), 94 .mode = 0644, 95 .proc_handler = proc_dointvec_minmax, 96 .extra1 = &min_slot_table_size, 97 .extra2 = &max_slot_table_size 98 }, 99 { 100 .procname = "rdma_max_inline_read", 101 .data = &xprt_rdma_max_inline_read, 102 .maxlen = sizeof(unsigned int), 103 .mode = 0644, 104 .proc_handler = proc_dointvec, 105 }, 106 { 107 .procname = "rdma_max_inline_write", 108 .data = &xprt_rdma_max_inline_write, 109 .maxlen = sizeof(unsigned int), 110 .mode = 0644, 111 .proc_handler = proc_dointvec, 112 }, 113 { 114 .procname = "rdma_inline_write_padding", 115 .data = &xprt_rdma_inline_write_padding, 116 .maxlen = sizeof(unsigned int), 117 .mode = 0644, 118 .proc_handler = proc_dointvec_minmax, 119 .extra1 = &zero, 120 .extra2 = &max_padding, 121 }, 122 { 123 .procname = "rdma_memreg_strategy", 124 .data = &xprt_rdma_memreg_strategy, 125 .maxlen = sizeof(unsigned int), 126 .mode = 0644, 127 .proc_handler = proc_dointvec_minmax, 128 .extra1 = &min_memreg, 129 .extra2 = &max_memreg, 130 }, 131 { 132 .procname = "rdma_pad_optimize", 133 .data = &xprt_rdma_pad_optimize, 134 .maxlen = sizeof(unsigned int), 135 .mode = 0644, 136 .proc_handler = proc_dointvec, 137 }, 138 { }, 139 }; 140 141 static struct ctl_table sunrpc_table[] = { 142 { 143 .procname = "sunrpc", 144 .mode = 0555, 145 .child = xr_tunables_table 146 }, 147 { }, 148 }; 149 150 #endif 151 152 #define RPCRDMA_BIND_TO (60U * HZ) 153 #define RPCRDMA_INIT_REEST_TO (5U * HZ) 154 #define RPCRDMA_MAX_REEST_TO (30U * HZ) 155 #define RPCRDMA_IDLE_DISC_TO (5U * 60 * HZ) 156 157 static struct rpc_xprt_ops xprt_rdma_procs; /* forward reference */ 158 159 static void 160 xprt_rdma_format_addresses(struct rpc_xprt *xprt) 161 { 162 struct sockaddr *sap = (struct sockaddr *) 163 &rpcx_to_rdmad(xprt).addr; 164 struct sockaddr_in *sin = (struct sockaddr_in *)sap; 165 char buf[64]; 166 167 (void)rpc_ntop(sap, buf, sizeof(buf)); 168 xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL); 169 170 snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap)); 171 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL); 172 173 xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma"; 174 175 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr)); 176 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL); 177 178 snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap)); 179 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL); 180 181 /* netid */ 182 xprt->address_strings[RPC_DISPLAY_NETID] = "rdma"; 183 } 184 185 static void 186 xprt_rdma_free_addresses(struct rpc_xprt *xprt) 187 { 188 unsigned int i; 189 190 for (i = 0; i < RPC_DISPLAY_MAX; i++) 191 switch (i) { 192 case RPC_DISPLAY_PROTO: 193 case RPC_DISPLAY_NETID: 194 continue; 195 default: 196 kfree(xprt->address_strings[i]); 197 } 198 } 199 200 static void 201 xprt_rdma_connect_worker(struct work_struct *work) 202 { 203 struct rpcrdma_xprt *r_xprt = 204 container_of(work, struct rpcrdma_xprt, rdma_connect.work); 205 struct rpc_xprt *xprt = &r_xprt->xprt; 206 int rc = 0; 207 208 current->flags |= PF_FSTRANS; 209 xprt_clear_connected(xprt); 210 211 dprintk("RPC: %s: %sconnect\n", __func__, 212 r_xprt->rx_ep.rep_connected != 0 ? "re" : ""); 213 rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia); 214 if (rc) 215 xprt_wake_pending_tasks(xprt, rc); 216 217 dprintk("RPC: %s: exit\n", __func__); 218 xprt_clear_connecting(xprt); 219 current->flags &= ~PF_FSTRANS; 220 } 221 222 /* 223 * xprt_rdma_destroy 224 * 225 * Destroy the xprt. 226 * Free all memory associated with the object, including its own. 227 * NOTE: none of the *destroy methods free memory for their top-level 228 * objects, even though they may have allocated it (they do free 229 * private memory). It's up to the caller to handle it. In this 230 * case (RDMA transport), all structure memory is inlined with the 231 * struct rpcrdma_xprt. 232 */ 233 static void 234 xprt_rdma_destroy(struct rpc_xprt *xprt) 235 { 236 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 237 238 dprintk("RPC: %s: called\n", __func__); 239 240 cancel_delayed_work_sync(&r_xprt->rdma_connect); 241 242 xprt_clear_connected(xprt); 243 244 rpcrdma_buffer_destroy(&r_xprt->rx_buf); 245 rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia); 246 rpcrdma_ia_close(&r_xprt->rx_ia); 247 248 xprt_rdma_free_addresses(xprt); 249 250 xprt_free(xprt); 251 252 dprintk("RPC: %s: returning\n", __func__); 253 254 module_put(THIS_MODULE); 255 } 256 257 static const struct rpc_timeout xprt_rdma_default_timeout = { 258 .to_initval = 60 * HZ, 259 .to_maxval = 60 * HZ, 260 }; 261 262 /** 263 * xprt_setup_rdma - Set up transport to use RDMA 264 * 265 * @args: rpc transport arguments 266 */ 267 static struct rpc_xprt * 268 xprt_setup_rdma(struct xprt_create *args) 269 { 270 struct rpcrdma_create_data_internal cdata; 271 struct rpc_xprt *xprt; 272 struct rpcrdma_xprt *new_xprt; 273 struct rpcrdma_ep *new_ep; 274 struct sockaddr_in *sin; 275 int rc; 276 277 if (args->addrlen > sizeof(xprt->addr)) { 278 dprintk("RPC: %s: address too large\n", __func__); 279 return ERR_PTR(-EBADF); 280 } 281 282 xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), 283 xprt_rdma_slot_table_entries, 284 xprt_rdma_slot_table_entries); 285 if (xprt == NULL) { 286 dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n", 287 __func__); 288 return ERR_PTR(-ENOMEM); 289 } 290 291 /* 60 second timeout, no retries */ 292 xprt->timeout = &xprt_rdma_default_timeout; 293 xprt->bind_timeout = RPCRDMA_BIND_TO; 294 xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO; 295 xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO; 296 297 xprt->resvport = 0; /* privileged port not needed */ 298 xprt->tsh_size = 0; /* RPC-RDMA handles framing */ 299 xprt->ops = &xprt_rdma_procs; 300 301 /* 302 * Set up RDMA-specific connect data. 303 */ 304 305 /* Put server RDMA address in local cdata */ 306 memcpy(&cdata.addr, args->dstaddr, args->addrlen); 307 308 /* Ensure xprt->addr holds valid server TCP (not RDMA) 309 * address, for any side protocols which peek at it */ 310 xprt->prot = IPPROTO_TCP; 311 xprt->addrlen = args->addrlen; 312 memcpy(&xprt->addr, &cdata.addr, xprt->addrlen); 313 314 sin = (struct sockaddr_in *)&cdata.addr; 315 if (ntohs(sin->sin_port) != 0) 316 xprt_set_bound(xprt); 317 318 dprintk("RPC: %s: %pI4:%u\n", 319 __func__, &sin->sin_addr.s_addr, ntohs(sin->sin_port)); 320 321 /* Set max requests */ 322 cdata.max_requests = xprt->max_reqs; 323 324 /* Set some length limits */ 325 cdata.rsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA write max */ 326 cdata.wsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA read max */ 327 328 cdata.inline_wsize = xprt_rdma_max_inline_write; 329 if (cdata.inline_wsize > cdata.wsize) 330 cdata.inline_wsize = cdata.wsize; 331 332 cdata.inline_rsize = xprt_rdma_max_inline_read; 333 if (cdata.inline_rsize > cdata.rsize) 334 cdata.inline_rsize = cdata.rsize; 335 336 cdata.padding = xprt_rdma_inline_write_padding; 337 338 /* 339 * Create new transport instance, which includes initialized 340 * o ia 341 * o endpoint 342 * o buffers 343 */ 344 345 new_xprt = rpcx_to_rdmax(xprt); 346 347 rc = rpcrdma_ia_open(new_xprt, (struct sockaddr *) &cdata.addr, 348 xprt_rdma_memreg_strategy); 349 if (rc) 350 goto out1; 351 352 /* 353 * initialize and create ep 354 */ 355 new_xprt->rx_data = cdata; 356 new_ep = &new_xprt->rx_ep; 357 new_ep->rep_remote_addr = cdata.addr; 358 359 rc = rpcrdma_ep_create(&new_xprt->rx_ep, 360 &new_xprt->rx_ia, &new_xprt->rx_data); 361 if (rc) 362 goto out2; 363 364 /* 365 * Allocate pre-registered send and receive buffers for headers and 366 * any inline data. Also specify any padding which will be provided 367 * from a preregistered zero buffer. 368 */ 369 rc = rpcrdma_buffer_create(&new_xprt->rx_buf, new_ep, &new_xprt->rx_ia, 370 &new_xprt->rx_data); 371 if (rc) 372 goto out3; 373 374 /* 375 * Register a callback for connection events. This is necessary because 376 * connection loss notification is async. We also catch connection loss 377 * when reaping receives. 378 */ 379 INIT_DELAYED_WORK(&new_xprt->rdma_connect, xprt_rdma_connect_worker); 380 new_ep->rep_func = rpcrdma_conn_func; 381 new_ep->rep_xprt = xprt; 382 383 xprt_rdma_format_addresses(xprt); 384 xprt->max_payload = rpcrdma_max_payload(new_xprt); 385 dprintk("RPC: %s: transport data payload maximum: %zu bytes\n", 386 __func__, xprt->max_payload); 387 388 if (!try_module_get(THIS_MODULE)) 389 goto out4; 390 391 return xprt; 392 393 out4: 394 xprt_rdma_free_addresses(xprt); 395 rc = -EINVAL; 396 out3: 397 rpcrdma_ep_destroy(new_ep, &new_xprt->rx_ia); 398 out2: 399 rpcrdma_ia_close(&new_xprt->rx_ia); 400 out1: 401 xprt_free(xprt); 402 return ERR_PTR(rc); 403 } 404 405 /* 406 * Close a connection, during shutdown or timeout/reconnect 407 */ 408 static void 409 xprt_rdma_close(struct rpc_xprt *xprt) 410 { 411 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 412 413 dprintk("RPC: %s: closing\n", __func__); 414 if (r_xprt->rx_ep.rep_connected > 0) 415 xprt->reestablish_timeout = 0; 416 xprt_disconnect_done(xprt); 417 rpcrdma_ep_disconnect(&r_xprt->rx_ep, &r_xprt->rx_ia); 418 } 419 420 static void 421 xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port) 422 { 423 struct sockaddr_in *sap; 424 425 sap = (struct sockaddr_in *)&xprt->addr; 426 sap->sin_port = htons(port); 427 sap = (struct sockaddr_in *)&rpcx_to_rdmad(xprt).addr; 428 sap->sin_port = htons(port); 429 dprintk("RPC: %s: %u\n", __func__, port); 430 } 431 432 static void 433 xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task) 434 { 435 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 436 437 if (r_xprt->rx_ep.rep_connected != 0) { 438 /* Reconnect */ 439 schedule_delayed_work(&r_xprt->rdma_connect, 440 xprt->reestablish_timeout); 441 xprt->reestablish_timeout <<= 1; 442 if (xprt->reestablish_timeout > RPCRDMA_MAX_REEST_TO) 443 xprt->reestablish_timeout = RPCRDMA_MAX_REEST_TO; 444 else if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO) 445 xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO; 446 } else { 447 schedule_delayed_work(&r_xprt->rdma_connect, 0); 448 if (!RPC_IS_ASYNC(task)) 449 flush_delayed_work(&r_xprt->rdma_connect); 450 } 451 } 452 453 /* 454 * The RDMA allocate/free functions need the task structure as a place 455 * to hide the struct rpcrdma_req, which is necessary for the actual send/recv 456 * sequence. For this reason, the recv buffers are attached to send 457 * buffers for portions of the RPC. Note that the RPC layer allocates 458 * both send and receive buffers in the same call. We may register 459 * the receive buffer portion when using reply chunks. 460 */ 461 static void * 462 xprt_rdma_allocate(struct rpc_task *task, size_t size) 463 { 464 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; 465 struct rpcrdma_req *req, *nreq; 466 467 req = rpcrdma_buffer_get(&rpcx_to_rdmax(xprt)->rx_buf); 468 if (req == NULL) 469 return NULL; 470 471 if (size > req->rl_size) { 472 dprintk("RPC: %s: size %zd too large for buffer[%zd]: " 473 "prog %d vers %d proc %d\n", 474 __func__, size, req->rl_size, 475 task->tk_client->cl_prog, task->tk_client->cl_vers, 476 task->tk_msg.rpc_proc->p_proc); 477 /* 478 * Outgoing length shortage. Our inline write max must have 479 * been configured to perform direct i/o. 480 * 481 * This is therefore a large metadata operation, and the 482 * allocate call was made on the maximum possible message, 483 * e.g. containing long filename(s) or symlink data. In 484 * fact, while these metadata operations *might* carry 485 * large outgoing payloads, they rarely *do*. However, we 486 * have to commit to the request here, so reallocate and 487 * register it now. The data path will never require this 488 * reallocation. 489 * 490 * If the allocation or registration fails, the RPC framework 491 * will (doggedly) retry. 492 */ 493 if (task->tk_flags & RPC_TASK_SWAPPER) 494 nreq = kmalloc(sizeof *req + size, GFP_ATOMIC); 495 else 496 nreq = kmalloc(sizeof *req + size, GFP_NOFS); 497 if (nreq == NULL) 498 goto outfail; 499 500 if (rpcrdma_register_internal(&rpcx_to_rdmax(xprt)->rx_ia, 501 nreq->rl_base, size + sizeof(struct rpcrdma_req) 502 - offsetof(struct rpcrdma_req, rl_base), 503 &nreq->rl_handle, &nreq->rl_iov)) { 504 kfree(nreq); 505 goto outfail; 506 } 507 rpcx_to_rdmax(xprt)->rx_stats.hardway_register_count += size; 508 nreq->rl_size = size; 509 nreq->rl_niovs = 0; 510 nreq->rl_nchunks = 0; 511 nreq->rl_buffer = (struct rpcrdma_buffer *)req; 512 nreq->rl_reply = req->rl_reply; 513 memcpy(nreq->rl_segments, 514 req->rl_segments, sizeof nreq->rl_segments); 515 /* flag the swap with an unused field */ 516 nreq->rl_iov.length = 0; 517 req->rl_reply = NULL; 518 req = nreq; 519 } 520 dprintk("RPC: %s: size %zd, request 0x%p\n", __func__, size, req); 521 req->rl_connect_cookie = 0; /* our reserved value */ 522 return req->rl_xdr_buf; 523 524 outfail: 525 rpcrdma_buffer_put(req); 526 rpcx_to_rdmax(xprt)->rx_stats.failed_marshal_count++; 527 return NULL; 528 } 529 530 /* 531 * This function returns all RDMA resources to the pool. 532 */ 533 static void 534 xprt_rdma_free(void *buffer) 535 { 536 struct rpcrdma_req *req; 537 struct rpcrdma_xprt *r_xprt; 538 struct rpcrdma_rep *rep; 539 int i; 540 541 if (buffer == NULL) 542 return; 543 544 req = container_of(buffer, struct rpcrdma_req, rl_xdr_buf[0]); 545 if (req->rl_iov.length == 0) { /* see allocate above */ 546 r_xprt = container_of(((struct rpcrdma_req *) req->rl_buffer)->rl_buffer, 547 struct rpcrdma_xprt, rx_buf); 548 } else 549 r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf); 550 rep = req->rl_reply; 551 552 dprintk("RPC: %s: called on 0x%p%s\n", 553 __func__, rep, (rep && rep->rr_func) ? " (with waiter)" : ""); 554 555 /* 556 * Finish the deregistration. The process is considered 557 * complete when the rr_func vector becomes NULL - this 558 * was put in place during rpcrdma_reply_handler() - the wait 559 * call below will not block if the dereg is "done". If 560 * interrupted, our framework will clean up. 561 */ 562 for (i = 0; req->rl_nchunks;) { 563 --req->rl_nchunks; 564 i += rpcrdma_deregister_external( 565 &req->rl_segments[i], r_xprt); 566 } 567 568 if (req->rl_iov.length == 0) { /* see allocate above */ 569 struct rpcrdma_req *oreq = (struct rpcrdma_req *)req->rl_buffer; 570 oreq->rl_reply = req->rl_reply; 571 (void) rpcrdma_deregister_internal(&r_xprt->rx_ia, 572 req->rl_handle, 573 &req->rl_iov); 574 kfree(req); 575 req = oreq; 576 } 577 578 /* Put back request+reply buffers */ 579 rpcrdma_buffer_put(req); 580 } 581 582 /* 583 * send_request invokes the meat of RPC RDMA. It must do the following: 584 * 1. Marshal the RPC request into an RPC RDMA request, which means 585 * putting a header in front of data, and creating IOVs for RDMA 586 * from those in the request. 587 * 2. In marshaling, detect opportunities for RDMA, and use them. 588 * 3. Post a recv message to set up asynch completion, then send 589 * the request (rpcrdma_ep_post). 590 * 4. No partial sends are possible in the RPC-RDMA protocol (as in UDP). 591 */ 592 593 static int 594 xprt_rdma_send_request(struct rpc_task *task) 595 { 596 struct rpc_rqst *rqst = task->tk_rqstp; 597 struct rpc_xprt *xprt = rqst->rq_xprt; 598 struct rpcrdma_req *req = rpcr_to_rdmar(rqst); 599 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 600 int rc = 0; 601 602 if (req->rl_niovs == 0) 603 rc = rpcrdma_marshal_req(rqst); 604 else if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR) 605 rc = rpcrdma_marshal_chunks(rqst, 0); 606 if (rc < 0) 607 goto failed_marshal; 608 609 if (req->rl_reply == NULL) /* e.g. reconnection */ 610 rpcrdma_recv_buffer_get(req); 611 612 if (req->rl_reply) { 613 req->rl_reply->rr_func = rpcrdma_reply_handler; 614 /* this need only be done once, but... */ 615 req->rl_reply->rr_xprt = xprt; 616 } 617 618 /* Must suppress retransmit to maintain credits */ 619 if (req->rl_connect_cookie == xprt->connect_cookie) 620 goto drop_connection; 621 req->rl_connect_cookie = xprt->connect_cookie; 622 623 if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req)) 624 goto drop_connection; 625 626 rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len; 627 rqst->rq_bytes_sent = 0; 628 return 0; 629 630 failed_marshal: 631 r_xprt->rx_stats.failed_marshal_count++; 632 dprintk("RPC: %s: rpcrdma_marshal_req failed, status %i\n", 633 __func__, rc); 634 if (rc == -EIO) 635 return -EIO; 636 drop_connection: 637 xprt_disconnect_done(xprt); 638 return -ENOTCONN; /* implies disconnect */ 639 } 640 641 static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 642 { 643 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 644 long idle_time = 0; 645 646 if (xprt_connected(xprt)) 647 idle_time = (long)(jiffies - xprt->last_used) / HZ; 648 649 seq_printf(seq, 650 "\txprt:\trdma %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu " 651 "%lu %lu %lu %Lu %Lu %Lu %Lu %lu %lu %lu\n", 652 653 0, /* need a local port? */ 654 xprt->stat.bind_count, 655 xprt->stat.connect_count, 656 xprt->stat.connect_time, 657 idle_time, 658 xprt->stat.sends, 659 xprt->stat.recvs, 660 xprt->stat.bad_xids, 661 xprt->stat.req_u, 662 xprt->stat.bklog_u, 663 664 r_xprt->rx_stats.read_chunk_count, 665 r_xprt->rx_stats.write_chunk_count, 666 r_xprt->rx_stats.reply_chunk_count, 667 r_xprt->rx_stats.total_rdma_request, 668 r_xprt->rx_stats.total_rdma_reply, 669 r_xprt->rx_stats.pullup_copy_count, 670 r_xprt->rx_stats.fixup_copy_count, 671 r_xprt->rx_stats.hardway_register_count, 672 r_xprt->rx_stats.failed_marshal_count, 673 r_xprt->rx_stats.bad_reply_count); 674 } 675 676 /* 677 * Plumbing for rpc transport switch and kernel module 678 */ 679 680 static struct rpc_xprt_ops xprt_rdma_procs = { 681 .reserve_xprt = xprt_reserve_xprt_cong, 682 .release_xprt = xprt_release_xprt_cong, /* sunrpc/xprt.c */ 683 .alloc_slot = xprt_alloc_slot, 684 .release_request = xprt_release_rqst_cong, /* ditto */ 685 .set_retrans_timeout = xprt_set_retrans_timeout_def, /* ditto */ 686 .rpcbind = rpcb_getport_async, /* sunrpc/rpcb_clnt.c */ 687 .set_port = xprt_rdma_set_port, 688 .connect = xprt_rdma_connect, 689 .buf_alloc = xprt_rdma_allocate, 690 .buf_free = xprt_rdma_free, 691 .send_request = xprt_rdma_send_request, 692 .close = xprt_rdma_close, 693 .destroy = xprt_rdma_destroy, 694 .print_stats = xprt_rdma_print_stats 695 }; 696 697 static struct xprt_class xprt_rdma = { 698 .list = LIST_HEAD_INIT(xprt_rdma.list), 699 .name = "rdma", 700 .owner = THIS_MODULE, 701 .ident = XPRT_TRANSPORT_RDMA, 702 .setup = xprt_setup_rdma, 703 }; 704 705 static void __exit xprt_rdma_cleanup(void) 706 { 707 int rc; 708 709 dprintk("RPCRDMA Module Removed, deregister RPC RDMA transport\n"); 710 #ifdef RPC_DEBUG 711 if (sunrpc_table_header) { 712 unregister_sysctl_table(sunrpc_table_header); 713 sunrpc_table_header = NULL; 714 } 715 #endif 716 rc = xprt_unregister_transport(&xprt_rdma); 717 if (rc) 718 dprintk("RPC: %s: xprt_unregister returned %i\n", 719 __func__, rc); 720 } 721 722 static int __init xprt_rdma_init(void) 723 { 724 int rc; 725 726 rc = xprt_register_transport(&xprt_rdma); 727 728 if (rc) 729 return rc; 730 731 dprintk("RPCRDMA Module Init, register RPC RDMA transport\n"); 732 733 dprintk("Defaults:\n"); 734 dprintk("\tSlots %d\n" 735 "\tMaxInlineRead %d\n\tMaxInlineWrite %d\n", 736 xprt_rdma_slot_table_entries, 737 xprt_rdma_max_inline_read, xprt_rdma_max_inline_write); 738 dprintk("\tPadding %d\n\tMemreg %d\n", 739 xprt_rdma_inline_write_padding, xprt_rdma_memreg_strategy); 740 741 #ifdef RPC_DEBUG 742 if (!sunrpc_table_header) 743 sunrpc_table_header = register_sysctl_table(sunrpc_table); 744 #endif 745 return 0; 746 } 747 748 module_init(xprt_rdma_init); 749 module_exit(xprt_rdma_cleanup); 750