// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a client.
 *
 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 * transport header, post all Write WRs needed for this Reply, then post
 * a Send WR conveying the transport header and the RPC message itself to
 * the client.
 *
 * svc_rdma_sendto must fully transmit the Reply before returning, as
 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 * resources referred to by the svc_rqst are also recycled at that time.
 * Therefore any resources that must remain longer must be detached
 * from the svc_rqst and released later.
 *
 * Page Management
 *
 * The I/O that performs Reply transmission is asynchronous, and may
 * complete well after sendto returns. Thus pages under I/O must be
 * removed from the svc_rqst before sendto returns.
 *
 * The logic here depends on Send Queue and completion ordering. Since
 * the Send WR is always posted last, it will always complete last. Thus
 * when it completes, it is guaranteed that all previous Write WRs have
 * also completed.
 *
 * Write WRs are constructed and posted. Each Write segment gets its own
 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 * DMA-unmap the pages under I/O for that Write segment. The Write
 * completion handler does not release any pages.
 *
 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 * Ownership of all of the Reply's pages is transferred into that
 * ctxt, the Send WR is posted, and sendto returns.
 *
 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
 * Send completion handler finally releases the Reply's pages.
 *
 * This mechanism also assumes that completions on the transport's Send
 * Completion Queue do not run in parallel. Otherwise a Write completion
 * and Send completion running at the same time could release pages that
 * are still DMA-mapped.
 *
 * Error Handling
 *
 * - If the Send WR is posted successfully, it will either complete
 *   successfully, or get flushed. Either way, the Send completion
 *   handler releases the Reply's pages.
 * - If the Send WR cannot be posted, the forward path releases
 *   the Reply's pages.
 *
 * This handles the case, without the use of page reference counting,
 * where two different Write segments send portions of the same page.
 */

#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY		RPCDBG_SVCXPRT

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_send_ctxt *
svc_rdma_next_send_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_send_ctxt,
					sc_list);
}

static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;
	size_t size;
	int i;

	size = sizeof(*ctxt);
	size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
	ctxt = kmalloc(size, GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	ctxt->sc_send_wr.next = NULL;
	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
	ctxt->sc_cqe.done = svc_rdma_wc_send;
	ctxt->sc_xprt_buf = buffer;
	ctxt->sc_sges[0].addr = addr;

	for (i = 0; i < rdma->sc_max_send_sges; i++)
		ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}
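
/* send_ctxts are allocated on demand and cached on sc_send_ctxts.
 * Each one owns a persistently DMA-mapped buffer (sc_xprt_buf) that
 * carries the RPC-over-RDMA transport header; sc_sges[0] always
 * refers to that buffer, and its mapping is torn down only by
 * svc_rdma_send_ctxts_destroy().
 */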

/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts))) {
		list_del(&ctxt->sc_list);
		ib_dma_unmap_single(rdma->sc_pd->device,
				    ctxt->sc_sges[0].addr,
				    rdma->sc_max_req_size,
				    DMA_TO_DEVICE);
		kfree(ctxt->sc_xprt_buf);
		kfree(ctxt);
	}
}

/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	spin_lock(&rdma->sc_send_lock);
	ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts);
	if (!ctxt)
		goto out_empty;
	list_del(&ctxt->sc_list);
	spin_unlock(&rdma->sc_send_lock);

out:
	ctxt->sc_send_wr.num_sge = 0;
	ctxt->sc_cur_sge_no = 0;
	ctxt->sc_page_count = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_send_lock);
	ctxt = svc_rdma_send_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 * Pages left in sc_pages are DMA unmapped and released.
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_device *device = rdma->sc_cm_id->device;
	unsigned int i;

	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++)
		ib_dma_unmap_page(device,
				  ctxt->sc_sges[i].addr,
				  ctxt->sc_sges[i].length,
				  DMA_TO_DEVICE);

	for (i = 0; i < ctxt->sc_page_count; ++i)
		put_page(ctxt->sc_pages[i]);

	spin_lock(&rdma->sc_send_lock);
	list_add(&ctxt->sc_list, &rdma->sc_send_ctxts);
	spin_unlock(&rdma->sc_send_lock);
}

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_send_ctxt *ctxt;

	trace_svcrdma_wc_send(wc);

	atomic_inc(&rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	ctxt = container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
	svc_rdma_send_ctxt_put(rdma, ctxt);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_xprt_enqueue(&rdma->sc_xprt);
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("svcrdma: Send: %s (%u/0x%x)\n",
			       ib_wc_status_msg(wc->status),
			       wc->status, wc->vendor_err);
	}

	svc_xprt_put(&rdma->sc_xprt);
}

/**
 * svc_rdma_send - Post a single Send WR
 * @rdma: transport on which to post the WR
 * @wr: prepared Send WR to post
 *
 * Returns zero if the Send WR was posted successfully. Otherwise, a
 * negative errno is returned.
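 *
 * sc_sq_avail tracks the number of available Send Queue entries: the
 * caller reserves an entry by decrementing the counter, and a Send
 * completion releases it. When no entry is available, the reservation
 * is backed out and the caller sleeps on sc_send_wait until a
 * completion frees one.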
 */
int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr;
	int ret;

	might_sleep();

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		if (atomic_dec_return(&rdma->sc_sq_avail) < 0) {
			atomic_inc(&rdma_stat_sq_starve);
			trace_svcrdma_sq_full(rdma);
			atomic_inc(&rdma->sc_sq_avail);
			wait_event(rdma->sc_send_wait,
				   atomic_read(&rdma->sc_sq_avail) > 1);
			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
				return -ENOTCONN;
			trace_svcrdma_sq_retry(rdma);
			continue;
		}

		svc_xprt_get(&rdma->sc_xprt);
		ret = ib_post_send(rdma->sc_qp, wr, &bad_wr);
		trace_svcrdma_post_send(wr, ret);
		if (ret) {
			set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
			svc_xprt_put(&rdma->sc_xprt);
			wake_up(&rdma->sc_send_wait);
		}
		break;
	}
	return ret;
}

static u32 xdr_padsize(u32 len)
{
	return (len & 3) ? (4 - (len & 3)) : 0;
}

/* Returns length of transport header, in bytes.
 */
static unsigned int svc_rdma_reply_hdr_len(__be32 *rdma_resp)
{
	unsigned int nsegs;
	__be32 *p;

	p = rdma_resp;

	/* RPC-over-RDMA V1 replies never have a Read list. */
	p += rpcrdma_fixed_maxsz + 1;

	/* Skip Write list. */
	while (*p++ != xdr_zero) {
		nsegs = be32_to_cpup(p++);
		p += nsegs * rpcrdma_segment_maxsz;
	}

	/* Skip Reply chunk. */
	if (*p++ != xdr_zero) {
		nsegs = be32_to_cpup(p++);
		p += nsegs * rpcrdma_segment_maxsz;
	}

	return (unsigned long)p - (unsigned long)rdma_resp;
}

/* One Write chunk is copied from Call transport header to Reply
 * transport header. Each segment's length field is updated to
 * reflect number of bytes consumed in the segment.
 *
 * Returns number of segments in this chunk.
 */
static unsigned int xdr_encode_write_chunk(__be32 *dst, __be32 *src,
					   unsigned int remaining)
{
	unsigned int i, nsegs;
	u32 seg_len;

	/* Write list discriminator */
	*dst++ = *src++;

	/* number of segments in this chunk */
	nsegs = be32_to_cpup(src);
	*dst++ = *src++;

	for (i = nsegs; i; i--) {
		/* segment's RDMA handle */
		*dst++ = *src++;

		/* bytes returned in this segment */
		seg_len = be32_to_cpu(*src);
		if (remaining >= seg_len) {
			/* entire segment was consumed */
			*dst = *src;
			remaining -= seg_len;
		} else {
			/* segment only partly filled */
			*dst = cpu_to_be32(remaining);
			remaining = 0;
		}
		dst++; src++;

		/* segment's RDMA offset */
		*dst++ = *src++;
		*dst++ = *src++;
	}

	return nsegs;
}

/* The client provided a Write list in the Call message. Fill in
 * the segments in the first Write chunk in the Reply's transport
 * header with the number of bytes consumed in each segment.
 * Remaining chunks are returned unused.
 *
 * Assumptions:
 *  - Client has provided only one Write chunk
 */
static void svc_rdma_xdr_encode_write_list(__be32 *rdma_resp, __be32 *wr_ch,
					   unsigned int consumed)
{
	unsigned int nsegs;
	__be32 *p, *q;

	/* RPC-over-RDMA V1 replies never have a Read list. */
	p = rdma_resp + rpcrdma_fixed_maxsz + 1;

	q = wr_ch;
	while (*q != xdr_zero) {
		nsegs = xdr_encode_write_chunk(p, q, consumed);
		q += 2 + nsegs * rpcrdma_segment_maxsz;
		p += 2 + nsegs * rpcrdma_segment_maxsz;
		consumed = 0;
	}

	/* Terminate Write list */
	*p++ = xdr_zero;

	/* Reply chunk discriminator; may be replaced later */
	*p = xdr_zero;
}

/* The client provided a Reply chunk in the Call message. Fill in
 * the segments in the Reply chunk in the Reply message with the
 * number of bytes consumed in each segment.
 *
 * Assumptions:
 * - Reply can always fit in the provided Reply chunk
 */
static void svc_rdma_xdr_encode_reply_chunk(__be32 *rdma_resp, __be32 *rp_ch,
					    unsigned int consumed)
{
	__be32 *p;

	/* Find the Reply chunk in the Reply's xprt header.
	 * RPC-over-RDMA V1 replies never have a Read list.
	 */
	p = rdma_resp + rpcrdma_fixed_maxsz + 1;

	/* Skip past Write list */
	while (*p++ != xdr_zero)
		p += 1 + be32_to_cpup(p) * rpcrdma_segment_maxsz;

	xdr_encode_write_chunk(p, rp_ch, consumed);
}

/* Parse the RPC Call's transport header.
 */
static void svc_rdma_get_write_arrays(__be32 *rdma_argp,
				      __be32 **write, __be32 **reply)
{
	__be32 *p;

	p = rdma_argp + rpcrdma_fixed_maxsz;

	/* Read list */
	while (*p++ != xdr_zero)
		p += 5;

	/* Write list */
	if (*p != xdr_zero) {
		*write = p;
		while (*p++ != xdr_zero)
			p += 1 + be32_to_cpu(*p) * 4;
	} else {
		*write = NULL;
		p++;
	}

	/* Reply chunk */
	if (*p != xdr_zero)
		*reply = p;
	else
		*reply = NULL;
}

/* RPC-over-RDMA Version One private extension: Remote Invalidation.
 * Responder's choice: requester signals it can handle Send With
 * Invalidate, and responder chooses one rkey to invalidate.
 *
 * Find a candidate rkey to invalidate when sending a reply. Picks the
 * first R_key it finds in the chunk lists.
 *
 * Returns zero if RPC's chunk lists are empty.
 */
static u32 svc_rdma_get_inv_rkey(__be32 *rdma_argp,
				 __be32 *wr_lst, __be32 *rp_ch)
{
	__be32 *p;

	p = rdma_argp + rpcrdma_fixed_maxsz;
	if (*p != xdr_zero)
		p += 2;
	else if (wr_lst && be32_to_cpup(wr_lst + 1))
		p = wr_lst + 2;
	else if (rp_ch && be32_to_cpup(rp_ch + 1))
		p = rp_ch + 2;
	else
		return 0;
	return be32_to_cpup(p);
}

static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
				 struct svc_rdma_send_ctxt *ctxt,
				 struct page *page,
				 unsigned long offset,
				 unsigned int len)
{
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

	ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
	ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
	ctxt->sc_send_wr.num_sge++;
	return 0;

out_maperr:
	trace_svcrdma_dma_map_page(rdma, page);
	return -EIO;
}

/* ib_dma_map_page() is used here because svc_rdma_send_ctxt_put()
 * handles DMA-unmap and it uses ib_dma_unmap_page() exclusively.
 */
static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma,
				struct svc_rdma_send_ctxt *ctxt,
				unsigned char *base,
				unsigned int len)
{
	return svc_rdma_dma_map_page(rdma, ctxt, virt_to_page(base),
				     offset_in_page(base), len);
}

/**
 * svc_rdma_sync_reply_hdr - DMA sync the transport header buffer
 * @rdma: controlling transport
 * @ctxt: send_ctxt for the Send WR
 * @len: length of transport header
 *
 */
void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
			     struct svc_rdma_send_ctxt *ctxt,
			     unsigned int len)
{
	ctxt->sc_sges[0].length = len;
	ctxt->sc_send_wr.num_sge++;
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      ctxt->sc_sges[0].addr, len,
				      DMA_TO_DEVICE);
}

/**
 * svc_rdma_map_reply_msg - Map the buffer holding RPC message
 * @rdma: controlling transport
 * @ctxt: send_ctxt for the Send WR
 * @xdr: prepared xdr_buf containing RPC message
 * @wr_lst: pointer to Call header's Write list, or NULL
 *
 * Load the xdr_buf into the ctxt's sge array, and DMA map each
 * element as it is added.
 *
 * Returns zero on success, or a negative errno on failure.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
			   struct svc_rdma_send_ctxt *ctxt,
			   struct xdr_buf *xdr, __be32 *wr_lst)
{
	unsigned int len, remaining;
	unsigned long page_off;
	struct page **ppages;
	unsigned char *base;
	u32 xdr_pad;
	int ret;

	if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
		return -EIO;
	ret = svc_rdma_dma_map_buf(rdma, ctxt,
				   xdr->head[0].iov_base,
				   xdr->head[0].iov_len);
	if (ret < 0)
		return ret;

	/* If a Write chunk is present, the xdr_buf's page list
	 * is not included inline. However the Upper Layer may
	 * have added XDR padding in the tail buffer, and that
	 * should not be included inline.
	 */
	if (wr_lst) {
		base = xdr->tail[0].iov_base;
		len = xdr->tail[0].iov_len;
		xdr_pad = xdr_padsize(xdr->page_len);

		if (len && xdr_pad) {
			base += xdr_pad;
			len -= xdr_pad;
		}

		goto tail;
	}

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	page_off = xdr->page_base & ~PAGE_MASK;
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - page_off, remaining);

		if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
			return -EIO;
		ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
					    page_off, len);
		if (ret < 0)
			return ret;

		remaining -= len;
		page_off = 0;
	}

	base = xdr->tail[0].iov_base;
	len = xdr->tail[0].iov_len;
tail:
	if (len) {
		if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
			return -EIO;
		ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* The svc_rqst and all resources it owns are released as soon as
 * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
 * so they are released by the Send completion handler.
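 *
 * The rq_respages entries are cleared here so that the generic svc
 * code does not also release these pages when the svc_rqst is
 * recycled.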
 */
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
				   struct svc_rdma_send_ctxt *ctxt)
{
	int i, pages = rqstp->rq_next_page - rqstp->rq_respages;

	ctxt->sc_page_count += pages;
	for (i = 0; i < pages; i++) {
		ctxt->sc_pages[i] = rqstp->rq_respages[i];
		rqstp->rq_respages[i] = NULL;
	}
	rqstp->rq_next_page = rqstp->rq_respages + 1;
}

/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * RDMA Send is the last step of transmitting an RPC reply. Pages
 * involved in the earlier RDMA Writes are here transferred out
 * of the rqstp and into the ctxt's page array. These pages are
 * DMA unmapped by each Write completion, but the subsequent Send
 * completion finally releases these pages.
 *
 * Assumptions:
 * - The Reply's transport header will never be larger than a page.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *ctxt,
				   __be32 *rdma_argp,
				   struct svc_rqst *rqstp,
				   __be32 *wr_lst, __be32 *rp_ch)
{
	int ret;

	if (!rp_ch) {
		ret = svc_rdma_map_reply_msg(rdma, ctxt,
					     &rqstp->rq_res, wr_lst);
		if (ret < 0)
			return ret;
	}

	svc_rdma_save_io_pages(rqstp, ctxt);

	ctxt->sc_send_wr.opcode = IB_WR_SEND;
	if (rdma->sc_snd_w_inv) {
		ctxt->sc_send_wr.ex.invalidate_rkey =
			svc_rdma_get_inv_rkey(rdma_argp, wr_lst, rp_ch);
		if (ctxt->sc_send_wr.ex.invalidate_rkey)
			ctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
	}
	dprintk("svcrdma: posting Send WR with %u sge(s)\n",
		ctxt->sc_send_wr.num_sge);
	return svc_rdma_send(rdma, &ctxt->sc_send_wr);
}

/* Given the client-provided Write and Reply chunks, the server was not
 * able to form a complete reply. Return an RDMA_ERROR message so the
 * client can retire this RPC transaction. As above, the Send completion
 * routine releases payload pages that were part of a previous RDMA Write.
 *
 * Remote Invalidation is skipped for simplicity.
 */
static int svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *ctxt,
				   struct svc_rqst *rqstp)
{
	__be32 *p;
	int ret;

	p = ctxt->sc_xprt_buf;
	trace_svcrdma_err_chunk(*p);
	p += 3;
	*p++ = rdma_error;
	*p = err_chunk;
	svc_rdma_sync_reply_hdr(rdma, ctxt, RPCRDMA_HDRLEN_ERR);

	svc_rdma_save_io_pages(rqstp, ctxt);

	ctxt->sc_send_wr.opcode = IB_WR_SEND;
	ret = svc_rdma_send(rdma, &ctxt->sc_send_wr);
	if (ret) {
		svc_rdma_send_ctxt_put(rdma, ctxt);
		return ret;
	}

	return 0;
}

void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}

/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
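 *
 * The Reply is transmitted in up to three steps: RDMA Writes that
 * move payload into the client-provided Write chunk, RDMA Writes that
 * move the whole Reply into the client-provided Reply chunk, and
 * finally a single RDMA Send that conveys the transport header along
 * with whatever portion of the RPC message was not already moved by
 * RDMA Write.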
 *
 * Returns:
 *	%0 if an RPC reply has been successfully posted,
 *	%-ENOMEM if a resource shortage occurred (connection is lost),
 *	%-ENOTCONN if posting failed (connection is lost).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *p, *rdma_argp, *rdma_resp, *wr_lst, *rp_ch;
	struct xdr_buf *xdr = &rqstp->rq_res;
	struct svc_rdma_send_ctxt *sctxt;
	int ret;

	rdma_argp = rctxt->rc_recv_buf;
	svc_rdma_get_write_arrays(rdma_argp, &wr_lst, &rp_ch);

	/* Create the RDMA response header. xprt->xpt_mutex,
	 * acquired in svc_send(), serializes RPC replies. The
	 * code path below that inserts the credit grant value
	 * into each transport header runs only inside this
	 * critical section.
	 */
	ret = -ENOMEM;
	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		goto err0;
	rdma_resp = sctxt->sc_xprt_buf;

	p = rdma_resp;
	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p++ = rp_ch ? rdma_nomsg : rdma_msg;

	/* Start with empty chunks */
	*p++ = xdr_zero;
	*p++ = xdr_zero;
	*p = xdr_zero;

	if (wr_lst) {
		/* XXX: Presume the client sent only one Write chunk */
		ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr);
		if (ret < 0)
			goto err2;
		svc_rdma_xdr_encode_write_list(rdma_resp, wr_lst, ret);
	}
	if (rp_ch) {
		ret = svc_rdma_send_reply_chunk(rdma, rp_ch, wr_lst, xdr);
		if (ret < 0)
			goto err2;
		svc_rdma_xdr_encode_reply_chunk(rdma_resp, rp_ch, ret);
	}

	svc_rdma_sync_reply_hdr(rdma, sctxt, svc_rdma_reply_hdr_len(rdma_resp));
	ret = svc_rdma_send_reply_msg(rdma, sctxt, rdma_argp, rqstp,
				      wr_lst, rp_ch);
	if (ret < 0)
		goto err1;
	ret = 0;

out:
	rqstp->rq_xprt_ctxt = NULL;
	svc_rdma_recv_ctxt_put(rdma, rctxt);
	return ret;

err2:
	if (ret != -E2BIG && ret != -EINVAL)
		goto err1;

	ret = svc_rdma_send_error_msg(rdma, sctxt, rqstp);
	if (ret < 0)
		goto err1;
	ret = 0;
	goto out;

err1:
	svc_rdma_send_ctxt_put(rdma, sctxt);
err0:
	trace_svcrdma_send_failed(rqstp, ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	ret = -ENOTCONN;
	goto out;
}