// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_recvfrom. This is called from
 * svc_recv when the transport indicates there is incoming data to
 * be read. "Data Ready" is signaled when an RDMA Receive completes,
 * or when a set of RDMA Reads complete.
 *
 * An svc_rqst is passed in. This structure contains an array of
 * free pages (rq_pages) that will contain the incoming RPC message.
 *
 * Short messages are moved directly into svc_rqst::rq_arg, and
 * the RPC Call is ready to be processed by the Upper Layer.
 * svc_rdma_recvfrom returns the length of the RPC Call message,
 * completing the reception of the RPC Call.
 *
 * However, when an incoming message has Read chunks,
 * svc_rdma_recvfrom must post RDMA Reads to pull the RPC Call's
 * data payload from the client. svc_rdma_recvfrom sets up the
 * RDMA Reads using pages in svc_rqst::rq_pages, which are
 * transferred to an svc_rdma_recv_ctxt for the duration of the
 * I/O. svc_rdma_recvfrom then returns zero, since the RPC message
 * is not yet ready.
 *
 * When the Read chunk payloads have become available on the
 * server, "Data Ready" is raised again, and svc_recv calls
 * svc_rdma_recvfrom again. This second call may use a different
 * svc_rqst than the first one, so any information that needs
 * to be preserved across these two calls is kept in an
 * svc_rdma_recv_ctxt.
 *
 * The second call to svc_rdma_recvfrom performs final assembly
 * of the RPC Call message, using the RDMA Read sink pages kept in
 * the svc_rdma_recv_ctxt. The xdr_buf is copied from the
 * svc_rdma_recv_ctxt to the second svc_rqst. The second call returns
 * the length of the completed RPC Call message.
 *
 * Page Management
 *
 * Pages under I/O must be transferred from the first svc_rqst to an
 * svc_rdma_recv_ctxt before the first svc_rdma_recvfrom call returns.
 *
 * The first svc_rqst supplies pages for RDMA Reads. These are moved
 * from rqstp::rq_pages into ctxt::pages. The consumed elements of
 * the rq_pages array are set to NULL and refilled before the first
 * svc_rdma_recvfrom call returns.
 *
 * During the second svc_rdma_recvfrom call, RDMA Read sink pages
 * are transferred from the svc_rdma_recv_ctxt to the second svc_rqst.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);

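/* Return the first recv_ctxt on @list, or NULL if @list is empty.
 * The ctxt is left on the list; callers that consume it must also
 * remove it.
 */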
static inline struct svc_rdma_recv_ctxt *
svc_rdma_next_recv_ctxt(struct list_head *list)
{
        return list_first_entry_or_null(list, struct svc_rdma_recv_ctxt,
                                        rc_list);
}

static void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,
                                   struct rpc_rdma_cid *cid)
{
        cid->ci_queue_id = rdma->sc_rq_cq->res.id;
        cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
{
        int node = ibdev_to_node(rdma->sc_cm_id->device);
        struct svc_rdma_recv_ctxt *ctxt;
        dma_addr_t addr;
        void *buffer;

        ctxt = kmalloc_node(sizeof(*ctxt), GFP_KERNEL, node);
        if (!ctxt)
                goto fail0;
        buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node);
        if (!buffer)
                goto fail1;
        addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
                                 rdma->sc_max_req_size, DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
                goto fail2;

        svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid);
        pcl_init(&ctxt->rc_call_pcl);
        pcl_init(&ctxt->rc_read_pcl);
        pcl_init(&ctxt->rc_write_pcl);
        pcl_init(&ctxt->rc_reply_pcl);

        ctxt->rc_recv_wr.next = NULL;
        ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
        ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
        ctxt->rc_recv_wr.num_sge = 1;
        ctxt->rc_cqe.done = svc_rdma_wc_receive;
        ctxt->rc_recv_sge.addr = addr;
        ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
        ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
        ctxt->rc_recv_buf = buffer;
        return ctxt;

fail2:
        kfree(buffer);
fail1:
        kfree(ctxt);
fail0:
        return NULL;
}

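/* Release a recv_ctxt: unmap its Receive buffer and free its memory. */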
static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
                                       struct svc_rdma_recv_ctxt *ctxt)
{
        ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
                            ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
        kfree(ctxt->rc_recv_buf);
        kfree(ctxt);
}

/**
 * svc_rdma_recv_ctxts_destroy - Release all recv_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_recv_ctxt *ctxt;
        struct llist_node *node;

        while ((node = llist_del_first(&rdma->sc_recv_ctxts))) {
                ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
                svc_rdma_recv_ctxt_destroy(rdma, ctxt);
        }
}

/**
 * svc_rdma_recv_ctxt_get - Allocate a recv_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a recv_ctxt or (rarely) NULL if none are available.
 */
struct svc_rdma_recv_ctxt *svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_recv_ctxt *ctxt;
        struct llist_node *node;

        node = llist_del_first(&rdma->sc_recv_ctxts);
        if (!node)
                goto out_empty;
        ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);

out:
        ctxt->rc_page_count = 0;
        return ctxt;

out_empty:
        ctxt = svc_rdma_recv_ctxt_alloc(rdma);
        if (!ctxt)
                return NULL;
        goto out;
}

/**
 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 */
void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
                            struct svc_rdma_recv_ctxt *ctxt)
{
        pcl_free(&ctxt->rc_call_pcl);
        pcl_free(&ctxt->rc_read_pcl);
        pcl_free(&ctxt->rc_write_pcl);
        pcl_free(&ctxt->rc_reply_pcl);

        llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
}

/**
 * svc_rdma_release_ctxt - Release transport-specific per-rqst resources
 * @xprt: the transport which owned the context
 * @vctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
 *
 * Ensure that the recv_ctxt is released whether or not a Reply
 * was sent. For example, the client could close the connection,
 * or svc_process could drop an RPC, before the Reply is sent.
 */
void svc_rdma_release_ctxt(struct svc_xprt *xprt, void *vctxt)
{
        struct svc_rdma_recv_ctxt *ctxt = vctxt;
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);

        if (ctxt)
                svc_rdma_recv_ctxt_put(rdma, ctxt);
}

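/* Post a chain of up to @wanted fresh Receive WRs on @rdma's Receive
 * Queue. Returns false if the transport is closing, if no recv_ctxts
 * are available, or if posting fails. On a posting failure, ctxts
 * whose Receives were not posted are returned to the free list.
 */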
static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma,
                                   unsigned int wanted)
{
        const struct ib_recv_wr *bad_wr = NULL;
        struct svc_rdma_recv_ctxt *ctxt;
        struct ib_recv_wr *recv_chain;
        int ret;

        if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
                return false;

        recv_chain = NULL;
        while (wanted--) {
                ctxt = svc_rdma_recv_ctxt_get(rdma);
                if (!ctxt)
                        break;

                trace_svcrdma_post_recv(ctxt);
                ctxt->rc_recv_wr.next = recv_chain;
                recv_chain = &ctxt->rc_recv_wr;
                rdma->sc_pending_recvs++;
        }
        if (!recv_chain)
                return false;

        ret = ib_post_recv(rdma->sc_qp, recv_chain, &bad_wr);
        if (ret)
                goto err_free;
        return true;

err_free:
        trace_svcrdma_rq_post_err(rdma, ret);
        while (bad_wr) {
                ctxt = container_of(bad_wr, struct svc_rdma_recv_ctxt,
                                    rc_recv_wr);
                bad_wr = bad_wr->next;
                svc_rdma_recv_ctxt_put(rdma, ctxt);
        }
        /* Since we're destroying the xprt, no need to reset
         * sc_pending_recvs.
         */
        return false;
}

/**
 * svc_rdma_post_recvs - Post initial set of Recv WRs
 * @rdma: fresh svcxprt_rdma
 *
 * Returns true if successful, otherwise false.
 */
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
        return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests);
}

/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
        struct svcxprt_rdma *rdma = cq->cq_context;
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_recv_ctxt *ctxt;

        rdma->sc_pending_recvs--;

        /* WARNING: Only wc->wr_cqe and wc->status are reliable */
        ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);

        if (wc->status != IB_WC_SUCCESS)
                goto flushed;
        trace_svcrdma_wc_recv(wc, &ctxt->rc_cid);

        /* If receive posting fails, the connection is about to be
         * lost anyway. The server will not be able to send a reply
         * for this RPC, and the client will retransmit this RPC
         * anyway when it reconnects.
         *
         * Therefore we drop the Receive, even if status was SUCCESS,
         * to reduce the likelihood of replayed requests once the
         * client reconnects.
         */
        if (rdma->sc_pending_recvs < rdma->sc_max_requests)
                if (!svc_rdma_refresh_recvs(rdma, rdma->sc_recv_batch))
                        goto dropped;

        /* All wc fields are now known to be valid */
        ctxt->rc_byte_len = wc->byte_len;

        spin_lock(&rdma->sc_rq_dto_lock);
        list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
        /* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
        set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
        spin_unlock(&rdma->sc_rq_dto_lock);
        if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
                svc_xprt_enqueue(&rdma->sc_xprt);
        return;

flushed:
        if (wc->status == IB_WC_WR_FLUSH_ERR)
                trace_svcrdma_wc_recv_flush(wc, &ctxt->rc_cid);
        else
                trace_svcrdma_wc_recv_err(wc, &ctxt->rc_cid);
dropped:
        svc_rdma_recv_ctxt_put(rdma, ctxt);
        svc_xprt_deferred_close(&rdma->sc_xprt);
}

/**
 * svc_rdma_flush_recv_queues - Drain pending Receive work
 * @rdma: svcxprt_rdma being shut down
 *
 */
void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_recv_ctxt *ctxt;

        while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
                list_del(&ctxt->rc_list);
                svc_rdma_recv_ctxt_put(rdma, ctxt);
        }
}

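/* Set up rqstp->rq_arg to describe the inline content of a completed
 * Receive: the head iovec covers the received bytes in the Receive
 * buffer, and the page list and tail are empty.
 */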
static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
                                   struct svc_rdma_recv_ctxt *ctxt)
{
        struct xdr_buf *arg = &rqstp->rq_arg;

        arg->head[0].iov_base = ctxt->rc_recv_buf;
        arg->head[0].iov_len = ctxt->rc_byte_len;
        arg->tail[0].iov_base = NULL;
        arg->tail[0].iov_len = 0;
        arg->page_len = 0;
        arg->page_base = 0;
        arg->buflen = ctxt->rc_byte_len;
        arg->len = ctxt->rc_byte_len;
}

/**
 * xdr_count_read_segments - Count number of Read segments in Read list
 * @rctxt: Ingress receive context
 * @p: Start of an un-decoded Read list
 *
 * Before allocating anything, ensure the ingress Read list is safe
 * to use.
 *
 * The segment count is limited to how many segments can fit in the
 * transport header without overflowing the buffer. That's about 40
 * Read segments for a 1KB inline threshold.
 *
 * Return values:
 *   %true: Read list is valid. @rctxt's xdr_stream is updated to
 *          point to the first byte past the Read list. rc_read_pcl
 *          and rc_call_pcl cl_count fields are set to the number of
 *          Read segments in the list.
 *   %false: Read list is corrupt. @rctxt's xdr_stream is left in an
 *          unknown state.
 */
static bool xdr_count_read_segments(struct svc_rdma_recv_ctxt *rctxt, __be32 *p)
{
        rctxt->rc_call_pcl.cl_count = 0;
        rctxt->rc_read_pcl.cl_count = 0;
        while (xdr_item_is_present(p)) {
                u32 position, handle, length;
                u64 offset;

                p = xdr_inline_decode(&rctxt->rc_stream,
                                      rpcrdma_readseg_maxsz * sizeof(*p));
                if (!p)
                        return false;

                xdr_decode_read_segment(p, &position, &handle,
                                        &length, &offset);
                if (position) {
                        if (position & 3)
                                return false;
                        ++rctxt->rc_read_pcl.cl_count;
                } else {
                        ++rctxt->rc_call_pcl.cl_count;
                }

                p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
                if (!p)
                        return false;
        }
        return true;
}

/* Sanity check the Read list.
 *
 * Sanity checks:
 * - Read list does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Read list is valid. @rctxt's xdr_stream is updated
 *          to point to the first byte past the Read list.
 *   %false: Read list is corrupt. @rctxt's xdr_stream is left
 *          in an unknown state.
 */
static bool xdr_check_read_list(struct svc_rdma_recv_ctxt *rctxt)
{
        __be32 *p;

        p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
        if (!p)
                return false;
        if (!xdr_count_read_segments(rctxt, p))
                return false;
        if (!pcl_alloc_call(rctxt, p))
                return false;
        return pcl_alloc_read(rctxt, p);
}

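/* Decode the segment count of one ingress Write chunk, then verify
 * that all of the chunk's segments fit within the Receive buffer.
 */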
static bool xdr_check_write_chunk(struct svc_rdma_recv_ctxt *rctxt)
{
        u32 segcount;
        __be32 *p;

        if (xdr_stream_decode_u32(&rctxt->rc_stream, &segcount))
                return false;

        /* A bogus segcount causes this buffer overflow check to fail. */
        p = xdr_inline_decode(&rctxt->rc_stream,
                              segcount * rpcrdma_segment_maxsz * sizeof(*p));
        return p != NULL;
}

/**
 * xdr_count_write_chunks - Count number of Write chunks in Write list
 * @rctxt: Received header and decoding state
 * @p: start of an un-decoded Write list
 *
 * Before allocating anything, ensure the ingress Write list is
 * safe to use.
 *
 * Return values:
 *   %true: Write list is valid. @rctxt's xdr_stream is updated
 *          to point to the first byte past the Write list, and
 *          the number of Write chunks is in rc_write_pcl.cl_count.
 *   %false: Write list is corrupt. @rctxt's xdr_stream is left
 *          in an indeterminate state.
 */
static bool xdr_count_write_chunks(struct svc_rdma_recv_ctxt *rctxt, __be32 *p)
{
        rctxt->rc_write_pcl.cl_count = 0;
        while (xdr_item_is_present(p)) {
                if (!xdr_check_write_chunk(rctxt))
                        return false;
                ++rctxt->rc_write_pcl.cl_count;
                p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
                if (!p)
                        return false;
        }
        return true;
}

/* Sanity check the Write list.
 *
 * Implementation limits:
 * - This implementation currently supports only one Write chunk.
 *
 * Sanity checks:
 * - Write list does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Write list is valid. @rctxt's xdr_stream is updated
 *          to point to the first byte past the Write list.
 *   %false: Write list is corrupt. @rctxt's xdr_stream is left
 *          in an unknown state.
 */
static bool xdr_check_write_list(struct svc_rdma_recv_ctxt *rctxt)
{
        __be32 *p;

        p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
        if (!p)
                return false;
        if (!xdr_count_write_chunks(rctxt, p))
                return false;
        if (!pcl_alloc_write(rctxt, &rctxt->rc_write_pcl, p))
                return false;

        rctxt->rc_cur_result_payload = pcl_first_chunk(&rctxt->rc_write_pcl);
        return true;
}

/* Sanity check the Reply chunk.
 *
 * Sanity checks:
 * - Reply chunk does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Reply chunk is valid. @rctxt's xdr_stream is updated
 *          to point to the first byte past the Reply chunk.
 *   %false: Reply chunk is corrupt. @rctxt's xdr_stream is left
 *          in an unknown state.
 */
static bool xdr_check_reply_chunk(struct svc_rdma_recv_ctxt *rctxt)
{
        __be32 *p;

        p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
        if (!p)
                return false;

        if (!xdr_item_is_present(p))
                return true;
        if (!xdr_check_write_chunk(rctxt))
                return false;

        rctxt->rc_reply_pcl.cl_count = 1;
        return pcl_alloc_write(rctxt, &rctxt->rc_reply_pcl, p);
}

/* RPC-over-RDMA Version One private extension: Remote Invalidation.
 * Responder's choice: requester signals it can handle Send With
 * Invalidate, and responder chooses one R_key to invalidate.
 *
 * If there is exactly one distinct R_key in the received transport
 * header, set rc_inv_rkey to that R_key. Otherwise, set it to zero.
 */
static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
                                  struct svc_rdma_recv_ctxt *ctxt)
{
        struct svc_rdma_segment *segment;
        struct svc_rdma_chunk *chunk;
        u32 inv_rkey;

        ctxt->rc_inv_rkey = 0;

        if (!rdma->sc_snd_w_inv)
                return;

        inv_rkey = 0;
        pcl_for_each_chunk(chunk, &ctxt->rc_call_pcl) {
                pcl_for_each_segment(segment, chunk) {
                        if (inv_rkey == 0)
                                inv_rkey = segment->rs_handle;
                        else if (inv_rkey != segment->rs_handle)
                                return;
                }
        }
        pcl_for_each_chunk(chunk, &ctxt->rc_read_pcl) {
                pcl_for_each_segment(segment, chunk) {
                        if (inv_rkey == 0)
                                inv_rkey = segment->rs_handle;
                        else if (inv_rkey != segment->rs_handle)
                                return;
                }
        }
        pcl_for_each_chunk(chunk, &ctxt->rc_write_pcl) {
                pcl_for_each_segment(segment, chunk) {
                        if (inv_rkey == 0)
                                inv_rkey = segment->rs_handle;
                        else if (inv_rkey != segment->rs_handle)
                                return;
                }
        }
        pcl_for_each_chunk(chunk, &ctxt->rc_reply_pcl) {
                pcl_for_each_segment(segment, chunk) {
                        if (inv_rkey == 0)
                                inv_rkey = segment->rs_handle;
                        else if (inv_rkey != segment->rs_handle)
                                return;
                }
        }
        ctxt->rc_inv_rkey = inv_rkey;
}

/**
 * svc_rdma_xdr_decode_req - Decode the transport header
 * @rq_arg: xdr_buf containing ingress RPC/RDMA message
 * @rctxt: state of decoding
 *
 * On entry, xdr->head[0].iov_base points to first byte of the
 * RPC-over-RDMA transport header.
 *
 * On successful exit, head[0] points to first byte past the
 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
 *
 * The length of the RPC-over-RDMA header is returned.
 *
 * Assumptions:
 * - The transport header is entirely contained in the head iovec.
 */
static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg,
                                   struct svc_rdma_recv_ctxt *rctxt)
{
        __be32 *p, *rdma_argp;
        unsigned int hdr_len;

        rdma_argp = rq_arg->head[0].iov_base;
        xdr_init_decode(&rctxt->rc_stream, rq_arg, rdma_argp, NULL);

        p = xdr_inline_decode(&rctxt->rc_stream,
                              rpcrdma_fixed_maxsz * sizeof(*p));
        if (unlikely(!p))
                goto out_short;
        p++;            /* Step over the XID */
        if (*p != rpcrdma_version)
                goto out_version;
        p += 2;         /* Step over the version and credits fields */
        rctxt->rc_msgtype = *p;
        switch (rctxt->rc_msgtype) {
        case rdma_msg:
                break;
        case rdma_nomsg:
                break;
        case rdma_done:
                goto out_drop;
        case rdma_error:
                goto out_drop;
        default:
                goto out_proc;
        }

        if (!xdr_check_read_list(rctxt))
                goto out_inval;
        if (!xdr_check_write_list(rctxt))
                goto out_inval;
        if (!xdr_check_reply_chunk(rctxt))
                goto out_inval;

        rq_arg->head[0].iov_base = rctxt->rc_stream.p;
        hdr_len = xdr_stream_pos(&rctxt->rc_stream);
        rq_arg->head[0].iov_len -= hdr_len;
        rq_arg->len -= hdr_len;
        trace_svcrdma_decode_rqst(rctxt, rdma_argp, hdr_len);
        return hdr_len;

out_short:
        trace_svcrdma_decode_short_err(rctxt, rq_arg->len);
        return -EINVAL;

out_version:
        trace_svcrdma_decode_badvers_err(rctxt, rdma_argp);
        return -EPROTONOSUPPORT;

out_drop:
        trace_svcrdma_decode_drop_err(rctxt, rdma_argp);
        return 0;

out_proc:
        trace_svcrdma_decode_badproc_err(rctxt, rdma_argp);
        return -EINVAL;

out_inval:
        trace_svcrdma_decode_parse_err(rctxt, rdma_argp);
        return -EINVAL;
}

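/* Attempt to send an error response telling the client why its
 * transport header could not be processed. If no send_ctxt is
 * available, no response is sent.
 */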
static void svc_rdma_send_error(struct svcxprt_rdma *rdma,
                                struct svc_rdma_recv_ctxt *rctxt,
                                int status)
{
        struct svc_rdma_send_ctxt *sctxt;

        sctxt = svc_rdma_send_ctxt_get(rdma);
        if (!sctxt)
                return;
        svc_rdma_send_error_msg(rdma, sctxt, rctxt, status);
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool svc_rdma_is_reverse_direction_reply(struct svc_xprt *xprt,
                                                struct svc_rdma_recv_ctxt *rctxt)
{
        __be32 *p = rctxt->rc_recv_buf;

        if (!xprt->xpt_bc_xprt)
                return false;

        if (rctxt->rc_msgtype != rdma_msg)
                return false;

        if (!pcl_is_empty(&rctxt->rc_call_pcl))
                return false;
        if (!pcl_is_empty(&rctxt->rc_read_pcl))
                return false;
        if (!pcl_is_empty(&rctxt->rc_write_pcl))
                return false;
        if (!pcl_is_empty(&rctxt->rc_reply_pcl))
                return false;

        /* RPC call direction */
        if (*(p + 8) == cpu_to_be32(RPC_CALL))
                return false;

        return true;
}

/**
 * svc_rdma_recvfrom - Receive an RPC call
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Returns:
 *      The positive number of bytes in the RPC Call message,
 *      %0 if there were no Calls ready to return,
 *      %-EINVAL if the Read chunk data is too large,
 *      %-ENOMEM if rdma_rw context pool was exhausted,
 *      %-ENOTCONN if posting failed (connection is lost),
 *      %-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Called in a loop when XPT_DATA is set. XPT_DATA is cleared only
 * when there are no remaining ctxts to process.
 *
 * The next ctxt is removed from the "receive" lists.
 *
 * - If the ctxt completes a Receive, then construct the Call
 *   message from the contents of the Receive buffer.
 *
 *   - If there are no Read chunks in this message, then finish
 *     assembling the Call message and return the number of bytes
 *     in the message.
 *
 *   - If there are Read chunks in this message, post Read WRs to
 *     pull that payload. When the Read WRs complete, build the
 *     full message and return the number of bytes in it.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;
        struct svcxprt_rdma *rdma_xprt =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        struct svc_rdma_recv_ctxt *ctxt;
        int ret;

        /* Prevent svc_xprt_release() from releasing pages in rq_pages
         * when returning 0 or an error.
         */
        rqstp->rq_respages = rqstp->rq_pages;
        rqstp->rq_next_page = rqstp->rq_respages;

        rqstp->rq_xprt_ctxt = NULL;

        ctxt = NULL;
        spin_lock(&rdma_xprt->sc_rq_dto_lock);
        ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
        if (ctxt)
                list_del(&ctxt->rc_list);
        else
                /* No new incoming requests, terminate the loop */
                clear_bit(XPT_DATA, &xprt->xpt_flags);
        spin_unlock(&rdma_xprt->sc_rq_dto_lock);

        /* Unblock the transport for the next receive */
        svc_xprt_received(xprt);
        if (!ctxt)
                return 0;

        percpu_counter_inc(&svcrdma_stat_recv);
        ib_dma_sync_single_for_cpu(rdma_xprt->sc_pd->device,
                                   ctxt->rc_recv_sge.addr, ctxt->rc_byte_len,
                                   DMA_FROM_DEVICE);
        svc_rdma_build_arg_xdr(rqstp, ctxt);

        ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg, ctxt);
        if (ret < 0)
                goto out_err;
        if (ret == 0)
                goto out_drop;

        if (svc_rdma_is_reverse_direction_reply(xprt, ctxt))
                goto out_backchannel;

        svc_rdma_get_inv_rkey(rdma_xprt, ctxt);

        if (!pcl_is_empty(&ctxt->rc_read_pcl) ||
            !pcl_is_empty(&ctxt->rc_call_pcl)) {
                ret = svc_rdma_process_read_list(rdma_xprt, rqstp, ctxt);
                if (ret < 0)
                        goto out_readfail;
        }

        rqstp->rq_xprt_ctxt = ctxt;
        rqstp->rq_prot = IPPROTO_MAX;
        svc_xprt_copy_addrs(rqstp, xprt);
        set_bit(RQ_SECURE, &rqstp->rq_flags);
        return rqstp->rq_arg.len;

out_err:
        svc_rdma_send_error(rdma_xprt, ctxt, ret);
        svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
        return 0;

out_readfail:
        if (ret == -EINVAL)
                svc_rdma_send_error(rdma_xprt, ctxt, ret);
        svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
        svc_xprt_deferred_close(xprt);
        return -ENOTCONN;

out_backchannel:
        svc_rdma_handle_bc_reply(rqstp, ctxt);
out_drop:
        svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
        return 0;
}