/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include "xprt_rdma.h"

#include <linux/highmem.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

enum rpcrdma_chunktype {
	rpcrdma_noch = 0,
	rpcrdma_readch,
	rpcrdma_areadch,
	rpcrdma_writech,
	rpcrdma_replych
};

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char transfertypes[][12] = {
	"pure inline",	/* no chunks */
	" read chunk",	/* some argument via rdma read */
	"*read chunk",	/* entire request via rdma read */
	"write chunk",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};
#endif

/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * the read chunk list for this operation.
 */
static bool rpcrdma_args_inline(struct rpc_rqst *rqst)
{
	unsigned int callsize = RPCRDMA_HDRLEN_MIN + rqst->rq_snd_buf.len;

	return callsize <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst);
}
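
/* A worked example, assuming a 1024-byte inline write threshold: with
 * RPCRDMA_HDRLEN_MIN at 28 bytes (seven XDR words), a call whose
 * rq_snd_buf.len is at most 996 bytes goes inline; anything larger
 * must be described by a read chunk list instead.
 */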

/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds the
 * inline threshold, the client must provide a write list or a reply
 * chunk for this request.
 */
static bool rpcrdma_results_inline(struct rpc_rqst *rqst)
{
	unsigned int repsize = RPCRDMA_HDRLEN_MIN + rqst->rq_rcv_buf.buflen;

	return repsize <= RPCRDMA_INLINE_READ_THRESHOLD(rqst);
}

static int
rpcrdma_tail_pullup(struct xdr_buf *buf)
{
	size_t tlen = buf->tail[0].iov_len;
	size_t skip = tlen & 3;

	/* Do not include the tail if it is only an XDR pad */
	if (tlen < 4)
		return 0;

	/* xdr_write_pages() adds a pad at the beginning of the tail
	 * if the content in "buf->pages" is unaligned. Force the
	 * tail's actual content to land at the next XDR position
	 * after the head instead.
	 */
	if (skip) {
		unsigned char *src, *dst;
		unsigned int count;

		src = buf->tail[0].iov_base;
		dst = buf->head[0].iov_base;
		dst += buf->head[0].iov_len;

		src += skip;
		tlen -= skip;

		dprintk("RPC: %s: skip=%zu, memmove(%p, %p, %zu)\n",
			__func__, skip, dst, src, tlen);

		for (count = tlen; count; count--)
			*dst++ = *src++;
	}

	return tlen;
}

/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Convert the passed-in xdr_buf into a representation of RPC/RDMA
 * chunk elements. Segments are then coalesced when registered, if
 * possible within the selected memreg mode.
 *
 * Returns positive number of segments converted, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
{
	int len, n = 0, p;
	int page_base;
	struct page **ppages;

	if (pos == 0 && xdrbuf->head[0].iov_len) {
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->head[0].iov_base;
		seg[n].mr_len = xdrbuf->head[0].iov_len;
		++n;
	}

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = xdrbuf->page_base & ~PAGE_MASK;
	p = 0;
	while (len && n < nsegs) {
		if (!ppages[p]) {
			/* allocate the page list for the receive buffer */
			ppages[p] = alloc_page(GFP_ATOMIC);
			if (!ppages[p])
				return -ENOMEM;
		}
		seg[n].mr_page = ppages[p];
		seg[n].mr_offset = (void *)(unsigned long) page_base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		if (seg[n].mr_len > PAGE_SIZE)
			return -EIO;
		len -= seg[n].mr_len;
		++n;
		++p;
		page_base = 0;	/* page offset only applies to first page */
	}

	/* Message overflows the seg array */
	if (len && n == nsegs)
		return -EIO;

	/* When encoding the read list, the tail is always sent inline */
	if (type == rpcrdma_readch)
		return n;

	if (xdrbuf->tail[0].iov_len) {
		/* the rpcrdma protocol allows us to omit any trailing
		 * xdr pad bytes, saving the server an RDMA operation. */
		if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
			return n;
		if (n == nsegs)
			/* Tail remains, but we're out of segments */
			return -EIO;
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->tail[0].iov_base;
		seg[n].mr_len = xdrbuf->tail[0].iov_len;
		++n;
	}

	return n;
}

/*
 * Create read/write chunk lists, and reply chunks, for RDMA
 *
 * Assume check against THRESHOLD has been done, and chunks are required.
 * Assume only encoding one list entry for read|write chunks. The NFSv3
 * protocol is simple enough to allow this as it only has a single "bulk
 * result" in each procedure - complicated NFSv4 COMPOUNDs are not. (The
 * RDMA/Sessions NFSv4 proposal addresses this for future v4 revs.)
 *
 * A single reply chunk (a special write chunk used for the entire
 * reply, rather than just the data) is used primarily for READDIR and
 * READLINK, which would otherwise be severely size-limited by a small
 * rdma inline read max. The server response will come back as an RDMA
 * Write, followed by a message of type RDMA_NOMSG carrying the xid and
 * length. As a result, reply chunks do not provide data alignment;
 * however, they do not require "fixup" (moving the response to the
 * upper layer buffer) either.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
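 *
 * Illustration of the key above: a read list carrying two segments,
 * both at position P, goes on the wire as the 32-bit words
 *    1 - P H1 L1 O1 O1' - 1 - P H2 L2 O2 O2' - 0
 * (each 64-bit offset occupies two words), while a two-segment reply
 * chunk is the counted array
 *    1 - 2 - H1 L1 O1 O1' - H2 L2 O2 O2'
 * with no list terminator of its own.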
 *
 * Returns positive RPC/RDMA header size, or negative errno.
 */

static ssize_t
rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
		struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int n, nsegs, nchunks = 0;
	unsigned int pos;
	struct rpcrdma_mr_seg *seg = req->rl_segments;
	struct rpcrdma_read_chunk *cur_rchunk = NULL;
	struct rpcrdma_write_array *warray = NULL;
	struct rpcrdma_write_chunk *cur_wchunk = NULL;
	__be32 *iptr = headerp->rm_body.rm_chunks;
	int (*map)(struct rpcrdma_xprt *, struct rpcrdma_mr_seg *, int, bool);

	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
		/* a read chunk - server will RDMA Read our memory */
		cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
	} else {
		/* a write or reply chunk - server will RDMA Write our memory */
		*iptr++ = xdr_zero;	/* encode a NULL read chunk list */
		if (type == rpcrdma_replych)
			*iptr++ = xdr_zero;	/* a NULL write chunk list */
		warray = (struct rpcrdma_write_array *) iptr;
		cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
	}

	if (type == rpcrdma_replych || type == rpcrdma_areadch)
		pos = 0;
	else
		pos = target->head[0].iov_len;

	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
	if (nsegs < 0)
		return nsegs;

	map = r_xprt->rx_ia.ri_ops->ro_map;
	do {
		n = map(r_xprt, seg, nsegs, cur_wchunk != NULL);
		if (n <= 0)
			goto out;
		if (cur_rchunk) {	/* read */
			cur_rchunk->rc_discrim = xdr_one;
			/* all read chunks have the same "position" */
			cur_rchunk->rc_position = cpu_to_be32(pos);
			cur_rchunk->rc_target.rs_handle =
						cpu_to_be32(seg->mr_rkey);
			cur_rchunk->rc_target.rs_length =
						cpu_to_be32(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_rchunk->rc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC: %s: read chunk "
				"elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
			cur_rchunk++;
			r_xprt->rx_stats.read_chunk_count++;
		} else {		/* write/reply */
			cur_wchunk->wc_target.rs_handle =
						cpu_to_be32(seg->mr_rkey);
			cur_wchunk->wc_target.rs_length =
						cpu_to_be32(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_wchunk->wc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC: %s: %s chunk "
				"elem %d@0x%llx:0x%x (%s)\n", __func__,
				(type == rpcrdma_replych) ? "reply" : "write",
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, n < nsegs ? "more" : "last");
			cur_wchunk++;
			if (type == rpcrdma_replych)
				r_xprt->rx_stats.reply_chunk_count++;
			else
				r_xprt->rx_stats.write_chunk_count++;
			r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		}
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* success. all failures return above */
	req->rl_nchunks = nchunks;

	/*
	 * finish off header. If write, marshal discrim and nchunks.
	 */
	if (cur_rchunk) {
		iptr = (__be32 *) cur_rchunk;
		*iptr++ = xdr_zero;	/* finish the read chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
	} else {
		warray->wc_discrim = xdr_one;
		warray->wc_nchunks = cpu_to_be32(nchunks);
		iptr = (__be32 *) cur_wchunk;
		if (type == rpcrdma_writech) {
			*iptr++ = xdr_zero;	/* finish the write chunk list */
			*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
		}
	}

	/*
	 * Return header size.
	 */
	return (unsigned char *)iptr - (unsigned char *)headerp;

out:
	for (pos = 0; nchunks--;)
		pos += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt,
						      &req->rl_segments[pos]);
	return n;
}

/*
 * Copy write data inline.
 * This function is used for "small" requests. Data which is passed
 * to RPC via iovecs (or page list) is copied directly into the
 * pre-registered memory buffer for this request. For small amounts
 * of data, this is efficient. The cutoff value is tunable.
 */
static void rpcrdma_inline_pullup(struct rpc_rqst *rqst)
{
	int i, npages, curlen;
	int copy_len;
	unsigned char *srcp, *destp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int page_base;
	struct page **ppages;

	destp = rqst->rq_svec[0].iov_base;
	curlen = rqst->rq_svec[0].iov_len;
	destp += curlen;

	dprintk("RPC: %s: destp 0x%p len %d hdrlen %d\n",
		__func__, destp, rqst->rq_slen, curlen);

	copy_len = rqst->rq_snd_buf.page_len;

	if (rqst->rq_snd_buf.tail[0].iov_len) {
		curlen = rqst->rq_snd_buf.tail[0].iov_len;
		if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
			memmove(destp + copy_len,
				rqst->rq_snd_buf.tail[0].iov_base, curlen);
			r_xprt->rx_stats.pullup_copy_count += curlen;
		}
		dprintk("RPC: %s: tail destp 0x%p len %d\n",
			__func__, destp + copy_len, curlen);
		rqst->rq_svec[0].iov_len += curlen;
	}
	r_xprt->rx_stats.pullup_copy_count += copy_len;

	page_base = rqst->rq_snd_buf.page_base;
	ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;
	npages = PAGE_ALIGN(page_base + copy_len) >> PAGE_SHIFT;
	for (i = 0; copy_len && i < npages; i++) {
		curlen = PAGE_SIZE - page_base;
		if (curlen > copy_len)
			curlen = copy_len;
		dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n",
			__func__, i, destp, copy_len, curlen);
		srcp = kmap_atomic(ppages[i]);
		memcpy(destp, srcp + page_base, curlen);
		kunmap_atomic(srcp);
		rqst->rq_svec[0].iov_len += curlen;
		destp += curlen;
		copy_len -= curlen;
		page_base = 0;
	}
	/* header now contains entire send message */
}

/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Uses multiple RDMA IOVs for a request:
 *  [0] -- RPC RDMA header, which uses memory from the *start* of the
 *         preregistered buffer that already holds the RPC data in
 *         its middle.
 *  [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
 *  [2] -- optional padding.
 *  [3] -- if padded, header only in [1] and data here.
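 *
 * In short: a call and expected reply that both fit inline need no
 * chunks; a large read-type result is returned via a write chunk
 * list; a large non-read result is returned via a reply chunk; a
 * large write-type argument is sent via a read chunk list; and a
 * large argument that is not a write payload is sent as an RDMA_NOMSG
 * call with the whole message in a position-zero read chunk. The code
 * below never emits both a read list and a write or reply list for
 * the same request.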
 *
 * Returns zero on success, otherwise a negative errno.
 */

int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	char *base;
	size_t rpclen;
	ssize_t hdrlen;
	enum rpcrdma_chunktype rtype, wtype;
	struct rpcrdma_msg *headerp;

	/*
	 * rpclen gets amount of data in first buffer, which is the
	 * pre-registered buffer.
	 */
	base = rqst->rq_svec[0].iov_base;
	rpclen = rqst->rq_svec[0].iov_len;

	headerp = rdmab_to_msg(req->rl_rdmabuf);
	/* don't byte-swap XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = rdma_msg;

	/*
	 * Chunks needed for results?
	 *
	 * o Read ops return data as write chunk(s), header as inline.
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else if (rpcrdma_results_inline(rqst))
		wtype = rpcrdma_noch;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(rqst)) {
		rtype = rpcrdma_noch;
	} else if (rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		headerp->rm_type = htonl(RDMA_NOMSG);
		rtype = rpcrdma_areadch;
		rpclen = 0;
	}

	/* The following simplification is not true forever */
	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
		wtype = rpcrdma_noch;
	if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) {
		dprintk("RPC: %s: cannot marshal multiple chunk lists\n",
			__func__);
		return -EIO;
	}

	hdrlen = RPCRDMA_HDRLEN_MIN;

	/*
	 * Pull up any extra send data into the preregistered buffer.
	 * When padding is in use and applies to the transfer, insert
	 * it and change the message type.
	 */
	if (rtype == rpcrdma_noch) {

		rpcrdma_inline_pullup(rqst);

		headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
		headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
		headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
		/* new length after pullup */
		rpclen = rqst->rq_svec[0].iov_len;
	} else if (rtype == rpcrdma_readch)
		rpclen += rpcrdma_tail_pullup(&rqst->rq_snd_buf);
	if (rtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
					       headerp, rtype);
		wtype = rtype;	/* simplify dprintk */

	} else if (wtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
					       headerp, wtype);
	}
	if (hdrlen < 0)
		return hdrlen;

	dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd"
		" headerp 0x%p base 0x%p lkey 0x%x\n",
		__func__, transfertypes[wtype], hdrlen, rpclen,
		headerp, base, rdmab_lkey(req->rl_rdmabuf));

	/*
	 * initialize send_iov's - normally only two: rdma chunk header and
	 * single preregistered RPC header buffer, but if padding is present,
	 * then use a preregistered (and zeroed) pad buffer between the RPC
	 * header and any write data. In all non-rdma cases, any following
	 * data has been copied into the RPC header buffer.
	 */
	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
	req->rl_send_iov[0].length = hdrlen;
	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);

	req->rl_niovs = 1;
	if (rtype == rpcrdma_areadch)
		return 0;

	req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);

	req->rl_niovs = 2;
	return 0;
}

/*
 * Chase down a received write or reply chunklist to get length
 * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
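 *
 * For example, a properly terminated write list holding two 4096-byte
 * segments yields a total length of 8192, and *iptrp is left pointing
 * just past the terminating zero word (a reply chunk carries no
 * terminator of its own).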
 */
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp)
{
	unsigned int i, total_len;
	struct rpcrdma_write_chunk *cur_wchunk;
	char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf);

	i = be32_to_cpu(**iptrp);
	if (i > max)
		return -1;
	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	total_len = 0;
	while (i--) {
		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
			dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
				__func__,
				be32_to_cpu(seg->rs_length),
				(unsigned long long)off,
				be32_to_cpu(seg->rs_handle));
		}
		total_len += be32_to_cpu(seg->rs_length);
		++cur_wchunk;
	}
	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		__be32 *w = (__be32 *) cur_wchunk;
		if (*w++ != xdr_zero)
			return -1;
		cur_wchunk = (struct rpcrdma_write_chunk *) w;
	}
	if ((char *)cur_wchunk > base + rep->rr_len)
		return -1;

	*iptrp = (__be32 *) cur_wchunk;
	return total_len;
}

/*
 * Scatter inline received data back into provided iov's.
 */
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	int i, npages, curlen, olen;
	char *destp;
	struct page **ppages;
	int page_base;

	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len) {	/* write chunk header fixup */
		curlen = copy_len;
		rqst->rq_rcv_buf.head[0].iov_len = curlen;
	}

	dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);

	/* Shift pointer for first receive segment only */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	srcp += curlen;
	copy_len -= curlen;

	olen = copy_len;
	i = 0;
	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
	page_base = rqst->rq_rcv_buf.page_base;
	ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;

	if (copy_len && rqst->rq_rcv_buf.page_len) {
		npages = PAGE_ALIGN(page_base +
				    rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
		for (; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > copy_len)
				curlen = copy_len;
			dprintk("RPC: %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			if (copy_len == 0)
				break;
			page_base = 0;
		}
	}

	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
		curlen = copy_len;
		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
			memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
		dprintk("RPC: %s: tail srcp 0x%p len %d curlen %d\n",
			__func__, srcp, copy_len, curlen);
		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
		copy_len -= curlen; ++i;
	} else
		rqst->rq_rcv_buf.tail[0].iov_len = 0;

	if (pad) {
		/* implicit padding on terminal chunk */
		unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
		while (pad--)
			p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
	}

	if (copy_len)
		dprintk("RPC: %s: %d bytes in"
			" %d extra segments (%d lost)\n",
			__func__, olen, i, copy_len);

	/* TBD avoid a warning from call_decode() */
	rqst->rq_private_buf = rqst->rq_rcv_buf;
}

void
rpcrdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_ep *ep =
		container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
	struct rpcrdma_xprt *r_xprt =
		container_of(ep, struct rpcrdma_xprt, rx_ep);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	spin_lock_bh(&xprt->transport_lock);
	if (++xprt->connect_cookie == 0)	/* maintain a reserved value */
		++xprt->connect_cookie;
	if (ep->rep_connected > 0) {
		if (!xprt_test_and_set_connected(xprt))
			xprt_wake_pending_tasks(xprt, 0);
	} else {
		if (xprt_test_and_clear_connected(xprt))
			xprt_wake_pending_tasks(xprt, -ENOTCONN);
	}
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * This function is called when an async event is posted to
 * the connection which changes the connection state. All it
 * does at this point is mark the connection up/down; the RPC
 * timers do the rest.
 */
void
rpcrdma_conn_func(struct rpcrdma_ep *ep)
{
	schedule_delayed_work(&ep->rep_connect_worker, 0);
}

/*
 * Called as a tasklet to do req/reply match and complete a request.
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
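 *
 * In outline: sanity-check the receive completion and header, match
 * the XID to a pending request under the transport lock, walk any
 * returned write or reply chunk list to find how much data arrived
 * via RDMA, scatter inline data back into the receive buffer, update
 * the credit-based congestion window, and complete the RPC task.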
 */
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	__be32 *iptr;
	int rdmalen, status;
	unsigned long cwnd;
	u32 credits;

	/* Check status. If bad, signal disconnect and return rep to pool */
	if (rep->rr_len == ~0U) {
		rpcrdma_recv_buffer_put(rep);
		if (r_xprt->rx_ep.rep_connected == 1) {
			r_xprt->rx_ep.rep_connected = -EIO;
			rpcrdma_conn_func(&r_xprt->rx_ep);
		}
		return;
	}
	if (rep->rr_len < RPCRDMA_HDRLEN_MIN) {
		dprintk("RPC: %s: short/invalid reply\n", __func__);
		goto repost;
	}
	headerp = rdmab_to_msg(rep->rr_rdmabuf);
	if (headerp->rm_vers != rpcrdma_version) {
		dprintk("RPC: %s: invalid version %d\n",
			__func__, be32_to_cpu(headerp->rm_vers));
		goto repost;
	}

	/* Get XID and try for a match. */
	spin_lock(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (rqst == NULL) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC: %s: reply 0x%p failed "
			"to match any request xid 0x%08x len %d\n",
			__func__, rep, be32_to_cpu(headerp->rm_xid),
			rep->rr_len);
repost:
		r_xprt->rx_stats.bad_reply_count++;
		if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
			rpcrdma_recv_buffer_put(rep);

		return;
	}

	/* get request object */
	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC: %s: duplicate reply 0x%p to RPC "
			"request 0x%p: xid 0x%08x\n", __func__, rep, req,
			be32_to_cpu(headerp->rm_xid));
		goto repost;
	}

	dprintk("RPC: %s: reply 0x%p completes request 0x%p\n"
		" RPC request 0x%p xid 0x%08x\n",
		__func__, rep, req, rqst,
		be32_to_cpu(headerp->rm_xid));

	/* from here on, the reply is no longer an orphan */
	req->rl_reply = rep;
	xprt->reestablish_timeout = 0;

	/* check for expected message types */
	/* The order of some of these tests is important. */
	switch (headerp->rm_type) {
	case rdma_msg:
		/* never expect read chunks */
		/* never expect reply chunks (two ways to check) */
		/* never expect write chunks without having offered RDMA */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
		     req->rl_nchunks == 0))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in read reply */
			/* start at write chunk array count */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep,
						req->rl_nchunks, 1, &iptr);
			/* check for validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - last chunk may omit padding */
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* else ordinary inline */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
			rep->rr_len -= RPCRDMA_HDRLEN_MIN;
			status = rep->rr_len;
		}
		/* Fix up the rpc results for upper layer */
		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
		break;

	case rdma_nomsg:
		/* never expect read or write chunks, always reply chunks */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one ||
		    req->rl_nchunks == 0)
			goto badheader;
		iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk buffer already is the reply vector - no fixup. */
		status = rdmalen;
		break;

badheader:
	default:
		dprintk("%s: invalid rpcrdma reply header (type %d):"
			" chunks[012] == %d %d %d"
			" expected chunks <= %d\n",
			__func__, be32_to_cpu(headerp->rm_type),
			headerp->rm_body.rm_chunks[0],
			headerp->rm_body.rm_chunks[1],
			headerp->rm_body.rm_chunks[2],
			req->rl_nchunks);
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}

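	/* The server's credit grant sets the send window: for example, a
	 * reply advertising 32 credits allows up to 32 RPCs in flight,
	 * clamped to rb_max_requests. A grant of zero is bumped to one so
	 * that the transport cannot deadlock waiting for credits.
	 */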
	credits = be32_to_cpu(headerp->rm_credit);
	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > r_xprt->rx_buf.rb_max_requests)
		credits = r_xprt->rx_buf.rb_max_requests;

	cwnd = xprt->cwnd;
	xprt->cwnd = credits << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(rqst->rq_task);

	dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
		__func__, xprt, rqst, status);
	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock(&xprt->transport_lock);
}