1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (c) 2014-2020, Oracle and/or its affiliates.
4  * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the BSD-type
10  * license below:
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  *
16  *      Redistributions of source code must retain the above copyright
17  *      notice, this list of conditions and the following disclaimer.
18  *
19  *      Redistributions in binary form must reproduce the above
20  *      copyright notice, this list of conditions and the following
21  *      disclaimer in the documentation and/or other materials provided
22  *      with the distribution.
23  *
24  *      Neither the name of the Network Appliance, Inc. nor the names of
25  *      its contributors may be used to endorse or promote products
26  *      derived from this software without specific prior written
27  *      permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40  */
41 
42 /*
43  * rpc_rdma.c
44  *
45  * This file contains the guts of the RPC RDMA protocol, and
46  * does marshaling/unmarshaling, etc. It is also where interfacing
47  * to the Linux RPC framework lives.
48  */
49 
50 #include <linux/highmem.h>
51 
52 #include <linux/sunrpc/svc_rdma.h>
53 
54 #include "xprt_rdma.h"
55 #include <trace/events/rpcrdma.h>
56 
57 /* Returns size of largest RPC-over-RDMA header in a Call message
58  *
59  * The largest Call header contains a full-size Read list and a
60  * minimal Reply chunk.
61  */
62 static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
63 {
64 	unsigned int size;
65 
66 	/* Fixed header fields and list discriminators */
67 	size = RPCRDMA_HDRLEN_MIN;
68 
69 	/* Maximum Read list size */
70 	size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);
71 
72 	/* Minimal Reply chunk size */
73 	size += sizeof(__be32);	/* segment count */
74 	size += rpcrdma_segment_maxsz * sizeof(__be32);
75 	size += sizeof(__be32);	/* list discriminator */
76 
77 	return size;
78 }
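
/* A hedged worked example (not part of the original source): assuming
 * RPCRDMA_HDRLEN_MIN is 7 XDR words (28 bytes) and each Read list
 * element (rpcrdma_readchunk_maxsz) is 6 XDR words -- an item-present
 * discriminator, a position, and an HLOO segment -- a transport
 * limited to maxsegs = 8 budgets roughly
 *
 *	28 + (8 * 6 * 4) + (1 + 4 + 1) * 4 = 244 bytes
 *
 * for the largest possible Call header. The exact constants live in
 * rpc_rdma.h and may differ.
 */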
79 
80 /* Returns size of largest RPC-over-RDMA header in a Reply message
81  *
82  * There is only one Write list or one Reply chunk per Reply
83  * message.  The larger list is the Write list.
84  */
85 static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
86 {
87 	unsigned int size;
88 
89 	/* Fixed header fields and list discriminators */
90 	size = RPCRDMA_HDRLEN_MIN;
91 
92 	/* Maximum Write list size */
93 	size += sizeof(__be32);		/* segment count */
94 	size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
95 	size += sizeof(__be32);	/* list discriminator */
96 
97 	return size;
98 }
99 
100 /**
101  * rpcrdma_set_max_header_sizes - Initialize inline payload sizes
102  * @ep: endpoint to initialize
103  *
104  * The max_inline fields contain the maximum size of an RPC message
105  * so the marshaling code doesn't have to repeat this calculation
106  * for every RPC.
107  */
108 void rpcrdma_set_max_header_sizes(struct rpcrdma_ep *ep)
109 {
110 	unsigned int maxsegs = ep->re_max_rdma_segs;
111 
112 	ep->re_max_inline_send =
113 		ep->re_inline_send - rpcrdma_max_call_header_size(maxsegs);
114 	ep->re_max_inline_recv =
115 		ep->re_inline_recv - rpcrdma_max_reply_header_size(maxsegs);
116 }
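
/* Illustrative only: with a common 4096-byte inline threshold and the
 * ~244-byte worst-case Call header sketched above, re_max_inline_send
 * comes out near 3852 bytes. Any RPC Call whose body exceeds that must
 * move its payload via a Read chunk; re_max_inline_recv constrains
 * Replies the same way. Actual values depend on the device and mount
 * options.
 */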
117 
118 /* The client can send a request inline as long as the RPCRDMA header
119  * plus the RPC call fit under the transport's inline limit. If the
120  * combined call message size exceeds that limit, the client must use
121  * a Read chunk for this operation.
122  *
123  * A Read chunk is also required if sending the RPC call inline would
124  * exceed this device's max_sge limit.
125  */
126 static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
127 				struct rpc_rqst *rqst)
128 {
129 	struct xdr_buf *xdr = &rqst->rq_snd_buf;
130 	struct rpcrdma_ep *ep = r_xprt->rx_ep;
131 	unsigned int count, remaining, offset;
132 
133 	if (xdr->len > ep->re_max_inline_send)
134 		return false;
135 
136 	if (xdr->page_len) {
137 		remaining = xdr->page_len;
138 		offset = offset_in_page(xdr->page_base);
139 		count = RPCRDMA_MIN_SEND_SGES;
140 		while (remaining) {
141 			remaining -= min_t(unsigned int,
142 					   PAGE_SIZE - offset, remaining);
143 			offset = 0;
144 			if (++count > ep->re_attr.cap.max_send_sge)
145 				return false;
146 		}
147 	}
148 
149 	return true;
150 }
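
/* Example of the SGE accounting above (hypothetical sizes, 4KB pages):
 * a 9000-byte page list starting 2048 bytes into its first page spans
 * three pages (2048 + 4096 + 2856 bytes), so three more SGEs are
 * needed beyond the RPCRDMA_MIN_SEND_SGES the count starts from. If
 * that total exceeds the device's max_send_sge, the payload is moved
 * into a Read chunk instead of being sent inline.
 */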
151 
152 /* The client can't know how large the actual reply will be. Thus it
153  * plans for the largest possible reply for that particular ULP
154  * operation. If the maximum combined reply message size exceeds the
155  * inline threshold, the client must provide a Write list or a Reply
156  * chunk for this request.
157  */
158 static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
159 				   struct rpc_rqst *rqst)
160 {
161 	return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep->re_max_inline_recv;
162 }
163 
164 /* The client is required to provide a Reply chunk if the maximum
165  * size of the non-payload part of the RPC Reply is larger than
166  * the inline threshold.
167  */
168 static bool
169 rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
170 			  const struct rpc_rqst *rqst)
171 {
172 	const struct xdr_buf *buf = &rqst->rq_rcv_buf;
173 
174 	return (buf->head[0].iov_len + buf->tail[0].iov_len) <
175 		r_xprt->rx_ep->re_max_inline_recv;
176 }
177 
178 /* ACL likes to be lazy in allocating pages. For TCP, these
179  * pages can be allocated during receive processing. Not true
180  * for RDMA, which must always provision receive buffers
181  * up front.
182  */
183 static noinline int
184 rpcrdma_alloc_sparse_pages(struct xdr_buf *buf)
185 {
186 	struct page **ppages;
187 	int len;
188 
189 	len = buf->page_len;
190 	ppages = buf->pages + (buf->page_base >> PAGE_SHIFT);
191 	while (len > 0) {
192 		if (!*ppages)
193 			*ppages = alloc_page(GFP_NOWAIT);
194 		if (!*ppages)
195 			return -ENOBUFS;
196 		ppages++;
197 		len -= PAGE_SIZE;
198 	}
199 
200 	return 0;
201 }
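
/* Hedged note: GFP_NOWAIT keeps this allocation from blocking in the
 * transmit path; if memory is tight, the -ENOBUFS return tells the
 * RPC scheduler (via rpcrdma_marshal_req's return value) to back off
 * and retry the call after a delay rather than sleep here.
 */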
202 
203 static void
204 rpcrdma_xdr_cursor_init(struct rpcrdma_xdr_cursor *cur,
205 			const struct xdr_buf *xdrbuf,
206 			unsigned int pos, enum rpcrdma_chunktype type)
207 {
208 	cur->xc_buf = xdrbuf;
209 	cur->xc_page_offset = 0;
210 	cur->xc_flags = 0;
211 
212 	if (pos != 0)
213 		cur->xc_flags |= XC_HEAD_DONE;
214 	if (!xdrbuf->page_len)
215 		cur->xc_flags |= XC_PAGES_DONE;
216 	if (type == rpcrdma_readch || type == rpcrdma_writech ||
217 	    !xdrbuf->tail[0].iov_len)
218 		cur->xc_flags |= XC_TAIL_DONE;
219 }
220 
221 static bool
222 rpcrdma_xdr_cursor_done(const struct rpcrdma_xdr_cursor *cur)
223 {
224 	return (cur->xc_flags & (XC_HEAD_DONE | XC_PAGES_DONE |
225 				 XC_TAIL_DONE)) ==
226 	       (XC_HEAD_DONE | XC_PAGES_DONE | XC_TAIL_DONE);
227 }
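
/* Reading the cursor logic above: the cursor tracks which regions of
 * the xdr_buf (head, page list, tail) still need to be registered. A
 * non-zero @pos means the head is carried inline, so it is marked done
 * up front; Read and Write chunks never convey the tail, so it is
 * pre-marked done for those chunk types. frwr_map() is expected to
 * advance the cursor and set the remaining flags as it consumes
 * segments, which is what terminates the encoding loops below.
 */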
228 
229 static int
230 encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
231 {
232 	__be32 *p;
233 
234 	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
235 	if (unlikely(!p))
236 		return -EMSGSIZE;
237 
238 	xdr_encode_rdma_segment(p, mr->mr_handle, mr->mr_length, mr->mr_offset);
239 	return 0;
240 }
241 
242 static int
243 encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
244 		    u32 position)
245 {
246 	__be32 *p;
247 
248 	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
249 	if (unlikely(!p))
250 		return -EMSGSIZE;
251 
252 	*p++ = xdr_one;			/* Item present */
253 	xdr_encode_read_segment(p, position, mr->mr_handle, mr->mr_length,
254 				mr->mr_offset);
255 	return 0;
256 }
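
/* On the wire (see RFC 8166), the two encoders above emit, in XDR
 * words:
 *
 *	plain segment:	handle | length | offset-hi | offset-lo
 *	read segment:	1 (present) | position | handle | length |
 *			offset-hi | offset-lo
 *
 * which is why they reserve 4 and 6 words respectively.
 */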
257 
258 static int rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt,
259 			      struct rpcrdma_req *req,
260 			      struct rpcrdma_xdr_cursor *cur,
261 			      bool writing, struct rpcrdma_mr **mr)
262 {
263 	*mr = rpcrdma_mr_pop(&req->rl_free_mrs);
264 	if (!*mr) {
265 		*mr = rpcrdma_mr_get(r_xprt);
266 		if (!*mr)
267 			goto out_getmr_err;
268 		(*mr)->mr_req = req;
269 	}
270 
271 	rpcrdma_mr_push(*mr, &req->rl_registered);
272 	return frwr_map(r_xprt, cur, writing, req->rl_slot.rq_xid, *mr);
273 
274 out_getmr_err:
275 	trace_xprtrdma_nomrs_err(r_xprt, req);
276 	xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
277 	rpcrdma_mrs_refresh(r_xprt);
278 	return -EAGAIN;
279 }
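
/* Hedged note on the error path above: -EAGAIN asks the caller to
 * re-marshal the request later; xprt_wait_for_buffer_space() parks the
 * sending task and rpcrdma_mrs_refresh() replenishes the MR pool in
 * the background, after which the RPC is retried with the same
 * arguments (see the rpcrdma_marshal_req() return values below).
 */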
280 
281 /* Register and XDR encode the Read list. Supports encoding a list of read
282  * segments that belong to a single read chunk.
283  *
284  * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
285  *
286  *  Read chunklist (a linked list):
287  *   N elements, position P (same P for all chunks of same arg!):
288  *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
289  *
290  * Returns zero on success, or a negative errno if a failure occurred.
291  * @xdr is advanced to the next position in the stream.
292  *
293  * Only a single @pos value is currently supported.
294  */
295 static int rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
296 				    struct rpcrdma_req *req,
297 				    struct rpc_rqst *rqst,
298 				    enum rpcrdma_chunktype rtype)
299 {
300 	struct xdr_stream *xdr = &req->rl_stream;
301 	struct rpcrdma_xdr_cursor cur;
302 	struct rpcrdma_mr *mr;
303 	unsigned int pos;
304 	int ret;
305 
306 	if (rtype == rpcrdma_noch_pullup || rtype == rpcrdma_noch_mapped)
307 		goto done;
308 
309 	pos = rqst->rq_snd_buf.head[0].iov_len;
310 	if (rtype == rpcrdma_areadch)
311 		pos = 0;
312 	rpcrdma_xdr_cursor_init(&cur, &rqst->rq_snd_buf, pos, rtype);
313 
314 	do {
315 		ret = rpcrdma_mr_prepare(r_xprt, req, &cur, false, &mr);
316 		if (ret)
317 			return ret;
318 
319 		if (encode_read_segment(xdr, mr, pos) < 0)
320 			return -EMSGSIZE;
321 
322 		trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr,
323 					  rpcrdma_xdr_cursor_done(&cur));
324 		r_xprt->rx_stats.read_chunk_count++;
325 	} while (!rpcrdma_xdr_cursor_done(&cur));
326 
327 done:
328 	if (xdr_stream_encode_item_absent(xdr) < 0)
329 		return -EMSGSIZE;
330 	return 0;
331 }
332 
333 /* Register and XDR encode the Write list. Supports encoding a list
334  * containing one array of plain segments that belong to a single
335  * write chunk.
336  *
337  * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
338  *
339  *  Write chunklist (a list of (one) counted array):
340  *   N elements:
341  *    1 - N - HLOO - HLOO - ... - HLOO - 0
342  *
343  * Returns zero on success, or a negative errno if a failure occurred.
344  * @xdr is advanced to the next position in the stream.
345  *
346  * Only a single Write chunk is currently supported.
347  */
348 static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt,
349 				     struct rpcrdma_req *req,
350 				     struct rpc_rqst *rqst,
351 				     enum rpcrdma_chunktype wtype)
352 {
353 	struct xdr_stream *xdr = &req->rl_stream;
354 	struct rpcrdma_ep *ep = r_xprt->rx_ep;
355 	struct rpcrdma_xdr_cursor cur;
356 	struct rpcrdma_mr *mr;
357 	int nchunks, ret;
358 	__be32 *segcount;
359 
360 	if (wtype != rpcrdma_writech)
361 		goto done;
362 
363 	rpcrdma_xdr_cursor_init(&cur, &rqst->rq_rcv_buf,
364 				rqst->rq_rcv_buf.head[0].iov_len, wtype);
365 
366 	if (xdr_stream_encode_item_present(xdr) < 0)
367 		return -EMSGSIZE;
368 	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
369 	if (unlikely(!segcount))
370 		return -EMSGSIZE;
371 	/* Actual value encoded below */
372 
373 	nchunks = 0;
374 	do {
375 		ret = rpcrdma_mr_prepare(r_xprt, req, &cur, true, &mr);
376 		if (ret)
377 			return ret;
378 
379 		if (encode_rdma_segment(xdr, mr) < 0)
380 			return -EMSGSIZE;
381 
382 		trace_xprtrdma_chunk_write(rqst->rq_task, mr,
383 					   rpcrdma_xdr_cursor_done(&cur));
384 		r_xprt->rx_stats.write_chunk_count++;
385 		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
386 		nchunks++;
387 	} while (!rpcrdma_xdr_cursor_done(&cur));
388 
389 	if (xdr_pad_size(rqst->rq_rcv_buf.page_len)) {
390 		if (encode_rdma_segment(xdr, ep->re_write_pad_mr) < 0)
391 			return -EMSGSIZE;
392 
393 		trace_xprtrdma_chunk_wp(rqst->rq_task, ep->re_write_pad_mr,
394 					true);
395 		r_xprt->rx_stats.write_chunk_count++;
396 		r_xprt->rx_stats.total_rdma_request +=
397 			ep->re_write_pad_mr->mr_length;
398 		nchunks++;
399 	}
400 
401 	/* Update count of segments in this Write chunk */
402 	*segcount = cpu_to_be32(nchunks);
403 
404 done:
405 	if (xdr_stream_encode_item_absent(xdr) < 0)
406 		return -EMSGSIZE;
407 	return 0;
408 }
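
/* Note on the pad segment above: if the ULP's reply payload is not
 * XDR-aligned (rq_rcv_buf.page_len & 3 != 0), an extra segment backed
 * by ep->re_write_pad_mr is appended so the server has somewhere to
 * place the trailing XDR pad bytes. For example, a 5-byte payload
 * registered as a single segment would be encoded with segcount == 2:
 * one data segment plus the pad segment.
 */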
409 
410 /* Register and XDR encode the Reply chunk. Supports encoding an array
411  * of plain segments that belong to a single write (reply) chunk.
412  *
413  * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
414  *
415  *  Reply chunk (a counted array):
416  *   N elements:
417  *    1 - N - HLOO - HLOO - ... - HLOO
418  *
419  * Returns zero on success, or a negative errno if a failure occurred.
420  * @xdr is advanced to the next position in the stream.
421  */
422 static int rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
423 				      struct rpcrdma_req *req,
424 				      struct rpc_rqst *rqst,
425 				      enum rpcrdma_chunktype wtype)
426 {
427 	struct xdr_stream *xdr = &req->rl_stream;
428 	struct rpcrdma_xdr_cursor cur;
429 	struct rpcrdma_mr *mr;
430 	int nchunks, ret;
431 	__be32 *segcount;
432 
433 	if (wtype != rpcrdma_replych) {
434 		if (xdr_stream_encode_item_absent(xdr) < 0)
435 			return -EMSGSIZE;
436 		return 0;
437 	}
438 
439 	rpcrdma_xdr_cursor_init(&cur, &rqst->rq_rcv_buf, 0, wtype);
440 
441 	if (xdr_stream_encode_item_present(xdr) < 0)
442 		return -EMSGSIZE;
443 	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
444 	if (unlikely(!segcount))
445 		return -EMSGSIZE;
446 	/* Actual value encoded below */
447 
448 	nchunks = 0;
449 	do {
450 		ret = rpcrdma_mr_prepare(r_xprt, req, &cur, true, &mr);
451 		if (ret)
452 			return ret;
453 
454 		if (encode_rdma_segment(xdr, mr) < 0)
455 			return -EMSGSIZE;
456 
457 		trace_xprtrdma_chunk_reply(rqst->rq_task, mr,
458 					   rpcrdma_xdr_cursor_done(&cur));
459 		r_xprt->rx_stats.reply_chunk_count++;
460 		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
461 		nchunks++;
462 	} while (!rpcrdma_xdr_cursor_done(&cur));
463 
464 	/* Update count of segments in the Reply chunk */
465 	*segcount = cpu_to_be32(nchunks);
466 
467 	return 0;
468 }
469 
470 static void rpcrdma_sendctx_done(struct kref *kref)
471 {
472 	struct rpcrdma_req *req =
473 		container_of(kref, struct rpcrdma_req, rl_kref);
474 	struct rpcrdma_rep *rep = req->rl_reply;
475 
476 	rpcrdma_complete_rqst(rep);
477 	rep->rr_rxprt->rx_stats.reply_waits_for_send++;
478 }
479 
480 /**
481  * rpcrdma_sendctx_unmap - DMA-unmap Send buffer
482  * @sc: sendctx containing SGEs to unmap
483  *
484  */
485 void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc)
486 {
487 	struct rpcrdma_regbuf *rb = sc->sc_req->rl_sendbuf;
488 	struct ib_sge *sge;
489 
490 	if (!sc->sc_unmap_count)
491 		return;
492 
493 	/* The first two SGEs contain the transport header and
494 	 * the inline buffer. These are always left mapped so
495 	 * they can be cheaply re-used.
496 	 */
497 	for (sge = &sc->sc_sges[2]; sc->sc_unmap_count;
498 	     ++sge, --sc->sc_unmap_count)
499 		ib_dma_unmap_page(rdmab_device(rb), sge->addr, sge->length,
500 				  DMA_TO_DEVICE);
501 
502 	kref_put(&sc->sc_req->rl_kref, rpcrdma_sendctx_done);
503 }
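
/* Reference-counting sketch: rl_kref starts at one in
 * rpcrdma_prepare_send_sges() and is bumped again whenever payload
 * SGEs are left DMA-mapped. The Send completion path drops that extra
 * reference here, and the Reply path drops the original one, so
 * rpcrdma_complete_rqst() runs only after both the Send has finished
 * and the matching Reply has been processed.
 */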
504 
505 /* Prepare an SGE for the RPC-over-RDMA transport header.
506  */
507 static void rpcrdma_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt,
508 				    struct rpcrdma_req *req, u32 len)
509 {
510 	struct rpcrdma_sendctx *sc = req->rl_sendctx;
511 	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
512 	struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];
513 
514 	sge->addr = rdmab_addr(rb);
515 	sge->length = len;
516 	sge->lkey = rdmab_lkey(rb);
517 
518 	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
519 				      DMA_TO_DEVICE);
520 }
521 
522 /* The head iovec is straightforward, as it is usually already
523  * DMA-mapped. Sync the content that has changed.
524  */
525 static bool rpcrdma_prepare_head_iov(struct rpcrdma_xprt *r_xprt,
526 				     struct rpcrdma_req *req, unsigned int len)
527 {
528 	struct rpcrdma_sendctx *sc = req->rl_sendctx;
529 	struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];
530 	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
531 
532 	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
533 		return false;
534 
535 	sge->addr = rdmab_addr(rb);
536 	sge->length = len;
537 	sge->lkey = rdmab_lkey(rb);
538 
539 	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
540 				      DMA_TO_DEVICE);
541 	return true;
542 }
543 
544 /* If there is a page list present, DMA map and prepare an
545  * SGE for each page to be sent.
546  */
547 static bool rpcrdma_prepare_pagelist(struct rpcrdma_req *req,
548 				     struct xdr_buf *xdr)
549 {
550 	struct rpcrdma_sendctx *sc = req->rl_sendctx;
551 	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
552 	unsigned int page_base, len, remaining;
553 	struct page **ppages;
554 	struct ib_sge *sge;
555 
556 	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
557 	page_base = offset_in_page(xdr->page_base);
558 	remaining = xdr->page_len;
559 	while (remaining) {
560 		sge = &sc->sc_sges[req->rl_wr.num_sge++];
561 		len = min_t(unsigned int, PAGE_SIZE - page_base, remaining);
562 		sge->addr = ib_dma_map_page(rdmab_device(rb), *ppages,
563 					    page_base, len, DMA_TO_DEVICE);
564 		if (ib_dma_mapping_error(rdmab_device(rb), sge->addr))
565 			goto out_mapping_err;
566 
567 		sge->length = len;
568 		sge->lkey = rdmab_lkey(rb);
569 
570 		sc->sc_unmap_count++;
571 		ppages++;
572 		remaining -= len;
573 		page_base = 0;
574 	}
575 
576 	return true;
577 
578 out_mapping_err:
579 	trace_xprtrdma_dma_maperr(sge->addr);
580 	return false;
581 }
582 
583 /* The tail iovec may include an XDR pad for the page list,
584  * as well as additional content, and may not reside in the
585  * same page as the head iovec.
586  */
587 static bool rpcrdma_prepare_tail_iov(struct rpcrdma_req *req,
588 				     struct xdr_buf *xdr,
589 				     unsigned int page_base, unsigned int len)
590 {
591 	struct rpcrdma_sendctx *sc = req->rl_sendctx;
592 	struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];
593 	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
594 	struct page *page = virt_to_page(xdr->tail[0].iov_base);
595 
596 	sge->addr = ib_dma_map_page(rdmab_device(rb), page, page_base, len,
597 				    DMA_TO_DEVICE);
598 	if (ib_dma_mapping_error(rdmab_device(rb), sge->addr))
599 		goto out_mapping_err;
600 
601 	sge->length = len;
602 	sge->lkey = rdmab_lkey(rb);
603 	++sc->sc_unmap_count;
604 	return true;
605 
606 out_mapping_err:
607 	trace_xprtrdma_dma_maperr(sge->addr);
608 	return false;
609 }
610 
611 /* Copy the tail to the end of the head buffer.
612  */
613 static void rpcrdma_pullup_tail_iov(struct rpcrdma_xprt *r_xprt,
614 				    struct rpcrdma_req *req,
615 				    struct xdr_buf *xdr)
616 {
617 	unsigned char *dst;
618 
619 	dst = (unsigned char *)xdr->head[0].iov_base;
620 	dst += xdr->head[0].iov_len + xdr->page_len;
621 	memmove(dst, xdr->tail[0].iov_base, xdr->tail[0].iov_len);
622 	r_xprt->rx_stats.pullup_copy_count += xdr->tail[0].iov_len;
623 }
624 
625 /* Copy pagelist content into the head buffer.
626  */
627 static void rpcrdma_pullup_pagelist(struct rpcrdma_xprt *r_xprt,
628 				    struct rpcrdma_req *req,
629 				    struct xdr_buf *xdr)
630 {
631 	unsigned int len, page_base, remaining;
632 	struct page **ppages;
633 	unsigned char *src, *dst;
634 
635 	dst = (unsigned char *)xdr->head[0].iov_base;
636 	dst += xdr->head[0].iov_len;
637 	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
638 	page_base = offset_in_page(xdr->page_base);
639 	remaining = xdr->page_len;
640 	while (remaining) {
641 		src = page_address(*ppages);
642 		src += page_base;
643 		len = min_t(unsigned int, PAGE_SIZE - page_base, remaining);
644 		memcpy(dst, src, len);
645 		r_xprt->rx_stats.pullup_copy_count += len;
646 
647 		ppages++;
648 		dst += len;
649 		remaining -= len;
650 		page_base = 0;
651 	}
652 }
653 
654 /* Copy the contents of @xdr into @rl_sendbuf and DMA sync it.
655  * When the head, pagelist, and tail are small, a pull-up copy
656  * is considerably less costly than DMA mapping the components
657  * of @xdr.
658  *
659  * Assumptions:
660  *  - the caller has already verified that the total length
661  *    of the RPC Call body will fit into @rl_sendbuf.
662  */
663 static bool rpcrdma_prepare_noch_pullup(struct rpcrdma_xprt *r_xprt,
664 					struct rpcrdma_req *req,
665 					struct xdr_buf *xdr)
666 {
667 	if (unlikely(xdr->tail[0].iov_len))
668 		rpcrdma_pullup_tail_iov(r_xprt, req, xdr);
669 
670 	if (unlikely(xdr->page_len))
671 		rpcrdma_pullup_pagelist(r_xprt, req, xdr);
672 
673 	/* The whole RPC message resides in the head iovec now */
674 	return rpcrdma_prepare_head_iov(r_xprt, req, xdr->len);
675 }
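
/* The pull-up path above is chosen by rpcrdma_marshal_req() only when
 * the entire Call body already fits in rl_sendbuf, so copying the page
 * list and tail into the head costs less than DMA-mapping (and later
 * unmapping) each component separately; the Send then needs just the
 * transport header SGE plus one head SGE.
 */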
676 
677 static bool rpcrdma_prepare_noch_mapped(struct rpcrdma_xprt *r_xprt,
678 					struct rpcrdma_req *req,
679 					struct xdr_buf *xdr)
680 {
681 	struct kvec *tail = &xdr->tail[0];
682 
683 	if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
684 		return false;
685 	if (xdr->page_len)
686 		if (!rpcrdma_prepare_pagelist(req, xdr))
687 			return false;
688 	if (tail->iov_len)
689 		if (!rpcrdma_prepare_tail_iov(req, xdr,
690 					      offset_in_page(tail->iov_base),
691 					      tail->iov_len))
692 			return false;
693 
694 	if (req->rl_sendctx->sc_unmap_count)
695 		kref_get(&req->rl_kref);
696 	return true;
697 }
698 
699 static bool rpcrdma_prepare_readch(struct rpcrdma_xprt *r_xprt,
700 				   struct rpcrdma_req *req,
701 				   struct xdr_buf *xdr)
702 {
703 	if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
704 		return false;
705 
706 	/* If there is a Read chunk, the page list is being handled
707 	 * via explicit RDMA, and thus is skipped here.
708 	 */
709 
710 	/* Do not include the tail if it is only an XDR pad */
711 	if (xdr->tail[0].iov_len > 3) {
712 		unsigned int page_base, len;
713 
714 		/* If the content in the page list is an odd length,
715 		 * xdr_write_pages() adds a pad at the beginning of
716 		 * the tail iovec. Force the tail's non-pad content to
717 		 * land at the next XDR position in the Send message.
718 		 */
719 		page_base = offset_in_page(xdr->tail[0].iov_base);
720 		len = xdr->tail[0].iov_len;
721 		page_base += len & 3;
722 		len -= len & 3;
723 		if (!rpcrdma_prepare_tail_iov(req, xdr, page_base, len))
724 			return false;
725 		kref_get(&req->rl_kref);
726 	}
727 
728 	return true;
729 }
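
/* Worked example of the pad arithmetic in rpcrdma_prepare_readch():
 * if the page list carried 5 payload bytes, xdr_write_pages() placed
 * 3 pad bytes at the start of the tail. With a 15-byte tail iovec,
 * len & 3 == 3, so the pad is skipped and only the remaining 12 bytes
 * of real tail content are mapped at the next XDR-aligned position.
 */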
730 
731 /**
732  * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
733  * @r_xprt: controlling transport
734  * @req: context of RPC Call being marshalled
735  * @hdrlen: size of transport header, in bytes
736  * @xdr: xdr_buf containing RPC Call
737  * @rtype: chunk type being encoded
738  *
739  * Returns 0 on success; otherwise a negative errno is returned.
740  */
741 inline int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
742 				     struct rpcrdma_req *req, u32 hdrlen,
743 				     struct xdr_buf *xdr,
744 				     enum rpcrdma_chunktype rtype)
745 {
746 	int ret;
747 
748 	ret = -EAGAIN;
749 	req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt);
750 	if (!req->rl_sendctx)
751 		goto out_nosc;
752 	req->rl_sendctx->sc_unmap_count = 0;
753 	req->rl_sendctx->sc_req = req;
754 	kref_init(&req->rl_kref);
755 	req->rl_wr.wr_cqe = &req->rl_sendctx->sc_cqe;
756 	req->rl_wr.sg_list = req->rl_sendctx->sc_sges;
757 	req->rl_wr.num_sge = 0;
758 	req->rl_wr.opcode = IB_WR_SEND;
759 
760 	rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen);
761 
762 	ret = -EIO;
763 	switch (rtype) {
764 	case rpcrdma_noch_pullup:
765 		if (!rpcrdma_prepare_noch_pullup(r_xprt, req, xdr))
766 			goto out_unmap;
767 		break;
768 	case rpcrdma_noch_mapped:
769 		if (!rpcrdma_prepare_noch_mapped(r_xprt, req, xdr))
770 			goto out_unmap;
771 		break;
772 	case rpcrdma_readch:
773 		if (!rpcrdma_prepare_readch(r_xprt, req, xdr))
774 			goto out_unmap;
775 		break;
776 	case rpcrdma_areadch:
777 		break;
778 	default:
779 		goto out_unmap;
780 	}
781 
782 	return 0;
783 
784 out_unmap:
785 	rpcrdma_sendctx_unmap(req->rl_sendctx);
786 out_nosc:
787 	trace_xprtrdma_prepsend_failed(&req->rl_slot, ret);
788 	return ret;
789 }
790 
791 /**
792  * rpcrdma_marshal_req - Marshal and send one RPC request
793  * @r_xprt: controlling transport
794  * @rqst: RPC request to be marshaled
795  *
796  * For the RPC in "rqst", this function:
797  *  - Chooses the transfer mode (e.g., RDMA_MSG or RDMA_NOMSG)
798  *  - Registers Read, Write, and Reply chunks
799  *  - Constructs the transport header
800  *  - Posts a Send WR to send the transport header and request
801  *
802  * Returns:
803  *	%0 if the RPC was sent successfully,
804  *	%-ENOTCONN if the connection was lost,
805  *	%-EAGAIN if the caller should call again with the same arguments,
806  *	%-ENOBUFS if the caller should call again after a delay,
807  *	%-EMSGSIZE if the transport header is too small,
808  *	%-EIO if a permanent problem occurred while marshaling.
809  */
810 int
811 rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
812 {
813 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
814 	struct xdr_stream *xdr = &req->rl_stream;
815 	enum rpcrdma_chunktype rtype, wtype;
816 	struct xdr_buf *buf = &rqst->rq_snd_buf;
817 	bool ddp_allowed;
818 	__be32 *p;
819 	int ret;
820 
821 	if (unlikely(rqst->rq_rcv_buf.flags & XDRBUF_SPARSE_PAGES)) {
822 		ret = rpcrdma_alloc_sparse_pages(&rqst->rq_rcv_buf);
823 		if (ret)
824 			return ret;
825 	}
826 
827 	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
828 	xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf),
829 			rqst);
830 
831 	/* Fixed header fields */
832 	ret = -EMSGSIZE;
833 	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
834 	if (!p)
835 		goto out_err;
836 	*p++ = rqst->rq_xid;
837 	*p++ = rpcrdma_version;
838 	*p++ = r_xprt->rx_buf.rb_max_requests;
839 
840 	/* When the ULP employs a GSS flavor that guarantees integrity
841 	 * or privacy, direct data placement of individual data items
842 	 * is not allowed.
843 	 */
844 	ddp_allowed = !test_bit(RPCAUTH_AUTH_DATATOUCH,
845 				&rqst->rq_cred->cr_auth->au_flags);
846 
847 	/*
848 	 * Chunks needed for results?
849 	 *
850 	 * o If the expected result is under the inline threshold, all ops
851 	 *   return as inline.
852 	 * o Large read ops return data as write chunk(s), header as
853 	 *   inline.
854 	 * o Large non-read ops return as a single reply chunk.
855 	 */
856 	if (rpcrdma_results_inline(r_xprt, rqst))
857 		wtype = rpcrdma_noch;
858 	else if ((ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ) &&
859 		 rpcrdma_nonpayload_inline(r_xprt, rqst))
860 		wtype = rpcrdma_writech;
861 	else
862 		wtype = rpcrdma_replych;
863 
864 	/*
865 	 * Chunks needed for arguments?
866 	 *
867 	 * o If the total request is under the inline threshold, all ops
868 	 *   are sent as inline.
869 	 * o Large write ops transmit data as read chunk(s), header as
870 	 *   inline.
871 	 * o Large non-write ops are sent with the entire message as a
872 	 *   single read chunk (protocol 0-position special case).
873 	 *
874 	 * This assumes that the upper layer does not present a request
875 	 * that both has a data payload, and whose non-data arguments
876 	 * by themselves are larger than the inline threshold.
877 	 */
878 	if (rpcrdma_args_inline(r_xprt, rqst)) {
879 		*p++ = rdma_msg;
880 		rtype = buf->len < rdmab_length(req->rl_sendbuf) ?
881 			rpcrdma_noch_pullup : rpcrdma_noch_mapped;
882 	} else if (ddp_allowed && buf->flags & XDRBUF_WRITE) {
883 		*p++ = rdma_msg;
884 		rtype = rpcrdma_readch;
885 	} else {
886 		r_xprt->rx_stats.nomsg_call_count++;
887 		*p++ = rdma_nomsg;
888 		rtype = rpcrdma_areadch;
889 	}
890 
891 	/* This implementation supports the following combinations
892 	 * of chunk lists in one RPC-over-RDMA Call message:
893 	 *
894 	 *   - Read list
895 	 *   - Write list
896 	 *   - Reply chunk
897 	 *   - Read list + Reply chunk
898 	 *
899 	 * It might not yet support the following combinations:
900 	 *
901 	 *   - Read list + Write list
902 	 *
903 	 * It does not support the following combinations:
904 	 *
905 	 *   - Write list + Reply chunk
906 	 *   - Read list + Write list + Reply chunk
907 	 *
908 	 * This implementation supports only a single chunk in each
909 	 * Read or Write list. Thus for example the client cannot
910 	 * send a Call message with a Position Zero Read chunk and a
911 	 * regular Read chunk at the same time.
912 	 */
913 	ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
914 	if (ret)
915 		goto out_err;
916 	ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
917 	if (ret)
918 		goto out_err;
919 	ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
920 	if (ret)
921 		goto out_err;
922 
923 	ret = rpcrdma_prepare_send_sges(r_xprt, req, req->rl_hdrbuf.len,
924 					buf, rtype);
925 	if (ret)
926 		goto out_err;
927 
928 	trace_xprtrdma_marshal(req, rtype, wtype);
929 	return 0;
930 
931 out_err:
932 	trace_xprtrdma_marshal_failed(rqst, ret);
933 	r_xprt->rx_stats.failed_marshal_count++;
934 	frwr_reset(req);
935 	return ret;
936 }
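
/* A hedged end-to-end sketch (not from the original source): a Call
 * with a small argument body and a large non-inline reply payload is
 * typically marshalled as RDMA_MSG with an empty Read list, a
 * one-chunk Write list, and no Reply chunk, i.e. in XDR words:
 *
 *	xid | 1 (vers) | credits | RDMA_MSG |
 *	0 (Read list empty) |
 *	1 | N (segcount) | HLOO ... | 0 (Write list end) |
 *	0 (no Reply chunk) |
 *	RPC call message, sent inline
 *
 * The actual rtype/wtype choice follows the decision logic above.
 */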
937 
938 static void __rpcrdma_update_cwnd_locked(struct rpc_xprt *xprt,
939 					 struct rpcrdma_buffer *buf,
940 					 u32 grant)
941 {
942 	buf->rb_credits = grant;
943 	xprt->cwnd = grant << RPC_CWNDSHIFT;
944 }
945 
946 static void rpcrdma_update_cwnd(struct rpcrdma_xprt *r_xprt, u32 grant)
947 {
948 	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
949 
950 	spin_lock(&xprt->transport_lock);
951 	__rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, grant);
952 	spin_unlock(&xprt->transport_lock);
953 }
954 
955 /**
956  * rpcrdma_reset_cwnd - Reset the xprt's congestion window
957  * @r_xprt: controlling transport instance
958  *
959  * Prepare @r_xprt for the next connection by reinitializing
960  * its credit grant to one (see RFC 8166, Section 3.3.3).
961  */
962 void rpcrdma_reset_cwnd(struct rpcrdma_xprt *r_xprt)
963 {
964 	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
965 
966 	spin_lock(&xprt->transport_lock);
967 	xprt->cong = 0;
968 	__rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, 1);
969 	spin_unlock(&xprt->transport_lock);
970 }
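
/* Congestion window sketch: the RPC core counts cwnd in units of
 * RPC_CWNDSCALE, so a credit grant of, say, 32 yields a window that
 * admits roughly 32 in-flight requests (32 << RPC_CWNDSHIFT). Each new
 * connection starts from a single credit, per RFC 8166 Section 3.3.3,
 * until the first Reply advertises the server's real credit limit.
 */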
971 
972 /**
973  * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
974  * @rqst: controlling RPC request
975  * @srcp: points to RPC message payload in receive buffer
976  * @copy_len: remaining length of receive buffer content
977  * @pad: Write chunk pad bytes needed (zero for pure inline)
978  *
979  * The upper layer has set the maximum number of bytes it can
980  * receive in each component of rq_rcv_buf. These values are set in
981  * the head.iov_len, page_len, tail.iov_len, and buflen fields.
982  *
983  * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
984  * many cases this function simply updates iov_base pointers in
985  * rq_rcv_buf to point directly to the received reply data, to
986  * avoid copying reply data.
987  *
988  * Returns the count of bytes which had to be memcopied.
989  */
990 static unsigned long
991 rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
992 {
993 	unsigned long fixup_copy_count;
994 	int i, npages, curlen;
995 	char *destp;
996 	struct page **ppages;
997 	int page_base;
998 
999 	/* The head iovec is redirected to the RPC reply message
1000 	 * in the receive buffer, to avoid a memcopy.
1001 	 */
1002 	rqst->rq_rcv_buf.head[0].iov_base = srcp;
1003 	rqst->rq_private_buf.head[0].iov_base = srcp;
1004 
1005 	/* The contents of the receive buffer that follow
1006 	 * head.iov_len bytes are copied into the page list.
1007 	 */
1008 	curlen = rqst->rq_rcv_buf.head[0].iov_len;
1009 	if (curlen > copy_len)
1010 		curlen = copy_len;
1011 	srcp += curlen;
1012 	copy_len -= curlen;
1013 
1014 	ppages = rqst->rq_rcv_buf.pages +
1015 		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
1016 	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
1017 	fixup_copy_count = 0;
1018 	if (copy_len && rqst->rq_rcv_buf.page_len) {
1019 		int pagelist_len;
1020 
1021 		pagelist_len = rqst->rq_rcv_buf.page_len;
1022 		if (pagelist_len > copy_len)
1023 			pagelist_len = copy_len;
1024 		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
1025 		for (i = 0; i < npages; i++) {
1026 			curlen = PAGE_SIZE - page_base;
1027 			if (curlen > pagelist_len)
1028 				curlen = pagelist_len;
1029 
1030 			destp = kmap_atomic(ppages[i]);
1031 			memcpy(destp + page_base, srcp, curlen);
1032 			flush_dcache_page(ppages[i]);
1033 			kunmap_atomic(destp);
1034 			srcp += curlen;
1035 			copy_len -= curlen;
1036 			fixup_copy_count += curlen;
1037 			pagelist_len -= curlen;
1038 			if (!pagelist_len)
1039 				break;
1040 			page_base = 0;
1041 		}
1042 
1043 		/* Implicit padding for the last segment in a Write
1044 		 * chunk is inserted inline at the front of the tail
1045 		 * iovec. The upper layer ignores the content of
1046 		 * the pad. Simply ensure inline content in the tail
1047 		 * that follows the Write chunk is properly aligned.
1048 		 */
1049 		if (pad)
1050 			srcp -= pad;
1051 	}
1052 
1053 	/* The tail iovec is redirected to the remaining data
1054 	 * in the receive buffer, to avoid a memcopy.
1055 	 */
1056 	if (copy_len || pad) {
1057 		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
1058 		rqst->rq_private_buf.tail[0].iov_base = srcp;
1059 	}
1060 
1061 	if (fixup_copy_count)
1062 		trace_xprtrdma_fixup(rqst, fixup_copy_count);
1063 	return fixup_copy_count;
1064 }
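
/* Example (hypothetical sizes): a 200-byte inline reply against an
 * rq_rcv_buf laid out as head 128 / pages 4096 / tail 0 leaves the
 * head iovec pointing at the first 128 received bytes, memcopies the
 * remaining 72 bytes into the first receive page, and returns 72.
 * Only page-list bytes are ever copied; the head (and, when bytes
 * remain, the tail) is redirected rather than copied.
 */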
1065 
1066 /* By convention, backchannel calls arrive via rdma_msg type
1067  * messages, and never populate the chunk lists. This makes
1068  * the RPC/RDMA header small and fixed in size, so it is
1069  * straightforward to check the RPC header's direction field.
1070  */
1071 static bool
1072 rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
1073 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1074 {
1075 	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
1076 	struct xdr_stream *xdr = &rep->rr_stream;
1077 	__be32 *p;
1078 
1079 	if (rep->rr_proc != rdma_msg)
1080 		return false;
1081 
1082 	/* Peek at stream contents without advancing. */
1083 	p = xdr_inline_decode(xdr, 0);
1084 
1085 	/* Chunk lists */
1086 	if (xdr_item_is_present(p++))
1087 		return false;
1088 	if (xdr_item_is_present(p++))
1089 		return false;
1090 	if (xdr_item_is_present(p++))
1091 		return false;
1092 
1093 	/* RPC header */
1094 	if (*p++ != rep->rr_xid)
1095 		return false;
1096 	if (*p != cpu_to_be32(RPC_CALL))
1097 		return false;
1098 
1099 	/* No bc service. */
1100 	if (xprt->bc_serv == NULL)
1101 		return false;
1102 
1103 	/* Now that we are sure this is a backchannel call,
1104 	 * advance to the RPC header.
1105 	 */
1106 	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
1107 	if (unlikely(!p))
1108 		return true;
1109 
1110 	rpcrdma_bc_receive_call(r_xprt, rep);
1111 	return true;
1112 }
1113 #else	/* CONFIG_SUNRPC_BACKCHANNEL */
1114 {
1115 	return false;
1116 }
1117 #endif	/* CONFIG_SUNRPC_BACKCHANNEL */
1118 
1119 static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
1120 {
1121 	u32 handle;
1122 	u64 offset;
1123 	__be32 *p;
1124 
1125 	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
1126 	if (unlikely(!p))
1127 		return -EIO;
1128 
1129 	xdr_decode_rdma_segment(p, &handle, length, &offset);
1130 	trace_xprtrdma_decode_seg(handle, *length, offset);
1131 	return 0;
1132 }
1133 
1134 static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
1135 {
1136 	u32 segcount, seglength;
1137 	__be32 *p;
1138 
1139 	p = xdr_inline_decode(xdr, sizeof(*p));
1140 	if (unlikely(!p))
1141 		return -EIO;
1142 
1143 	*length = 0;
1144 	segcount = be32_to_cpup(p);
1145 	while (segcount--) {
1146 		if (decode_rdma_segment(xdr, &seglength))
1147 			return -EIO;
1148 		*length += seglength;
1149 	}
1150 
1151 	return 0;
1152 }
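
/* The decoders above parse the same layout the marshaling side emits:
 * a Write chunk is a counted array (segcount followed by that many
 * HLOO segments), and the decoded *length is the sum of the segment
 * lengths, i.e. the number of payload bytes the responder claims to
 * have placed via RDMA Write.
 */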
1153 
1154 /* In RPC-over-RDMA Version One replies, a Read list is never
1155  * expected. This decoder is a stub that returns an error if
1156  * a Read list is present.
1157  */
1158 static int decode_read_list(struct xdr_stream *xdr)
1159 {
1160 	__be32 *p;
1161 
1162 	p = xdr_inline_decode(xdr, sizeof(*p));
1163 	if (unlikely(!p))
1164 		return -EIO;
1165 	if (unlikely(xdr_item_is_present(p)))
1166 		return -EIO;
1167 	return 0;
1168 }
1169 
1170 /* Supports only one Write chunk in the Write list
1171  */
1172 static int decode_write_list(struct xdr_stream *xdr, u32 *length)
1173 {
1174 	u32 chunklen;
1175 	bool first;
1176 	__be32 *p;
1177 
1178 	*length = 0;
1179 	first = true;
1180 	do {
1181 		p = xdr_inline_decode(xdr, sizeof(*p));
1182 		if (unlikely(!p))
1183 			return -EIO;
1184 		if (xdr_item_is_absent(p))
1185 			break;
1186 		if (!first)
1187 			return -EIO;
1188 
1189 		if (decode_write_chunk(xdr, &chunklen))
1190 			return -EIO;
1191 		*length += chunklen;
1192 		first = false;
1193 	} while (true);
1194 	return 0;
1195 }
1196 
1197 static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
1198 {
1199 	__be32 *p;
1200 
1201 	p = xdr_inline_decode(xdr, sizeof(*p));
1202 	if (unlikely(!p))
1203 		return -EIO;
1204 
1205 	*length = 0;
1206 	if (xdr_item_is_present(p))
1207 		if (decode_write_chunk(xdr, length))
1208 			return -EIO;
1209 	return 0;
1210 }
1211 
1212 static int
1213 rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
1214 		   struct rpc_rqst *rqst)
1215 {
1216 	struct xdr_stream *xdr = &rep->rr_stream;
1217 	u32 writelist, replychunk, rpclen;
1218 	char *base;
1219 
1220 	/* Decode the chunk lists */
1221 	if (decode_read_list(xdr))
1222 		return -EIO;
1223 	if (decode_write_list(xdr, &writelist))
1224 		return -EIO;
1225 	if (decode_reply_chunk(xdr, &replychunk))
1226 		return -EIO;
1227 
1228 	/* RDMA_MSG sanity checks */
1229 	if (unlikely(replychunk))
1230 		return -EIO;
1231 
1232 	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
1233 	base = (char *)xdr_inline_decode(xdr, 0);
1234 	rpclen = xdr_stream_remaining(xdr);
1235 	r_xprt->rx_stats.fixup_copy_count +=
1236 		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);
1237 
1238 	r_xprt->rx_stats.total_rdma_reply += writelist;
1239 	return rpclen + xdr_align_size(writelist);
1240 }
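
/* Hedged reading of the return value above: the RPC layer is handed
 * the total size of the received reply, which here is the inline
 * portion that followed the transport header (rpclen) plus the
 * XDR-aligned count of bytes the server pushed into the Write chunk.
 */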
1241 
1242 static noinline int
1243 rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
1244 {
1245 	struct xdr_stream *xdr = &rep->rr_stream;
1246 	u32 writelist, replychunk;
1247 
1248 	/* Decode the chunk lists */
1249 	if (decode_read_list(xdr))
1250 		return -EIO;
1251 	if (decode_write_list(xdr, &writelist))
1252 		return -EIO;
1253 	if (decode_reply_chunk(xdr, &replychunk))
1254 		return -EIO;
1255 
1256 	/* RDMA_NOMSG sanity checks */
1257 	if (unlikely(writelist))
1258 		return -EIO;
1259 	if (unlikely(!replychunk))
1260 		return -EIO;
1261 
1262 	/* Reply chunk buffer already is the reply vector */
1263 	r_xprt->rx_stats.total_rdma_reply += replychunk;
1264 	return replychunk;
1265 }
1266 
1267 static noinline int
1268 rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
1269 		     struct rpc_rqst *rqst)
1270 {
1271 	struct xdr_stream *xdr = &rep->rr_stream;
1272 	__be32 *p;
1273 
1274 	p = xdr_inline_decode(xdr, sizeof(*p));
1275 	if (unlikely(!p))
1276 		return -EIO;
1277 
1278 	switch (*p) {
1279 	case err_vers:
1280 		p = xdr_inline_decode(xdr, 2 * sizeof(*p));
1281 		if (!p)
1282 			break;
1283 		trace_xprtrdma_err_vers(rqst, p, p + 1);
1284 		break;
1285 	case err_chunk:
1286 		trace_xprtrdma_err_chunk(rqst);
1287 		break;
1288 	default:
1289 		trace_xprtrdma_err_unrecognized(rqst, p);
1290 	}
1291 
1292 	return -EIO;
1293 }
1294 
1295 /**
1296  * rpcrdma_unpin_rqst - Release rqst without completing it
1297  * @rep: RPC/RDMA Receive context
1298  *
1299  * This is done when a connection is lost so that a Reply
1300  * can be dropped and its matching Call can be subsequently
1301  * retransmitted on a new connection.
1302  */
1303 void rpcrdma_unpin_rqst(struct rpcrdma_rep *rep)
1304 {
1305 	struct rpc_xprt *xprt = &rep->rr_rxprt->rx_xprt;
1306 	struct rpc_rqst *rqst = rep->rr_rqst;
1307 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
1308 
1309 	req->rl_reply = NULL;
1310 	rep->rr_rqst = NULL;
1311 
1312 	spin_lock(&xprt->queue_lock);
1313 	xprt_unpin_rqst(rqst);
1314 	spin_unlock(&xprt->queue_lock);
1315 }
1316 
1317 /**
1318  * rpcrdma_complete_rqst - Pass completed rqst back to RPC
1319  * @rep: RPC/RDMA Receive context
1320  *
1321  * Reconstruct the RPC reply and complete the transaction
1322  * while @rqst is still pinned to ensure the rep, rqst, and
1323  * rq_task pointers remain stable.
1324  */
1325 void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
1326 {
1327 	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
1328 	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
1329 	struct rpc_rqst *rqst = rep->rr_rqst;
1330 	int status;
1331 
1332 	switch (rep->rr_proc) {
1333 	case rdma_msg:
1334 		status = rpcrdma_decode_msg(r_xprt, rep, rqst);
1335 		break;
1336 	case rdma_nomsg:
1337 		status = rpcrdma_decode_nomsg(r_xprt, rep);
1338 		break;
1339 	case rdma_error:
1340 		status = rpcrdma_decode_error(r_xprt, rep, rqst);
1341 		break;
1342 	default:
1343 		status = -EIO;
1344 	}
1345 	if (status < 0)
1346 		goto out_badheader;
1347 
1348 out:
1349 	spin_lock(&xprt->queue_lock);
1350 	xprt_complete_rqst(rqst->rq_task, status);
1351 	xprt_unpin_rqst(rqst);
1352 	spin_unlock(&xprt->queue_lock);
1353 	return;
1354 
1355 out_badheader:
1356 	trace_xprtrdma_reply_hdr_err(rep);
1357 	r_xprt->rx_stats.bad_reply_count++;
1358 	rqst->rq_task->tk_status = status;
1359 	status = 0;
1360 	goto out;
1361 }
1362 
1363 static void rpcrdma_reply_done(struct kref *kref)
1364 {
1365 	struct rpcrdma_req *req =
1366 		container_of(kref, struct rpcrdma_req, rl_kref);
1367 
1368 	rpcrdma_complete_rqst(req->rl_reply);
1369 }
1370 
1371 /**
1372  * rpcrdma_reply_handler - Process received RPC/RDMA messages
1373  * @rep: Incoming rpcrdma_rep object to process
1374  *
1375  * Errors must result in the RPC task either being awakened, or
1376  * allowed to timeout, to discover the errors at that time.
1377  */
1378 void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
1379 {
1380 	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
1381 	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
1382 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
1383 	struct rpcrdma_req *req;
1384 	struct rpc_rqst *rqst;
1385 	u32 credits;
1386 	__be32 *p;
1387 
1388 	/* Any data means we had a useful conversation, so
1389 	 * we don't need to delay the next reconnect.
1390 	 */
1391 	if (xprt->reestablish_timeout)
1392 		xprt->reestablish_timeout = 0;
1393 
1394 	/* Fixed transport header fields */
1395 	xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
1396 			rep->rr_hdrbuf.head[0].iov_base, NULL);
1397 	p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
1398 	if (unlikely(!p))
1399 		goto out_shortreply;
1400 	rep->rr_xid = *p++;
1401 	rep->rr_vers = *p++;
1402 	credits = be32_to_cpu(*p++);
1403 	rep->rr_proc = *p++;
1404 
1405 	if (rep->rr_vers != rpcrdma_version)
1406 		goto out_badversion;
1407 
1408 	if (rpcrdma_is_bcall(r_xprt, rep))
1409 		return;
1410 
1411 	/* Match incoming rpcrdma_rep to an rpcrdma_req to
1412 	 * get context for handling any incoming chunks.
1413 	 */
1414 	spin_lock(&xprt->queue_lock);
1415 	rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
1416 	if (!rqst)
1417 		goto out_norqst;
1418 	xprt_pin_rqst(rqst);
1419 	spin_unlock(&xprt->queue_lock);
1420 
1421 	if (credits == 0)
1422 		credits = 1;	/* don't deadlock */
1423 	else if (credits > r_xprt->rx_ep->re_max_requests)
1424 		credits = r_xprt->rx_ep->re_max_requests;
1425 	if (buf->rb_credits != credits)
1426 		rpcrdma_update_cwnd(r_xprt, credits);
1427 
1428 	req = rpcr_to_rdmar(rqst);
1429 	if (unlikely(req->rl_reply))
1430 		rpcrdma_rep_put(buf, req->rl_reply);
1431 	req->rl_reply = rep;
1432 	rep->rr_rqst = rqst;
1433 
1434 	trace_xprtrdma_reply(rqst->rq_task, rep, credits);
1435 
1436 	if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
1437 		frwr_reminv(rep, &req->rl_registered);
1438 	if (!list_empty(&req->rl_registered))
1439 		frwr_unmap_async(r_xprt, req);
1440 		/* LocalInv completion will complete the RPC */
1441 	else
1442 		kref_put(&req->rl_kref, rpcrdma_reply_done);
1443 
1444 out_post:
1445 	rpcrdma_post_recvs(r_xprt,
1446 			   credits + (buf->rb_bc_srv_max_requests << 1));
1447 	return;
1448 
1449 out_norqst:
1450 	spin_unlock(&xprt->queue_lock);
1451 	trace_xprtrdma_reply_rqst_err(rep);
1452 	rpcrdma_rep_put(buf, rep);
1453 	goto out_post;
1454 
1455 out_badversion:
1456 	trace_xprtrdma_reply_vers_err(rep);
1457 	goto out;
1458 
1459 out_shortreply:
1460 	trace_xprtrdma_reply_short_err(rep);
1461 
1462 out:
1463 	rpcrdma_rep_put(buf, rep);
1464 }
1465