/* xref: /linux/net/sunrpc/xprtrdma/svc_rdma_sendto.c (revision eb01fe7abbe2d0b38824d2a93fdb4cc3eaf2ccc1) */
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a client.
 *
 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 * transport header, post all Write WRs needed for this Reply, then post
 * a Send WR conveying the transport header and the RPC message itself to
 * the client.
 *
 * svc_rdma_sendto must fully transmit the Reply before returning, as
 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 * resources referred to by the svc_rqst are also recycled at that time.
 * Therefore any resources that must remain longer must be detached
 * from the svc_rqst and released later.
 *
 * Page Management
 *
 * The I/O that performs Reply transmission is asynchronous, and may
 * complete well after sendto returns. Thus pages under I/O must be
 * removed from the svc_rqst before sendto returns.
 *
 * The logic here depends on Send Queue and completion ordering. Since
 * the Send WR is always posted last, it will always complete last. Thus
 * when it completes, it is guaranteed that all previous Write WRs have
 * also completed.
 *
 * Write WRs are constructed and posted. Each Write segment gets its own
 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 * DMA-unmap the pages under I/O for that Write segment. The Write
 * completion handler does not release any pages.
 *
 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 * Ownership of all of the Reply's pages is transferred into that
 * ctxt, the Send WR is posted, and sendto returns.
 *
 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
 * Send completion handler finally releases the Reply's pages.
 *
 * This mechanism also assumes that completions on the transport's Send
 * Completion Queue do not run in parallel. Otherwise a Write completion
 * and Send completion running at the same time could release pages that
 * are still DMA-mapped.
 *
 * Error Handling
 *
 * - If the Send WR is posted successfully, it will either complete
 *   successfully, or get flushed. Either way, the Send completion
 *   handler releases the Reply's pages.
 * - If the Send WR cannot be posted, the forward path releases
 *   the Reply's pages.
 *
 * This handles the case, without the use of page reference counting,
 * where two different Write segments send portions of the same page.
 */
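
/* A minimal sketch of the transmit path described above, with error
 * handling elided. The helpers named here are defined later in this
 * file; svc_rdma_sendto() below is the authoritative flow.
 *
 *	sctxt = svc_rdma_send_ctxt_get(rdma);
 *	// XDR-encode the transport header into sctxt->sc_stream
 *	svc_rdma_map_reply_msg(rdma, sctxt, ...);
 *	svc_rdma_save_io_pages(rqstp, sctxt);	// detach pages under I/O
 *	svc_rdma_post_send(rdma, sctxt);	// Send WR is last in the chain
 *	// ... later, svc_rdma_wc_send() -> svc_rdma_send_ctxt_put()
 *	// finally releases the Reply's pages
 */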

#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);

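/* Allocate a send_ctxt plus a persistently DMA-mapped buffer that
 * carries the RPC-over-RDMA transport header. The buffer is mapped
 * once here, occupies sc_sges[0] of the ctxt's Send WR, and stays
 * mapped until the ctxt is destroyed by svc_rdma_send_ctxts_destroy().
 */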
static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	int node = ibdev_to_node(rdma->sc_cm_id->device);
	struct svc_rdma_send_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;
	int i;

	ctxt = kzalloc_node(struct_size(ctxt, sc_sges, rdma->sc_max_send_sges),
			    GFP_KERNEL, node);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);

	ctxt->sc_rdma = rdma;
	ctxt->sc_send_wr.next = NULL;
	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
	ctxt->sc_cqe.done = svc_rdma_wc_send;
	INIT_LIST_HEAD(&ctxt->sc_write_info_list);
	ctxt->sc_xprt_buf = buffer;
	xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
		     rdma->sc_max_req_size);
	ctxt->sc_sges[0].addr = addr;

	for (i = 0; i < rdma->sc_max_send_sges; i++)
		ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	struct llist_node *node;

	while ((node = llist_del_first(&rdma->sc_send_ctxts)) != NULL) {
		ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
		ib_dma_unmap_single(rdma->sc_pd->device,
				    ctxt->sc_sges[0].addr,
				    rdma->sc_max_req_size,
				    DMA_TO_DEVICE);
		kfree(ctxt->sc_xprt_buf);
		kfree(ctxt);
	}
}

/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	struct llist_node *node;

	spin_lock(&rdma->sc_send_lock);
	node = llist_del_first(&rdma->sc_send_ctxts);
	spin_unlock(&rdma->sc_send_lock);
	if (!node)
		goto out_empty;

	ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);

out:
	rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
	xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf,
			ctxt->sc_xprt_buf, NULL);

	svc_rdma_cc_init(rdma, &ctxt->sc_reply_info.wi_cc);
	ctxt->sc_send_wr.num_sge = 0;
	ctxt->sc_cur_sge_no = 0;
	ctxt->sc_page_count = 0;
	ctxt->sc_wr_chain = &ctxt->sc_send_wr;
	ctxt->sc_sqecount = 1;

	return ctxt;

out_empty:
	ctxt = svc_rdma_send_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}
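
/* Example caller (a sketch based on svc_rdma_sendto() below): a NULL
 * return is treated as a resource shortage that ends the connection.
 *
 *	sctxt = svc_rdma_send_ctxt_get(rdma);
 *	if (!sctxt)
 *		goto drop_connection;
 */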

static void svc_rdma_send_ctxt_release(struct svcxprt_rdma *rdma,
				       struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_device *device = rdma->sc_cm_id->device;
	unsigned int i;

	svc_rdma_write_chunk_release(rdma, ctxt);
	svc_rdma_reply_chunk_release(rdma, ctxt);

	if (ctxt->sc_page_count)
		release_pages(ctxt->sc_pages, ctxt->sc_page_count);

	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
		trace_svcrdma_dma_unmap_page(&ctxt->sc_cid,
					     ctxt->sc_sges[i].addr,
					     ctxt->sc_sges[i].length);
		ib_dma_unmap_page(device,
				  ctxt->sc_sges[i].addr,
				  ctxt->sc_sges[i].length,
				  DMA_TO_DEVICE);
	}

	llist_add(&ctxt->sc_node, &rdma->sc_send_ctxts);
}

static void svc_rdma_send_ctxt_put_async(struct work_struct *work)
{
	struct svc_rdma_send_ctxt *ctxt;

	ctxt = container_of(work, struct svc_rdma_send_ctxt, sc_work);
	svc_rdma_send_ctxt_release(ctxt->sc_rdma, ctxt);
}

/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 * Pages left in sc_pages are DMA unmapped and released.
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
{
	INIT_WORK(&ctxt->sc_work, svc_rdma_send_ctxt_put_async);
	queue_work(svcrdma_wq, &ctxt->sc_work);
}

/**
 * svc_rdma_wake_send_waiters - manage Send Queue accounting
 * @rdma: controlling transport
 * @avail: Number of additional SQEs that are now available
 *
 */
void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail)
{
	atomic_add(avail, &rdma->sc_sq_avail);
	smp_mb__after_atomic();
	if (unlikely(waitqueue_active(&rdma->sc_send_wait)))
		wake_up(&rdma->sc_send_wait);
}
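
/* Accounting example: a Reply whose WR chain carries two Write WRs
 * plus the Send WR has sc_sqecount == 3 and consumes three SQEs.
 * When the Send WR completes, svc_rdma_wc_send() passes the entire
 * chain's SQE count back here in a single call.
 */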

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_send_ctxt *ctxt =
		container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);

	svc_rdma_wake_send_waiters(rdma, ctxt->sc_sqecount);

	if (unlikely(wc->status != IB_WC_SUCCESS))
		goto flushed;

	trace_svcrdma_wc_send(&ctxt->sc_cid);
	svc_rdma_send_ctxt_put(rdma, ctxt);
	return;

flushed:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		trace_svcrdma_wc_send_err(wc, &ctxt->sc_cid);
	else
		trace_svcrdma_wc_send_flush(wc, &ctxt->sc_cid);
	svc_rdma_send_ctxt_put(rdma, ctxt);
	svc_xprt_deferred_close(&rdma->sc_xprt);
}

/**
 * svc_rdma_post_send - Post a WR chain to the Send Queue
 * @rdma: transport context
 * @ctxt: WR chain to post
 *
 * Copy fields in @ctxt to stack variables in order to guarantee
 * that these values remain available after the ib_post_send() call.
 * In some error flow cases, svc_rdma_wc_send() releases @ctxt.
 *
 * Note there is potential for starvation when the Send Queue is
 * full because there is no order to when waiting threads are
 * awoken. The transport is typically provisioned with a deep
 * enough Send Queue that SQ exhaustion should be a rare event.
 *
 * Return values:
 *   %0: @ctxt's WR chain was posted successfully
 *   %-ENOTCONN: The connection was lost
 */
int svc_rdma_post_send(struct svcxprt_rdma *rdma,
		       struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_send_wr *first_wr = ctxt->sc_wr_chain;
	struct ib_send_wr *send_wr = &ctxt->sc_send_wr;
	const struct ib_send_wr *bad_wr = first_wr;
	struct rpc_rdma_cid cid = ctxt->sc_cid;
	int ret, sqecount = ctxt->sc_sqecount;

	might_sleep();

	/* Sync the transport header buffer */
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      send_wr->sg_list[0].addr,
				      send_wr->sg_list[0].length,
				      DMA_TO_DEVICE);

	/* If the SQ is full, wait until an SQ entry is available */
	while (!test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags)) {
		if (atomic_sub_return(sqecount, &rdma->sc_sq_avail) < 0) {
			svc_rdma_wake_send_waiters(rdma, sqecount);

			/* When the transport is torn down, assume
			 * ib_drain_sq() will trigger enough Send
			 * completions to wake us. The XPT_CLOSE test
			 * above should then cause the while loop to
			 * exit.
			 */
			percpu_counter_inc(&svcrdma_stat_sq_starve);
			trace_svcrdma_sq_full(rdma, &cid);
			wait_event(rdma->sc_send_wait,
				   atomic_read(&rdma->sc_sq_avail) > 0);
			trace_svcrdma_sq_retry(rdma, &cid);
			continue;
		}

		trace_svcrdma_post_send(ctxt);
		ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
		if (ret) {
			trace_svcrdma_sq_post_err(rdma, &cid, ret);
			svc_xprt_deferred_close(&rdma->sc_xprt);

			/* If even one WR was posted, there will be a
			 * Send completion that bumps sc_sq_avail.
			 */
			if (bad_wr == first_wr) {
				svc_rdma_wake_send_waiters(rdma, sqecount);
				break;
			}
		}
		return 0;
	}
	return -ENOTCONN;
}
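
/* Typical caller pattern (a sketch; svc_rdma_send_reply_msg() and
 * svc_rdma_send_error_msg() below are the real flows):
 *
 *	sctxt->sc_send_wr.opcode = IB_WR_SEND;
 *	if (svc_rdma_post_send(rdma, sctxt))
 *		svc_rdma_send_ctxt_put(rdma, sctxt);
 *	// on success, ownership of sctxt has passed to the Send
 *	// completion handler; the caller must not touch it again
 */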

/**
 * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply Read list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt)
{
	/* RPC-over-RDMA version 1 replies never have a Read list. */
	return xdr_stream_encode_item_absent(&sctxt->sc_stream);
}

/**
 * svc_rdma_encode_write_segment - Encode one Write segment
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 * @remaining: remaining bytes of the payload left in the Write chunk
 * @segno: which segment in the chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write segment, and updates @remaining
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_segment(struct svc_rdma_send_ctxt *sctxt,
					     const struct svc_rdma_chunk *chunk,
					     u32 *remaining, unsigned int segno)
{
	const struct svc_rdma_segment *segment = &chunk->ch_segments[segno];
	const size_t len = rpcrdma_segment_maxsz * sizeof(__be32);
	u32 length;
	__be32 *p;

	p = xdr_reserve_space(&sctxt->sc_stream, len);
	if (!p)
		return -EMSGSIZE;

	length = min_t(u32, *remaining, segment->rs_length);
	*remaining -= length;
	xdr_encode_rdma_segment(p, segment->rs_handle, length,
				segment->rs_offset);
	trace_svcrdma_encode_wseg(sctxt, segno, segment->rs_handle, length,
				  segment->rs_offset);
	return len;
}
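
/* For reference, each segment encoded above occupies
 * rpcrdma_segment_maxsz XDR words (16 bytes) on the wire. In XDR
 * notation (paraphrased; RFC 8166 has the normative definition):
 *
 *	struct rdma_segment {
 *		uint32	handle;		// STag/R_key of the registered region
 *		uint32	length;		// number of bytes actually written
 *		uint64	offset;		// offset of the region at the peer
 *	};
 */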

/**
 * svc_rdma_encode_write_chunk - Encode one Write chunk
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 *
 * Copy a Write chunk from the Call transport header to the
 * Reply transport header. Update each segment's length field
 * to reflect the number of bytes written in that segment.
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_chunk(struct svc_rdma_send_ctxt *sctxt,
					   const struct svc_rdma_chunk *chunk)
{
	u32 remaining = chunk->ch_payload_length;
	unsigned int segno;
	ssize_t len, ret;

	len = 0;
	ret = xdr_stream_encode_item_present(&sctxt->sc_stream);
	if (ret < 0)
		return ret;
	len += ret;

	ret = xdr_stream_encode_u32(&sctxt->sc_stream, chunk->ch_segcount);
	if (ret < 0)
		return ret;
	len += ret;

	for (segno = 0; segno < chunk->ch_segcount; segno++) {
		ret = svc_rdma_encode_write_segment(sctxt, chunk, &remaining, segno);
		if (ret < 0)
			return ret;
		len += ret;
	}

	return len;
}
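
/* On the wire, the chunk encoded above is one Write list item: a
 * "present" discriminator followed by a counted array of segments.
 * In XDR notation (paraphrased; RFC 8166 is normative):
 *
 *	struct write_chunk {
 *		struct rdma_segment target<>;
 *	};
 *
 * The ch_segcount value becomes the XDR array element count.
 */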

/**
 * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Write list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_list(struct svc_rdma_recv_ctxt *rctxt,
					  struct svc_rdma_send_ctxt *sctxt)
{
	struct svc_rdma_chunk *chunk;
	ssize_t len, ret;

	len = 0;
	pcl_for_each_chunk(chunk, &rctxt->rc_write_pcl) {
		ret = svc_rdma_encode_write_chunk(sctxt, chunk);
		if (ret < 0)
			return ret;
		len += ret;
	}

	/* Terminate the Write list */
	ret = xdr_stream_encode_item_absent(&sctxt->sc_stream);
	if (ret < 0)
		return ret;

	return len + ret;
}

/**
 * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the Reply chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Reply chunk
 *   %-EMSGSIZE on XDR buffer overflow
 *   %-E2BIG if the RPC message is larger than the Reply chunk
 */
static ssize_t
svc_rdma_encode_reply_chunk(struct svc_rdma_recv_ctxt *rctxt,
			    struct svc_rdma_send_ctxt *sctxt,
			    unsigned int length)
{
	struct svc_rdma_chunk *chunk;

	if (pcl_is_empty(&rctxt->rc_reply_pcl))
		return xdr_stream_encode_item_absent(&sctxt->sc_stream);

	chunk = pcl_first_chunk(&rctxt->rc_reply_pcl);
	if (length > chunk->ch_length)
		return -E2BIG;

	chunk->ch_payload_length = length;
	return svc_rdma_encode_write_chunk(sctxt, chunk);
}

struct svc_rdma_map_data {
	struct svcxprt_rdma		*md_rdma;
	struct svc_rdma_send_ctxt	*md_ctxt;
};

/**
 * svc_rdma_page_dma_map - DMA map one page
 * @data: pointer to arguments
 * @page: struct page to DMA map
 * @offset: offset into the page
 * @len: number of bytes to map
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if the page cannot be DMA mapped
 */
static int svc_rdma_page_dma_map(void *data, struct page *page,
				 unsigned long offset, unsigned int len)
{
	struct svc_rdma_map_data *args = data;
	struct svcxprt_rdma *rdma = args->md_rdma;
	struct svc_rdma_send_ctxt *ctxt = args->md_ctxt;
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	++ctxt->sc_cur_sge_no;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

	trace_svcrdma_dma_map_page(&ctxt->sc_cid, dma_addr, len);
	ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
	ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
	ctxt->sc_send_wr.num_sge++;
	return 0;

out_maperr:
	trace_svcrdma_dma_map_err(&ctxt->sc_cid, dma_addr, len);
	return -EIO;
}

/**
 * svc_rdma_iov_dma_map - DMA map an iovec
 * @data: pointer to arguments
 * @iov: kvec to DMA map
 *
 * ib_dma_map_page() is used here because svc_rdma_dma_unmap()
 * handles DMA-unmap and it uses ib_dma_unmap_page() exclusively.
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if the iovec cannot be DMA mapped
 */
static int svc_rdma_iov_dma_map(void *data, const struct kvec *iov)
{
	if (!iov->iov_len)
		return 0;
	return svc_rdma_page_dma_map(data, virt_to_page(iov->iov_base),
				     offset_in_page(iov->iov_base),
				     iov->iov_len);
}

/**
 * svc_rdma_xb_dma_map - DMA map all segments of an xdr_buf
 * @xdr: xdr_buf containing portion of an RPC message to transmit
 * @data: pointer to arguments
 *
 * Returns:
 *   On success, returns the total length in bytes of @xdr
 *   %-EIO if DMA mapping failed
 *
 * On failure, any DMA mappings that have been already done must be
 * unmapped by the caller.
 */
static int svc_rdma_xb_dma_map(const struct xdr_buf *xdr, void *data)
{
	unsigned int len, remaining;
	unsigned long pageoff;
	struct page **ppages;
	int ret;

	ret = svc_rdma_iov_dma_map(data, &xdr->head[0]);
	if (ret < 0)
		return ret;

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	pageoff = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - pageoff, remaining);

		ret = svc_rdma_page_dma_map(data, *ppages++, pageoff, len);
		if (ret < 0)
			return ret;

		remaining -= len;
		pageoff = 0;
	}

	ret = svc_rdma_iov_dma_map(data, &xdr->tail[0]);
	if (ret < 0)
		return ret;

	return xdr->len;
}
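
/* Example: an xdr_buf with a non-empty head kvec, three pages' worth
 * of page_len starting on a page boundary, and a non-empty tail kvec
 * maps to five SGEs here, in addition to the transport header SGE
 * already set up in sc_sges[0].
 */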

struct svc_rdma_pullup_data {
	u8		*pd_dest;
	unsigned int	pd_length;
	unsigned int	pd_num_sges;
};

/**
 * svc_rdma_xb_count_sges - Count how many SGEs will be needed
 * @xdr: xdr_buf containing portion of an RPC message to transmit
 * @data: pointer to arguments
 *
 * Returns:
 *   Always zero. The number of SGEs needed to Send the contents
 *   of @xdr inline is accumulated in the pd_num_sges field of @data.
 */
static int svc_rdma_xb_count_sges(const struct xdr_buf *xdr,
				  void *data)
{
	struct svc_rdma_pullup_data *args = data;
	unsigned int remaining;
	unsigned long offset;

	if (xdr->head[0].iov_len)
		++args->pd_num_sges;

	offset = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		++args->pd_num_sges;
		remaining -= min_t(u32, PAGE_SIZE - offset, remaining);
		offset = 0;
	}

	if (xdr->tail[0].iov_len)
		++args->pd_num_sges;

	args->pd_length += xdr->len;
	return 0;
}

/**
 * svc_rdma_pull_up_needed - Determine whether to use pull-up
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @write_pcl: Write chunk list provided by client
 * @xdr: xdr_buf containing RPC message to transmit
 *
 * Returns:
 *   %true if pull-up must be used
 *   %false otherwise
 */
static bool svc_rdma_pull_up_needed(const struct svcxprt_rdma *rdma,
				    const struct svc_rdma_send_ctxt *sctxt,
				    const struct svc_rdma_pcl *write_pcl,
				    const struct xdr_buf *xdr)
{
	/* Resources needed for the transport header */
	struct svc_rdma_pullup_data args = {
		.pd_length	= sctxt->sc_hdrbuf.len,
		.pd_num_sges	= 1,
	};
	int ret;

	ret = pcl_process_nonpayloads(write_pcl, xdr,
				      svc_rdma_xb_count_sges, &args);
	if (ret < 0)
		return false;

	if (args.pd_length < RPCRDMA_PULLUP_THRESH)
		return true;
	return args.pd_num_sges >= rdma->sc_max_send_sges;
}
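
/* Worked example (illustrative numbers): a Reply with a 104-byte
 * transport header and a 240-byte message totals 344 bytes, which is
 * below RPCRDMA_PULLUP_THRESH, so it is copied into the header buffer
 * and sent as a single SGE. Independently, a device advertising only
 * four send SGEs forces pull-up for any Reply that would otherwise
 * need four or more SGEs in total, the header SGE included.
 */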

/**
 * svc_rdma_xb_linearize - Copy region of xdr_buf to flat buffer
 * @xdr: xdr_buf containing portion of an RPC message to copy
 * @data: pointer to arguments
 *
 * Returns:
 *   Always zero.
 */
static int svc_rdma_xb_linearize(const struct xdr_buf *xdr,
				 void *data)
{
	struct svc_rdma_pullup_data *args = data;
	unsigned int len, remaining;
	unsigned long pageoff;
	struct page **ppages;

	if (xdr->head[0].iov_len) {
		memcpy(args->pd_dest, xdr->head[0].iov_base, xdr->head[0].iov_len);
		args->pd_dest += xdr->head[0].iov_len;
	}

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	pageoff = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - pageoff, remaining);
		memcpy(args->pd_dest, page_address(*ppages) + pageoff, len);
		remaining -= len;
		args->pd_dest += len;
		pageoff = 0;
		ppages++;
	}

	if (xdr->tail[0].iov_len) {
		memcpy(args->pd_dest, xdr->tail[0].iov_base, xdr->tail[0].iov_len);
		args->pd_dest += xdr->tail[0].iov_len;
	}

	args->pd_length += xdr->len;
	return 0;
}

/**
 * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
 * @write_pcl: Write chunk list provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * The device is not capable of sending the reply directly.
 * Assemble the elements of @xdr into the transport header buffer.
 *
 * Assumptions:
 *  pull_up_needed has determined that @xdr will fit in the buffer.
 *
 * Returns:
 *   %0 if pull-up was successful
 *   %-EMSGSIZE if a buffer manipulation problem occurred
 */
static int svc_rdma_pull_up_reply_msg(const struct svcxprt_rdma *rdma,
				      struct svc_rdma_send_ctxt *sctxt,
				      const struct svc_rdma_pcl *write_pcl,
				      const struct xdr_buf *xdr)
{
	struct svc_rdma_pullup_data args = {
		.pd_dest	= sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len,
	};
	int ret;

	ret = pcl_process_nonpayloads(write_pcl, xdr,
				      svc_rdma_xb_linearize, &args);
	if (ret < 0)
		return ret;

	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len + args.pd_length;
	trace_svcrdma_send_pullup(sctxt, args.pd_length);
	return 0;
}

/**
 * svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @write_pcl: Write chunk list provided by client
 * @reply_pcl: Reply chunk provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * Returns:
 *   %0 if DMA mapping was successful.
 *   %-EMSGSIZE if a buffer manipulation problem occurred
 *   %-EIO if DMA mapping failed
 *
 * The Send WR's num_sge field is set in all cases.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
			   struct svc_rdma_send_ctxt *sctxt,
			   const struct svc_rdma_pcl *write_pcl,
			   const struct svc_rdma_pcl *reply_pcl,
			   const struct xdr_buf *xdr)
{
	struct svc_rdma_map_data args = {
		.md_rdma	= rdma,
		.md_ctxt	= sctxt,
	};

	/* Set up the (persistently-mapped) transport header SGE. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;

	/* If there is a Reply chunk, nothing follows the transport
	 * header, so there is nothing to map.
	 */
	if (!pcl_is_empty(reply_pcl))
		return 0;

	/* For pull-up, svc_rdma_post_send() will sync the transport
	 * header. No additional DMA mapping is necessary.
	 */
	if (svc_rdma_pull_up_needed(rdma, sctxt, write_pcl, xdr))
		return svc_rdma_pull_up_reply_msg(rdma, sctxt, write_pcl, xdr);

	return pcl_process_nonpayloads(write_pcl, xdr,
				       svc_rdma_xb_dma_map, &args);
}

/* The svc_rqst and all resources it owns are released as soon as
 * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
 * so they are released by the Send completion handler.
 */
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
				   struct svc_rdma_send_ctxt *ctxt)
{
	int i, pages = rqstp->rq_next_page - rqstp->rq_respages;

	ctxt->sc_page_count += pages;
	for (i = 0; i < pages; i++) {
		ctxt->sc_pages[i] = rqstp->rq_respages[i];
		rqstp->rq_respages[i] = NULL;
	}

	/* Prevent svc_xprt_release from releasing pages in rq_pages */
	rqstp->rq_next_page = rqstp->rq_respages;
}

/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may Send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * Assumptions:
 * - The Reply's transport header will never be larger than a page.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *sctxt,
				   const struct svc_rdma_recv_ctxt *rctxt,
				   struct svc_rqst *rqstp)
{
	struct ib_send_wr *send_wr = &sctxt->sc_send_wr;
	int ret;

	ret = svc_rdma_map_reply_msg(rdma, sctxt, &rctxt->rc_write_pcl,
				     &rctxt->rc_reply_pcl, &rqstp->rq_res);
	if (ret < 0)
		return ret;

	/* Transfer pages involved in RDMA Writes to the sctxt's
	 * page array. Completion handling releases these pages.
	 */
	svc_rdma_save_io_pages(rqstp, sctxt);

	if (rctxt->rc_inv_rkey) {
		send_wr->opcode = IB_WR_SEND_WITH_INV;
		send_wr->ex.invalidate_rkey = rctxt->rc_inv_rkey;
	} else {
		send_wr->opcode = IB_WR_SEND;
	}

	return svc_rdma_post_send(rdma, sctxt);
}

/**
 * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response
 * @rdma: controlling transport context
 * @sctxt: Send context for the response
 * @rctxt: Receive context for incoming bad message
 * @status: negative errno indicating error that occurred
 *
 * Given the client-provided Read, Write, and Reply chunks, the
 * server was not able to parse the Call or form a complete Reply.
 * Return an RDMA_ERROR message so the client can retire the RPC
 * transaction.
 *
 * The caller does not have to release @sctxt. It is released by
 * Send completion, or by this function on error.
 */
void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
			     struct svc_rdma_send_ctxt *sctxt,
			     struct svc_rdma_recv_ctxt *rctxt,
			     int status)
{
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	__be32 *p;

	rpcrdma_set_xdrlen(&sctxt->sc_hdrbuf, 0);
	xdr_init_encode(&sctxt->sc_stream, &sctxt->sc_hdrbuf,
			sctxt->sc_xprt_buf, NULL);

	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto put_ctxt;

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = rdma_error;

	switch (status) {
	case -EPROTONOSUPPORT:
		p = xdr_reserve_space(&sctxt->sc_stream, 3 * sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p++ = err_vers;
		*p++ = rpcrdma_version;
		*p = rpcrdma_version;
		trace_svcrdma_err_vers(*rdma_argp);
		break;
	default:
		p = xdr_reserve_space(&sctxt->sc_stream, sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p = err_chunk;
		trace_svcrdma_err_chunk(*rdma_argp);
	}

	/* Remote Invalidation is skipped for simplicity. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_send_wr.opcode = IB_WR_SEND;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
	if (svc_rdma_post_send(rdma, sctxt))
		goto put_ctxt;
	return;

put_ctxt:
	svc_rdma_send_ctxt_put(rdma, sctxt);
}
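
/* Wire layout of the RDMA_ERROR message constructed above:
 *
 *	<xid> <vers> <credits> RDMA_ERROR
 *	followed by either
 *	  ERR_VERS <rdma_vers_low> <rdma_vers_high>	(version mismatch)
 *	or
 *	  ERR_CHUNK					(all other errors)
 */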

/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *	%0 if an RPC reply has been successfully posted,
 *	%-ENOMEM if a resource shortage occurred (connection is lost),
 *	%-ENOTCONN if posting failed (connection is lost).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	struct svc_rdma_send_ctxt *sctxt;
	unsigned int rc_size;
	__be32 *p;
	int ret;

	ret = -ENOTCONN;
	if (svc_xprt_is_dead(xprt))
		goto drop_connection;

	ret = -ENOMEM;
	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		goto drop_connection;

	ret = -EMSGSIZE;
	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto put_ctxt;

	ret = svc_rdma_prepare_write_list(rdma, &rctxt->rc_write_pcl, sctxt,
					  &rqstp->rq_res);
	if (ret < 0)
		goto put_ctxt;

	rc_size = 0;
	if (!pcl_is_empty(&rctxt->rc_reply_pcl)) {
		ret = svc_rdma_prepare_reply_chunk(rdma, &rctxt->rc_write_pcl,
						   &rctxt->rc_reply_pcl, sctxt,
						   &rqstp->rq_res);
		if (ret < 0)
			goto reply_chunk;
		rc_size = ret;
	}

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = pcl_is_empty(&rctxt->rc_reply_pcl) ? rdma_msg : rdma_nomsg;

	ret = svc_rdma_encode_read_list(sctxt);
	if (ret < 0)
		goto put_ctxt;
	ret = svc_rdma_encode_write_list(rctxt, sctxt);
	if (ret < 0)
		goto put_ctxt;
	ret = svc_rdma_encode_reply_chunk(rctxt, sctxt, rc_size);
	if (ret < 0)
		goto put_ctxt;

	ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
	if (ret < 0)
		goto put_ctxt;
	return 0;

reply_chunk:
	if (ret != -E2BIG && ret != -EINVAL)
		goto put_ctxt;

	/* Send completion releases payload pages that were part
	 * of previously posted RDMA Writes.
	 */
	svc_rdma_save_io_pages(rqstp, sctxt);
	svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret);
	return 0;

put_ctxt:
	svc_rdma_send_ctxt_put(rdma, sctxt);
drop_connection:
	trace_svcrdma_send_err(rqstp, ret);
	svc_xprt_deferred_close(&rdma->sc_xprt);
	return -ENOTCONN;
}

/**
 * svc_rdma_result_payload - special processing for a result payload
 * @rqstp: RPC transaction context
 * @offset: payload's byte offset in @rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Assign the passed-in result payload to the current Write chunk,
 * and advance rc_cur_result_payload to the next Write chunk, if
 * there is one.
 *
 * Return values:
 *   %0 if successful or nothing needed to be done
 *   %-E2BIG if the payload was larger than the Write chunk
 */
int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset,
			    unsigned int length)
{
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	struct svc_rdma_chunk *chunk;

	chunk = rctxt->rc_cur_result_payload;
	if (!length || !chunk)
		return 0;
	rctxt->rc_cur_result_payload =
		pcl_next_chunk(&rctxt->rc_write_pcl, chunk);

	if (length > chunk->ch_length)
		return -E2BIG;
	chunk->ch_position = offset;
	chunk->ch_payload_length = length;
	return 0;
}