xref: /linux/net/sunrpc/xprtrdma/svc_rdma_rw.c (revision 808094fcbf4196be0feb17afbbdc182ec95c8cec)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2016-2018 Oracle.  All rights reserved.
4  *
5  * Use the core R/W API to move RPC-over-RDMA Read and Write chunks.
6  */
7 
8 #include <rdma/rw.h>
9 
10 #include <linux/sunrpc/xdr.h>
11 #include <linux/sunrpc/rpc_rdma.h>
12 #include <linux/sunrpc/svc_rdma.h>
13 
14 #include "xprt_rdma.h"
15 #include <trace/events/rpcrdma.h>
16 
17 static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
18 static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);
19 
20 /* Each R/W context contains state for one chain of RDMA Read or
21  * Write Work Requests.
22  *
23  * Each WR chain handles a single contiguous server-side buffer,
24  * because scatterlist entries after the first have to start on
25  * page alignment. xdr_buf iovecs cannot guarantee alignment.
26  *
27  * Each WR chain handles only one R_key. Each RPC-over-RDMA segment
28  * from a client may contain a unique R_key, so each WR chain moves
29  * up to one segment at a time.
30  *
31  * The scatterlist makes this data structure over 4KB in size. To
32  * make it less likely to fail, and to handle the allocation for
33  * smaller I/O requests without disabling bottom-halves, these
34  * contexts are created on demand, but cached and reused until the
35  * controlling svcxprt_rdma is destroyed.
36  */
37 struct svc_rdma_rw_ctxt {
38 	struct list_head	rw_list;
39 	struct rdma_rw_ctx	rw_ctx;
40 	unsigned int		rw_nents;
41 	struct sg_table		rw_sg_table;
42 	struct scatterlist	rw_first_sgl[];
43 };
44 
45 static inline struct svc_rdma_rw_ctxt *
46 svc_rdma_next_ctxt(struct list_head *list)
47 {
48 	return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt,
49 					rw_list);
50 }
51 
52 static struct svc_rdma_rw_ctxt *
53 svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
54 {
55 	struct svc_rdma_rw_ctxt *ctxt;
56 
57 	spin_lock(&rdma->sc_rw_ctxt_lock);
58 
59 	ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
60 	if (ctxt) {
61 		list_del(&ctxt->rw_list);
62 		spin_unlock(&rdma->sc_rw_ctxt_lock);
63 	} else {
64 		spin_unlock(&rdma->sc_rw_ctxt_lock);
65 		ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
66 			       GFP_KERNEL);
67 		if (!ctxt)
68 			goto out_noctx;
69 		INIT_LIST_HEAD(&ctxt->rw_list);
70 	}
71 
72 	ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
73 	if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
74 				   ctxt->rw_sg_table.sgl,
75 				   SG_CHUNK_SIZE))
76 		goto out_free;
77 	return ctxt;
78 
79 out_free:
80 	kfree(ctxt);
81 out_noctx:
82 	trace_svcrdma_no_rwctx_err(rdma, sges);
83 	return NULL;
84 }
85 
86 static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
87 				 struct svc_rdma_rw_ctxt *ctxt)
88 {
89 	sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE);
90 
91 	spin_lock(&rdma->sc_rw_ctxt_lock);
92 	list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
93 	spin_unlock(&rdma->sc_rw_ctxt_lock);
94 }
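/* For illustration only: a caller that moves a single segment pairs
 * the helpers above roughly like this (error handling elided; the
 * real callers are svc_rdma_build_writes and
 * svc_rdma_build_read_segment below, and nr_sges, offset, handle,
 * dir, and cc stand for values the caller already holds):
 *
 *	ctxt = svc_rdma_get_rw_ctxt(rdma, nr_sges);
 *	if (!ctxt)
 *		return -ENOMEM;
 *	... fill ctxt->rw_sg_table.sgl and set ctxt->rw_nents ...
 *	ret = svc_rdma_rw_ctx_init(rdma, ctxt, offset, handle, dir);
 *	if (ret < 0)
 *		return -EIO;	(the ctxt was already put back)
 *	list_add(&ctxt->rw_list, &cc->cc_rwctxts);
 *	cc->cc_sqecount += ret;
 */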
95 
96 /**
97  * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts
98  * @rdma: transport about to be destroyed
99  *
100  */
101 void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
102 {
103 	struct svc_rdma_rw_ctxt *ctxt;
104 
105 	while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
106 		list_del(&ctxt->rw_list);
107 		kfree(ctxt);
108 	}
109 }
110 
111 /**
112  * svc_rdma_rw_ctx_init - Prepare a R/W context for I/O
113  * @rdma: controlling transport instance
114  * @ctxt: R/W context to prepare
115  * @offset: RDMA offset
116  * @handle: RDMA tag/handle
117  * @direction: I/O direction
118  *
119  * On success, returns the number of WQEs that will be needed
120  * on the Send Queue, or a negative errno.
121  */
122 static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma,
123 				struct svc_rdma_rw_ctxt *ctxt,
124 				u64 offset, u32 handle,
125 				enum dma_data_direction direction)
126 {
127 	int ret;
128 
129 	ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num,
130 			       ctxt->rw_sg_table.sgl, ctxt->rw_nents,
131 			       0, offset, handle, direction);
132 	if (unlikely(ret < 0)) {
133 		svc_rdma_put_rw_ctxt(rdma, ctxt);
134 		trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret);
135 	}
136 	return ret;
137 }
138 
139 /* A chunk context tracks all I/O for moving one Read or Write
140  * chunk. This is a set of rdma_rw's that handle data movement
141  * for all segments of one chunk.
142  *
143  * These are small, acquired with a single allocator call, and
144  * no more than one is needed per chunk. They are allocated on
145  * demand, and not cached.
146  */
147 struct svc_rdma_chunk_ctxt {
148 	struct rpc_rdma_cid	cc_cid;
149 	struct ib_cqe		cc_cqe;
150 	struct svcxprt_rdma	*cc_rdma;
151 	struct list_head	cc_rwctxts;
152 	int			cc_sqecount;
153 };
154 
155 static void svc_rdma_cc_cid_init(struct svcxprt_rdma *rdma,
156 				 struct rpc_rdma_cid *cid)
157 {
158 	cid->ci_queue_id = rdma->sc_sq_cq->res.id;
159 	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
160 }
161 
162 static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
163 			     struct svc_rdma_chunk_ctxt *cc)
164 {
165 	svc_rdma_cc_cid_init(rdma, &cc->cc_cid);
166 	cc->cc_rdma = rdma;
167 
168 	INIT_LIST_HEAD(&cc->cc_rwctxts);
169 	cc->cc_sqecount = 0;
170 }
171 
172 static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
173 				enum dma_data_direction dir)
174 {
175 	struct svcxprt_rdma *rdma = cc->cc_rdma;
176 	struct svc_rdma_rw_ctxt *ctxt;
177 
178 	while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
179 		list_del(&ctxt->rw_list);
180 
181 		rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
182 				    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
183 				    ctxt->rw_nents, dir);
184 		svc_rdma_put_rw_ctxt(rdma, ctxt);
185 	}
186 }
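/* For illustration, the lifetime of a chunk context is roughly:
 *
 *	svc_rdma_cc_init(rdma, cc);
 *	... one svc_rdma_rw_ctxt per segment is added to
 *	    cc->cc_rwctxts and its WQE count accumulated in
 *	    cc->cc_sqecount ...
 *	svc_rdma_post_chunk_ctxt(cc);
 *	... a single Write or Read completion fires; the handler
 *	    returns cc->cc_sqecount to sc_sq_avail and frees the
 *	    embedding write/read info, which in turn calls
 *	    svc_rdma_cc_release() ...
 */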
187 
188 /* State for sending a Write or Reply chunk.
189  *  - Tracks progress of writing one chunk over all its segments
190  *  - Stores arguments for the SGL constructor functions
191  */
192 struct svc_rdma_write_info {
193 	const struct svc_rdma_chunk	*wi_chunk;
194 
195 	/* write state of this chunk */
196 	unsigned int		wi_seg_off;
197 	unsigned int		wi_seg_no;
198 
199 	/* SGL constructor arguments */
200 	const struct xdr_buf	*wi_xdr;
201 	unsigned char		*wi_base;
202 	unsigned int		wi_next_off;
203 
204 	struct svc_rdma_chunk_ctxt	wi_cc;
205 };
206 
207 static struct svc_rdma_write_info *
208 svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma,
209 			  const struct svc_rdma_chunk *chunk)
210 {
211 	struct svc_rdma_write_info *info;
212 
213 	info = kmalloc(sizeof(*info), GFP_KERNEL);
214 	if (!info)
215 		return info;
216 
217 	info->wi_chunk = chunk;
218 	info->wi_seg_off = 0;
219 	info->wi_seg_no = 0;
220 	svc_rdma_cc_init(rdma, &info->wi_cc);
221 	info->wi_cc.cc_cqe.done = svc_rdma_write_done;
222 	return info;
223 }
224 
225 static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
226 {
227 	svc_rdma_cc_release(&info->wi_cc, DMA_TO_DEVICE);
228 	kfree(info);
229 }
230 
231 /**
232  * svc_rdma_write_done - Write chunk completion
233  * @cq: controlling Completion Queue
234  * @wc: Work Completion
235  *
236  * Pages under I/O are freed by a subsequent Send completion.
237  */
238 static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
239 {
240 	struct ib_cqe *cqe = wc->wr_cqe;
241 	struct svc_rdma_chunk_ctxt *cc =
242 			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
243 	struct svcxprt_rdma *rdma = cc->cc_rdma;
244 	struct svc_rdma_write_info *info =
245 			container_of(cc, struct svc_rdma_write_info, wi_cc);
246 
247 	trace_svcrdma_wc_write(wc, &cc->cc_cid);
248 
249 	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
250 	wake_up(&rdma->sc_send_wait);
251 
252 	if (unlikely(wc->status != IB_WC_SUCCESS))
253 		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
254 
255 	svc_rdma_write_info_free(info);
256 }
257 
258 /* State for pulling a Read chunk.
259  */
260 struct svc_rdma_read_info {
261 	struct svc_rqst			*ri_rqst;
262 	struct svc_rdma_recv_ctxt	*ri_readctxt;
263 	unsigned int			ri_pageno;
264 	unsigned int			ri_pageoff;
265 	unsigned int			ri_totalbytes;
266 
267 	struct svc_rdma_chunk_ctxt	ri_cc;
268 };
269 
270 static struct svc_rdma_read_info *
271 svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
272 {
273 	struct svc_rdma_read_info *info;
274 
275 	info = kmalloc(sizeof(*info), GFP_KERNEL);
276 	if (!info)
277 		return info;
278 
279 	svc_rdma_cc_init(rdma, &info->ri_cc);
280 	info->ri_cc.cc_cqe.done = svc_rdma_wc_read_done;
281 	return info;
282 }
283 
284 static void svc_rdma_read_info_free(struct svc_rdma_read_info *info)
285 {
286 	svc_rdma_cc_release(&info->ri_cc, DMA_FROM_DEVICE);
287 	kfree(info);
288 }
289 
290 /**
291  * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
292  * @cq: controlling Completion Queue
293  * @wc: Work Completion
294  *
295  */
296 static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
297 {
298 	struct ib_cqe *cqe = wc->wr_cqe;
299 	struct svc_rdma_chunk_ctxt *cc =
300 			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
301 	struct svcxprt_rdma *rdma = cc->cc_rdma;
302 	struct svc_rdma_read_info *info =
303 			container_of(cc, struct svc_rdma_read_info, ri_cc);
304 
305 	trace_svcrdma_wc_read(wc, &cc->cc_cid);
306 
307 	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
308 	wake_up(&rdma->sc_send_wait);
309 
310 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
311 		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
312 		svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt);
313 	} else {
314 		spin_lock(&rdma->sc_rq_dto_lock);
315 		list_add_tail(&info->ri_readctxt->rc_list,
316 			      &rdma->sc_read_complete_q);
317 		/* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
318 		set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
319 		spin_unlock(&rdma->sc_rq_dto_lock);
320 
321 		svc_xprt_enqueue(&rdma->sc_xprt);
322 	}
323 
324 	svc_rdma_read_info_free(info);
325 }
326 
327 /* This function sleeps when the transport's Send Queue is congested.
328  *
329  * Assumptions:
330  * - If ib_post_send() succeeds, only one completion is expected,
331  *   even if one or more WRs are flushed. This is true when posting
332  *   an rdma_rw_ctx or when posting a single signaled WR.
333  */
334 static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
335 {
336 	struct svcxprt_rdma *rdma = cc->cc_rdma;
337 	struct svc_xprt *xprt = &rdma->sc_xprt;
338 	struct ib_send_wr *first_wr;
339 	const struct ib_send_wr *bad_wr;
340 	struct list_head *tmp;
341 	struct ib_cqe *cqe;
342 	int ret;
343 
344 	if (cc->cc_sqecount > rdma->sc_sq_depth)
345 		return -EINVAL;
346 
347 	first_wr = NULL;
348 	cqe = &cc->cc_cqe;
349 	list_for_each(tmp, &cc->cc_rwctxts) {
350 		struct svc_rdma_rw_ctxt *ctxt;
351 
352 		ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list);
353 		first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
354 					   rdma->sc_port_num, cqe, first_wr);
355 		cqe = NULL;
356 	}
357 
358 	do {
359 		if (atomic_sub_return(cc->cc_sqecount,
360 				      &rdma->sc_sq_avail) > 0) {
361 			ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
362 			if (ret)
363 				break;
364 			return 0;
365 		}
366 
367 		percpu_counter_inc(&svcrdma_stat_sq_starve);
368 		trace_svcrdma_sq_full(rdma);
369 		atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
370 		wait_event(rdma->sc_send_wait,
371 			   atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
372 		trace_svcrdma_sq_retry(rdma);
373 	} while (1);
374 
375 	trace_svcrdma_sq_post_err(rdma, ret);
376 	set_bit(XPT_CLOSE, &xprt->xpt_flags);
377 
378 	/* If even one was posted, there will be a completion. */
379 	if (bad_wr != first_wr)
380 		return 0;
381 
382 	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
383 	wake_up(&rdma->sc_send_wait);
384 	return -ENOTCONN;
385 }
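/* Worked example of the Send Queue accounting above: assume
 * sc_sq_avail is 10 and cc->cc_sqecount is 16. atomic_sub_return()
 * leaves sc_sq_avail at -6, so the count is added right back and the
 * caller sleeps on sc_send_wait. Completion handlers return their own
 * cc_sqecount to sc_sq_avail and wake the waiter; the post is retried
 * once sc_sq_avail exceeds 16.
 */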
386 
387 /* Build and DMA-map an SGL that covers one kvec in an xdr_buf
388  */
389 static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info,
390 			       unsigned int len,
391 			       struct svc_rdma_rw_ctxt *ctxt)
392 {
393 	struct scatterlist *sg = ctxt->rw_sg_table.sgl;
394 
395 	sg_set_buf(&sg[0], info->wi_base, len);
396 	info->wi_base += len;
397 
398 	ctxt->rw_nents = 1;
399 }
400 
401 /* Build and DMA-map an SGL that covers part of an xdr_buf's pagelist.
402  */
403 static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info,
404 				    unsigned int remaining,
405 				    struct svc_rdma_rw_ctxt *ctxt)
406 {
407 	unsigned int sge_no, sge_bytes, page_off, page_no;
408 	const struct xdr_buf *xdr = info->wi_xdr;
409 	struct scatterlist *sg;
410 	struct page **page;
411 
412 	page_off = info->wi_next_off + xdr->page_base;
413 	page_no = page_off >> PAGE_SHIFT;
414 	page_off = offset_in_page(page_off);
415 	page = xdr->pages + page_no;
416 	info->wi_next_off += remaining;
417 	sg = ctxt->rw_sg_table.sgl;
418 	sge_no = 0;
419 	do {
420 		sge_bytes = min_t(unsigned int, remaining,
421 				  PAGE_SIZE - page_off);
422 		sg_set_page(sg, *page, sge_bytes, page_off);
423 
424 		remaining -= sge_bytes;
425 		sg = sg_next(sg);
426 		page_off = 0;
427 		sge_no++;
428 		page++;
429 	} while (remaining);
430 
431 	ctxt->rw_nents = sge_no;
432 }
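/* Worked example, assuming 4KB pages: with xdr->page_base = 3000 and
 * info->wi_next_off = 2000, page_off starts at 5000, so page_no = 1
 * and the in-page offset is 904. For remaining = 9000 the loop emits
 * three SGEs: 3192 bytes at offset 904 of that page, all 4096 bytes
 * of the next page, and the final 1712 bytes at offset 0 of the page
 * after that, so ctxt->rw_nents = 3.
 */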
433 
434 /* Construct RDMA Write WRs to send a portion of an xdr_buf containing
435  * an RPC Reply.
436  */
437 static int
438 svc_rdma_build_writes(struct svc_rdma_write_info *info,
439 		      void (*constructor)(struct svc_rdma_write_info *info,
440 					  unsigned int len,
441 					  struct svc_rdma_rw_ctxt *ctxt),
442 		      unsigned int remaining)
443 {
444 	struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
445 	struct svcxprt_rdma *rdma = cc->cc_rdma;
446 	const struct svc_rdma_segment *seg;
447 	struct svc_rdma_rw_ctxt *ctxt;
448 	int ret;
449 
450 	do {
451 		unsigned int write_len;
452 		u64 offset;
453 
454 		if (info->wi_seg_no >= info->wi_chunk->ch_segcount)
455 			goto out_overflow;
456 		seg = &info->wi_chunk->ch_segments[info->wi_seg_no];
457 
458 		write_len = min(remaining, seg->rs_length - info->wi_seg_off);
459 		if (!write_len)
460 			goto out_overflow;
461 		ctxt = svc_rdma_get_rw_ctxt(rdma,
462 					    (write_len >> PAGE_SHIFT) + 2);
463 		if (!ctxt)
464 			return -ENOMEM;
465 
466 		constructor(info, write_len, ctxt);
467 		offset = seg->rs_offset + info->wi_seg_off;
468 		ret = svc_rdma_rw_ctx_init(rdma, ctxt, offset, seg->rs_handle,
469 					   DMA_TO_DEVICE);
470 		if (ret < 0)
471 			return -EIO;
472 		percpu_counter_inc(&svcrdma_stat_write);
473 
474 		list_add(&ctxt->rw_list, &cc->cc_rwctxts);
475 		cc->cc_sqecount += ret;
476 		if (write_len == seg->rs_length - info->wi_seg_off) {
477 			info->wi_seg_no++;
478 			info->wi_seg_off = 0;
479 		} else {
480 			info->wi_seg_off += write_len;
481 		}
482 		remaining -= write_len;
483 	} while (remaining);
484 
485 	return 0;
486 
487 out_overflow:
488 	trace_svcrdma_small_wrch_err(rdma, remaining, info->wi_seg_no,
489 				     info->wi_chunk->ch_segcount);
490 	return -E2BIG;
491 }
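/* Worked example: a Write chunk with two 8192-byte segments and
 * remaining = 10000. The first pass maps 8192 bytes; write_len equals
 * the rest of segment 0, so wi_seg_no advances and wi_seg_off resets.
 * The second pass maps the final 1808 bytes into segment 1 and leaves
 * wi_seg_off = 1808 for a later call. Each pass consumes one
 * svc_rdma_rw_ctxt and adds its WQE count to cc->cc_sqecount.
 */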
492 
493 /**
494  * svc_rdma_iov_write - Construct RDMA Writes from an iov
495  * @info: pointer to write arguments
496  * @iov: kvec to write
497  *
498  * Returns:
499  *   On success, returns zero
500  *   %-E2BIG if the client-provided Write chunk is too small
501  *   %-ENOMEM if a resource has been exhausted
502  *   %-EIO if an rdma-rw error occurred
503  */
504 static int svc_rdma_iov_write(struct svc_rdma_write_info *info,
505 			      const struct kvec *iov)
506 {
507 	info->wi_base = iov->iov_base;
508 	return svc_rdma_build_writes(info, svc_rdma_vec_to_sg,
509 				     iov->iov_len);
510 }
511 
512 /**
513  * svc_rdma_pages_write - Construct RDMA Writes from pages
514  * @info: pointer to write arguments
515  * @xdr: xdr_buf with pages to write
516  * @offset: offset into the content of @xdr
517  * @length: number of bytes to write
518  *
519  * Returns:
520  *   On success, returns zero
521  *   %-E2BIG if the client-provided Write chunk is too small
522  *   %-ENOMEM if a resource has been exhausted
523  *   %-EIO if an rdma-rw error occurred
524  */
525 static int svc_rdma_pages_write(struct svc_rdma_write_info *info,
526 				const struct xdr_buf *xdr,
527 				unsigned int offset,
528 				unsigned long length)
529 {
530 	info->wi_xdr = xdr;
531 	info->wi_next_off = offset - xdr->head[0].iov_len;
532 	return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
533 				     length);
534 }
535 
536 /**
537  * svc_rdma_xb_write - Construct RDMA Writes to write an xdr_buf
538  * @xdr: xdr_buf to write
539  * @data: pointer to write arguments
540  *
541  * Returns:
542  *   On success, returns zero
543  *   %-E2BIG if the client-provided Write chunk is too small
544  *   %-ENOMEM if a resource has been exhausted
545  *   %-EIO if an rdma-rw error occurred
546  */
547 static int svc_rdma_xb_write(const struct xdr_buf *xdr, void *data)
548 {
549 	struct svc_rdma_write_info *info = data;
550 	int ret;
551 
552 	if (xdr->head[0].iov_len) {
553 		ret = svc_rdma_iov_write(info, &xdr->head[0]);
554 		if (ret < 0)
555 			return ret;
556 	}
557 
558 	if (xdr->page_len) {
559 		ret = svc_rdma_pages_write(info, xdr, xdr->head[0].iov_len,
560 					   xdr->page_len);
561 		if (ret < 0)
562 			return ret;
563 	}
564 
565 	if (xdr->tail[0].iov_len) {
566 		ret = svc_rdma_iov_write(info, &xdr->tail[0]);
567 		if (ret < 0)
568 			return ret;
569 	}
570 
571 	return xdr->len;
572 }
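/* For example, an xdr_buf with a 120-byte head kvec, an 8192-byte
 * page list, and a 4-byte tail kvec produces three batches of Write
 * WRs via the helpers above and, assuming xdr->len is the sum of its
 * parts, returns 8316.
 */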
573 
574 /**
575  * svc_rdma_send_write_chunk - Write all segments in a Write chunk
576  * @rdma: controlling RDMA transport
577  * @chunk: Write chunk provided by the client
578  * @xdr: xdr_buf containing the data payload
579  *
580  * Returns a non-negative number of bytes the chunk consumed, or
581  *	%-E2BIG if the payload was larger than the Write chunk,
582  *	%-EINVAL if client provided too many segments,
583  *	%-ENOMEM if rdma_rw context pool was exhausted,
584  *	%-ENOTCONN if posting failed (connection is lost),
585  *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
586  */
587 int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
588 			      const struct svc_rdma_chunk *chunk,
589 			      const struct xdr_buf *xdr)
590 {
591 	struct svc_rdma_write_info *info;
592 	struct svc_rdma_chunk_ctxt *cc;
593 	int ret;
594 
595 	info = svc_rdma_write_info_alloc(rdma, chunk);
596 	if (!info)
597 		return -ENOMEM;
598 	cc = &info->wi_cc;
599 
600 	ret = svc_rdma_xb_write(xdr, info);
601 	if (ret != xdr->len)
602 		goto out_err;
603 
604 	trace_svcrdma_post_write_chunk(&cc->cc_cid, cc->cc_sqecount);
605 	ret = svc_rdma_post_chunk_ctxt(cc);
606 	if (ret < 0)
607 		goto out_err;
608 	return xdr->len;
609 
610 out_err:
611 	svc_rdma_write_info_free(info);
612 	return ret;
613 }
614 
615 /**
616  * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
617  * @rdma: controlling RDMA transport
618  * @rctxt: Write and Reply chunks from client
619  * @xdr: xdr_buf containing an RPC Reply
620  *
621  * Returns a non-negative number of bytes the chunk consumed, or
622  *	%-E2BIG if the payload was larger than the Reply chunk,
623  *	%-EINVAL if client provided too many segments,
624  *	%-ENOMEM if rdma_rw context pool was exhausted,
625  *	%-ENOTCONN if posting failed (connection is lost),
626  *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
627  */
628 int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
629 			      const struct svc_rdma_recv_ctxt *rctxt,
630 			      const struct xdr_buf *xdr)
631 {
632 	struct svc_rdma_write_info *info;
633 	struct svc_rdma_chunk_ctxt *cc;
634 	struct svc_rdma_chunk *chunk;
635 	int ret;
636 
637 	if (pcl_is_empty(&rctxt->rc_reply_pcl))
638 		return 0;
639 
640 	chunk = pcl_first_chunk(&rctxt->rc_reply_pcl);
641 	info = svc_rdma_write_info_alloc(rdma, chunk);
642 	if (!info)
643 		return -ENOMEM;
644 	cc = &info->wi_cc;
645 
646 	ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
647 				      svc_rdma_xb_write, info);
648 	if (ret < 0)
649 		goto out_err;
650 
651 	trace_svcrdma_post_reply_chunk(&cc->cc_cid, cc->cc_sqecount);
652 	ret = svc_rdma_post_chunk_ctxt(cc);
653 	if (ret < 0)
654 		goto out_err;
655 
656 	return xdr->len;
657 
658 out_err:
659 	svc_rdma_write_info_free(info);
660 	return ret;
661 }
662 
663 /**
664  * svc_rdma_build_read_segment - Build RDMA Read WQEs to pull one RDMA segment
665  * @info: context for ongoing I/O
666  * @segment: co-ordinates of remote memory to be read
667  *
668  * Returns:
669  *   %0: the Read WR chain was constructed successfully
670  *   %-EINVAL: there were not enough rq_pages to finish
671  *   %-ENOMEM: allocating a local resource failed
672  *   %-EIO: a DMA mapping error occurred
673  */
674 static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
675 				       const struct svc_rdma_segment *segment)
676 {
677 	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
678 	struct svc_rdma_chunk_ctxt *cc = &info->ri_cc;
679 	struct svc_rqst *rqstp = info->ri_rqst;
680 	struct svc_rdma_rw_ctxt *ctxt;
681 	unsigned int sge_no, seg_len, len;
682 	struct scatterlist *sg;
683 	int ret;
684 
685 	len = segment->rs_length;
686 	sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
687 	ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
688 	if (!ctxt)
689 		return -ENOMEM;
690 	ctxt->rw_nents = sge_no;
691 
692 	sg = ctxt->rw_sg_table.sgl;
693 	for (sge_no = 0; sge_no < ctxt->rw_nents; sge_no++) {
694 		seg_len = min_t(unsigned int, len,
695 				PAGE_SIZE - info->ri_pageoff);
696 
697 		head->rc_arg.pages[info->ri_pageno] =
698 			rqstp->rq_pages[info->ri_pageno];
699 		if (!info->ri_pageoff)
700 			head->rc_page_count++;
701 
702 		sg_set_page(sg, rqstp->rq_pages[info->ri_pageno],
703 			    seg_len, info->ri_pageoff);
704 		sg = sg_next(sg);
705 
706 		info->ri_pageoff += seg_len;
707 		if (info->ri_pageoff == PAGE_SIZE) {
708 			info->ri_pageno++;
709 			info->ri_pageoff = 0;
710 		}
711 		len -= seg_len;
712 
713 		/* Safety check */
714 		if (len &&
715 		    &rqstp->rq_pages[info->ri_pageno + 1] > rqstp->rq_page_end)
716 			goto out_overrun;
717 	}
718 
719 	ret = svc_rdma_rw_ctx_init(cc->cc_rdma, ctxt, segment->rs_offset,
720 				   segment->rs_handle, DMA_FROM_DEVICE);
721 	if (ret < 0)
722 		return -EIO;
723 	percpu_counter_inc(&svcrdma_stat_read);
724 
725 	list_add(&ctxt->rw_list, &cc->cc_rwctxts);
726 	cc->cc_sqecount += ret;
727 	return 0;
728 
729 out_overrun:
730 	trace_svcrdma_page_overrun_err(cc->cc_rdma, rqstp, info->ri_pageno);
731 	return -EINVAL;
732 }
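/* Worked example, assuming 4KB pages: a segment with
 * rs_length = 10000 arriving while info->ri_pageoff = 512 needs
 * PAGE_ALIGN(512 + 10000) >> PAGE_SHIFT = 3 SGEs: 3584 bytes filling
 * the rest of the first sink page, all 4096 bytes of the second, and
 * 2320 bytes of the third, leaving ri_pageoff = 2320 for the next
 * segment.
 */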
733 
734 /**
735  * svc_rdma_build_read_chunk - Build RDMA Read WQEs to pull one RDMA chunk
736  * @info: context for ongoing I/O
737  * @chunk: Read chunk to pull
738  *
739  * Return values:
740  *   %0: the Read WR chain was constructed successfully
741  *   %-EINVAL: there were not enough resources to finish
742  *   %-ENOMEM: allocating a local resource failed
743  *   %-EIO: a DMA mapping error occurred
744  */
745 static int svc_rdma_build_read_chunk(struct svc_rdma_read_info *info,
746 				     const struct svc_rdma_chunk *chunk)
747 {
748 	const struct svc_rdma_segment *segment;
749 	int ret;
750 
751 	ret = -EINVAL;
752 	pcl_for_each_segment(segment, chunk) {
753 		ret = svc_rdma_build_read_segment(info, segment);
754 		if (ret < 0)
755 			break;
756 		info->ri_totalbytes += segment->rs_length;
757 	}
758 	return ret;
759 }
760 
761 /**
762  * svc_rdma_copy_inline_range - Copy part of the inline content into pages
763  * @info: context for RDMA Reads
764  * @offset: offset into the Receive buffer of region to copy
765  * @remaining: length of region to copy
766  *
767  * Take a page at a time from rqstp->rq_pages and copy the inline
768  * content from the Receive buffer into that page. Update
769  * info->ri_pageno and info->ri_pageoff so that the next RDMA Read
770  * result will land contiguously with the copied content.
771  *
772  * Return values:
773  *   %0: Inline content was successfully copied
774  *   %-EINVAL: offset or length was incorrect
775  */
776 static int svc_rdma_copy_inline_range(struct svc_rdma_read_info *info,
777 				      unsigned int offset,
778 				      unsigned int remaining)
779 {
780 	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
781 	unsigned char *dst, *src = head->rc_recv_buf;
782 	struct svc_rqst *rqstp = info->ri_rqst;
783 	unsigned int page_no, numpages;
784 
785 	numpages = PAGE_ALIGN(info->ri_pageoff + remaining) >> PAGE_SHIFT;
786 	for (page_no = 0; page_no < numpages; page_no++) {
787 		unsigned int page_len;
788 
789 		page_len = min_t(unsigned int, remaining,
790 				 PAGE_SIZE - info->ri_pageoff);
791 
792 		head->rc_arg.pages[info->ri_pageno] =
793 			rqstp->rq_pages[info->ri_pageno];
794 		if (!info->ri_pageoff)
795 			head->rc_page_count++;
796 
797 		dst = page_address(head->rc_arg.pages[info->ri_pageno]);
798 		memcpy(dst + info->ri_pageoff, src + offset, page_len);
799 
800 		info->ri_totalbytes += page_len;
801 		info->ri_pageoff += page_len;
802 		if (info->ri_pageoff == PAGE_SIZE) {
803 			info->ri_pageno++;
804 			info->ri_pageoff = 0;
805 		}
806 		remaining -= page_len;
807 		offset += page_len;
808 	}
809 
810 	return 0;
811 }
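/* Worked example, assuming 4KB pages and ri_pageoff = 0: copying
 * remaining = 5000 inline bytes touches two sink pages (numpages = 2),
 * placing 4096 bytes in the first and 904 in the second, and leaves
 * ri_pageoff = 904 so the next RDMA Read result lands contiguously.
 */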
812 
813 /**
814  * svc_rdma_read_multiple_chunks - Construct RDMA Reads to pull data item Read chunks
815  * @info: context for RDMA Reads
816  *
817  * The chunk data lands in head->rc_arg as a series of contiguous pages,
818  * like an incoming TCP call.
819  *
820  * Return values:
821  *   %0: RDMA Read WQEs were successfully built
822  *   %-EINVAL: client provided too many chunks or segments,
823  *   %-ENOMEM: rdma_rw context pool was exhausted,
824  *   %-ENOTCONN: posting failed (connection is lost),
825  *   %-EIO: rdma_rw initialization failed (DMA mapping, etc).
826  */
827 static noinline int svc_rdma_read_multiple_chunks(struct svc_rdma_read_info *info)
828 {
829 	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
830 	const struct svc_rdma_pcl *pcl = &head->rc_read_pcl;
831 	struct svc_rdma_chunk *chunk, *next;
832 	struct xdr_buf *buf = &head->rc_arg;
833 	unsigned int start, length;
834 	int ret;
835 
836 	start = 0;
837 	chunk = pcl_first_chunk(pcl);
838 	length = chunk->ch_position;
839 	ret = svc_rdma_copy_inline_range(info, start, length);
840 	if (ret < 0)
841 		return ret;
842 
843 	pcl_for_each_chunk(chunk, pcl) {
844 		ret = svc_rdma_build_read_chunk(info, chunk);
845 		if (ret < 0)
846 			return ret;
847 
848 		next = pcl_next_chunk(pcl, chunk);
849 		if (!next)
850 			break;
851 
852 		start += length;
853 		length = next->ch_position - info->ri_totalbytes;
854 		ret = svc_rdma_copy_inline_range(info, start, length);
855 		if (ret < 0)
856 			return ret;
857 	}
858 
859 	start += length;
860 	length = head->rc_byte_len - start;
861 	ret = svc_rdma_copy_inline_range(info, start, length);
862 	if (ret < 0)
863 		return ret;
864 
865 	buf->len += info->ri_totalbytes;
866 	buf->buflen += info->ri_totalbytes;
867 
868 	head->rc_hdr_count = 1;
869 	buf->head[0].iov_base = page_address(head->rc_pages[0]);
870 	buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, info->ri_totalbytes);
871 	buf->page_len = info->ri_totalbytes - buf->head[0].iov_len;
872 	return 0;
873 }
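/* Worked example, assuming head->rc_byte_len = 400 bytes of inline
 * content plus two Read chunks, one at XDR position 100 carrying
 * 1000 bytes and one at position 1300 carrying 2000 bytes. The logic
 * above copies inline bytes [0, 100), pulls chunk 1
 * (ri_totalbytes = 1100), copies inline bytes [100, 300) to reach
 * position 1300, pulls chunk 2 (ri_totalbytes = 3300), then copies
 * the trailing inline bytes [300, 400). The reassembled message is
 * 3400 bytes long.
 */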
874 
875 /**
876  * svc_rdma_read_data_item - Construct RDMA Reads to pull data item Read chunks
877  * @info: context for RDMA Reads
878  *
879  * The chunk data lands in the page list of head->rc_arg.pages.
880  *
881  * Currently NFSD does not look at the head->rc_arg.tail[0] iovec.
882  * Therefore, XDR round-up of the Read chunk and trailing
883  * inline content must both be added at the end of the pagelist.
884  *
885  * Return values:
886  *   %0: RDMA Read WQEs were successfully built
887  *   %-EINVAL: client provided too many chunks or segments,
888  *   %-ENOMEM: rdma_rw context pool was exhausted,
889  *   %-ENOTCONN: posting failed (connection is lost),
890  *   %-EIO: rdma_rw initialization failed (DMA mapping, etc).
891  */
892 static int svc_rdma_read_data_item(struct svc_rdma_read_info *info)
893 {
894 	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
895 	struct xdr_buf *buf = &head->rc_arg;
896 	struct svc_rdma_chunk *chunk;
897 	unsigned int length;
898 	int ret;
899 
900 	chunk = pcl_first_chunk(&head->rc_read_pcl);
901 	ret = svc_rdma_build_read_chunk(info, chunk);
902 	if (ret < 0)
903 		goto out;
904 
905 	head->rc_hdr_count = 0;
906 
907 	/* Split the Receive buffer between the head and tail
908 	 * buffers at Read chunk's position. XDR roundup of the
909 	 * chunk is not included in either the pagelist or in
910 	 * the tail.
911 	 */
912 	buf->tail[0].iov_base = buf->head[0].iov_base + chunk->ch_position;
913 	buf->tail[0].iov_len = buf->head[0].iov_len - chunk->ch_position;
914 	buf->head[0].iov_len = chunk->ch_position;
915 
916 	/* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
917 	 *
918 	 * If the client already rounded up the chunk length, the
919 	 * length does not change. Otherwise, the length of the page
920 	 * list is increased to include XDR round-up.
921 	 *
922 	 * Currently these chunks always start at page offset 0,
923 	 * thus the rounded-up length never crosses a page boundary.
924 	 */
925 	length = XDR_QUADLEN(info->ri_totalbytes) << 2;
926 	buf->page_len = length;
927 	buf->len += length;
928 	buf->buflen += length;
929 
930 out:
931 	return ret;
932 }
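/* Worked example: if the Read chunk sits at ch_position 112 and the
 * head kvec held 200 bytes of inline content, the split above leaves
 * a 112-byte head and an 88-byte tail. A chunk carrying 3 bytes of
 * payload yields page_len = XDR_QUADLEN(3) << 2 = 4, the rounded-up
 * length.
 */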
933 
934 /**
935  * svc_rdma_read_chunk_range - Build RDMA Read WQEs for portion of a chunk
936  * @info: context for RDMA Reads
937  * @chunk: parsed Call chunk to pull
938  * @offset: offset of region to pull
939  * @length: length of region to pull
940  *
941  * Return values:
942  *   %0: RDMA Read WQEs were successfully built
943  *   %-EINVAL: there were not enough resources to finish
944  *   %-ENOMEM: rdma_rw context pool was exhausted,
945  *   %-ENOTCONN: posting failed (connection is lost),
946  *   %-EIO: rdma_rw initialization failed (DMA mapping, etc).
947  */
948 static int svc_rdma_read_chunk_range(struct svc_rdma_read_info *info,
949 				     const struct svc_rdma_chunk *chunk,
950 				     unsigned int offset, unsigned int length)
951 {
952 	const struct svc_rdma_segment *segment;
953 	int ret;
954 
955 	ret = -EINVAL;
956 	pcl_for_each_segment(segment, chunk) {
957 		struct svc_rdma_segment dummy;
958 
959 		if (offset > segment->rs_length) {
960 			offset -= segment->rs_length;
961 			continue;
962 		}
963 
964 		dummy.rs_handle = segment->rs_handle;
965 		dummy.rs_length = min_t(u32, length, segment->rs_length) - offset;
966 		dummy.rs_offset = segment->rs_offset + offset;
967 
968 		ret = svc_rdma_build_read_segment(info, &dummy);
969 		if (ret < 0)
970 			break;
971 
972 		info->ri_totalbytes += dummy.rs_length;
973 		length -= dummy.rs_length;
974 		offset = 0;
975 	}
976 	return ret;
977 }
978 
979 /**
980  * svc_rdma_read_call_chunk - Build RDMA Read WQEs to pull a Long Message
981  * @info: context for RDMA Reads
982  *
983  * Return values:
984  *   %0: RDMA Read WQEs were successfully built
985  *   %-EINVAL: there were not enough resources to finish
986  *   %-ENOMEM: rdma_rw context pool was exhausted,
987  *   %-ENOTCONN: posting failed (connection is lost),
988  *   %-EIO: rdma_rw initialization failed (DMA mapping, etc).
989  */
990 static int svc_rdma_read_call_chunk(struct svc_rdma_read_info *info)
991 {
992 	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
993 	const struct svc_rdma_chunk *call_chunk =
994 			pcl_first_chunk(&head->rc_call_pcl);
995 	const struct svc_rdma_pcl *pcl = &head->rc_read_pcl;
996 	struct svc_rdma_chunk *chunk, *next;
997 	unsigned int start, length;
998 	int ret;
999 
1000 	if (pcl_is_empty(pcl))
1001 		return svc_rdma_build_read_chunk(info, call_chunk);
1002 
1003 	start = 0;
1004 	chunk = pcl_first_chunk(pcl);
1005 	length = chunk->ch_position;
1006 	ret = svc_rdma_read_chunk_range(info, call_chunk, start, length);
1007 	if (ret < 0)
1008 		return ret;
1009 
1010 	pcl_for_each_chunk(chunk, pcl) {
1011 		ret = svc_rdma_build_read_chunk(info, chunk);
1012 		if (ret < 0)
1013 			return ret;
1014 
1015 		next = pcl_next_chunk(pcl, chunk);
1016 		if (!next)
1017 			break;
1018 
1019 		start += length;
1020 		length = next->ch_position - info->ri_totalbytes;
1021 		ret = svc_rdma_read_chunk_range(info, call_chunk,
1022 						start, length);
1023 		if (ret < 0)
1024 			return ret;
1025 	}
1026 
1027 	start += length;
1028 	length = call_chunk->ch_length - start;
1029 	return svc_rdma_read_chunk_range(info, call_chunk, start, length);
1030 }
1031 
1032 /**
1033  * svc_rdma_read_special - Build RDMA Read WQEs to pull a Long Message
1034  * @info: context for RDMA Reads
1035  *
1036  * The start of the data lands in the first page just after the
1037  * Transport header, and the rest lands in the page list of
1038  * head->rc_arg.pages.
1039  *
1040  * Assumptions:
1041  *	- A PZRC (Position Zero Read Chunk) is never sent in an
1042  *	  RDMA_MSG message, though it's allowed by spec.
1043  *
1044  * Return values:
1045  *   %0: RDMA Read WQEs were successfully built
1046  *   %-EINVAL: client provided too many chunks or segments,
1047  *   %-ENOMEM: rdma_rw context pool was exhausted,
1048  *   %-ENOTCONN: posting failed (connection is lost),
1049  *   %-EIO: rdma_rw initialization failed (DMA mapping, etc).
1050  */
1051 static noinline int svc_rdma_read_special(struct svc_rdma_read_info *info)
1052 {
1053 	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
1054 	struct xdr_buf *buf = &head->rc_arg;
1055 	int ret;
1056 
1057 	ret = svc_rdma_read_call_chunk(info);
1058 	if (ret < 0)
1059 		goto out;
1060 
1061 	buf->len += info->ri_totalbytes;
1062 	buf->buflen += info->ri_totalbytes;
1063 
1064 	head->rc_hdr_count = 1;
1065 	buf->head[0].iov_base = page_address(head->rc_pages[0]);
1066 	buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, info->ri_totalbytes);
1067 	buf->page_len = info->ri_totalbytes - buf->head[0].iov_len;
1068 
1069 out:
1070 	return ret;
1071 }
1072 
1073 /* Pages under I/O have been copied to head->rc_pages. Ensure they
1074  * are not released by svc_xprt_release() until the I/O is complete.
1075  *
1076  * This has to be done after all Read WRs are constructed to properly
1077  * handle a page that is part of I/O on behalf of two different RDMA
1078  * segments.
1079  *
1080  * Do this only if I/O has been posted. Otherwise, we do indeed want
1081  * svc_xprt_release() to clean things up properly.
1082  */
1083 static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
1084 				   const unsigned int start,
1085 				   const unsigned int num_pages)
1086 {
1087 	unsigned int i;
1088 
1089 	for (i = start; i < num_pages + start; i++)
1090 		rqstp->rq_pages[i] = NULL;
1091 }
1092 
1093 /**
1094  * svc_rdma_process_read_list - Pull list of Read chunks from the client
1095  * @rdma: controlling RDMA transport
1096  * @rqstp: set of pages to use as Read sink buffers
1097  * @head: pages under I/O collect here
1098  *
1099  * The RPC/RDMA protocol assumes that the upper layer's XDR decoders
1100  * pull each Read chunk as they decode an incoming RPC message.
1101  *
1102  * On Linux, however, the server needs to have a fully-constructed RPC
1103  * message in rqstp->rq_arg when there is a positive return code from
1104  * ->xpo_recvfrom. So the Read list is safety-checked immediately when
1105  * it is received, then here the whole Read list is pulled all at once.
1106  * The ingress RPC message is fully reconstructed once all associated
1107  * RDMA Reads have completed.
1108  *
1109  * Return values:
1110  *   %1: all needed RDMA Reads were posted successfully,
1111  *   %-EINVAL: client provided too many chunks or segments,
1112  *   %-ENOMEM: rdma_rw context pool was exhausted,
1113  *   %-ENOTCONN: posting failed (connection is lost),
1114  *   %-EIO: rdma_rw initialization failed (DMA mapping, etc).
1115  */
1116 int svc_rdma_process_read_list(struct svcxprt_rdma *rdma,
1117 			       struct svc_rqst *rqstp,
1118 			       struct svc_rdma_recv_ctxt *head)
1119 {
1120 	struct svc_rdma_read_info *info;
1121 	struct svc_rdma_chunk_ctxt *cc;
1122 	int ret;
1123 
1124 	/* The request (with page list) is constructed in
1125 	 * head->rc_arg. Pages involved with RDMA Read I/O are
1126 	 * transferred there.
1127 	 */
1128 	head->rc_arg.head[0] = rqstp->rq_arg.head[0];
1129 	head->rc_arg.tail[0] = rqstp->rq_arg.tail[0];
1130 	head->rc_arg.pages = head->rc_pages;
1131 	head->rc_arg.page_base = 0;
1132 	head->rc_arg.page_len = 0;
1133 	head->rc_arg.len = rqstp->rq_arg.len;
1134 	head->rc_arg.buflen = rqstp->rq_arg.buflen;
1135 
1136 	info = svc_rdma_read_info_alloc(rdma);
1137 	if (!info)
1138 		return -ENOMEM;
1139 	cc = &info->ri_cc;
1140 	info->ri_rqst = rqstp;
1141 	info->ri_readctxt = head;
1142 	info->ri_pageno = 0;
1143 	info->ri_pageoff = 0;
1144 	info->ri_totalbytes = 0;
1145 
1146 	if (pcl_is_empty(&head->rc_call_pcl)) {
1147 		if (head->rc_read_pcl.cl_count == 1)
1148 			ret = svc_rdma_read_data_item(info);
1149 		else
1150 			ret = svc_rdma_read_multiple_chunks(info);
1151 	} else
1152 		ret = svc_rdma_read_special(info);
1153 	if (ret < 0)
1154 		goto out_err;
1155 
1156 	trace_svcrdma_post_read_chunk(&cc->cc_cid, cc->cc_sqecount);
1157 	ret = svc_rdma_post_chunk_ctxt(cc);
1158 	if (ret < 0)
1159 		goto out_err;
1160 	svc_rdma_save_io_pages(rqstp, 0, head->rc_page_count);
1161 	return 1;
1162 
1163 out_err:
1164 	svc_rdma_read_info_free(info);
1165 	return ret;
1166 }
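/* For illustration of the completion side of the function above: once
 * the posted Reads finish, svc_rdma_wc_read_done() moves
 * head->rc_list onto rdma->sc_read_complete_q, sets XPT_DATA, and
 * enqueues the transport so the fully reconstructed RPC message can
 * be processed.
 */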
1167