xref: /linux/net/sunrpc/xprtrdma/frwr_ops.c (revision b85900e91c8402bedc1db14e6d293e26f25d30d4)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2015, 2017 Oracle.  All rights reserved.
4  * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
5  */
6 
7 /* Lightweight memory registration using Fast Registration Work
8  * Requests (FRWR).
9  *
10  * FRWR features ordered asynchronous registration and invalidation
11  * of arbitrarily-sized memory regions. This is the fastest and safest
12  * but most complex memory registration mode.
13  */
14 
15 /* Normal operation
16  *
17  * A Memory Region is prepared for RDMA Read or Write using a FAST_REG
18  * Work Request (frwr_map). When the RDMA operation is finished, this
19  * Memory Region is invalidated using a LOCAL_INV Work Request
20  * (frwr_unmap_async and frwr_unmap_sync).
21  *
22  * Typically FAST_REG Work Requests are not signaled, and neither are
23  * RDMA Send Work Requests (with the exception of signaling occasionally
24  * to prevent provider work queue overflows). This greatly reduces HCA
25  * interrupt workload.
26  */
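
/* Sketch of the Work Request flow for one RPC that registers a
 * single chunk (illustrative only; the actual chains are built in
 * frwr_send() and frwr_unmap_{sync,async}() below):
 *
 *	FAST_REG(MR) -> SEND(RPC Call)		(usually unsignaled)
 *	  ... the server performs RDMA Read/Write on the MR ...
 *	LOCAL_INV(MR)				(signaled)
 *
 * The LOCAL_INV completion is what fences the MR from the server
 * before the RPC consumer touches the memory.
 */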
27 
28 /* Transport recovery
29  *
30  * frwr_map and frwr_unmap_* cannot run at the same time the transport
31  * connect worker is running. The connect worker holds the transport
32  * send lock, just as ->send_request does. This prevents frwr_map and
33  * the connect worker from running concurrently. When a connection is
34  * closed, the Receive completion queue is drained before allowing
35  * the connect worker to get control. This prevents frwr_unmap and the
36  * connect worker from running concurrently.
37  *
38  * When the underlying transport disconnects, MRs that are in flight
39  * are flushed and are likely unusable. Thus all MRs are destroyed.
40  * New MRs are created on demand.
41  */
42 
43 #include <linux/sunrpc/svc_rdma.h>
44 
45 #include "xprt_rdma.h"
46 #include <trace/events/rpcrdma.h>
47 
48 static void frwr_cid_init(struct rpcrdma_ep *ep,
49 			  struct rpcrdma_mr *mr)
50 {
51 	struct rpc_rdma_cid *cid = &mr->mr_cid;
52 
53 	cid->ci_queue_id = ep->re_attr.send_cq->res.id;
54 	cid->ci_completion_id = mr->mr_ibmr->res.id;
55 }
56 
57 static void frwr_mr_unmap(struct rpcrdma_mr *mr)
58 {
59 	if (mr->mr_device) {
60 		trace_xprtrdma_mr_unmap(mr);
61 		ib_dma_unmap_sg(mr->mr_device, mr->mr_sg, mr->mr_nents,
62 				mr->mr_dir);
63 		mr->mr_device = NULL;
64 	}
65 }
66 
67 /**
68  * frwr_mr_release - Destroy one MR
69  * @mr: MR allocated by frwr_mr_init
70  *
71  */
72 void frwr_mr_release(struct rpcrdma_mr *mr)
73 {
74 	int rc;
75 
76 	frwr_mr_unmap(mr);
77 
78 	rc = ib_dereg_mr(mr->mr_ibmr);
79 	if (rc)
80 		trace_xprtrdma_frwr_dereg(mr, rc);
81 	kfree(mr->mr_sg);
82 	kfree(mr);
83 }
84 
85 static void frwr_mr_put(struct rpcrdma_mr *mr)
86 {
87 	frwr_mr_unmap(mr);
88 
89 	/* The MR is returned to the req's MR free list instead
90 	 * of to the xprt's MR free list. No spinlock is needed.
91 	 */
92 	rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
93 }
94 
95 /**
96  * frwr_reset - Place MRs back on @req's free list
97  * @req: request to reset
98  *
99  * Used after a failed marshal. For FRWR, this means the MRs
100  * don't have to be fully released and recreated.
101  *
102  * NB: This is safe only as long as none of @req's MRs are
103  * involved with an ongoing asynchronous FAST_REG or LOCAL_INV
104  * Work Request.
105  */
106 void frwr_reset(struct rpcrdma_req *req)
107 {
108 	struct rpcrdma_mr *mr;
109 
110 	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
111 		frwr_mr_put(mr);
112 }
113 
114 /**
115  * frwr_mr_init - Initialize one MR
116  * @r_xprt: controlling transport instance
117  * @mr: generic MR to prepare for FRWR
118  *
119  * Returns zero if successful. Otherwise a negative errno
120  * is returned.
121  */
122 int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
123 {
124 	struct rpcrdma_ep *ep = r_xprt->rx_ep;
125 	unsigned int depth = ep->re_max_fr_depth;
126 	struct scatterlist *sg;
127 	struct ib_mr *frmr;
128 
129 	sg = kcalloc_node(depth, sizeof(*sg), XPRTRDMA_GFP_FLAGS,
130 			  ibdev_to_node(ep->re_id->device));
131 	if (!sg)
132 		return -ENOMEM;
133 
134 	frmr = ib_alloc_mr(ep->re_pd, ep->re_mrtype, depth);
135 	if (IS_ERR(frmr))
136 		goto out_mr_err;
137 
138 	mr->mr_xprt = r_xprt;
139 	mr->mr_ibmr = frmr;
140 	mr->mr_device = NULL;
141 	INIT_LIST_HEAD(&mr->mr_list);
142 	init_completion(&mr->mr_linv_done);
143 	frwr_cid_init(ep, mr);
144 
145 	sg_init_table(sg, depth);
146 	mr->mr_sg = sg;
147 	return 0;
148 
149 out_mr_err:
150 	kfree(sg);
151 	trace_xprtrdma_frwr_alloc(mr, PTR_ERR(frmr));
152 	return PTR_ERR(frmr);
153 }
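
/* As the Transport recovery notes above explain, every MR is
 * destroyed on disconnect and new MRs are created on demand, so
 * frwr_mr_init() runs whenever the transport needs to replenish
 * its MR free lists (a general note on intent, not a list of
 * call sites).
 */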
154 
155 /**
156  * frwr_query_device - Prepare a transport for use with FRWR
157  * @ep: endpoint to fill in
158  * @device: RDMA device to query
159  *
160  * On success, sets:
161  *	ep->re_attr
162  *	ep->re_max_requests
163  *	ep->re_max_rdma_segs
164  *	ep->re_max_fr_depth
165  *	ep->re_mrtype
166  *
167  * Return values:
168  *   On success, returns zero.
169  *   %-EINVAL - the device does not support FRWR memory registration
170  *   %-ENOMEM - the device is not sufficiently capable for NFS/RDMA
171  */
172 int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device)
173 {
174 	const struct ib_device_attr *attrs = &device->attrs;
175 	int max_qp_wr, depth, delta;
176 	unsigned int max_sge;
177 
178 	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) ||
179 	    attrs->max_fast_reg_page_list_len == 0) {
180 		pr_err("rpcrdma: 'frwr' mode is not supported by device %s\n",
181 		       device->name);
182 		return -EINVAL;
183 	}
184 
185 	max_sge = min_t(unsigned int, attrs->max_send_sge,
186 			RPCRDMA_MAX_SEND_SGES);
187 	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
188 		pr_err("rpcrdma: HCA provides only %u send SGEs\n", max_sge);
189 		return -ENOMEM;
190 	}
191 	ep->re_attr.cap.max_send_sge = max_sge;
192 	ep->re_attr.cap.max_recv_sge = 1;
193 
194 	ep->re_mrtype = IB_MR_TYPE_MEM_REG;
195 	if (attrs->kernel_cap_flags & IBK_SG_GAPS_REG)
196 		ep->re_mrtype = IB_MR_TYPE_SG_GAPS;
197 
198 	/* Quirk: Some devices advertise a large max_fast_reg_page_list_len
199 	 * capability, but perform optimally when the MRs are not larger
200 	 * than a page.
201 	 */
202 	if (attrs->max_sge_rd > RPCRDMA_MAX_HDR_SEGS)
203 		ep->re_max_fr_depth = attrs->max_sge_rd;
204 	else
205 		ep->re_max_fr_depth = attrs->max_fast_reg_page_list_len;
206 	if (ep->re_max_fr_depth > RPCRDMA_MAX_DATA_SEGS)
207 		ep->re_max_fr_depth = RPCRDMA_MAX_DATA_SEGS;
208 
209 	/* Add room for frwr register and invalidate WRs.
210 	 * 1. FRWR reg WR for head
211 	 * 2. FRWR invalidate WR for head
212 	 * 3. N FRWR reg WRs for pagelist
213 	 * 4. N FRWR invalidate WRs for pagelist
214 	 * 5. FRWR reg WR for tail
215 	 * 6. FRWR invalidate WR for tail
216 	 * 7. The RDMA_SEND WR
217 	 */
218 	depth = 7;
219 
220 	/* Calculate N if the device max FRWR depth is smaller than
221 	 * RPCRDMA_MAX_DATA_SEGS.
222 	 */
223 	if (ep->re_max_fr_depth < RPCRDMA_MAX_DATA_SEGS) {
224 		delta = RPCRDMA_MAX_DATA_SEGS - ep->re_max_fr_depth;
225 		do {
226 			depth += 2; /* FRWR reg + invalidate */
227 			delta -= ep->re_max_fr_depth;
228 		} while (delta > 0);
229 	}
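
	/* Worked example (assuming RPCRDMA_MAX_DATA_SEGS is 64, its
	 * usual value): a device with re_max_fr_depth == 16 leaves
	 * delta == 48, so the loop above runs three times and depth
	 * becomes 7 + 3 * 2 == 13 WRs per RPC.
	 */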
230 
231 	max_qp_wr = attrs->max_qp_wr;
232 	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
233 	max_qp_wr -= 1;
234 	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
235 		return -ENOMEM;
236 	if (ep->re_max_requests > max_qp_wr)
237 		ep->re_max_requests = max_qp_wr;
238 	ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
239 	if (ep->re_attr.cap.max_send_wr > max_qp_wr) {
240 		ep->re_max_requests = max_qp_wr / depth;
241 		if (!ep->re_max_requests)
242 			return -ENOMEM;
243 		ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
244 	}
245 	ep->re_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
246 	ep->re_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
247 	ep->re_recv_batch = ep->re_max_requests >> 2;
248 	ep->re_attr.cap.max_recv_wr = ep->re_max_requests;
249 	ep->re_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
250 	ep->re_attr.cap.max_recv_wr += ep->re_recv_batch;
251 	ep->re_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */
252 
253 	ep->re_max_rdma_segs =
254 		DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ep->re_max_fr_depth);
255 	/* Reply chunks require segments for head and tail buffers */
256 	ep->re_max_rdma_segs += 2;
257 	if (ep->re_max_rdma_segs > RPCRDMA_MAX_HDR_SEGS)
258 		ep->re_max_rdma_segs = RPCRDMA_MAX_HDR_SEGS;
259 
260 	/* Ensure the underlying device is capable of conveying the
261 	 * largest r/wsize NFS will ask for. This guarantees that
262 	 * failing over from one RDMA device to another will not
263 	 * break NFS I/O.
264 	 */
265 	if ((ep->re_max_rdma_segs * ep->re_max_fr_depth) < RPCRDMA_MAX_SEGS)
266 		return -ENOMEM;
267 
268 	return 0;
269 }
270 
271 /**
272  * frwr_map - Register a memory region from an xdr_buf cursor
273  * @r_xprt: controlling transport
274  * @cur: cursor tracking position within the xdr_buf
275  * @writing: true when RDMA Write will be used
276  * @xid: XID of RPC using the registered memory
277  * @mr: MR to fill in
278  *
279  * Prepare a REG_MR Work Request to register a memory region
280  * for remote access via RDMA READ or RDMA WRITE.
281  *
282  * Returns 0 on success (cursor advanced past consumed data,
283  * @mr populated) or a negative errno on failure.
284  */
285 int frwr_map(struct rpcrdma_xprt *r_xprt,
286 	     struct rpcrdma_xdr_cursor *cur,
287 	     bool writing, __be32 xid,
288 	     struct rpcrdma_mr *mr)
289 {
290 	struct rpcrdma_ep *ep = r_xprt->rx_ep;
291 	const struct xdr_buf *xdrbuf = cur->xc_buf;
292 	bool sg_gaps = ep->re_mrtype == IB_MR_TYPE_SG_GAPS;
293 	unsigned int max_depth = ep->re_max_fr_depth;
294 	struct ib_reg_wr *reg_wr;
295 	int i, n, dma_nents;
296 	struct ib_mr *ibmr;
297 	u8 key;
298 
299 	i = 0;
300 
301 	/* Head kvec */
302 	if (!(cur->xc_flags & XC_HEAD_DONE)) {
303 		const struct kvec *head = &xdrbuf->head[0];
304 
305 		sg_set_page(&mr->mr_sg[i],
306 			    virt_to_page(head->iov_base),
307 			    head->iov_len,
308 			    offset_in_page(head->iov_base));
309 		cur->xc_flags |= XC_HEAD_DONE;
310 		i++;
311 		/* Without sg-gap support, each non-contiguous region
312 		 * must be registered as a separate MR.  Finishing this
313 		 * registration after only the head kvec causes the
314 		 * caller to invoke frwr_map() again for the page list
315 		 * and tail.
316 		 */
317 		if (!sg_gaps)
318 			goto finish;
319 	}
320 
321 	/* Page list */
322 	if (!(cur->xc_flags & XC_PAGES_DONE) && xdrbuf->page_len) {
323 		unsigned int page_base, remaining;
324 		struct page **ppages;
325 
326 		remaining = xdrbuf->page_len - cur->xc_page_offset;
327 		page_base = offset_in_page(xdrbuf->page_base +
328 					   cur->xc_page_offset);
329 		ppages = xdrbuf->pages +
330 			 ((xdrbuf->page_base + cur->xc_page_offset)
331 			  >> PAGE_SHIFT);
332 
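		/* Each scatterlist element covers at most the rest of
		 * one page. Without SG_GAPS support, stop early when a
		 * segment ends mid-page while data remains, because the
		 * region this MR maps must stay contiguous.
		 */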
333 		while (remaining > 0 && i < max_depth) {
334 			unsigned int len;
335 
336 			len = min_t(unsigned int,
337 				    PAGE_SIZE - page_base, remaining);
338 			sg_set_page(&mr->mr_sg[i], *ppages,
339 				    len, page_base);
340 			cur->xc_page_offset += len;
341 			i++;
342 			ppages++;
343 			remaining -= len;
344 
345 			if (!sg_gaps && remaining > 0 &&
346 			    offset_in_page(page_base + len))
347 				goto finish;
348 			page_base = 0;
349 		}
350 		if (remaining == 0)
351 			cur->xc_flags |= XC_PAGES_DONE;
352 	} else if (!(cur->xc_flags & XC_PAGES_DONE)) {
353 		cur->xc_flags |= XC_PAGES_DONE;
354 	}
355 
356 	/* Tail kvec */
357 	if (!(cur->xc_flags & XC_TAIL_DONE) && xdrbuf->tail[0].iov_len &&
358 	    i < max_depth) {
359 		const struct kvec *tail = &xdrbuf->tail[0];
360 
361 		if (!sg_gaps && i > 0) {
362 			struct scatterlist *prev = &mr->mr_sg[i - 1];
363 
364 			if (offset_in_page(prev->offset + prev->length) ||
365 			    offset_in_page(tail->iov_base))
366 				goto finish;
367 		}
368 		sg_set_page(&mr->mr_sg[i],
369 			    virt_to_page(tail->iov_base),
370 			    tail->iov_len,
371 			    offset_in_page(tail->iov_base));
372 		cur->xc_flags |= XC_TAIL_DONE;
373 		i++;
374 	} else if (!(cur->xc_flags & XC_TAIL_DONE) &&
375 		   !xdrbuf->tail[0].iov_len) {
376 		cur->xc_flags |= XC_TAIL_DONE;
377 	}
378 
379 finish:
380 	mr->mr_dir = rpcrdma_data_dir(writing);
381 	mr->mr_nents = i;
382 
383 	dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg, mr->mr_nents,
384 				  mr->mr_dir);
385 	if (!dma_nents)
386 		goto out_dmamap_err;
387 	mr->mr_device = ep->re_id->device;
388 
389 	ibmr = mr->mr_ibmr;
390 	n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
391 	if (n != dma_nents)
392 		goto out_mapmr_err;
393 
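	/* Stamp the RPC's XID into the upper 32 bits of the MR's IOVA,
	 * which appears intended to make it easy to correlate this
	 * registration with its RPC (in traces and on the wire), and
	 * bump the rkey's 8-bit key portion so a stale handle from a
	 * previous registration of this MR is not honored.
	 */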
394 	ibmr->iova &= 0x00000000ffffffff;
395 	ibmr->iova |= ((u64)be32_to_cpu(xid)) << 32;
396 	key = (u8)(ibmr->rkey & 0x000000FF);
397 	ib_update_fast_reg_key(ibmr, ++key);
398 
399 	reg_wr = &mr->mr_regwr;
400 	reg_wr->mr = ibmr;
401 	reg_wr->key = ibmr->rkey;
402 	reg_wr->access = writing ?
403 			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
404 			 IB_ACCESS_REMOTE_READ;
405 
406 	mr->mr_handle = ibmr->rkey;
407 	mr->mr_length = ibmr->length;
408 	mr->mr_offset = ibmr->iova;
409 	trace_xprtrdma_mr_map(mr);
410 
411 	return 0;
412 
413 out_dmamap_err:
414 	trace_xprtrdma_frwr_sgerr(mr, i);
415 	return -EIO;
416 
417 out_mapmr_err:
418 	trace_xprtrdma_frwr_maperr(mr, n);
419 	return -EIO;
420 }
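
/* Sketch of how a caller might drive frwr_map() over an xdr_buf.
 * This is illustrative only: "rqst", "req", and "writing" are
 * assumed locals, error unwinding is omitted, and the real
 * marshaling code also emits the RPC/RDMA chunk lists.
 *
 *	struct rpcrdma_xdr_cursor cur = { .xc_buf = &rqst->rq_snd_buf };
 *	struct rpcrdma_mr *mr;
 *	int rc;
 *
 *	while (!(cur.xc_flags & XC_TAIL_DONE)) {
 *		mr = rpcrdma_mr_get(r_xprt);
 *		if (!mr)
 *			return -EAGAIN;
 *		rc = frwr_map(r_xprt, &cur, writing, rqst->rq_xid, mr);
 *		if (rc)
 *			return rc;
 *		rpcrdma_mr_push(mr, &req->rl_registered);
 *	}
 */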
421 
422 /**
423  * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
424  * @cq: completion queue
425  * @wc: WCE for a completed FastReg WR
426  *
427  * Each flushed MR gets destroyed after the QP has drained.
428  */
429 static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
430 {
431 	struct ib_cqe *cqe = wc->wr_cqe;
432 	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
433 
434 	/* WARNING: Only wr_cqe and status are reliable at this point */
435 	trace_xprtrdma_wc_fastreg(wc, &mr->mr_cid);
436 
437 	rpcrdma_flush_disconnect(cq->cq_context, wc);
438 }
439 
440 /**
441  * frwr_send - post Send WRs containing the RPC Call message
442  * @r_xprt: controlling transport instance
443  * @req: prepared RPC Call
444  *
445  * For FRWR, chain any FastReg WRs to the Send WR. Only a
446  * single ib_post_send call is needed to register memory
447  * and then post the Send WR.
448  *
449  * Returns the return code from ib_post_send.
450  *
451  * Caller must hold the transport send lock to ensure that the
452  * pointers to the transport's rdma_cm_id and QP are stable.
453  */
454 int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
455 {
456 	struct ib_send_wr *post_wr, *send_wr = &req->rl_wr;
457 	struct rpcrdma_ep *ep = r_xprt->rx_ep;
458 	struct rpcrdma_mr *mr;
459 	unsigned int num_wrs;
460 	int ret;
461 
462 	num_wrs = 1;
463 	post_wr = send_wr;
464 	list_for_each_entry(mr, &req->rl_registered, mr_list) {
465 		trace_xprtrdma_mr_fastreg(mr);
466 
467 		mr->mr_cqe.done = frwr_wc_fastreg;
468 		mr->mr_regwr.wr.next = post_wr;
469 		mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe;
470 		mr->mr_regwr.wr.num_sge = 0;
471 		mr->mr_regwr.wr.opcode = IB_WR_REG_MR;
472 		mr->mr_regwr.wr.send_flags = 0;
473 		post_wr = &mr->mr_regwr.wr;
474 		++num_wrs;
475 	}
476 
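	/* Signal this Send only when needed (another kref still holds
	 * the req) or occasionally, so that enough completions are
	 * requested to keep the provider's Send Queue accounting from
	 * overflowing (see the notes at the top of this file) without
	 * taking an interrupt for every RPC.
	 */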
477 	if ((kref_read(&req->rl_kref) > 1) || num_wrs > ep->re_send_count) {
478 		send_wr->send_flags |= IB_SEND_SIGNALED;
479 		ep->re_send_count = min_t(unsigned int, ep->re_send_batch,
480 					  num_wrs - ep->re_send_count);
481 	} else {
482 		send_wr->send_flags &= ~IB_SEND_SIGNALED;
483 		ep->re_send_count -= num_wrs;
484 	}
485 
486 	trace_xprtrdma_post_send(req);
487 	ret = ib_post_send(ep->re_id->qp, post_wr, NULL);
488 	if (ret)
489 		trace_xprtrdma_post_send_err(r_xprt, req, ret);
490 	return ret;
491 }
492 
493 /**
494  * frwr_reminv - handle a remotely invalidated mr on the @mrs list
495  * @rep: Received reply
496  * @mrs: list of MRs to check
497  *
498  */
499 void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
500 {
501 	struct rpcrdma_mr *mr;
502 
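	/* rr_inv_rkey identifies an MR that the server has already
	 * invalidated remotely (typically via RDMA Send With
	 * Invalidate); that MR can be put back without posting a
	 * LOCAL_INV WR.
	 */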
503 	list_for_each_entry(mr, mrs, mr_list)
504 		if (mr->mr_handle == rep->rr_inv_rkey) {
505 			list_del_init(&mr->mr_list);
506 			trace_xprtrdma_mr_reminv(mr);
507 			frwr_mr_put(mr);
508 			break;	/* only one invalidated MR per RPC */
509 		}
510 }
511 
512 static void frwr_mr_done(struct ib_wc *wc, struct rpcrdma_mr *mr)
513 {
514 	if (likely(wc->status == IB_WC_SUCCESS))
515 		frwr_mr_put(mr);
516 }
517 
518 /**
519  * frwr_wc_localinv - Invoked by RDMA provider for a LOCAL_INV WC
520  * @cq: completion queue
521  * @wc: WCE for a completed LocalInv WR
522  *
523  */
524 static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
525 {
526 	struct ib_cqe *cqe = wc->wr_cqe;
527 	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
528 
529 	/* WARNING: Only wr_cqe and status are reliable at this point */
530 	trace_xprtrdma_wc_li(wc, &mr->mr_cid);
531 	frwr_mr_done(wc, mr);
532 
533 	rpcrdma_flush_disconnect(cq->cq_context, wc);
534 }
535 
536 /**
537  * frwr_wc_localinv_wake - Invoked by RDMA provider for a LOCAL_INV WC
538  * @cq: completion queue
539  * @wc: WCE for a completed LocalInv WR
540  *
541  * Awaken anyone waiting for an MR to finish being fenced.
542  */
543 static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
544 {
545 	struct ib_cqe *cqe = wc->wr_cqe;
546 	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
547 
548 	/* WARNING: Only wr_cqe and status are reliable at this point */
549 	trace_xprtrdma_wc_li_wake(wc, &mr->mr_cid);
550 	frwr_mr_done(wc, mr);
551 	complete(&mr->mr_linv_done);
552 
553 	rpcrdma_flush_disconnect(cq->cq_context, wc);
554 }
555 
556 /**
557  * frwr_unmap_sync - invalidate memory regions that were registered for @req
558  * @r_xprt: controlling transport instance
559  * @req: rpcrdma_req with a non-empty list of MRs to process
560  *
561  * Sleeps until it is safe for the host CPU to access the previously mapped
562  * memory regions. This guarantees that registered MRs are properly fenced
563  * from the server before the RPC consumer accesses the data in them. It
564  * also ensures proper Send flow control: waking the next RPC waits until
565  * this RPC has relinquished all its Send Queue entries.
566  */
567 void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
568 {
569 	struct ib_send_wr *first, **prev, *last;
570 	struct rpcrdma_ep *ep = r_xprt->rx_ep;
571 	const struct ib_send_wr *bad_wr;
572 	struct rpcrdma_mr *mr;
573 	int rc;
574 
575 	/* ORDER: Invalidate all of the MRs first
576 	 *
577 	 * Chain the LOCAL_INV Work Requests and post them with
578 	 * a single ib_post_send() call.
579 	 */
580 	prev = &first;
581 	mr = rpcrdma_mr_pop(&req->rl_registered);
582 	do {
583 		trace_xprtrdma_mr_localinv(mr);
584 		r_xprt->rx_stats.local_inv_needed++;
585 
586 		last = &mr->mr_invwr;
587 		last->next = NULL;
588 		last->wr_cqe = &mr->mr_cqe;
589 		last->sg_list = NULL;
590 		last->num_sge = 0;
591 		last->opcode = IB_WR_LOCAL_INV;
592 		last->send_flags = IB_SEND_SIGNALED;
593 		last->ex.invalidate_rkey = mr->mr_handle;
594 
595 		last->wr_cqe->done = frwr_wc_localinv;
596 
597 		*prev = last;
598 		prev = &last->next;
599 	} while ((mr = rpcrdma_mr_pop(&req->rl_registered)));
600 
601 	mr = container_of(last, struct rpcrdma_mr, mr_invwr);
602 
603 	/* Strong send queue ordering guarantees that when the
604 	 * last WR in the chain completes, all WRs in the chain
605 	 * are complete.
606 	 */
607 	last->wr_cqe->done = frwr_wc_localinv_wake;
608 	reinit_completion(&mr->mr_linv_done);
609 
610 	/* Transport disconnect drains the receive CQ before it
611 	 * replaces the QP. The RPC reply handler won't call us
612 	 * unless re_id->qp is a valid pointer.
613 	 */
614 	bad_wr = NULL;
615 	rc = ib_post_send(ep->re_id->qp, first, &bad_wr);
616 
617 	/* The final LOCAL_INV WR in the chain is supposed to
618 	 * do the wake. If it was never posted, the wake will
619 	 * not happen, so don't wait in that case.
620 	 */
621 	if (bad_wr != first)
622 		wait_for_completion(&mr->mr_linv_done);
623 	if (!rc)
624 		return;
625 
626 	/* On error, the MRs get destroyed once the QP has drained. */
627 	trace_xprtrdma_post_linv_err(req, rc);
628 
629 	/* Force a connection loss to ensure complete recovery.
630 	 */
631 	rpcrdma_force_disconnect(ep);
632 }
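
/* frwr_unmap_sync() sleeps, so it suits contexts that can block,
 * such as tearing down or retransmitting a request whose MRs are
 * still registered; the reply path uses the asynchronous variant
 * below instead. This is a general note on intent, not a list of
 * every call site.
 */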
633 
634 /**
635  * frwr_wc_localinv_done - Invoked by RDMA provider for a signaled LOCAL_INV WC
636  * @cq:	completion queue
637  * @wc:	WCE for a completed LocalInv WR
638  *
639  */
640 static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
641 {
642 	struct ib_cqe *cqe = wc->wr_cqe;
643 	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
644 	struct rpcrdma_rep *rep;
645 
646 	/* WARNING: Only wr_cqe and status are reliable at this point */
647 	trace_xprtrdma_wc_li_done(wc, &mr->mr_cid);
648 
649 	/* Ensure that @rep is generated before the MR is released */
650 	rep = mr->mr_req->rl_reply;
651 	smp_rmb();
652 
653 	if (wc->status != IB_WC_SUCCESS) {
654 		if (rep)
655 			rpcrdma_unpin_rqst(rep);
656 		rpcrdma_flush_disconnect(cq->cq_context, wc);
657 		return;
658 	}
659 	frwr_mr_put(mr);
660 	rpcrdma_complete_rqst(rep);
661 }
662 
663 /**
664  * frwr_unmap_async - invalidate memory regions that were registered for @req
665  * @r_xprt: controlling transport instance
666  * @req: rpcrdma_req with a non-empty list of MRs to process
667  *
668  * This guarantees that registered MRs are properly fenced from the
669  * server before the RPC consumer accesses the data in them. It also
670  * ensures proper Send flow control: waking the next RPC waits until
671  * this RPC has relinquished all its Send Queue entries.
672  */
673 void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
674 {
675 	struct ib_send_wr *first, *last, **prev;
676 	struct rpcrdma_ep *ep = r_xprt->rx_ep;
677 	struct rpcrdma_mr *mr;
678 	int rc;
679 
680 	/* Chain the LOCAL_INV Work Requests and post them with
681 	 * a single ib_post_send() call.
682 	 */
683 	prev = &first;
684 	mr = rpcrdma_mr_pop(&req->rl_registered);
685 	do {
686 		trace_xprtrdma_mr_localinv(mr);
687 		r_xprt->rx_stats.local_inv_needed++;
688 
689 		last = &mr->mr_invwr;
690 		last->next = NULL;
691 		last->wr_cqe = &mr->mr_cqe;
692 		last->sg_list = NULL;
693 		last->num_sge = 0;
694 		last->opcode = IB_WR_LOCAL_INV;
695 		last->send_flags = IB_SEND_SIGNALED;
696 		last->ex.invalidate_rkey = mr->mr_handle;
697 
698 		last->wr_cqe->done = frwr_wc_localinv;
699 
700 		*prev = last;
701 		prev = &last->next;
702 	} while ((mr = rpcrdma_mr_pop(&req->rl_registered)));
703 
704 	/* Strong send queue ordering guarantees that when the
705 	 * last WR in the chain completes, all WRs in the chain
706 	 * are complete. The last completion will wake up the
707 	 * RPC waiter.
708 	 */
709 	last->wr_cqe->done = frwr_wc_localinv_done;
710 
711 	/* Transport disconnect drains the receive CQ before it
712 	 * replaces the QP. The RPC reply handler won't call us
713 	 * unless re_id->qp is a valid pointer.
714 	 */
715 	rc = ib_post_send(ep->re_id->qp, first, NULL);
716 	if (!rc)
717 		return;
718 
719 	/* On error, the MRs get destroyed once the QP has drained. */
720 	trace_xprtrdma_post_linv_err(req, rc);
721 
722 	/* The final LOCAL_INV WR in the chain is supposed to
723 	 * do the wake. If it was never posted, the wake does
724 	 * not happen. Unpin the rqst in preparation for its
725 	 * retransmission.
726 	 */
727 	rpcrdma_unpin_rqst(req->rl_reply);
728 
729 	/* Force a connection loss to ensure complete recovery.
730 	 */
731 	rpcrdma_force_disconnect(ep);
732 }
733 
734 /**
735  * frwr_wp_create - Create an MR for padding Write chunks
736  * @r_xprt: transport resources to use
737  *
738  * Return 0 on success, negative errno on failure.
739  */
740 int frwr_wp_create(struct rpcrdma_xprt *r_xprt)
741 {
742 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
743 	struct rpcrdma_ep *ep = r_xprt->rx_ep;
744 	struct ib_reg_wr *reg_wr;
745 	struct rpcrdma_mr *mr;
746 	struct ib_mr *ibmr;
747 	int dma_nents;
748 	int ret;
749 
750 	mr = rpcrdma_mr_get(r_xprt);
751 	if (!mr)
752 		return -EAGAIN;
753 	mr->mr_req = NULL;
754 	ep->re_write_pad_mr = mr;
755 
756 	sg_init_table(mr->mr_sg, 1);
757 	sg_set_page(mr->mr_sg, virt_to_page(ep->re_write_pad),
758 		    XDR_UNIT, offset_in_page(ep->re_write_pad));
759 
760 	mr->mr_dir = DMA_FROM_DEVICE;
761 	mr->mr_nents = 1;
762 	dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg,
763 				  mr->mr_nents, mr->mr_dir);
764 	if (!dma_nents) {
765 		ret = -EIO;
766 		goto out_mr;
767 	}
768 	mr->mr_device = ep->re_id->device;
769 
770 	ibmr = mr->mr_ibmr;
771 	if (ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL,
772 			 PAGE_SIZE) != dma_nents) {
773 		ret = -EIO;
774 		goto out_unmap;
775 	}
776 
777 	/* IOVA is not tagged with an XID; the write-pad is not RPC-specific. */
778 	ib_update_fast_reg_key(ibmr, ib_inc_rkey(ibmr->rkey));
779 
780 	reg_wr = &mr->mr_regwr;
781 	reg_wr->mr = ibmr;
782 	reg_wr->key = ibmr->rkey;
783 	reg_wr->access = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
784 
785 	mr->mr_handle = ibmr->rkey;
786 	mr->mr_length = ibmr->length;
787 	mr->mr_offset = ibmr->iova;
788 
789 	trace_xprtrdma_mr_fastreg(mr);
790 
791 	mr->mr_cqe.done = frwr_wc_fastreg;
792 	mr->mr_regwr.wr.next = NULL;
793 	mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe;
794 	mr->mr_regwr.wr.num_sge = 0;
795 	mr->mr_regwr.wr.opcode = IB_WR_REG_MR;
796 	mr->mr_regwr.wr.send_flags = 0;
797 
798 	ret = ib_post_send(ep->re_id->qp, &mr->mr_regwr.wr, NULL);
799 	if (!ret)
800 		return 0;
801 
802 out_unmap:
803 	frwr_mr_unmap(mr);
804 out_mr:
805 	ep->re_write_pad_mr = NULL;
806 	spin_lock(&buf->rb_lock);
807 	rpcrdma_mr_push(mr, &buf->rb_mrs);
808 	spin_unlock(&buf->rb_lock);
809 	return ret;
810 }
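
/* frwr_wp_create() is expected to run once for each fresh
 * connection, after the QP is set up, so that the XDR roundup of
 * unaligned Write chunks can be directed at this pre-registered
 * pad rather than requiring a per-RPC registration (a hedged note
 * on intent).
 */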
811