xref: /linux/net/sunrpc/xprtrdma/svc_rdma_transport.c (revision 005438a8eef063495ac059d128eea71b58de50e5)
1 /*
2  * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
3  * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the BSD-type
9  * license below:
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  *
15  *      Redistributions of source code must retain the above copyright
16  *      notice, this list of conditions and the following disclaimer.
17  *
18  *      Redistributions in binary form must reproduce the above
19  *      copyright notice, this list of conditions and the following
20  *      disclaimer in the documentation and/or other materials provided
21  *      with the distribution.
22  *
23  *      Neither the name of the Network Appliance, Inc. nor the names of
24  *      its contributors may be used to endorse or promote products
25  *      derived from this software without specific prior written
26  *      permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39  *
40  * Author: Tom Tucker <tom@opengridcomputing.com>
41  */
42 
43 #include <linux/sunrpc/svc_xprt.h>
44 #include <linux/sunrpc/debug.h>
45 #include <linux/sunrpc/rpc_rdma.h>
46 #include <linux/interrupt.h>
47 #include <linux/sched.h>
48 #include <linux/slab.h>
49 #include <linux/spinlock.h>
50 #include <linux/workqueue.h>
51 #include <rdma/ib_verbs.h>
52 #include <rdma/rdma_cm.h>
53 #include <linux/sunrpc/svc_rdma.h>
54 #include <linux/export.h>
55 #include "xprt_rdma.h"
56 
57 #define RPCDBG_FACILITY	RPCDBG_SVCXPRT
58 
59 static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
60 					struct net *net,
61 					struct sockaddr *sa, int salen,
62 					int flags);
63 static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
64 static void svc_rdma_release_rqst(struct svc_rqst *);
65 static void dto_tasklet_func(unsigned long data);
66 static void svc_rdma_detach(struct svc_xprt *xprt);
67 static void svc_rdma_free(struct svc_xprt *xprt);
68 static int svc_rdma_has_wspace(struct svc_xprt *xprt);
69 static int svc_rdma_secure_port(struct svc_rqst *);
70 static void rq_cq_reap(struct svcxprt_rdma *xprt);
71 static void sq_cq_reap(struct svcxprt_rdma *xprt);
72 
73 static DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
74 static DEFINE_SPINLOCK(dto_lock);
75 static LIST_HEAD(dto_xprt_q);
76 
77 static struct svc_xprt_ops svc_rdma_ops = {
78 	.xpo_create = svc_rdma_create,
79 	.xpo_recvfrom = svc_rdma_recvfrom,
80 	.xpo_sendto = svc_rdma_sendto,
81 	.xpo_release_rqst = svc_rdma_release_rqst,
82 	.xpo_detach = svc_rdma_detach,
83 	.xpo_free = svc_rdma_free,
84 	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
85 	.xpo_has_wspace = svc_rdma_has_wspace,
86 	.xpo_accept = svc_rdma_accept,
87 	.xpo_secure_port = svc_rdma_secure_port,
88 };
89 
90 struct svc_xprt_class svc_rdma_class = {
91 	.xcl_name = "rdma",
92 	.xcl_owner = THIS_MODULE,
93 	.xcl_ops = &svc_rdma_ops,
94 	.xcl_max_payload = RPCRDMA_MAXPAYLOAD,
95 	.xcl_ident = XPRT_TRANSPORT_RDMA,
96 };
97 
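/* Allocate a completion context to track the SGEs, pages, and DMA
 * mappings for one work request. The __GFP_NOFAIL allocation never
 * returns NULL, so callers need not check for failure.
 */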
98 struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
99 {
100 	struct svc_rdma_op_ctxt *ctxt;
101 
102 	ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep,
103 				GFP_KERNEL | __GFP_NOFAIL);
104 	ctxt->xprt = xprt;
105 	INIT_LIST_HEAD(&ctxt->dto_q);
106 	ctxt->count = 0;
107 	ctxt->frmr = NULL;
108 	atomic_inc(&xprt->sc_ctxt_used);
109 	return ctxt;
110 }
111 
112 void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
113 {
114 	struct svcxprt_rdma *xprt = ctxt->xprt;
115 	int i;
116 	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
117 		/*
118 		 * Unmap the DMA addr in the SGE if the lkey matches
119 		 * the sc_dma_lkey, otherwise, ignore it since it is
120 		 * an FRMR lkey and will be unmapped later when the
121 		 * last WR that uses it completes.
122 		 */
123 		if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
124 			atomic_dec(&xprt->sc_dma_used);
125 			ib_dma_unmap_page(xprt->sc_cm_id->device,
126 					    ctxt->sge[i].addr,
127 					    ctxt->sge[i].length,
128 					    ctxt->direction);
129 		}
130 	}
131 }
132 
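/* Return a completion context to its cache, optionally dropping the
 * page references it holds.
 */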
133 void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
134 {
135 	struct svcxprt_rdma *xprt;
136 	int i;
137 
138 	xprt = ctxt->xprt;
139 	if (free_pages)
140 		for (i = 0; i < ctxt->count; i++)
141 			put_page(ctxt->pages[i]);
142 
143 	kmem_cache_free(svc_rdma_ctxt_cachep, ctxt);
144 	atomic_dec(&xprt->sc_ctxt_used);
145 }
146 
147 /*
148  * Temporary NFS req mappings are shared across all transport
149  * instances. These are short-lived and should be bounded by the number
150  * of concurrent server threads * depth of the SQ.
151  */
152 struct svc_rdma_req_map *svc_rdma_get_req_map(void)
153 {
154 	struct svc_rdma_req_map *map;
155 	map = kmem_cache_alloc(svc_rdma_map_cachep,
156 			       GFP_KERNEL | __GFP_NOFAIL);
157 	map->count = 0;
158 	return map;
159 }
160 
161 void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
162 {
163 	kmem_cache_free(svc_rdma_map_cachep, map);
164 }
165 
166 /* ib_cq event handler */
167 static void cq_event_handler(struct ib_event *event, void *context)
168 {
169 	struct svc_xprt *xprt = context;
170 	dprintk("svcrdma: received CQ event %s (%d), context=%p\n",
171 		ib_event_msg(event->event), event->event, context);
172 	set_bit(XPT_CLOSE, &xprt->xpt_flags);
173 }
174 
175 /* QP event handler */
176 static void qp_event_handler(struct ib_event *event, void *context)
177 {
178 	struct svc_xprt *xprt = context;
179 
180 	switch (event->event) {
181 	/* These are considered benign events */
182 	case IB_EVENT_PATH_MIG:
183 	case IB_EVENT_COMM_EST:
184 	case IB_EVENT_SQ_DRAINED:
185 	case IB_EVENT_QP_LAST_WQE_REACHED:
186 		dprintk("svcrdma: QP event %s (%d) received for QP=%p\n",
187 			ib_event_msg(event->event), event->event,
188 			event->element.qp);
189 		break;
190 	/* These are considered fatal events */
191 	case IB_EVENT_PATH_MIG_ERR:
192 	case IB_EVENT_QP_FATAL:
193 	case IB_EVENT_QP_REQ_ERR:
194 	case IB_EVENT_QP_ACCESS_ERR:
195 	case IB_EVENT_DEVICE_FATAL:
196 	default:
197 		dprintk("svcrdma: QP ERROR event %s (%d) received for QP=%p, "
198 			"closing transport\n",
199 			ib_event_msg(event->event), event->event,
200 			event->element.qp);
201 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
202 		break;
203 	}
204 }
205 
206 /*
207  * Data Transfer Operation Tasklet
208  *
209  * Walks a list of transports with I/O pending, removing entries as
210  * they are added to the server's I/O pending list. Two bits indicate
211  * if SQ, RQ, or both have I/O pending. The dto_lock is an irqsave
212  * spinlock that serializes access to the transport list with the RQ
213  * and SQ interrupt handlers.
214  */
215 static void dto_tasklet_func(unsigned long data)
216 {
217 	struct svcxprt_rdma *xprt;
218 	unsigned long flags;
219 
220 	spin_lock_irqsave(&dto_lock, flags);
221 	while (!list_empty(&dto_xprt_q)) {
222 		xprt = list_entry(dto_xprt_q.next,
223 				  struct svcxprt_rdma, sc_dto_q);
224 		list_del_init(&xprt->sc_dto_q);
225 		spin_unlock_irqrestore(&dto_lock, flags);
226 
227 		rq_cq_reap(xprt);
228 		sq_cq_reap(xprt);
229 
230 		svc_xprt_put(&xprt->sc_xprt);
231 		spin_lock_irqsave(&dto_lock, flags);
232 	}
233 	spin_unlock_irqrestore(&dto_lock, flags);
234 }
235 
236 /*
237  * Receive Queue Completion Handler
238  *
239  * Since an RQ completion handler is called in interrupt context, we
240  * need to defer the handling of the I/O to a tasklet.
241  */
242 static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
243 {
244 	struct svcxprt_rdma *xprt = cq_context;
245 	unsigned long flags;
246 
247 	/* Guard against unconditional flush call for destroyed QP */
248 	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
249 		return;
250 
251 	/*
252 	 * Set the bit regardless of whether or not it's on the list
253 	 * because it may be on the list already due to an SQ
254 	 * completion.
255 	 */
256 	set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);
257 
258 	/*
259 	 * If this transport is not already on the DTO transport queue,
260 	 * add it
261 	 */
262 	spin_lock_irqsave(&dto_lock, flags);
263 	if (list_empty(&xprt->sc_dto_q)) {
264 		svc_xprt_get(&xprt->sc_xprt);
265 		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
266 	}
267 	spin_unlock_irqrestore(&dto_lock, flags);
268 
269 	/* Tasklet does all the work to avoid irqsave locks. */
270 	tasklet_schedule(&dto_tasklet);
271 }
272 
273 /*
274  * rq_cq_reap - Process the RQ CQ.
275  *
276  * Take all completing WC off the CQE and enqueue the associated DTO
277  * context on the dto_q for the transport.
278  *
279  * Note that caller must hold a transport reference.
280  */
281 static void rq_cq_reap(struct svcxprt_rdma *xprt)
282 {
283 	int ret;
284 	struct ib_wc wc;
285 	struct svc_rdma_op_ctxt *ctxt = NULL;
286 
287 	if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
288 		return;
289 
290 	ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
291 	atomic_inc(&rdma_stat_rq_poll);
292 
293 	while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
294 		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
295 		ctxt->wc_status = wc.status;
296 		ctxt->byte_len = wc.byte_len;
297 		svc_rdma_unmap_dma(ctxt);
298 		if (wc.status != IB_WC_SUCCESS) {
299 			/* Close the transport */
300 			dprintk("svcrdma: transport closing, putting ctxt %p\n", ctxt);
301 			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
302 			svc_rdma_put_context(ctxt, 1);
303 			svc_xprt_put(&xprt->sc_xprt);
304 			continue;
305 		}
306 		spin_lock_bh(&xprt->sc_rq_dto_lock);
307 		list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
308 		spin_unlock_bh(&xprt->sc_rq_dto_lock);
309 		svc_xprt_put(&xprt->sc_xprt);
310 	}
311 
312 	if (ctxt)
313 		atomic_inc(&rdma_stat_rq_prod);
314 
315 	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
316 	/*
317 	 * If data arrived before the ESTABLISHED event,
318 	 * don't enqueue. This defers RPC I/O until the
319 	 * RDMA connection is complete.
320 	 */
321 	if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
322 		svc_xprt_enqueue(&xprt->sc_xprt);
323 }
324 
325 /*
326  * Process a completion context
327  */
328 static void process_context(struct svcxprt_rdma *xprt,
329 			    struct svc_rdma_op_ctxt *ctxt)
330 {
331 	svc_rdma_unmap_dma(ctxt);
332 
333 	switch (ctxt->wr_op) {
334 	case IB_WR_SEND:
335 		if (ctxt->frmr)
336 			pr_err("svcrdma: SEND: ctxt->frmr != NULL\n");
337 		svc_rdma_put_context(ctxt, 1);
338 		break;
339 
340 	case IB_WR_RDMA_WRITE:
341 		if (ctxt->frmr)
342 			pr_err("svcrdma: WRITE: ctxt->frmr != NULL\n");
343 		svc_rdma_put_context(ctxt, 0);
344 		break;
345 
346 	case IB_WR_RDMA_READ:
347 	case IB_WR_RDMA_READ_WITH_INV:
348 		svc_rdma_put_frmr(xprt, ctxt->frmr);
349 		if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
350 			struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
351 			if (read_hdr) {
352 				spin_lock_bh(&xprt->sc_rq_dto_lock);
353 				set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
354 				list_add_tail(&read_hdr->dto_q,
355 					      &xprt->sc_read_complete_q);
356 				spin_unlock_bh(&xprt->sc_rq_dto_lock);
357 			} else {
358 				pr_err("svcrdma: ctxt->read_hdr == NULL\n");
359 			}
360 			svc_xprt_enqueue(&xprt->sc_xprt);
361 		}
362 		svc_rdma_put_context(ctxt, 0);
363 		break;
364 
365 	default:
366 		printk(KERN_ERR "svcrdma: unexpected completion type, "
367 		       "opcode=%d\n",
368 		       ctxt->wr_op);
369 		break;
370 	}
371 }
372 
373 /*
374  * Send Queue Completion Handler - potentially called in interrupt context.
375  *
376  * Note that caller must hold a transport reference.
377  */
378 static void sq_cq_reap(struct svcxprt_rdma *xprt)
379 {
380 	struct svc_rdma_op_ctxt *ctxt = NULL;
381 	struct ib_wc wc_a[6];
382 	struct ib_wc *wc;
383 	struct ib_cq *cq = xprt->sc_sq_cq;
384 	int ret;
385 
386 	memset(wc_a, 0, sizeof(wc_a));
387 
388 	if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
389 		return;
390 
391 	ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
392 	atomic_inc(&rdma_stat_sq_poll);
393 	while ((ret = ib_poll_cq(cq, ARRAY_SIZE(wc_a), wc_a)) > 0) {
394 		int i;
395 
396 		for (i = 0; i < ret; i++) {
397 			wc = &wc_a[i];
398 			if (wc->status != IB_WC_SUCCESS) {
399 				dprintk("svcrdma: sq wc err status %s (%d)\n",
400 					ib_wc_status_msg(wc->status),
401 					wc->status);
402 
403 				/* Close the transport */
404 				set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
405 			}
406 
407 			/* Decrement used SQ WR count */
408 			atomic_dec(&xprt->sc_sq_count);
409 			wake_up(&xprt->sc_send_wait);
410 
411 			ctxt = (struct svc_rdma_op_ctxt *)
412 				(unsigned long)wc->wr_id;
413 			if (ctxt)
414 				process_context(xprt, ctxt);
415 
416 			svc_xprt_put(&xprt->sc_xprt);
417 		}
418 	}
419 
420 	if (ctxt)
421 		atomic_inc(&rdma_stat_sq_prod);
422 }
423 
424 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
425 {
426 	struct svcxprt_rdma *xprt = cq_context;
427 	unsigned long flags;
428 
429 	/* Guard against unconditional flush call for destroyed QP */
430 	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
431 		return;
432 
433 	/*
434 	 * Set the bit regardless of whether or not it's on the list
435 	 * because it may be on the list already due to an RQ
436 	 * completion.
437 	 */
438 	set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);
439 
440 	/*
441 	 * If this transport is not already on the DTO transport queue,
442 	 * add it
443 	 */
444 	spin_lock_irqsave(&dto_lock, flags);
445 	if (list_empty(&xprt->sc_dto_q)) {
446 		svc_xprt_get(&xprt->sc_xprt);
447 		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
448 	}
449 	spin_unlock_irqrestore(&dto_lock, flags);
450 
451 	/* Tasklet does all the work to avoid irqsave locks. */
452 	tasklet_schedule(&dto_tasklet);
453 }
454 
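/* Allocate and initialize a transport instance: its lists, locks, wait
 * queue, and resource limits taken from the svcrdma module parameters.
 * Returns NULL if the allocation fails.
 */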
455 static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
456 					     int listener)
457 {
458 	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);
459 
460 	if (!cma_xprt)
461 		return NULL;
462 	svc_xprt_init(&init_net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
463 	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
464 	INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
465 	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
466 	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
467 	INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
468 	init_waitqueue_head(&cma_xprt->sc_send_wait);
469 
470 	spin_lock_init(&cma_xprt->sc_lock);
471 	spin_lock_init(&cma_xprt->sc_rq_dto_lock);
472 	spin_lock_init(&cma_xprt->sc_frmr_q_lock);
473 
474 	cma_xprt->sc_ord = svcrdma_ord;
475 
476 	cma_xprt->sc_max_req_size = svcrdma_max_req_size;
477 	cma_xprt->sc_max_requests = svcrdma_max_requests;
478 	cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
479 	atomic_set(&cma_xprt->sc_sq_count, 0);
480 	atomic_set(&cma_xprt->sc_ctxt_used, 0);
481 
482 	if (listener)
483 		set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);
484 
485 	return cma_xprt;
486 }
487 
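/* Post a receive work request large enough to hold an inline request
 * of sc_max_req_size bytes. Pages are allocated and DMA-mapped one SGE
 * at a time. Returns 0 on success or a negative errno.
 */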
488 int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
489 {
490 	struct ib_recv_wr recv_wr, *bad_recv_wr;
491 	struct svc_rdma_op_ctxt *ctxt;
492 	struct page *page;
493 	dma_addr_t pa;
494 	int sge_no;
495 	int buflen;
496 	int ret;
497 
498 	ctxt = svc_rdma_get_context(xprt);
499 	buflen = 0;
500 	ctxt->direction = DMA_FROM_DEVICE;
501 	for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
502 		if (sge_no >= xprt->sc_max_sge) {
503 			pr_err("svcrdma: Too many sges (%d)\n", sge_no);
504 			goto err_put_ctxt;
505 		}
506 		page = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
507 		ctxt->pages[sge_no] = page;
508 		pa = ib_dma_map_page(xprt->sc_cm_id->device,
509 				     page, 0, PAGE_SIZE,
510 				     DMA_FROM_DEVICE);
511 		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
512 			goto err_put_ctxt;
513 		atomic_inc(&xprt->sc_dma_used);
514 		ctxt->sge[sge_no].addr = pa;
515 		ctxt->sge[sge_no].length = PAGE_SIZE;
516 		ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
517 		ctxt->count = sge_no + 1;
518 		buflen += PAGE_SIZE;
519 	}
520 	recv_wr.next = NULL;
521 	recv_wr.sg_list = &ctxt->sge[0];
522 	recv_wr.num_sge = ctxt->count;
523 	recv_wr.wr_id = (u64)(unsigned long)ctxt;
524 
525 	svc_xprt_get(&xprt->sc_xprt);
526 	ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
527 	if (ret) {
528 		svc_rdma_unmap_dma(ctxt);
529 		svc_rdma_put_context(ctxt, 1);
530 		svc_xprt_put(&xprt->sc_xprt);
531 	}
532 	return ret;
533 
534  err_put_ctxt:
535 	svc_rdma_unmap_dma(ctxt);
536 	svc_rdma_put_context(ctxt, 1);
537 	return -ENOMEM;
538 }
539 
540 /*
541  * This function handles the CONNECT_REQUEST event on a listening
542  * endpoint. It is passed the cma_id for the _new_ connection. The context in
543  * this cma_id is inherited from the listening cma_id and is the svc_xprt
544  * structure for the listening endpoint.
545  *
546  * This function creates a new xprt for the new connection and enqueues it on
547  * the accept queue for the listening xprt. When the listen thread is kicked, it
548  * will call the recvfrom method on the listen xprt which will accept the new
549  * connection.
550  */
551 static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
552 {
553 	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
554 	struct svcxprt_rdma *newxprt;
555 	struct sockaddr *sa;
556 
557 	/* Create a new transport */
558 	newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
559 	if (!newxprt) {
560 		dprintk("svcrdma: failed to create new transport\n");
561 		return;
562 	}
563 	newxprt->sc_cm_id = new_cma_id;
564 	new_cma_id->context = newxprt;
565 	dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
566 		newxprt, newxprt->sc_cm_id, listen_xprt);
567 
568 	/* Save client advertised inbound read limit for use later in accept. */
569 	newxprt->sc_ord = client_ird;
570 
571 	/* Set the local and remote addresses in the transport */
572 	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
573 	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
574 	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
575 	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));
576 
577 	/*
578 	 * Enqueue the new transport on the accept queue of the listening
579 	 * transport
580 	 */
581 	spin_lock_bh(&listen_xprt->sc_lock);
582 	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
583 	spin_unlock_bh(&listen_xprt->sc_lock);
584 
585 	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
586 	svc_xprt_enqueue(&listen_xprt->sc_xprt);
587 }
588 
589 /*
590  * Handles events generated on the listening endpoint. These events will be
591  * either incoming connect requests or adapter removal events.
592  */
593 static int rdma_listen_handler(struct rdma_cm_id *cma_id,
594 			       struct rdma_cm_event *event)
595 {
596 	struct svcxprt_rdma *xprt = cma_id->context;
597 	int ret = 0;
598 
599 	switch (event->event) {
600 	case RDMA_CM_EVENT_CONNECT_REQUEST:
601 		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
602 			"event = %s (%d)\n", cma_id, cma_id->context,
603 			rdma_event_msg(event->event), event->event);
604 		handle_connect_req(cma_id,
605 				   event->param.conn.initiator_depth);
606 		break;
607 
608 	case RDMA_CM_EVENT_ESTABLISHED:
609 		/* Accept complete */
610 		dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
611 			"cm_id=%p\n", xprt, cma_id);
612 		break;
613 
614 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
615 		dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
616 			xprt, cma_id);
617 		if (xprt)
618 			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
619 		break;
620 
621 	default:
622 		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
623 			"event = %s (%d)\n", cma_id,
624 			rdma_event_msg(event->event), event->event);
625 		break;
626 	}
627 
628 	return ret;
629 }
630 
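/* CM event handler for connected endpoints: handles connection
 * established, disconnect, and device removal events.
 */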
631 static int rdma_cma_handler(struct rdma_cm_id *cma_id,
632 			    struct rdma_cm_event *event)
633 {
634 	struct svc_xprt *xprt = cma_id->context;
635 	struct svcxprt_rdma *rdma =
636 		container_of(xprt, struct svcxprt_rdma, sc_xprt);
637 	switch (event->event) {
638 	case RDMA_CM_EVENT_ESTABLISHED:
639 		/* Accept complete */
640 		svc_xprt_get(xprt);
641 		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
642 			"cm_id=%p\n", xprt, cma_id);
643 		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
644 		svc_xprt_enqueue(xprt);
645 		break;
646 	case RDMA_CM_EVENT_DISCONNECTED:
647 		dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
648 			xprt, cma_id);
649 		if (xprt) {
650 			set_bit(XPT_CLOSE, &xprt->xpt_flags);
651 			svc_xprt_enqueue(xprt);
652 			svc_xprt_put(xprt);
653 		}
654 		break;
655 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
656 		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
657 			"event = %s (%d)\n", cma_id, xprt,
658 			rdma_event_msg(event->event), event->event);
659 		if (xprt) {
660 			set_bit(XPT_CLOSE, &xprt->xpt_flags);
661 			svc_xprt_enqueue(xprt);
662 		}
663 		break;
664 	default:
665 		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
666 			"event = %s (%d)\n", cma_id,
667 			rdma_event_msg(event->event), event->event);
668 		break;
669 	}
670 	return 0;
671 }
672 
673 /*
674  * Create a listening RDMA service endpoint.
675  */
676 static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
677 					struct net *net,
678 					struct sockaddr *sa, int salen,
679 					int flags)
680 {
681 	struct rdma_cm_id *listen_id;
682 	struct svcxprt_rdma *cma_xprt;
683 	int ret;
684 
685 	dprintk("svcrdma: Creating RDMA socket\n");
686 	if (sa->sa_family != AF_INET) {
687 		dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
688 		return ERR_PTR(-EAFNOSUPPORT);
689 	}
690 	cma_xprt = rdma_create_xprt(serv, 1);
691 	if (!cma_xprt)
692 		return ERR_PTR(-ENOMEM);
693 
694 	listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP,
695 				   IB_QPT_RC);
696 	if (IS_ERR(listen_id)) {
697 		ret = PTR_ERR(listen_id);
698 		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
699 		goto err0;
700 	}
701 
702 	ret = rdma_bind_addr(listen_id, sa);
703 	if (ret) {
704 		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
705 		goto err1;
706 	}
707 	cma_xprt->sc_cm_id = listen_id;
708 
709 	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
710 	if (ret) {
711 		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
712 		goto err1;
713 	}
714 
715 	/*
716 	 * We need to use the address from the cm_id in case the
717 	 * caller specified 0 for the port number.
718 	 */
719 	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
720 	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);
721 
722 	return &cma_xprt->sc_xprt;
723 
724  err1:
725 	rdma_destroy_id(listen_id);
726  err0:
727 	kfree(cma_xprt);
728 	return ERR_PTR(ret);
729 }
730 
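/* Allocate a fast registration MR and a page list large enough to map
 * RPCSVC_MAXPAGES pages. Returns ERR_PTR(-ENOMEM) on any failure.
 */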
731 static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
732 {
733 	struct ib_mr *mr;
734 	struct ib_fast_reg_page_list *pl;
735 	struct svc_rdma_fastreg_mr *frmr;
736 
737 	frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
738 	if (!frmr)
739 		goto err;
740 
741 	mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES);
742 	if (IS_ERR(mr))
743 		goto err_free_frmr;
744 
745 	pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device,
746 					 RPCSVC_MAXPAGES);
747 	if (IS_ERR(pl))
748 		goto err_free_mr;
749 
750 	frmr->mr = mr;
751 	frmr->page_list = pl;
752 	INIT_LIST_HEAD(&frmr->frmr_list);
753 	return frmr;
754 
755  err_free_mr:
756 	ib_dereg_mr(mr);
757  err_free_frmr:
758 	kfree(frmr);
759  err:
760 	return ERR_PTR(-ENOMEM);
761 }
762 
763 static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
764 {
765 	struct svc_rdma_fastreg_mr *frmr;
766 
767 	while (!list_empty(&xprt->sc_frmr_q)) {
768 		frmr = list_entry(xprt->sc_frmr_q.next,
769 				  struct svc_rdma_fastreg_mr, frmr_list);
770 		list_del_init(&frmr->frmr_list);
771 		ib_dereg_mr(frmr->mr);
772 		ib_free_fast_reg_page_list(frmr->page_list);
773 		kfree(frmr);
774 	}
775 }
776 
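/* Take an MR from the transport's free queue, or allocate a fresh one
 * if the queue is empty.
 */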
777 struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
778 {
779 	struct svc_rdma_fastreg_mr *frmr = NULL;
780 
781 	spin_lock_bh(&rdma->sc_frmr_q_lock);
782 	if (!list_empty(&rdma->sc_frmr_q)) {
783 		frmr = list_entry(rdma->sc_frmr_q.next,
784 				  struct svc_rdma_fastreg_mr, frmr_list);
785 		list_del_init(&frmr->frmr_list);
786 		frmr->map_len = 0;
787 		frmr->page_list_len = 0;
788 	}
789 	spin_unlock_bh(&rdma->sc_frmr_q_lock);
790 	if (frmr)
791 		return frmr;
792 
793 	return rdma_alloc_frmr(rdma);
794 }
795 
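/* Unmap the pages behind a fast-reg MR, skipping any page list entry
 * that was never successfully DMA-mapped.
 */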
796 static void frmr_unmap_dma(struct svcxprt_rdma *xprt,
797 			   struct svc_rdma_fastreg_mr *frmr)
798 {
799 	int page_no;
800 	for (page_no = 0; page_no < frmr->page_list_len; page_no++) {
801 		dma_addr_t addr = frmr->page_list->page_list[page_no];
802 		if (ib_dma_mapping_error(frmr->mr->device, addr))
803 			continue;
804 		atomic_dec(&xprt->sc_dma_used);
805 		ib_dma_unmap_page(frmr->mr->device, addr, PAGE_SIZE,
806 				  frmr->direction);
807 	}
808 }
809 
810 void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
811 		       struct svc_rdma_fastreg_mr *frmr)
812 {
813 	if (frmr) {
814 		frmr_unmap_dma(rdma, frmr);
815 		spin_lock_bh(&rdma->sc_frmr_q_lock);
816 		WARN_ON_ONCE(!list_empty(&frmr->frmr_list));
817 		list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
818 		spin_unlock_bh(&rdma->sc_frmr_q_lock);
819 	}
820 }
821 
822 /*
823  * This is the xpo_recvfrom function for listening endpoints. Its
824  * purpose is to accept incoming connections. The CMA callback handler
825  * has already created a new transport and attached it to the new CMA
826  * ID.
827  *
828  * There is a queue of pending connections hung on the listening
829  * transport. This queue contains new svc_xprt structures. This
830  * function takes svc_xprt structures off the accept_q and completes
831  * the connection.
832  */
833 static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
834 {
835 	struct svcxprt_rdma *listen_rdma;
836 	struct svcxprt_rdma *newxprt = NULL;
837 	struct rdma_conn_param conn_param;
838 	struct ib_cq_init_attr cq_attr = {};
839 	struct ib_qp_init_attr qp_attr;
840 	struct ib_device_attr devattr;
841 	int uninitialized_var(dma_mr_acc);
842 	int need_dma_mr = 0;
843 	int ret;
844 	int i;
845 
846 	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
847 	clear_bit(XPT_CONN, &xprt->xpt_flags);
848 	/* Get the next entry off the accept list */
849 	spin_lock_bh(&listen_rdma->sc_lock);
850 	if (!list_empty(&listen_rdma->sc_accept_q)) {
851 		newxprt = list_entry(listen_rdma->sc_accept_q.next,
852 				     struct svcxprt_rdma, sc_accept_q);
853 		list_del_init(&newxprt->sc_accept_q);
854 	}
855 	if (!list_empty(&listen_rdma->sc_accept_q))
856 		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
857 	spin_unlock_bh(&listen_rdma->sc_lock);
858 	if (!newxprt)
859 		return NULL;
860 
861 	dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
862 		newxprt, newxprt->sc_cm_id);
863 
864 	ret = ib_query_device(newxprt->sc_cm_id->device, &devattr);
865 	if (ret) {
866 		dprintk("svcrdma: could not query device attributes on "
867 			"device %p, rc=%d\n", newxprt->sc_cm_id->device, ret);
868 		goto errout;
869 	}
870 
871 	/* Qualify the transport resource defaults with the
872 	 * capabilities of this particular device */
873 	newxprt->sc_max_sge = min((size_t)devattr.max_sge,
874 				  (size_t)RPCSVC_MAXPAGES);
875 	newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
876 				   (size_t)svcrdma_max_requests);
877 	newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;
878 
879 	/*
880 	 * Limit ORD based on client limit, local device limit, and
881 	 * configured svcrdma limit.
882 	 */
883 	newxprt->sc_ord = min_t(size_t, devattr.max_qp_rd_atom, newxprt->sc_ord);
884 	newxprt->sc_ord = min_t(size_t,	svcrdma_ord, newxprt->sc_ord);
885 
886 	newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device);
887 	if (IS_ERR(newxprt->sc_pd)) {
888 		dprintk("svcrdma: error creating PD for connect request\n");
889 		goto errout;
890 	}
891 	cq_attr.cqe = newxprt->sc_sq_depth;
892 	newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
893 					 sq_comp_handler,
894 					 cq_event_handler,
895 					 newxprt,
896 					 &cq_attr);
897 	if (IS_ERR(newxprt->sc_sq_cq)) {
898 		dprintk("svcrdma: error creating SQ CQ for connect request\n");
899 		goto errout;
900 	}
901 	cq_attr.cqe = newxprt->sc_max_requests;
902 	newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
903 					 rq_comp_handler,
904 					 cq_event_handler,
905 					 newxprt,
906 					 &cq_attr);
907 	if (IS_ERR(newxprt->sc_rq_cq)) {
908 		dprintk("svcrdma: error creating RQ CQ for connect request\n");
909 		goto errout;
910 	}
911 
912 	memset(&qp_attr, 0, sizeof qp_attr);
913 	qp_attr.event_handler = qp_event_handler;
914 	qp_attr.qp_context = &newxprt->sc_xprt;
915 	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
916 	qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
917 	qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
918 	qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
919 	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
920 	qp_attr.qp_type = IB_QPT_RC;
921 	qp_attr.send_cq = newxprt->sc_sq_cq;
922 	qp_attr.recv_cq = newxprt->sc_rq_cq;
923 	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
924 		"    cm_id->device=%p, sc_pd->device=%p\n"
925 		"    cap.max_send_wr = %d\n"
926 		"    cap.max_recv_wr = %d\n"
927 		"    cap.max_send_sge = %d\n"
928 		"    cap.max_recv_sge = %d\n",
929 		newxprt->sc_cm_id, newxprt->sc_pd,
930 		newxprt->sc_cm_id->device, newxprt->sc_pd->device,
931 		qp_attr.cap.max_send_wr,
932 		qp_attr.cap.max_recv_wr,
933 		qp_attr.cap.max_send_sge,
934 		qp_attr.cap.max_recv_sge);
935 
936 	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
937 	if (ret) {
938 		dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
939 		goto errout;
940 	}
941 	newxprt->sc_qp = newxprt->sc_cm_id->qp;
942 
943 	/*
944 	 * Use the most secure set of MR resources based on the
945 	 * transport type and available memory management features in
946 	 * the device. Here's the table implemented below:
947 	 *
948 	 *		Fast	Global	DMA	Remote WR
949 	 *		Reg	LKEY	MR	Access
950 	 *		Sup'd	Sup'd	Needed	Needed
951 	 *
952 	 * IWARP	N	N	Y	Y
953 	 *		N	Y	Y	Y
954 	 *		Y	N	Y	N
955 	 *		Y	Y	N	-
956 	 *
957 	 * IB		N	N	Y	N
958 	 *		N	Y	N	-
959 	 *		Y	N	Y	N
960 	 *		Y	Y	N	-
961 	 *
962 	 * NB:	iWARP requires remote write access for the data sink
963 	 *	of an RDMA_READ. IB does not.
964 	 */
965 	newxprt->sc_reader = rdma_read_chunk_lcl;
966 	if (devattr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
967 		newxprt->sc_frmr_pg_list_len =
968 			devattr.max_fast_reg_page_list_len;
969 		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
970 		newxprt->sc_reader = rdma_read_chunk_frmr;
971 	}
972 
973 	/*
974 	 * Determine if a DMA MR is required and if so, what privs are required
975 	 */
976 	if (!rdma_protocol_iwarp(newxprt->sc_cm_id->device,
977 				 newxprt->sc_cm_id->port_num) &&
978 	    !rdma_ib_or_roce(newxprt->sc_cm_id->device,
979 			     newxprt->sc_cm_id->port_num))
980 		goto errout;
981 
982 	if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG) ||
983 	    !(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
984 		need_dma_mr = 1;
985 		dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
986 		if (rdma_protocol_iwarp(newxprt->sc_cm_id->device,
987 					newxprt->sc_cm_id->port_num) &&
988 		    !(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG))
989 			dma_mr_acc |= IB_ACCESS_REMOTE_WRITE;
990 	}
991 
992 	if (rdma_protocol_iwarp(newxprt->sc_cm_id->device,
993 				newxprt->sc_cm_id->port_num))
994 		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
995 
996 	/* Create the DMA MR if needed; otherwise, use the DMA LKEY */
997 	if (need_dma_mr) {
998 		/* Register all of physical memory */
999 		newxprt->sc_phys_mr =
1000 			ib_get_dma_mr(newxprt->sc_pd, dma_mr_acc);
1001 		if (IS_ERR(newxprt->sc_phys_mr)) {
1002 			dprintk("svcrdma: Failed to create DMA MR ret=%d\n",
1003 				ret);
1004 			goto errout;
1005 		}
1006 		newxprt->sc_dma_lkey = newxprt->sc_phys_mr->lkey;
1007 	} else
1008 		newxprt->sc_dma_lkey =
1009 			newxprt->sc_cm_id->device->local_dma_lkey;
1010 
1011 	/* Post receive buffers */
1012 	for (i = 0; i < newxprt->sc_max_requests; i++) {
1013 		ret = svc_rdma_post_recv(newxprt);
1014 		if (ret) {
1015 			dprintk("svcrdma: failure posting receive buffers\n");
1016 			goto errout;
1017 		}
1018 	}
1019 
1020 	/* Swap out the handler */
1021 	newxprt->sc_cm_id->event_handler = rdma_cma_handler;
1022 
1023 	/*
1024 	 * Arm the CQs for the SQ and RQ before accepting so we can't
1025 	 * miss the first message
1026 	 */
1027 	ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
1028 	ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);
1029 
1030 	/* Accept Connection */
1031 	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
1032 	memset(&conn_param, 0, sizeof conn_param);
1033 	conn_param.responder_resources = 0;
1034 	conn_param.initiator_depth = newxprt->sc_ord;
1035 	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
1036 	if (ret) {
1037 		dprintk("svcrdma: failed to accept new connection, ret=%d\n",
1038 		       ret);
1039 		goto errout;
1040 	}
1041 
1042 	dprintk("svcrdma: new connection %p accepted with the following "
1043 		"attributes:\n"
1044 		"    local_ip        : %pI4\n"
1045 		"    local_port	     : %d\n"
1046 		"    remote_ip       : %pI4\n"
1047 		"    remote_port     : %d\n"
1048 		"    max_sge         : %d\n"
1049 		"    sq_depth        : %d\n"
1050 		"    max_requests    : %d\n"
1051 		"    ord             : %d\n",
1052 		newxprt,
1053 		&((struct sockaddr_in *)&newxprt->sc_cm_id->
1054 			 route.addr.src_addr)->sin_addr.s_addr,
1055 		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
1056 		       route.addr.src_addr)->sin_port),
1057 		&((struct sockaddr_in *)&newxprt->sc_cm_id->
1058 			 route.addr.dst_addr)->sin_addr.s_addr,
1059 		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
1060 		       route.addr.dst_addr)->sin_port),
1061 		newxprt->sc_max_sge,
1062 		newxprt->sc_sq_depth,
1063 		newxprt->sc_max_requests,
1064 		newxprt->sc_ord);
1065 
1066 	return &newxprt->sc_xprt;
1067 
1068  errout:
1069 	dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
1070 	/* Take a reference in case the DTO handler runs */
1071 	svc_xprt_get(&newxprt->sc_xprt);
1072 	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
1073 		ib_destroy_qp(newxprt->sc_qp);
1074 	rdma_destroy_id(newxprt->sc_cm_id);
1075 	/* This call to put will destroy the transport */
1076 	svc_xprt_put(&newxprt->sc_xprt);
1077 	return NULL;
1078 }
1079 
1080 static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
1081 {
1082 }
1083 
1084 /*
1085  * When connected, an svc_xprt has at least two references:
1086  *
1087  * - A reference held by the cm_id between the ESTABLISHED and
1088  *   DISCONNECTED events. If the remote peer disconnected first, this
1089  *   reference could be gone.
1090  *
1091  * - A reference held by the svc_recv code that called this function
1092  *   as part of close processing.
1093  *
1094  * At a minimum, one reference should still be held.
1095  */
1096 static void svc_rdma_detach(struct svc_xprt *xprt)
1097 {
1098 	struct svcxprt_rdma *rdma =
1099 		container_of(xprt, struct svcxprt_rdma, sc_xprt);
1100 	dprintk("svc: svc_rdma_detach(%p)\n", xprt);
1101 
1102 	/* Disconnect and flush posted WQE */
1103 	rdma_disconnect(rdma->sc_cm_id);
1104 }
1105 
1106 static void __svc_rdma_free(struct work_struct *work)
1107 {
1108 	struct svcxprt_rdma *rdma =
1109 		container_of(work, struct svcxprt_rdma, sc_work);
1110 	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);
1111 
1112 	/* We should only be called from kref_put */
1113 	if (atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0)
1114 		pr_err("svcrdma: sc_xprt still in use? (%d)\n",
1115 		       atomic_read(&rdma->sc_xprt.xpt_ref.refcount));
1116 
1117 	/*
1118 	 * Destroy queued, but not processed read completions. Note
1119 	 * that this cleanup has to be done before destroying the
1120 	 * cm_id because the device ptr is needed to unmap the dma in
1121 	 * svc_rdma_put_context.
1122 	 */
1123 	while (!list_empty(&rdma->sc_read_complete_q)) {
1124 		struct svc_rdma_op_ctxt *ctxt;
1125 		ctxt = list_entry(rdma->sc_read_complete_q.next,
1126 				  struct svc_rdma_op_ctxt,
1127 				  dto_q);
1128 		list_del_init(&ctxt->dto_q);
1129 		svc_rdma_put_context(ctxt, 1);
1130 	}
1131 
1132 	/* Destroy queued, but not processed recv completions */
1133 	while (!list_empty(&rdma->sc_rq_dto_q)) {
1134 		struct svc_rdma_op_ctxt *ctxt;
1135 		ctxt = list_entry(rdma->sc_rq_dto_q.next,
1136 				  struct svc_rdma_op_ctxt,
1137 				  dto_q);
1138 		list_del_init(&ctxt->dto_q);
1139 		svc_rdma_put_context(ctxt, 1);
1140 	}
1141 
1142 	/* Warn if we leaked a resource or under-referenced */
1143 	if (atomic_read(&rdma->sc_ctxt_used) != 0)
1144 		pr_err("svcrdma: ctxt still in use? (%d)\n",
1145 		       atomic_read(&rdma->sc_ctxt_used));
1146 	if (atomic_read(&rdma->sc_dma_used) != 0)
1147 		pr_err("svcrdma: dma still in use? (%d)\n",
1148 		       atomic_read(&rdma->sc_dma_used));
1149 
1150 	/* De-allocate fastreg mr */
1151 	rdma_dealloc_frmr_q(rdma);
1152 
1153 	/* Destroy the QP if present (not a listener) */
1154 	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
1155 		ib_destroy_qp(rdma->sc_qp);
1156 
1157 	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
1158 		ib_destroy_cq(rdma->sc_sq_cq);
1159 
1160 	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
1161 		ib_destroy_cq(rdma->sc_rq_cq);
1162 
1163 	if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
1164 		ib_dereg_mr(rdma->sc_phys_mr);
1165 
1166 	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
1167 		ib_dealloc_pd(rdma->sc_pd);
1168 
1169 	/* Destroy the CM ID */
1170 	rdma_destroy_id(rdma->sc_cm_id);
1171 
1172 	kfree(rdma);
1173 }
1174 
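/* Defer destruction to a workqueue so that __svc_rdma_free, which may
 * sleep while tearing down verbs resources, runs in process context.
 */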
1175 static void svc_rdma_free(struct svc_xprt *xprt)
1176 {
1177 	struct svcxprt_rdma *rdma =
1178 		container_of(xprt, struct svcxprt_rdma, sc_xprt);
1179 	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
1180 	queue_work(svc_rdma_wq, &rdma->sc_work);
1181 }
1182 
1183 static int svc_rdma_has_wspace(struct svc_xprt *xprt)
1184 {
1185 	struct svcxprt_rdma *rdma =
1186 		container_of(xprt, struct svcxprt_rdma, sc_xprt);
1187 
1188 	/*
1189 	 * If there are already waiters on the SQ,
1190 	 * return false.
1191 	 */
1192 	if (waitqueue_active(&rdma->sc_send_wait))
1193 		return 0;
1194 
1195 	/* Otherwise return true. */
1196 	return 1;
1197 }
1198 
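/* RPC-over-RDMA connections are always reported as arriving on a
 * secure port.
 */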
1199 static int svc_rdma_secure_port(struct svc_rqst *rqstp)
1200 {
1201 	return 1;
1202 }
1203 
1204 /*
1205  * Attempt to register the kvec representing the RPC memory with the
1206  * device.
1207  *
1208  * Returns:
1209  *     0 : The FAST_REG work request was successfully posted; the kvec
1210  *         is registered when the WR completes.
1211  *    <0 : An error was encountered attempting to post the work request
1212  *         (for example, -ENOTCONN if the transport is closing).
1213  */
1214 int svc_rdma_fastreg(struct svcxprt_rdma *xprt,
1215 		     struct svc_rdma_fastreg_mr *frmr)
1216 {
1217 	struct ib_send_wr fastreg_wr;
1218 	u8 key;
1219 
1220 	/* Bump the key */
1221 	key = (u8)(frmr->mr->lkey & 0x000000FF);
1222 	ib_update_fast_reg_key(frmr->mr, ++key);
1223 
1224 	/* Prepare FASTREG WR */
1225 	memset(&fastreg_wr, 0, sizeof fastreg_wr);
1226 	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
1227 	fastreg_wr.send_flags = IB_SEND_SIGNALED;
1228 	fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva;
1229 	fastreg_wr.wr.fast_reg.page_list = frmr->page_list;
1230 	fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len;
1231 	fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
1232 	fastreg_wr.wr.fast_reg.length = frmr->map_len;
1233 	fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags;
1234 	fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey;
1235 	return svc_rdma_send(xprt, &fastreg_wr);
1236 }
1237 
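/* Post a chain of send work requests on the QP. If the SQ is full,
 * reap completions opportunistically and sleep on sc_send_wait until
 * space is available. A transport reference is taken for each WR
 * posted and released when its completion is reaped.
 * Returns 0 on success or a negative errno.
 */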
1238 int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
1239 {
1240 	struct ib_send_wr *bad_wr, *n_wr;
1241 	int wr_count;
1242 	int i;
1243 	int ret;
1244 
1245 	if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
1246 		return -ENOTCONN;
1247 
1248 	wr_count = 1;
1249 	for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
1250 		wr_count++;
1251 
1252 	/* If the SQ is full, wait until an SQ entry is available */
1253 	while (1) {
1254 		spin_lock_bh(&xprt->sc_lock);
1255 		if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
1256 			spin_unlock_bh(&xprt->sc_lock);
1257 			atomic_inc(&rdma_stat_sq_starve);
1258 
1259 			/* See if we can opportunistically reap SQ WR to make room */
1260 			sq_cq_reap(xprt);
1261 
1262 			/* Wait until SQ WR available if SQ still full */
1263 			wait_event(xprt->sc_send_wait,
1264 				   atomic_read(&xprt->sc_sq_count) <
1265 				   xprt->sc_sq_depth);
1266 			if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
1267 				return -ENOTCONN;
1268 			continue;
1269 		}
1270 		/* Take a transport ref for each WR posted */
1271 		for (i = 0; i < wr_count; i++)
1272 			svc_xprt_get(&xprt->sc_xprt);
1273 
1274 		/* Bump used SQ WR count and post */
1275 		atomic_add(wr_count, &xprt->sc_sq_count);
1276 		ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
1277 		if (ret) {
1278 			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
1279 			atomic_sub(wr_count, &xprt->sc_sq_count);
1280 			for (i = 0; i < wr_count; i ++)
1281 				svc_xprt_put(&xprt->sc_xprt);
1282 			dprintk("svcrdma: failed to post SQ WR rc=%d, "
1283 			       "sc_sq_count=%d, sc_sq_depth=%d\n",
1284 			       ret, atomic_read(&xprt->sc_sq_count),
1285 			       xprt->sc_sq_depth);
1286 		}
1287 		spin_unlock_bh(&xprt->sc_lock);
1288 		if (ret)
1289 			wake_up(&xprt->sc_send_wait);
1290 		break;
1291 	}
1292 	return ret;
1293 }
1294 
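/* XDR-encode an RPC/RDMA error into a freshly allocated page and post
 * it to the client as a single-SGE SEND work request.
 */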
1295 void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
1296 			 enum rpcrdma_errcode err)
1297 {
1298 	struct ib_send_wr err_wr;
1299 	struct page *p;
1300 	struct svc_rdma_op_ctxt *ctxt;
1301 	__be32 *va;
1302 	int length;
1303 	int ret;
1304 
1305 	p = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
1306 	va = page_address(p);
1307 
1308 	/* XDR encode error */
1309 	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
1310 
1311 	ctxt = svc_rdma_get_context(xprt);
1312 	ctxt->direction = DMA_FROM_DEVICE;
1313 	ctxt->count = 1;
1314 	ctxt->pages[0] = p;
1315 
1316 	/* Prepare SGE for local address */
1317 	ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
1318 					    p, 0, length, DMA_FROM_DEVICE);
1319 	if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
1320 		put_page(p);
1321 		svc_rdma_put_context(ctxt, 1);
1322 		return;
1323 	}
1324 	atomic_inc(&xprt->sc_dma_used);
1325 	ctxt->sge[0].lkey = xprt->sc_dma_lkey;
1326 	ctxt->sge[0].length = length;
1327 
1328 	/* Prepare SEND WR */
1329 	memset(&err_wr, 0, sizeof err_wr);
1330 	ctxt->wr_op = IB_WR_SEND;
1331 	err_wr.wr_id = (unsigned long)ctxt;
1332 	err_wr.sg_list = ctxt->sge;
1333 	err_wr.num_sge = 1;
1334 	err_wr.opcode = IB_WR_SEND;
1335 	err_wr.send_flags = IB_SEND_SIGNALED;
1336 
1337 	/* Post It */
1338 	ret = svc_rdma_send(xprt, &err_wr);
1339 	if (ret) {
1340 		dprintk("svcrdma: Error %d posting send for protocol error\n",
1341 			ret);
1342 		svc_rdma_unmap_dma(ctxt);
1343 		svc_rdma_put_context(ctxt, 1);
1344 	}
1345 }
1346