/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/sunrpc/addr.h>
#include <asm/bitops.h>
#include <linux/module.h> /* try_module_get()/module_put() */

#include "xprt_rdma.h"

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * internal functions
 */

static struct workqueue_struct *rpcrdma_receive_wq;

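/* Allocate the workqueue on which received RPC/RDMA replies are
 * processed, outside of the receive completion handler's context.
 */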
int
rpcrdma_alloc_wq(void)
{
	struct workqueue_struct *recv_wq;

	recv_wq = alloc_workqueue("xprtrdma_receive",
				  WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_HIGHPRI,
				  0);
	if (!recv_wq)
		return -ENOMEM;

	rpcrdma_receive_wq = recv_wq;
	return 0;
}

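/* Tear down the receive workqueue. Safe to call even if the
 * workqueue was never allocated.
 */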
void
rpcrdma_destroy_wq(void)
{
	struct workqueue_struct *wq;

	if (rpcrdma_receive_wq) {
		wq = rpcrdma_receive_wq;
		rpcrdma_receive_wq = NULL;
		destroy_workqueue(wq);
	}
}

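/* Asynchronous QP event handler: log the event, and if the transport
 * is currently connected, mark it failed and wake up waiters so the
 * connection can be torn down and re-established.
 */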
static void
rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;

	pr_err("RPC:       %s: %s on device %s ep %p\n",
	       __func__, ib_event_msg(event->event),
		event->device->name, context);
	if (ep->rep_connected == 1) {
		ep->rep_connected = -EIO;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
	}
}

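/* Asynchronous CQ event handler: handled the same way as QP events. */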
static void
rpcrdma_cq_async_error_upcall(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;

	pr_err("RPC:       %s: %s on device %s ep %p\n",
	       __func__, ib_event_msg(event->event),
		event->device->name, context);
	if (ep->rep_connected == 1) {
		ep->rep_connected = -EIO;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
	}
}

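/* Handle one send completion. A wr_id of RPCRDMA_IGNORE_COMPLETION
 * marks an ordinary SEND; any other wr_id carries a pointer to the
 * rpcrdma_mw whose registration work request just completed.
 */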
static void
rpcrdma_sendcq_process_wc(struct ib_wc *wc)
{
	/* WARNING: Only wr_id and status are reliable at this point */
	if (wc->wr_id == RPCRDMA_IGNORE_COMPLETION) {
		if (wc->status != IB_WC_SUCCESS &&
		    wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("RPC:       %s: SEND: %s\n",
			       __func__, ib_wc_status_msg(wc->status));
	} else {
		struct rpcrdma_mw *r;

		r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
		r->mw_sendcompletion(wc);
	}
}

/* The common case is a single send completion is waiting. By
 * passing two WC entries to ib_poll_cq, a return code of 1
 * means there is exactly one WC waiting and no more. We don't
 * have to invoke ib_poll_cq again to know that the CQ has been
 * properly drained.
 */
static void
rpcrdma_sendcq_poll(struct ib_cq *cq)
{
	struct ib_wc *pos, wcs[2];
	int count, rc;

	do {
		pos = wcs;

		rc = ib_poll_cq(cq, ARRAY_SIZE(wcs), pos);
		if (rc < 0)
			break;

		count = rc;
		while (count-- > 0)
			rpcrdma_sendcq_process_wc(pos++);
	} while (rc == ARRAY_SIZE(wcs));
	return;
}

/* Handle provider send completion upcalls.
 */
static void
rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context)
{
	do {
		rpcrdma_sendcq_poll(cq);
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
}

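/* Work item: invoke the RPC/RDMA reply handler for one received
 * reply, running in process context on the receive workqueue.
 */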
static void
rpcrdma_receive_worker(struct work_struct *work)
{
	struct rpcrdma_rep *rep =
			container_of(work, struct rpcrdma_rep, rr_work);

	rpcrdma_reply_handler(rep);
}

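/* Handle one receive completion, then hand the reply off to the
 * receive workqueue. Flushed or failed receives are marked with
 * RPCRDMA_BAD_LEN before being scheduled.
 */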
static void
rpcrdma_recvcq_process_wc(struct ib_wc *wc)
{
	struct rpcrdma_rep *rep =
			(struct rpcrdma_rep *)(unsigned long)wc->wr_id;

	/* WARNING: Only wr_id and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS)
		goto out_fail;

	/* status == SUCCESS means all fields in wc are trustworthy */
	if (wc->opcode != IB_WC_RECV)
		return;

	dprintk("RPC:       %s: rep %p opcode 'recv', length %u: success\n",
		__func__, rep, wc->byte_len);

	rep->rr_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rep->rr_device,
				   rdmab_addr(rep->rr_rdmabuf),
				   rep->rr_len, DMA_FROM_DEVICE);
	prefetch(rdmab_to_msg(rep->rr_rdmabuf));

out_schedule:
	queue_work(rpcrdma_receive_wq, &rep->rr_work);
	return;

out_fail:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("RPC:       %s: rep %p: %s\n",
		       __func__, rep, ib_wc_status_msg(wc->status));
	rep->rr_len = RPCRDMA_BAD_LEN;
	goto out_schedule;
}

/* The wc array is on stack: automatic memory is always CPU-local.
 *
 * struct ib_wc is 64 bytes, making the poll array potentially
 * large. But this is at the bottom of the call chain. Further
 * substantial work is done in another thread.
 */
static void
rpcrdma_recvcq_poll(struct ib_cq *cq)
{
	struct ib_wc *pos, wcs[4];
	int count, rc;

	do {
		pos = wcs;

		rc = ib_poll_cq(cq, ARRAY_SIZE(wcs), pos);
		if (rc < 0)
			break;

		count = rc;
		while (count-- > 0)
			rpcrdma_recvcq_process_wc(pos++);
	} while (rc == ARRAY_SIZE(wcs));
}

/* Handle provider receive completion upcalls.
 */
static void
rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context)
{
	do {
		rpcrdma_recvcq_poll(cq);
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
}

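/* Process any completions still sitting on either CQ, typically
 * while the transport endpoint is being disconnected or reconnected.
 */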
static void
rpcrdma_flush_cqs(struct rpcrdma_ep *ep)
{
	struct ib_wc wc;

	while (ib_poll_cq(ep->rep_attr.recv_cq, 1, &wc) > 0)
		rpcrdma_recvcq_process_wc(&wc);
	while (ib_poll_cq(ep->rep_attr.send_cq, 1, &wc) > 0)
		rpcrdma_sendcq_process_wc(&wc);
}

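/* RDMA connection manager event handler. Address and route resolution
 * results are recorded and the waiter in rpcrdma_create_id is woken;
 * connection state changes update ep->rep_connected and wake up anyone
 * waiting in rpcrdma_ep_connect or rpcrdma_ep_disconnect.
 */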
static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *xprt = id->context;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct rpcrdma_ep *ep = &xprt->rx_ep;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	struct sockaddr *sap = (struct sockaddr *)&ep->rep_remote_addr;
#endif
	struct ib_qp_attr *attr = &ia->ri_qp_attr;
	struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr;
	int connstate = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EHOSTUNREACH;
		dprintk("RPC:       %s: CM address resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		dprintk("RPC:       %s: CM route resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		connstate = 1;
		ib_query_qp(ia->ri_id->qp, attr,
			    IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
			    iattr);
		dprintk("RPC:       %s: %d responder resources"
			" (%d initiator)\n",
			__func__, attr->max_dest_rd_atomic,
			attr->max_rd_atomic);
		goto connected;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		connstate = -ENOTCONN;
		goto connected;
	case RDMA_CM_EVENT_UNREACHABLE:
		connstate = -ENETDOWN;
		goto connected;
	case RDMA_CM_EVENT_REJECTED:
		connstate = -ECONNREFUSED;
		goto connected;
	case RDMA_CM_EVENT_DISCONNECTED:
		connstate = -ECONNABORTED;
		goto connected;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		connstate = -ENODEV;
connected:
		dprintk("RPC:       %s: %sconnected\n",
					__func__, connstate > 0 ? "" : "dis");
		ep->rep_connected = connstate;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
		/*FALLTHROUGH*/
	default:
		dprintk("RPC:       %s: %pIS:%u (ep 0x%p): %s\n",
			__func__, sap, rpc_get_port(sap), ep,
			rdma_event_msg(event->event));
		break;
	}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (connstate == 1) {
		int ird = attr->max_dest_rd_atomic;
		int tird = ep->rep_remote_cma.responder_resources;

		pr_info("rpcrdma: connection to %pIS:%u on %s, memreg '%s', %d credits, %d responders%s\n",
			sap, rpc_get_port(sap),
			ia->ri_device->name,
			ia->ri_ops->ro_displayname,
			xprt->rx_buf.rb_max_requests,
			ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
	} else if (connstate < 0) {
		pr_info("rpcrdma: connection to %pIS:%u closed (%d)\n",
			sap, rpc_get_port(sap), connstate);
	}
#endif

	return 0;
}

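/* Drop the device module reference taken in rpcrdma_create_id()
 * before destroying the CM ID.
 */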
static void rpcrdma_destroy_id(struct rdma_cm_id *id)
{
	if (id) {
		module_put(id->device->owner);
		rdma_destroy_id(id);
	}
}

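/* Create an RDMA CM ID for this transport and synchronously resolve
 * the server's address and a route to it. Returns the new ID, with a
 * reference held on the device's module, or an ERR_PTR.
 */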
static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt,
			struct rpcrdma_ia *ia, struct sockaddr *addr)
{
	struct rdma_cm_id *id;
	int rc;

	init_completion(&ia->ri_done);

	id = rdma_create_id(&init_net, rpcrdma_conn_upcall, xprt, RDMA_PS_TCP,
			    IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC:       %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL, addr, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_addr() failed %i\n",
			__func__, rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);

	/* FIXME:
	 * Until xprtrdma supports DEVICE_REMOVAL, the provider must
	 * be pinned while there are active NFS/RDMA mounts to prevent
	 * hangs and crashes at umount time.
	 */
	if (!ia->ri_async_rc && !try_module_get(id->device->owner)) {
		dprintk("RPC:       %s: Failed to get device module\n",
			__func__);
		ia->ri_async_rc = -ENODEV;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_route() failed %i\n",
			__func__, rc);
		goto put;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
	rc = ia->ri_async_rc;
	if (rc)
		goto put;

	return id;
put:
	module_put(id->device->owner);
out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

/*
 * Drain any cq, prior to teardown.
 */
static void
rpcrdma_clean_cq(struct ib_cq *cq)
{
	struct ib_wc wc;
	int count = 0;

	while (1 == ib_poll_cq(cq, 1, &wc))
		++count;

	if (count)
		dprintk("RPC:       %s: flushed %d events (last 0x%x)\n",
			__func__, count, wc.opcode);
}

/*
 * Exported functions.
 */

/*
 * Open and initialize an Interface Adapter.
 *  o initializes fields of struct rpcrdma_ia, including
 *    interface and provider attributes and protection domain.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	int rc;

	ia->ri_dma_mr = NULL;

	ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out1;
	}
	ia->ri_device = ia->ri_id->device;

	ia->ri_pd = ib_alloc_pd(ia->ri_device);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		dprintk("RPC:       %s: ib_alloc_pd() failed %i\n",
			__func__, rc);
		goto out2;
	}

	if (memreg == RPCRDMA_FRMR) {
		if (!(ia->ri_device->attrs.device_cap_flags &
				IB_DEVICE_MEM_MGT_EXTENSIONS) ||
		    (ia->ri_device->attrs.max_fast_reg_page_list_len == 0)) {
			dprintk("RPC:       %s: FRMR registration "
				"not supported by HCA\n", __func__);
			memreg = RPCRDMA_MTHCAFMR;
		}
	}
	if (memreg == RPCRDMA_MTHCAFMR) {
		if (!ia->ri_device->alloc_fmr) {
			dprintk("RPC:       %s: MTHCAFMR registration "
				"not supported by HCA\n", __func__);
			rc = -EINVAL;
			goto out3;
		}
	}

	switch (memreg) {
	case RPCRDMA_FRMR:
		ia->ri_ops = &rpcrdma_frwr_memreg_ops;
		break;
	case RPCRDMA_ALLPHYSICAL:
		ia->ri_ops = &rpcrdma_physical_memreg_ops;
		break;
	case RPCRDMA_MTHCAFMR:
		ia->ri_ops = &rpcrdma_fmr_memreg_ops;
		break;
	default:
		printk(KERN_ERR "RPC: Unsupported memory "
				"registration mode: %d\n", memreg);
		rc = -ENOMEM;
		goto out3;
	}
	dprintk("RPC:       %s: memory registration strategy is '%s'\n",
		__func__, ia->ri_ops->ro_displayname);

	rwlock_init(&ia->ri_qplock);
	return 0;

out3:
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
out2:
	rpcrdma_destroy_id(ia->ri_id);
	ia->ri_id = NULL;
out1:
	return rc;
}

/*
 * Clean up/close an IA.
 *   o if event handles and PD have been initialized, free them.
 *   o close the IA
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	dprintk("RPC:       %s: entering\n", __func__);
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rpcrdma_destroy_id(ia->ri_id);
		ia->ri_id = NULL;
	}

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		ib_dealloc_pd(ia->ri_pd);
}

/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
				struct rpcrdma_create_data_internal *cdata)
{
	struct ib_cq *sendcq, *recvcq;
	struct ib_cq_init_attr cq_attr = {};
	unsigned int max_qp_wr;
	int rc, err;

	if (ia->ri_device->attrs.max_sge < RPCRDMA_MAX_IOVS) {
		dprintk("RPC:       %s: insufficient sge's available\n",
			__func__);
		return -ENOMEM;
	}

	if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) {
		dprintk("RPC:       %s: insufficient wqe's available\n",
			__func__);
		return -ENOMEM;
	}
	max_qp_wr = ia->ri_device->attrs.max_qp_wr - RPCRDMA_BACKWARD_WRS;

	/* check provider's send/recv wr limits */
	if (cdata->max_requests > max_qp_wr)
		cdata->max_requests = max_qp_wr;

	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	rc = ia->ri_ops->ro_open(ia, ep, cdata);
	if (rc)
		return rc;
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_IOVS;
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
	if (ep->rep_cqinit <= 2)
		ep->rep_cqinit = 0;	/* always signal? */
	INIT_CQCOUNT(ep);
	init_waitqueue_head(&ep->rep_connect_wait);
	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

	cq_attr.cqe = ep->rep_attr.cap.max_send_wr + 1;
	sendcq = ib_create_cq(ia->ri_device, rpcrdma_sendcq_upcall,
			      rpcrdma_cq_async_error_upcall, NULL, &cq_attr);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		dprintk("RPC:       %s: failed to create send CQ: %i\n",
			__func__, rc);
		goto out1;
	}

	rc = ib_req_notify_cq(sendcq, IB_CQ_NEXT_COMP);
	if (rc) {
		dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		goto out2;
	}

	cq_attr.cqe = ep->rep_attr.cap.max_recv_wr + 1;
	recvcq = ib_create_cq(ia->ri_device, rpcrdma_recvcq_upcall,
			      rpcrdma_cq_async_error_upcall, NULL, &cq_attr);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		dprintk("RPC:       %s: failed to create recv CQ: %i\n",
			__func__, rc);
		goto out2;
	}

	rc = ib_req_notify_cq(recvcq, IB_CQ_NEXT_COMP);
	if (rc) {
		dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		ib_destroy_cq(recvcq);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */

	/* RPC/RDMA does not use private data */
	ep->rep_remote_cma.private_data = NULL;
	ep->rep_remote_cma.private_data_len = 0;

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	if (ia->ri_device->attrs.max_qp_rd_atom > 32)	/* arbitrary but <= 255 */
		ep->rep_remote_cma.responder_resources = 32;
	else
		ep->rep_remote_cma.responder_resources =
						ia->ri_device->attrs.max_qp_rd_atom;

	ep->rep_remote_cma.retry_count = 7;
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	err = ib_destroy_cq(sendcq);
	if (err)
		dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
			__func__, err);
out1:
	if (ia->ri_dma_mr)
		ib_dereg_mr(ia->ri_dma_mr);
	return rc;
}

/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	dprintk("RPC:       %s: entering, connected is %d\n",
		__func__, ep->rep_connected);

	cancel_delayed_work_sync(&ep->rep_connect_worker);

	if (ia->ri_id->qp)
		rpcrdma_ep_disconnect(ep, ia);

	rpcrdma_clean_cq(ep->rep_attr.recv_cq);
	rpcrdma_clean_cq(ep->rep_attr.send_cq);

	if (ia->ri_id->qp) {
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	rc = ib_destroy_cq(ep->rep_attr.recv_cq);
	if (rc)
		dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
			__func__, rc);

	rc = ib_destroy_cq(ep->rep_attr.send_cq);
	if (rc)
		dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
			__func__, rc);

	if (ia->ri_dma_mr) {
		rc = ib_dereg_mr(ia->ri_dma_mr);
		dprintk("RPC:       %s: ib_dereg_mr returned %i\n",
			__func__, rc);
	}
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int rc = 0;
	int retry_count = 0;

	if (ep->rep_connected != 0) {
		struct rpcrdma_xprt *xprt;
retry:
		dprintk("RPC:       %s: reconnecting...\n", __func__);

		rpcrdma_ep_disconnect(ep, ia);
		rpcrdma_flush_cqs(ep);

		xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
		id = rpcrdma_create_id(xprt, ia,
				(struct sockaddr *)&xprt->rx_data.addr);
		if (IS_ERR(id)) {
			rc = -EHOSTUNREACH;
			goto out;
		}
		/* TEMP TEMP TEMP - fail if new device:
		 * Deregister/remarshal *all* requests!
		 * Close and recreate adapter, pd, etc!
		 * Re-determine all attributes still sane!
		 * More stuff I haven't thought of!
		 * Rrrgh!
		 */
		if (ia->ri_device != id->device) {
			printk("RPC:       %s: can't reconnect on "
				"different device!\n", __func__);
			rpcrdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}
		/* END TEMP */
		rc = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC:       %s: rdma_create_qp failed %i\n",
				__func__, rc);
			rpcrdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}

		write_lock(&ia->ri_qplock);
		old = ia->ri_id;
		ia->ri_id = id;
		write_unlock(&ia->ri_qplock);

		rdma_destroy_qp(old);
		rpcrdma_destroy_id(old);
	} else {
		dprintk("RPC:       %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC:       %s: rdma_create_qp failed %i\n",
				__func__, rc);
			/* do not update ep->rep_connected */
			return -ENETUNREACH;
		}
	}

	ep->rep_connected = 0;

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc) {
		dprintk("RPC:       %s: rdma_connect() failed with %i\n",
				__func__, rc);
		goto out;
	}

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);

	/*
	 * Check state. A non-peer reject indicates no listener
	 * (ECONNREFUSED), which may be a transient state. All
	 * others indicate a transport condition that has already
	 * undergone best-effort recovery.
	 */
	if (ep->rep_connected == -ECONNREFUSED &&
	    ++retry_count <= RDMA_CONNECT_RETRY_MAX) {
		dprintk("RPC:       %s: non-peer_reject, retry\n", __func__);
		goto retry;
	}
	if (ep->rep_connected <= 0) {
		/* Sometimes, the only way to reliably connect to remote
		 * CMs is to use the same nonzero values for ORD and IRD. */
		if (retry_count++ <= RDMA_CONNECT_RETRY_MAX + 1 &&
		    (ep->rep_remote_cma.responder_resources == 0 ||
		     ep->rep_remote_cma.initiator_depth !=
				ep->rep_remote_cma.responder_resources)) {
			if (ep->rep_remote_cma.responder_resources == 0)
				ep->rep_remote_cma.responder_resources = 1;
			ep->rep_remote_cma.initiator_depth =
				ep->rep_remote_cma.responder_resources;
			goto retry;
		}
		rc = ep->rep_connected;
	} else {
		struct rpcrdma_xprt *r_xprt;
		unsigned int extras;

		dprintk("RPC:       %s: connected\n", __func__);

		r_xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
		extras = r_xprt->rx_buf.rb_bc_srv_max_requests;

		if (extras) {
			rc = rpcrdma_ep_post_extra_recv(r_xprt, extras);
			if (rc) {
				pr_warn("%s: rpcrdma_ep_post_extra_recv: %i\n",
					__func__, rc);
				rc = 0;
			}
		}
	}

out:
	if (rc)
		ep->rep_connected = rc;
	return rc;
}

/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rpcrdma_flush_cqs(ep);
	rc = rdma_disconnect(ia->ri_id);
	if (!rc) {
		/* returns without wait if not connected */
		wait_event_interruptible(ep->rep_connect_wait,
							ep->rep_connected != 1);
		dprintk("RPC:       %s: after wait, %sconnected\n", __func__,
			(ep->rep_connected == 1) ? "still " : "dis");
	} else {
		dprintk("RPC:       %s: rdma_disconnect %i\n", __func__, rc);
		ep->rep_connected = rc;
	}
}

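/* Allocate an rpcrdma_req send buffer structure and add it to the
 * transport's list of all requests so it can be found at destroy
 * time even if it is not sitting on the free list.
 */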
struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&req->rl_free);
	spin_lock(&buffer->rb_reqslock);
	list_add(&req->rl_all, &buffer->rb_allreqs);
	spin_unlock(&buffer->rb_reqslock);
	req->rl_buffer = &r_xprt->rx_buf;
	return req;
}

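/* Allocate an rpcrdma_rep reply structure along with a DMA-mapped
 * buffer large enough to receive an inline reply.
 */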
struct rpcrdma_rep *
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_rep *rep;
	int rc;

	rc = -ENOMEM;
	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_alloc_regbuf(ia, cdata->inline_rsize,
					       GFP_KERNEL);
	if (IS_ERR(rep->rr_rdmabuf)) {
		rc = PTR_ERR(rep->rr_rdmabuf);
		goto out_free;
	}

	rep->rr_device = ia->ri_device;
	rep->rr_rxprt = r_xprt;
	INIT_WORK(&rep->rr_work, rpcrdma_receive_worker);
	return rep;

out_free:
	kfree(rep);
out:
	return ERR_PTR(rc);
}

int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int i, rc;

	buf->rb_max_requests = r_xprt->rx_data.max_requests;
	buf->rb_bc_srv_max_requests = 0;
	spin_lock_init(&buf->rb_lock);

	rc = ia->ri_ops->ro_init(r_xprt);
	if (rc)
		goto out;

	INIT_LIST_HEAD(&buf->rb_send_bufs);
	INIT_LIST_HEAD(&buf->rb_allreqs);
	spin_lock_init(&buf->rb_reqslock);
	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;

		req = rpcrdma_create_req(r_xprt);
		if (IS_ERR(req)) {
			dprintk("RPC:       %s: request buffer %d alloc"
				" failed\n", __func__, i);
			rc = PTR_ERR(req);
			goto out;
		}
		req->rl_backchannel = false;
		list_add(&req->rl_free, &buf->rb_send_bufs);
	}

	INIT_LIST_HEAD(&buf->rb_recv_bufs);
	for (i = 0; i < buf->rb_max_requests + 2; i++) {
		struct rpcrdma_rep *rep;

		rep = rpcrdma_create_rep(r_xprt);
		if (IS_ERR(rep)) {
			dprintk("RPC:       %s: reply buffer %d alloc failed\n",
				__func__, i);
			rc = PTR_ERR(rep);
			goto out;
		}
		list_add(&rep->rr_list, &buf->rb_recv_bufs);
	}

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}

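/* Remove the first entry from the send or receive free list.
 * The _locked suffix indicates that the caller is responsible
 * for serializing access, normally by holding buf->rb_lock.
 */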
static struct rpcrdma_req *
rpcrdma_buffer_get_req_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_req *req;

	req = list_first_entry(&buf->rb_send_bufs,
			       struct rpcrdma_req, rl_free);
	list_del(&req->rl_free);
	return req;
}

static struct rpcrdma_rep *
rpcrdma_buffer_get_rep_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_rep *rep;

	rep = list_first_entry(&buf->rb_recv_bufs,
			       struct rpcrdma_rep, rr_list);
	list_del(&rep->rr_list);
	return rep;
}

static void
rpcrdma_destroy_rep(struct rpcrdma_ia *ia, struct rpcrdma_rep *rep)
{
	rpcrdma_free_regbuf(ia, rep->rr_rdmabuf);
	kfree(rep);
}

void
rpcrdma_destroy_req(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	rpcrdma_free_regbuf(ia, req->rl_sendbuf);
	rpcrdma_free_regbuf(ia, req->rl_rdmabuf);
	kfree(req);
}

void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);

	while (!list_empty(&buf->rb_recv_bufs)) {
		struct rpcrdma_rep *rep;

		rep = rpcrdma_buffer_get_rep_locked(buf);
		rpcrdma_destroy_rep(ia, rep);
	}

	spin_lock(&buf->rb_reqslock);
	while (!list_empty(&buf->rb_allreqs)) {
		struct rpcrdma_req *req;

		req = list_first_entry(&buf->rb_allreqs,
				       struct rpcrdma_req, rl_all);
		list_del(&req->rl_all);

		spin_unlock(&buf->rb_reqslock);
		rpcrdma_destroy_req(ia, req);
		spin_lock(&buf->rb_reqslock);
	}
	spin_unlock(&buf->rb_reqslock);

	ia->ri_ops->ro_destroy(buf);
}

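/* Take an MW (FMR or FRMR) off the transport's free list. Returns
 * NULL, and logs an error, if the list has been exhausted.
 */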
struct rpcrdma_mw *
rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mw *mw = NULL;

	spin_lock(&buf->rb_mwlock);
	if (!list_empty(&buf->rb_mws)) {
		mw = list_first_entry(&buf->rb_mws,
				      struct rpcrdma_mw, mw_list);
		list_del_init(&mw->mw_list);
	}
	spin_unlock(&buf->rb_mwlock);

	if (!mw)
		pr_err("RPC:       %s: no MWs available\n", __func__);
	return mw;
}

void
rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

	spin_lock(&buf->rb_mwlock);
	list_add_tail(&mw->mw_list, &buf->rb_mws);
	spin_unlock(&buf->rb_mwlock);
}

/*
 * Get a set of request/reply buffers.
 *
 * Reply buffer (if available) is attached to send buffer upon return.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_req *req;

	spin_lock(&buffers->rb_lock);
	if (list_empty(&buffers->rb_send_bufs))
		goto out_reqbuf;
	req = rpcrdma_buffer_get_req_locked(buffers);
	if (list_empty(&buffers->rb_recv_bufs))
		goto out_repbuf;
	req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers);
	spin_unlock(&buffers->rb_lock);
	return req;

out_reqbuf:
	spin_unlock(&buffers->rb_lock);
	pr_warn("RPC:       %s: out of request buffers\n", __func__);
	return NULL;
out_repbuf:
	spin_unlock(&buffers->rb_lock);
	pr_warn("RPC:       %s: out of reply buffers\n", __func__);
	req->rl_reply = NULL;
	return req;
}

/*
 * Put request/reply buffers back into pool.
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	struct rpcrdma_rep *rep = req->rl_reply;

	req->rl_niovs = 0;
	req->rl_reply = NULL;

	spin_lock(&buffers->rb_lock);
	list_add_tail(&req->rl_free, &buffers->rb_send_bufs);
	if (rep)
		list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
	spin_unlock(&buffers->rb_lock);
}

/*
 * Recover reply buffers from pool.
 * This happens when recovering from disconnect.
 */
void
rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;

	spin_lock(&buffers->rb_lock);
	if (!list_empty(&buffers->rb_recv_bufs))
		req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers);
	spin_unlock(&buffers->rb_lock);
}

/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;

	spin_lock(&buffers->rb_lock);
	list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
	spin_unlock(&buffers->rb_lock);
}

/*
 * Wrappers for internal-use kmalloc memory registration, used by buffer code.
 */

void
rpcrdma_mapping_error(struct rpcrdma_mr_seg *seg)
{
	dprintk("RPC:       map_one: offset %p iova %llx len %zu\n",
		seg->mr_offset,
		(unsigned long long)seg->mr_dma, seg->mr_dmalen);
}

/**
 * rpcrdma_alloc_regbuf - kmalloc and register memory for SEND/RECV buffers
 * @ia: controlling rpcrdma_ia
 * @size: size of buffer to be allocated, in bytes
 * @flags: GFP flags
 *
 * Returns pointer to private header of an area of internally
 * registered memory, or an ERR_PTR. The registered buffer follows
 * the end of the private header.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. regbufs are not
 * used for RDMA READ/WRITE operations, thus are registered only for
 * LOCAL access.
 */
struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags)
{
	struct rpcrdma_regbuf *rb;
	struct ib_sge *iov;

	rb = kmalloc(sizeof(*rb) + size, flags);
	if (rb == NULL)
		goto out;

	iov = &rb->rg_iov;
	iov->addr = ib_dma_map_single(ia->ri_device,
				      (void *)rb->rg_base, size,
				      DMA_BIDIRECTIONAL);
	if (ib_dma_mapping_error(ia->ri_device, iov->addr))
		goto out_free;

	iov->length = size;
	iov->lkey = ia->ri_pd->local_dma_lkey;
	rb->rg_size = size;
	rb->rg_owner = NULL;
	return rb;

out_free:
	kfree(rb);
out:
	return ERR_PTR(-ENOMEM);
}

/**
 * rpcrdma_free_regbuf - deregister and free registered buffer
 * @ia: controlling rpcrdma_ia
 * @rb: regbuf to be deregistered and freed
 */
void
rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
	struct ib_sge *iov;

	if (!rb)
		return;

	iov = &rb->rg_iov;
	ib_dma_unmap_single(ia->ri_device,
			    iov->addr, iov->length, DMA_BIDIRECTIONAL);
	kfree(rb);
}

/*
 * Prepost any receive buffer, then post send.
 *
 * Receive buffer is donated to hardware, reclaimed upon recv completion.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_device *device = ia->ri_device;
	struct ib_send_wr send_wr, *send_wr_fail;
	struct rpcrdma_rep *rep = req->rl_reply;
	struct ib_sge *iov = req->rl_send_iov;
	int i, rc;

	if (rep) {
		rc = rpcrdma_ep_post_recv(ia, ep, rep);
		if (rc)
			goto out;
		req->rl_reply = NULL;
	}

	send_wr.next = NULL;
	send_wr.wr_id = RPCRDMA_IGNORE_COMPLETION;
	send_wr.sg_list = iov;
	send_wr.num_sge = req->rl_niovs;
	send_wr.opcode = IB_WR_SEND;

	for (i = 0; i < send_wr.num_sge; i++)
		ib_dma_sync_single_for_device(device, iov[i].addr,
					      iov[i].length, DMA_TO_DEVICE);
	dprintk("RPC:       %s: posting %d s/g entries\n",
		__func__, send_wr.num_sge);

	if (DECR_CQCOUNT(ep) > 0)
		send_wr.send_flags = 0;
	else { /* Provider must take a send completion every now and then */
		INIT_CQCOUNT(ep);
		send_wr.send_flags = IB_SEND_SIGNALED;
	}

	rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail);
	if (rc)
		dprintk("RPC:       %s: ib_post_send returned %i\n", __func__,
			rc);
out:
	return rc;
}

/*
 * (Re)post a receive buffer.
 */
int
rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
		     struct rpcrdma_ep *ep,
		     struct rpcrdma_rep *rep)
{
	struct ib_recv_wr recv_wr, *recv_wr_fail;
	int rc;

	recv_wr.next = NULL;
	recv_wr.wr_id = (u64) (unsigned long) rep;
	recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	recv_wr.num_sge = 1;

	ib_dma_sync_single_for_cpu(ia->ri_device,
				   rdmab_addr(rep->rr_rdmabuf),
				   rdmab_length(rep->rr_rdmabuf),
				   DMA_BIDIRECTIONAL);

	rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail);

	if (rc)
		dprintk("RPC:       %s: ib_post_recv returned %i\n", __func__,
			rc);
	return rc;
}

/**
 * rpcrdma_ep_post_extra_recv - Post buffers for incoming backchannel requests
 * @r_xprt: transport associated with these backchannel resources
 * @count: minimum number of incoming requests expected
 *
 * Returns zero if all requested buffers were posted, or a negative errno.
 */
int
rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *r_xprt, unsigned int count)
{
	struct rpcrdma_buffer *buffers = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_rep *rep;
	int rc;

	while (count--) {
		spin_lock(&buffers->rb_lock);
		if (list_empty(&buffers->rb_recv_bufs))
			goto out_reqbuf;
		rep = rpcrdma_buffer_get_rep_locked(buffers);
		spin_unlock(&buffers->rb_lock);

		rc = rpcrdma_ep_post_recv(ia, ep, rep);
		if (rc)
			goto out_rc;
	}

	return 0;

out_reqbuf:
	spin_unlock(&buffers->rb_lock);
	pr_warn("%s: no extra receive buffers\n", __func__);
	return -ENOMEM;

out_rc:
	rpcrdma_recv_buffer_put(rep);
	return rc;
}

/* How many chunk list items fit within our inline buffers?
 */
unsigned int
rpcrdma_max_segments(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	int bytes, segments;

	bytes = min_t(unsigned int, cdata->inline_wsize, cdata->inline_rsize);
	bytes -= RPCRDMA_HDRLEN_MIN;
	if (bytes < sizeof(struct rpcrdma_segment) * 2) {
		pr_warn("RPC:       %s: inline threshold too small\n",
			__func__);
		return 0;
	}

	segments = 1 << (fls(bytes / sizeof(struct rpcrdma_segment)) - 1);
	dprintk("RPC:       %s: max chunk list size = %d segments\n",
		__func__, segments);
	return segments;
}