Lines matching "no-memory-wc" (search query: +full:no +full:- +full:memory +full:- +full:wc)

1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (c) 2014-2017 Oracle. All rights reserved.
4 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
9 * COPYING in the main directory of this source tree, or the BSD-type
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
49 * o buffer memory
91 struct rpcrdma_ep *ep = r_xprt->rx_ep; in rpcrdma_xprt_drain()
92 struct rdma_cm_id *id = ep->re_id; in rpcrdma_xprt_drain()
97 if (atomic_inc_return(&ep->re_receiving) > 1) in rpcrdma_xprt_drain()
98 wait_for_completion(&ep->re_done); in rpcrdma_xprt_drain()
103 ib_drain_rq(id->qp); in rpcrdma_xprt_drain()
108 ib_drain_sq(id->qp); in rpcrdma_xprt_drain()
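/* Editor's note: in the generic verbs implementation, ib_drain_rq() and
 * ib_drain_sq() each post a final marker work request and wait for its
 * completion, after which every earlier Receive or Send WR on that queue
 * has been flushed.  The re_receiving/re_done handshake above ensures a
 * Receive-posting path still in flight finishes before the drain runs.
 */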
119 if (atomic_add_unless(&ep->re_force_disconnect, 1, 1)) in rpcrdma_force_disconnect()
120 xprt_force_disconnect(ep->re_xprt); in rpcrdma_force_disconnect()
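/* atomic_add_unless(&ep->re_force_disconnect, 1, 1) increments the
 * counter only if it is not already 1, and returns true only in that
 * case, so xprt_force_disconnect() is called by just the first of any
 * concurrent callers: many possible triggers collapse into one disconnect.
 */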
124 * rpcrdma_flush_disconnect - Disconnect on flushed completion
126 * @wc: work completion entry
130 void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc) in rpcrdma_flush_disconnect() argument
132 if (wc->status != IB_WC_SUCCESS) in rpcrdma_flush_disconnect()
133 rpcrdma_force_disconnect(r_xprt->rx_ep); in rpcrdma_flush_disconnect()
137 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
139 * @wc: WCE for a completed Send WR
142 static void rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc) in rpcrdma_wc_send() argument
144 struct ib_cqe *cqe = wc->wr_cqe; in rpcrdma_wc_send()
147 struct rpcrdma_xprt *r_xprt = cq->cq_context; in rpcrdma_wc_send()
150 trace_xprtrdma_wc_send(wc, &sc->sc_cid); in rpcrdma_wc_send()
152 rpcrdma_flush_disconnect(r_xprt, wc); in rpcrdma_wc_send()
156 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
158 * @wc: WCE for a completed Receive WR
161 static void rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) in rpcrdma_wc_receive() argument
163 struct ib_cqe *cqe = wc->wr_cqe; in rpcrdma_wc_receive()
166 struct rpcrdma_xprt *r_xprt = cq->cq_context; in rpcrdma_wc_receive()
169 trace_xprtrdma_wc_receive(wc, &rep->rr_cid); in rpcrdma_wc_receive()
170 --r_xprt->rx_ep->re_receive_count; in rpcrdma_wc_receive()
171 if (wc->status != IB_WC_SUCCESS) in rpcrdma_wc_receive()
174 /* status == SUCCESS means all fields in wc are trustworthy */ in rpcrdma_wc_receive()
175 rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len); in rpcrdma_wc_receive()
176 rep->rr_wc_flags = wc->wc_flags; in rpcrdma_wc_receive()
177 rep->rr_inv_rkey = wc->ex.invalidate_rkey; in rpcrdma_wc_receive()
179 ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf), in rpcrdma_wc_receive()
180 rdmab_addr(rep->rr_rdmabuf), in rpcrdma_wc_receive()
181 wc->byte_len, DMA_FROM_DEVICE); in rpcrdma_wc_receive()
187 rpcrdma_flush_disconnect(r_xprt, wc); in rpcrdma_wc_receive()
188 rpcrdma_rep_put(&r_xprt->rx_buf, rep); in rpcrdma_wc_receive()
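/* Illustrative sketch (editor's addition, not part of verbs.c): the
 * provider hands back the ib_cqe that was attached to the WR, and
 * container_of() recovers the enclosing rpcrdma_rep, the same pattern
 * the two handlers above rely on.  Only wc->status may be trusted until
 * it has been checked against IB_WC_SUCCESS.
 */
static void example_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_rep *rep =
		container_of(cqe, struct rpcrdma_rep, rr_cqe);

	if (wc->status != IB_WC_SUCCESS)
		return;		/* flushed; other wc fields are undefined */

	/* On success, byte_len, wc_flags, and ex.invalidate_rkey are valid */
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
}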
194 const struct rpcrdma_connect_private *pmsg = param->private_data; in rpcrdma_update_cm_private()
197 /* Default settings for RPC-over-RDMA Version One */ in rpcrdma_update_cm_private()
202 pmsg->cp_magic == rpcrdma_cmp_magic && in rpcrdma_update_cm_private()
203 pmsg->cp_version == RPCRDMA_CMP_VERSION) { in rpcrdma_update_cm_private()
204 rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size); in rpcrdma_update_cm_private()
205 wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size); in rpcrdma_update_cm_private()
208 if (rsize < ep->re_inline_recv) in rpcrdma_update_cm_private()
209 ep->re_inline_recv = rsize; in rpcrdma_update_cm_private()
210 if (wsize < ep->re_inline_send) in rpcrdma_update_cm_private()
211 ep->re_inline_send = wsize; in rpcrdma_update_cm_private()
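/* Each peer advertises its inline thresholds in the RDMA CM private data
 * (struct rpcrdma_connect_private).  If the peer's message is missing or
 * carries the wrong magic or version, the RPC-over-RDMA Version One
 * defaults are used; otherwise each direction is clamped to the smaller
 * of the local and remote values, as shown above.
 */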
217 * rpcrdma_cm_event_handler - Handle RDMA CM events
227 struct rpcrdma_ep *ep = id->context; in rpcrdma_cm_event_handler()
231 switch (event->event) { in rpcrdma_cm_event_handler()
234 ep->re_async_rc = 0; in rpcrdma_cm_event_handler()
235 complete(&ep->re_done); in rpcrdma_cm_event_handler()
238 ep->re_async_rc = -EPROTO; in rpcrdma_cm_event_handler()
239 complete(&ep->re_done); in rpcrdma_cm_event_handler()
242 ep->re_async_rc = -ENETUNREACH; in rpcrdma_cm_event_handler()
243 complete(&ep->re_done); in rpcrdma_cm_event_handler()
246 ep->re_connect_status = -ENODEV; in rpcrdma_cm_event_handler()
250 ep->re_connect_status = 1; in rpcrdma_cm_event_handler()
251 rpcrdma_update_cm_private(ep, &event->param.conn); in rpcrdma_cm_event_handler()
253 wake_up_all(&ep->re_connect_wait); in rpcrdma_cm_event_handler()
256 ep->re_connect_status = -ENOTCONN; in rpcrdma_cm_event_handler()
259 ep->re_connect_status = -ENETUNREACH; in rpcrdma_cm_event_handler()
262 ep->re_connect_status = -ECONNREFUSED; in rpcrdma_cm_event_handler()
263 if (event->status == IB_CM_REJ_STALE_CONN) in rpcrdma_cm_event_handler()
264 ep->re_connect_status = -ENOTCONN; in rpcrdma_cm_event_handler()
266 wake_up_all(&ep->re_connect_wait); in rpcrdma_cm_event_handler()
269 ep->re_connect_status = -ECONNABORTED; in rpcrdma_cm_event_handler()
284 trace_xprtrdma_device_removal(ep->re_id); in rpcrdma_ep_removal_done()
285 xprt_force_disconnect(ep->re_xprt); in rpcrdma_ep_removal_done()
292 struct rpc_xprt *xprt = &r_xprt->rx_xprt; in rpcrdma_create_id()
296 init_completion(&ep->re_done); in rpcrdma_create_id()
298 id = rdma_create_id(xprt->xprt_net, rpcrdma_cm_event_handler, ep, in rpcrdma_create_id()
303 ep->re_async_rc = -ETIMEDOUT; in rpcrdma_create_id()
304 rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)&xprt->addr, in rpcrdma_create_id()
308 rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout); in rpcrdma_create_id()
312 rc = ep->re_async_rc; in rpcrdma_create_id()
316 ep->re_async_rc = -ETIMEDOUT; in rpcrdma_create_id()
320 rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout); in rpcrdma_create_id()
323 rc = ep->re_async_rc; in rpcrdma_create_id()
327 rc = rpcrdma_rn_register(id->device, &ep->re_rn, rpcrdma_ep_removal_done); in rpcrdma_create_id()
342 if (ep->re_id->qp) { in rpcrdma_ep_destroy()
343 rdma_destroy_qp(ep->re_id); in rpcrdma_ep_destroy()
344 ep->re_id->qp = NULL; in rpcrdma_ep_destroy()
347 if (ep->re_attr.recv_cq) in rpcrdma_ep_destroy()
348 ib_free_cq(ep->re_attr.recv_cq); in rpcrdma_ep_destroy()
349 ep->re_attr.recv_cq = NULL; in rpcrdma_ep_destroy()
350 if (ep->re_attr.send_cq) in rpcrdma_ep_destroy()
351 ib_free_cq(ep->re_attr.send_cq); in rpcrdma_ep_destroy()
352 ep->re_attr.send_cq = NULL; in rpcrdma_ep_destroy()
354 if (ep->re_pd) in rpcrdma_ep_destroy()
355 ib_dealloc_pd(ep->re_pd); in rpcrdma_ep_destroy()
356 ep->re_pd = NULL; in rpcrdma_ep_destroy()
358 rpcrdma_rn_unregister(ep->re_id->device, &ep->re_rn); in rpcrdma_ep_destroy()
366 kref_get(&ep->re_kref); in rpcrdma_ep_get()
375 return kref_put(&ep->re_kref, rpcrdma_ep_destroy); in rpcrdma_ep_put()
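/* The endpoint's hardware resources live as long as re_kref:
 * rpcrdma_ep_put() returns true and runs rpcrdma_ep_destroy() (above)
 * only when the final reference is dropped.
 */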
388 return -ENOTCONN; in rpcrdma_ep_create()
389 ep->re_xprt = &r_xprt->rx_xprt; in rpcrdma_ep_create()
390 kref_init(&ep->re_kref); in rpcrdma_ep_create()
398 device = id->device; in rpcrdma_ep_create()
399 ep->re_id = id; in rpcrdma_ep_create()
400 reinit_completion(&ep->re_done); in rpcrdma_ep_create()
402 ep->re_max_requests = r_xprt->rx_xprt.max_reqs; in rpcrdma_ep_create()
403 ep->re_inline_send = xprt_rdma_max_inline_write; in rpcrdma_ep_create()
404 ep->re_inline_recv = xprt_rdma_max_inline_read; in rpcrdma_ep_create()
409 r_xprt->rx_buf.rb_max_requests = cpu_to_be32(ep->re_max_requests); in rpcrdma_ep_create()
411 ep->re_attr.srq = NULL; in rpcrdma_ep_create()
412 ep->re_attr.cap.max_inline_data = 0; in rpcrdma_ep_create()
413 ep->re_attr.sq_sig_type = IB_SIGNAL_REQ_WR; in rpcrdma_ep_create()
414 ep->re_attr.qp_type = IB_QPT_RC; in rpcrdma_ep_create()
415 ep->re_attr.port_num = ~0; in rpcrdma_ep_create()
417 ep->re_send_batch = ep->re_max_requests >> 3; in rpcrdma_ep_create()
418 ep->re_send_count = ep->re_send_batch; in rpcrdma_ep_create()
419 init_waitqueue_head(&ep->re_connect_wait); in rpcrdma_ep_create()
421 ep->re_attr.send_cq = ib_alloc_cq_any(device, r_xprt, in rpcrdma_ep_create()
422 ep->re_attr.cap.max_send_wr, in rpcrdma_ep_create()
424 if (IS_ERR(ep->re_attr.send_cq)) { in rpcrdma_ep_create()
425 rc = PTR_ERR(ep->re_attr.send_cq); in rpcrdma_ep_create()
426 ep->re_attr.send_cq = NULL; in rpcrdma_ep_create()
430 ep->re_attr.recv_cq = ib_alloc_cq_any(device, r_xprt, in rpcrdma_ep_create()
431 ep->re_attr.cap.max_recv_wr, in rpcrdma_ep_create()
433 if (IS_ERR(ep->re_attr.recv_cq)) { in rpcrdma_ep_create()
434 rc = PTR_ERR(ep->re_attr.recv_cq); in rpcrdma_ep_create()
435 ep->re_attr.recv_cq = NULL; in rpcrdma_ep_create()
438 ep->re_receive_count = 0; in rpcrdma_ep_create()
441 memset(&ep->re_remote_cma, 0, sizeof(ep->re_remote_cma)); in rpcrdma_ep_create()
443 /* Prepare RDMA-CM private message */ in rpcrdma_ep_create()
444 pmsg = &ep->re_cm_private; in rpcrdma_ep_create()
445 pmsg->cp_magic = rpcrdma_cmp_magic; in rpcrdma_ep_create()
446 pmsg->cp_version = RPCRDMA_CMP_VERSION; in rpcrdma_ep_create()
447 pmsg->cp_flags |= RPCRDMA_CMP_F_SND_W_INV_OK; in rpcrdma_ep_create()
448 pmsg->cp_send_size = rpcrdma_encode_buffer_size(ep->re_inline_send); in rpcrdma_ep_create()
449 pmsg->cp_recv_size = rpcrdma_encode_buffer_size(ep->re_inline_recv); in rpcrdma_ep_create()
450 ep->re_remote_cma.private_data = pmsg; in rpcrdma_ep_create()
451 ep->re_remote_cma.private_data_len = sizeof(*pmsg); in rpcrdma_ep_create()
454 ep->re_remote_cma.initiator_depth = 0; in rpcrdma_ep_create()
455 ep->re_remote_cma.responder_resources = in rpcrdma_ep_create()
456 min_t(int, U8_MAX, device->attrs.max_qp_rd_atom); in rpcrdma_ep_create()
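	/* rdma_conn_param::responder_resources is a u8, hence the clamp to
	 * U8_MAX before applying the device's max_qp_rd_atom limit.
	 */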
459 * GID changes quickly. RPC layer handles re-establishing in rpcrdma_ep_create()
462 ep->re_remote_cma.retry_count = 6; in rpcrdma_ep_create()
464 /* RPC-over-RDMA handles its own flow control. In addition, in rpcrdma_ep_create()
465 * make all RNR NAKs visible so we know that RPC-over-RDMA in rpcrdma_ep_create()
466 * flow control is working correctly (no NAKs should be seen). in rpcrdma_ep_create()
468 ep->re_remote_cma.flow_control = 0; in rpcrdma_ep_create()
469 ep->re_remote_cma.rnr_retry_count = 0; in rpcrdma_ep_create()
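/* Because RPC-over-RDMA posts Receives ahead of demand under its own
 * credit mechanism, hardware flow control adds nothing here, and any RNR
 * NAK would point at a credit accounting bug.  An rnr_retry_count of 0
 * makes such a bug fail fast rather than being hidden by retransmission.
 */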
471 ep->re_pd = ib_alloc_pd(device, 0); in rpcrdma_ep_create()
472 if (IS_ERR(ep->re_pd)) { in rpcrdma_ep_create()
473 rc = PTR_ERR(ep->re_pd); in rpcrdma_ep_create()
474 ep->re_pd = NULL; in rpcrdma_ep_create()
478 rc = rdma_create_qp(id, ep->re_pd, &ep->re_attr); in rpcrdma_ep_create()
482 r_xprt->rx_ep = ep; in rpcrdma_ep_create()
492 * rpcrdma_xprt_connect - Connect an unconnected transport
499 struct rpc_xprt *xprt = &r_xprt->rx_xprt; in rpcrdma_xprt_connect()
506 ep = r_xprt->rx_ep; in rpcrdma_xprt_connect()
517 rc = rdma_connect(ep->re_id, &ep->re_remote_cma); in rpcrdma_xprt_connect()
521 if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO) in rpcrdma_xprt_connect()
522 xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO; in rpcrdma_xprt_connect()
523 wait_event_interruptible(ep->re_connect_wait, in rpcrdma_xprt_connect()
524 ep->re_connect_status != 0); in rpcrdma_xprt_connect()
525 if (ep->re_connect_status <= 0) { in rpcrdma_xprt_connect()
526 rc = ep->re_connect_status; in rpcrdma_xprt_connect()
532 rc = -ENOTCONN; in rpcrdma_xprt_connect()
538 rc = -ENOTCONN; in rpcrdma_xprt_connect()
550 * rpcrdma_xprt_disconnect - Disconnect underlying transport
557 * resources and prepared for the next ->connect operation.
561 struct rpcrdma_ep *ep = r_xprt->rx_ep; in rpcrdma_xprt_disconnect()
568 id = ep->re_id; in rpcrdma_xprt_disconnect()
581 r_xprt->rx_ep = NULL; in rpcrdma_xprt_disconnect()
584 /* Fixed-size circular FIFO queue. This implementation is wait-free and
585 * lock-free.
590 * ->send_request call at a time.
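/* Editor's sketch of the wait-free ring described above (illustrative
 * only; struct sc_ring and these helpers are hypothetical, simplified
 * from the rpcrdma code that follows).  The serialized ->send_request
 * path advances the head to take a free context; the Send completion
 * handler advances the tail to return contexts.  The two sides share no
 * lock.
 */
struct sc_ring {
	unsigned long	head;	/* next slot to hand out; sender-owned */
	unsigned long	tail;	/* last slot retired; completion-owned */
	unsigned long	last;	/* highest valid index */
	void		*slot[];
};

static unsigned long sc_ring_next(struct sc_ring *r, unsigned long i)
{
	return likely(i < r->last) ? i + 1 : 0;	/* wrap without division */
}

/* Caller-serialized, as rpcrdma_sendctx_get_locked() is */
static void *sc_ring_get(struct sc_ring *r)
{
	unsigned long next = sc_ring_next(r, r->head);

	if (next == READ_ONCE(r->tail))
		return NULL;		/* no free contexts */
	r->head = next;			/* the caller's lock orders this store */
	return r->slot[next];
}

/* Called from completion context to retire slots up to @retired */
static void sc_ring_put(struct sc_ring *r, unsigned long retired)
{
	/* Pairs with the READ_ONCE() in sc_ring_get(): all work done on
	 * the retired slots is visible before the new tail value is.
	 */
	smp_store_release(&r->tail, retired);
}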
603 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_sendctxs_destroy()
606 if (!buf->rb_sc_ctxs) in rpcrdma_sendctxs_destroy()
608 for (i = 0; i <= buf->rb_sc_last; i++) in rpcrdma_sendctxs_destroy()
609 kfree(buf->rb_sc_ctxs[i]); in rpcrdma_sendctxs_destroy()
610 kfree(buf->rb_sc_ctxs); in rpcrdma_sendctxs_destroy()
611 buf->rb_sc_ctxs = NULL; in rpcrdma_sendctxs_destroy()
618 sc = kzalloc(struct_size(sc, sc_sges, ep->re_attr.cap.max_send_sge), in rpcrdma_sendctx_create()
623 sc->sc_cqe.done = rpcrdma_wc_send; in rpcrdma_sendctx_create()
624 sc->sc_cid.ci_queue_id = ep->re_attr.send_cq->res.id; in rpcrdma_sendctx_create()
625 sc->sc_cid.ci_completion_id = in rpcrdma_sendctx_create()
626 atomic_inc_return(&ep->re_completion_ids); in rpcrdma_sendctx_create()
632 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_sendctxs_create()
638 * the ->send_request call to fail temporarily before too many in rpcrdma_sendctxs_create()
641 i = r_xprt->rx_ep->re_max_requests + RPCRDMA_MAX_BC_REQUESTS; in rpcrdma_sendctxs_create()
642 buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), XPRTRDMA_GFP_FLAGS); in rpcrdma_sendctxs_create()
643 if (!buf->rb_sc_ctxs) in rpcrdma_sendctxs_create()
644 return -ENOMEM; in rpcrdma_sendctxs_create()
646 buf->rb_sc_last = i - 1; in rpcrdma_sendctxs_create()
647 for (i = 0; i <= buf->rb_sc_last; i++) { in rpcrdma_sendctxs_create()
648 sc = rpcrdma_sendctx_create(r_xprt->rx_ep); in rpcrdma_sendctxs_create()
650 return -ENOMEM; in rpcrdma_sendctxs_create()
652 buf->rb_sc_ctxs[i] = sc; in rpcrdma_sendctxs_create()
655 buf->rb_sc_head = 0; in rpcrdma_sendctxs_create()
656 buf->rb_sc_tail = 0; in rpcrdma_sendctxs_create()
667 return likely(item < buf->rb_sc_last) ? item + 1 : 0; in rpcrdma_sendctx_next()
671 * rpcrdma_sendctx_get_locked - Acquire a send context
680 * provides an effective memory barrier that flushes the new value
685 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_sendctx_get_locked()
689 next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head); in rpcrdma_sendctx_get_locked()
691 if (next_head == READ_ONCE(buf->rb_sc_tail)) in rpcrdma_sendctx_get_locked()
695 sc = buf->rb_sc_ctxs[next_head]; in rpcrdma_sendctx_get_locked()
697 /* Releasing the lock in the caller acts as a memory in rpcrdma_sendctx_get_locked()
700 buf->rb_sc_head = next_head; in rpcrdma_sendctx_get_locked()
709 xprt_wait_for_buffer_space(&r_xprt->rx_xprt); in rpcrdma_sendctx_get_locked()
710 r_xprt->rx_stats.empty_sendctx_q++; in rpcrdma_sendctx_get_locked()
715 * rpcrdma_sendctx_put_locked - Release a send context
727 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_sendctx_put_locked()
733 next_tail = buf->rb_sc_tail; in rpcrdma_sendctx_put_locked()
738 rpcrdma_sendctx_unmap(buf->rb_sc_ctxs[next_tail]); in rpcrdma_sendctx_put_locked()
740 } while (buf->rb_sc_ctxs[next_tail] != sc); in rpcrdma_sendctx_put_locked()
743 smp_store_release(&buf->rb_sc_tail, next_tail); in rpcrdma_sendctx_put_locked()
745 xprt_write_space(&r_xprt->rx_xprt); in rpcrdma_sendctx_put_locked()
751 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_mrs_create()
752 struct rpcrdma_ep *ep = r_xprt->rx_ep; in rpcrdma_mrs_create()
753 struct ib_device *device = ep->re_id->device; in rpcrdma_mrs_create()
756 /* Try to allocate enough to perform one full-sized I/O */ in rpcrdma_mrs_create()
757 for (count = 0; count < ep->re_max_rdma_segs; count++) { in rpcrdma_mrs_create()
772 spin_lock(&buf->rb_lock); in rpcrdma_mrs_create()
773 rpcrdma_mr_push(mr, &buf->rb_mrs); in rpcrdma_mrs_create()
774 list_add(&mr->mr_all, &buf->rb_all_mrs); in rpcrdma_mrs_create()
775 spin_unlock(&buf->rb_lock); in rpcrdma_mrs_create()
778 r_xprt->rx_stats.mrs_allocated += count; in rpcrdma_mrs_create()
791 xprt_write_space(&r_xprt->rx_xprt); in rpcrdma_mr_refresh_worker()
795 * rpcrdma_mrs_refresh - Wake the MR refresh worker
801 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_mrs_refresh()
802 struct rpcrdma_ep *ep = r_xprt->rx_ep; in rpcrdma_mrs_refresh()
804 /* If there is no underlying connection, it's no use in rpcrdma_mrs_refresh()
807 if (ep->re_connect_status != 1) in rpcrdma_mrs_refresh()
809 queue_work(system_highpri_wq, &buf->rb_refresh_worker); in rpcrdma_mrs_refresh()
813 * rpcrdma_req_create - Allocate an rpcrdma_req object
822 struct rpcrdma_buffer *buffer = &r_xprt->rx_buf; in rpcrdma_req_create()
829 req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE); in rpcrdma_req_create()
830 if (!req->rl_sendbuf) in rpcrdma_req_create()
833 req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE); in rpcrdma_req_create()
834 if (!req->rl_recvbuf) in rpcrdma_req_create()
837 INIT_LIST_HEAD(&req->rl_free_mrs); in rpcrdma_req_create()
838 INIT_LIST_HEAD(&req->rl_registered); in rpcrdma_req_create()
839 spin_lock(&buffer->rb_lock); in rpcrdma_req_create()
840 list_add(&req->rl_all, &buffer->rb_allreqs); in rpcrdma_req_create()
841 spin_unlock(&buffer->rb_lock); in rpcrdma_req_create()
845 rpcrdma_regbuf_free(req->rl_sendbuf); in rpcrdma_req_create()
853 * rpcrdma_req_setup - Per-connection instance setup of an rpcrdma_req object
866 r_xprt->rx_ep->re_max_rdma_segs * rpcrdma_readchunk_maxsz; in rpcrdma_req_setup()
876 req->rl_rdmabuf = rb; in rpcrdma_req_setup()
877 xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb)); in rpcrdma_req_setup()
883 return -ENOMEM; in rpcrdma_req_setup()
893 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_reqs_setup()
897 list_for_each_entry(req, &buf->rb_allreqs, rl_all) { in rpcrdma_reqs_setup()
910 req->rl_slot.rq_cong = 0; in rpcrdma_req_reset()
912 rpcrdma_regbuf_free(req->rl_rdmabuf); in rpcrdma_req_reset()
913 req->rl_rdmabuf = NULL; in rpcrdma_req_reset()
915 rpcrdma_regbuf_dma_unmap(req->rl_sendbuf); in rpcrdma_req_reset()
916 rpcrdma_regbuf_dma_unmap(req->rl_recvbuf); in rpcrdma_req_reset()
919 * req->rl_registered list unless a successful completion in rpcrdma_req_reset()
920 * has occurred, so they cannot be re-used. in rpcrdma_req_reset()
922 while ((mr = rpcrdma_mr_pop(&req->rl_registered))) { in rpcrdma_req_reset()
923 struct rpcrdma_buffer *buf = &mr->mr_xprt->rx_buf; in rpcrdma_req_reset()
925 spin_lock(&buf->rb_lock); in rpcrdma_req_reset()
926 list_del(&mr->mr_all); in rpcrdma_req_reset()
927 spin_unlock(&buf->rb_lock); in rpcrdma_req_reset()
940 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_reqs_reset()
943 list_for_each_entry(req, &buf->rb_allreqs, rl_all) in rpcrdma_reqs_reset()
950 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_rep_create()
951 struct rpcrdma_ep *ep = r_xprt->rx_ep; in rpcrdma_rep_create()
952 struct ib_device *device = ep->re_id->device; in rpcrdma_rep_create()
959 rep->rr_rdmabuf = rpcrdma_regbuf_alloc_node(ep->re_inline_recv, in rpcrdma_rep_create()
962 if (!rep->rr_rdmabuf) in rpcrdma_rep_create()
965 rep->rr_cid.ci_completion_id = in rpcrdma_rep_create()
966 atomic_inc_return(&r_xprt->rx_ep->re_completion_ids); in rpcrdma_rep_create()
968 xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf), in rpcrdma_rep_create()
969 rdmab_length(rep->rr_rdmabuf)); in rpcrdma_rep_create()
970 rep->rr_cqe.done = rpcrdma_wc_receive; in rpcrdma_rep_create()
971 rep->rr_rxprt = r_xprt; in rpcrdma_rep_create()
972 rep->rr_recv_wr.next = NULL; in rpcrdma_rep_create()
973 rep->rr_recv_wr.wr_cqe = &rep->rr_cqe; in rpcrdma_rep_create()
974 rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov; in rpcrdma_rep_create()
975 rep->rr_recv_wr.num_sge = 1; in rpcrdma_rep_create()
977 spin_lock(&buf->rb_lock); in rpcrdma_rep_create()
978 list_add(&rep->rr_all, &buf->rb_all_reps); in rpcrdma_rep_create()
979 spin_unlock(&buf->rb_lock); in rpcrdma_rep_create()
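/* Each rpcrdma_rep owns a DMA-able buffer sized to the inline receive
 * threshold and a prebuilt single-SGE Receive WR, so refilling the
 * Receive Queue (rpcrdma_post_recvs() below) only has to chain these
 * WRs together and post the chain with one verb call.
 */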
990 rpcrdma_regbuf_free(rep->rr_rdmabuf); in rpcrdma_rep_free()
999 node = llist_del_first(&buf->rb_free_reps); in rpcrdma_rep_get_locked()
1006 * rpcrdma_rep_put - Release rpcrdma_rep back to free list
1013 llist_add(&rep->rr_node, &buf->rb_free_reps); in rpcrdma_rep_put()
1022 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_reps_unmap()
1025 list_for_each_entry(rep, &buf->rb_all_reps, rr_all) in rpcrdma_reps_unmap()
1026 rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf); in rpcrdma_reps_unmap()
1033 spin_lock(&buf->rb_lock); in rpcrdma_reps_destroy()
1034 while ((rep = list_first_entry_or_null(&buf->rb_all_reps, in rpcrdma_reps_destroy()
1037 list_del(&rep->rr_all); in rpcrdma_reps_destroy()
1038 spin_unlock(&buf->rb_lock); in rpcrdma_reps_destroy()
1042 spin_lock(&buf->rb_lock); in rpcrdma_reps_destroy()
1044 spin_unlock(&buf->rb_lock); in rpcrdma_reps_destroy()
1048 * rpcrdma_buffer_create - Create initial set of req/rep objects
1055 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_buffer_create()
1058 buf->rb_bc_srv_max_requests = 0; in rpcrdma_buffer_create()
1059 spin_lock_init(&buf->rb_lock); in rpcrdma_buffer_create()
1060 INIT_LIST_HEAD(&buf->rb_mrs); in rpcrdma_buffer_create()
1061 INIT_LIST_HEAD(&buf->rb_all_mrs); in rpcrdma_buffer_create()
1062 INIT_WORK(&buf->rb_refresh_worker, rpcrdma_mr_refresh_worker); in rpcrdma_buffer_create()
1064 INIT_LIST_HEAD(&buf->rb_send_bufs); in rpcrdma_buffer_create()
1065 INIT_LIST_HEAD(&buf->rb_allreqs); in rpcrdma_buffer_create()
1066 INIT_LIST_HEAD(&buf->rb_all_reps); in rpcrdma_buffer_create()
1068 rc = -ENOMEM; in rpcrdma_buffer_create()
1069 for (i = 0; i < r_xprt->rx_xprt.max_reqs; i++) { in rpcrdma_buffer_create()
1076 list_add(&req->rl_list, &buf->rb_send_bufs); in rpcrdma_buffer_create()
1079 init_llist_head(&buf->rb_free_reps); in rpcrdma_buffer_create()
1088 * rpcrdma_req_destroy - Destroy an rpcrdma_req object
1092 * removing req->rl_all from buf->rb_all_reqs safely.
1098 list_del(&req->rl_all); in rpcrdma_req_destroy()
1100 while ((mr = rpcrdma_mr_pop(&req->rl_free_mrs))) { in rpcrdma_req_destroy()
1101 struct rpcrdma_buffer *buf = &mr->mr_xprt->rx_buf; in rpcrdma_req_destroy()
1103 spin_lock(&buf->rb_lock); in rpcrdma_req_destroy()
1104 list_del(&mr->mr_all); in rpcrdma_req_destroy()
1105 spin_unlock(&buf->rb_lock); in rpcrdma_req_destroy()
1110 rpcrdma_regbuf_free(req->rl_recvbuf); in rpcrdma_req_destroy()
1111 rpcrdma_regbuf_free(req->rl_sendbuf); in rpcrdma_req_destroy()
1112 rpcrdma_regbuf_free(req->rl_rdmabuf); in rpcrdma_req_destroy()
1117 * rpcrdma_mrs_destroy - Release all of a transport's MRs
1121 * removing mr->mr_list from req->rl_free_mrs safely.
1125 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_mrs_destroy()
1128 cancel_work_sync(&buf->rb_refresh_worker); in rpcrdma_mrs_destroy()
1130 spin_lock(&buf->rb_lock); in rpcrdma_mrs_destroy()
1131 while ((mr = list_first_entry_or_null(&buf->rb_all_mrs, in rpcrdma_mrs_destroy()
1134 list_del(&mr->mr_list); in rpcrdma_mrs_destroy()
1135 list_del(&mr->mr_all); in rpcrdma_mrs_destroy()
1136 spin_unlock(&buf->rb_lock); in rpcrdma_mrs_destroy()
1140 spin_lock(&buf->rb_lock); in rpcrdma_mrs_destroy()
1142 spin_unlock(&buf->rb_lock); in rpcrdma_mrs_destroy()
1146 * rpcrdma_buffer_destroy - Release all hw resources
1150 * - No more Send or Receive completions can occur
1151 * - All MRs, reps, and reqs are returned to their free lists
1158 while (!list_empty(&buf->rb_send_bufs)) { in rpcrdma_buffer_destroy()
1161 req = list_first_entry(&buf->rb_send_bufs, in rpcrdma_buffer_destroy()
1163 list_del(&req->rl_list); in rpcrdma_buffer_destroy()
1169 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
1172 * Returns an initialized rpcrdma_mr or NULL if no free
1178 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_mr_get()
1181 spin_lock(&buf->rb_lock); in rpcrdma_mr_get()
1182 mr = rpcrdma_mr_pop(&buf->rb_mrs); in rpcrdma_mr_get()
1183 spin_unlock(&buf->rb_lock); in rpcrdma_mr_get()
1188 * rpcrdma_reply_put - Put reply buffers back into pool
1195 if (req->rl_reply) { in rpcrdma_reply_put()
1196 rpcrdma_rep_put(buffers, req->rl_reply); in rpcrdma_reply_put()
1197 req->rl_reply = NULL; in rpcrdma_reply_put()
1202 * rpcrdma_buffer_get - Get a request buffer
1212 spin_lock(&buffers->rb_lock); in rpcrdma_buffer_get()
1213 req = list_first_entry_or_null(&buffers->rb_send_bufs, in rpcrdma_buffer_get()
1216 list_del_init(&req->rl_list); in rpcrdma_buffer_get()
1217 spin_unlock(&buffers->rb_lock); in rpcrdma_buffer_get()
1222 * rpcrdma_buffer_put - Put request/reply buffers back into pool
1231 spin_lock(&buffers->rb_lock); in rpcrdma_buffer_put()
1232 list_add(&req->rl_list, &buffers->rb_send_bufs); in rpcrdma_buffer_put()
1233 spin_unlock(&buffers->rb_lock); in rpcrdma_buffer_put()
1251 rb->rg_data = kmalloc_node(size, XPRTRDMA_GFP_FLAGS, node); in rpcrdma_regbuf_alloc_node()
1252 if (!rb->rg_data) { in rpcrdma_regbuf_alloc_node()
1257 rb->rg_device = NULL; in rpcrdma_regbuf_alloc_node()
1258 rb->rg_direction = direction; in rpcrdma_regbuf_alloc_node()
1259 rb->rg_iov.length = size; in rpcrdma_regbuf_alloc_node()
1270 * rpcrdma_regbuf_realloc - re-allocate a SEND/RECV buffer
1287 kfree(rb->rg_data); in rpcrdma_regbuf_realloc()
1289 rb->rg_data = buf; in rpcrdma_regbuf_realloc()
1290 rb->rg_iov.length = size; in rpcrdma_regbuf_realloc()
1295 * __rpcrdma_regbuf_dma_map - DMA-map a regbuf
1304 struct ib_device *device = r_xprt->rx_ep->re_id->device; in __rpcrdma_regbuf_dma_map()
1306 if (rb->rg_direction == DMA_NONE) in __rpcrdma_regbuf_dma_map()
1309 rb->rg_iov.addr = ib_dma_map_single(device, rdmab_data(rb), in __rpcrdma_regbuf_dma_map()
1310 rdmab_length(rb), rb->rg_direction); in __rpcrdma_regbuf_dma_map()
1316 rb->rg_device = device; in __rpcrdma_regbuf_dma_map()
1317 rb->rg_iov.lkey = r_xprt->rx_ep->re_pd->local_dma_lkey; in __rpcrdma_regbuf_dma_map()
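/* ib_dma_map_single() can fail; callers of this pattern normally verify
 * the handle with ib_dma_mapping_error() before recording rg_device and
 * the PD's local_dma_lkey, as this function presumably does in the lines
 * elided here.
 */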
1329 ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), rdmab_length(rb), in rpcrdma_regbuf_dma_unmap()
1330 rb->rg_direction); in rpcrdma_regbuf_dma_unmap()
1331 rb->rg_device = NULL; in rpcrdma_regbuf_dma_unmap()
1338 kfree(rb->rg_data); in rpcrdma_regbuf_free()
1343 * rpcrdma_post_recvs - Refill the Receive Queue
1350 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_post_recvs()
1351 struct rpcrdma_ep *ep = r_xprt->rx_ep; in rpcrdma_post_recvs()
1359 if (likely(ep->re_receive_count > needed)) in rpcrdma_post_recvs()
1361 needed -= ep->re_receive_count; in rpcrdma_post_recvs()
1364 if (atomic_inc_return(&ep->re_receiving) > 1) in rpcrdma_post_recvs()
1375 if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) { in rpcrdma_post_recvs()
1380 rep->rr_cid.ci_queue_id = ep->re_attr.recv_cq->res.id; in rpcrdma_post_recvs()
1381 trace_xprtrdma_post_recv(&rep->rr_cid); in rpcrdma_post_recvs()
1382 rep->rr_recv_wr.next = wr; in rpcrdma_post_recvs()
1383 wr = &rep->rr_recv_wr; in rpcrdma_post_recvs()
1384 --needed; in rpcrdma_post_recvs()
1390 rc = ib_post_recv(ep->re_id->qp, wr, in rpcrdma_post_recvs()
1398 wr = wr->next; in rpcrdma_post_recvs()
1400 --count; in rpcrdma_post_recvs()
1403 if (atomic_dec_return(&ep->re_receiving) > 0) in rpcrdma_post_recvs()
1404 complete(&ep->re_done); in rpcrdma_post_recvs()
1408 ep->re_receive_count += count; in rpcrdma_post_recvs()
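/* Editor's sketch (not from verbs.c) of the batching idiom used by
 * rpcrdma_post_recvs() above: link the prebuilt Receive WRs into a chain
 * and hand the whole chain to the provider with a single ib_post_recv()
 * call.  example_post_recv_chain(), reps, and nr are hypothetical names
 * for illustration.
 */
static int example_post_recv_chain(struct ib_qp *qp,
				   struct rpcrdma_rep **reps,
				   unsigned int nr)
{
	const struct ib_recv_wr *bad_wr;
	struct ib_recv_wr *chain = NULL;
	unsigned int i;

	for (i = 0; i < nr; i++) {
		struct ib_recv_wr *wr = &reps[i]->rr_recv_wr;

		wr->next = chain;	/* newest WR heads the chain */
		chain = wr;
	}

	if (!chain)
		return 0;

	/* One doorbell covers the whole batch; on failure, bad_wr points
	 * to the first WR that was not posted.
	 */
	return ib_post_recv(qp, chain, &bad_wr);
}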