Lines matching refs:rdma (apparently net/sunrpc/xprtrdma/svc_rdma_rw.c). Each entry shows the source line number, the matching line, and the enclosing function; a trailing "argument" or "local" marks whether rdma is a function parameter or a local variable at that site.

55 svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)  in svc_rdma_get_rw_ctxt()  argument
57 struct ib_device *dev = rdma->sc_cm_id->device; in svc_rdma_get_rw_ctxt()
62 spin_lock(&rdma->sc_rw_ctxt_lock); in svc_rdma_get_rw_ctxt()
63 node = llist_del_first(&rdma->sc_rw_ctxts); in svc_rdma_get_rw_ctxt()
64 spin_unlock(&rdma->sc_rw_ctxt_lock); in svc_rdma_get_rw_ctxt()
87 trace_svcrdma_rwctx_empty(rdma, sges); in svc_rdma_get_rw_ctxt()
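
The lock/del/unlock triple at lines 62-64 is the standard llist discipline: llist_add() is lock-free, but concurrent llist_del_first() callers must serialize among themselves, so only the consumer side takes sc_rw_ctxt_lock. A minimal sketch of the get path under that rule; the struct shape, the rw_node member name, and the plain kzalloc() fallback are assumptions (the real allocator sizes the context to the requested SGE count):

    #include <linux/llist.h>
    #include <linux/spinlock.h>
    #include <linux/slab.h>
    #include <linux/sunrpc/svc_rdma.h>

    /* Assumed shape: llist linkage plus the rdma_rw state. */
    struct svc_rdma_rw_ctxt {
            struct llist_node       rw_node;
            struct rdma_rw_ctx      rw_ctx;
            /* ... scatterlist storage ... */
    };

    /* Pop a cached R/W context, or allocate fresh if the cache is empty. */
    static struct svc_rdma_rw_ctxt *
    get_rw_ctxt_sketch(struct svcxprt_rdma *rdma)
    {
            struct llist_node *node;

            /* Deleters must exclude one another; adders need no lock. */
            spin_lock(&rdma->sc_rw_ctxt_lock);
            node = llist_del_first(&rdma->sc_rw_ctxts);
            spin_unlock(&rdma->sc_rw_ctxt_lock);
            if (node)
                    return llist_entry(node, struct svc_rdma_rw_ctxt, rw_node);

            return kzalloc(sizeof(struct svc_rdma_rw_ctxt), GFP_KERNEL);
    }

The matching put path (lines 98-101) is the cheap side of the bargain: __svc_rdma_put_rw_ctxt() presumably just llist_add()s the context back onto sc_rw_ctxts, which needs no lock at all.
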
98 static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma, in svc_rdma_put_rw_ctxt() argument
101 __svc_rdma_put_rw_ctxt(ctxt, &rdma->sc_rw_ctxts); in svc_rdma_put_rw_ctxt()
109 void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma) in svc_rdma_destroy_rw_ctxts() argument
114 while ((node = llist_del_first(&rdma->sc_rw_ctxts)) != NULL) { in svc_rdma_destroy_rw_ctxts()
131 static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma, in svc_rdma_rw_ctx_init() argument
138 ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num, in svc_rdma_rw_ctx_init()
142 trace_svcrdma_dma_map_rw_err(rdma, offset, handle, in svc_rdma_rw_ctx_init()
144 svc_rdma_put_rw_ctxt(rdma, ctxt); in svc_rdma_rw_ctx_init()
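
svc_rdma_rw_ctx_init() wraps the kernel's generic rdma_rw_ctx_init() (include/rdma/rw.h), which DMA-maps a scatterlist and builds the work requests that target the remote region {offset, handle}. On failure it traces the event and returns the context to the cache rather than leaking it. A sketch, assuming the scatterlist lives in the context as rw_sg_table.sgl (visible at line 187) with an rw_nents count (assumed name):

    #include <rdma/rw.h>
    #include <linux/sunrpc/svc_rdma.h>

    /* Map ctxt's pages and build WRs aimed at the peer's {offset, handle}. */
    static int rw_ctx_init_sketch(struct svcxprt_rdma *rdma,
                                  struct svc_rdma_rw_ctxt *ctxt,
                                  u64 offset, u32 handle,
                                  enum dma_data_direction dir)
    {
            int ret;

            ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp,
                                   rdma->sc_port_num,
                                   ctxt->rw_sg_table.sgl, ctxt->rw_nents,
                                   0, offset, handle, dir);
            if (ret < 0)
                    svc_rdma_put_rw_ctxt(rdma, ctxt); /* mapping failed */
            return ret;
    }
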
154 void svc_rdma_cc_init(struct svcxprt_rdma *rdma, in svc_rdma_cc_init() argument
160 svc_rdma_send_cid_init(rdma, cid); in svc_rdma_cc_init()
172 void svc_rdma_cc_release(struct svcxprt_rdma *rdma, in svc_rdma_cc_release() argument
186 rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp, in svc_rdma_cc_release()
187 rdma->sc_port_num, ctxt->rw_sg_table.sgl, in svc_rdma_cc_release()
197 llist_add_batch(first, last, &rdma->sc_rw_ctxts); in svc_rdma_cc_release()
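
Release is the mirror image: each context's DMA mapping is torn down with rdma_rw_ctx_destroy(), and the contexts then go back to the cache in one lock-free llist_add_batch() (line 197) instead of one llist_add() per context. A sketch; the rw_list and rw_node member names are assumptions:

    #include <linux/list.h>
    #include <linux/llist.h>
    #include <rdma/rw.h>
    #include <linux/sunrpc/svc_rdma.h>

    /* Unmap every context, then give the whole chain back in one shot. */
    static void cc_release_sketch(struct svcxprt_rdma *rdma,
                                  struct list_head *rwctxts,
                                  enum dma_data_direction dir)
    {
            struct llist_node *first = NULL, *last = NULL;
            struct svc_rdma_rw_ctxt *ctxt;

            list_for_each_entry(ctxt, rwctxts, rw_list) {
                    rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
                                        rdma->sc_port_num,
                                        ctxt->rw_sg_table.sgl,
                                        ctxt->rw_nents, dir);
                    /* Thread the contexts into an llist chain by hand. */
                    ctxt->rw_node.next = first;
                    first = &ctxt->rw_node;
                    if (!last)
                            last = first;
            }
            if (first)
                    llist_add_batch(first, last, &rdma->sc_rw_ctxts);
    }
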
201 svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, in svc_rdma_write_info_alloc() argument
207 ibdev_to_node(rdma->sc_cm_id->device)); in svc_rdma_write_info_alloc()
211 info->wi_rdma = rdma; in svc_rdma_write_info_alloc()
213 svc_rdma_cc_init(rdma, &info->wi_cc); in svc_rdma_write_info_alloc()
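
svc_rdma_write_info_alloc() is NUMA-aware: ibdev_to_node() (line 207) resolves the ib_device to the memory node it hangs off, so per-Write state is allocated local to the HCA. A sketch using kzalloc_node(); the svc_rdma_write_info layout beyond wi_rdma and wi_cc (both visible above) is assumed:

    #include <rdma/ib_verbs.h>
    #include <linux/slab.h>
    #include <linux/sunrpc/svc_rdma.h>

    /* Put per-Write state on the RDMA device's own NUMA node. */
    static struct svc_rdma_write_info *
    write_info_alloc_sketch(struct svcxprt_rdma *rdma)
    {
            struct svc_rdma_write_info *info;

            info = kzalloc_node(sizeof(*info), GFP_KERNEL,
                                ibdev_to_node(rdma->sc_cm_id->device));
            if (!info)
                    return NULL;

            info->wi_rdma = rdma;
            svc_rdma_cc_init(rdma, &info->wi_cc);
            return info;
    }
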
238 void svc_rdma_reply_chunk_release(struct svcxprt_rdma *rdma, in svc_rdma_reply_chunk_release() argument
245 svc_rdma_cc_release(rdma, cc, DMA_TO_DEVICE); in svc_rdma_reply_chunk_release()
260 struct svcxprt_rdma *rdma = cq->cq_context; in svc_rdma_reply_done() local
273 svc_xprt_deferred_close(&rdma->sc_xprt); in svc_rdma_reply_done()
285 struct svcxprt_rdma *rdma = cq->cq_context; in svc_rdma_write_done() local
303 svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount); in svc_rdma_write_done()
306 svc_xprt_deferred_close(&rdma->sc_xprt); in svc_rdma_write_done()
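
The reply and Write completion handlers share one pattern: recover the posting context from the ib_cqe embedded in the work completion, return the SQ credits so blocked senders can make progress, and treat anything but IB_WC_SUCCESS as fatal to the connection. A sketch of the Write case; the cc_cqe member name is an assumption:

    #include <rdma/ib_verbs.h>
    #include <linux/sunrpc/svc_rdma.h>

    /* Completion of an RDMA Write chain: credit return plus error policy. */
    static void write_done_sketch(struct ib_cq *cq, struct ib_wc *wc)
    {
            struct svcxprt_rdma *rdma = cq->cq_context;
            struct svc_rdma_chunk_ctxt *cc =
                    container_of(wc->wr_cqe, struct svc_rdma_chunk_ctxt,
                                 cc_cqe);

            /* Give the SQ slots back and wake anyone waiting for them. */
            svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);

            /* A flushed or failed Write means the connection is gone. */
            if (unlikely(wc->status != IB_WC_SUCCESS))
                    svc_xprt_deferred_close(&rdma->sc_xprt);
    }
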
319 struct svcxprt_rdma *rdma = cq->cq_context; in svc_rdma_wc_read_done() local
325 svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount); in svc_rdma_wc_read_done()
333 spin_lock(&rdma->sc_rq_dto_lock); in svc_rdma_wc_read_done()
334 list_add_tail(&ctxt->rc_list, &rdma->sc_read_complete_q); in svc_rdma_wc_read_done()
336 set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags); in svc_rdma_wc_read_done()
337 spin_unlock(&rdma->sc_rq_dto_lock); in svc_rdma_wc_read_done()
338 svc_xprt_enqueue(&rdma->sc_xprt); in svc_rdma_wc_read_done()
351 svc_rdma_cc_release(rdma, cc, DMA_FROM_DEVICE); in svc_rdma_wc_read_done()
352 svc_rdma_recv_ctxt_put(rdma, ctxt); in svc_rdma_wc_read_done()
353 svc_xprt_deferred_close(&rdma->sc_xprt); in svc_rdma_wc_read_done()
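
svc_rdma_wc_read_done() runs in CQ (interrupt) context, so it cannot finish the RPC itself. On success it parks the receive context on sc_read_complete_q under sc_rq_dto_lock, sets XPT_DATA, and svc_xprt_enqueue()s the transport so an svc thread resumes processing; on failure it unmaps the Read contexts, frees the receive context, and closes. A sketch following lines 319-353; the cc_cqe and rc_cc container members are assumptions:

    #include <rdma/ib_verbs.h>
    #include <linux/sunrpc/svc_xprt.h>
    #include <linux/sunrpc/svc_rdma.h>

    /* Hand a completed RDMA Read off to an svc thread for processing. */
    static void read_done_sketch(struct ib_cq *cq, struct ib_wc *wc)
    {
            struct svcxprt_rdma *rdma = cq->cq_context;
            struct svc_rdma_chunk_ctxt *cc =
                    container_of(wc->wr_cqe, struct svc_rdma_chunk_ctxt,
                                 cc_cqe);
            struct svc_rdma_recv_ctxt *ctxt =
                    container_of(cc, struct svc_rdma_recv_ctxt, rc_cc);

            svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);

            if (unlikely(wc->status != IB_WC_SUCCESS))
                    goto flushed;

            /* Defer the rest: queue for an svc thread, kick the xprt. */
            spin_lock(&rdma->sc_rq_dto_lock);
            list_add_tail(&ctxt->rc_list, &rdma->sc_read_complete_q);
            /* Set XPT_DATA under the lock to pair with the dequeue side. */
            set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
            spin_unlock(&rdma->sc_rq_dto_lock);
            svc_xprt_enqueue(&rdma->sc_xprt);
            return;

    flushed:
            /* Unwind: unmap the Read contexts, free the recv ctxt, close. */
            svc_rdma_cc_release(rdma, cc, DMA_FROM_DEVICE);
            svc_rdma_recv_ctxt_put(rdma, ctxt);
            svc_xprt_deferred_close(&rdma->sc_xprt);
    }
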
362 static int svc_rdma_post_chunk_ctxt(struct svcxprt_rdma *rdma, in svc_rdma_post_chunk_ctxt() argument
373 if (cc->cc_sqecount > rdma->sc_sq_depth) in svc_rdma_post_chunk_ctxt()
382 first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp, in svc_rdma_post_chunk_ctxt()
383 rdma->sc_port_num, cqe, first_wr); in svc_rdma_post_chunk_ctxt()
389 &rdma->sc_sq_avail) > 0) { in svc_rdma_post_chunk_ctxt()
391 ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr); in svc_rdma_post_chunk_ctxt()
398 trace_svcrdma_sq_full(rdma, &cc->cc_cid); in svc_rdma_post_chunk_ctxt()
399 atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail); in svc_rdma_post_chunk_ctxt()
400 wait_event(rdma->sc_send_wait, in svc_rdma_post_chunk_ctxt()
401 atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount); in svc_rdma_post_chunk_ctxt()
402 trace_svcrdma_sq_retry(rdma, &cc->cc_cid); in svc_rdma_post_chunk_ctxt()
405 trace_svcrdma_sq_post_err(rdma, &cc->cc_cid, ret); in svc_rdma_post_chunk_ctxt()
406 svc_xprt_deferred_close(&rdma->sc_xprt); in svc_rdma_post_chunk_ctxt()
412 atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail); in svc_rdma_post_chunk_ctxt()
413 wake_up(&rdma->sc_send_wait); in svc_rdma_post_chunk_ctxt()
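
Lines 362-413 implement send-queue flow control. sc_sq_avail counts free SQ entries; a sender reserves cc_sqecount of them with atomic_sub_return(), posts the chain if the reservation stayed positive, and otherwise refunds the reservation and sleeps on sc_send_wait until completions return enough credits (the refund-and-wake at lines 412-413 is that return path, also taken on post failure). The guard at line 373 rejects a chain that could never fit. A sketch of the loop with the trace points left out and the partial-post subtlety simplified away:

    #include <rdma/ib_verbs.h>
    #include <linux/wait.h>
    #include <linux/sunrpc/svc_rdma.h>

    /* Reserve SQ slots and post, or sleep until completions free some. */
    static int post_chunk_sketch(struct svcxprt_rdma *rdma,
                                 struct svc_rdma_chunk_ctxt *cc,
                                 struct ib_send_wr *first_wr)
    {
            const struct ib_send_wr *bad_wr;
            int ret;

            /* A chain deeper than the whole SQ can never succeed. */
            if (cc->cc_sqecount > rdma->sc_sq_depth)
                    return -EINVAL;

            do {
                    if (atomic_sub_return(cc->cc_sqecount,
                                          &rdma->sc_sq_avail) > 0) {
                            ret = ib_post_send(rdma->sc_qp, first_wr,
                                               &bad_wr);
                            if (ret)
                                    break;
                            return 0;
                    }

                    /* Overdrawn: refund, then wait for enough credits. */
                    atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
                    wait_event(rdma->sc_send_wait,
                               atomic_read(&rdma->sc_sq_avail) >
                               cc->cc_sqecount);
            } while (1);

            /* ib_post_send() failed: refund, wake others, close. */
            svc_xprt_deferred_close(&rdma->sc_xprt);
            atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
            wake_up(&rdma->sc_send_wait);
            return ret;
    }
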
475 struct svcxprt_rdma *rdma = info->wi_rdma; in svc_rdma_build_writes() local
491 ctxt = svc_rdma_get_rw_ctxt(rdma, in svc_rdma_build_writes()
498 ret = svc_rdma_rw_ctx_init(rdma, ctxt, offset, seg->rs_handle, in svc_rdma_build_writes()
604 static int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, in svc_rdma_send_write_chunk() argument
617 info = svc_rdma_write_info_alloc(rdma, chunk); in svc_rdma_send_write_chunk()
627 ret = svc_rdma_post_chunk_ctxt(rdma, cc); in svc_rdma_send_write_chunk()
646 int svc_rdma_send_write_list(struct svcxprt_rdma *rdma, in svc_rdma_send_write_list() argument
656 ret = svc_rdma_send_write_chunk(rdma, chunk, xdr); in svc_rdma_send_write_list()
678 int svc_rdma_prepare_reply_chunk(struct svcxprt_rdma *rdma, in svc_rdma_prepare_reply_chunk() argument
691 info->wi_rdma = rdma; in svc_rdma_prepare_reply_chunk()
708 first_wr = rdma_rw_ctx_wrs(&rwc->rw_ctx, rdma->sc_qp, in svc_rdma_prepare_reply_chunk()
709 rdma->sc_port_num, cqe, first_wr); in svc_rdma_prepare_reply_chunk()
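
The WR-chaining idiom at lines 382-383 and 708-709 is what lets a whole transfer reach the hardware in a single ib_post_send(): each rdma_rw_ctx_wrs() call returns its context's first work request with the previously built chain linked behind it, and the cqe argument attaches the completion handler. A sketch of the accumulation loop; iterating a plain list with an rw_list member is an assumption:

    #include <rdma/rw.h>
    #include <linux/sunrpc/svc_rdma.h>

    /* Fold every R/W context's WRs into one chain headed by first_wr. */
    static struct ib_send_wr *
    chain_wrs_sketch(struct svcxprt_rdma *rdma,
                     struct list_head *rwctxts, struct ib_cqe *cqe)
    {
            struct ib_send_wr *first_wr = NULL;
            struct svc_rdma_rw_ctxt *ctxt;

            /* Each call prepends this context's WRs to the chain so far. */
            list_for_each_entry(ctxt, rwctxts, rw_list)
                    first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
                                               rdma->sc_port_num, cqe,
                                               first_wr);
            return first_wr;
    }
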
735 struct svcxprt_rdma *rdma = svc_rdma_rqst_rdma(rqstp); in svc_rdma_build_read_segment() local
744 ctxt = svc_rdma_get_rw_ctxt(rdma, sge_no); in svc_rdma_build_read_segment()
772 ret = svc_rdma_rw_ctx_init(rdma, ctxt, segment->rs_offset, in svc_rdma_build_read_segment()
1115 int svc_rdma_process_read_list(struct svcxprt_rdma *rdma, in svc_rdma_process_read_list() argument
1140 ret = svc_rdma_post_chunk_ctxt(rdma, cc); in svc_rdma_process_read_list()
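
The Read side reuses the same machinery end to end: svc_rdma_build_read_segment() takes a context sized to one segment's page count and points it at the client's {rs_offset, rs_handle}, and svc_rdma_process_read_list() then pushes the accumulated chain through the same svc_rdma_post_chunk_ctxt() credit gate. A rough sketch of the per-segment step; the rs_length field and the elided page-filling step are assumptions:

    #include <linux/sunrpc/svc_rdma.h>

    /* Prepare one RDMA Read: local pages in, client's segment as source. */
    static int build_read_segment_sketch(struct svc_rqst *rqstp,
                                         const struct svc_rdma_segment *segment)
    {
            struct svcxprt_rdma *rdma = svc_rdma_rqst_rdma(rqstp);
            unsigned int sge_no =
                    PAGE_ALIGN(segment->rs_length) >> PAGE_SHIFT;
            struct svc_rdma_rw_ctxt *ctxt;

            ctxt = svc_rdma_get_rw_ctxt(rdma, sge_no);
            if (!ctxt)
                    return -ENOMEM;

            /* ... fill ctxt's scatterlist with rqstp's receive pages ... */

            return svc_rdma_rw_ctx_init(rdma, ctxt, segment->rs_offset,
                                        segment->rs_handle, DMA_FROM_DEVICE);
    }
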