Lines Matching +full:wr +full:-active (net/9p/trans_rdma.c, the 9P RDMA client transport)

1 // SPDX-License-Identifier: GPL-2.0-only
7 * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
8 * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
9 * Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
46 * struct p9_trans_rdma - RDMA transport instance
63 * @req_lock: Protects the active request list
97 * struct p9_rdma_context - Keeps track of in-process WR
100 * @busa: Bus address to unmap when the WR completes
114 * struct p9_rdma_opts - Collection of mount options
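For orientation, here is a sketch of the p9_trans_rdma instance structure that these doc comments annotate, reconstructed from the fields the excerpted functions touch. It is editor-added (hence unnumbered), and the exact field order and types are assumptions:

struct p9_trans_rdma {
	struct rdma_cm_id *cm_id;	/* RDMA CM ID */
	enum {				/* connection state machine, driven */
		P9_RDMA_INIT,		/* by p9_cm_event_handler() below   */
		P9_RDMA_ADDR_RESOLVED,
		P9_RDMA_ROUTE_RESOLVED,
		P9_RDMA_CONNECTED,
		P9_RDMA_FLUSHING,
		P9_RDMA_CLOSING,
		P9_RDMA_CLOSED,
	} state;
	struct ib_pd *pd;		/* protection domain */
	struct ib_qp *qp;		/* queue pair */
	struct ib_cq *cq;		/* one CQ for sends and receives */
	long timeout;			/* CM operation timeout (mount option) */
	bool privport;			/* bind to a reserved port */
	u16 port;
	int sq_depth;			/* send queue depth */
	struct semaphore sq_sem;	/* SQ flow control */
	int rq_depth;			/* receive queue depth */
	struct semaphore rq_sem;	/* RQ flow control */
	atomic_t excess_rc;		/* posted recvs without a request */
	struct sockaddr_in addr;	/* remote peer's address */
	spinlock_t req_lock;		/* protects the active request list */
	struct completion cm_done;	/* CM event wait */
};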
152 struct p9_trans_rdma *rdma = clnt->trans; in p9_rdma_show_options()
154 if (rdma->port != P9_PORT) in p9_rdma_show_options()
155 seq_printf(m, ",port=%u", rdma->port); in p9_rdma_show_options()
156 if (rdma->sq_depth != P9_RDMA_SQ_DEPTH) in p9_rdma_show_options()
157 seq_printf(m, ",sq=%u", rdma->sq_depth); in p9_rdma_show_options()
158 if (rdma->rq_depth != P9_RDMA_RQ_DEPTH) in p9_rdma_show_options()
159 seq_printf(m, ",rq=%u", rdma->rq_depth); in p9_rdma_show_options()
160 if (rdma->timeout != P9_RDMA_TIMEOUT) in p9_rdma_show_options()
161 seq_printf(m, ",timeout=%lu", rdma->timeout); in p9_rdma_show_options()
162 if (rdma->privport) in p9_rdma_show_options()
168 * parse_opts - parse mount options into rdma options structure
170 * @opts: rdma transport-specific structure to parse options into
172 * Returns 0 upon success, -ERRNO upon failure
181 opts->port = P9_PORT; in parse_opts()
182 opts->sq_depth = P9_RDMA_SQ_DEPTH; in parse_opts()
183 opts->rq_depth = P9_RDMA_RQ_DEPTH; in parse_opts()
184 opts->timeout = P9_RDMA_TIMEOUT; in parse_opts()
185 opts->privport = false; in parse_opts()
194 return -ENOMEM; in parse_opts()
214 opts->port = option; in parse_opts()
217 opts->sq_depth = option; in parse_opts()
220 opts->rq_depth = option; in parse_opts()
223 opts->timeout = option; in parse_opts()
226 opts->privport = true; in parse_opts()
233 opts->rq_depth = max(opts->rq_depth, opts->sq_depth); in parse_opts()
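The option names parsed here are the same ones p9_rdma_show_options() prints above, so a mount string looks like trans=rdma,port=5640,sq=16,rq=16,privport. A minimal sketch of the match_token table that parse_opts() plausibly scans with; the token spellings come from show_options, everything else is an assumption:

enum {
	/* options that take an integer argument */
	Opt_port, Opt_rq_depth, Opt_sq_depth, Opt_timeout,
	/* options that take no argument */
	Opt_privport,
	Opt_err,
};

static match_table_t tokens = {
	{Opt_port, "port=%u"},
	{Opt_sq_depth, "sq=%u"},
	{Opt_rq_depth, "rq=%u"},
	{Opt_timeout, "timeout=%u"},
	{Opt_privport, "privport"},
	{Opt_err, NULL},
};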
241 struct p9_client *c = id->context; in p9_cm_event_handler()
242 struct p9_trans_rdma *rdma = c->trans; in p9_cm_event_handler()
243 switch (event->event) { in p9_cm_event_handler()
245 BUG_ON(rdma->state != P9_RDMA_INIT); in p9_cm_event_handler()
246 rdma->state = P9_RDMA_ADDR_RESOLVED; in p9_cm_event_handler()
250 BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED); in p9_cm_event_handler()
251 rdma->state = P9_RDMA_ROUTE_RESOLVED; in p9_cm_event_handler()
255 BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED); in p9_cm_event_handler()
256 rdma->state = P9_RDMA_CONNECTED; in p9_cm_event_handler()
261 rdma->state = P9_RDMA_CLOSED; in p9_cm_event_handler()
262 c->status = Disconnected; in p9_cm_event_handler()
279 c->status = Disconnected; in p9_cm_event_handler()
280 rdma_disconnect(rdma->cm_id); in p9_cm_event_handler()
285 complete(&rdma->cm_done); in p9_cm_event_handler()
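The BUG_ON()s above enforce a strict progression through the connection states. Summarized as a table (the case labels fall between the excerpted lines; the rdma_cm event names are the standard ones and are an assumption here):

/*
 *   event                          state transition
 *   RDMA_CM_EVENT_ADDR_RESOLVED    P9_RDMA_INIT           -> ADDR_RESOLVED
 *   RDMA_CM_EVENT_ROUTE_RESOLVED   P9_RDMA_ADDR_RESOLVED  -> ROUTE_RESOLVED
 *   RDMA_CM_EVENT_ESTABLISHED      P9_RDMA_ROUTE_RESOLVED -> CONNECTED
 *   RDMA_CM_EVENT_DISCONNECTED     any                    -> CLOSED
 *   error events                   client marked Disconnected, then
 *                                  rdma_disconnect(rdma->cm_id)
 *
 * Each event ends at complete(&rdma->cm_done), which wakes the waiter
 * in rdma_create_trans() below.
 */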
292 struct p9_client *client = cq->cq_context; in recv_done()
293 struct p9_trans_rdma *rdma = client->trans; in recv_done()
294 struct p9_rdma_context *c = in recv_done()
295 container_of(wc->wr_cqe, struct p9_rdma_context, cqe); in recv_done()
301 ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize, in recv_done()
302 DMA_FROM_DEVICE); in recv_done()
304 if (wc->status != IB_WC_SUCCESS) in recv_done()
307 c->rc.size = wc->byte_len; in recv_done()
308 err = p9_parse_header(&c->rc, NULL, NULL, &tag, 1); in recv_done()
318 if (unlikely(req->rc.sdata)) { in recv_done()
323 req->rc.size = c->rc.size; in recv_done()
324 req->rc.sdata = c->rc.sdata; in recv_done()
328 up(&rdma->rq_sem); in recv_done()
333 p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n", in recv_done()
334 req, err, wc->status); in recv_done()
335 rdma->state = P9_RDMA_FLUSHING; in recv_done()
336 client->status = Disconnected; in recv_done()
343 struct p9_client *client = cq->cq_context; in send_done()
344 struct p9_trans_rdma *rdma = client->trans; in send_done()
345 struct p9_rdma_context *c = in send_done()
346 container_of(wc->wr_cqe, struct p9_rdma_context, cqe); in send_done()
348 ib_dma_unmap_single(rdma->cm_id->device, in send_done()
349 c->busa, c->req->tc.size, in send_done()
350 DMA_TO_DEVICE); in send_done()
351 up(&rdma->sq_sem); in send_done()
352 p9_req_put(client, c->req); in send_done()
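Both completion handlers recover their per-WR context with container_of() on wc->wr_cqe. A sketch of the context structure this pattern implies, matching the @busa doc comment above (the union layout is an assumption):

struct p9_rdma_context {
	struct ib_cqe cqe;		/* ->done = recv_done or send_done */
	dma_addr_t busa;		/* bus address to unmap on completion */
	union {
		struct p9_req_t *req;	/* send side: request to put */
		struct p9_fcall rc;	/* recv side: posted reply buffer */
	};
};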
358 p9_debug(P9_DEBUG_ERROR, "QP event %d context %p\n", in qp_event_handler()
359 event->event, context); in qp_event_handler()
367 if (rdma->qp && !IS_ERR(rdma->qp)) in rdma_destroy_trans()
368 ib_destroy_qp(rdma->qp); in rdma_destroy_trans()
370 if (rdma->pd && !IS_ERR(rdma->pd)) in rdma_destroy_trans()
371 ib_dealloc_pd(rdma->pd); in rdma_destroy_trans()
373 if (rdma->cq && !IS_ERR(rdma->cq)) in rdma_destroy_trans()
374 ib_free_cq(rdma->cq); in rdma_destroy_trans()
376 if (rdma->cm_id && !IS_ERR(rdma->cm_id)) in rdma_destroy_trans()
377 rdma_destroy_id(rdma->cm_id); in rdma_destroy_trans()
385 struct p9_trans_rdma *rdma = client->trans; in post_recv()
386 struct ib_recv_wr wr; in post_recv()
390 c->busa = ib_dma_map_single(rdma->cm_id->device, in post_recv()
391 c->rc.sdata, client->msize, in post_recv()
392 DMA_FROM_DEVICE); in post_recv()
393 if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) in post_recv()
396 c->cqe.done = recv_done; in post_recv()
398 sge.addr = c->busa; in post_recv()
399 sge.length = client->msize; in post_recv()
400 sge.lkey = rdma->pd->local_dma_lkey; in post_recv()
402 wr.next = NULL; in post_recv()
403 wr.wr_cqe = &c->cqe; in post_recv()
404 wr.sg_list = &sge; in post_recv()
405 wr.num_sge = 1; in post_recv()
407 ret = ib_post_recv(rdma->qp, &wr, NULL); in post_recv()
409 ib_dma_unmap_single(rdma->cm_id->device, c->busa, in post_recv()
410 client->msize, DMA_FROM_DEVICE); in post_recv()
415 return -EIO; in post_recv()
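post_recv() is paired with RQ crediting: rq_sem starts at rq_depth (see alloc_rdma() below), a credit is taken in rdma_request() before a reply buffer is posted, and recv_done() returns it once the buffer is consumed. A condensed sketch of the three cooperating sites, stitched together from the excerpted lines:

sema_init(&rdma->rq_sem, rdma->rq_depth);	/* alloc_rdma(): RQ credits */

if (down_interruptible(&rdma->rq_sem))		/* rdma_request(): one      */
	return -EINTR;				/* credit per posted buffer */
post_recv(client, rpl_context);

up(&rdma->rq_sem);				/* recv_done(): release     */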
420 struct p9_trans_rdma *rdma = client->trans; in rdma_request()
421 struct ib_send_wr wr; in rdma_request()
430 * Since there is no way to "un-post" it, we remember it and skip in rdma_request()
431 * post_recv() for the next request. in rdma_request()
436 if (unlikely(atomic_read(&rdma->excess_rc) > 0)) { in rdma_request()
437 if ((atomic_sub_return(1, &rdma->excess_rc) >= 0)) { in rdma_request()
439 p9_fcall_fini(&req->rc); in rdma_request()
440 req->rc.sdata = NULL; in rdma_request()
444 atomic_inc(&rdma->excess_rc); in rdma_request()
451 err = -ENOMEM; in rdma_request()
454 rpl_context->rc.sdata = req->rc.sdata; in rdma_request()
463 if (down_interruptible(&rdma->rq_sem)) { in rdma_request()
464 err = -EINTR; in rdma_request()
474 req->rc.sdata = NULL; in rdma_request()
480 err = -ENOMEM; in rdma_request()
483 c->req = req; in rdma_request()
485 c->busa = ib_dma_map_single(rdma->cm_id->device, in rdma_request()
486 c->req->tc.sdata, c->req->tc.size, in rdma_request()
487 DMA_TO_DEVICE); in rdma_request()
488 if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) { in rdma_request()
489 err = -EIO; in rdma_request()
493 c->cqe.done = send_done; in rdma_request()
495 sge.addr = c->busa; in rdma_request()
496 sge.length = c->req->tc.size; in rdma_request()
497 sge.lkey = rdma->pd->local_dma_lkey; in rdma_request()
499 wr.next = NULL; in rdma_request()
500 wr.wr_cqe = &c->cqe; in rdma_request()
501 wr.opcode = IB_WR_SEND; in rdma_request()
502 wr.send_flags = IB_SEND_SIGNALED; in rdma_request()
503 wr.sg_list = &sge; in rdma_request()
504 wr.num_sge = 1; in rdma_request()
506 if (down_interruptible(&rdma->sq_sem)) { in rdma_request()
507 err = -EINTR; in rdma_request()
515 WRITE_ONCE(req->status, REQ_STATUS_SENT); in rdma_request()
516 err = ib_post_send(rdma->qp, &wr, NULL); in rdma_request()
524 ib_dma_unmap_single(rdma->cm_id->device, c->busa, in rdma_request()
525 c->req->tc.size, DMA_TO_DEVICE); in rdma_request()
528 WRITE_ONCE(req->status, REQ_STATUS_ERROR); in rdma_request()
535 atomic_inc(&rdma->excess_rc); in rdma_request()
541 spin_lock_irqsave(&rdma->req_lock, flags); in rdma_request()
542 if (err != -EINTR && rdma->state < P9_RDMA_CLOSING) { in rdma_request()
543 rdma->state = P9_RDMA_CLOSING; in rdma_request()
544 spin_unlock_irqrestore(&rdma->req_lock, flags); in rdma_request()
545 rdma_disconnect(rdma->cm_id); in rdma_request()
547 spin_unlock_irqrestore(&rdma->req_lock, flags); in rdma_request()
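Send-side flow control mirrors the receive side sketched earlier: sq_sem is debited before ib_post_send() and credited back in send_done(), bounding outstanding sends to the SQ depth. The error unwind is scattered across the fragments above; consolidated as a sketch, with label names and the kfree() placement assumed:

 dma_unmap:				/* ib_post_send() failed */
	ib_dma_unmap_single(rdma->cm_id->device, c->busa,
			    c->req->tc.size, DMA_TO_DEVICE);
 send_error:
	WRITE_ONCE(req->status, REQ_STATUS_ERROR);
	kfree(c);
	atomic_inc(&rdma->excess_rc);	/* recv was posted, send never was */
	return err;

 recv_error:
	kfree(rpl_context);
	/* then the req_lock section shown above: move to P9_RDMA_CLOSING
	 * and rdma_disconnect() unless the error was just -EINTR */
	return err;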
558 rdma = client->trans; in rdma_close()
562 client->status = Disconnected; in rdma_close()
563 rdma_disconnect(rdma->cm_id); in rdma_close()
568 * alloc_rdma - Allocate and initialize the rdma transport structure
579 rdma->port = opts->port; in alloc_rdma()
580 rdma->privport = opts->privport; in alloc_rdma()
581 rdma->sq_depth = opts->sq_depth; in alloc_rdma()
582 rdma->rq_depth = opts->rq_depth; in alloc_rdma()
583 rdma->timeout = opts->timeout; in alloc_rdma()
584 spin_lock_init(&rdma->req_lock); in alloc_rdma()
585 init_completion(&rdma->cm_done); in alloc_rdma()
586 sema_init(&rdma->sq_sem, rdma->sq_depth); in alloc_rdma()
587 sema_init(&rdma->rq_sem, rdma->rq_depth); in alloc_rdma()
588 atomic_set(&rdma->excess_rc, 0); in alloc_rdma()
606 struct p9_trans_rdma *rdma = client->trans; in rdma_cancelled()
607 atomic_inc(&rdma->excess_rc); in rdma_cancelled()
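A sketch of the full function around these two lines, assuming the standard ->cancelled transport-hook signature: it runs when a flushed request will still receive a reply, so the reply buffer already posted for it becomes an excess receive context, absorbed later by the check at the top of rdma_request().

static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req)
{
	struct p9_trans_rdma *rdma = client->trans;

	atomic_inc(&rdma->excess_rc);	/* posted recv now has no owner */
	return 0;
}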
617 int port, err = -EINVAL; in p9_rdma_bind_privport()
619 for (port = P9_DEF_MAX_RESVPORT; port >= P9_DEF_MIN_RESVPORT; port--) { in p9_rdma_bind_privport()
620 cl.sin_port = htons((ushort)port); in p9_rdma_bind_privport()
621 err = rdma_bind_addr(rdma->cm_id, (struct sockaddr *)&cl); in p9_rdma_bind_privport()
622 if (err != -EADDRINUSE) in p9_rdma_bind_privport()
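The loop walks the reserved port range downward until a bind succeeds or fails with something other than -EADDRINUSE. The address being bound is declared above the loop and elided from this listing; plausibly a wildcard IPv4 sockaddr, sketched here as an editor reconstruction:

struct sockaddr_in cl = {
	.sin_family      = AF_INET,
	.sin_addr.s_addr = htonl(INADDR_ANY),	/* any local interface */
};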
629 * rdma_create_trans - Transport method for creating a transport instance
644 return -EINVAL; in rdma_create_trans()
654 return -ENOMEM; in rdma_create_trans()
657 rdma->cm_id = rdma_create_id(&init_net, p9_cm_event_handler, client, in rdma_create_trans()
658 RDMA_PS_TCP, IB_QPT_RC); in rdma_create_trans()
659 if (IS_ERR(rdma->cm_id)) in rdma_create_trans()
663 client->trans = rdma; in rdma_create_trans()
669 pr_err("%s (%d): problem binding to privport: %d\n", in rdma_create_trans()
670 __func__, task_pid_nr(current), -err); in rdma_create_trans()
676 rdma->addr.sin_family = AF_INET; in rdma_create_trans()
677 rdma->addr.sin_addr.s_addr = in_aton(addr); in rdma_create_trans()
678 rdma->addr.sin_port = htons(opts.port); in rdma_create_trans()
679 err = rdma_resolve_addr(rdma->cm_id, NULL, in rdma_create_trans()
680 (struct sockaddr *)&rdma->addr, in rdma_create_trans()
681 rdma->timeout); in rdma_create_trans()
684 err = wait_for_completion_interruptible(&rdma->cm_done); in rdma_create_trans()
685 if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED)) in rdma_create_trans()
689 err = rdma_resolve_route(rdma->cm_id, rdma->timeout); in rdma_create_trans()
692 err = wait_for_completion_interruptible(&rdma->cm_done); in rdma_create_trans()
693 if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED)) in rdma_create_trans()
697 rdma->cq = ib_alloc_cq_any(rdma->cm_id->device, client, in rdma_create_trans()
698 opts.sq_depth + opts.rq_depth + 1, in rdma_create_trans()
699 IB_POLL_SOFTIRQ); in rdma_create_trans()
700 if (IS_ERR(rdma->cq)) in rdma_create_trans()
704 rdma->pd = ib_alloc_pd(rdma->cm_id->device, 0); in rdma_create_trans()
705 if (IS_ERR(rdma->pd)) in rdma_create_trans()
718 qp_attr.send_cq = rdma->cq; in rdma_create_trans()
719 qp_attr.recv_cq = rdma->cq; in rdma_create_trans()
720 err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr); in rdma_create_trans()
723 rdma->qp = rdma->cm_id->qp; in rdma_create_trans()
731 err = rdma_connect(rdma->cm_id, &conn_param); in rdma_create_trans()
734 err = wait_for_completion_interruptible(&rdma->cm_done); in rdma_create_trans()
735 if (err || (rdma->state != P9_RDMA_CONNECTED)) in rdma_create_trans()
738 client->status = Connected; in rdma_create_trans()
744 return -ENOTCONN; in rdma_create_trans()
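For reference, the whole bring-up sequence above in one place, followed by a sketch of the ib_qp_init_attr setup that falls between lines 705 and 718 of the listing. The field values here are assumptions, inferred from the mount options and the single shared CQ:

/* rdma_create_id()     -> CM id bound to p9_cm_event_handler
 * rdma_resolve_addr()  -> wait cm_done: P9_RDMA_ADDR_RESOLVED
 * rdma_resolve_route() -> wait cm_done: P9_RDMA_ROUTE_RESOLVED
 * ib_alloc_cq_any() + ib_alloc_pd()
 * rdma_create_qp()     -> rdma->qp = rdma->cm_id->qp
 * rdma_connect()       -> wait cm_done: P9_RDMA_CONNECTED
 */
struct ib_qp_init_attr qp_attr;

memset(&qp_attr, 0, sizeof(qp_attr));
qp_attr.event_handler	 = qp_event_handler;
qp_attr.qp_context	 = client;
qp_attr.cap.max_send_wr	 = opts.sq_depth;	/* size from mount options */
qp_attr.cap.max_recv_wr	 = opts.rq_depth;
qp_attr.cap.max_send_sge = P9_RDMA_SEND_SGE;	/* assumed constants */
qp_attr.cap.max_recv_sge = P9_RDMA_RECV_SGE;
qp_attr.sq_sig_type	 = IB_SIGNAL_REQ_WR;
qp_attr.qp_type		 = IB_QPT_RC;
qp_attr.send_cq		 = rdma->cq;		/* line 718 above */
qp_attr.recv_cq		 = rdma->cq;		/* line 719 above */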
762 * p9_trans_rdma_init - Register the 9P RDMA transport driver
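A sketch of what this initializer plausibly does: wire the functions excerpted above into a p9_trans_module and register it. The exact field set is an assumption:

static struct p9_trans_module p9_rdma_trans = {
	.name		= "rdma",
	.owner		= THIS_MODULE,
	.create		= rdma_create_trans,
	.close		= rdma_close,
	.request	= rdma_request,
	.cancelled	= rdma_cancelled,
	.show_options	= p9_rdma_show_options,
};

static int __init p9_trans_rdma_init(void)
{
	v9fs_register_trans(&p9_rdma_trans);
	return 0;
}

v9fs_register_trans() is what makes the transport selectable with trans=rdma at mount time.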