Lines Matching full:queue
73 struct nvme_rdma_queue *queue; member
159 static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue) in nvme_rdma_queue_idx() argument
161 return queue - queue->ctrl->queues; in nvme_rdma_queue_idx()
164 static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue) in nvme_rdma_poll_queue() argument
166 return nvme_rdma_queue_idx(queue) > in nvme_rdma_poll_queue()
167 queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_rdma_poll_queue()
168 queue->ctrl->io_queues[HCTX_TYPE_READ]; in nvme_rdma_poll_queue()
171 static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue) in nvme_rdma_inline_data_size() argument
173 return queue->cmnd_capsule_len - sizeof(struct nvme_command); in nvme_rdma_inline_data_size()
223 * Bind the CQEs (post recv buffers) DMA mapping to the RDMA queue in nvme_rdma_alloc_ring()
225 * will issue error recovery and queue re-creation. in nvme_rdma_alloc_ring()
246 static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue) in nvme_rdma_wait_for_cm() argument
250 ret = wait_for_completion_interruptible(&queue->cm_done); in nvme_rdma_wait_for_cm()
253 WARN_ON_ONCE(queue->cm_error > 0); in nvme_rdma_wait_for_cm()
254 return queue->cm_error; in nvme_rdma_wait_for_cm()
257 static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor) in nvme_rdma_create_qp() argument
259 struct nvme_rdma_device *dev = queue->device; in nvme_rdma_create_qp()
266 init_attr.cap.max_send_wr = factor * queue->queue_size + 1; in nvme_rdma_create_qp()
268 init_attr.cap.max_recv_wr = queue->queue_size + 1; in nvme_rdma_create_qp()
273 init_attr.send_cq = queue->ib_cq; in nvme_rdma_create_qp()
274 init_attr.recv_cq = queue->ib_cq; in nvme_rdma_create_qp()
275 if (queue->pi_support) in nvme_rdma_create_qp()
277 init_attr.qp_context = queue; in nvme_rdma_create_qp()
279 ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr); in nvme_rdma_create_qp()
281 queue->qp = queue->cm_id->qp; in nvme_rdma_create_qp()
300 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx]; in nvme_rdma_init_request() local
308 if (queue->pi_support) in nvme_rdma_init_request()
313 req->queue = queue; in nvme_rdma_init_request()
323 struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_rdma_init_hctx() local
327 hctx->driver_data = queue; in nvme_rdma_init_hctx()
335 struct nvme_rdma_queue *queue = &ctrl->queues[0]; in nvme_rdma_init_admin_hctx() local
339 hctx->driver_data = queue; in nvme_rdma_init_admin_hctx()
413 static void nvme_rdma_free_cq(struct nvme_rdma_queue *queue) in nvme_rdma_free_cq() argument
415 if (nvme_rdma_poll_queue(queue)) in nvme_rdma_free_cq()
416 ib_free_cq(queue->ib_cq); in nvme_rdma_free_cq()
418 ib_cq_pool_put(queue->ib_cq, queue->cq_size); in nvme_rdma_free_cq()
421 static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue) in nvme_rdma_destroy_queue_ib() argument
426 if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags)) in nvme_rdma_destroy_queue_ib()
429 dev = queue->device; in nvme_rdma_destroy_queue_ib()
432 if (queue->pi_support) in nvme_rdma_destroy_queue_ib()
433 ib_mr_pool_destroy(queue->qp, &queue->qp->sig_mrs); in nvme_rdma_destroy_queue_ib()
434 ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs); in nvme_rdma_destroy_queue_ib()
441 ib_destroy_qp(queue->qp); in nvme_rdma_destroy_queue_ib()
442 nvme_rdma_free_cq(queue); in nvme_rdma_destroy_queue_ib()
444 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size, in nvme_rdma_destroy_queue_ib()
463 struct nvme_rdma_queue *queue) in nvme_rdma_create_cq() argument
465 int ret, comp_vector, idx = nvme_rdma_queue_idx(queue); in nvme_rdma_create_cq()
468 * Spread I/O queues' completion vectors according to their queue index. in nvme_rdma_create_cq()
474 if (nvme_rdma_poll_queue(queue)) in nvme_rdma_create_cq()
475 queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size, in nvme_rdma_create_cq()
478 queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size, in nvme_rdma_create_cq()
481 if (IS_ERR(queue->ib_cq)) { in nvme_rdma_create_cq()
482 ret = PTR_ERR(queue->ib_cq); in nvme_rdma_create_cq()
489 static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue) in nvme_rdma_create_queue_ib() argument
496 queue->device = nvme_rdma_find_get_device(queue->cm_id); in nvme_rdma_create_queue_ib()
497 if (!queue->device) { in nvme_rdma_create_queue_ib()
498 dev_err(queue->cm_id->device->dev.parent, in nvme_rdma_create_queue_ib()
502 ibdev = queue->device->dev; in nvme_rdma_create_queue_ib()
505 queue->cq_size = cq_factor * queue->queue_size + 1; in nvme_rdma_create_queue_ib()
507 ret = nvme_rdma_create_cq(ibdev, queue); in nvme_rdma_create_queue_ib()
511 ret = nvme_rdma_create_qp(queue, send_wr_factor); in nvme_rdma_create_queue_ib()
515 queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size, in nvme_rdma_create_queue_ib()
517 if (!queue->rsp_ring) { in nvme_rdma_create_queue_ib()
527 pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev, queue->pi_support) + 1; in nvme_rdma_create_queue_ib()
528 ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs, in nvme_rdma_create_queue_ib()
529 queue->queue_size, in nvme_rdma_create_queue_ib()
533 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_create_queue_ib()
535 queue->queue_size, nvme_rdma_queue_idx(queue)); in nvme_rdma_create_queue_ib()
539 if (queue->pi_support) { in nvme_rdma_create_queue_ib()
540 ret = ib_mr_pool_init(queue->qp, &queue->qp->sig_mrs, in nvme_rdma_create_queue_ib()
541 queue->queue_size, IB_MR_TYPE_INTEGRITY, in nvme_rdma_create_queue_ib()
544 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_create_queue_ib()
546 queue->queue_size, nvme_rdma_queue_idx(queue)); in nvme_rdma_create_queue_ib()
551 set_bit(NVME_RDMA_Q_TR_READY, &queue->flags); in nvme_rdma_create_queue_ib()
556 ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs); in nvme_rdma_create_queue_ib()
558 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size, in nvme_rdma_create_queue_ib()
561 rdma_destroy_qp(queue->cm_id); in nvme_rdma_create_queue_ib()
563 nvme_rdma_free_cq(queue); in nvme_rdma_create_queue_ib()
565 nvme_rdma_dev_put(queue->device); in nvme_rdma_create_queue_ib()
572 struct nvme_rdma_queue *queue; in nvme_rdma_alloc_queue() local
576 queue = &ctrl->queues[idx]; in nvme_rdma_alloc_queue()
577 mutex_init(&queue->queue_lock); in nvme_rdma_alloc_queue()
578 queue->ctrl = ctrl; in nvme_rdma_alloc_queue()
580 queue->pi_support = true; in nvme_rdma_alloc_queue()
582 queue->pi_support = false; in nvme_rdma_alloc_queue()
583 init_completion(&queue->cm_done); in nvme_rdma_alloc_queue()
586 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; in nvme_rdma_alloc_queue()
588 queue->cmnd_capsule_len = sizeof(struct nvme_command); in nvme_rdma_alloc_queue()
590 queue->queue_size = queue_size; in nvme_rdma_alloc_queue()
592 queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue, in nvme_rdma_alloc_queue()
594 if (IS_ERR(queue->cm_id)) { in nvme_rdma_alloc_queue()
596 "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id)); in nvme_rdma_alloc_queue()
597 ret = PTR_ERR(queue->cm_id); in nvme_rdma_alloc_queue()
604 queue->cm_error = -ETIMEDOUT; in nvme_rdma_alloc_queue()
605 ret = rdma_resolve_addr(queue->cm_id, src_addr, in nvme_rdma_alloc_queue()
614 ret = nvme_rdma_wait_for_cm(queue); in nvme_rdma_alloc_queue()
621 set_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags); in nvme_rdma_alloc_queue()
626 rdma_destroy_id(queue->cm_id); in nvme_rdma_alloc_queue()
627 nvme_rdma_destroy_queue_ib(queue); in nvme_rdma_alloc_queue()
629 mutex_destroy(&queue->queue_lock); in nvme_rdma_alloc_queue()
633 static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) in __nvme_rdma_stop_queue() argument
635 rdma_disconnect(queue->cm_id); in __nvme_rdma_stop_queue()
636 ib_drain_qp(queue->qp); in __nvme_rdma_stop_queue()
639 static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) in nvme_rdma_stop_queue() argument
641 if (!test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) in nvme_rdma_stop_queue()
644 mutex_lock(&queue->queue_lock); in nvme_rdma_stop_queue()
645 if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) in nvme_rdma_stop_queue()
646 __nvme_rdma_stop_queue(queue); in nvme_rdma_stop_queue()
647 mutex_unlock(&queue->queue_lock); in nvme_rdma_stop_queue()
650 static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) in nvme_rdma_free_queue() argument
652 if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) in nvme_rdma_free_queue()
655 rdma_destroy_id(queue->cm_id); in nvme_rdma_free_queue()
656 nvme_rdma_destroy_queue_ib(queue); in nvme_rdma_free_queue()
657 mutex_destroy(&queue->queue_lock); in nvme_rdma_free_queue()
678 struct nvme_rdma_queue *queue = &ctrl->queues[idx]; in nvme_rdma_start_queue() local
687 set_bit(NVME_RDMA_Q_LIVE, &queue->flags); in nvme_rdma_start_queue()
689 if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) in nvme_rdma_start_queue()
690 __nvme_rdma_stop_queue(queue); in nvme_rdma_start_queue()
692 "failed to connect queue: %d ret=%d\n", idx, ret); in nvme_rdma_start_queue()
802 * Bind the async event SQE DMA mapping to the admin queue lifetime. in nvme_rdma_configure_admin_queue()
804 * error recovery and queue re-creation. in nvme_rdma_configure_admin_queue()
881 * queue number might have changed. in nvme_rdma_configure_io_queues()
1042 "ctrl sqsize %u > max queue size %u, clamping down\n", in nvme_rdma_setup_ctrl()
1170 struct nvme_rdma_queue *queue = wc->qp->qp_context; in nvme_rdma_wr_error() local
1171 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_wr_error()
1198 static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue, in nvme_rdma_inv_rkey() argument
1212 return ib_post_send(queue->qp, &wr, NULL); in nvme_rdma_inv_rkey()
1231 static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue, in nvme_rdma_unmap_data() argument
1235 struct nvme_rdma_device *dev = queue->device; in nvme_rdma_unmap_data()
1237 struct list_head *pool = &queue->qp->rdma_mrs; in nvme_rdma_unmap_data()
1243 pool = &queue->qp->sig_mrs; in nvme_rdma_unmap_data()
1246 ib_mr_pool_put(queue->qp, pool, req->mr); in nvme_rdma_unmap_data()
1264 static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue, in nvme_rdma_map_sg_inline() argument
1277 sge->lkey = queue->device->pd->local_dma_lkey; in nvme_rdma_map_sg_inline()
1282 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); in nvme_rdma_map_sg_inline()
1290 static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue, in nvme_rdma_map_sg_single() argument
1297 put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key); in nvme_rdma_map_sg_single()
1302 static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue, in nvme_rdma_map_sg_fr() argument
1309 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs); in nvme_rdma_map_sg_fr()
1320 ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr); in nvme_rdma_map_sg_fr()
1406 static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue, in nvme_rdma_map_sg_pi() argument
1420 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs); in nvme_rdma_map_sg_pi()
1459 ib_mr_pool_put(queue->qp, &queue->qp->sig_mrs, req->mr); in nvme_rdma_map_sg_pi()
1525 static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, in nvme_rdma_map_data() argument
1529 struct nvme_rdma_device *dev = queue->device; in nvme_rdma_map_data()
1547 ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count); in nvme_rdma_map_data()
1552 if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) && in nvme_rdma_map_data()
1553 queue->ctrl->use_inline_data && in nvme_rdma_map_data()
1555 nvme_rdma_inline_data_size(queue)) { in nvme_rdma_map_data()
1556 ret = nvme_rdma_map_sg_inline(queue, req, c, count); in nvme_rdma_map_data()
1561 ret = nvme_rdma_map_sg_single(queue, req, c); in nvme_rdma_map_data()
1566 ret = nvme_rdma_map_sg_fr(queue, req, c, count); in nvme_rdma_map_data()
1591 static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, in nvme_rdma_post_send() argument
1600 sge->lkey = queue->device->pd->local_dma_lkey; in nvme_rdma_post_send()
1614 ret = ib_post_send(queue->qp, first, NULL); in nvme_rdma_post_send()
1616 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_post_send()
1622 static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue, in nvme_rdma_post_recv() argument
1631 list.lkey = queue->device->pd->local_dma_lkey; in nvme_rdma_post_recv()
1640 ret = ib_post_recv(queue->qp, &wr, NULL); in nvme_rdma_post_recv()
1642 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_post_recv()
1648 static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue) in nvme_rdma_tagset() argument
1650 u32 queue_idx = nvme_rdma_queue_idx(queue); in nvme_rdma_tagset()
1653 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_rdma_tagset()
1654 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_rdma_tagset()
1666 struct nvme_rdma_queue *queue = &ctrl->queues[0]; in nvme_rdma_submit_async_event() local
1667 struct ib_device *dev = queue->device->dev; in nvme_rdma_submit_async_event()
1686 ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL); in nvme_rdma_submit_async_event()
1690 static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue, in nvme_rdma_process_nvme_rsp() argument
1696 rq = nvme_find_rq(nvme_rdma_tagset(queue), cqe->command_id); in nvme_rdma_process_nvme_rsp()
1698 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1700 cqe->command_id, queue->qp->qp_num); in nvme_rdma_process_nvme_rsp()
1701 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1712 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1715 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1720 ret = nvme_rdma_inv_rkey(queue, req); in nvme_rdma_process_nvme_rsp()
1722 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1725 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1738 struct nvme_rdma_queue *queue = wc->qp->qp_context; in nvme_rdma_recv_done() local
1739 struct ib_device *ibdev = queue->device->dev; in nvme_rdma_recv_done()
1750 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_recv_done()
1752 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_recv_done()
1759 * survive any kind of queue freeze and often don't respond to in nvme_rdma_recv_done()
1763 if (unlikely(nvme_is_aen_req(nvme_rdma_queue_idx(queue), in nvme_rdma_recv_done()
1765 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, in nvme_rdma_recv_done()
1768 nvme_rdma_process_nvme_rsp(queue, cqe, wc); in nvme_rdma_recv_done()
1771 nvme_rdma_post_recv(queue, qe); in nvme_rdma_recv_done()
1774 static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue) in nvme_rdma_conn_established() argument
1778 for (i = 0; i < queue->queue_size; i++) { in nvme_rdma_conn_established()
1779 ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]); in nvme_rdma_conn_established()
1787 static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue, in nvme_rdma_conn_rejected() argument
1790 struct rdma_cm_id *cm_id = queue->cm_id; in nvme_rdma_conn_rejected()
1802 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_conn_rejected()
1806 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_conn_rejected()
1813 static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue) in nvme_rdma_addr_resolved() argument
1815 struct nvme_ctrl *ctrl = &queue->ctrl->ctrl; in nvme_rdma_addr_resolved()
1818 ret = nvme_rdma_create_queue_ib(queue); in nvme_rdma_addr_resolved()
1823 rdma_set_service_type(queue->cm_id, ctrl->opts->tos); in nvme_rdma_addr_resolved()
1824 ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CM_TIMEOUT_MS); in nvme_rdma_addr_resolved()
1827 queue->cm_error); in nvme_rdma_addr_resolved()
1834 nvme_rdma_destroy_queue_ib(queue); in nvme_rdma_addr_resolved()
1838 static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue) in nvme_rdma_route_resolved() argument
1840 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_route_resolved()
1845 param.qp_num = queue->qp->qp_num; in nvme_rdma_route_resolved()
1848 param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom; in nvme_rdma_route_resolved()
1856 priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue)); in nvme_rdma_route_resolved()
1858 * set the admin queue depth to the minimum size in nvme_rdma_route_resolved()
1870 priv.hrqsize = cpu_to_le16(queue->queue_size); in nvme_rdma_route_resolved()
1871 priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize); in nvme_rdma_route_resolved()
1872 /* cntlid should only be set when creating an I/O queue */ in nvme_rdma_route_resolved()
1876 ret = rdma_connect_locked(queue->cm_id, ¶m); in nvme_rdma_route_resolved()
1889 struct nvme_rdma_queue *queue = cm_id->context; in nvme_rdma_cm_handler() local
1892 dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n", in nvme_rdma_cm_handler()
1898 cm_error = nvme_rdma_addr_resolved(queue); in nvme_rdma_cm_handler()
1901 cm_error = nvme_rdma_route_resolved(queue); in nvme_rdma_cm_handler()
1904 queue->cm_error = nvme_rdma_conn_established(queue); in nvme_rdma_cm_handler()
1906 complete(&queue->cm_done); in nvme_rdma_cm_handler()
1909 cm_error = nvme_rdma_conn_rejected(queue, ev); in nvme_rdma_cm_handler()
1915 dev_dbg(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1922 dev_dbg(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1924 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_cm_handler()
1930 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1932 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_cm_handler()
1937 queue->cm_error = cm_error; in nvme_rdma_cm_handler()
1938 complete(&queue->cm_done); in nvme_rdma_cm_handler()
1947 struct nvme_rdma_queue *queue = req->queue; in nvme_rdma_complete_timed_out() local
1949 nvme_rdma_stop_queue(queue); in nvme_rdma_complete_timed_out()
1956 struct nvme_rdma_queue *queue = req->queue; in nvme_rdma_timeout() local
1957 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_timeout()
1959 int qid = nvme_rdma_queue_idx(queue); in nvme_rdma_timeout()
1995 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_rdma_queue_rq()
1996 struct nvme_rdma_queue *queue = hctx->driver_data; in nvme_rdma_queue_rq() local
2002 bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags); in nvme_rdma_queue_rq()
2008 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) in nvme_rdma_queue_rq()
2009 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); in nvme_rdma_queue_rq()
2011 dev = queue->device->dev; in nvme_rdma_queue_rq()
2030 queue->pi_support && in nvme_rdma_queue_rq()
2038 err = nvme_rdma_map_data(queue, rq, c); in nvme_rdma_queue_rq()
2040 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_queue_rq()
2050 err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, in nvme_rdma_queue_rq()
2058 nvme_rdma_unmap_data(queue, rq); in nvme_rdma_queue_rq()
2075 struct nvme_rdma_queue *queue = hctx->driver_data; in nvme_rdma_poll() local
2077 return ib_process_cq_direct(queue->ib_cq, -1); in nvme_rdma_poll()
2114 struct nvme_rdma_queue *queue = req->queue; in nvme_rdma_complete_rq() local
2115 struct ib_device *ibdev = queue->device->dev; in nvme_rdma_complete_rq()
2120 nvme_rdma_unmap_data(queue, rq); in nvme_rdma_complete_rq()
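
The helpers matched above at nvme_rdma_queue_idx(), nvme_rdma_poll_queue() and nvme_rdma_inline_data_size() carry the only self-contained arithmetic in this listing: the queue index is derived by pointer subtraction from the controller's queue array, indices past the default + read sets are treated as poll queues, and the inline data capacity is the command capsule length minus the size of the command itself. The following is a minimal standalone C sketch of that arithmetic, not driver code: the sketch_* struct layout, the fixed array of 8 queues, and the example ioccsz value of 260 are assumptions for illustration only.

/* Standalone sketch of the queue-index and inline-size arithmetic above. */
#include <stdio.h>
#include <stddef.h>

struct sketch_cmd { unsigned char bytes[64]; };  /* stand-in for struct nvme_command (64 bytes) */

struct sketch_ctrl;

struct sketch_queue {
	struct sketch_ctrl *ctrl;
	size_t cmnd_capsule_len;
};

struct sketch_ctrl {
	struct sketch_queue queues[8];
	int io_queues_default;
	int io_queues_read;
};

/* Index by pointer subtraction from the controller's queue array. */
static int sketch_queue_idx(struct sketch_queue *queue)
{
	return queue - queue->ctrl->queues;
}

/* Indices beyond the default + read sets are classified as poll queues. */
static int sketch_poll_queue(struct sketch_queue *queue)
{
	return sketch_queue_idx(queue) >
	       queue->ctrl->io_queues_default + queue->ctrl->io_queues_read;
}

/* Inline data capacity: capsule length minus the command itself. */
static size_t sketch_inline_data_size(struct sketch_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct sketch_cmd);
}

int main(void)
{
	struct sketch_ctrl ctrl = { .io_queues_default = 4, .io_queues_read = 2 };

	for (int i = 0; i < 8; i++) {
		ctrl.queues[i].ctrl = &ctrl;
		ctrl.queues[i].cmnd_capsule_len = 260 * 16;  /* assumed ioccsz of 260 -> 4160-byte capsule */
	}

	/* With 4 default + 2 read queues, index 7 falls into the poll set;
	 * inline capacity is 4160 - 64 = 4096 bytes. */
	printf("idx 7 poll? %d, inline bytes %zu\n",
	       sketch_poll_queue(&ctrl.queues[7]),
	       sketch_inline_data_size(&ctrl.queues[7]));
	return 0;
}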