Lines matching refs: queue (identifier cross-reference over the NVMe target RDMA transport, drivers/nvme/target/rdma.c; the leading number on each entry is the line in that file, and the trailing "member", "argument", or "local" notes how the identifier is used there)
52 struct nvmet_rdma_queue *queue; member
66 struct nvmet_rdma_queue *queue; member
172 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
213 nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) in nvmet_rdma_get_rsp() argument
218 tag = sbitmap_get(&queue->rsp_tags); in nvmet_rdma_get_rsp()
220 rsp = &queue->rsps[tag]; in nvmet_rdma_get_rsp()
228 ret = nvmet_rdma_alloc_rsp(queue->dev, rsp, in nvmet_rdma_get_rsp()
243 nvmet_rdma_free_rsp(rsp->queue->dev, rsp); in nvmet_rdma_put_rsp()
248 sbitmap_clear_bit(&rsp->queue->rsp_tags, rsp->tag); in nvmet_rdma_put_rsp()
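The entries above for nvmet_rdma_get_rsp() and nvmet_rdma_put_rsp() show the response-slot pool: a tag is taken from an sbitmap, indexes the pre-allocated rsps[] array, and is cleared again on put; if the bitmap is exhausted, a one-off rsp is allocated on the fly and freed instead of returned to the pool. Below is a minimal userspace sketch of that pattern, assuming nothing beyond the lines listed: a plain bitmask under a mutex stands in for the kernel's sbitmap API, and the names rsp_pool, rsp_get, rsp_put and the "allocated" flag are invented for the illustration.

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define NR_RSPS 8	/* kernel: recv_queue_size * 2 */

	struct rsp {
		int tag;	/* index into the pool, or -1 if heap-allocated */
		bool allocated;	/* overflow allocation, freed on put */
	};

	struct rsp_pool {
		pthread_mutex_t lock;
		unsigned long used;	/* one bit per pre-allocated rsp */
		struct rsp rsps[NR_RSPS];
	};

	static struct rsp *rsp_get(struct rsp_pool *p)
	{
		struct rsp *rsp = NULL;

		pthread_mutex_lock(&p->lock);
		for (int tag = 0; tag < NR_RSPS; tag++) {
			if (!(p->used & (1UL << tag))) {
				p->used |= 1UL << tag;	/* kernel: sbitmap_get() */
				rsp = &p->rsps[tag];
				rsp->tag = tag;
				rsp->allocated = false;
				break;
			}
		}
		pthread_mutex_unlock(&p->lock);

		if (!rsp) {
			/* Pool exhausted: fall back to a one-off allocation,
			 * as nvmet_rdma_get_rsp() does via nvmet_rdma_alloc_rsp(). */
			rsp = calloc(1, sizeof(*rsp));
			if (rsp) {
				rsp->tag = -1;
				rsp->allocated = true;
			}
		}
		return rsp;
	}

	static void rsp_put(struct rsp_pool *p, struct rsp *rsp)
	{
		if (rsp->allocated) {		/* free the overflow allocation */
			free(rsp);
			return;
		}
		pthread_mutex_lock(&p->lock);
		p->used &= ~(1UL << rsp->tag);	/* kernel: sbitmap_clear_bit() */
		pthread_mutex_unlock(&p->lock);
	}

	int main(void)
	{
		struct rsp_pool pool = { .lock = PTHREAD_MUTEX_INITIALIZER };
		struct rsp *rsp = rsp_get(&pool);

		if (rsp) {
			printf("got rsp tag %d\n", rsp->tag);
			rsp_put(&pool, rsp);
		}
		return 0;
	}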
448 nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue) in nvmet_rdma_alloc_rsps() argument
450 struct nvmet_rdma_device *ndev = queue->dev; in nvmet_rdma_alloc_rsps()
451 int nr_rsps = queue->recv_queue_size * 2; in nvmet_rdma_alloc_rsps()
454 if (sbitmap_init_node(&queue->rsp_tags, nr_rsps, -1, GFP_KERNEL, in nvmet_rdma_alloc_rsps()
458 queue->rsps = kvcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp), in nvmet_rdma_alloc_rsps()
460 if (!queue->rsps) in nvmet_rdma_alloc_rsps()
464 struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; in nvmet_rdma_alloc_rsps()
475 nvmet_rdma_free_rsp(ndev, &queue->rsps[i]); in nvmet_rdma_alloc_rsps()
476 kvfree(queue->rsps); in nvmet_rdma_alloc_rsps()
478 sbitmap_free(&queue->rsp_tags); in nvmet_rdma_alloc_rsps()
483 static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue) in nvmet_rdma_free_rsps() argument
485 struct nvmet_rdma_device *ndev = queue->dev; in nvmet_rdma_free_rsps()
486 int i, nr_rsps = queue->recv_queue_size * 2; in nvmet_rdma_free_rsps()
489 nvmet_rdma_free_rsp(ndev, &queue->rsps[i]); in nvmet_rdma_free_rsps()
490 kvfree(queue->rsps); in nvmet_rdma_free_rsps()
491 sbitmap_free(&queue->rsp_tags); in nvmet_rdma_free_rsps()
506 ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL); in nvmet_rdma_post_recv()
514 static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue) in nvmet_rdma_process_wr_wait_list() argument
516 spin_lock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
517 while (!list_empty(&queue->rsp_wr_wait_list)) { in nvmet_rdma_process_wr_wait_list()
521 rsp = list_entry(queue->rsp_wr_wait_list.next, in nvmet_rdma_process_wr_wait_list()
525 spin_unlock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
527 spin_lock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
530 list_add(&rsp->wait_list, &queue->rsp_wr_wait_list); in nvmet_rdma_process_wr_wait_list()
534 spin_unlock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
629 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_rw_ctx_init()
648 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_rw_ctx_destroy()
663 struct nvmet_rdma_queue *queue = rsp->queue; in nvmet_rdma_release_rsp() local
665 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_release_rsp()
673 if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list))) in nvmet_rdma_release_rsp()
674 nvmet_rdma_process_wr_wait_list(queue); in nvmet_rdma_release_rsp()
679 static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue) in nvmet_rdma_error_comp() argument
681 if (queue->nvme_sq.ctrl) { in nvmet_rdma_error_comp()
682 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl); in nvmet_rdma_error_comp()
689 nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_error_comp()
697 struct nvmet_rdma_queue *queue = wc->qp->qp_context; in nvmet_rdma_send_done() local
705 nvmet_rdma_error_comp(queue); in nvmet_rdma_send_done()
713 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_queue_response()
734 nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd); in nvmet_rdma_queue_response()
736 ib_dma_sync_single_for_device(rsp->queue->dev->device, in nvmet_rdma_queue_response()
750 struct nvmet_rdma_queue *queue = wc->qp->qp_context; in nvmet_rdma_read_data_done() local
754 atomic_add(rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_read_data_done()
764 nvmet_rdma_error_comp(queue); in nvmet_rdma_read_data_done()
783 struct nvmet_rdma_queue *queue = wc->qp->qp_context; in nvmet_rdma_write_data_done() local
784 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_write_data_done()
791 atomic_add(rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_write_data_done()
801 nvmet_rdma_error_comp(queue); in nvmet_rdma_write_data_done()
858 if (off + len > rsp->queue->dev->inline_data_size) { in nvmet_rdma_map_sgl_inline()
945 struct nvmet_rdma_queue *queue = rsp->queue; in nvmet_rdma_execute_command() local
948 &queue->sq_wr_avail) < 0)) { in nvmet_rdma_execute_command()
950 1 + rsp->n_rdma, queue->idx, in nvmet_rdma_execute_command()
951 queue->nvme_sq.ctrl->cntlid); in nvmet_rdma_execute_command()
952 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_execute_command()
957 if (rdma_rw_ctx_post(&rsp->rw, queue->qp, in nvmet_rdma_execute_command()
958 queue->cm_id->port_num, &rsp->read_cqe, NULL)) in nvmet_rdma_execute_command()
967 static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue, in nvmet_rdma_handle_command() argument
972 ib_dma_sync_single_for_cpu(queue->dev->device, in nvmet_rdma_handle_command()
975 ib_dma_sync_single_for_cpu(queue->dev->device, in nvmet_rdma_handle_command()
979 if (!nvmet_req_init(&cmd->req, &queue->nvme_sq, &nvmet_rdma_ops)) in nvmet_rdma_handle_command()
987 spin_lock(&queue->rsp_wr_wait_lock); in nvmet_rdma_handle_command()
988 list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list); in nvmet_rdma_handle_command()
989 spin_unlock(&queue->rsp_wr_wait_lock); in nvmet_rdma_handle_command()
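The nvmet_rdma_execute_command(), nvmet_rdma_release_rsp(), nvmet_rdma_process_wr_wait_list() and nvmet_rdma_handle_command() entries above outline a send-queue credit scheme: each command consumes 1 + n_rdma work-request credits from sq_wr_avail, a command that would drive the counter negative is parked on rsp_wr_wait_list, and credits returned on completion trigger a retry of the parked commands. The sketch below is a hedged userspace stand-in for that scheme, not the kernel code: C11 atomics and a pthread mutex replace atomic_t and the spinlock, the names sq_execute/sq_release are invented, and (unlike the kernel, where nvmet_rdma_handle_command() does the parking when nvmet_rdma_execute_command() reports no credits) the submit helper here parks the command itself.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stddef.h>

	struct cmd {
		int n_rdma;			/* RDMA WRs this command needs */
		struct cmd *next;
	};

	struct sq {
		atomic_int wr_avail;		/* kernel: queue->sq_wr_avail */
		pthread_mutex_t wait_lock;	/* kernel: rsp_wr_wait_lock */
		struct cmd *wait_head, **wait_tail;
	};

	void sq_init(struct sq *sq, int max_send_wr)
	{
		atomic_init(&sq->wr_avail, max_send_wr);
		pthread_mutex_init(&sq->wait_lock, NULL);
		sq->wait_head = NULL;
		sq->wait_tail = &sq->wait_head;
	}

	/* Try to post a command; returns false if it had to be parked. */
	bool sq_execute(struct sq *sq, struct cmd *cmd)
	{
		int need = 1 + cmd->n_rdma;	/* 1 send WR + n_rdma RDMA WRs */

		if (atomic_fetch_sub(&sq->wr_avail, need) - need < 0) {
			atomic_fetch_add(&sq->wr_avail, need);	/* give credits back */
			pthread_mutex_lock(&sq->wait_lock);
			cmd->next = NULL;
			*sq->wait_tail = cmd;			/* park at the tail */
			sq->wait_tail = &cmd->next;
			pthread_mutex_unlock(&sq->wait_lock);
			return false;
		}
		/* ...post the send WR and any RDMA READ context here... */
		return true;
	}

	/* Completion path: return the credits, then retry parked commands. */
	void sq_release(struct sq *sq, struct cmd *done)
	{
		atomic_fetch_add(&sq->wr_avail, 1 + done->n_rdma);

		pthread_mutex_lock(&sq->wait_lock);
		while (sq->wait_head) {
			struct cmd *cmd = sq->wait_head;

			sq->wait_head = cmd->next;
			if (!sq->wait_head)
				sq->wait_tail = &sq->wait_head;
			pthread_mutex_unlock(&sq->wait_lock);
			if (!sq_execute(sq, cmd))	/* still no room: re-parked */
				return;
			pthread_mutex_lock(&sq->wait_lock);
		}
		pthread_mutex_unlock(&sq->wait_lock);
	}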
998 static bool nvmet_rdma_recv_not_live(struct nvmet_rdma_queue *queue, in nvmet_rdma_recv_not_live() argument
1004 spin_lock_irqsave(&queue->state_lock, flags); in nvmet_rdma_recv_not_live()
1009 if (queue->state == NVMET_RDMA_Q_LIVE) in nvmet_rdma_recv_not_live()
1011 else if (queue->state == NVMET_RDMA_Q_CONNECTING) in nvmet_rdma_recv_not_live()
1012 list_add_tail(&rsp->wait_list, &queue->rsp_wait_list); in nvmet_rdma_recv_not_live()
1015 spin_unlock_irqrestore(&queue->state_lock, flags); in nvmet_rdma_recv_not_live()
1023 struct nvmet_rdma_queue *queue = wc->qp->qp_context; in nvmet_rdma_recv_done() local
1031 nvmet_rdma_error_comp(queue); in nvmet_rdma_recv_done()
1038 nvmet_rdma_error_comp(queue); in nvmet_rdma_recv_done()
1042 cmd->queue = queue; in nvmet_rdma_recv_done()
1043 rsp = nvmet_rdma_get_rsp(queue); in nvmet_rdma_recv_done()
1050 nvmet_rdma_post_recv(queue->dev, cmd); in nvmet_rdma_recv_done()
1053 rsp->queue = queue; in nvmet_rdma_recv_done()
1057 rsp->req.port = queue->port; in nvmet_rdma_recv_done()
1061 if (unlikely(queue->state != NVMET_RDMA_Q_LIVE) && in nvmet_rdma_recv_done()
1062 nvmet_rdma_recv_not_live(queue, rsp)) in nvmet_rdma_recv_done()
1065 nvmet_rdma_handle_command(queue, rsp); in nvmet_rdma_recv_done()
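nvmet_rdma_recv_done(), nvmet_rdma_recv_not_live() and, further down, nvmet_rdma_queue_established() implement a state gate: commands received while the queue is still CONNECTING are parked on rsp_wait_list and replayed once the connection is established and the state flips to LIVE; in other non-LIVE states they are not processed. A compact userspace sketch of that gate follows, with invented names and a pthread mutex in place of the state_lock spinlock; the kernel keeps the parked commands in FIFO order, while the simple stack used here is only meant to show the idea.

	#include <pthread.h>
	#include <stddef.h>

	enum q_state { Q_CONNECTING, Q_LIVE, Q_DISCONNECTING };

	struct rsp { struct rsp *next; };

	struct queue {
		pthread_mutex_t state_lock;
		enum q_state state;
		struct rsp *wait_list;		/* kernel: rsp_wait_list */
	};

	static void handle_command(struct queue *q, struct rsp *rsp)
	{
		(void)q; (void)rsp;		/* placeholder for real processing */
	}

	void recv_done(struct queue *q, struct rsp *rsp)
	{
		int live;

		pthread_mutex_lock(&q->state_lock);
		live = (q->state == Q_LIVE);
		if (!live && q->state == Q_CONNECTING) {
			rsp->next = q->wait_list;	/* park until ESTABLISHED */
			q->wait_list = rsp;
		}
		pthread_mutex_unlock(&q->state_lock);

		if (live)
			handle_command(q, rsp);
		/* DISCONNECTING: the command is simply not processed. */
	}

	void queue_established(struct queue *q)
	{
		pthread_mutex_lock(&q->state_lock);
		if (q->state == Q_CONNECTING) {
			q->state = Q_LIVE;
			while (q->wait_list) {		/* replay parked commands */
				struct rsp *rsp = q->wait_list;

				q->wait_list = rsp->next;
				pthread_mutex_unlock(&q->state_lock);
				handle_command(q, rsp);
				pthread_mutex_lock(&q->state_lock);
			}
		}
		pthread_mutex_unlock(&q->state_lock);
	}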
1263 static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue) in nvmet_rdma_create_queue_ib() argument
1266 struct nvmet_rdma_device *ndev = queue->dev; in nvmet_rdma_create_queue_ib()
1272 nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size; in nvmet_rdma_create_queue_ib()
1274 queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1, in nvmet_rdma_create_queue_ib()
1275 queue->comp_vector, IB_POLL_WORKQUEUE); in nvmet_rdma_create_queue_ib()
1276 if (IS_ERR(queue->cq)) { in nvmet_rdma_create_queue_ib()
1277 ret = PTR_ERR(queue->cq); in nvmet_rdma_create_queue_ib()
1283 qp_attr.qp_context = queue; in nvmet_rdma_create_queue_ib()
1285 qp_attr.send_cq = queue->cq; in nvmet_rdma_create_queue_ib()
1286 qp_attr.recv_cq = queue->cq; in nvmet_rdma_create_queue_ib()
1290 qp_attr.cap.max_send_wr = queue->send_queue_size + 1; in nvmet_rdma_create_queue_ib()
1291 factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num, in nvmet_rdma_create_queue_ib()
1293 qp_attr.cap.max_rdma_ctxs = queue->send_queue_size * factor; in nvmet_rdma_create_queue_ib()
1297 if (queue->nsrq) { in nvmet_rdma_create_queue_ib()
1298 qp_attr.srq = queue->nsrq->srq; in nvmet_rdma_create_queue_ib()
1301 qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size; in nvmet_rdma_create_queue_ib()
1305 if (queue->port->pi_enable && queue->host_qid) in nvmet_rdma_create_queue_ib()
1308 ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr); in nvmet_rdma_create_queue_ib()
1313 queue->qp = queue->cm_id->qp; in nvmet_rdma_create_queue_ib()
1315 atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr); in nvmet_rdma_create_queue_ib()
1318 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge, in nvmet_rdma_create_queue_ib()
1319 qp_attr.cap.max_send_wr, queue->cm_id); in nvmet_rdma_create_queue_ib()
1321 if (!queue->nsrq) { in nvmet_rdma_create_queue_ib()
1322 for (i = 0; i < queue->recv_queue_size; i++) { in nvmet_rdma_create_queue_ib()
1323 queue->cmds[i].queue = queue; in nvmet_rdma_create_queue_ib()
1324 ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]); in nvmet_rdma_create_queue_ib()
1334 rdma_destroy_qp(queue->cm_id); in nvmet_rdma_create_queue_ib()
1336 ib_cq_pool_put(queue->cq, nr_cqe + 1); in nvmet_rdma_create_queue_ib()
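The nvmet_rdma_create_queue_ib() entries spell out the CQ/QP sizing: one shared completion queue absorbs receive completions plus send and RDMA READ/WRITE completions (recv_queue_size + 2 * send_queue_size, plus one extra entry that the kernel reserves for draining), the send queue gets send_queue_size + 1 WRs, the RDMA R/W API gets send_queue_size * factor contexts, and without an SRQ the receive queue gets 1 + recv_queue_size WRs. The worked example below simply redoes that arithmetic with example sizes; the factor value is a placeholder for whatever rdma_rw_mr_factor() would return for a given device and port.

	#include <stdio.h>

	int main(void)
	{
		int recv_queue_size = 128;	/* hsqsize + 1 from the connect request */
		int send_queue_size = 128;	/* hrqsize from the connect request */
		int factor = 1;			/* placeholder for rdma_rw_mr_factor() */

		int nr_cqe = recv_queue_size + 2 * send_queue_size;
		int cq_size = nr_cqe + 1;			/* ib_cq_pool_get() size */
		int max_send_wr = send_queue_size + 1;		/* +1 for drain */
		int max_rdma_ctxs = send_queue_size * factor;
		int max_recv_wr = 1 + recv_queue_size;		/* only without an SRQ */

		printf("cq=%d send_wr=%d rdma_ctxs=%d recv_wr=%d\n",
		       cq_size, max_send_wr, max_rdma_ctxs, max_recv_wr);
		return 0;
	}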
1340 static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) in nvmet_rdma_destroy_queue_ib() argument
1342 ib_drain_qp(queue->qp); in nvmet_rdma_destroy_queue_ib()
1343 if (queue->cm_id) in nvmet_rdma_destroy_queue_ib()
1344 rdma_destroy_id(queue->cm_id); in nvmet_rdma_destroy_queue_ib()
1345 ib_destroy_qp(queue->qp); in nvmet_rdma_destroy_queue_ib()
1346 ib_cq_pool_put(queue->cq, queue->recv_queue_size + 2 * in nvmet_rdma_destroy_queue_ib()
1347 queue->send_queue_size + 1); in nvmet_rdma_destroy_queue_ib()
1350 static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue) in nvmet_rdma_free_queue() argument
1352 pr_debug("freeing queue %d\n", queue->idx); in nvmet_rdma_free_queue()
1354 nvmet_sq_destroy(&queue->nvme_sq); in nvmet_rdma_free_queue()
1355 nvmet_cq_put(&queue->nvme_cq); in nvmet_rdma_free_queue()
1357 nvmet_rdma_destroy_queue_ib(queue); in nvmet_rdma_free_queue()
1358 if (!queue->nsrq) { in nvmet_rdma_free_queue()
1359 nvmet_rdma_free_cmds(queue->dev, queue->cmds, in nvmet_rdma_free_queue()
1360 queue->recv_queue_size, in nvmet_rdma_free_queue()
1361 !queue->host_qid); in nvmet_rdma_free_queue()
1363 nvmet_rdma_free_rsps(queue); in nvmet_rdma_free_queue()
1364 ida_free(&nvmet_rdma_queue_ida, queue->idx); in nvmet_rdma_free_queue()
1365 kfree(queue); in nvmet_rdma_free_queue()
1370 struct nvmet_rdma_queue *queue = in nvmet_rdma_release_queue_work() local
1372 struct nvmet_rdma_device *dev = queue->dev; in nvmet_rdma_release_queue_work()
1374 nvmet_rdma_free_queue(queue); in nvmet_rdma_release_queue_work()
1381 struct nvmet_rdma_queue *queue) in nvmet_rdma_parse_cm_connect_req() argument
1392 queue->host_qid = le16_to_cpu(req->qid); in nvmet_rdma_parse_cm_connect_req()
1398 queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1; in nvmet_rdma_parse_cm_connect_req()
1399 queue->send_queue_size = le16_to_cpu(req->hrqsize); in nvmet_rdma_parse_cm_connect_req()
1401 if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH) in nvmet_rdma_parse_cm_connect_req()
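nvmet_rdma_parse_cm_connect_req() derives the queue sizes from the RDMA/CM private data: recv_queue_size = hsqsize + 1 (hsqsize mirrors the host's 0's-based SQ size, hence the +1 on the target side), send_queue_size = hrqsize, and an admin queue (host_qid == 0) is rejected if it would exceed NVME_AQ_DEPTH. A stand-alone sketch of that mapping follows, with an invented function name; NVME_AQ_DEPTH is 32 in current kernels.

	#include <stdbool.h>
	#include <stdint.h>

	#define NVME_AQ_DEPTH 32

	struct queue_sizes {
		uint16_t host_qid;
		int recv_queue_size;
		int send_queue_size;
	};

	bool parse_connect_sizes(uint16_t qid, uint16_t hsqsize,
				 uint16_t hrqsize, struct queue_sizes *q)
	{
		q->host_qid = qid;
		q->recv_queue_size = hsqsize + 1;	/* 0's-based on the wire */
		q->send_queue_size = hrqsize;

		/* The admin queue may not be deeper than NVME_AQ_DEPTH. */
		if (qid == 0 && q->recv_queue_size > NVME_AQ_DEPTH)
			return false;
		return true;
	}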
1430 struct nvmet_rdma_queue *queue; in nvmet_rdma_alloc_queue() local
1433 queue = kzalloc(sizeof(*queue), GFP_KERNEL); in nvmet_rdma_alloc_queue()
1434 if (!queue) { in nvmet_rdma_alloc_queue()
1439 nvmet_cq_init(&queue->nvme_cq); in nvmet_rdma_alloc_queue()
1440 ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq); in nvmet_rdma_alloc_queue()
1446 ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue); in nvmet_rdma_alloc_queue()
1454 INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work); in nvmet_rdma_alloc_queue()
1455 queue->dev = ndev; in nvmet_rdma_alloc_queue()
1456 queue->cm_id = cm_id; in nvmet_rdma_alloc_queue()
1457 queue->port = port->nport; in nvmet_rdma_alloc_queue()
1459 spin_lock_init(&queue->state_lock); in nvmet_rdma_alloc_queue()
1460 queue->state = NVMET_RDMA_Q_CONNECTING; in nvmet_rdma_alloc_queue()
1461 INIT_LIST_HEAD(&queue->rsp_wait_list); in nvmet_rdma_alloc_queue()
1462 INIT_LIST_HEAD(&queue->rsp_wr_wait_list); in nvmet_rdma_alloc_queue()
1463 spin_lock_init(&queue->rsp_wr_wait_lock); in nvmet_rdma_alloc_queue()
1464 INIT_LIST_HEAD(&queue->queue_list); in nvmet_rdma_alloc_queue()
1466 queue->idx = ida_alloc(&nvmet_rdma_queue_ida, GFP_KERNEL); in nvmet_rdma_alloc_queue()
1467 if (queue->idx < 0) { in nvmet_rdma_alloc_queue()
1476 queue->comp_vector = !queue->host_qid ? 0 : in nvmet_rdma_alloc_queue()
1477 queue->idx % ndev->device->num_comp_vectors; in nvmet_rdma_alloc_queue()
1480 ret = nvmet_rdma_alloc_rsps(queue); in nvmet_rdma_alloc_queue()
1487 queue->nsrq = ndev->srqs[queue->comp_vector % ndev->srq_count]; in nvmet_rdma_alloc_queue()
1489 queue->cmds = nvmet_rdma_alloc_cmds(ndev, in nvmet_rdma_alloc_queue()
1490 queue->recv_queue_size, in nvmet_rdma_alloc_queue()
1491 !queue->host_qid); in nvmet_rdma_alloc_queue()
1492 if (IS_ERR(queue->cmds)) { in nvmet_rdma_alloc_queue()
1498 ret = nvmet_rdma_create_queue_ib(queue); in nvmet_rdma_alloc_queue()
1506 return queue; in nvmet_rdma_alloc_queue()
1509 if (!queue->nsrq) { in nvmet_rdma_alloc_queue()
1510 nvmet_rdma_free_cmds(queue->dev, queue->cmds, in nvmet_rdma_alloc_queue()
1511 queue->recv_queue_size, in nvmet_rdma_alloc_queue()
1512 !queue->host_qid); in nvmet_rdma_alloc_queue()
1515 nvmet_rdma_free_rsps(queue); in nvmet_rdma_alloc_queue()
1517 ida_free(&nvmet_rdma_queue_ida, queue->idx); in nvmet_rdma_alloc_queue()
1519 nvmet_sq_destroy(&queue->nvme_sq); in nvmet_rdma_alloc_queue()
1521 nvmet_cq_put(&queue->nvme_cq); in nvmet_rdma_alloc_queue()
1522 kfree(queue); in nvmet_rdma_alloc_queue()
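The comp_vector assignment listed for nvmet_rdma_alloc_queue() keeps the admin queue (host_qid == 0) on completion vector 0 and spreads I/O queues across the device's completion vectors by queue index. As a one-line illustration, with an invented function name:

	int pick_comp_vector(int host_qid, int queue_idx, int num_comp_vectors)
	{
		/* admin queue stays on vector 0; I/O queues round-robin */
		return host_qid == 0 ? 0 : queue_idx % num_comp_vectors;
	}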
1530 struct nvmet_rdma_queue *queue = priv; in nvmet_rdma_qp_event() local
1534 rdma_notify(queue->cm_id, event->event); in nvmet_rdma_qp_event()
1538 queue); in nvmet_rdma_qp_event()
1548 struct nvmet_rdma_queue *queue, in nvmet_rdma_cm_accept() argument
1558 queue->dev->device->attrs.max_qp_init_rd_atom); in nvmet_rdma_cm_accept()
1562 priv.crqsize = cpu_to_le16(queue->recv_queue_size); in nvmet_rdma_cm_accept()
1575 struct nvmet_rdma_queue *queue; in nvmet_rdma_queue_connect() local
1584 queue = nvmet_rdma_alloc_queue(ndev, cm_id, event); in nvmet_rdma_queue_connect()
1585 if (!queue) { in nvmet_rdma_queue_connect()
1590 if (queue->host_qid == 0) { in nvmet_rdma_queue_connect()
1597 if (q->nvme_sq.ctrl == queue->nvme_sq.ctrl && in nvmet_rdma_queue_connect()
1606 ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); in nvmet_rdma_queue_connect()
1612 queue->cm_id = NULL; in nvmet_rdma_queue_connect()
1617 list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list); in nvmet_rdma_queue_connect()
1623 nvmet_rdma_free_queue(queue); in nvmet_rdma_queue_connect()
1630 static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue) in nvmet_rdma_queue_established() argument
1634 spin_lock_irqsave(&queue->state_lock, flags); in nvmet_rdma_queue_established()
1635 if (queue->state != NVMET_RDMA_Q_CONNECTING) { in nvmet_rdma_queue_established()
1639 queue->state = NVMET_RDMA_Q_LIVE; in nvmet_rdma_queue_established()
1641 while (!list_empty(&queue->rsp_wait_list)) { in nvmet_rdma_queue_established()
1644 cmd = list_first_entry(&queue->rsp_wait_list, in nvmet_rdma_queue_established()
1648 spin_unlock_irqrestore(&queue->state_lock, flags); in nvmet_rdma_queue_established()
1649 nvmet_rdma_handle_command(queue, cmd); in nvmet_rdma_queue_established()
1650 spin_lock_irqsave(&queue->state_lock, flags); in nvmet_rdma_queue_established()
1654 spin_unlock_irqrestore(&queue->state_lock, flags); in nvmet_rdma_queue_established()
1657 static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) in __nvmet_rdma_queue_disconnect() argument
1662 pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state); in __nvmet_rdma_queue_disconnect()
1664 spin_lock_irqsave(&queue->state_lock, flags); in __nvmet_rdma_queue_disconnect()
1665 switch (queue->state) { in __nvmet_rdma_queue_disconnect()
1667 while (!list_empty(&queue->rsp_wait_list)) { in __nvmet_rdma_queue_disconnect()
1670 rsp = list_first_entry(&queue->rsp_wait_list, in __nvmet_rdma_queue_disconnect()
1678 queue->state = NVMET_RDMA_Q_DISCONNECTING; in __nvmet_rdma_queue_disconnect()
1684 spin_unlock_irqrestore(&queue->state_lock, flags); in __nvmet_rdma_queue_disconnect()
1687 rdma_disconnect(queue->cm_id); in __nvmet_rdma_queue_disconnect()
1688 queue_work(nvmet_wq, &queue->release_work); in __nvmet_rdma_queue_disconnect()
1692 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) in nvmet_rdma_queue_disconnect() argument
1697 if (!list_empty(&queue->queue_list)) { in nvmet_rdma_queue_disconnect()
1698 list_del_init(&queue->queue_list); in nvmet_rdma_queue_disconnect()
1704 __nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_queue_disconnect()
1708 struct nvmet_rdma_queue *queue) in nvmet_rdma_queue_connect_fail() argument
1710 WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING); in nvmet_rdma_queue_connect_fail()
1713 if (!list_empty(&queue->queue_list)) in nvmet_rdma_queue_connect_fail()
1714 list_del_init(&queue->queue_list); in nvmet_rdma_queue_connect_fail()
1717 pr_err("failed to connect queue %d\n", queue->idx); in nvmet_rdma_queue_connect_fail()
1718 queue_work(nvmet_wq, &queue->release_work); in nvmet_rdma_queue_connect_fail()
1737 struct nvmet_rdma_queue *queue) in nvmet_rdma_device_removal() argument
1741 if (queue) { in nvmet_rdma_device_removal()
1771 struct nvmet_rdma_queue *queue = NULL; in nvmet_rdma_cm_handler() local
1775 queue = cm_id->qp->qp_context; in nvmet_rdma_cm_handler()
1786 nvmet_rdma_queue_established(queue); in nvmet_rdma_cm_handler()
1789 if (!queue) { in nvmet_rdma_cm_handler()
1798 nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_cm_handler()
1801 ret = nvmet_rdma_device_removal(cm_id, queue); in nvmet_rdma_cm_handler()
1809 nvmet_rdma_queue_connect_fail(cm_id, queue); in nvmet_rdma_cm_handler()
1822 struct nvmet_rdma_queue *queue, *n; in nvmet_rdma_delete_ctrl() local
1825 list_for_each_entry_safe(queue, n, &nvmet_rdma_queue_list, queue_list) { in nvmet_rdma_delete_ctrl()
1826 if (queue->nvme_sq.ctrl != ctrl) in nvmet_rdma_delete_ctrl()
1828 list_del_init(&queue->queue_list); in nvmet_rdma_delete_ctrl()
1829 __nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_delete_ctrl()
1836 struct nvmet_rdma_queue *queue, *tmp; in nvmet_rdma_destroy_port_queues() local
1840 list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list, in nvmet_rdma_destroy_port_queues()
1842 if (queue->port != nport) in nvmet_rdma_destroy_port_queues()
1845 list_del_init(&queue->queue_list); in nvmet_rdma_destroy_port_queues()
1846 __nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_destroy_port_queues()
2007 struct rdma_cm_id *req_cm_id = rsp->queue->cm_id; in nvmet_rdma_disc_port_addr()
2020 struct nvmet_rdma_queue *queue = in nvmet_rdma_host_port_addr() local
2024 (struct sockaddr *)&queue->cm_id->route.addr.dst_addr); in nvmet_rdma_host_port_addr()
2058 struct nvmet_rdma_queue *queue, *tmp; in nvmet_rdma_remove_one() local
2079 list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list, in nvmet_rdma_remove_one()
2081 if (queue->dev->device != ib_device) in nvmet_rdma_remove_one()
2084 pr_info("Removing queue %d\n", queue->idx); in nvmet_rdma_remove_one()
2085 list_del_init(&queue->queue_list); in nvmet_rdma_remove_one()
2086 __nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_remove_one()