Lines Matching refs:cm_id

90 	struct rdma_cm_id	*cm_id;
123 struct rdma_cm_id *cm_id;
629 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
634 ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp,
635 cm_id->port_num, req->sg, req->sg_cnt,
639 ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
648 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
652 rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp,
653 cm_id->port_num, req->sg, req->sg_cnt,
657 rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num,
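
The matches at 629-657 show the pattern most of these references follow: the queue's cm_id supplies both the QP and the port number that the rdma_rw API needs for mapping and unmapping. A minimal sketch of that pairing, using hypothetical demo_* names; the headers below are assumed for all sketches in this listing:

#include <rdma/rdma_cm.h>
#include <rdma/rw.h>
#include <rdma/ib_verbs.h>

/* Map an SG list for an RDMA READ from the peer (DMA_FROM_DEVICE) and
 * build the work requests; returns the number of WQEs needed on the
 * send queue, or a negative errno. */
static int demo_rw_map(struct rdma_rw_ctx *rw, struct rdma_cm_id *cm_id,
		       struct scatterlist *sg, u32 sg_cnt,
		       u64 remote_addr, u32 rkey)
{
	return rdma_rw_ctx_init(rw, cm_id->qp, cm_id->port_num,
				sg, sg_cnt, 0, remote_addr, rkey,
				DMA_FROM_DEVICE);
}

/* The destroy call must mirror the init call once the transfer is done. */
static void demo_rw_unmap(struct rdma_rw_ctx *rw, struct rdma_cm_id *cm_id,
			  struct scatterlist *sg, u32 sg_cnt)
{
	rdma_rw_ctx_destroy(rw, cm_id->qp, cm_id->port_num,
			    sg, sg_cnt, DMA_FROM_DEVICE);
}
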
713 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
725 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
726 cm_id->port_num, &rsp->write_cqe, NULL);
728 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
729 cm_id->port_num, NULL, &rsp->send_wr);
740 if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
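
Lines 713-740 chain the memory-registration and RDMA work requests built by rdma_rw_ctx_wrs() in front of a final send WR, then ring the doorbell once for the whole chain. A sketch of that shape, with rw and send_wr standing in for the response's fields:

/* Chain: [MR reg WRs] -> [RDMA WRs] -> send_wr, posted in one call.
 * Passing a NULL bad_wr pointer to ib_post_send() is allowed. */
static int demo_post_rw_then_send(struct rdma_rw_ctx *rw,
				  struct rdma_cm_id *cm_id,
				  struct ib_send_wr *send_wr)
{
	struct ib_send_wr *first_wr;

	first_wr = rdma_rw_ctx_wrs(rw, cm_id->qp, cm_id->port_num,
				   NULL, send_wr);
	return ib_post_send(cm_id->qp, first_wr, NULL);
}
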
784 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
816 if (unlikely(ib_post_send(cm_id->qp, &rsp->send_wr, NULL))) {
958 queue->cm_id->port_num, &rsp->read_cqe, NULL))
1195 nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
1197 struct nvmet_rdma_port *port = cm_id->context;
1206 if (ndev->device->node_guid == cm_id->device->node_guid &&
1216 inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
1217 cm_id->device->attrs.max_recv_sge) - 1;
1220 nport->inline_data_size, cm_id->device->name,
1228 if (nport->pi_enable && !(cm_id->device->attrs.kernel_cap_flags &
1231 cm_id->device->name);
1235 ndev->device = cm_id->device;
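
The lookup at 1195-1235 caches one device context per HCA: an incoming cm_id is matched against the cache by node_guid, so all ports and queues landing on the same device share it. A sketch of that lookup with hypothetical demo_dev/demo_dev_list names (locking elided):

#include <linux/kref.h>
#include <linux/list.h>

struct demo_dev {
	struct ib_device	*device;
	struct kref		ref;
	struct list_head	entry;
};

static struct demo_dev *demo_find_dev(struct list_head *demo_dev_list,
				      struct rdma_cm_id *cm_id)
{
	struct demo_dev *ndev;

	list_for_each_entry(ndev, demo_dev_list, entry) {
		/* Same HCA and still alive: take a reference and reuse it. */
		if (ndev->device->node_guid == cm_id->device->node_guid &&
		    kref_get_unless_zero(&ndev->ref))
			return ndev;
	}
	return NULL;	/* caller allocates a new entry from cm_id->device */
}
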
1291 factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num,
1308 ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
1313 queue->qp = queue->cm_id->qp;
1317 pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
1319 qp_attr.cap.max_send_wr, queue->cm_id);
1334 rdma_destroy_qp(queue->cm_id);
1343 if (queue->cm_id)
1344 rdma_destroy_id(queue->cm_id);
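
Lines 1291-1344 create and tear down the QP directly on the cm_id: rdma_rw_mr_factor() sizes the extra MR headroom the rdma_rw API needs, rdma_create_qp() attaches the QP to the cm_id (reachable afterwards as cm_id->qp, line 1313), and the error path destroys the QP before the id. A sketch with placeholder sizing, not the driver's real numbers:

static int demo_create_qp(struct rdma_cm_id *cm_id, struct ib_pd *pd,
			  struct ib_cq *cq, u32 sq_size, u32 max_pages)
{
	struct ib_qp_init_attr qp_attr = { };
	unsigned int factor;

	/* MRs the rdma_rw API may need per context on this device/port. */
	factor = rdma_rw_mr_factor(cm_id->device, cm_id->port_num, max_pages);

	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.send_cq = cq;
	qp_attr.recv_cq = cq;
	qp_attr.cap.max_send_wr = sq_size;
	qp_attr.cap.max_recv_wr = sq_size;
	qp_attr.cap.max_send_sge = 1;
	qp_attr.cap.max_recv_sge = 1;
	qp_attr.cap.max_rdma_ctxs = sq_size * factor;

	/* On success the new QP is reachable as cm_id->qp. */
	return rdma_create_qp(cm_id, pd, &qp_attr);
}
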
1409 static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
1420 return rdma_reject(cm_id, (void *)&rej, sizeof(rej),
1426 struct rdma_cm_id *cm_id,
1429 struct nvmet_rdma_port *port = cm_id->context;
1456 queue->cm_id = cm_id;
1524 nvmet_rdma_cm_reject(cm_id, ret);
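
The reject helper around 1409-1420 returns an NVMe/RDMA status code to the host as CM private data; the blob is opaque to the CM itself, hence IB_CM_REJ_CONSUMER_DEFINED. A sketch consistent with those lines:

#include <linux/nvme-rdma.h>
#include <rdma/ib_cm.h>

static int demo_cm_reject(struct rdma_cm_id *cm_id,
			  enum nvme_rdma_cm_status status)
{
	struct nvme_rdma_cm_rej rej = {
		.recfmt	= cpu_to_le16(NVME_RDMA_CM_FMT_1_0),
		.sts	= cpu_to_le16(status),
	};

	return rdma_reject(cm_id, &rej, sizeof(rej),
			   IB_CM_REJ_CONSUMER_DEFINED);
}
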
1534 rdma_notify(queue->cm_id, event->event);
1547 static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
1564 ret = rdma_accept(cm_id, &param);
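
The accept path at 1547-1564 fills a struct rdma_conn_param, clamps initiator_depth to what the device supports, and returns the queue size to the host in an NVMe/RDMA private-data reply. A sketch under those assumptions (crqsize is a hypothetical parameter):

#include <linux/minmax.h>

static int demo_cm_accept(struct rdma_cm_id *cm_id, u16 crqsize,
			  struct rdma_conn_param *incoming)
{
	struct rdma_conn_param param = { };
	struct nvme_rdma_cm_rep priv = { };

	param.rnr_retry_count = 7;
	param.flow_control = 1;
	param.initiator_depth = min_t(u8, incoming->initiator_depth,
				      cm_id->device->attrs.max_qp_init_rd_atom);
	param.private_data = &priv;
	param.private_data_len = sizeof(priv);
	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	priv.crqsize = cpu_to_le16(crqsize);

	return rdma_accept(cm_id, &param);
}
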
1571 static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
1578 ndev = nvmet_rdma_find_get_device(cm_id);
1580 nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
1584 queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
1606 ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
1609 * Don't destroy the cm_id in free path, as we implicitly
1610 * destroy the cm_id here with non-zero ret code.
1612 queue->cm_id = NULL;
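
Lines 1606-1612 handle a subtle ownership rule of the CONNECT_REQUEST callout: if the handler returns non-zero, the CM core destroys the cm_id itself, so the queue must drop its copy or its release path would call rdma_destroy_id() a second time. A sketch, reusing demo_cm_accept() from above and a hypothetical demo_queue:

struct demo_queue {
	struct rdma_cm_id *cm_id;
};

static int demo_queue_connect(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event,
			      struct demo_queue *queue)
{
	int ret;

	queue->cm_id = cm_id;
	ret = demo_cm_accept(cm_id, 128, &event->param.conn);
	if (ret)
		/* Non-zero return: the CM core destroys cm_id for us. */
		queue->cm_id = NULL;
	return ret;
}
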
1662 pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);
1687 rdma_disconnect(queue->cm_id);
1707 static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
1723 * @cm_id: rdma_cm id, used for nvmet port
1728 * queue cm_id and/or a device bound listener cm_id (where in this
1733 * we nullify the priv to prevent double cm_id destruction and destroying
1734 * the cm_id implicitly by returning a non-zero rc to the callout.
1736 static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
1743 * This is a queue cm_id. we have registered
1750 port = cm_id->context;
1753 * This is a listener cm_id. Make sure that
1755 * cm_id destroy. use atomic xchg to make sure
1758 if (xchg(&port->cm_id, NULL) != cm_id)
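
The xchg() at 1758 (and its twin at 1853 below) implements a simple ownership handoff: whichever path swaps port->cm_id to NULL first owns the listener id, so a DEVICE_REMOVAL event and a normal port teardown can race without destroying it twice. A sketch of the teardown side, with a hypothetical demo_port:

struct demo_port {
	struct rdma_cm_id *cm_id;
};

static void demo_stop_listener(struct demo_port *port)
{
	struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL);

	/* NULL means the removal path already claimed it; in that case
	 * the CM core frees the id when the handler returns non-zero. */
	if (cm_id)
		rdma_destroy_id(cm_id);
}
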
1768 static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
1774 if (cm_id->qp)
1775 queue = cm_id->qp->qp_context;
1779 event->status, cm_id);
1783 ret = nvmet_rdma_queue_connect(cm_id, event);
1790 struct nvmet_rdma_port *port = cm_id->context;
1801 ret = nvmet_rdma_device_removal(cm_id, queue);
1805 rdma_reject_msg(cm_id, event->status));
1809 nvmet_rdma_queue_connect_fail(cm_id, queue);
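
Lines 1768-1809 are the single CM event handler: connect requests arrive on a fresh per-connection cm_id before any QP exists, while every later event recovers the queue from cm_id->qp->qp_context. A reduced sketch of that demultiplexing (the real handlers do far more):

static int demo_cm_handler(struct rdma_cm_id *cm_id,
			   struct rdma_cm_event *event)
{
	void *queue = cm_id->qp ? cm_id->qp->qp_context : NULL;
	int ret = 0;

	pr_debug("event %s status %d queue %p\n",
		 rdma_event_msg(event->event), event->status, queue);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		/* Allocate a queue, create the QP, then rdma_accept(). */
		break;
	case RDMA_CM_EVENT_REJECTED:
		pr_debug("reject reason: %s\n",
			 rdma_reject_msg(cm_id, event->status));
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/* Returning non-zero asks the CM core to destroy cm_id. */
		ret = 1;
		break;
	default:
		break;
	}
	return ret;
}
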
1853 struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL);
1855 if (cm_id)
1856 rdma_destroy_id(cm_id);
1869 struct rdma_cm_id *cm_id;
1872 cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
1874 if (IS_ERR(cm_id)) {
1876 return PTR_ERR(cm_id);
1883 ret = rdma_set_afonly(cm_id, 1);
1889 ret = rdma_bind_addr(cm_id, addr);
1895 ret = rdma_listen(cm_id, NVMET_RDMA_BACKLOG);
1901 port->cm_id = cm_id;
1905 rdma_destroy_id(cm_id);
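
Lines 1869-1905 are the listener bring-up, in the canonical order create -> set_afonly -> bind -> listen, with rdma_destroy_id() unwinding any failure. A sketch with the same shape; the backlog value is a placeholder, and demo_cm_handler is the sketch above:

#include <net/net_namespace.h>

static struct rdma_cm_id *demo_listen(struct sockaddr *addr, void *ctx)
{
	struct rdma_cm_id *cm_id;
	int ret;

	cm_id = rdma_create_id(&init_net, demo_cm_handler, ctx,
			       RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id))
		return cm_id;

	/* Keep an AF_INET6 listener from also catching IPv4 connections. */
	ret = rdma_set_afonly(cm_id, 1);
	if (ret)
		goto out_destroy;

	ret = rdma_bind_addr(cm_id, addr);
	if (ret)
		goto out_destroy;

	ret = rdma_listen(cm_id, 128);
	if (ret)
		goto out_destroy;

	return cm_id;

out_destroy:
	rdma_destroy_id(cm_id);
	return ERR_PTR(ret);
}
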
2002 struct rdma_cm_id *cm_id = port->cm_id;
2004 if (inet_addr_is_any(&cm_id->route.addr.src_addr)) {
2007 struct rdma_cm_id *req_cm_id = rsp->queue->cm_id;
2024 (struct sockaddr *)&queue->cm_id->route.addr.dst_addr);
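
The final matches (2002-2024) handle address reporting: a listener bound to a wildcard address cannot name the local IP itself, so the per-connection cm_id's resolved route is consulted instead. A sketch of that fallback:

#include <net/addrconf.h>

static void demo_print_addrs(struct rdma_cm_id *listener,
			     struct rdma_cm_id *queue_id)
{
	struct sockaddr *src = (struct sockaddr *)&listener->route.addr.src_addr;

	/* Wildcard listener: report the connection's own source address. */
	if (inet_addr_is_any(src))
		src = (struct sockaddr *)&queue_id->route.addr.src_addr;

	pr_debug("local %pISpc peer %pISpc\n", src,
		 (struct sockaddr *)&queue_id->route.addr.dst_addr);
}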