Lines matching the identifier "ch" (the struct srp_rdma_ch pointer) in the SRP initiator, drivers/infiniband/ulp/srp/ib_srp.c. Each entry shows the source line number, the matched line, and the enclosing function.
303 static int srp_new_ib_cm_id(struct srp_rdma_ch *ch) in srp_new_ib_cm_id() argument
305 struct srp_target_port *target = ch->target; in srp_new_ib_cm_id()
309 srp_ib_cm_handler, ch); in srp_new_ib_cm_id()
313 if (ch->ib_cm.cm_id) in srp_new_ib_cm_id()
314 ib_destroy_cm_id(ch->ib_cm.cm_id); in srp_new_ib_cm_id()
315 ch->ib_cm.cm_id = new_cm_id; in srp_new_ib_cm_id()
318 ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA; in srp_new_ib_cm_id()
320 ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB; in srp_new_ib_cm_id()
321 ch->ib_cm.path.sgid = target->sgid; in srp_new_ib_cm_id()
322 ch->ib_cm.path.dgid = target->ib_cm.orig_dgid; in srp_new_ib_cm_id()
323 ch->ib_cm.path.pkey = target->ib_cm.pkey; in srp_new_ib_cm_id()
324 ch->ib_cm.path.service_id = target->ib_cm.service_id; in srp_new_ib_cm_id()
329 static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch) in srp_new_rdma_cm_id() argument
331 struct srp_target_port *target = ch->target; in srp_new_rdma_cm_id()
335 new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch, in srp_new_rdma_cm_id()
343 init_completion(&ch->done); in srp_new_rdma_cm_id()
353 ret = wait_for_completion_interruptible(&ch->done); in srp_new_rdma_cm_id()
357 ret = ch->status; in srp_new_rdma_cm_id()
364 swap(ch->rdma_cm.cm_id, new_cm_id); in srp_new_rdma_cm_id()
373 static int srp_new_cm_id(struct srp_rdma_ch *ch) in srp_new_cm_id() argument
375 struct srp_target_port *target = ch->target; in srp_new_cm_id()
377 return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) : in srp_new_cm_id()
378 srp_new_ib_cm_id(ch); in srp_new_cm_id()
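
Both creation paths above share a create-then-swap idiom: the replacement CM ID is allocated before the old one is destroyed, so a failed allocation leaves the channel with its previous, still-valid ID. A minimal sketch of the IB variant, assuming the driver's srp_ib_cm_handler callback and its srp_host->srp_dev->dev device chain:

#include <rdma/ib_cm.h>

/* Hedged sketch, not the driver's exact code: allocate the replacement
 * first; only destroy the old CM ID once the new one exists. */
static int example_replace_ib_cm_id(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct ib_cm_id *new_cm_id;

        new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
                                    srp_ib_cm_handler, ch);
        if (IS_ERR(new_cm_id))
                return PTR_ERR(new_cm_id);

        if (ch->ib_cm.cm_id)
                ib_destroy_cm_id(ch->ib_cm.cm_id);
        ch->ib_cm.cm_id = new_cm_id;
        return 0;
}

srp_new_rdma_cm_id() follows the same idiom, but swaps in the new ID only after the asynchronous address resolution has signalled ch->done and ch->status indicates success.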
507 * @ch: SRP RDMA channel.
513 static void srp_destroy_qp(struct srp_rdma_ch *ch) in srp_destroy_qp() argument
515 spin_lock_irq(&ch->lock); in srp_destroy_qp()
516 ib_process_cq_direct(ch->send_cq, -1); in srp_destroy_qp()
517 spin_unlock_irq(&ch->lock); in srp_destroy_qp()
519 ib_drain_qp(ch->qp); in srp_destroy_qp()
520 ib_destroy_qp(ch->qp); in srp_destroy_qp()
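
srp_destroy_qp() encodes a teardown ordering worth noting: the send CQ uses IB_POLL_DIRECT (see srp_create_ch_ib() below), so its completions are only reaped when polled explicitly; that happens under ch->lock before ib_drain_qp() forces every remaining work request to complete. A sketch of the same ordering:

#include <rdma/ib_verbs.h>

/* Hedged sketch mirroring the lines above: reap pending send completions
 * by hand (nobody else polls an IB_POLL_DIRECT CQ), then drain the QP so
 * no completion can arrive for an already-destroyed QP. */
static void example_destroy_qp(struct srp_rdma_ch *ch)
{
        spin_lock_irq(&ch->lock);
        ib_process_cq_direct(ch->send_cq, -1);  /* -1: no budget limit */
        spin_unlock_irq(&ch->lock);

        ib_drain_qp(ch->qp);    /* moves the QP to error, waits for flushes */
        ib_destroy_qp(ch->qp);
}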
523 static int srp_create_ch_ib(struct srp_rdma_ch *ch) in srp_create_ch_ib() argument
525 struct srp_target_port *target = ch->target; in srp_create_ch_ib()
540 recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1, in srp_create_ch_ib()
541 ch->comp_vector, IB_POLL_SOFTIRQ); in srp_create_ch_ib()
547 send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size, in srp_create_ch_ib()
548 ch->comp_vector, IB_POLL_DIRECT); in srp_create_ch_ib()
564 ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U); in srp_create_ch_ib()
567 ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr); in srp_create_ch_ib()
568 qp = ch->rdma_cm.cm_id->qp; in srp_create_ch_ib()
595 if (ch->qp) in srp_create_ch_ib()
596 srp_destroy_qp(ch); in srp_create_ch_ib()
597 if (ch->recv_cq) in srp_create_ch_ib()
598 ib_free_cq(ch->recv_cq); in srp_create_ch_ib()
599 if (ch->send_cq) in srp_create_ch_ib()
600 ib_free_cq(ch->send_cq); in srp_create_ch_ib()
602 ch->qp = qp; in srp_create_ch_ib()
603 ch->recv_cq = recv_cq; in srp_create_ch_ib()
604 ch->send_cq = send_cq; in srp_create_ch_ib()
607 if (ch->fr_pool) in srp_create_ch_ib()
608 srp_destroy_fr_pool(ch->fr_pool); in srp_create_ch_ib()
609 ch->fr_pool = fr_pool; in srp_create_ch_ib()
617 rdma_destroy_qp(ch->rdma_cm.cm_id); in srp_create_ch_ib()
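
srp_create_ch_ib() splits completion handling between two polling contexts: receives are processed in softirq context (IB_POLL_SOFTIRQ), while send completions are reaped inline on the submission path (IB_POLL_DIRECT, via the ib_process_cq_direct() call in __srp_get_tx_iu()). A simplified sketch of the CQ pair, with the driver's "m * target->queue_size" send sizing reduced to a plain queue_size:

/* Hedged sketch with simplified sizing and error handling. */
static int example_alloc_cqs(struct ib_device *ibdev, void *ctx,
                             int queue_size, int comp_vector,
                             struct ib_cq **recv_cq, struct ib_cq **send_cq)
{
        *recv_cq = ib_alloc_cq(ibdev, ctx, queue_size + 1, comp_vector,
                               IB_POLL_SOFTIRQ);
        if (IS_ERR(*recv_cq))
                return PTR_ERR(*recv_cq);

        *send_cq = ib_alloc_cq(ibdev, ctx, queue_size, comp_vector,
                               IB_POLL_DIRECT);
        if (IS_ERR(*send_cq)) {
                ib_free_cq(*recv_cq);
                return PTR_ERR(*send_cq);
        }
        return 0;
}

Direct polling keeps send-completion processing off the interrupt path entirely; the cost is that the submitter must poll before it can reuse a send buffer, which is exactly what __srp_get_tx_iu() does.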
634 * invoked. Hence the ch->[rt]x_ring checks.
637 struct srp_rdma_ch *ch) in srp_free_ch_ib() argument
642 if (!ch->target) in srp_free_ch_ib()
646 if (ch->rdma_cm.cm_id) { in srp_free_ch_ib()
647 rdma_destroy_id(ch->rdma_cm.cm_id); in srp_free_ch_ib()
648 ch->rdma_cm.cm_id = NULL; in srp_free_ch_ib()
651 if (ch->ib_cm.cm_id) { in srp_free_ch_ib()
652 ib_destroy_cm_id(ch->ib_cm.cm_id); in srp_free_ch_ib()
653 ch->ib_cm.cm_id = NULL; in srp_free_ch_ib()
658 if (!ch->qp) in srp_free_ch_ib()
662 if (ch->fr_pool) in srp_free_ch_ib()
663 srp_destroy_fr_pool(ch->fr_pool); in srp_free_ch_ib()
666 srp_destroy_qp(ch); in srp_free_ch_ib()
667 ib_free_cq(ch->send_cq); in srp_free_ch_ib()
668 ib_free_cq(ch->recv_cq); in srp_free_ch_ib()
676 ch->target = NULL; in srp_free_ch_ib()
678 ch->qp = NULL; in srp_free_ch_ib()
679 ch->send_cq = ch->recv_cq = NULL; in srp_free_ch_ib()
681 if (ch->rx_ring) { in srp_free_ch_ib()
683 srp_free_iu(target->srp_host, ch->rx_ring[i]); in srp_free_ch_ib()
684 kfree(ch->rx_ring); in srp_free_ch_ib()
685 ch->rx_ring = NULL; in srp_free_ch_ib()
687 if (ch->tx_ring) { in srp_free_ch_ib()
689 srp_free_iu(target->srp_host, ch->tx_ring[i]); in srp_free_ch_ib()
690 kfree(ch->tx_ring); in srp_free_ch_ib()
691 ch->tx_ring = NULL; in srp_free_ch_ib()
699 struct srp_rdma_ch *ch = ch_ptr; in srp_path_rec_completion() local
700 struct srp_target_port *target = ch->target; in srp_path_rec_completion()
702 ch->status = status; in srp_path_rec_completion()
707 ch->ib_cm.path = *pathrec; in srp_path_rec_completion()
708 complete(&ch->done); in srp_path_rec_completion()
711 static int srp_ib_lookup_path(struct srp_rdma_ch *ch) in srp_ib_lookup_path() argument
713 struct srp_target_port *target = ch->target; in srp_ib_lookup_path()
716 ch->ib_cm.path.numb_path = 1; in srp_ib_lookup_path()
718 init_completion(&ch->done); in srp_ib_lookup_path()
720 ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client, in srp_ib_lookup_path()
723 &ch->ib_cm.path, in srp_ib_lookup_path()
732 ch, &ch->ib_cm.path_query); in srp_ib_lookup_path()
733 if (ch->ib_cm.path_query_id < 0) in srp_ib_lookup_path()
734 return ch->ib_cm.path_query_id; in srp_ib_lookup_path()
736 ret = wait_for_completion_interruptible(&ch->done); in srp_ib_lookup_path()
740 if (ch->status < 0) in srp_ib_lookup_path()
743 ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw, in srp_ib_lookup_path()
747 return ch->status; in srp_ib_lookup_path()
750 static int srp_rdma_lookup_path(struct srp_rdma_ch *ch) in srp_rdma_lookup_path() argument
752 struct srp_target_port *target = ch->target; in srp_rdma_lookup_path()
755 init_completion(&ch->done); in srp_rdma_lookup_path()
757 ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS); in srp_rdma_lookup_path()
761 wait_for_completion_interruptible(&ch->done); in srp_rdma_lookup_path()
763 if (ch->status != 0) in srp_rdma_lookup_path()
767 return ch->status; in srp_rdma_lookup_path()
770 static int srp_lookup_path(struct srp_rdma_ch *ch) in srp_lookup_path() argument
772 struct srp_target_port *target = ch->target; in srp_lookup_path()
774 return target->using_rdma_cm ? srp_rdma_lookup_path(ch) : in srp_lookup_path()
775 srp_ib_lookup_path(ch); in srp_lookup_path()
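
Both lookup variants follow one asynchronous pattern: arm ch->done, start the query (ib_sa_path_rec_get() or rdma_resolve_route()), block interruptibly, then read ch->status, which the completion callback filled in. A generic sketch of that pattern, with the kick-off step abstracted behind a hypothetical submit callback:

#include <linux/completion.h>

/* Hedged sketch: submit() stands in for whichever query-starting call is
 * used; the real callbacks store their result in ch->status and call
 * complete(&ch->done). */
static int example_async_wait(struct srp_rdma_ch *ch,
                              int (*submit)(struct srp_rdma_ch *))
{
        int ret;

        init_completion(&ch->done);
        ret = submit(ch);
        if (ret < 0)
                return ret;

        ret = wait_for_completion_interruptible(&ch->done);
        if (ret < 0)
                return ret;             /* interrupted by a signal */

        return ch->status;              /* set by the completion callback */
}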
795 static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len, in srp_send_req() argument
798 struct srp_target_port *target = ch->target; in srp_send_req()
859 req->ib_param.primary_path = &ch->ib_cm.path; in srp_send_req()
864 req->ib_param.qp_num = ch->qp->qp_num; in srp_send_req()
865 req->ib_param.qp_type = ch->qp->qp_type; in srp_send_req()
911 status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param); in srp_send_req()
913 status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param); in srp_send_req()
939 struct srp_rdma_ch *ch; in srp_disconnect_target() local
945 ch = &target->ch[i]; in srp_disconnect_target()
946 ch->connected = false; in srp_disconnect_target()
949 if (ch->rdma_cm.cm_id) in srp_disconnect_target()
950 rdma_disconnect(ch->rdma_cm.cm_id); in srp_disconnect_target()
952 if (ch->ib_cm.cm_id) in srp_disconnect_target()
953 ret = ib_send_cm_dreq(ch->ib_cm.cm_id, in srp_disconnect_target()
1039 struct srp_rdma_ch *ch; in srp_remove_target() local
1052 ch = &target->ch[i]; in srp_remove_target()
1053 srp_free_ch_ib(target, ch); in srp_remove_target()
1057 kfree(target->ch); in srp_remove_target()
1058 target->ch = NULL; in srp_remove_target()
1093 c += target->ch[i].connected; in srp_connected_ch()
1098 static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len, in srp_connect_ch() argument
1101 struct srp_target_port *target = ch->target; in srp_connect_ch()
1106 ret = srp_lookup_path(ch); in srp_connect_ch()
1111 init_completion(&ch->done); in srp_connect_ch()
1112 ret = srp_send_req(ch, max_iu_len, multich); in srp_connect_ch()
1115 ret = wait_for_completion_interruptible(&ch->done); in srp_connect_ch()
1125 ret = ch->status; in srp_connect_ch()
1128 ch->connected = true; in srp_connect_ch()
1132 ret = srp_lookup_path(ch); in srp_connect_ch()
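
srp_connect_ch() wraps srp_send_req() in a retry loop: a login rejection carrying a redirect (SRP_PORT_REDIRECT or SRP_DLID_REDIRECT, assigned by the rejection handlers further down) rewrites the path record, after which the path is resolved again and the login retried. A condensed sketch, with the stale-connection case omitted:

/* Hedged sketch of the login retry loop. */
static int example_connect(struct srp_rdma_ch *ch, uint32_t max_iu_len,
                           bool multich)
{
        int ret = srp_lookup_path(ch);

        if (ret)
                return ret;

        for (;;) {
                init_completion(&ch->done);
                ret = srp_send_req(ch, max_iu_len, multich);
                if (ret)
                        return ret;
                ret = wait_for_completion_interruptible(&ch->done);
                if (ret < 0)
                        return ret;

                switch (ch->status) {
                case 0:                         /* login accepted */
                        ch->connected = true;
                        return 0;
                case SRP_PORT_REDIRECT:
                case SRP_DLID_REDIRECT:
                        ret = srp_lookup_path(ch);      /* follow redirect */
                        if (ret)
                                return ret;
                        break;
                default:
                        return ch->status;
                }
        }
}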
1160 static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch, in srp_inv_rkey() argument
1173 return ib_post_send(ch->qp, &wr, NULL); in srp_inv_rkey()
1177 struct srp_rdma_ch *ch, in srp_unmap_data() argument
1180 struct srp_target_port *target = ch->target; in srp_unmap_data()
1194 res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey); in srp_unmap_data()
1204 srp_fr_pool_put(ch->fr_pool, req->fr_list, in srp_unmap_data()
1214 * @ch: SRP RDMA channel.
1223 static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch, in srp_claim_req() argument
1230 spin_lock_irqsave(&ch->lock, flags); in srp_claim_req()
1239 spin_unlock_irqrestore(&ch->lock, flags); in srp_claim_req()
1245 * srp_free_req() - Unmap data and adjust ch->req_lim.
1246 * @ch: SRP RDMA channel.
1251 static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req, in srp_free_req() argument
1256 srp_unmap_data(scmnd, ch, req); in srp_free_req()
1258 spin_lock_irqsave(&ch->lock, flags); in srp_free_req()
1259 ch->req_lim += req_lim_delta; in srp_free_req()
1260 spin_unlock_irqrestore(&ch->lock, flags); in srp_free_req()
1263 static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req, in srp_finish_req() argument
1266 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL); in srp_finish_req()
1269 srp_free_req(ch, req, scmnd, 0); in srp_finish_req()
1285 struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)]; in srp_terminate_cmd() local
1288 srp_finish_req(ch, req, NULL, context->scsi_result); in srp_terminate_cmd()
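
srp_terminate_cmd() and srp_queuecommand() both recover the channel from the command's block-layer tag: the unique tag packs the hardware-queue index in its upper bits, and each hardware queue maps 1:1 onto one srp_rdma_ch. A sketch of the decoding; scsi_cmd_to_rq() and the blk-mq helpers are standard kernel API, the surrounding names follow the driver:

#include <linux/blk-mq.h>
#include <scsi/scsi_cmnd.h>

static struct srp_rdma_ch *example_cmd_to_ch(struct srp_target_port *target,
                                             struct scsi_cmnd *scmnd)
{
        u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));

        /* upper bits select the hardware queue, i.e. the channel;
         * blk_mq_unique_tag_to_tag(tag) would give the per-queue slot */
        return &target->ch[blk_mq_unique_tag_to_hwq(tag)];
}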
1334 struct srp_rdma_ch *ch; in srp_rport_reconnect() local
1352 ch = &target->ch[i]; in srp_rport_reconnect()
1353 ret += srp_new_cm_id(ch); in srp_rport_reconnect()
1363 ch = &target->ch[i]; in srp_rport_reconnect()
1369 ret += srp_create_ch_ib(ch); in srp_rport_reconnect()
1371 INIT_LIST_HEAD(&ch->free_tx); in srp_rport_reconnect()
1373 list_add(&ch->tx_ring[j]->list, &ch->free_tx); in srp_rport_reconnect()
1379 ch = &target->ch[i]; in srp_rport_reconnect()
1382 ret = srp_connect_ch(ch, max_iu_len, multich); in srp_rport_reconnect()
1422 struct srp_rdma_ch *ch, int sg_nents, in srp_map_finish_fr() argument
1425 struct srp_target_port *target = ch->target; in srp_map_finish_fr()
1433 shost_printk(KERN_ERR, ch->target->scsi_host, in srp_map_finish_fr()
1435 ch->target->mr_per_cmd); in srp_map_finish_fr()
1452 desc = srp_fr_pool_get(ch->fr_pool); in srp_map_finish_fr()
1462 srp_fr_pool_put(ch->fr_pool, &desc, 1); in srp_map_finish_fr()
1490 err = ib_post_send(ch->qp, &wr.wr, NULL); in srp_map_finish_fr()
1499 static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch, in srp_map_sg_fr() argument
1506 state->fr.end = req->fr_list + ch->target->mr_per_cmd; in srp_map_sg_fr()
1515 n = srp_map_finish_fr(state, req, ch, count, &sg_offset); in srp_map_sg_fr()
1527 static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch, in srp_map_sg_dma() argument
1531 struct srp_target_port *target = ch->target; in srp_map_sg_dma()
1550 static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req, in srp_map_idb() argument
1554 struct srp_target_port *target = ch->target; in srp_map_idb()
1576 ret = srp_map_finish_fr(&state, req, ch, 1, NULL); in srp_map_idb()
1590 struct srp_rdma_ch *ch, struct srp_request *req, in srp_check_mapping() argument
1593 struct srp_device *dev = ch->target->srp_host->srp_dev; in srp_check_mapping()
1613 * @ch: SRP RDMA channel
1620 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, in srp_map_data() argument
1623 struct srp_target_port *target = ch->target; in srp_map_data()
1660 if (ch->use_imm_data && in srp_map_data()
1661 count <= ch->max_imm_sge && in srp_map_data()
1662 SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len && in srp_map_data()
1716 ret = srp_map_sg_fr(&state, ch, req, scat, count); in srp_map_data()
1718 ret = srp_map_sg_dma(&state, ch, req, scat, count); in srp_map_data()
1727 srp_check_mapping(&state, ch, req, scat, count); in srp_map_data()
1769 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end, in srp_map_data()
1800 srp_unmap_data(scmnd, ch, req); in srp_map_data()
1809 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu, in srp_put_tx_iu() argument
1814 spin_lock_irqsave(&ch->lock, flags); in srp_put_tx_iu()
1815 list_add(&iu->list, &ch->free_tx); in srp_put_tx_iu()
1817 ++ch->req_lim; in srp_put_tx_iu()
1818 spin_unlock_irqrestore(&ch->lock, flags); in srp_put_tx_iu()
1822 * Must be called with ch->lock held to protect req_lim and free_tx.
1834 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch, in __srp_get_tx_iu() argument
1837 struct srp_target_port *target = ch->target; in __srp_get_tx_iu()
1841 lockdep_assert_held(&ch->lock); in __srp_get_tx_iu()
1843 ib_process_cq_direct(ch->send_cq, -1); in __srp_get_tx_iu()
1845 if (list_empty(&ch->free_tx)) in __srp_get_tx_iu()
1850 if (ch->req_lim <= rsv) { in __srp_get_tx_iu()
1855 --ch->req_lim; in __srp_get_tx_iu()
1858 iu = list_first_entry(&ch->free_tx, struct srp_iu, list); in __srp_get_tx_iu()
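
__srp_get_tx_iu() implements SRP's credit-based flow control: ch->req_lim counts request-limit credits granted by the target and replenished from each response's req_lim_delta (see srp_process_rsp() below). Ordinary IUs must leave a few credits in reserve so a task-management request can always be sent, and responses consume no credit at all. A sketch of just the admission check, assuming the driver's SRP_TSK_MGMT_SQ_SIZE reserve; call with ch->lock held:

/* Hedged sketch of the credit check only (buffer handling omitted). */
static bool example_may_send(struct srp_rdma_ch *ch, enum srp_iu_type type)
{
        s32 rsv = (type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;

        lockdep_assert_held(&ch->lock);

        if (list_empty(&ch->free_tx))
                return false;           /* no free send buffer */

        if (type != SRP_IU_RSP) {
                if (ch->req_lim <= rsv)
                        return false;   /* would eat the TM reserve */
                --ch->req_lim;          /* consume one credit */
        }
        return true;
}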
1865 * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
1871 struct srp_rdma_ch *ch = cq->cq_context; in srp_send_done() local
1878 lockdep_assert_held(&ch->lock); in srp_send_done()
1880 list_add(&iu->list, &ch->free_tx); in srp_send_done()
1885 * @ch: RDMA channel over which to send the information unit.
1889 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len) in srp_post_send() argument
1891 struct srp_target_port *target = ch->target; in srp_post_send()
1910 return ib_post_send(ch->qp, &wr, NULL); in srp_post_send()
1913 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu) in srp_post_recv() argument
1915 struct srp_target_port *target = ch->target; in srp_post_recv()
1930 return ib_post_recv(ch->qp, &wr, NULL); in srp_post_recv()
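
srp_post_send() and srp_post_recv() are thin single-SGE wrappers around the verbs posting calls. A sketch of the receive side, assuming the iu->dma / iu->size / iu->cqe fields of the driver's struct srp_iu and the PD's local DMA lkey; the ib_cqe hook is what routes the completion to srp_recv_done():

/* Hedged sketch of a single-SGE receive post. */
static int example_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu,
                             u32 lkey)
{
        struct ib_sge list = {
                .addr   = iu->dma,
                .length = iu->size,
                .lkey   = lkey,
        };
        struct ib_recv_wr wr = {
                .sg_list = &list,
                .num_sge = 1,
        };

        iu->cqe.done = srp_recv_done;   /* completion handler for this WR */
        wr.wr_cqe = &iu->cqe;

        return ib_post_recv(ch->qp, &wr, NULL);
}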
1933 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp) in srp_process_rsp() argument
1935 struct srp_target_port *target = ch->target; in srp_process_rsp()
1941 spin_lock_irqsave(&ch->lock, flags); in srp_process_rsp()
1942 ch->req_lim += be32_to_cpu(rsp->req_lim_delta); in srp_process_rsp()
1943 if (rsp->tag == ch->tsk_mgmt_tag) { in srp_process_rsp()
1944 ch->tsk_mgmt_status = -1; in srp_process_rsp()
1946 ch->tsk_mgmt_status = rsp->data[3]; in srp_process_rsp()
1947 complete(&ch->tsk_mgmt_done); in srp_process_rsp()
1953 spin_unlock_irqrestore(&ch->lock, flags); in srp_process_rsp()
1958 scmnd = srp_claim_req(ch, req, NULL, scmnd); in srp_process_rsp()
1962 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n", in srp_process_rsp()
1963 rsp->tag, ch - target->ch, ch->qp->qp_num); in srp_process_rsp()
1965 spin_lock_irqsave(&ch->lock, flags); in srp_process_rsp()
1966 ch->req_lim += be32_to_cpu(rsp->req_lim_delta); in srp_process_rsp()
1967 spin_unlock_irqrestore(&ch->lock, flags); in srp_process_rsp()
1985 srp_free_req(ch, req, scmnd, in srp_process_rsp()
1992 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta, in srp_response_common() argument
1995 struct srp_target_port *target = ch->target; in srp_response_common()
2001 spin_lock_irqsave(&ch->lock, flags); in srp_response_common()
2002 ch->req_lim += req_delta; in srp_response_common()
2003 iu = __srp_get_tx_iu(ch, SRP_IU_RSP); in srp_response_common()
2004 spin_unlock_irqrestore(&ch->lock, flags); in srp_response_common()
2017 err = srp_post_send(ch, iu, len); in srp_response_common()
2021 srp_put_tx_iu(ch, iu, SRP_IU_RSP); in srp_response_common()
2027 static void srp_process_cred_req(struct srp_rdma_ch *ch, in srp_process_cred_req() argument
2036 if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) in srp_process_cred_req()
2037 shost_printk(KERN_ERR, ch->target->scsi_host, PFX in srp_process_cred_req()
2041 static void srp_process_aer_req(struct srp_rdma_ch *ch, in srp_process_aer_req() argument
2044 struct srp_target_port *target = ch->target; in srp_process_aer_req()
2054 if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) in srp_process_aer_req()
2062 struct srp_rdma_ch *ch = cq->cq_context; in srp_recv_done() local
2063 struct srp_target_port *target = ch->target; in srp_recv_done()
2073 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len, in srp_recv_done()
2087 srp_process_rsp(ch, iu->buf); in srp_recv_done()
2091 srp_process_cred_req(ch, iu->buf); in srp_recv_done()
2095 srp_process_aer_req(ch, iu->buf); in srp_recv_done()
2110 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len, in srp_recv_done()
2113 res = srp_post_recv(ch, iu); in srp_recv_done()
2138 struct srp_rdma_ch *ch = cq->cq_context; in srp_handle_qp_err() local
2139 struct srp_target_port *target = ch->target; in srp_handle_qp_err()
2141 if (ch->connected && !target->qp_in_error) { in srp_handle_qp_err()
2155 struct srp_rdma_ch *ch; in srp_queuecommand() local
2170 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)]; in srp_queuecommand()
2172 spin_lock_irqsave(&ch->lock, flags); in srp_queuecommand()
2173 iu = __srp_get_tx_iu(ch, SRP_IU_CMD); in srp_queuecommand()
2174 spin_unlock_irqrestore(&ch->lock, flags); in srp_queuecommand()
2180 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len, in srp_queuecommand()
2200 len = srp_map_data(scmnd, ch, req); in srp_queuecommand()
2215 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len, in srp_queuecommand()
2218 if (srp_post_send(ch, iu, len)) { in srp_queuecommand()
2227 srp_unmap_data(scmnd, ch, req); in srp_queuecommand()
2230 srp_put_tx_iu(ch, iu, SRP_IU_CMD); in srp_queuecommand()
2253 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch) in srp_alloc_iu_bufs() argument
2255 struct srp_target_port *target = ch->target; in srp_alloc_iu_bufs()
2258 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring), in srp_alloc_iu_bufs()
2260 if (!ch->rx_ring) in srp_alloc_iu_bufs()
2262 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring), in srp_alloc_iu_bufs()
2264 if (!ch->tx_ring) in srp_alloc_iu_bufs()
2268 ch->rx_ring[i] = srp_alloc_iu(target->srp_host, in srp_alloc_iu_bufs()
2269 ch->max_ti_iu_len, in srp_alloc_iu_bufs()
2271 if (!ch->rx_ring[i]) in srp_alloc_iu_bufs()
2276 ch->tx_ring[i] = srp_alloc_iu(target->srp_host, in srp_alloc_iu_bufs()
2277 ch->max_it_iu_len, in srp_alloc_iu_bufs()
2279 if (!ch->tx_ring[i]) in srp_alloc_iu_bufs()
2282 list_add(&ch->tx_ring[i]->list, &ch->free_tx); in srp_alloc_iu_bufs()
2289 srp_free_iu(target->srp_host, ch->rx_ring[i]); in srp_alloc_iu_bufs()
2290 srp_free_iu(target->srp_host, ch->tx_ring[i]); in srp_alloc_iu_bufs()
2295 kfree(ch->tx_ring); in srp_alloc_iu_bufs()
2296 ch->tx_ring = NULL; in srp_alloc_iu_bufs()
2297 kfree(ch->rx_ring); in srp_alloc_iu_bufs()
2298 ch->rx_ring = NULL; in srp_alloc_iu_bufs()
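
srp_alloc_iu_bufs() builds both IU rings and seeds ch->free_tx. A condensed variant keeping the essentials: rx IUs sized for target-to-initiator traffic (ch->max_ti_iu_len, DMA_FROM_DEVICE), tx IUs for the opposite direction, and an unwind path that relies on srp_free_iu() and kfree() tolerating NULL:

/* Hedged, condensed sketch of the ring setup and its unwind path. */
static int example_alloc_rings(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        int i;

        ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
                              GFP_KERNEL);
        ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
                              GFP_KERNEL);
        if (!ch->rx_ring || !ch->tx_ring)
                goto err;

        for (i = 0; i < target->queue_size; ++i) {
                ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
                                              ch->max_ti_iu_len,
                                              GFP_KERNEL, DMA_FROM_DEVICE);
                ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
                                              ch->max_it_iu_len,
                                              GFP_KERNEL, DMA_TO_DEVICE);
                if (!ch->rx_ring[i] || !ch->tx_ring[i])
                        goto err;
                list_add(&ch->tx_ring[i]->list, &ch->free_tx);
        }
        return 0;

err:
        for (i = 0; i < target->queue_size; ++i) {
                if (ch->rx_ring)
                        srp_free_iu(target->srp_host, ch->rx_ring[i]);
                if (ch->tx_ring)
                        srp_free_iu(target->srp_host, ch->tx_ring[i]);
        }
        kfree(ch->rx_ring);
        ch->rx_ring = NULL;
        kfree(ch->tx_ring);
        ch->tx_ring = NULL;
        return -ENOMEM;
}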
2332 struct srp_rdma_ch *ch) in srp_cm_rep_handler() argument
2334 struct srp_target_port *target = ch->target; in srp_cm_rep_handler()
2341 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len); in srp_cm_rep_handler()
2342 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta); in srp_cm_rep_handler()
2343 ch->use_imm_data = srp_use_imm_data && in srp_cm_rep_handler()
2345 ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, in srp_cm_rep_handler()
2346 ch->use_imm_data, in srp_cm_rep_handler()
2348 WARN_ON_ONCE(ch->max_it_iu_len > in srp_cm_rep_handler()
2351 if (ch->use_imm_data) in srp_cm_rep_handler()
2360 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE, in srp_cm_rep_handler()
2372 if (!ch->rx_ring) { in srp_cm_rep_handler()
2373 ret = srp_alloc_iu_bufs(ch); in srp_cm_rep_handler()
2379 struct srp_iu *iu = ch->rx_ring[i]; in srp_cm_rep_handler()
2381 ret = srp_post_recv(ch, iu); in srp_cm_rep_handler()
2397 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); in srp_cm_rep_handler()
2408 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); in srp_cm_rep_handler()
2419 ch->status = ret; in srp_cm_rep_handler()
2424 struct srp_rdma_ch *ch) in srp_ib_cm_rej_handler() argument
2426 struct srp_target_port *target = ch->target; in srp_ib_cm_rej_handler()
2436 sa_path_set_dlid(&ch->ib_cm.path, dlid); in srp_ib_cm_rej_handler()
2437 ch->ib_cm.path.pkey = cpi->redirect_pkey; in srp_ib_cm_rej_handler()
2439 memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16); in srp_ib_cm_rej_handler()
2441 ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT; in srp_ib_cm_rej_handler()
2446 union ib_gid *dgid = &ch->ib_cm.path.dgid; in srp_ib_cm_rej_handler()
2460 ch->status = SRP_PORT_REDIRECT; in srp_ib_cm_rej_handler()
2464 ch->status = -ECONNRESET; in srp_ib_cm_rej_handler()
2471 ch->status = -ECONNRESET; in srp_ib_cm_rej_handler()
2493 ch->status = -ECONNRESET; in srp_ib_cm_rej_handler()
2498 ch->status = SRP_STALE_CONN; in srp_ib_cm_rej_handler()
2504 ch->status = -ECONNRESET; in srp_ib_cm_rej_handler()
2511 struct srp_rdma_ch *ch = cm_id->context; in srp_ib_cm_handler() local
2512 struct srp_target_port *target = ch->target; in srp_ib_cm_handler()
2520 ch->status = -ECONNRESET; in srp_ib_cm_handler()
2525 srp_cm_rep_handler(cm_id, event->private_data, ch); in srp_ib_cm_handler()
2532 srp_ib_cm_rej_handler(cm_id, event, ch); in srp_ib_cm_handler()
2538 ch->connected = false; in srp_ib_cm_handler()
2550 ch->status = 0; in srp_ib_cm_handler()
2565 complete(&ch->done); in srp_ib_cm_handler()
2570 static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch, in srp_rdma_cm_rej_handler() argument
2573 struct srp_target_port *target = ch->target; in srp_rdma_cm_rej_handler()
2581 ch->status = -ECONNRESET; in srp_rdma_cm_rej_handler()
2603 ch->status = -ECONNRESET; in srp_rdma_cm_rej_handler()
2609 ch->status = SRP_STALE_CONN; in srp_rdma_cm_rej_handler()
2615 ch->status = -ECONNRESET; in srp_rdma_cm_rej_handler()
2623 struct srp_rdma_ch *ch = cm_id->context; in srp_rdma_cm_handler() local
2624 struct srp_target_port *target = ch->target; in srp_rdma_cm_handler()
2629 ch->status = 0; in srp_rdma_cm_handler()
2634 ch->status = -ENXIO; in srp_rdma_cm_handler()
2639 ch->status = 0; in srp_rdma_cm_handler()
2645 ch->status = -EHOSTUNREACH; in srp_rdma_cm_handler()
2653 ch->status = -ECONNRESET; in srp_rdma_cm_handler()
2658 srp_cm_rep_handler(NULL, event->param.conn.private_data, ch); in srp_rdma_cm_handler()
2665 srp_rdma_cm_rej_handler(ch, event); in srp_rdma_cm_handler()
2669 if (ch->connected) { in srp_rdma_cm_handler()
2672 rdma_disconnect(ch->rdma_cm.cm_id); in srp_rdma_cm_handler()
2674 ch->status = 0; in srp_rdma_cm_handler()
2684 ch->status = 0; in srp_rdma_cm_handler()
2694 complete(&ch->done); in srp_rdma_cm_handler()
2714 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun, in srp_send_tsk_mgmt() argument
2717 struct srp_target_port *target = ch->target; in srp_send_tsk_mgmt()
2724 if (!ch->connected || target->qp_in_error) in srp_send_tsk_mgmt()
2732 spin_lock_irq(&ch->lock); in srp_send_tsk_mgmt()
2733 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT); in srp_send_tsk_mgmt()
2734 spin_unlock_irq(&ch->lock); in srp_send_tsk_mgmt()
2754 spin_lock_irq(&ch->lock); in srp_send_tsk_mgmt()
2755 ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT; in srp_send_tsk_mgmt()
2756 tsk_mgmt->tag = ch->tsk_mgmt_tag; in srp_send_tsk_mgmt()
2757 spin_unlock_irq(&ch->lock); in srp_send_tsk_mgmt()
2759 init_completion(&ch->tsk_mgmt_done); in srp_send_tsk_mgmt()
2763 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) { in srp_send_tsk_mgmt()
2764 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT); in srp_send_tsk_mgmt()
2769 res = wait_for_completion_timeout(&ch->tsk_mgmt_done, in srp_send_tsk_mgmt()
2772 *status = ch->tsk_mgmt_status; in srp_send_tsk_mgmt()
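
srp_send_tsk_mgmt() distinguishes task-management responses from command responses purely by tag: OR-ing SRP_TAG_TSK_MGMT into ch->tsk_mgmt_tag lets srp_process_rsp() match the reply (the rsp->tag == ch->tsk_mgmt_tag test above), and the wait is bounded because a dead target may never answer. A condensed sketch; the 60-second timeout is an assumption, not necessarily the driver's constant:

/* Hedged sketch of the TM tag/wait handshake (IU setup omitted). */
static int example_tsk_mgmt_wait(struct srp_rdma_ch *ch, struct srp_iu *iu,
                                 struct srp_tsk_mgmt *tsk_mgmt, u8 *status)
{
        unsigned long res;

        spin_lock_irq(&ch->lock);
        /* the SRP_TAG_TSK_MGMT bit marks this tag as task management */
        ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
        tsk_mgmt->tag = ch->tsk_mgmt_tag;
        spin_unlock_irq(&ch->lock);

        init_completion(&ch->tsk_mgmt_done);
        if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
                srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
                return -1;
        }

        res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
                                          msecs_to_jiffies(60 * 1000));
        if (res == 0)
                return -1;              /* timed out: no TM response */

        *status = ch->tsk_mgmt_status;
        return 0;
}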
2786 struct srp_rdma_ch *ch; in srp_abort() local
2794 ch = &target->ch[ch_idx]; in srp_abort()
2795 if (!srp_claim_req(ch, req, NULL, scmnd)) in srp_abort()
2799 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun, in srp_abort()
2801 srp_free_req(ch, req, scmnd, 0); in srp_abort()
2813 struct srp_rdma_ch *ch; in srp_reset_device() local
2818 ch = &target->ch[0]; in srp_reset_device()
2819 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun, in srp_reset_device()
2923 struct srp_rdma_ch *ch = &target->ch[0]; in dgid_show() local
2928 return sysfs_emit(buf, "%pI6\n", ch->ib_cm.path.dgid.raw); in dgid_show()
2950 struct srp_rdma_ch *ch; in req_lim_show() local
2954 ch = &target->ch[i]; in req_lim_show()
2955 req_lim = min(req_lim, ch->req_lim); in req_lim_show()
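
The sysfs attributes reduce per-channel state to one scalar; req_lim_show() reports the minimum credit count across channels, i.e. the most conservative view of how much the target will still accept. A sketch, assuming the driver's host_to_target() accessor:

/* Hedged sketch of the attribute's show routine. */
static ssize_t example_req_lim_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct srp_target_port *target = host_to_target(class_to_shost(dev));
        int i, req_lim = INT_MAX;

        for (i = 0; i < target->ch_count; i++)
                req_lim = min(req_lim, target->ch[i].req_lim);

        return sysfs_emit(buf, "%d\n", req_lim);
}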
3690 struct srp_rdma_ch *ch; in add_target_store() local
3825 target->ch = kcalloc(target->ch_count, sizeof(*target->ch), in add_target_store()
3827 if (!target->ch) in add_target_store()
3831 ch = &target->ch[ch_idx]; in add_target_store()
3832 ch->target = target; in add_target_store()
3833 ch->comp_vector = ch_idx % ibdev->num_comp_vectors; in add_target_store()
3834 spin_lock_init(&ch->lock); in add_target_store()
3835 INIT_LIST_HEAD(&ch->free_tx); in add_target_store()
3836 ret = srp_new_cm_id(ch); in add_target_store()
3840 ret = srp_create_ch_ib(ch); in add_target_store()
3844 ret = srp_connect_ch(ch, max_iu_len, multich); in add_target_store()
3861 srp_free_ch_ib(target, ch); in add_target_store()
3862 target->ch_count = ch - target->ch; in add_target_store()
3920 ch = &target->ch[i]; in add_target_store()
3921 srp_free_ch_ib(target, ch); in add_target_store()
3924 kfree(target->ch); in add_target_store()
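
add_target_store() sizes target->ch by ch_count and spreads the channels' completion vectors round-robin over the device's vectors, so per-channel interrupt load lands on different CPUs. A sketch of just that initialization loop, mirroring the lines above:

/* Hedged sketch of per-channel setup before CM ID / QP creation. */
static int example_init_channels(struct srp_target_port *target,
                                 struct ib_device *ibdev)
{
        int ch_idx;

        target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
                             GFP_KERNEL);
        if (!target->ch)
                return -ENOMEM;

        for (ch_idx = 0; ch_idx < target->ch_count; ch_idx++) {
                struct srp_rdma_ch *ch = &target->ch[ch_idx];

                ch->target = target;
                /* round-robin over the HCA's completion vectors */
                ch->comp_vector = ch_idx % ibdev->num_comp_vectors;
                spin_lock_init(&ch->lock);
                INIT_LIST_HEAD(&ch->free_tx);
        }
        return 0;
}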