/linux/drivers/infiniband/core/

uverbs_std_types_qp.c
     95  struct ib_cq *send_cq = NULL;  in UVERBS_HANDLER() local
    168  send_cq = uverbs_attr_get_obj(attrs,  in UVERBS_HANDLER()
    170  if (IS_ERR(send_cq))  in UVERBS_HANDLER()
    171  return PTR_ERR(send_cq);  in UVERBS_HANDLER()
    175  send_cq = uverbs_attr_get_obj(attrs,  in UVERBS_HANDLER()
    177  if (IS_ERR(send_cq))  in UVERBS_HANDLER()
    178  return PTR_ERR(send_cq);  in UVERBS_HANDLER()
    234  attr.send_cq = send_cq;  in UVERBS_HANDLER()

verbs.c
   1190  qp->send_cq = qp->recv_cq = NULL;  in create_xrc_qp_user()
   1244  qp->send_cq = attr->send_cq;  in create_qp()
   1258  qp->send_cq = attr->send_cq;  in create_qp()
   1318  if (qp->send_cq)  in ib_qp_usecnt_inc()
   1319  atomic_inc(&qp->send_cq->usecnt);  in ib_qp_usecnt_inc()
   1337  if (qp->send_cq)  in ib_qp_usecnt_dec()
   1338  atomic_dec(&qp->send_cq->usecnt);  in ib_qp_usecnt_dec()
   2831  struct ib_cq *cq = qp->send_cq;  in __ib_drain_sq()
   2990  trace_cq_drain_complete(qp->send_cq);  in ib_drain_sq()

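The __ib_drain_sq()/ib_drain_sq() hits above are the core's way of flushing a send queue through its send_cq before teardown. A minimal caller-side sketch, assuming a QP created with ib_create_qp(); my_teardown_qp() is a hypothetical helper, not part of verbs.c:

#include <rdma/ib_verbs.h>

/* Hypothetical teardown helper, not from verbs.c itself. */
static void my_teardown_qp(struct ib_qp *qp)
{
	/*
	 * The generic ib_drain_sq() moves the QP to the error state, posts
	 * a marker work request and waits until its completion shows up on
	 * qp->send_cq, so every earlier SQ completion has been reaped.
	 */
	ib_drain_sq(qp);
	ib_destroy_qp(qp);	/* drops the usecnt taken in ib_qp_usecnt_inc() */
}
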
/linux/drivers/infiniband/ulp/ipoib/

ipoib_verbs.c
    187  priv->send_cq = ib_create_cq(priv->ca, ipoib_ib_tx_completion, NULL,  in ipoib_transport_dev_init()
    189  if (IS_ERR(priv->send_cq)) {  in ipoib_transport_dev_init()
    197  init_attr.send_cq = priv->send_cq;  in ipoib_transport_dev_init()
    218  if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))  in ipoib_transport_dev_init()
    244  ib_destroy_cq(priv->send_cq);  in ipoib_transport_dev_init()
    266  ib_destroy_cq(priv->send_cq);  in ipoib_transport_dev_cleanup()

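The ipoib_verbs.c hits show the usual shape of send-CQ setup: create the CQ, point ib_qp_init_attr.send_cq at it, arm it once, and destroy it on the cleanup paths. A hedged, self-contained sketch of that shape; the queue sizes, my_tx_completion() and my_create_tx_resources() are illustrative, not ipoib's own:

#include <rdma/ib_verbs.h>

/* Illustrative completion handler; a real driver kicks NAPI or a workqueue. */
static void my_tx_completion(struct ib_cq *cq, void *ctx)
{
}

static int my_create_tx_resources(struct ib_device *ca, struct ib_pd *pd,
				  struct ib_cq **send_cq_out,
				  struct ib_qp **qp_out)
{
	struct ib_cq_init_attr cq_attr = { .cqe = 128 };
	struct ib_qp_init_attr init_attr = {
		.cap = {
			.max_send_wr  = 64,
			.max_send_sge = 1,
			.max_recv_wr  = 64,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type     = IB_QPT_UD,
	};
	struct ib_cq *send_cq;
	struct ib_qp *qp;
	int ret;

	send_cq = ib_create_cq(ca, my_tx_completion, NULL, NULL, &cq_attr);
	if (IS_ERR(send_cq))
		return PTR_ERR(send_cq);

	init_attr.send_cq = send_cq;
	init_attr.recv_cq = send_cq;	/* a real driver usually has a separate recv CQ */

	qp = ib_create_qp(pd, &init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_cq;
	}

	/* Arm the CQ so my_tx_completion() fires on the next completion. */
	ret = ib_req_notify_cq(send_cq, IB_CQ_NEXT_COMP);
	if (ret)
		goto err_qp;

	*send_cq_out = send_cq;
	*qp_out = qp;
	return 0;

err_qp:
	ib_destroy_qp(qp);
err_cq:
	ib_destroy_cq(send_cq);
	return ret;
}
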
ipoib_ib.c
    439  n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);  in poll_tx()
    507  n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);  in ipoib_tx_poll()
    519  if (unlikely(ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |  in ipoib_tx_poll()
    673  if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |  in ipoib_send()

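The ipoib_ib.c hits are the poll/re-arm half of the pattern: drain completed send WRs from send_cq, then re-arm and re-poll to close the race with completions that arrived in between. A sketch under those assumptions; MY_SEND_BUDGET and my_handle_tx_wc() are placeholders for the driver's own names:

#include <rdma/ib_verbs.h>

#define MY_SEND_BUDGET 16	/* stand-in for ipoib's MAX_SEND_CQE */

/* Illustrative per-completion cleanup (unmap DMA, free the skb, ...). */
static void my_handle_tx_wc(struct ib_wc *wc)
{
}

static void my_reap_send_completions(struct ib_cq *send_cq)
{
	struct ib_wc wc[MY_SEND_BUDGET];
	int n, i;

repoll:
	while ((n = ib_poll_cq(send_cq, MY_SEND_BUDGET, wc)) > 0)
		for (i = 0; i < n; i++)
			my_handle_tx_wc(&wc[i]);

	/*
	 * Re-arm the CQ.  With IB_CQ_REPORT_MISSED_EVENTS a positive return
	 * means completions slipped in before the arm took effect, so poll
	 * again instead of waiting for an interrupt that may never come.
	 */
	if (ib_req_notify_cq(send_cq, IB_CQ_NEXT_COMP |
				      IB_CQ_REPORT_MISSED_EVENTS) > 0)
		goto repoll;
}
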
ipoib_cm.c
    255  .send_cq = priv->recv_cq, /* For drain WR */  in ipoib_cm_create_rx_qp()
    770  rc = ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |  in ipoib_cm_send()
   1061  .send_cq = priv->send_cq,  in ipoib_cm_create_tx_qp()

/linux/drivers/infiniband/hw/mthca/

mthca_qp.c
    735  qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);  in __mthca_modify_qp()
    837  if (qp->ibqp.send_cq != qp->ibqp.recv_cq)  in __mthca_modify_qp()
    838  mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);  in __mthca_modify_qp()
   1164  struct mthca_cq *send_cq,  in mthca_alloc_qp_common() argument
   1293  struct mthca_cq *send_cq,  in mthca_alloc_qp() argument
   1321  err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,  in mthca_alloc_qp()
   1336  static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)  in mthca_lock_cqs() argument
   1337  __acquires(&send_cq->lock) __acquires(&recv_cq->lock)  in mthca_lock_cqs()
   1339  if (send_cq == recv_cq) {  in mthca_lock_cqs()
   1340  spin_lock_irq(&send_cq->lock);  in mthca_lock_cqs()
    [all …]

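The mthca_lock_cqs() hit (and the mlx4_ib_lock_cqs() hits further down) illustrate the two-CQ locking discipline hardware drivers use when a QP's send and receive CQs differ: take the locks in a stable order (by CQ number) so concurrent paths cannot deadlock, and lock only once when both pointers name the same CQ. A generic sketch of that pattern, with struct my_cq standing in for the driver's CQ structure:

#include <linux/types.h>
#include <linux/spinlock.h>

struct my_cq {
	u32 cqn;		/* hardware CQ number, gives a total order */
	spinlock_t lock;
};

static void my_lock_cqs(struct my_cq *send_cq, struct my_cq *recv_cq)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

static void my_unlock_cqs(struct my_cq *send_cq, struct my_cq *recv_cq)
{
	if (send_cq == recv_cq) {
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
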
mthca_dev.h
    535  struct mthca_cq *send_cq,
    544  struct mthca_cq *send_cq,

/linux/include/rdma/

rdmavt_qp.h
    806  struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);  in rvt_send_cq()
    916  * @send_cq - The cq for send
    921  static inline u32 ib_cq_tail(struct ib_cq *send_cq)  in ib_cq_tail()
    923  struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);  in ib_cq_tail()
    925  return ibcq_to_rvtcq(send_cq)->ip ?  in ib_cq_tail()
    927  ibcq_to_rvtcq(send_cq)->kqueue->tail;  in ib_cq_tail()
    932  * @send_cq - The cq for send
    937  static inline u32 ib_cq_head(struct ib_cq *send_cq)  in ib_cq_head()
    939  struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);  in ib_cq_head()
    941  return ibcq_to_rvtcq(send_cq)  in ib_cq_head()
    920  ib_cq_tail(struct ib_cq * send_cq)  ib_cq_tail() argument
    936  ib_cq_head(struct ib_cq * send_cq)  ib_cq_head() argument
    [all …]

/linux/drivers/infiniband/hw/mana/

qp.c
    270  struct mana_ib_cq *send_cq =  in mana_ib_create_qp_raw() local
    271  container_of(attr->send_cq, struct mana_ib_cq, ibcq);  in mana_ib_create_qp_raw()
    340  cq_spec.gdma_region = send_cq->queue.gdma_region;  in mana_ib_create_qp_raw()
    341  cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;  in mana_ib_create_qp_raw()
    343  eq_vec = send_cq->comp_vector;  in mana_ib_create_qp_raw()
    358  send_cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;  in mana_ib_create_qp_raw()
    361  send_cq->queue.id = cq_spec.queue_index;  in mana_ib_create_qp_raw()
    364  err = mana_ib_install_cq_cb(mdev, send_cq);  in mana_ib_create_qp_raw()
    370  qp->qp_handle, qp->raw_sq.id, send_cq->queue.id);  in mana_ib_create_qp_raw()
    373  resp.cqid = send_cq->queue.id;  in mana_ib_create_qp_raw()
    [all …]

main.c
    935  struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);  in mana_ib_gd_create_rc_qp() local
    947  req.send_cq_handle = send_cq->cq_handle;  in mana_ib_gd_create_rc_qp()

/linux/drivers/infiniband/hw/mlx4/

qp.c
     52  static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
     54  static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
    762  if (init_attr->send_cq || init_attr->cap.max_send_wr) {  in _mlx4_ib_create_qp_rss()
    960  mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),  in create_rq()
    969  mcq = to_mcq(init_attr->send_cq);  in create_rq()
    973  mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),  in create_rq()
   1234  mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),  in create_qp_common()
   1243  mcq = to_mcq(init_attr->send_cq);  in create_qp_common()
   1247  mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),  in create_qp_common()
   1302  static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)  in mlx4_ib_lock_cqs() argument
    [all …]

/linux/net/sunrpc/xprtrdma/

verbs.c
    350  if (ep->re_attr.send_cq)  in rpcrdma_ep_destroy()
    351  ib_free_cq(ep->re_attr.send_cq);  in rpcrdma_ep_destroy()
    352  ep->re_attr.send_cq = NULL;  in rpcrdma_ep_destroy()
    421  ep->re_attr.send_cq = ib_alloc_cq_any(device, r_xprt,  in rpcrdma_ep_create()
    424  if (IS_ERR(ep->re_attr.send_cq)) {  in rpcrdma_ep_create()
    425  rc = PTR_ERR(ep->re_attr.send_cq);  in rpcrdma_ep_create()
    426  ep->re_attr.send_cq = NULL;  in rpcrdma_ep_create()
    624  sc->sc_cid.ci_queue_id = ep->re_attr.send_cq->res.id;  in rpcrdma_sendctx_create()

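The xprtrdma hits show the ib_alloc_cq_any()/ib_free_cq() lifetime: the core picks a completion vector, the CQ pointer lives in the QP init attributes, and the error and destroy paths reset it to NULL so teardown can run more than once. A sketch under those assumptions; struct my_ep and the helper names are illustrative, not the xprtrdma structures:

#include <rdma/ib_verbs.h>

struct my_ep {
	struct ib_qp_init_attr attr;
};

static int my_ep_create_send_cq(struct my_ep *ep, struct ib_device *device,
				void *cq_context, int depth)
{
	ep->attr.send_cq = ib_alloc_cq_any(device, cq_context, depth,
					   IB_POLL_WORKQUEUE);
	if (IS_ERR(ep->attr.send_cq)) {
		int rc = PTR_ERR(ep->attr.send_cq);

		ep->attr.send_cq = NULL;	/* keep the destroy path idempotent */
		return rc;
	}
	return 0;
}

static void my_ep_destroy(struct my_ep *ep)
{
	if (ep->attr.send_cq)
		ib_free_cq(ep->attr.send_cq);
	ep->attr.send_cq = NULL;
}
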
/linux/Documentation/translations/zh_CN/infiniband/

tag_matching.rst
     39  1. Eager protocol -- the complete message is sent once the sender has finished processing the send. A completion is received in the send_cq

/linux/fs/smb/server/

transport_rdma.c
    104  struct ib_cq *send_cq;  member
    446  if (t->send_cq)  in free_transport()
    447  ib_free_cq(t->send_cq);  in free_transport()
   1884  t->send_cq = ib_alloc_cq(t->cm_id->device, t,  in smb_direct_create_qpair()
   1887  if (IS_ERR(t->send_cq)) {  in smb_direct_create_qpair()
   1889  ret = PTR_ERR(t->send_cq);  in smb_direct_create_qpair()
   1890  t->send_cq = NULL;  in smb_direct_create_qpair()
   1909  qp_attr.send_cq = t->send_cq;  in smb_direct_create_qpair()
   1944  if (t->send_cq) {  in smb_direct_create_qpair()
   1945  ib_destroy_cq(t->send_cq);  in smb_direct_create_qpair()
    [all …]

/linux/drivers/net/ethernet/ibm/ehea/

ehea_main.c
    199  arr[i++].fwh = pr->send_cq->fw_handle;  in ehea_update_firmware_handles()
    804  struct ehea_cq *send_cq = pr->send_cq;  in ehea_proc_cqes() local
    813  cqe = ehea_poll_cq(send_cq);  in ehea_proc_cqes()
    815  ehea_inc_cq(send_cq);  in ehea_proc_cqes()
    855  cqe = ehea_poll_cq(send_cq);  in ehea_proc_cqes()
    858  ehea_update_feca(send_cq, cqe_counter);  in ehea_proc_cqes()
    893  ehea_reset_cq_ep(pr->send_cq);  in ehea_poll()
    895  ehea_reset_cq_n1(pr->send_cq);  in ehea_poll()
    898  cqe_skb = ehea_poll_cq(pr->send_cq);  in ehea_poll()
   1477  pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,  in ehea_init_port_res()
    [all …]

ehea.h
    351  struct ehea_cq *send_cq;  member

/linux/drivers/infiniband/hw/mlx5/

gsi.c
     50  struct ib_cq *gsi_cq = mqp->ibqp.send_cq;  in generate_completions()
    141  hw_init_attr.send_cq = gsi->cq;  in mlx5_ib_create_gsi()
    205  .send_cq = gsi->cq,  in create_gsi_ud_qp()

/linux/drivers/infiniband/hw/vmw_pvrdma/

pvrdma_qp.c
     58  static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,  in get_cqs() argument
     61  *send_cq = to_vcq(qp->ibqp.send_cq);  in get_cqs()
    359  cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle;  in pvrdma_create_qp()
   1034  init_attr->send_cq = qp->ibqp.send_cq;  in pvrdma_query_qp()

/linux/fs/smb/client/

smbdirect.c
   1358  ib_free_cq(info->send_cq);  in smbd_destroy()
   1547  info->send_cq = NULL;  in _smbd_get_connection()
   1549  info->send_cq =  in _smbd_get_connection()
   1552  if (IS_ERR(info->send_cq)) {  in _smbd_get_connection()
   1553  info->send_cq = NULL;  in _smbd_get_connection()
   1575  qp_attr.send_cq = info->send_cq;  in _smbd_get_connection()
   1690  if (info->send_cq)  in _smbd_get_connection()
   1691  ib_free_cq(info->send_cq);  in _smbd_get_connection()

/linux/drivers/infiniband/ulp/srp/

ib_srp.c
    516  ib_process_cq_direct(ch->send_cq, -1);  in srp_destroy_qp()
    529  struct ib_cq *recv_cq, *send_cq;  in srp_create_ch_ib() local
    547  send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,  in srp_create_ch_ib()
    549  if (IS_ERR(send_cq)) {  in srp_create_ch_ib()
    550  ret = PTR_ERR(send_cq);  in srp_create_ch_ib()
    561  init_attr->send_cq = send_cq;  in srp_create_ch_ib()
    599  if (ch->send_cq)  in srp_create_ch_ib()
    600  ib_free_cq(ch->send_cq);  in srp_create_ch_ib()
    604  ch->send_cq = send_cq;  in srp_create_ch_ib()
    622  ib_free_cq(send_cq);  in srp_create_ch_ib()
    [all …]

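The ib_srp hits use the direct-polling variant: the send CQ is allocated on an explicit completion vector and reaped by the caller with ib_process_cq_direct() rather than by the core. A hedged sketch of that shape, with illustrative helper names and ordering (not srp's exact teardown sequence):

#include <rdma/ib_verbs.h>

/* Allocate a send CQ on a chosen completion vector, polled by the caller. */
static struct ib_cq *my_alloc_direct_send_cq(struct ib_device *dev, void *ctx,
					     int nr_cqe, int comp_vector)
{
	return ib_alloc_cq(dev, ctx, nr_cqe, comp_vector, IB_POLL_DIRECT);
}

/* Reap leftovers, drain, then tear the QP and its send CQ down. */
static void my_destroy_send_path(struct ib_qp *qp, struct ib_cq *send_cq)
{
	ib_process_cq_direct(send_cq, -1);	/* consume anything still queued */
	ib_drain_qp(qp);			/* flush outstanding work requests */
	ib_destroy_qp(qp);
	ib_free_cq(send_cq);
}
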
ib_srp.h
    157  struct ib_cq *send_cq;  member

/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/

send.c
    479  struct mlx5hws_send_ring_cq *cq = &send_ring->send_cq;  in hws_send_engine_poll_cq()
    873  hws_send_ring_close_cq(&queue->send_ring.send_cq);  in hws_send_ring_close()
    883  err = hws_send_ring_open_cq(ctx->mdev, queue, numa_node, &ring->send_cq);  in mlx5hws_send_ring_open()
    888  &ring->send_cq);  in mlx5hws_send_ring_open()
    895  hws_send_ring_close_cq(&ring->send_cq);  in mlx5hws_send_ring_open()

/linux/drivers/infiniband/hw/hns/

hns_roce_hw_v2.c
    714  if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {  in hns_roce_v2_post_send()
   2663  free_mr->rsv_qp[i]->ibqp.send_cq = cq;  in free_mr_init_qp()
   2721  qp_init_attr.send_cq = cq;  in free_mr_alloc_res()
   4322  hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq));  in modify_qp_reset_to_init()
   4350  hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq));  in modify_qp_init_to_init()
   5253  if (ibqp->send_cq)  in clear_qp()
   5254  hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),  in clear_qp()
   5257  if (ibqp->recv_cq && ibqp->recv_cq != ibqp->send_cq)  in clear_qp()
   5558  qp_init_attr->send_cq = ibqp->send_cq;  in hns_roce_v2_query_qp()
   5582  struct hns_roce_cq *send_cq, *recv_cq;  in hns_roce_v2_destroy_qp_common() local
    [all …]

/linux/drivers/infiniband/sw/siw/

siw_verbs.c
    362  if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {  in siw_create_qp()
    406  qp->scq = to_siw_cq(attrs->send_cq);  in siw_create_qp()
    543  qp_init_attr->send_cq = base_qp->send_cq;  in siw_query_qp()

/linux/Documentation/infiniband/

tag_matching.rst
     32  processed by the sender. A completion send is received in the send_cq
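
A minimal sketch of what a send completion arriving on the send_cq looks like at the verbs level, assuming an already-connected QP whose send CQ is polled directly and a buffer that has already been DMA-mapped; my_eager_send() and its arguments are illustrative, not from the tag-matching document:

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

static int my_eager_send(struct ib_qp *qp, u64 dma_addr, u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr   = dma_addr,
		.length = len,
		.lkey   = lkey,
	};
	struct ib_send_wr wr = {
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
		.sg_list    = &sge,
		.num_sge    = 1,
	};
	const struct ib_send_wr *bad_wr;
	struct ib_wc wc;
	int ret;

	ret = ib_post_send(qp, &wr, &bad_wr);
	if (ret)
		return ret;

	/* Busy-poll the QP's send CQ until the completion shows up. */
	do {
		ret = ib_poll_cq(qp->send_cq, 1, &wc);
	} while (ret == 0);
	if (ret < 0)
		return ret;

	/* Once the completion is reaped, the send buffer may be reused. */
	return wc.status == IB_WC_SUCCESS ? 0 : -EIO;
}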