
Searched refs: send_cq (Results 1 – 25 of 39), sorted by relevance


/linux/drivers/infiniband/ulp/ipoib/
ipoib_verbs.c
187 priv->send_cq = ib_create_cq(priv->ca, ipoib_ib_tx_completion, NULL, in ipoib_transport_dev_init()
189 if (IS_ERR(priv->send_cq)) { in ipoib_transport_dev_init()
197 init_attr.send_cq = priv->send_cq; in ipoib_transport_dev_init()
218 if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP)) in ipoib_transport_dev_init()
244 ib_destroy_cq(priv->send_cq); in ipoib_transport_dev_init()
266 ib_destroy_cq(priv->send_cq); in ipoib_transport_dev_cleanup()
ipoib_ib.c
440 n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc); in poll_tx()
508 n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc); in ipoib_tx_poll()
520 if (unlikely(ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP | in ipoib_tx_poll()
674 if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP | in ipoib_send()
ipoib_cm.c
255 .send_cq = priv->recv_cq, /* For drain WR */ in ipoib_cm_create_rx_qp()
770 rc = ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP | in ipoib_cm_send()
1061 .send_cq = priv->send_cq, in ipoib_cm_create_tx_qp()
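
Note: taken together, the ipoib hits above walk the full send-CQ lifecycle: ib_create_cq() with a completion handler, arming via ib_req_notify_cq(), reaping with ib_poll_cq(), and ib_destroy_cq() on teardown. A minimal sketch of that pattern, using illustrative names (my_tx_done, my_create_send_cq, MY_SEND_WC) rather than ipoib's own:

#include <linux/printk.h>
#include <rdma/ib_verbs.h>

#define MY_SEND_WC 16	/* hypothetical poll batch size */

static void my_tx_done(struct ib_cq *cq, void *cq_context)
{
	struct ib_wc wc[MY_SEND_WC];
	int n, i;

again:
	/* Drain completed sends; real code would release the buffer
	 * tracked by wc[i].wr_id here. */
	while ((n = ib_poll_cq(cq, MY_SEND_WC, wc)) > 0)
		for (i = 0; i < n; i++)
			if (wc[i].status != IB_WC_SUCCESS)
				pr_warn("tx wc status %d\n", wc[i].status);

	/* Re-arm; if a completion raced in before the arm took effect,
	 * IB_CQ_REPORT_MISSED_EVENTS returns > 0 and we poll again. */
	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				 IB_CQ_REPORT_MISSED_EVENTS) > 0)
		goto again;
}

static struct ib_cq *my_create_send_cq(struct ib_device *ca, void *ctx,
				       int depth)
{
	struct ib_cq_init_attr cq_attr = { .cqe = depth };

	return ib_create_cq(ca, my_tx_done, NULL, ctx, &cq_attr);
}

The caller plugs the result into qp_init_attr.send_cq (as ipoib_verbs.c line 197 does) and must ib_destroy_cq() it on every error path, mirroring lines 244 and 266.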
/linux/drivers/infiniband/hw/mthca/
H A Dmthca_qp.c735 qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn); in __mthca_modify_qp()
837 if (qp->ibqp.send_cq != qp->ibqp.recv_cq) in __mthca_modify_qp()
838 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL); in __mthca_modify_qp()
1164 struct mthca_cq *send_cq, in mthca_alloc_qp_common() argument
1293 struct mthca_cq *send_cq, in mthca_alloc_qp() argument
1321 err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, in mthca_alloc_qp()
1336 static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) in mthca_lock_cqs() argument
1337 __acquires(&send_cq->lock) __acquires(&recv_cq->lock) in mthca_lock_cqs()
1339 if (send_cq == recv_cq) { in mthca_lock_cqs()
1340 spin_lock_irq(&send_cq->lock); in mthca_lock_cqs()
[all …]
mthca_dev.h
535 struct mthca_cq *send_cq,
544 struct mthca_cq *send_cq,
mthca_provider.c
504 to_mcq(init_attr->send_cq), in mthca_create_qp()
529 to_mcq(init_attr->send_cq), in mthca_create_qp()
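
Note: the mthca_lock_cqs() hit above is the classic two-CQ locking idiom. A QP's send_cq and recv_cq may be the same object, so taking both locks blindly would self-deadlock, and taking distinct locks in arbitrary order could deadlock between two QPs that share the CQs crosswise. A sketch of the convention, assuming a stand-in struct my_cq and ordering by pointer where mthca orders by CQ number:

#include <linux/spinlock.h>

struct my_cq {
	spinlock_t lock;
};

static void my_lock_cqs(struct my_cq *send_cq, struct my_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		/* One shared CQ: take its lock once, keep sparse balanced. */
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq < recv_cq) {
		/* Fixed order so every locker agrees. */
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

The matching unlock helper releases in reverse order; mthca_qp.c lines 837-838 show the same send_cq != recv_cq distinction on the CQ-clean path.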
/linux/include/rdma/
rdmavt_qp.h
808 struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq); in rvt_send_cq()
923 static inline u32 ib_cq_tail(struct ib_cq *send_cq) in ib_cq_tail() argument
925 struct rvt_cq *cq = ibcq_to_rvtcq(send_cq); in ib_cq_tail()
927 return ibcq_to_rvtcq(send_cq)->ip ? in ib_cq_tail()
929 ibcq_to_rvtcq(send_cq)->kqueue->tail; in ib_cq_tail()
939 static inline u32 ib_cq_head(struct ib_cq *send_cq) in ib_cq_head() argument
941 struct rvt_cq *cq = ibcq_to_rvtcq(send_cq); in ib_cq_head()
943 return ibcq_to_rvtcq(send_cq)->ip ? in ib_cq_head()
945 ibcq_to_rvtcq(send_cq)->kqueue->head; in ib_cq_head()
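
Note: the rdmavt helpers above read the CQ's ring indices from one of two backings: a ring mmapped into user space when cq->ip is set, or a kernel-only ring otherwise. A simplified sketch of that dual-backing pattern, with a hypothetical struct my_cq instead of rdmavt's real layout:

#include <linux/compiler.h>
#include <linux/types.h>

struct my_cq_ring {
	u32 head;	/* next slot the producer writes */
	u32 tail;	/* next slot the consumer reads */
};

struct my_cq {
	struct my_cq_ring *uqueue;	/* user-mmapped ring, or NULL */
	struct my_cq_ring *kqueue;	/* kernel ring used otherwise */
};

/* Same shape as ib_cq_tail()/ib_cq_head(): pick whichever ring backs
 * this CQ. READ_ONCE() because user space may update the mmapped
 * ring concurrently. */
static inline u32 my_cq_tail(struct my_cq *cq)
{
	return cq->uqueue ? READ_ONCE(cq->uqueue->tail) : cq->kqueue->tail;
}

static inline u32 my_cq_head(struct my_cq *cq)
{
	return cq->uqueue ? READ_ONCE(cq->uqueue->head) : cq->kqueue->head;
}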
/linux/net/sunrpc/xprtrdma/
verbs.c
350 if (ep->re_attr.send_cq) in rpcrdma_ep_destroy()
351 ib_free_cq(ep->re_attr.send_cq); in rpcrdma_ep_destroy()
352 ep->re_attr.send_cq = NULL; in rpcrdma_ep_destroy()
421 ep->re_attr.send_cq = ib_alloc_cq_any(device, r_xprt, in rpcrdma_ep_create()
424 if (IS_ERR(ep->re_attr.send_cq)) { in rpcrdma_ep_create()
425 rc = PTR_ERR(ep->re_attr.send_cq); in rpcrdma_ep_create()
426 ep->re_attr.send_cq = NULL; in rpcrdma_ep_create()
624 sc->sc_cid.ci_queue_id = ep->re_attr.send_cq->res.id; in rpcrdma_sendctx_create()
svc_rdma_transport.c
509 qp_attr.send_cq = newxprt->sc_sq_cq; in svc_rdma_accept()
frwr_ops.c
53 cid->ci_queue_id = ep->re_attr.send_cq->res.id; in frwr_cid_init()
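
Note: the xprtrdma hits show the newer allocation helpers: ib_alloc_cq_any() lets the core pick a completion vector, ib_free_cq() tears the CQ down, and the send_cq pointer is reset to NULL so the destroy path stays safe to rerun. A condensed sketch of that create/destroy pairing, assuming a hypothetical struct my_ep and sizing (workqueue polling context chosen for this sketch):

#include <rdma/ib_verbs.h>

struct my_ep {
	struct ib_qp_init_attr attr;
};

static int my_ep_create_send_cq(struct my_ep *ep, struct ib_device *dev,
				int depth)
{
	/* Let the core choose the completion vector; completions are
	 * then handled from a workqueue. */
	ep->attr.send_cq = ib_alloc_cq_any(dev, ep, depth,
					   IB_POLL_WORKQUEUE);
	if (IS_ERR(ep->attr.send_cq)) {
		int rc = PTR_ERR(ep->attr.send_cq);

		ep->attr.send_cq = NULL;	/* destroy path tests this */
		return rc;
	}
	return 0;
}

static void my_ep_destroy_send_cq(struct my_ep *ep)
{
	if (ep->attr.send_cq)
		ib_free_cq(ep->attr.send_cq);
	ep->attr.send_cq = NULL;
}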
/linux/Documentation/translations/zh_CN/infiniband/
tag_matching.rst
39 1. Eager protocol - the complete message is sent as soon as the sender has processed the send. A completion is received in the send_cq
/linux/drivers/infiniband/hw/ionic/
ionic_controlpath.c
1322 struct ionic_cq *send_cq, in ionic_create_qp_cmd() argument
1355 wr.wqe.cmd.create_qp.sq_cq_id = cpu_to_le32(send_cq->cqid); in ionic_create_qp_cmd()
2186 udma_mask &= to_ionic_vcq(attr->send_cq)->udma_mask; in ionic_create_qp()
2238 to_ionic_vcq_cq(attr->send_cq, qp->udma_idx), in ionic_create_qp()
2342 cq = to_ionic_vcq_cq(attr->send_cq, qp->udma_idx); in ionic_create_qp()
2402 if (qp->ibqp.send_cq) in ionic_notify_qp_cqs()
2403 ionic_notify_flush_cq(to_ionic_vcq_cq(qp->ibqp.send_cq, in ionic_notify_qp_cqs()
2405 if (qp->ibqp.recv_cq && qp->ibqp.recv_cq != qp->ibqp.send_cq) in ionic_notify_qp_cqs()
2415 if (qp->ibqp.send_cq) { in ionic_flush_qp()
2416 cq = to_ionic_vcq_cq(qp->ibqp.send_cq, qp->udma_idx); in ionic_flush_qp()
[all …]
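
Note: ionic_notify_qp_cqs() above guards the recv side with recv_cq != send_cq, because a QP may use a single CQ for both directions and flushing it twice would be wrong; hns_roce's clear_qp() further down uses the same guard. The shape of that check, with a hypothetical per-CQ hook my_clean_cq():

#include <rdma/ib_verbs.h>

static void my_clean_cq(struct ib_cq *cq, u32 qpn)
{
	/* driver-specific: flush or drop CQEs that belong to qpn */
}

static void my_clean_qp_cqs(struct ib_qp *qp, u32 qpn)
{
	if (qp->send_cq)
		my_clean_cq(qp->send_cq, qpn);

	/* Touch the recv CQ only when it is a distinct object. */
	if (qp->recv_cq && qp->recv_cq != qp->send_cq)
		my_clean_cq(qp->recv_cq, qpn);
}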
/linux/drivers/net/ethernet/ibm/ehea/
ehea_main.c
199 arr[i++].fwh = pr->send_cq->fw_handle; in ehea_update_firmware_handles()
804 struct ehea_cq *send_cq = pr->send_cq; in ehea_proc_cqes() local
813 cqe = ehea_poll_cq(send_cq); in ehea_proc_cqes()
815 ehea_inc_cq(send_cq); in ehea_proc_cqes()
855 cqe = ehea_poll_cq(send_cq); in ehea_proc_cqes()
858 ehea_update_feca(send_cq, cqe_counter); in ehea_proc_cqes()
893 ehea_reset_cq_ep(pr->send_cq); in ehea_poll()
895 ehea_reset_cq_n1(pr->send_cq); in ehea_poll()
898 cqe_skb = ehea_poll_cq(pr->send_cq); in ehea_poll()
1477 pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq, in ehea_init_port_res()
[all …]
ehea.h
351 struct ehea_cq *send_cq; member
/linux/drivers/infiniband/hw/mlx5/
gsi.c
50 struct ib_cq *gsi_cq = mqp->ibqp.send_cq; in generate_completions()
141 hw_init_attr.send_cq = gsi->cq; in mlx5_ib_create_gsi()
206 .send_cq = gsi->cq, in create_gsi_ud_qp()
/linux/drivers/infiniband/sw/rxe/
rxe_qp.c
123 if (!init->recv_cq || !init->send_cq) { in rxe_qp_chk_init()
405 struct rxe_cq *scq = to_rcq(init->send_cq); in rxe_qp_from_init()
466 init->send_cq = qp->ibqp.send_cq; in rxe_qp_to_init()
/linux/drivers/infiniband/ulp/srp/
ib_srp.c
518 ib_process_cq_direct(ch->send_cq, -1); in srp_destroy_qp()
531 struct ib_cq *recv_cq, *send_cq; in srp_create_ch_ib() local
549 send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size, in srp_create_ch_ib()
551 if (IS_ERR(send_cq)) { in srp_create_ch_ib()
552 ret = PTR_ERR(send_cq); in srp_create_ch_ib()
563 init_attr->send_cq = send_cq; in srp_create_ch_ib()
601 if (ch->send_cq) in srp_create_ch_ib()
602 ib_free_cq(ch->send_cq); in srp_create_ch_ib()
606 ch->send_cq = send_cq; in srp_create_ch_ib()
624 ib_free_cq(send_cq); in srp_create_ch_ib()
[all …]
ib_srp.h
157 struct ib_cq *send_cq; member
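
Note: srp_destroy_qp() above (ib_srp.c line 518) reaps the send CQ inline before tearing the channel down. ib_process_cq_direct() polls in the caller's context, which is only legal for CQs allocated with IB_POLL_DIRECT, and a budget of -1 means drain everything. A sketch of that teardown order, assuming a hypothetical struct my_ch:

#include <rdma/ib_verbs.h>

struct my_ch {
	struct ib_qp *qp;
	struct ib_cq *send_cq;	/* allocated with IB_POLL_DIRECT */
};

static void my_destroy_ch(struct my_ch *ch)
{
	/* Reap already-completed sends inline; -1 = no budget cap. */
	ib_process_cq_direct(ch->send_cq, -1);

	ib_drain_qp(ch->qp);	/* flush outstanding work requests */
	ib_destroy_qp(ch->qp);
	ib_free_cq(ch->send_cq);
}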
/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/
send.c
603 struct mlx5hws_send_ring_cq *cq = &send_ring->send_cq; in hws_send_engine_poll_cq()
990 hws_send_ring_close_cq(&queue->send_ring.send_cq); in hws_send_ring_close()
1000 err = hws_send_ring_open_cq(ctx->mdev, queue, numa_node, &ring->send_cq); in mlx5hws_send_ring_open()
1005 &ring->send_cq); in mlx5hws_send_ring_open()
1012 hws_send_ring_close_cq(&ring->send_cq); in mlx5hws_send_ring_open()
debug.c
245 cq = &send_ring->send_cq; in hws_debug_dump_context_send_engine()
/linux/drivers/infiniband/hw/hns/
hns_roce_hw_v2.c
720 if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in hns_roce_v2_post_send()
1008 struct ib_cq *cq = ibqp->send_cq; in hns_roce_v2_drain_sq()
2964 free_mr->rsv_qp[i]->ibqp.send_cq = cq; in free_mr_init_qp()
3022 qp_init_attr.send_cq = cq; in free_mr_alloc_res()
4620 hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq)); in modify_qp_reset_to_init()
4648 hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq)); in modify_qp_init_to_init()
5570 if (ibqp->send_cq) in clear_qp()
5571 hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq), in clear_qp()
5574 if (ibqp->recv_cq && ibqp->recv_cq != ibqp->send_cq) in clear_qp()
5882 qp_init_attr->send_cq = ibqp->send_cq; in hns_roce_v2_query_qp()
[all …]
/linux/Documentation/infiniband/
tag_matching.rst
32 processed by the sender. A completion send is received in the send_cq
/linux/drivers/infiniband/hw/qedr/
qedr_roce_cm.c
62 dev->gsi_sqcq = get_qedr_cq(attrs->send_cq); in qedr_store_gsi_qp_cq()
139 cq = get_qedr_cq(attrs->send_cq); in qedr_destroy_gsi_cq()
/linux/net/9p/
trans_rdma.c
605 qp_attr.send_cq = rdma->cq; in rdma_create_trans()
/linux/drivers/infiniband/ulp/rtrs/
rtrs.c
270 init_attr.send_cq = con->cq; in create_qp()
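
Note: the 9p and rtrs hits both point a single connection CQ at the QP's send side; in the surrounding code the same CQ serves the recv side too. A minimal sketch of wiring one shared CQ into a QP through the RDMA CM, with illustrative sizing:

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

static struct ib_qp *my_create_qp(struct rdma_cm_id *cm_id,
				  struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr init_attr = {
		.cap = {
			.max_send_wr	= 32,	/* illustrative depths */
			.max_recv_wr	= 32,
			.max_send_sge	= 1,
			.max_recv_sge	= 1,
		},
		.sq_sig_type	= IB_SIGNAL_REQ_WR,
		.qp_type	= IB_QPT_RC,
		.send_cq	= cq,	/* one CQ for both directions */
		.recv_cq	= cq,
	};
	int ret;

	ret = rdma_create_qp(cm_id, pd, &init_attr);
	return ret ? ERR_PTR(ret) : cm_id->qp;
}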
