Lines matching refs: ibqp

Cross-reference hits for the ibqp identifier in the mthca InfiniBand driver's QP code. Each entry gives the source line number, the matching line, and the enclosing function ("argument" marks hits where ibqp appears in a function's parameter list).
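
Every hit below goes through the ib_qp that mthca embeds in its own QP structure: to_mqp() recovers the driver-private mthca_qp from the ib_qp pointer the verbs core passes in, and to_mdev(), to_mpd(), to_mcq() and to_msrq() do the same for the device, PD, CQ and SRQ objects. The driver builds these accessors on container_of over the embedded member. Below is a minimal userspace sketch of that pattern; the struct layouts and field names are simplified stand-ins, not the driver's real definitions.

    #include <stddef.h>
    #include <stdio.h>

    /* container_of: recover the enclosing structure from a pointer to one
     * of its members (same idea as the kernel macro). */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Simplified stand-ins for the verbs object and the driver object. */
    struct ib_qp {
        unsigned int qp_num;
    };

    struct mthca_qp {
        struct ib_qp ibqp;   /* embedded verbs QP: what the core hands back */
        int qpn;             /* hardware QP number */
    };

    /* The accessor the hits below rely on: ib_qp -> mthca_qp. */
    static struct mthca_qp *to_mqp(struct ib_qp *ibqp)
    {
        return container_of(ibqp, struct mthca_qp, ibqp);
    }

    int main(void)
    {
        struct mthca_qp qp = { .ibqp = { .qp_num = 42 }, .qpn = 42 };
        struct ib_qp *handle = &qp.ibqp;     /* what the verbs core passes around */

        /* Inside a driver entry point, recover the private structure. */
        struct mthca_qp *priv = to_mqp(handle);
        printf("qp_num=%u qpn=%d\n", priv->ibqp.qp_num, priv->qpn);
        return 0;
    }

Because ib_qp is embedded rather than pointed to, the conversion is a constant offset: no lookup table and no extra allocation per QP.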

261 event.element.qp = &qp->ibqp; in mthca_qp_event()
262 if (qp->ibqp.event_handler) in mthca_qp_event()
263 qp->ibqp.event_handler(&event, qp->ibqp.qp_context); in mthca_qp_event()
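
Lines 261-263 are the asynchronous event path: mthca_qp_event() fills an ib_event with a pointer to the embedded ib_qp and, if the consumer registered an event_handler when creating the QP, invokes it with the consumer's opaque qp_context. A self-contained sketch of that dispatch follows; the types are simplified stand-ins for the verbs structures, not the real headers.

    #include <stdio.h>

    /* Simplified stand-ins for the verbs event plumbing, not the real headers. */
    enum ib_event_type { IB_EVENT_PATH_MIG, IB_EVENT_QP_FATAL };

    struct ib_qp;

    struct ib_event {
        enum ib_event_type event;
        union {
            struct ib_qp *qp;
        } element;
    };

    struct ib_qp {
        void (*event_handler)(struct ib_event *, void *);
        void *qp_context;                /* opaque consumer cookie */
    };

    struct mthca_qp {
        struct ib_qp ibqp;
    };

    /* Shape of the dispatch in mthca_qp_event(): build the event around the
     * embedded ib_qp, then call the consumer's handler if one was registered. */
    static void qp_event_dispatch(struct mthca_qp *qp, enum ib_event_type type)
    {
        struct ib_event event;

        event.event = type;
        event.element.qp = &qp->ibqp;
        if (qp->ibqp.event_handler)
            qp->ibqp.event_handler(&event, qp->ibqp.qp_context);
    }

    static void my_handler(struct ib_event *ev, void *ctx)
    {
        printf("QP event %d on context %s\n", ev->event, (const char *)ctx);
    }

    int main(void)
    {
        struct mthca_qp qp = { .ibqp = { .event_handler = my_handler,
                                         .qp_context = "app-qp-1" } };
        qp_event_dispatch(&qp, IB_EVENT_PATH_MIG);
        return 0;
    }
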
425 int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, in mthca_query_qp() argument
428 struct mthca_dev *dev = to_mdev(ibqp->device); in mthca_query_qp()
429 struct mthca_qp *qp = to_mqp(ibqp); in mthca_query_qp()
544 static int __mthca_modify_qp(struct ib_qp *ibqp, in __mthca_modify_qp() argument
550 struct mthca_dev *dev = to_mdev(ibqp->device); in __mthca_modify_qp()
551 struct mthca_qp *qp = to_mqp(ibqp); in __mthca_modify_qp()
614 if (qp->ibqp.uobject) in __mthca_modify_qp()
655 if (ibqp->qp_type == IB_QPT_RC && in __mthca_modify_qp()
657 u8 sched_queue = ibqp->uobject ? 0x2 : 0x1; in __mthca_modify_qp()
697 qp_context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pd_num); in __mthca_modify_qp()
723 qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn); in __mthca_modify_qp()
747 if (ibqp->srq) in __mthca_modify_qp()
762 qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn); in __mthca_modify_qp()
772 if (ibqp->srq) in __mthca_modify_qp()
774 to_msrq(ibqp->srq)->srqn); in __mthca_modify_qp()
822 if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) { in __mthca_modify_qp()
823 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, in __mthca_modify_qp()
824 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); in __mthca_modify_qp()
825 if (qp->ibqp.send_cq != qp->ibqp.recv_cq) in __mthca_modify_qp()
826 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL); in __mthca_modify_qp()
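
The __mthca_modify_qp() hits show the hardware QP context being filled from the associated verbs objects (the PD number at line 697, the send and receive CQ numbers at 723 and 762, the SRQ number at 774) and, at lines 822-826, the cleanup done when a kernel-owned QP (no uobject) moves to RESET: completions for this QPN are scrubbed from the receive CQ, passing the attached SRQ, if any, so its WQEs can be reclaimed, and from the send CQ only when it is a different CQ. Below is a minimal sketch of that RESET-time decision, with simplified stand-in types rather than the driver's.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins: just enough state to show the cleanup decisions made on a
     * transition to RESET (not the driver's real types). */
    struct cq  { const char *name; };
    struct srq { const char *name; };

    struct qp {
        int qpn;
        bool is_user;          /* userspace QP: queues owned by the consumer */
        struct cq *send_cq;
        struct cq *recv_cq;
        struct srq *srq;       /* NULL unless attached to a shared RQ */
    };

    /* Stand-in for mthca_cq_clean(): drop completions belonging to qpn,
     * reclaiming SRQ WQEs when an SRQ is attached. */
    static void cq_clean(struct cq *cq, int qpn, struct srq *srq)
    {
        printf("clean %s for QPN %d%s\n", cq->name, qpn,
               srq ? " (return WQEs to SRQ)" : "");
    }

    static void qp_to_reset(struct qp *qp)
    {
        /* Kernel QPs only: userspace manages its own queues, so the
         * driver must not touch their CQ contents. */
        if (qp->is_user)
            return;

        cq_clean(qp->recv_cq, qp->qpn, qp->srq);
        if (qp->send_cq != qp->recv_cq)   /* avoid scrubbing the same CQ twice */
            cq_clean(qp->send_cq, qp->qpn, NULL);
    }

    int main(void)
    {
        struct cq scq = { "send CQ" }, rcq = { "recv CQ" };
        struct qp qp = { .qpn = 7, .is_user = false,
                         .send_cq = &scq, .recv_cq = &rcq, .srq = NULL };
        qp_to_reset(&qp);
        return 0;
    }
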
846 int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, in mthca_modify_qp() argument
849 struct mthca_dev *dev = to_mdev(ibqp->device); in mthca_modify_qp()
850 struct mthca_qp *qp = to_mqp(ibqp); in mthca_modify_qp()
867 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) { in mthca_modify_qp()
907 err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state, in mthca_modify_qp()
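
mthca_modify_qp() (lines 846-907) is the wrapper: it resolves the device and driver QP from ibqp, rejects the request unless ib_modify_qp_is_ok() accepts the current state, the requested state, the QP type and the attribute mask, and only then calls __mthca_modify_qp(). The sketch below shows that validate-then-commit shape with a toy transition table; the real helper's table and its QP-type and attribute-mask checks are much richer, and all names here are stand-ins.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified QP state set (subset of enum ib_qp_state). */
    enum qp_state { QPS_RESET, QPS_INIT, QPS_RTR, QPS_RTS, QPS_ERR, QPS_MAX };

    /* Stand-in for ib_modify_qp_is_ok(): a tiny transition table.  The real
     * helper also checks the QP type and the attribute mask. */
    static bool modify_is_ok(enum qp_state cur, enum qp_state next)
    {
        static const bool ok[QPS_MAX][QPS_MAX] = {
            [QPS_RESET][QPS_INIT] = true,
            [QPS_INIT][QPS_RTR] = true,
            [QPS_RTR][QPS_RTS] = true,
            /* any state may be forced to RESET or ERR */
            [QPS_INIT][QPS_RESET] = true, [QPS_RTR][QPS_RESET] = true,
            [QPS_RTS][QPS_RESET] = true, [QPS_INIT][QPS_ERR] = true,
            [QPS_RTR][QPS_ERR] = true, [QPS_RTS][QPS_ERR] = true,
        };
        return ok[cur][next];
    }

    /* Shape of the wrapper: validate the transition, then commit it. */
    static int modify_qp(enum qp_state *cur, enum qp_state next)
    {
        if (!modify_is_ok(*cur, next)) {
            fprintf(stderr, "bad transition %d -> %d\n", *cur, next);
            return -1;
        }
        *cur = next;           /* __mthca_modify_qp() does the real work here */
        return 0;
    }

    int main(void)
    {
        enum qp_state s = QPS_RESET;
        modify_qp(&s, QPS_INIT);
        modify_qp(&s, QPS_RTS);   /* rejected: must pass through RTR first */
        modify_qp(&s, QPS_RTR);
        modify_qp(&s, QPS_RTS);
        return 0;
    }
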
1433 send_cq = to_mcq(qp->ibqp.send_cq); in mthca_free_qp()
1434 recv_cq = to_mcq(qp->ibqp.recv_cq); in mthca_free_qp()
1461 if (!qp->ibqp.uobject) { in mthca_free_qp()
1463 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); in mthca_free_qp()
1474 atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count)); in mthca_free_qp()
1501 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) | in build_mlx_header()
1522 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; in build_mlx_header()
1526 if (!sqp->qp.ibqp.qp_num) in build_mlx_header()
1537 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); in build_mlx_header()
1544 data->lkey = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey); in build_mlx_header()
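
build_mlx_header() handles the special QPs: when ibqp.qp_num is 0 (the SMI QP) the MLX descriptor gets the MTHCA_MLX_VL15 flag and the LRH virtual lane is forced to 15, while QP1 (the GSI QP) uses a data VL; the DETH source QPN is taken straight from ibqp.qp_num, and the data segment's lkey comes from the PD's internal MR (line 1544). A small sketch of the QP0/QP1 distinction, using a simplified header struct rather than the real ib_ud_header:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the few UD header fields touched by the special-QP logic. */
    struct ud_header {
        uint8_t virtual_lane;
        uint32_t source_qpn;
    };

    /* Shape of the distinction in the header builder: subnet management
     * packets (QP0) travel on the management virtual lane 15, GSI traffic
     * (QP1) on a data VL; the source QPN is simply the verbs QP number. */
    static void build_special_qp_header(struct ud_header *hdr, uint32_t qp_num)
    {
        hdr->virtual_lane = (qp_num == 0) ? 15 : 0;
        hdr->source_qpn = qp_num;
    }

    int main(void)
    {
        struct ud_header smi, gsi;

        build_special_qp_header(&smi, 0);    /* QP0: SMI, VL15 */
        build_special_qp_header(&gsi, 1);    /* QP1: GSI, data VL */
        printf("QP0 VL=%u  QP1 VL=%u\n",
               (unsigned)smi.virtual_lane, (unsigned)gsi.virtual_lane);
        return 0;
    }
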
1608 int mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, in mthca_tavor_post_send() argument
1611 struct mthca_dev *dev = to_mdev(ibqp->device); in mthca_tavor_post_send()
1612 struct mthca_qp *qp = to_mqp(ibqp); in mthca_tavor_post_send()
1639 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in mthca_tavor_post_send()
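
The posting paths (this one and the Tavor receive and Arbel send/receive variants below) all resolve dev and qp from ibqp and then, for each work request, call mthca_wq_overflow() against the queue's CQ before consuming a WQE slot, failing the post if the ring would overflow. The sketch below shows the wrap-safe head/tail occupancy test that check is built on; the driver's helper additionally retests under the CQ lock before giving up, which is omitted here, and the names are stand-ins.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in work queue: the posting path advances head, the completion
     * path advances tail; both are free-running unsigned counters so the
     * subtraction below is wrap-safe. */
    struct wq {
        unsigned int head;
        unsigned int tail;
        unsigned int max;      /* number of WQE slots in the ring */
    };

    /* Shape of the overflow test done before consuming a slot for the
     * nreq-th request of a post call: current occupancy plus the requests
     * already accepted in this call must stay below the ring size. */
    static bool wq_overflow(const struct wq *wq, unsigned int nreq)
    {
        return wq->head - wq->tail + nreq >= wq->max;
    }

    int main(void)
    {
        struct wq sq = { .head = 14, .tail = 8, .max = 8 };

        for (unsigned int nreq = 0; nreq < 4; ++nreq)
            printf("nreq=%u -> %s\n", nreq,
                   wq_overflow(&sq, nreq) ? "overflow" : "ok");
        return 0;
    }
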
1811 int mthca_tavor_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr, in mthca_tavor_post_receive() argument
1814 struct mthca_dev *dev = to_mdev(ibqp->device); in mthca_tavor_post_receive()
1815 struct mthca_qp *qp = to_mqp(ibqp); in mthca_tavor_post_receive()
1840 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { in mthca_tavor_post_receive()
1922 int mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, in mthca_arbel_post_send() argument
1925 struct mthca_dev *dev = to_mdev(ibqp->device); in mthca_arbel_post_send()
1926 struct mthca_qp *qp = to_mqp(ibqp); in mthca_arbel_post_send()
1980 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in mthca_arbel_post_send()
2162 int mthca_arbel_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr, in mthca_arbel_post_receive() argument
2165 struct mthca_dev *dev = to_mdev(ibqp->device); in mthca_arbel_post_receive()
2166 struct mthca_qp *qp = to_mqp(ibqp); in mthca_arbel_post_receive()
2181 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { in mthca_arbel_post_receive()
2242 if (qp->ibqp.srq && !is_send) { in mthca_free_err_wqe()
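
The last hit, in mthca_free_err_wqe(), is the SRQ special case in error-WQE handling: receive entries of a QP attached to an SRQ are not fixed up on the QP's own receive queue, since those WQEs belong to the SRQ. The listing shows only the condition, so the sketch below merely illustrates the dispatch, with stand-in types and placeholder actions rather than the driver's actual fixup.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins: a QP that may take its receive WQEs from a shared
     * receive queue instead of its own RQ. */
    struct srq { const char *name; };
    struct qp  { struct srq *srq; };

    /* Shape of the error-WQE dispatch: send entries and ordinary receive
     * entries are handled on the QP's own queues, but receive entries of
     * an SRQ-attached QP are the SRQ's to deal with. */
    static void free_err_wqe(struct qp *qp, bool is_send, int index)
    {
        if (qp->srq && !is_send) {
            printf("recv WQE %d: handled via SRQ %s\n", index, qp->srq->name);
            return;
        }
        printf("%s WQE %d: fixed up on the QP's own queue\n",
               is_send ? "send" : "recv", index);
    }

    int main(void)
    {
        struct srq srq = { "srq0" };
        struct qp with_srq = { .srq = &srq }, without = { .srq = NULL };

        free_err_wqe(&with_srq, false, 3);
        free_err_wqe(&without, false, 3);
        free_err_wqe(&with_srq, true, 5);
        return 0;
    }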