Lines matching full:wr (work-request handling in the mthca InfiniBand driver's QP send/receive paths, mthca_qp.c; the leading number on each entry is the line number in that file)

1485 			    int ind, const struct ib_ud_wr *wr,  in build_mlx_header()  argument
1494 mthca_ah_grh_present(to_mah(wr->ah)), 0, 0, 0, in build_mlx_header()
1497 err = mthca_read_ah(dev, to_mah(wr->ah), &sqp->ud_header); in build_mlx_header()
1508 switch (wr->wr.opcode) { in build_mlx_header()
1516 sqp->ud_header.immediate_data = wr->wr.ex.imm_data; in build_mlx_header()
1525 sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); in build_mlx_header()
1531 wr->pkey_index, &pkey); in build_mlx_header()
1533 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); in build_mlx_header()
1535 sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ? in build_mlx_header()
1536 sqp->qkey : wr->remote_qkey); in build_mlx_header()
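
The build_mlx_header() matches above show how the software-built UD header for the special MLX QPs is filled from the ib_ud_wr: immediate data and the solicited-event bit come from wr->wr.ex.imm_data and IB_SEND_SOLICITED, the P_Key is looked up via wr->pkey_index, the destination QPN comes from wr->remote_qpn, and the DETH Q_Key falls back to the QP's own qkey when the high bit of wr->remote_qkey is set. Below is a minimal, self-contained sketch of that field mapping; the structs, constants, and function name are hypothetical stand-ins (not the kernel's ib_ud_wr / ib_ud_header), and the cpu_to_be32() byte-order conversions are omitted.

```c
#include <stdint.h>

/* Hypothetical, simplified stand-ins for struct ib_ud_wr and the
 * software-built UD header; not the kernel definitions. */
struct ud_wr_stub {
	uint32_t opcode;       /* e.g. a SEND_WITH_IMM value */
	uint32_t send_flags;   /* e.g. a SOLICITED bit       */
	uint32_t imm_data;     /* immediate data             */
	uint32_t remote_qpn;
	uint32_t remote_qkey;
};

struct ud_header_stub {
	uint32_t immediate_data;
	int      solicited_event;
	uint16_t pkey;
	uint32_t destination_qpn;
	uint32_t qkey;
};

#define STUB_WR_SEND_WITH_IMM 1
#define STUB_SEND_SOLICITED   (1u << 1)

/* Field mapping visible in the build_mlx_header() matches above. */
static void build_ud_header_stub(const struct ud_wr_stub *wr, uint32_t qp_qkey,
				 uint16_t pkey, struct ud_header_stub *hdr)
{
	if (wr->opcode == STUB_WR_SEND_WITH_IMM)
		hdr->immediate_data = wr->imm_data;

	hdr->solicited_event = !!(wr->send_flags & STUB_SEND_SOLICITED);
	hdr->pkey            = pkey;    /* the driver resolves this from wr->pkey_index */
	hdr->destination_qpn = wr->remote_qpn;

	/* High bit set in remote_qkey means "use the QP's own qkey". */
	hdr->qkey = (wr->remote_qkey & 0x80000000u) ? qp_qkey : wr->remote_qkey;
}
```
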
1578 const struct ib_atomic_wr *wr) in set_atomic_seg() argument
1580 if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { in set_atomic_seg()
1581 aseg->swap_add = cpu_to_be64(wr->swap); in set_atomic_seg()
1582 aseg->compare = cpu_to_be64(wr->compare_add); in set_atomic_seg()
1584 aseg->swap_add = cpu_to_be64(wr->compare_add); in set_atomic_seg()
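
The set_atomic_seg() matches show the two atomic cases: for IB_WR_ATOMIC_CMP_AND_SWP the segment's swap_add field takes wr->swap and compare takes wr->compare_add, while for fetch-and-add only swap_add is loaded, from wr->compare_add. A short self-contained sketch of that branching follows; the types and constants are hypothetical stand-ins, and clearing compare in the fetch-and-add branch is an assumption (that line does not reference wr and so is not in the match list).

```c
#include <stdint.h>

/* Hypothetical stand-ins for struct ib_atomic_wr / struct mthca_atomic_seg. */
enum { STUB_ATOMIC_CMP_AND_SWP, STUB_ATOMIC_FETCH_AND_ADD };

struct atomic_wr_stub  { int opcode; uint64_t swap, compare_add; };
struct atomic_seg_stub { uint64_t swap_add, compare; };

static void set_atomic_seg_stub(struct atomic_seg_stub *aseg,
				const struct atomic_wr_stub *wr)
{
	if (wr->opcode == STUB_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = wr->swap;        /* value written on a match */
		aseg->compare  = wr->compare_add; /* value compared against   */
	} else {
		/* Fetch-and-add: compare_add carries the addend;
		 * clearing compare here is assumed. */
		aseg->swap_add = wr->compare_add;
		aseg->compare  = 0;
	}
}
```
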
1591 const struct ib_ud_wr *wr) in set_tavor_ud_seg() argument
1593 useg->lkey = cpu_to_be32(to_mah(wr->ah)->key); in set_tavor_ud_seg()
1594 useg->av_addr = cpu_to_be64(to_mah(wr->ah)->avdma); in set_tavor_ud_seg()
1595 useg->dqpn = cpu_to_be32(wr->remote_qpn); in set_tavor_ud_seg()
1596 useg->qkey = cpu_to_be32(wr->remote_qkey); in set_tavor_ud_seg()
1601 const struct ib_ud_wr *wr) in set_arbel_ud_seg() argument
1603 memcpy(useg->av, to_mah(wr->ah)->av, MTHCA_AV_SIZE); in set_arbel_ud_seg()
1604 useg->dqpn = cpu_to_be32(wr->remote_qpn); in set_arbel_ud_seg()
1605 useg->qkey = cpu_to_be32(wr->remote_qkey); in set_arbel_ud_seg()
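
The two UD-segment helpers differ only in how the address vector reaches the hardware: set_tavor_ud_seg() stores the AV's lkey and DMA address so the HCA fetches it from host memory, while set_arbel_ud_seg() copies the whole AV inline into the WQE; both then store the remote QPN and Q_Key. A rough sketch of the two shapes, using hypothetical stand-in structs and an assumed AV size constant (not the kernel's MTHCA_AV_SIZE definition):

```c
#include <stdint.h>
#include <string.h>

#define AV_SIZE_STUB 32    /* stand-in for MTHCA_AV_SIZE */

struct ah_stub     { uint32_t key; uint64_t avdma; uint8_t av[AV_SIZE_STUB]; };
struct ud_wr_stub2 { const struct ah_stub *ah; uint32_t remote_qpn, remote_qkey; };

struct tavor_ud_seg_stub { uint32_t lkey; uint64_t av_addr; uint32_t dqpn, qkey; };
struct arbel_ud_seg_stub { uint8_t av[AV_SIZE_STUB]; uint32_t dqpn, qkey; };

static void set_tavor_ud_seg_stub(struct tavor_ud_seg_stub *useg,
				  const struct ud_wr_stub2 *wr)
{
	/* Tavor: point the WQE at the address vector in host memory. */
	useg->lkey    = wr->ah->key;
	useg->av_addr = wr->ah->avdma;
	useg->dqpn    = wr->remote_qpn;
	useg->qkey    = wr->remote_qkey;
}

static void set_arbel_ud_seg_stub(struct arbel_ud_seg_stub *useg,
				  const struct ud_wr_stub2 *wr)
{
	/* Arbel: copy the address vector inline into the WQE. */
	memcpy(useg->av, wr->ah->av, AV_SIZE_STUB);
	useg->dqpn = wr->remote_qpn;
	useg->qkey = wr->remote_qkey;
}
```
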
1608 int mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, in mthca_tavor_post_send() argument
1638 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mthca_tavor_post_send()
1645 *bad_wr = wr; in mthca_tavor_post_send()
1656 ((wr->send_flags & IB_SEND_SIGNALED) ? in mthca_tavor_post_send()
1658 ((wr->send_flags & IB_SEND_SOLICITED) ? in mthca_tavor_post_send()
1661 if (wr->opcode == IB_WR_SEND_WITH_IMM || in mthca_tavor_post_send()
1662 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) in mthca_tavor_post_send()
1663 ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data; in mthca_tavor_post_send()
1670 switch (wr->opcode) { in mthca_tavor_post_send()
1673 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, in mthca_tavor_post_send()
1674 atomic_wr(wr)->rkey); in mthca_tavor_post_send()
1677 set_atomic_seg(wqe, atomic_wr(wr)); in mthca_tavor_post_send()
1686 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, in mthca_tavor_post_send()
1687 rdma_wr(wr)->rkey); in mthca_tavor_post_send()
1700 switch (wr->opcode) { in mthca_tavor_post_send()
1703 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, in mthca_tavor_post_send()
1704 rdma_wr(wr)->rkey); in mthca_tavor_post_send()
1717 set_tavor_ud_seg(wqe, ud_wr(wr)); in mthca_tavor_post_send()
1723 err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr), in mthca_tavor_post_send()
1727 *bad_wr = wr; in mthca_tavor_post_send()
1735 if (wr->num_sge > qp->sq.max_gs) { in mthca_tavor_post_send()
1738 *bad_wr = wr; in mthca_tavor_post_send()
1742 for (i = 0; i < wr->num_sge; ++i) { in mthca_tavor_post_send()
1743 mthca_set_data_seg(wqe, wr->sg_list + i); in mthca_tavor_post_send()
1757 qp->wrid[ind + qp->rq.max] = wr->wr_id; in mthca_tavor_post_send()
1759 if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { in mthca_tavor_post_send()
1762 *bad_wr = wr; in mthca_tavor_post_send()
1769 mthca_opcode[wr->opcode]); in mthca_tavor_post_send()
1773 ((wr->send_flags & IB_SEND_FENCE) ? in mthca_tavor_post_send()
1778 op0 = mthca_opcode[wr->opcode]; in mthca_tavor_post_send()
1779 f0 = wr->send_flags & IB_SEND_FENCE ? in mthca_tavor_post_send()
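
The mthca_tavor_post_send() matches trace the usual verbs posting pattern: walk the linked list of ib_send_wr via wr->next, report the offending request through *bad_wr on overflow, an oversized num_sge, or an out-of-range opcode, translate IB_SEND_SIGNALED/SOLICITED/FENCE into WQE control bits, write the opcode-specific segment (remote address plus atomic segment, RDMA remote address, UD segment, or MLX header), append the scatter/gather entries from wr->sg_list, and record wr->wr_id for completion lookup before ringing the doorbell. The condensed, self-contained sketch below shows only that control flow; every struct, constant, limit, and the plain -1 error return is a hypothetical simplification of the driver's real types and -ENOMEM/-EINVAL returns.

```c
#include <stdint.h>

/* Hypothetical, simplified stand-ins for the verbs structures and the
 * send-queue state; none of these are the driver's real types. */
struct sge_stub { uint64_t addr; uint32_t length, lkey; };

struct send_wr_stub {
	struct send_wr_stub *next;
	uint64_t wr_id;
	int      opcode;       /* index into an opcode table   */
	uint32_t send_flags;   /* SIGNALED / SOLICITED / FENCE */
	int      num_sge;
	struct sge_stub *sg_list;
};

struct sq_stub {
	int max_gs;            /* max scatter/gather entries per WQE */
	int free;              /* free WQE slots                     */
	int head;
	uint64_t wrid[64];     /* wr_id recorded per posted WQE      */
};

#define STUB_SEND_SIGNALED  (1u << 0)
#define STUB_SEND_SOLICITED (1u << 1)
#define STUB_SEND_FENCE     (1u << 2)
#define STUB_NUM_OPCODES    8

/* Returns 0 on success; on failure *bad_wr points at the failing request. */
static int post_send_stub(struct sq_stub *sq, struct send_wr_stub *wr,
			  struct send_wr_stub **bad_wr)
{
	int nreq, i;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (sq->free == 0) {                   /* queue overflow */
			*bad_wr = wr;
			return -1;
		}
		if (wr->num_sge > sq->max_gs ||        /* too many SGEs  */
		    wr->opcode >= STUB_NUM_OPCODES) {  /* unknown opcode */
			*bad_wr = wr;
			return -1;
		}

		/* Translate send_flags into WQE control bits. */
		uint32_t ctrl = 0;
		if (wr->send_flags & STUB_SEND_SIGNALED)
			ctrl |= 1u << 0;
		if (wr->send_flags & STUB_SEND_SOLICITED)
			ctrl |= 1u << 1;
		if (wr->send_flags & STUB_SEND_FENCE)
			ctrl |= 1u << 2;
		(void)ctrl;

		/* The opcode-specific segment (remote address, atomic or UD
		 * segment) and the scatter/gather entries would be written
		 * into the WQE here; only the bookkeeping is shown. */
		uint32_t total = 0;
		for (i = 0; i < wr->num_sge; ++i)
			total += wr->sg_list[i].length;
		(void)total;

		sq->wrid[sq->head] = wr->wr_id;        /* for completions */
		sq->head = (sq->head + 1) % 64;
		sq->free--;
	}

	/* The driver rings the doorbell once here for all nreq requests. */
	(void)nreq;
	return 0;
}
```
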
1811 int mthca_tavor_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr, in mthca_tavor_post_receive() argument
1839 for (nreq = 0; wr; wr = wr->next) { in mthca_tavor_post_receive()
1846 *bad_wr = wr; in mthca_tavor_post_receive()
1861 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in mthca_tavor_post_receive()
1863 *bad_wr = wr; in mthca_tavor_post_receive()
1867 for (i = 0; i < wr->num_sge; ++i) { in mthca_tavor_post_receive()
1868 mthca_set_data_seg(wqe, wr->sg_list + i); in mthca_tavor_post_receive()
1873 qp->wrid[ind] = wr->wr_id; in mthca_tavor_post_receive()
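
The receive path is simpler: walk wr->next, reject a request through *bad_wr when the ring is full or wr->num_sge exceeds rq.max_gs, copy the scatter list from wr->sg_list into the WQE data segments, and store wr->wr_id. The same checks recur in the mthca_arbel_post_receive() matches at the end of this listing. A short stand-alone sketch under the same hypothetical-stub assumptions as the post-send sketch above:

```c
#include <stdint.h>

/* Hypothetical stand-ins, matching the style of the post-send sketch. */
struct recv_sge_stub { uint64_t addr; uint32_t length, lkey; };

struct recv_wr_stub {
	struct recv_wr_stub *next;
	uint64_t wr_id;
	int num_sge;
	struct recv_sge_stub *sg_list;
};

struct rq_stub { int max_gs, free, head; uint64_t wrid[64]; };

static int post_recv_stub(struct rq_stub *rq, struct recv_wr_stub *wr,
			  struct recv_wr_stub **bad_wr)
{
	for (; wr; wr = wr->next) {
		if (rq->free == 0 || wr->num_sge > rq->max_gs) {
			*bad_wr = wr;             /* report the failing request */
			return -1;
		}

		uint32_t total = 0;
		for (int i = 0; i < wr->num_sge; ++i)
			total += wr->sg_list[i].length;  /* into WQE data segs */
		(void)total;

		rq->wrid[rq->head] = wr->wr_id;   /* saved for the completion */
		rq->head = (rq->head + 1) % 64;
		rq->free--;
	}
	return 0;
}
```
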
1922 int mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, in mthca_arbel_post_send() argument
1953 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mthca_arbel_post_send()
1986 *bad_wr = wr; in mthca_arbel_post_send()
1995 ((wr->send_flags & IB_SEND_SIGNALED) ? in mthca_arbel_post_send()
1997 ((wr->send_flags & IB_SEND_SOLICITED) ? in mthca_arbel_post_send()
1999 ((wr->send_flags & IB_SEND_IP_CSUM) ? in mthca_arbel_post_send()
2002 if (wr->opcode == IB_WR_SEND_WITH_IMM || in mthca_arbel_post_send()
2003 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) in mthca_arbel_post_send()
2004 ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data; in mthca_arbel_post_send()
2011 switch (wr->opcode) { in mthca_arbel_post_send()
2014 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, in mthca_arbel_post_send()
2015 atomic_wr(wr)->rkey); in mthca_arbel_post_send()
2018 set_atomic_seg(wqe, atomic_wr(wr)); in mthca_arbel_post_send()
2027 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, in mthca_arbel_post_send()
2028 rdma_wr(wr)->rkey); in mthca_arbel_post_send()
2041 switch (wr->opcode) { in mthca_arbel_post_send()
2044 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, in mthca_arbel_post_send()
2045 rdma_wr(wr)->rkey); in mthca_arbel_post_send()
2058 set_arbel_ud_seg(wqe, ud_wr(wr)); in mthca_arbel_post_send()
2064 err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr), in mthca_arbel_post_send()
2068 *bad_wr = wr; in mthca_arbel_post_send()
2076 if (wr->num_sge > qp->sq.max_gs) { in mthca_arbel_post_send()
2079 *bad_wr = wr; in mthca_arbel_post_send()
2083 for (i = 0; i < wr->num_sge; ++i) { in mthca_arbel_post_send()
2084 mthca_set_data_seg(wqe, wr->sg_list + i); in mthca_arbel_post_send()
2098 qp->wrid[ind + qp->rq.max] = wr->wr_id; in mthca_arbel_post_send()
2100 if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { in mthca_arbel_post_send()
2103 *bad_wr = wr; in mthca_arbel_post_send()
2110 mthca_opcode[wr->opcode]); in mthca_arbel_post_send()
2114 ((wr->send_flags & IB_SEND_FENCE) ? in mthca_arbel_post_send()
2119 op0 = mthca_opcode[wr->opcode]; in mthca_arbel_post_send()
2120 f0 = wr->send_flags & IB_SEND_FENCE ? in mthca_arbel_post_send()
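
mthca_arbel_post_send() repeats the Tavor send flow almost match for match; the visible addition here is IB_SEND_IP_CSUM (file line 1999), which this path turns into a checksum-offload request in the WQE control bits alongside the signaled and solicited-event bits. A tiny sketch of that extra flag translation, with hypothetical flag and bit values in the same stub style as the post-send sketch above:

```c
#include <stdint.h>

/* Hypothetical flag and bit values; not the kernel's definitions. */
#define STUB_SEND_SIGNALED  (1u << 0)
#define STUB_SEND_SOLICITED (1u << 1)
#define STUB_SEND_IP_CSUM   (1u << 4)

/* Flag translation as on the Arbel path: a checksum-offload bit is set
 * next to the completion and solicited-event bits. */
static uint32_t arbel_ctrl_bits_stub(uint32_t send_flags)
{
	uint32_t ctrl = 0;

	if (send_flags & STUB_SEND_SIGNALED)
		ctrl |= 1u << 0;   /* ask for a completion entry */
	if (send_flags & STUB_SEND_SOLICITED)
		ctrl |= 1u << 1;   /* solicited event            */
	if (send_flags & STUB_SEND_IP_CSUM)
		ctrl |= 1u << 2;   /* IP/TCP checksum offload    */
	return ctrl;
}
```
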
2162 int mthca_arbel_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr, in mthca_arbel_post_receive() argument
2180 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mthca_arbel_post_receive()
2187 *bad_wr = wr; in mthca_arbel_post_receive()
2197 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in mthca_arbel_post_receive()
2199 *bad_wr = wr; in mthca_arbel_post_receive()
2203 for (i = 0; i < wr->num_sge; ++i) { in mthca_arbel_post_receive()
2204 mthca_set_data_seg(wqe, wr->sg_list + i); in mthca_arbel_post_receive()
2211 qp->wrid[ind] = wr->wr_id; in mthca_arbel_post_receive()