Lines matching full:wr
3060 const struct ib_send_wr *wr, void *qend, in set_eth_seg() argument
3067 if (wr->send_flags & IB_SEND_IP_CSUM) in set_eth_seg()
3074 if (wr->opcode == IB_WR_LSO) { in set_eth_seg()
3075 struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr); in set_eth_seg()
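set_eth_seg() recovers the UD-specific request from the embedded generic struct ib_send_wr with container_of(). The ud_wr()/rdma_wr()/reg_wr()/umr_wr() accessors seen throughout the rest of this listing follow the same idiom; a minimal sketch of the pattern (illustrative helper name; the in-tree accessors live in include/rdma/ib_verbs.h and, for umr_wr(), in the mlx5 driver itself):

    /* Every opcode-specific request embeds struct ib_send_wr as a member,
     * so the generic pointer handed to the driver can be mapped back to
     * its wrapper. Illustrative helper modelled on the in-tree accessors.
     */
    static inline const struct ib_ud_wr *ud_wr_sketch(const struct ib_send_wr *wr)
    {
            return container_of(wr, struct ib_ud_wr, wr);
    }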
3113 const struct ib_send_wr *wr) in set_datagram_seg() argument
3115 memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av)); in set_datagram_seg()
3116 dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV); in set_datagram_seg()
3117 dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey); in set_datagram_seg()
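On the caller side, set_datagram_seg() consumes a UD request like the one below; a hypothetical ULP sketch (qp, ah, buf_dma, lkey and the destination QPN/QKey are assumed to come from the surrounding code):

    /* Hypothetical sketch: build and post one UD send. Only the wr plumbing
     * that set_datagram_seg() reads (ah, remote_qpn, remote_qkey) is shown;
     * needs <rdma/ib_verbs.h>.
     */
    static int post_one_ud_send(struct ib_qp *qp, struct ib_ah *ah,
                                u64 buf_dma, u32 len, u32 lkey,
                                u32 dest_qpn, u32 dest_qkey)
    {
            struct ib_sge sge = {
                    .addr   = buf_dma,
                    .length = len,
                    .lkey   = lkey,
            };
            struct ib_ud_wr wr = {
                    .wr = {
                            .opcode     = IB_WR_SEND,
                            .send_flags = IB_SEND_SIGNALED,
                            .sg_list    = &sge,
                            .num_sge    = 1,
                    },
                    .ah          = ah,
                    .remote_qpn  = dest_qpn,
                    .remote_qkey = dest_qkey,
            };
            const struct ib_send_wr *bad_wr;

            return ib_post_send(qp, &wr.wr, &bad_wr);
    }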
3272 const struct ib_send_wr *wr) in set_reg_umr_segment() argument
3274 const struct mlx5_umr_wr *umrwr = umr_wr(wr); in set_reg_umr_segment()
3278 if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE) in set_reg_umr_segment()
3283 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) { in set_reg_umr_segment()
3285 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) { in set_reg_umr_segment()
3290 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION) in set_reg_umr_segment()
3292 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_ACCESS) in set_reg_umr_segment()
3294 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD) in set_reg_umr_segment()
3302 if (!wr->num_sge) in set_reg_umr_segment()
3343 static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, const struct ib_send_wr *wr) in set_reg_mkey_segment() argument
3345 const struct mlx5_umr_wr *umrwr = umr_wr(wr); in set_reg_mkey_segment()
3348 if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) { in set_reg_mkey_segment()
3354 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) { in set_reg_mkey_segment()
3376 static __be32 send_ieth(const struct ib_send_wr *wr) in send_ieth() argument
3378 switch (wr->opcode) { in send_ieth()
3381 return wr->ex.imm_data; in send_ieth()
3384 return cpu_to_be32(wr->ex.invalidate_rkey); in send_ieth()
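send_ieth() pulls the immediate/invalidate value out of the ex union of the generic request; the caller fills that union according to the opcode, roughly as in this illustrative sketch:

    /* Illustrative: the ex union carries either immediate data (__be32,
     * passed through unchanged by send_ieth()) or an rkey to invalidate
     * (CPU-endian, byte-swapped by send_ieth()), selected via wr->opcode.
     */
    static void fill_ieth_sketch(struct ib_send_wr *wr, bool invalidate, u32 rkey)
    {
            if (invalidate) {
                    wr->opcode             = IB_WR_SEND_WITH_INV;
                    wr->ex.invalidate_rkey = rkey;
            } else {
                    wr->opcode      = IB_WR_SEND_WITH_IMM;
                    wr->ex.imm_data = cpu_to_be32(0x1234); /* placeholder */
            }
    }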
3408 static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, in set_data_inl_seg() argument
3421 for (i = 0; i < wr->num_sge; i++) { in set_data_inl_seg()
3422 addr = (void *)(unsigned long)(wr->sg_list[i].addr); in set_data_inl_seg()
3423 len = wr->sg_list[i].length; in set_data_inl_seg()
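set_data_inl_seg() copies the payload bytes straight into the WQE, which is why sg_list[i].addr is cast back to a kernel virtual address here rather than treated as a DMA address. A hypothetical caller-side sketch of an inline send (the payload must fit within the QP's max_inline_data):

    /* Hypothetical sketch: with IB_SEND_INLINE the driver memcpy()s the
     * payload into the WQE, so sge.addr is an ordinary virtual address
     * and no lkey is required.
     */
    static int post_small_inline_send(struct ib_qp *qp, void *buf, u32 len)
    {
            struct ib_sge sge = {
                    .addr   = (uintptr_t)buf,
                    .length = len,
            };
            struct ib_send_wr wr = {
                    .opcode     = IB_WR_SEND,
                    .send_flags = IB_SEND_INLINE | IB_SEND_SIGNALED,
                    .sg_list    = &sge,
                    .num_sge    = 1,
            };
            const struct ib_send_wr *bad_wr;

            return ib_post_send(qp, &wr, &bad_wr);
    }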
3554 static int set_sig_data_segment(const struct ib_sig_handover_wr *wr, in set_sig_data_segment() argument
3557 struct ib_sig_attrs *sig_attrs = wr->sig_attrs; in set_sig_data_segment()
3558 struct ib_mr *sig_mr = wr->sig_mr; in set_sig_data_segment()
3560 u32 data_len = wr->wr.sg_list->length; in set_sig_data_segment()
3561 u32 data_key = wr->wr.sg_list->lkey; in set_sig_data_segment()
3562 u64 data_va = wr->wr.sg_list->addr; in set_sig_data_segment()
3566 if (!wr->prot || in set_sig_data_segment()
3567 (data_key == wr->prot->lkey && in set_sig_data_segment()
3568 data_va == wr->prot->addr && in set_sig_data_segment()
3569 data_len == wr->prot->length)) { in set_sig_data_segment()
3603 u32 prot_key = wr->prot->lkey; in set_sig_data_segment()
3604 u64 prot_va = wr->prot->addr; in set_sig_data_segment()
3656 const struct ib_sig_handover_wr *wr, u32 nelements, in set_sig_mkey_segment() argument
3659 struct ib_mr *sig_mr = wr->sig_mr; in set_sig_mkey_segment()
3665 seg->flags = get_umr_flags(wr->access_flags) | in set_sig_mkey_segment()
3690 const struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr); in set_sig_umr_wr() local
3691 struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr); in set_sig_umr_wr()
3696 if (unlikely(wr->wr.num_sge != 1) || in set_sig_umr_wr()
3697 unlikely(wr->access_flags & IB_ACCESS_REMOTE_ATOMIC) || in set_sig_umr_wr()
3703 region_len = wr->wr.sg_list->length; in set_sig_umr_wr()
3704 if (wr->prot && in set_sig_umr_wr()
3705 (wr->prot->lkey != wr->wr.sg_list->lkey || in set_sig_umr_wr()
3706 wr->prot->addr != wr->wr.sg_list->addr || in set_sig_umr_wr()
3707 wr->prot->length != wr->wr.sg_list->length)) in set_sig_umr_wr()
3708 region_len += wr->prot->length; in set_sig_umr_wr()
3715 klm_oct_size = wr->prot ? 3 : 1; in set_sig_umr_wr()
3723 set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn); in set_sig_umr_wr()
3729 ret = set_sig_data_segment(wr, qp, seg, size); in set_sig_umr_wr()
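The signature-handover lines above dereference a handful of request fields; sketched below for orientation (field set inferred from the accesses in this listing, not the authoritative kernel definition):

    /* Sketch of the request consumed by set_sig_umr_wr()/set_sig_data_segment():
     * wr.sg_list[0] describes the data buffer, prot the optional separate
     * protection buffer, sig_mr the signature-enabled MR being configured,
     * and sig_attrs the mem/wire protection domains. Illustrative only.
     */
    struct sig_handover_wr_sketch {
            struct ib_send_wr    wr;           /* generic part               */
            struct ib_sig_attrs *sig_attrs;    /* mem/wire signature domains */
            struct ib_mr        *sig_mr;       /* signature MR to configure  */
            int                  access_flags;
            struct ib_sge       *prot;         /* NULL if data covers both   */
    };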
3764 const struct ib_reg_wr *wr, in set_reg_wr() argument
3767 struct mlx5_ib_mr *mr = to_mmr(wr->mr); in set_reg_wr()
3770 if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) { in set_reg_wr()
3782 set_reg_mkey_seg(*seg, mr, wr->key, wr->access); in set_reg_wr()
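set_reg_wr() implements fast memory registration (IB_WR_REG_MR); a caller typically maps an SG list into the MR with ib_map_mr_sg() and then posts the registration request, roughly as in this hypothetical sketch:

    /* Hypothetical sketch: fast-register 'mr' over a mapped SG list and post
     * the registration WR. qp, mr, sgl and nents are assumed to come from
     * the caller; error handling is abbreviated.
     */
    static int fast_reg_sketch(struct ib_qp *qp, struct ib_mr *mr,
                               struct scatterlist *sgl, int nents)
    {
            struct ib_reg_wr reg_wr = { };
            const struct ib_send_wr *bad_wr;
            int n;

            n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
            if (n < nents)
                    return n < 0 ? n : -EINVAL;

            reg_wr.wr.opcode     = IB_WR_REG_MR;
            reg_wr.wr.send_flags = IB_SEND_SIGNALED;
            reg_wr.mr            = mr;
            reg_wr.key           = mr->rkey;
            reg_wr.access        = IB_ACCESS_LOCAL_WRITE |
                                   IB_ACCESS_REMOTE_READ |
                                   IB_ACCESS_REMOTE_WRITE;

            return ib_post_send(qp, &reg_wr.wr, &bad_wr);
    }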
3829 static u8 get_fence(u8 fence, const struct ib_send_wr *wr) in get_fence() argument
3831 if (unlikely(wr->opcode == IB_WR_LOCAL_INV && in get_fence()
3832 wr->send_flags & IB_SEND_FENCE)) in get_fence()
3836 if (wr->send_flags & IB_SEND_FENCE) in get_fence()
3840 } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) { in get_fence()
3849 const struct ib_send_wr *wr, unsigned *idx, in begin_wqe() argument
3859 (*ctrl)->imm = send_ieth(wr); in begin_wqe()
3896 int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, in mlx5_ib_post_send() argument
3920 return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr); in mlx5_ib_post_send()
3930 *bad_wr = wr; in mlx5_ib_post_send()
3935 for (nreq = 0; wr; nreq++, wr = wr->next) { in mlx5_ib_post_send()
3936 if (unlikely(wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) { in mlx5_ib_post_send()
3939 *bad_wr = wr; in mlx5_ib_post_send()
3944 num_sge = wr->num_sge; in mlx5_ib_post_send()
3948 *bad_wr = wr; in mlx5_ib_post_send()
3952 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq, wr->send_flags); in mlx5_ib_post_send()
3956 *bad_wr = wr; in mlx5_ib_post_send()
3967 switch (wr->opcode) { in mlx5_ib_post_send()
3971 set_raddr_seg(seg, rdma_wr(wr)->remote_addr, in mlx5_ib_post_send()
3972 rdma_wr(wr)->rkey); in mlx5_ib_post_send()
3982 *bad_wr = wr; in mlx5_ib_post_send()
3988 ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey); in mlx5_ib_post_send()
3996 ctrl->imm = cpu_to_be32(reg_wr(wr)->key); in mlx5_ib_post_send()
3997 err = set_reg_wr(qp, reg_wr(wr), &seg, &size); in mlx5_ib_post_send()
3999 *bad_wr = wr; in mlx5_ib_post_send()
4007 mr = to_mmr(sig_handover_wr(wr)->sig_mr); in mlx5_ib_post_send()
4010 err = set_sig_umr_wr(wr, qp, &seg, &size); in mlx5_ib_post_send()
4013 *bad_wr = wr; in mlx5_ib_post_send()
4017 finish_wqe(qp, ctrl, size, idx, wr->wr_id, in mlx5_ib_post_send()
4018 nreq, get_fence(fence, wr), in mlx5_ib_post_send()
4024 err = begin_wqe(qp, &seg, &ctrl, wr, in mlx5_ib_post_send()
4029 *bad_wr = wr; in mlx5_ib_post_send()
4033 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->mem, in mlx5_ib_post_send()
4038 *bad_wr = wr; in mlx5_ib_post_send()
4042 finish_wqe(qp, ctrl, size, idx, wr->wr_id, in mlx5_ib_post_send()
4043 nreq, get_fence(fence, wr), in mlx5_ib_post_send()
4045 err = begin_wqe(qp, &seg, &ctrl, wr, in mlx5_ib_post_send()
4046 &idx, &size, nreq, wr->send_flags); in mlx5_ib_post_send()
4050 *bad_wr = wr; in mlx5_ib_post_send()
4055 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire, in mlx5_ib_post_send()
4060 *bad_wr = wr; in mlx5_ib_post_send()
4064 finish_wqe(qp, ctrl, size, idx, wr->wr_id, in mlx5_ib_post_send()
4065 nreq, get_fence(fence, wr), in mlx5_ib_post_send()
4076 switch (wr->opcode) { in mlx5_ib_post_send()
4079 set_raddr_seg(seg, rdma_wr(wr)->remote_addr, in mlx5_ib_post_send()
4080 rdma_wr(wr)->rkey); in mlx5_ib_post_send()
4092 set_datagram_seg(seg, wr); in mlx5_ib_post_send()
4099 set_datagram_seg(seg, wr); in mlx5_ib_post_send()
4115 seg = set_eth_seg(seg, wr, qend, qp, &size); in mlx5_ib_post_send()
4122 if (wr->opcode != MLX5_IB_WR_UMR) { in mlx5_ib_post_send()
4128 ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey); in mlx5_ib_post_send()
4129 set_reg_umr_segment(seg, wr); in mlx5_ib_post_send()
4134 set_reg_mkey_segment(seg, wr); in mlx5_ib_post_send()
4145 if (wr->send_flags & IB_SEND_INLINE && num_sge) { in mlx5_ib_post_send()
4148 err = set_data_inl_seg(qp, wr, seg, &sz); in mlx5_ib_post_send()
4151 *bad_wr = wr; in mlx5_ib_post_send()
4162 if (likely(wr->sg_list[i].length)) { in mlx5_ib_post_send()
4163 set_data_ptr_seg(dpseg, wr->sg_list + i); in mlx5_ib_post_send()
4170 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, in mlx5_ib_post_send()
4171 get_fence(fence, wr), next_fence, in mlx5_ib_post_send()
4172 mlx5_ib_opcode[wr->opcode]); in mlx5_ib_post_send()
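mlx5_ib_post_send() walks the wr->next chain and, on failure, reports the first unposted request through *bad_wr. The caller-side contract, as a hypothetical sketch:

    /* Hypothetical sketch of the posting contract: requests are chained
     * through wr->next; on error *bad_wr points at the first request that
     * was not consumed (everything before it was accepted).
     */
    static int post_send_chain(struct ib_qp *qp, struct ib_send_wr *first,
                               struct ib_send_wr *second)
    {
            const struct ib_send_wr *bad_wr = NULL;
            int err;

            first->next  = second;
            second->next = NULL;

            err = ib_post_send(qp, first, &bad_wr);
            if (err)
                    pr_err("post_send failed (%d), first unposted wr_id %llu\n",
                           err, bad_wr ? (unsigned long long)bad_wr->wr_id : 0ULL);
            return err;
    }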
4211 int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, in mlx5_ib_post_recv() argument
4226 return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr); in mlx5_ib_post_recv()
4232 *bad_wr = wr; in mlx5_ib_post_recv()
4239 for (nreq = 0; wr; nreq++, wr = wr->next) { in mlx5_ib_post_recv()
4242 *bad_wr = wr; in mlx5_ib_post_recv()
4246 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in mlx5_ib_post_recv()
4248 *bad_wr = wr; in mlx5_ib_post_recv()
4256 for (i = 0; i < wr->num_sge; i++) in mlx5_ib_post_recv()
4257 set_data_ptr_seg(scat + i, wr->sg_list + i); in mlx5_ib_post_recv()
4270 qp->rq.wrid[ind] = wr->wr_id; in mlx5_ib_post_recv()
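The receive path mirrors this: mlx5_ib_post_recv() copies each sg_list entry into the RQ WQE and records wr_id so the eventual completion can be matched back to the buffer. A minimal hypothetical caller sketch (buf_dma/lkey describe an already DMA-mapped, registered buffer):

    /* Hypothetical sketch: post one receive buffer and remember wr_id for
     * completion matching.
     */
    static int post_one_recv(struct ib_qp *qp, u64 buf_dma, u32 len, u32 lkey,
                             u64 wr_id)
    {
            struct ib_sge sge = {
                    .addr   = buf_dma,
                    .length = len,
                    .lkey   = lkey,
            };
            struct ib_recv_wr wr = {
                    .wr_id   = wr_id,
                    .sg_list = &sge,
                    .num_sge = 1,
            };
            const struct ib_recv_wr *bad_wr;

            return ib_post_recv(qp, &wr, &bad_wr);
    }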