Lines matching refs:seg (references to the seg WQE cursor in the mlx5 send-path helpers)

55 			void **seg, int *size, void **cur_edge)  in set_eth_seg()  argument
57 struct mlx5_wqe_eth_seg *eseg = *seg; in set_eth_seg()
85 *seg += stride; in set_eth_seg()
88 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_eth_seg()
91 mlx5r_memcpy_send_wqe(&qp->sq, cur_edge, seg, size, in set_eth_seg()
98 *seg += sizeof(struct mlx5_wqe_eth_seg); in set_eth_seg()
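
A recurring pattern is visible in the set_eth_seg() hits above: write a
segment, advance the *seg cursor by its stride, and let
handle_post_send_edge() wrap the cursor to the next fragment of the send
queue once it reaches *cur_edge. A minimal userspace model of that
pattern (fragment count/size and all names here are hypothetical
stand-ins, not the kernel code):

#include <stdint.h>
#include <stdio.h>

#define FRAG_SIZE 256                   /* hypothetical fragment size */
#define NUM_FRAGS 4

static uint8_t frags[NUM_FRAGS][FRAG_SIZE];

/* Advance the write cursor; wrap to the next fragment at the edge.
 * In the real driver the cursor always lands exactly on the edge,
 * because segments are multiples of 16 bytes. */
static void advance_seg(void **seg, void **cur_edge, int *frag_idx,
                        size_t stride)
{
        *seg = (uint8_t *)*seg + stride;
        if (*seg >= *cur_edge) {
                *frag_idx = (*frag_idx + 1) % NUM_FRAGS;
                *seg = frags[*frag_idx];
                *cur_edge = frags[*frag_idx] + FRAG_SIZE;
        }
}

int main(void)
{
        int frag_idx = 0;
        void *seg = frags[0];
        void *cur_edge = frags[0] + FRAG_SIZE;

        for (int i = 0; i < 10; i++)
                advance_seg(&seg, &cur_edge, &frag_idx, 64);

        printf("cursor now in fragment %d\n", frag_idx);   /* 2 */
        return 0;
}
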
189 static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg, in set_reg_mkey_seg() argument
195 memset(seg, 0, sizeof(*seg)); in set_reg_mkey_seg()
198 seg->log2_page_size = ilog2(mr->ibmr.page_size); in set_reg_mkey_seg()
203 seg->flags = get_umr_flags(access) | mr->access_mode; in set_reg_mkey_seg()
204 seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00); in set_reg_mkey_seg()
205 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL); in set_reg_mkey_seg()
206 seg->start_addr = cpu_to_be64(mr->ibmr.iova); in set_reg_mkey_seg()
207 seg->len = cpu_to_be64(mr->ibmr.length); in set_reg_mkey_seg()
208 seg->xlt_oct_size = cpu_to_be32(ndescs); in set_reg_mkey_seg()
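
Source lines 198 and 204 above show two encodings worth noting: the page
size is stored as its log2, and the low byte of the mkey is merged into
qpn_mkey7_0 with the QPN bits forced to all-ones. A hedged sketch of
both (the struct and helpers are simplified stand-ins for the kernel
definitions; __builtin_clz is GCC/Clang):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>          /* htonl() standing in for cpu_to_be32() */

struct mkey_seg_model {
        uint32_t qpn_mkey7_0;   /* QPN bits | mkey[7:0], big-endian */
        uint8_t  log2_page_size;
};

int main(void)
{
        uint32_t key = 0x00abcd42;      /* example mkey; low byte is the tag */
        uint32_t page_size = 4096;
        struct mkey_seg_model seg;

        /* QPN field forced to 0xffffff00 (not bound to a QP). */
        seg.qpn_mkey7_0 = htonl((key & 0xff) | 0xffffff00);
        /* ilog2(4096) == 12 */
        seg.log2_page_size = 31 - __builtin_clz(page_size);

        printf("qpn_mkey7_0=0x%08x log2_page_size=%u\n",
               ntohl(seg.qpn_mkey7_0), (unsigned)seg.log2_page_size);
        return 0;
}
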
211 static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg) in set_linv_mkey_seg() argument
213 memset(seg, 0, sizeof(*seg)); in set_linv_mkey_seg()
214 seg->status = MLX5_MKEY_STATUS_FREE; in set_linv_mkey_seg()
263 struct mlx5_wqe_inline_seg *seg; in set_data_inl_seg() local
268 seg = *wqe; in set_data_inl_seg()
269 *wqe += sizeof(*seg); in set_data_inl_seg()
270 offset = sizeof(*seg); in set_data_inl_seg()
300 seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG); in set_data_inl_seg()
302 *wqe_sz += ALIGN(inl + sizeof(seg->byte_count), 16) / 16; in set_data_inl_seg()
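
The byte_count/wqe_sz updates above (source lines 300 and 302) tag the
inline payload length with the MLX5_INLINE_SEG flag and grow the WQE in
16-byte units: e.g. 20 inline bytes plus the 4-byte byte_count header
round up to 32 bytes, i.e. 2 units. A sketch of that arithmetic (the
macro values mirror the mlx5 headers as I understand them; verify
against your tree):

#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))
#define MLX5_INLINE_SEG 0x80000000u     /* flag bit in byte_count */

int main(void)
{
        uint32_t inl = 20;              /* bytes of inline payload */
        uint32_t byte_count = inl | MLX5_INLINE_SEG;
        /* +4 covers the byte_count header itself. */
        uint32_t wqe_units = ALIGN(inl + 4, 16) / 16;

        printf("byte_count=0x%08x wqe_size+=%u\n", byte_count, wqe_units);
        return 0;
}
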
419 struct mlx5_ib_qp *qp, void **seg, int *size, in set_sig_data_segment() argument
457 struct mlx5_klm *data_klm = *seg; in set_sig_data_segment()
483 sblock_ctrl = *seg; in set_sig_data_segment()
512 *seg += wqe_size; in set_sig_data_segment()
514 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_sig_data_segment()
516 bsf = *seg; in set_sig_data_segment()
521 *seg += sizeof(*bsf); in set_sig_data_segment()
523 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_sig_data_segment()
528 static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg, in set_sig_mkey_segment() argument
535 memset(seg, 0, sizeof(*seg)); in set_sig_mkey_segment()
537 seg->flags = get_umr_flags(access_flags) | MLX5_MKC_ACCESS_MODE_KLMS; in set_sig_mkey_segment()
538 seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00); in set_sig_mkey_segment()
539 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 | in set_sig_mkey_segment()
541 seg->len = cpu_to_be64(length); in set_sig_mkey_segment()
542 seg->xlt_oct_size = cpu_to_be32(mlx5r_umr_get_xlt_octo(size)); in set_sig_mkey_segment()
543 seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE); in set_sig_mkey_segment()
558 struct mlx5_ib_qp *qp, void **seg, int *size, in set_pi_umr_wr() argument
588 set_sig_umr_segment(*seg, xlt_size); in set_pi_umr_wr()
589 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); in set_pi_umr_wr()
591 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_pi_umr_wr()
593 set_sig_mkey_segment(*seg, wr->mr, wr->access, xlt_size, region_len, in set_pi_umr_wr()
595 *seg += sizeof(struct mlx5_mkey_seg); in set_pi_umr_wr()
597 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_pi_umr_wr()
599 ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size, in set_pi_umr_wr()
609 u32 psv_idx, void **seg, int *size) in set_psv_wr() argument
611 struct mlx5_seg_set_psv *psv_seg = *seg; in set_psv_wr()
629 *seg += sizeof(*psv_seg); in set_psv_wr()
637 void **seg, int *size, void **cur_edge, in set_reg_wr() argument
670 set_reg_umr_seg(*seg, mr, flags, atomic); in set_reg_wr()
671 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); in set_reg_wr()
673 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_reg_wr()
675 set_reg_mkey_seg(*seg, mr, wr->key, wr->access); in set_reg_wr()
676 *seg += sizeof(struct mlx5_mkey_seg); in set_reg_wr()
678 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_reg_wr()
681 mlx5r_memcpy_send_wqe(&qp->sq, cur_edge, seg, size, mr->descs, in set_reg_wr()
685 set_reg_data_seg(*seg, mr, pd); in set_reg_wr()
686 *seg += sizeof(struct mlx5_wqe_data_seg); in set_reg_wr()
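
set_reg_wr() lays down three fixed-size segments in sequence (UMR
control, mkey, then a data segment) with an edge check between each.
A sketch of that layout, assuming the usual segment sizes of 48/64/16
bytes from the mlx5 headers I have seen; treat them as illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define UMR_CTRL_SEG_SZ 48      /* mlx5_wqe_umr_ctrl_seg */
#define MKEY_SEG_SZ     64      /* mlx5_mkey_seg         */
#define DATA_SEG_SZ     16      /* mlx5_wqe_data_seg     */

static uint8_t wqe_buf[256];

/* Append a zeroed segment and advance the cursor; the real builder
 * would also wrap at the fragment edge (handle_post_send_edge()). */
static void emit_seg(void **seg, int *size16, size_t seg_sz)
{
        memset(*seg, 0, seg_sz);
        *seg = (uint8_t *)*seg + seg_sz;
        *size16 += seg_sz / 16;         /* WQE size kept in 16B units */
}

int main(void)
{
        void *seg = wqe_buf;
        int size = 0;

        emit_seg(&seg, &size, UMR_CTRL_SEG_SZ); /* set_reg_umr_seg()  */
        emit_seg(&seg, &size, MKEY_SEG_SZ);     /* set_reg_mkey_seg() */
        emit_seg(&seg, &size, DATA_SEG_SZ);     /* set_reg_data_seg() */

        printf("reg WR consumed %d x 16B units\n", size);      /* 8 */
        return 0;
}
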
692 static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size, in set_linv_wr() argument
695 set_linv_umr_seg(*seg); in set_linv_wr()
696 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); in set_linv_wr()
698 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_linv_wr()
699 set_linv_mkey_seg(*seg); in set_linv_wr()
700 *seg += sizeof(struct mlx5_mkey_seg); in set_linv_wr()
702 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_linv_wr()
724 int mlx5r_begin_wqe(struct mlx5_ib_qp *qp, void **seg, in mlx5r_begin_wqe() argument
733 *seg = mlx5_frag_buf_get_wqe(&qp->sq.fbc, *idx); in mlx5r_begin_wqe()
734 *ctrl = *seg; in mlx5r_begin_wqe()
735 *(uint32_t *)(*seg + 8) = 0; in mlx5r_begin_wqe()
741 *seg += sizeof(**ctrl); in mlx5r_begin_wqe()
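
mlx5r_begin_wqe() above masks cur_post into a ring index, points *ctrl
at the slot, zeroes the dword at offset 8 of the control segment, and
advances *seg past it. A userspace model (contiguous ring instead of
the kernel's mlx5_frag_buf; names and the field layout are stand-ins):

#include <stdint.h>
#include <stdio.h>

#define WQE_CNT 16              /* must be a power of two */
#define WQE_BB  64              /* one basic-block slot   */

static uint8_t sq_buf[WQE_CNT * WQE_BB];
static unsigned int cur_post;   /* bumped when a WQE is finished */

struct ctrl_seg_model {         /* 16 bytes, like mlx5_wqe_ctrl_seg */
        uint32_t opmod_idx_opcode;
        uint32_t qpn_ds;
        uint32_t flags_sig;     /* the dword cleared below */
        uint32_t imm;
};

static void *begin_wqe_model(unsigned int *idx, void **ctrl)
{
        void *seg;

        *idx = cur_post & (WQE_CNT - 1);        /* ring index */
        seg = &sq_buf[*idx * WQE_BB];
        *ctrl = seg;
        *(uint32_t *)((uint8_t *)seg + 8) = 0;  /* clear fence/sig dword */
        return (uint8_t *)seg + sizeof(struct ctrl_seg_model);
}

int main(void)
{
        unsigned int idx;
        void *ctrl;
        void *seg = begin_wqe_model(&idx, &ctrl);

        printf("idx=%u, first segment at offset %td\n",
               idx, (uint8_t *)seg - sq_buf);
        return 0;
}
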
748 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, in begin_wqe() argument
753 return mlx5r_begin_wqe(qp, seg, ctrl, idx, size, cur_edge, nreq, in begin_wqe()
759 void *seg, u8 size, void *cur_edge, unsigned int idx, in mlx5r_finish_wqe() argument
780 seg = PTR_ALIGN(seg, MLX5_SEND_WQE_BB); in mlx5r_finish_wqe()
781 qp->sq.cur_edge = (unlikely(seg == cur_edge)) ? in mlx5r_finish_wqe()
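
The PTR_ALIGN at source line 780 rounds the cursor up to the next
64-byte basic block, and line 781 moves the saved edge when the rounded
cursor landed exactly on it. The alignment step in isolation (a minimal
sketch; the aligned attribute stands in for the naturally aligned queue
buffer):

#include <stdint.h>
#include <stdio.h>

#define SEND_WQE_BB 64          /* MLX5_SEND_WQE_BB */
#define PTR_ALIGN(p, a) \
        ((void *)(((uintptr_t)(p) + (a) - 1) & ~(uintptr_t)((a) - 1)))

/* 64-byte aligned so the offsets below are meaningful (GCC/Clang). */
static uint8_t buf[256] __attribute__((aligned(64)));

int main(void)
{
        void *seg = buf + 70;   /* cursor after an odd-sized WQE */
        void *aligned = PTR_ALIGN(seg, SEND_WQE_BB);

        printf("cursor moved from offset 70 to %td\n",
               (uint8_t *)aligned - buf);       /* prints 128 */
        return 0;
}
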
787 static void handle_rdma_op(const struct ib_send_wr *wr, void **seg, int *size) in handle_rdma_op() argument
789 set_raddr_seg(*seg, rdma_wr(wr)->remote_addr, rdma_wr(wr)->rkey); in handle_rdma_op()
790 *seg += sizeof(struct mlx5_wqe_raddr_seg); in handle_rdma_op()
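
handle_rdma_op() writes the 16-byte remote-address segment: a
big-endian remote VA, the big-endian rkey, then 4 reserved bytes.
A portable sketch of those stores (put_be64/put_be32 stand in for the
kernel's cpu_to_be64()/cpu_to_be32()):

#include <stdint.h>
#include <stdio.h>

static void put_be64(uint8_t *dst, uint64_t x)
{
        for (int i = 0; i < 8; i++)
                dst[i] = (uint8_t)(x >> (56 - 8 * i));  /* MSB first */
}

static void put_be32(uint8_t *dst, uint32_t x)
{
        for (int i = 0; i < 4; i++)
                dst[i] = (uint8_t)(x >> (24 - 8 * i));
}

int main(void)
{
        /* raddr(8) | rkey(4) | reserved(4), as in mlx5_wqe_raddr_seg */
        uint8_t raddr_seg[16] = { 0 };

        put_be64(raddr_seg, 0x00007f00beef0000ULL);     /* example VA   */
        put_be32(raddr_seg + 8, 0x1234);                /* example rkey */

        printf("first wire byte of raddr: 0x%02x\n", raddr_seg[0]);
        return 0;
}
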
795 struct mlx5_wqe_ctrl_seg **ctrl, void **seg, in handle_local_inv() argument
800 set_linv_wr(qp, seg, size, cur_edge); in handle_local_inv()
804 struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, in handle_reg_mr() argument
809 return set_reg_wr(qp, reg_wr(wr), seg, size, cur_edge, true); in handle_reg_mr()
814 struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, in handle_psv() argument
824 err = mlx5r_begin_wqe(qp, seg, ctrl, idx, size, cur_edge, nreq, in handle_psv()
831 err = set_psv_wr(domain, psv_index, seg, size); in handle_psv()
836 mlx5r_finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id, in handle_psv()
846 struct mlx5_wqe_ctrl_seg **ctrl, void **seg, in handle_reg_mr_integrity() argument
873 err = set_reg_wr(qp, &reg_pi_wr, seg, size, cur_edge, false); in handle_reg_mr_integrity()
877 mlx5r_finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, in handle_reg_mr_integrity()
880 err = begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq); in handle_reg_mr_integrity()
904 err = set_pi_umr_wr(wr, qp, seg, size, cur_edge); in handle_reg_mr_integrity()
909 mlx5r_finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id, in handle_reg_mr_integrity()
913 err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq, in handle_reg_mr_integrity()
919 err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq, in handle_reg_mr_integrity()
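
handle_reg_mr_integrity() emits a chain of WQEs rather than a single
one: the data-MR registration, then the protection-information UMR,
then one PSV WQE per signature domain, each sealed with
mlx5r_finish_wqe(). A stub sketch of that ordering (the helper here
only records the sequence):

#include <stdio.h>

static void emit(const char *wqe) { printf("WQE: %s\n", wqe); }

int main(void)
{
        emit("reg_mr (data MR)");     /* set_reg_wr() + finish_wqe()    */
        emit("pi_umr (sig mkey)");    /* set_pi_umr_wr() + finish_wqe() */
        emit("psv (memory domain)");  /* handle_psv() -> set_psv_wr()   */
        emit("psv (wire domain)");    /* handle_psv() -> set_psv_wr()   */
        return 0;
}
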
933 struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, in handle_qpt_rc() argument
943 handle_rdma_op(wr, seg, size); in handle_qpt_rc()
954 handle_local_inv(qp, wr, ctrl, seg, size, cur_edge, *idx); in handle_qpt_rc()
959 err = handle_reg_mr(qp, wr, ctrl, seg, size, cur_edge, *idx); in handle_qpt_rc()
966 err = handle_reg_mr_integrity(dev, qp, wr, ctrl, seg, size, in handle_qpt_rc()
982 static void handle_qpt_uc(const struct ib_send_wr *wr, void **seg, int *size) in handle_qpt_uc() argument
987 handle_rdma_op(wr, seg, size); in handle_qpt_uc()
995 const struct ib_send_wr *wr, void **seg, in handle_qpt_hw_gsi() argument
998 set_datagram_seg(*seg, wr); in handle_qpt_hw_gsi()
999 *seg += sizeof(struct mlx5_wqe_datagram_seg); in handle_qpt_hw_gsi()
1001 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in handle_qpt_hw_gsi()
1005 void **seg, int *size, void **cur_edge) in handle_qpt_ud() argument
1007 set_datagram_seg(*seg, wr); in handle_qpt_ud()
1008 *seg += sizeof(struct mlx5_wqe_datagram_seg); in handle_qpt_ud()
1010 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in handle_qpt_ud()
1016 pad = *seg; in handle_qpt_ud()
1018 *seg += sizeof(struct mlx5_wqe_eth_pad); in handle_qpt_ud()
1020 set_eth_seg(wr, qp, seg, size, cur_edge); in handle_qpt_ud()
1021 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in handle_qpt_ud()
1065 void *seg; in mlx5_ib_post_send() local
1098 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, &cur_edge, in mlx5_ib_post_send()
1124 xrc = seg; in mlx5_ib_post_send()
1125 seg += sizeof(*xrc); in mlx5_ib_post_send()
1129 err = handle_qpt_rc(dev, qp, wr, &ctrl, &seg, &size, in mlx5_ib_post_send()
1141 handle_qpt_uc(wr, &seg, &size); in mlx5_ib_post_send()
1152 handle_qpt_hw_gsi(qp, wr, &seg, &size, &cur_edge); in mlx5_ib_post_send()
1155 handle_qpt_ud(qp, wr, &seg, &size, &cur_edge); in mlx5_ib_post_send()
1163 err = set_data_inl_seg(qp, wr, &seg, &size, &cur_edge); in mlx5_ib_post_send()
1171 handle_post_send_edge(&qp->sq, &seg, size, in mlx5_ib_post_send()
1177 (struct mlx5_wqe_data_seg *)seg, in mlx5_ib_post_send()
1180 seg += sizeof(struct mlx5_wqe_data_seg); in mlx5_ib_post_send()
1185 mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id, in mlx5_ib_post_send()
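
Taken together, the mlx5_ib_post_send() hits show the per-WR shape of
the post path: reserve a slot with begin_wqe(), emit the opcode- and
transport-specific segments, append inline or scatter/gather data, and
seal the WQE with mlx5r_finish_wqe(). A condensed stub model of that
loop (every helper here is a placeholder for the real calls above):

#include <stdio.h>

struct wr { int opcode; int num_sge; struct wr *next; };

static int  begin_wqe_stub(void)         { return 0; }  /* reserve slot */
static void opcode_segs_stub(int opcode) { (void)opcode; }
static void data_segs_stub(int num_sge)  { (void)num_sge; }
static void finish_wqe_stub(void)        { }            /* align, post  */

static int post_send_model(struct wr *head)
{
        for (struct wr *wr = head; wr; wr = wr->next) {
                if (begin_wqe_stub())         /* queue full: stop      */
                        return -1;
                opcode_segs_stub(wr->opcode); /* raddr/umr/datagram... */
                data_segs_stub(wr->num_sge);  /* inline or SGE list    */
                finish_wqe_stub();            /* seal this WQE         */
        }
        return 0;
}

int main(void)
{
        struct wr w2 = { 1, 1, NULL };
        struct wr w1 = { 0, 2, &w2 };

        return post_send_model(&w1);
}
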