Lines Matching full:seg

273 struct mlx5_wqe_inline_seg *seg; in set_data_inl_seg() local
282 seg = wqe; in set_data_inl_seg()
283 wqe += sizeof *seg; in set_data_inl_seg()
305 seg->byte_count = htobe32(inl | MLX5_INLINE_SEG); in set_data_inl_seg()
306 *sz = align(inl + sizeof seg->byte_count, 16) / 16; in set_data_inl_seg()
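
These set_data_inl_seg() lines carry the inline-data convention: the payload follows a 4-byte header whose byte_count holds the length ORed with the MLX5_INLINE_SEG flag, and the segment size is reported back in 16-byte units. A minimal sketch of that math, assuming MLX5_INLINE_SEG is the top bit of byte_count as in the mlx5 headers:

	#include <stdint.h>
	#include <endian.h>   /* htobe32 (glibc) */

	#define MLX5_INLINE_SEG 0x80000000

	struct mlx5_wqe_inline_seg {
		uint32_t byte_count;   /* __be32 in the real header */
	};

	/* Round v up to the next multiple of 16. */
	static unsigned align16(unsigned v) { return (v + 15) & ~15u; }

	/* Fill the inline header and return the segment size in 16-byte
	 * units: the value line 306 stores through *sz. */
	static unsigned fill_inline_hdr(struct mlx5_wqe_inline_seg *seg,
					unsigned inl)
	{
		seg->byte_count = htobe32(inl | MLX5_INLINE_SEG);
		return align16(inl + sizeof(seg->byte_count)) / 16;
	}
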
423 uint32_t qpn, void **seg, int *size) in set_umr_data_seg() argument
428 } *data = *seg; in set_umr_data_seg()
437 *seg += sizeof(*data); in set_umr_data_seg()
443 uint32_t qpn, void **seg, int *size) in set_umr_mkey_seg() argument
445 struct mlx5_wqe_mkey_context_seg *mkey = *seg; in set_umr_mkey_seg()
475 *seg += sizeof(struct mlx5_wqe_mkey_context_seg); in set_umr_mkey_seg()
481 uint32_t qpn, void **seg, int *size) in set_umr_control_seg() argument
483 struct mlx5_wqe_umr_ctrl_seg *ctrl = *seg; in set_umr_control_seg()
512 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); in set_umr_control_seg()
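
set_umr_data_seg(), set_umr_mkey_seg() and set_umr_control_seg() share one calling convention: each writes its segment at *seg and advances *seg past it (lines 437, 475, 512), with *size accumulating the WQE size. A hypothetical helper making the pattern explicit, assuming size is tracked in 16-byte units as at line 306:

	#include <string.h>

	/* Hypothetical helper showing the convention; the real setters
	 * build their segments field by field rather than memcpy'ing. */
	static void emit_seg(void **seg, int *size, const void *src,
			     size_t len)
	{
		memcpy(*seg, src, len);
		*seg = (char *)*seg + len; /* provider uses void* arithmetic */
		*size += len / 16;         /* WQE size in 16B units */
	}
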
518 uint32_t qpn, void **seg, int *size) in set_bind_wr() argument
544 set_umr_control_seg(qp, type, rkey, bind_info, qpn, seg, size); in set_bind_wr()
545 if (unlikely((*seg == qend))) in set_bind_wr()
546 *seg = mlx5_get_send_wqe(qp, 0); in set_bind_wr()
548 set_umr_mkey_seg(qp, type, rkey, bind_info, qpn, seg, size); in set_bind_wr()
552 if (unlikely((*seg == qend))) in set_bind_wr()
553 *seg = mlx5_get_send_wqe(qp, 0); in set_bind_wr()
555 set_umr_data_seg(qp, type, rkey, bind_info, qpn, seg, size); in set_bind_wr()
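
set_bind_wr() chains control, mkey and data segments, and between each it checks whether the cursor reached qend, the end of the send-queue buffer, wrapping back to the first WQE because the queue is a ring; since seg is a void ** here, the test must dereference it, as lines 545 and 553 do. The repeated check as a hypothetical helper:

	/* Hypothetical wrapper for the check repeated at lines 545-553;
	 * mlx5_get_send_wqe(qp, 0) returns the first slot of the ring. */
	static inline void wrap_if_at_end(struct mlx5_qp *qp, void **seg,
					  void *qend)
	{
		if (unlikely(*seg == qend))
			*seg = mlx5_get_send_wqe(qp, 0);
	}
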
562 static inline int set_tso_eth_seg(void **seg, struct ibv_send_wr *wr, in set_tso_eth_seg() argument
565 struct mlx5_wqe_eth_seg *eseg = *seg; in set_tso_eth_seg()
596 *seg += align(copy_sz - size_of_inl_hdr_start, 16) - 16; in set_tso_eth_seg()
601 *seg = mlx5_get_send_wqe(qp, 0); in set_tso_eth_seg()
604 memcpy(*seg, pdata, left); in set_tso_eth_seg()
605 *seg += align(left, 16); in set_tso_eth_seg()
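
set_tso_eth_seg() inlines the packet headers into the eth segment; when the copy crosses qend it wraps to the start of the ring and copies the remaining left bytes, keeping the cursor 16-byte aligned (lines 601-605). A simplified sketch of the split copy, reusing the listing's align() round-up and ignoring the size_of_inl_hdr_start adjustment at line 596:

	#include <string.h>

	/* Simplified sketch: copy hdr_sz bytes of headers into the WQE,
	 * splitting across the ring boundary when needed. */
	static void copy_hdrs_wrapped(void **seg, void *qend,
				      struct mlx5_qp *qp,
				      const uint8_t *pdata, size_t hdr_sz)
	{
		size_t room = (uint8_t *)qend - (uint8_t *)*seg;

		if (hdr_sz <= room) {
			memcpy(*seg, pdata, hdr_sz);
			*seg = (uint8_t *)*seg + align(hdr_sz, 16);
			return;
		}
		memcpy(*seg, pdata, room);          /* fill up to qend */
		*seg = mlx5_get_send_wqe(qp, 0);    /* wrap, as line 601 */
		memcpy(*seg, pdata + room, hdr_sz - room); /* the "left" bytes */
		*seg = (uint8_t *)*seg + align(hdr_sz - room, 16);
	}
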
617 void *seg; in _mlx5_post_send() local
673 ctrl = seg = mlx5_get_send_wqe(qp, idx); in _mlx5_post_send()
674 *(uint32_t *)(seg + 8) = 0; in _mlx5_post_send()
682 seg += sizeof *ctrl; in _mlx5_post_send()
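
_mlx5_post_send() places the control segment at the head of every WQE (line 673), clears the 32-bit word at byte offset 8 (line 674), and steps past the segment (line 682). In the mlx5 control-segment layout that word covers the signature byte and the fm_ce_se fence/completion/solicited flags, so the store resets them for the new work request; the layout, with plain integers standing in for the big-endian fields:

	struct mlx5_wqe_ctrl_seg {
		uint32_t opmod_idx_opcode; /* opcode modifier, WQE index, opcode */
		uint32_t qpn_ds;           /* QP number, WQE size in 16B units */
		uint8_t  signature;        /* byte 8: word cleared by line 674 */
		uint8_t  rsvd[2];
		uint8_t  fm_ce_se;         /* fence / completion / solicited */
		uint32_t imm;              /* immediate data or invalidate rkey */
	};
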
689 xrc = seg; in _mlx5_post_send()
691 seg += sizeof(*xrc); in _mlx5_post_send()
700 set_raddr_seg(seg, wr->wr.rdma.remote_addr, in _mlx5_post_send()
702 seg += sizeof(struct mlx5_wqe_raddr_seg); in _mlx5_post_send()
714 set_raddr_seg(seg, wr->wr.atomic.remote_addr, in _mlx5_post_send()
716 seg += sizeof(struct mlx5_wqe_raddr_seg); in _mlx5_post_send()
718 set_atomic_seg(seg, wr->opcode, in _mlx5_post_send()
721 seg += sizeof(struct mlx5_wqe_atomic_seg); in _mlx5_post_send()
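
RDMA writes and reads append a remote-address segment after the control (and, on XRC, the xrc segment); atomics add an atomic segment with the operands on top of that (lines 714-721). The two 16-byte layouts, again with plain integers in place of the big-endian types:

	struct mlx5_wqe_raddr_seg {
		uint64_t raddr;    /* remote virtual address */
		uint32_t rkey;     /* remote memory key */
		uint32_t reserved;
	};

	struct mlx5_wqe_atomic_seg {
		uint64_t swap_add; /* swap value, or the operand to add */
		uint64_t compare;  /* compare value for compare-and-swap */
	};
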
733 ibqp->qp_num, &seg, &size); in _mlx5_post_send()
748 &seg, &size); in _mlx5_post_send()
767 set_raddr_seg(seg, wr->wr.rdma.remote_addr, in _mlx5_post_send()
769 seg += sizeof(struct mlx5_wqe_raddr_seg); in _mlx5_post_send()
778 ibqp->qp_num, &seg, &size); in _mlx5_post_send()
793 &seg, &size); in _mlx5_post_send()
809 set_datagram_seg(seg, wr); in _mlx5_post_send()
810 seg += sizeof(struct mlx5_wqe_datagram_seg); in _mlx5_post_send()
812 if (unlikely((seg == qend))) in _mlx5_post_send()
813 seg = mlx5_get_send_wqe(qp, 0); in _mlx5_post_send()
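
The UD path writes the datagram (address-vector) segment and wraps if the cursor landed exactly on qend (lines 812-813). Every wrap test in the listing compares with == rather than >=, which relies on each segment keeping the cursor 16-byte aligned inside a ring whose size is a multiple of the WQE stride, so the cursor can sit on qend but should never overshoot it. A hypothetical debug check making that assumption explicit:

	#include <assert.h>
	#include <stdint.h>

	/* Hypothetical check, not in the provider: the cursor may sit
	 * exactly on qend but never beyond it, and stays 16B aligned. */
	static inline void check_cursor(const void *seg, const void *qend)
	{
		assert(seg <= qend);
		assert(((uintptr_t)seg & 0xf) == 0);
	}
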
817 memset(seg, 0, sizeof(struct mlx5_wqe_eth_seg)); in _mlx5_post_send()
818 eseg = seg; in _mlx5_post_send()
832 err = set_tso_eth_seg(&seg, wr, qend, qp, &size); in _mlx5_post_send()
838 err = copy_eth_inline_headers(ibqp, wr, seg, &sg_copy_ptr); in _mlx5_post_send()
848 seg += sizeof(struct mlx5_wqe_eth_seg); in _mlx5_post_send()
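
In the raw-packet path the eth segment is zeroed up front (line 817); a TSO work request hands the cursor to set_tso_eth_seg(), which advances it past the inlined headers itself, while the non-TSO path copies the inline Ethernet headers and advances by the fixed segment size (line 848). The branch, reconstructed from the listing with error handling elided:

	memset(seg, 0, sizeof(struct mlx5_wqe_eth_seg));
	eseg = seg;
	if (wr->opcode == IBV_WR_TSO) {
		err = set_tso_eth_seg(&seg, wr, qend, qp, &size); /* moves seg */
	} else {
		err = copy_eth_inline_headers(ibqp, wr, seg, &sg_copy_ptr);
		seg += sizeof(struct mlx5_wqe_eth_seg);           /* fixed step */
	}
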
859 err = set_data_inl_seg(qp, wr, seg, &sz, &sg_copy_ptr); in _mlx5_post_send()
869 dpseg = seg; in _mlx5_post_send()
872 seg = mlx5_get_send_wqe(qp, 0); in _mlx5_post_send()
873 dpseg = seg; in _mlx5_post_send()
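
After the transport-specific segments, either the payload is inlined via set_data_inl_seg() (line 859) or one data-pointer segment is written per scatter entry, wrapping the cursor first when it sits on qend (lines 872-873). A reconstruction of that tail loop; set_data_ptr_seg() and mlx5_wqe_data_seg are assumed from context rather than shown in the listing:

	/* Reconstructed scatter-gather tail: one mlx5_wqe_data_seg per
	 * SGE, wrapping the cursor before each entry.  set_data_ptr_seg()
	 * is assumed to fill a single data segment from an ibv_sge. */
	for (i = sg_copy_ptr.index; i < wr->num_sge; i++) {
		if (unlikely(dpseg == qend)) {
			seg = mlx5_get_send_wqe(qp, 0);
			dpseg = seg;
		}
		if (likely(wr->sg_list[i].length)) {
			set_data_ptr_seg(dpseg, wr->sg_list + i, 0);
			dpseg++;
			size += sizeof(struct mlx5_wqe_data_seg) / 16;
		}
	}
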