/linux/drivers/infiniband/sw/rxe/
rxe_req.c
    13  static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
    17  struct rxe_send_wqe *wqe, int npsn)  // in retry_first_write_send() (argument)
    22  int to_send = (wqe->dma.resid > qp->mtu) ?  // in retry_first_write_send()
    23  qp->mtu : wqe->dma.resid;  // in retry_first_write_send()
    25  qp->req.opcode = next_opcode(qp, wqe,  // in retry_first_write_send()
    26  wqe->wr.opcode);  // in retry_first_write_send()
    28  if (wqe->wr.send_flags & IB_SEND_INLINE) {  // in retry_first_write_send()
    29  wqe->dma.resid -= to_send;  // in retry_first_write_send()
    30  wqe->dma.sge_offset += to_send;  // in retry_first_write_send()
    32  advance_dma_data(&wqe->dma, to_send);  // in retry_first_write_send()
    [all …]

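The retry path above shows a common pattern: the payload of each packet is the remaining DMA length clamped to the path MTU, after which the residual count and SGE offset are advanced. Below is a minimal standalone sketch of that pattern; the dma_state struct and send_one_packet() helper are simplified stand-ins for illustration, not the rxe driver's types.

```c
#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for the per-WQE DMA bookkeeping */
struct dma_state {
	uint32_t resid;      /* bytes still to be transferred */
	uint32_t sge_offset; /* offset into the current scatter/gather entry */
};

static uint32_t send_one_packet(struct dma_state *dma, uint32_t mtu)
{
	/* clamp this packet's payload to the MTU, as in retry_first_write_send() */
	uint32_t to_send = (dma->resid > mtu) ? mtu : dma->resid;

	dma->resid -= to_send;
	dma->sge_offset += to_send;
	return to_send;
}

int main(void)
{
	struct dma_state dma = { .resid = 10000, .sge_offset = 0 };

	while (dma.resid) {
		uint32_t sent = send_one_packet(&dma, 4096);

		printf("sent %u bytes, %u left\n", sent, dma.resid);
	}
	return 0;
}
```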
rxe_comp.c
   141  struct rxe_send_wqe *wqe;  // in get_wqe() (local)
   146  wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT);  // in get_wqe()
   147  *wqe_p = wqe;  // in get_wqe()
   150  if (!wqe || wqe->state == wqe_state_posted)  // in get_wqe()
   154  if (wqe->state == wqe_state_done)  // in get_wqe()
   158  if (wqe->state == wqe_state_error)  // in get_wqe()
   174  struct rxe_send_wqe *wqe)  // in check_psn() (argument)
   181  diff = psn_compare(pkt->psn, wqe->last_psn);  // in check_psn()
   183  if (wqe->state == wqe_state_pending) {  // in check_psn()
   184  if (wqe->mask & WR_ATOMIC_OR_READ_MASK)  // in check_psn()
   [all …]

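check_psn() above relies on psn_compare(), a wraparound-safe ordering of 24-bit packet sequence numbers. The sketch below shows one common way such a comparison can be written, by shifting the 24-bit difference into the top of a signed 32-bit value; it illustrates the technique and is not a copy of the rxe helper.

```c
#include <stdint.h>
#include <assert.h>

/* compare two 24-bit PSNs: >0 if a is after b, <0 if before, 0 if equal */
static int psn_cmp(uint32_t a, uint32_t b)
{
	/* the <<8 makes the 24-bit difference sign-extend in a 32-bit int,
	 * so ordering survives wraparound at 0xffffff -> 0 */
	return (int32_t)((a - b) << 8);
}

int main(void)
{
	assert(psn_cmp(5, 3) > 0);
	assert(psn_cmp(3, 5) < 0);
	assert(psn_cmp(0x000001, 0xfffffe) > 0); /* 1 is "after" a PSN near the wrap */
	return 0;
}
```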
rxe_mw.c
    50  static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,  // in rxe_check_bind_mw() (argument)
    83  if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) {  // in rxe_check_bind_mw()
   117  if (unlikely(wqe->wr.wr.mw.length > mr->ibmr.length)) {  // in rxe_check_bind_mw()
   123  if (unlikely((wqe->wr.wr.mw.addr < mr->ibmr.iova) ||  // in rxe_check_bind_mw()
   124  ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >  // in rxe_check_bind_mw()
   135  static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,  // in rxe_do_bind_mw() (argument)
   138  u32 key = wqe->wr.wr.mw.rkey & 0xff;  // in rxe_do_bind_mw()
   143  mw->addr = wqe->wr.wr.mw.addr;  // in rxe_do_bind_mw()
   144  mw->length = wqe->wr.wr.mw.length;  // in rxe_do_bind_mw()
   164  int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)  // in rxe_bind_mw() (argument)
   [all …]

/linux/drivers/infiniband/sw/siw/
siw_qp_tx.c
    51  struct siw_wqe *wqe = &c_tx->wqe_active;  // in siw_try_1seg() (local)
    52  struct siw_sge *sge = &wqe->sqe.sge[0];  // in siw_try_1seg()
    55  if (bytes > MAX_HDR_INLINE || wqe->sqe.num_sge != 1)  // in siw_try_1seg()
    61  if (tx_flags(wqe) & SIW_WQE_INLINE) {  // in siw_try_1seg()
    62  memcpy(paddr, &wqe->sqe.sge[1], bytes);  // in siw_try_1seg()
    64  struct siw_mem *mem = wqe->mem[0];  // in siw_try_1seg()
   119  struct siw_wqe *wqe = &c_tx->wqe_active;  // in siw_qp_prepare_tx() (local)
   123  switch (tx_type(wqe)) {  // in siw_qp_prepare_tx()
   135  c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey);  // in siw_qp_prepare_tx()
   137  cpu_to_be64(wqe->sqe.sge[0].laddr);  // in siw_qp_prepare_tx()
   [all …]

siw_qp_rx.c
   169  struct siw_wqe *wqe = &frx->wqe_active;  // in siw_rresp_check_ntoh() (local)
   176  srx->ddp_stag = wqe->sqe.sge[0].lkey;  // in siw_rresp_check_ntoh()
   177  srx->ddp_to = wqe->sqe.sge[0].laddr;  // in siw_rresp_check_ntoh()
   204  (wqe->processed + srx->fpdu_part_rem != wqe->bytes))) {  // in siw_rresp_check_ntoh()
   207  wqe->processed + srx->fpdu_part_rem, wqe->bytes);  // in siw_rresp_check_ntoh()
   281  struct siw_wqe *wqe = &frx->wqe_active;  // in siw_send_check_ntoh() (local)
   301  if (unlikely(ddp_mo != wqe->processed)) {  // in siw_send_check_ntoh()
   303  qp_id(rx_qp(srx)), ddp_mo, wqe->processed);  // in siw_send_check_ntoh()
   316  if (unlikely(wqe->bytes < wqe->processed + srx->fpdu_part_rem)) {  // in siw_send_check_ntoh()
   318  wqe->bytes, wqe->processed, srx->fpdu_part_rem);  // in siw_send_check_ntoh()
   [all …]

/linux/drivers/infiniband/sw/rdmavt/
trace_tx.h
    49  TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, int wr_num_sge),
    50  TP_ARGS(qp, wqe, wr_num_sge),
    54  __field(struct rvt_swqe *, wqe)
    73  __entry->wqe = wqe;
    74  __entry->wr_id = wqe->wr.wr_id;
    77  __entry->psn = wqe->psn;
    78  __entry->lpsn = wqe->lpsn;
    79  __entry->length = wqe->length;
    80  __entry->opcode = wqe->wr.opcode;
    86  __entry->ssn = wqe->ssn;
   [all …]

rc.c
   162  u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len)  // in rvt_restart_sge() (argument)
   164  ss->sge = wqe->sg_list[0];  // in rvt_restart_sge()
   165  ss->sg_list = wqe->sg_list + 1;  // in rvt_restart_sge()
   166  ss->num_sge = wqe->wr.num_sge;  // in rvt_restart_sge()
   167  ss->total_len = wqe->length;  // in rvt_restart_sge()
   169  return wqe->length - len;  // in rvt_restart_sge()

/linux/drivers/infiniband/hw/irdma/
uk.c
    15  static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge,  // in irdma_set_fragment() (argument)
    19  set_64bit_val(wqe, offset,  // in irdma_set_fragment()
    21  set_64bit_val(wqe, offset + 8,  // in irdma_set_fragment()
    26  set_64bit_val(wqe, offset, 0);  // in irdma_set_fragment()
    27  set_64bit_val(wqe, offset + 8,  // in irdma_set_fragment()
    39  static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,  // in irdma_set_fragment_gen_1() (argument)
    43  set_64bit_val(wqe, offset,  // in irdma_set_fragment_gen_1()
    45  set_64bit_val(wqe, offset + 8,  // in irdma_set_fragment_gen_1()
    49  set_64bit_val(wqe, offset, 0);  // in irdma_set_fragment_gen_1()
    50  set_64bit_val(wqe, offset + 8, 0);  // in irdma_set_fragment_gen_1()
    [all …]

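irdma_set_fragment() above encodes one scatter/gather element as two 64-bit little-endian words at a byte offset inside the WQE, and zeroes the slot when no SGE is given. The sketch below mirrors that shape with a hand-rolled set_64bit_le() helper; the exact packing of length and key in the second word is an assumption made for illustration, not the irdma wire layout.

```c
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct sge { uint64_t addr; uint32_t length; uint32_t lkey; };

/* stand-in for the driver's set_64bit_val(): store a 64-bit LE value at a byte offset */
static void set_64bit_le(uint8_t *wqe, uint32_t offset, uint64_t val)
{
	for (int i = 0; i < 8; i++)
		wqe[offset + i] = (uint8_t)(val >> (8 * i));
}

static void set_fragment(uint8_t *wqe, uint32_t offset, const struct sge *sge)
{
	if (sge) {
		set_64bit_le(wqe, offset, sge->addr);
		/* assumed packing: key in the high word, length in the low word */
		set_64bit_le(wqe, offset + 8,
			     ((uint64_t)sge->lkey << 32) | sge->length);
	} else {
		/* no SGE: clear the fragment slot */
		set_64bit_le(wqe, offset, 0);
		set_64bit_le(wqe, offset + 8, 0);
	}
}

int main(void)
{
	uint8_t wqe[32];
	struct sge sge = { .addr = 0x1000, .length = 256, .lkey = 0xabcd };

	memset(wqe, 0, sizeof(wqe));
	set_fragment(wqe, 16, &sge); /* write the fragment at byte offset 16 */
	printf("first fragment byte: 0x%02x\n", wqe[16]);
	return 0;
}
```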
uda.c
    23  __le64 *wqe;  // in irdma_sc_access_ah() (local)
    26  wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);  // in irdma_sc_access_ah()
    27  if (!wqe)  // in irdma_sc_access_ah()
    30  set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr) << 16);  // in irdma_sc_access_ah()
    41  set_64bit_val(wqe, 40,  // in irdma_sc_access_ah()
    44  set_64bit_val(wqe, 32,  // in irdma_sc_access_ah()
    48  set_64bit_val(wqe, 56,  // in irdma_sc_access_ah()
    51  set_64bit_val(wqe, 48,  // in irdma_sc_access_ah()
    55  set_64bit_val(wqe, 32,  // in irdma_sc_access_ah()
    58  set_64bit_val(wqe, 48,  // in irdma_sc_access_ah()
    [all …]

ctrl.c
   194  __le64 *wqe;  // in irdma_sc_add_arp_cache_entry() (local)
   197  wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);  // in irdma_sc_add_arp_cache_entry()
   198  if (!wqe)  // in irdma_sc_add_arp_cache_entry()
   200  set_64bit_val(wqe, 8, info->reach_max);  // in irdma_sc_add_arp_cache_entry()
   201  set_64bit_val(wqe, 16, ether_addr_to_u64(info->mac_addr));  // in irdma_sc_add_arp_cache_entry()
   210  set_64bit_val(wqe, 24, hdr);  // in irdma_sc_add_arp_cache_entry()
   213  16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);  // in irdma_sc_add_arp_cache_entry()
   230  __le64 *wqe;  // in irdma_sc_del_arp_cache_entry() (local)
   233  wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);  // in irdma_sc_del_arp_cache_entry()
   234  if (!wqe)  // in irdma_sc_del_arp_cache_entry()
   [all …]

/linux/drivers/infiniband/hw/hfi1/
rc.c
   394  struct rvt_swqe *wqe;  // in hfi1_make_rc_req() (local)
   449  wqe = rvt_get_swqe_ptr(qp, qp->s_last);  // in hfi1_make_rc_req()
   450  hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?  // in hfi1_make_rc_req()
   469  wqe = rvt_get_swqe_ptr(qp, qp->s_cur);  // in hfi1_make_rc_req()
   495  if ((wqe->wr.send_flags & IB_SEND_FENCE) &&  // in hfi1_make_rc_req()
   497  (wqe->wr.opcode != IB_WR_TID_RDMA_READ ||  // in hfi1_make_rc_req()
   506  if (wqe->wr.opcode == IB_WR_REG_MR ||  // in hfi1_make_rc_req()
   507  wqe->wr.opcode == IB_WR_LOCAL_INV) {  // in hfi1_make_rc_req()
   517  if (!(wqe->wr.send_flags &  // in hfi1_make_rc_req()
   521  wqe->wr.ex.invalidate_rkey);  // in hfi1_make_rc_req()
   [all …]

uc.c
    26  struct rvt_swqe *wqe;  // in hfi1_make_uc_req() (local)
    49  wqe = rvt_get_swqe_ptr(qp, qp->s_last);  // in hfi1_make_uc_req()
    50  rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);  // in hfi1_make_uc_req()
    72  wqe = rvt_get_swqe_ptr(qp, qp->s_cur);  // in hfi1_make_uc_req()
    88  if (wqe->wr.opcode == IB_WR_REG_MR ||  // in hfi1_make_uc_req()
    89  wqe->wr.opcode == IB_WR_LOCAL_INV) {  // in hfi1_make_uc_req()
    97  if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {  // in hfi1_make_uc_req()
    99  qp, wqe->wr.ex.invalidate_rkey);  // in hfi1_make_uc_req()
   102  rvt_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR  // in hfi1_make_uc_req()
   111  qp->s_psn = wqe->psn;  // in hfi1_make_uc_req()
   [all …]

tid_rdma.h
   214  void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
   221  static inline void trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)  // in trdma_clean_swqe() (argument)
   223  if (!wqe->priv)  // in trdma_clean_swqe()
   225  __trdma_clean_swqe(qp, wqe);  // in trdma_clean_swqe()
   244  u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
   247  u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
   258  void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
   261  bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe);
   263  void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
   265  struct rvt_swqe *wqe)  // in hfi1_setup_tid_rdma_wqe() (argument)
   [all …]

tid_rdma.c
   378  struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);  // in hfi1_qp_priv_init() (local)
   386  priv->tid_req.e.swqe = wqe;  // in hfi1_qp_priv_init()
   387  wqe->priv = priv;  // in hfi1_qp_priv_init()
   416  struct rvt_swqe *wqe;  // in hfi1_qp_priv_tid_free() (local)
   421  wqe = rvt_get_swqe_ptr(qp, i);  // in hfi1_qp_priv_tid_free()
   422  kfree(wqe->priv);  // in hfi1_qp_priv_tid_free()
   423  wqe->priv = NULL;  // in hfi1_qp_priv_tid_free()
  1621  void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)  // in __trdma_clean_swqe() (argument)
  1623  struct hfi1_swqe_priv *p = wqe->priv;  // in __trdma_clean_swqe()
  1703  u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,  // in hfi1_build_tid_rdma_read_packet() (argument)
   [all …]

ud.c
   224  static void hfi1_make_bth_deth(struct rvt_qp *qp, struct rvt_swqe *wqe,  // in hfi1_make_bth_deth() (argument)
   232  if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {  // in hfi1_make_bth_deth()
   233  ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;  // in hfi1_make_bth_deth()
   239  if (wqe->wr.send_flags & IB_SEND_SOLICITED)  // in hfi1_make_bth_deth()
   243  *pkey = hfi1_get_pkey(ibp, rvt_get_swqe_pkey_index(wqe));  // in hfi1_make_bth_deth()
   249  ohdr->bth[1] = cpu_to_be32(rvt_get_swqe_remote_qpn(wqe));  // in hfi1_make_bth_deth()
   250  ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn));  // in hfi1_make_bth_deth()
   256  cpu_to_be32((int)rvt_get_swqe_remote_qkey(wqe) < 0 ? qp->qkey :  // in hfi1_make_bth_deth()
   257  rvt_get_swqe_remote_qkey(wqe));  // in hfi1_make_bth_deth()
   262  struct rvt_swqe *wqe)  // in hfi1_make_ud_req_9B() (argument)
   [all …]

trace_rc.h
    80  struct rvt_swqe *wqe),
    81  TP_ARGS(qp, aeth, psn, wqe),
    96  __entry->opcode = wqe->wr.opcode;
    97  __entry->spsn = wqe->psn;
    98  __entry->lpsn = wqe->lpsn;
   115  struct rvt_swqe *wqe),
   116  TP_ARGS(qp, aeth, psn, wqe)

/linux/drivers/scsi/lpfc/
lpfc_nvmet.c
    80  union lpfc_wqe128 *wqe;  // in lpfc_nvmet_cmd_template() (local)
    83  wqe = &lpfc_tsend_cmd_template;  // in lpfc_nvmet_cmd_template()
    84  memset(wqe, 0, sizeof(union lpfc_wqe128));  // in lpfc_nvmet_cmd_template()
    97  bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);  // in lpfc_nvmet_cmd_template()
    98  bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);  // in lpfc_nvmet_cmd_template()
    99  bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);  // in lpfc_nvmet_cmd_template()
   100  bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);  // in lpfc_nvmet_cmd_template()
   101  bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);  // in lpfc_nvmet_cmd_template()
   108  bf_set(wqe_xchg, &wqe->fcp_tsend.wqe_com, LPFC_NVME_XCHG);  // in lpfc_nvmet_cmd_template()
   109  bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);  // in lpfc_nvmet_cmd_template()
   [all …]

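The template initialisation above is driven by bf_set(), a shift-and-mask bitfield writer applied to words of the 128-byte WQE. The generic sketch below shows the technique with explicit shift and mask parameters; the real lpfc macro derives them from the field name, so this is an illustration of the idea rather than the lpfc definition.

```c
#include <stdint.h>
#include <stdio.h>

/* write a field: clear its bits, then OR in the masked, shifted value */
#define BF_SET(word, shift, mask, val) \
	((word) = ((word) & ~((uint32_t)(mask) << (shift))) | \
		  (((uint32_t)(val) & (mask)) << (shift)))

/* read a field back out */
#define BF_GET(word, shift, mask) (((word) >> (shift)) & (mask))

int main(void)
{
	uint32_t wqe_com = 0;

	BF_SET(wqe_com, 0, 0xff, 0x1d); /* e.g. a command code in bits 7:0 */
	BF_SET(wqe_com, 24, 0x3, 1);    /* e.g. a 2-bit flag in bits 25:24 */
	printf("word 0x%08x, cmd 0x%x\n",
	       wqe_com, BF_GET(wqe_com, 0, 0xff));
	return 0;
}
```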
/linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
ktls_txrx.c
    74  mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,  // in mlx5e_ktls_build_static_params() (argument)
    80  struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;  // in mlx5e_ktls_build_static_params()
    81  struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;  // in mlx5e_ktls_build_static_params()
    86  #define STATIC_PARAMS_DS_CNT DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS)  // in mlx5e_ktls_build_static_params()
    97  fill_static_params(&wqe->params, crypto_info, key_id, resync_tcp_sn);  // in mlx5e_ktls_build_static_params()
   117  mlx5e_ktls_build_progress_params(struct mlx5e_set_tls_progress_params_wqe *wqe,  // in mlx5e_ktls_build_progress_params() (argument)
   123  struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;  // in mlx5e_ktls_build_progress_params()
   128  #define PROGRESS_PARAMS_DS_CNT DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS)  // in mlx5e_ktls_build_progress_params()
   136  fill_progress_params(&wqe->params, tis_tir_num, next_record_tcp_sn);  // in mlx5e_ktls_build_progress_params()

/linux/drivers/infiniband/hw/mthca/
mthca_srq.c
    92  static inline int *wqe_to_link(void *wqe)  // in wqe_to_link() (argument)
    94  return (int *) (wqe + offsetof(struct mthca_next_seg, imm));  // in wqe_to_link()
   158  void *wqe;  // in mthca_alloc_srq_buf() (local)
   185  next = wqe = get_wqe(srq, i);  // in mthca_alloc_srq_buf()
   188  *wqe_to_link(wqe) = i + 1;  // in mthca_alloc_srq_buf()
   191  *wqe_to_link(wqe) = -1;  // in mthca_alloc_srq_buf()
   195  for (scatter = wqe + sizeof (struct mthca_next_seg);  // in mthca_alloc_srq_buf()
   196  (void *) scatter < wqe + (1 << srq->wqe_shift);  // in mthca_alloc_srq_buf()
   495  void *wqe;  // in mthca_tavor_post_srq_recv() (local)
   504  wqe = get_wqe(srq, ind);  // in mthca_tavor_post_srq_recv()
   [all …]

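mthca_alloc_srq_buf() above threads a free list through the receive queue itself: while a WQE is unused, the slot stores the index of the next free WQE, and -1 terminates the list. Below is a standalone sketch of that idea, with simplified types standing in for the mthca layout.

```c
#include <stdio.h>

#define NUM_WQES 8

struct srq {
	int link[NUM_WQES]; /* stands in for the int stored inside each free WQE */
	int first_free;
};

static void srq_init(struct srq *srq)
{
	/* chain every slot to the next one; the last slot ends the list */
	for (int i = 0; i < NUM_WQES; i++)
		srq->link[i] = (i < NUM_WQES - 1) ? i + 1 : -1;
	srq->first_free = 0;
}

static int srq_alloc_wqe(struct srq *srq)
{
	int i = srq->first_free;

	if (i >= 0)
		srq->first_free = srq->link[i];
	return i; /* -1 means the SRQ is full */
}

static void srq_free_wqe(struct srq *srq, int i)
{
	/* a recycled slot becomes the new head of the free list */
	srq->link[i] = srq->first_free;
	srq->first_free = i;
}

int main(void)
{
	struct srq srq;

	srq_init(&srq);
	int a = srq_alloc_wqe(&srq);
	int b = srq_alloc_wqe(&srq);

	srq_free_wqe(&srq, a);
	printf("allocated %d and %d, recycled %d, next alloc is %d\n",
	       a, b, a, srq_alloc_wqe(&srq));
	return 0;
}
```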
mthca_qp.c
  1630  void *wqe;  // in mthca_tavor_post_send() (local)
  1666  wqe = get_send_wqe(qp, ind);  // in mthca_tavor_post_send()
  1668  qp->sq.last = wqe;  // in mthca_tavor_post_send()
  1670  ((struct mthca_next_seg *) wqe)->nda_op = 0;  // in mthca_tavor_post_send()
  1671  ((struct mthca_next_seg *) wqe)->ee_nds = 0;  // in mthca_tavor_post_send()
  1672  ((struct mthca_next_seg *) wqe)->flags =  // in mthca_tavor_post_send()
  1680  ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;  // in mthca_tavor_post_send()
  1682  wqe += sizeof (struct mthca_next_seg);  // in mthca_tavor_post_send()
  1690  set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,  // in mthca_tavor_post_send()
  1692  wqe += sizeof (struct mthca_raddr_seg);  // in mthca_tavor_post_send()
   [all …]

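mthca_tavor_post_send() above builds a send WQE segment by segment: a control ("next") segment first, then optional segments such as a remote-address segment, with the build pointer advanced by sizeof() after each one. The sketch below captures that layout discipline; the segment structs here are simplified stand-ins, not the mthca hardware definitions.

```c
#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* simplified stand-ins for the driver's segment structures */
struct next_seg  { uint32_t nda_op, ee_nds, flags, imm; };
struct raddr_seg { uint64_t raddr; uint32_t rkey, reserved; };

static size_t build_rdma_wqe(void *buf, uint64_t remote_addr, uint32_t rkey)
{
	uint8_t *wqe = buf;

	/* control segment comes first and is zeroed, as in the driver */
	memset(wqe, 0, sizeof(struct next_seg));
	wqe += sizeof(struct next_seg);

	/* then a remote-address segment for an RDMA-style operation */
	struct raddr_seg raddr = { .raddr = remote_addr, .rkey = rkey };

	memcpy(wqe, &raddr, sizeof(raddr));
	wqe += sizeof(raddr);

	return (size_t)(wqe - (uint8_t *)buf); /* bytes consumed so far */
}

int main(void)
{
	uint8_t wqe[128];

	printf("wqe built so far: %zu bytes\n",
	       build_rdma_wqe(wqe, 0xdeadbeef000ULL, 0x1234));
	return 0;
}
```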
/linux/drivers/infiniband/hw/bnxt_re/
ib_verbs.c
   471  struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;  // in bnxt_re_create_fence_wqe() (local)
   477  memset(wqe, 0, sizeof(*wqe));  // in bnxt_re_create_fence_wqe()
   478  wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;  // in bnxt_re_create_fence_wqe()
   479  wqe->wr_id = BNXT_QPLIB_FENCE_WRID;  // in bnxt_re_create_fence_wqe()
   480  wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;  // in bnxt_re_create_fence_wqe()
   481  wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;  // in bnxt_re_create_fence_wqe()
   482  wqe->bind.zero_based = false;  // in bnxt_re_create_fence_wqe()
   483  wqe->bind.parent_l_key = ib_mr->lkey;  // in bnxt_re_create_fence_wqe()
   484  wqe->bind.va = (u64)(unsigned long)fence->va;  // in bnxt_re_create_fence_wqe()
   485  wqe->bind.length = fence->size;  // in bnxt_re_create_fence_wqe()
   [all …]

qplib_fp.c
   754  struct bnxt_qplib_swqe *wqe)  // in bnxt_qplib_post_srq_recv() (argument)
   776  i < wqe->num_sge; i++, hw_sge++) {  // in bnxt_qplib_post_srq_recv()
   777  hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);  // in bnxt_qplib_post_srq_recv()
   778  hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);  // in bnxt_qplib_post_srq_recv()
   779  hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);  // in bnxt_qplib_post_srq_recv()
   781  srqe->wqe_type = wqe->type;  // in bnxt_qplib_post_srq_recv()
   782  srqe->flags = wqe->flags;  // in bnxt_qplib_post_srq_recv()
   783  srqe->wqe_size = wqe->num_sge +  // in bnxt_qplib_post_srq_recv()
   786  srq->swq[next].wr_id = wqe->wr_id;  // in bnxt_qplib_post_srq_recv()
  1676  struct bnxt_qplib_swqe *wqe,  // in bnxt_qplib_fill_msn_search() (argument)
   [all …]

/linux/drivers/infiniband/hw/hns/
hns_roce_trace.h
    49  TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len,
    51  TP_ARGS(qpn, idx, wqe, len, id, type),
    55  __array(u32, wqe,
    68  __entry->wqe[i] = le32_to_cpu(((__le32 *)wqe)[i]);
    74  __print_array(__entry->wqe, __entry->len,
    79  TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,
    81  TP_ARGS(qpn, idx, wqe, len, id, type));
    83  TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,
    85  TP_ARGS(qpn, idx, wqe, len, id, type));
    87  TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,
   [all …]

/linux/drivers/net/ethernet/huawei/hinic/
hinic_hw_cmdq.c
    54  #define CMDQ_WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe))  // (macro argument)
   177  static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped,  // in cmdq_prepare_wqe_ctrl() (argument)
   191  wqe_lcmd = &wqe->wqe_lcmd;  // in cmdq_prepare_wqe_ctrl()
   197  wqe_scmd = &wqe->direct_wqe.wqe_scmd;  // in cmdq_prepare_wqe_ctrl()
   209  CMDQ_WQE_HEADER(wqe)->header_info =  // in cmdq_prepare_wqe_ctrl()
   218  saved_data = CMDQ_WQE_HEADER(wqe)->saved_data;  // in cmdq_prepare_wqe_ctrl()
   222  CMDQ_WQE_HEADER(wqe)->saved_data |=  // in cmdq_prepare_wqe_ctrl()
   225  CMDQ_WQE_HEADER(wqe)->saved_data = saved_data;  // in cmdq_prepare_wqe_ctrl()
   234  static void cmdq_set_direct_wqe_data(struct hinic_cmdq_direct_wqe *wqe,  // in cmdq_set_direct_wqe_data() (argument)
   237  struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd;  // in cmdq_set_direct_wqe_data()
   [all …]

/linux/drivers/infiniband/hw/mlx5/
odp.c
    68  } wqe;  // (member)
   434  pfault->wqe.wq_num : pfault->token;  // in mlx5_ib_page_fault_resume()
  1168  void *wqe,  // in pagefault_data_segments() (argument)
  1184  while (wqe < wqe_end) {  // in pagefault_data_segments()
  1185  struct mlx5_wqe_data_seg *dseg = wqe;  // in pagefault_data_segments()
  1195  wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,  // in pagefault_data_segments()
  1198  wqe += sizeof(*dseg);  // in pagefault_data_segments()
  1240  struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)  // in mlx5_ib_mr_initiator_pfault_handler() (argument)
  1242  struct mlx5_wqe_ctrl_seg *ctrl = *wqe;  // in mlx5_ib_mr_initiator_pfault_handler()
  1243  u16 wqe_index = pfault->wqe.wqe_index;  // in mlx5_ib_mr_initiator_pfault_handler()
   [all …]

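pagefault_data_segments() above walks the data segments of a faulting WQE, treating inline segments (payload embedded in the WQE and skipped by an aligned byte count) differently from regular pointer segments of fixed size. The sketch below shows that walk in isolation; the inline flag, alignment, and segment layout are assumptions chosen for illustration, not the mlx5 wire format.

```c
#include <stdint.h>
#include <stdio.h>

#define INLINE_FLAG   0x80000000u                    /* assumed "inline" marker bit */
#define SEG_ALIGN     16u                            /* assumed segment alignment   */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

struct data_seg { uint32_t byte_count; uint32_t lkey; uint64_t addr; };

/* sum the payload described by all data segments between wqe and wqe_end */
static uint32_t total_payload(const uint8_t *wqe, const uint8_t *wqe_end)
{
	uint32_t total = 0;

	while (wqe < wqe_end) {
		const struct data_seg *dseg = (const void *)wqe;
		uint32_t bcnt = dseg->byte_count & ~INLINE_FLAG;

		total += bcnt;
		if (dseg->byte_count & INLINE_FLAG)
			/* inline data follows the 4-byte count inside the WQE */
			wqe += ALIGN_UP(sizeof(uint32_t) + bcnt, SEG_ALIGN);
		else
			/* pointer segments have a fixed size */
			wqe += sizeof(*dseg);
	}
	return total;
}

int main(void)
{
	struct data_seg segs[2] = {
		{ .byte_count = 512, .lkey = 0x10, .addr = 0x2000 },
		{ .byte_count = 256, .lkey = 0x11, .addr = 0x3000 },
	};

	printf("payload: %u bytes\n",
	       total_payload((const uint8_t *)segs,
			     (const uint8_t *)(segs + 2)));
	return 0;
}
```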