| /linux/drivers/infiniband/sw/siw/ |
| siw_qp_tx.c |

  siw_try_1seg():
     51  struct siw_wqe *wqe = &c_tx->wqe_active;   [local]
     52  struct siw_sge *sge = &wqe->sqe.sge[0];
     55  if (bytes > MAX_HDR_INLINE || wqe->sqe.num_sge != 1)
     61  if (tx_flags(wqe) & SIW_WQE_INLINE) {
     62      memcpy(paddr, &wqe->sqe.sge[1], bytes);
     64      struct siw_mem *mem = wqe->mem[0];

  siw_qp_prepare_tx():
    119  struct siw_wqe *wqe = &c_tx->wqe_active;   [local]
    123  switch (tx_type(wqe)) {
    135  c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey);
    137      cpu_to_be64(wqe->sqe.sge[0].laddr);

  [all …]
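The siw_try_1seg() hits above show the one-segment inline fast path: a send payload is copied straight into the packet header only when it fits MAX_HDR_INLINE and the WQE carries exactly one SGE, with inline sends keeping the data itself in the space after sge[0]. A minimal userspace sketch of that decision, using simplified stand-in types rather than the real siw structures:

    /* Sketch of the "one-segment inline" test, not the real siw code. */
    #include <stdint.h>
    #include <string.h>

    #define MAX_HDR_INLINE 44   /* hypothetical inline threshold */
    #define WQE_INLINE     0x1  /* hypothetical flag bit */

    struct sketch_sge { uint64_t laddr; uint32_t length; uint32_t lkey; };

    struct sketch_wqe {
        uint32_t flags;
        uint32_t num_sge;
        struct sketch_sge sge[4];   /* sge[1..] double as inline storage */
    };

    /* Returns bytes copied into hdr, or -1 if the fast path can't be used. */
    static int try_1seg(struct sketch_wqe *wqe, void *hdr)
    {
        uint32_t bytes = wqe->sge[0].length;

        if (bytes > MAX_HDR_INLINE || wqe->num_sge != 1)
            return -1;

        if (wqe->flags & WQE_INLINE) {
            /* Inline sends keep the payload where sge[1..] would be. */
            memcpy(hdr, &wqe->sge[1], bytes);
            return (int)bytes;
        }
        return -1;  /* referenced memory would need an MR lookup here */
    }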
|
| siw_qp.c |

  siw_qp_mpa_rts():
    238  struct siw_wqe *wqe = tx_wqe(qp);   [local]
    244  if (unlikely(wqe->wr_status != SIW_WR_IDLE)) {
    248  memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
    250  wqe->wr_status = SIW_WR_QUEUED;
    251  wqe->sqe.flags = 0;
    252  wqe->sqe.num_sge = 1;
    253  wqe->sqe.sge[0].length = 0;
    254  wqe->sqe.sge[0].laddr = 0;
    255  wqe->sqe.sge[0].lkey = 0;
    260  wqe->sqe.rkey = 1;

  [all …]
|
| /linux/drivers/infiniband/sw/rdmavt/ |
| trace_tx.h |

     49  TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, int wr_num_sge),
     50  TP_ARGS(qp, wqe, wr_num_sge),
     54  __field(struct rvt_swqe *, wqe)
     73  __entry->wqe = wqe;
     74  __entry->wr_id = wqe->wr.wr_id;
     77  __entry->psn = wqe->psn;
     78  __entry->lpsn = wqe->lpsn;
     79  __entry->length = wqe->length;
     80  __entry->opcode = wqe->wr.opcode;
     86  __entry->ssn = wqe->ssn;

  [all …]
|
| rc.c |

  rvt_restart_sge():
    162  u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len)   [argument]
    164  ss->sge = wqe->sg_list[0];
    165  ss->sg_list = wqe->sg_list + 1;
    166  ss->num_sge = wqe->wr.num_sge;
    167  ss->total_len = wqe->length;
    169  return wqe->length - len;
|
| /linux/drivers/infiniband/hw/irdma/ |
| uk.c |

  irdma_set_fragment():
     15  static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge,   [argument]
     19      set_64bit_val(wqe, offset,
     21      set_64bit_val(wqe, offset + 8,
     26      set_64bit_val(wqe, offset, 0);
     27      set_64bit_val(wqe, offset + 8,

  irdma_set_fragment_gen_1():
     39  static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,   [argument]
     43      set_64bit_val(wqe, offset,
     45      set_64bit_val(wqe, offset + 8,
     49      set_64bit_val(wqe, offset, 0);
     50      set_64bit_val(wqe, offset + 8, 0);

  [all …]
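The two irdma helpers above write one scatter/gather element into a WQE as a pair of little-endian 64-bit values at a byte offset, zeroing the pair for an empty slot. A sketch of that encoding; the bit layout of the second quadword is a hypothetical simplification, not the real irdma format:

    #include <stdint.h>
    #include <string.h>

    static void set_64bit_val(uint8_t *wqe, uint32_t offset, uint64_t val)
    {
        uint64_t le = val;  /* assume little-endian host for brevity */
        memcpy(wqe + offset, &le, sizeof(le));
    }

    struct sketch_sge { uint64_t addr; uint32_t length; uint32_t lkey; };

    static void set_fragment(uint8_t *wqe, uint32_t offset,
                             const struct sketch_sge *sge, int valid)
    {
        if (sge) {
            set_64bit_val(wqe, offset, sge->addr);
            /* hypothetical packing: valid bit | key | length */
            set_64bit_val(wqe, offset + 8,
                          ((uint64_t)valid << 63) |
                          ((uint64_t)sge->lkey << 32) | sge->length);
        } else {
            /* empty fragment slot: zero address, only the valid bit */
            set_64bit_val(wqe, offset, 0);
            set_64bit_val(wqe, offset + 8, (uint64_t)valid << 63);
        }
    }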
|
| uda.c |

  irdma_sc_access_ah():
     23  __le64 *wqe;   [local]
     26  wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
     27  if (!wqe)
     30  set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr) << 16);
     41  set_64bit_val(wqe, 40,
     44  set_64bit_val(wqe, 32,
     48  set_64bit_val(wqe, 56,
     51  set_64bit_val(wqe, 48,
     55  set_64bit_val(wqe, 32,
     58  set_64bit_val(wqe, 48,

  [all …]
|
| /linux/drivers/infiniband/hw/hfi1/ |
| rc.c |

  hfi1_make_rc_req():
    394  struct rvt_swqe *wqe;   [local]
    449  wqe = rvt_get_swqe_ptr(qp, qp->s_last);
    450  hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
    469  wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
    495  if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
    497      (wqe->wr.opcode != IB_WR_TID_RDMA_READ ||
    506  if (wqe->wr.opcode == IB_WR_REG_MR ||
    507      wqe->wr.opcode == IB_WR_LOCAL_INV) {
    517  if (!(wqe->wr.send_flags &
    521      wqe->wr.ex.invalidate_rkey);

  [all …]
|
| uc.c |

  hfi1_make_uc_req():
     26  struct rvt_swqe *wqe;   [local]
     49  wqe = rvt_get_swqe_ptr(qp, qp->s_last);
     50  rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
     72  wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
     88  if (wqe->wr.opcode == IB_WR_REG_MR ||
     89      wqe->wr.opcode == IB_WR_LOCAL_INV) {
     97  if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
     99      qp, wqe->wr.ex.invalidate_rkey);
    102  rvt_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR
    111  qp->s_psn = wqe->psn;

  [all …]
|
| tid_rdma.h |

    214  void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe);

  trdma_clean_swqe():
    221  static inline void trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)   [argument]
    223      if (!wqe->priv)
    225      __trdma_clean_swqe(qp, wqe);

    244  u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
    247  u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
    258  void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
    261  bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe);
    263  void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
    265      struct rvt_swqe *wqe)   (in hfi1_setup_tid_rdma_wqe)  [argument]

  [all …]
|
| tid_rdma.c |

  hfi1_qp_priv_init():
    378  struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);   [local]
    386  priv->tid_req.e.swqe = wqe;
    387  wqe->priv = priv;

  hfi1_qp_priv_tid_free():
    416  struct rvt_swqe *wqe;   [local]
    421  wqe = rvt_get_swqe_ptr(qp, i);
    422  kfree(wqe->priv);
    423  wqe->priv = NULL;

  __trdma_clean_swqe():
   1621  void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)   [argument]
   1623  struct hfi1_swqe_priv *p = wqe->priv;

  hfi1_build_tid_rdma_read_packet():
   1703  u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,   [argument]

  [all …]
|
| ud.c |

  hfi1_make_bth_deth():
    224  static void hfi1_make_bth_deth(struct rvt_qp *qp, struct rvt_swqe *wqe,   [argument]
    232  if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
    233      ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
    239  if (wqe->wr.send_flags & IB_SEND_SOLICITED)
    243  *pkey = hfi1_get_pkey(ibp, rvt_get_swqe_pkey_index(wqe));
    249  ohdr->bth[1] = cpu_to_be32(rvt_get_swqe_remote_qpn(wqe));
    250  ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn));
    256  cpu_to_be32((int)rvt_get_swqe_remote_qkey(wqe) < 0 ? qp->qkey :
    257      rvt_get_swqe_remote_qkey(wqe));

  hfi1_make_ud_req_9B():
    262      struct rvt_swqe *wqe)   [argument]

  [all …]
|
| trace_rc.h |

     80      struct rvt_swqe *wqe),
     81  TP_ARGS(qp, aeth, psn, wqe),
     96  __entry->opcode = wqe->wr.opcode;
     97  __entry->spsn = wqe->psn;
     98  __entry->lpsn = wqe->lpsn;
    115      struct rvt_swqe *wqe),
    116  TP_ARGS(qp, aeth, psn, wqe)
|
| rc.h |

  restart_sge():
     35  static inline u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,   [argument]
     40      len = delta_psn(psn, wqe->psn) * pmtu;
     41      return rvt_restart_sge(ss, wqe, len);

     56  struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, struct rvt_swqe *wqe,
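restart_sge() converts a retransmit PSN back into a byte count: the PSN delta from the WQE's first PSN times the path MTU gives the bytes already sent, and rvt_restart_sge() (see rdmavt rc.c above) rewinds the gather state to the start of the SG list and returns what remains. A worked sketch with simplified stand-in types:

    #include <stdint.h>

    struct sketch_swqe { uint32_t psn; uint32_t length; /* first PSN, total bytes */ };

    /* PSNs are 24-bit sequence numbers; subtract modulo 2^24. */
    static uint32_t delta_psn(uint32_t a, uint32_t b)
    {
        return (a - b) & 0xffffff;
    }

    static uint32_t restart_len(const struct sketch_swqe *wqe,
                                uint32_t psn, uint32_t pmtu)
    {
        uint32_t sent = delta_psn(psn, wqe->psn) * pmtu;  /* bytes already sent */
        return wqe->length - sent;                        /* bytes to (re)send */
    }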
|
| /linux/drivers/infiniband/hw/cxgb4/ |
| qp.c |

  build_rdma_send():
    486  static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,   [argument]
    498      wqe->send.sendop_pkd = cpu_to_be32(
    501      wqe->send.sendop_pkd = cpu_to_be32(
    503      wqe->send.stag_inv = 0;
    507      wqe->send.sendop_pkd = cpu_to_be32(
    510      wqe->send.sendop_pkd = cpu_to_be32(
    512      wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
    518      wqe->send.r3 = 0;
    519      wqe->send.r4 = 0;
    524      ret = build_immd(sq, wqe->send.u.immd_src, wr,

  [all …]
|
| t4.h |

  init_wr_hdr():
    119  static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,   [argument]
    122      wqe->send.opcode = (u8)opcode;
    123      wqe->send.flags = flags;
    124      wqe->send.wrid = wrid;
    125      wqe->send.r1[0] = 0;
    126      wqe->send.r1[1] = 0;
    127      wqe->send.r1[2] = 0;
    128      wqe->send.len16 = len16;

    394  union t4_recv_wr wqe;   [member]

  t4_ring_srq_db():
    583      union t4_recv_wr *wqe)   [argument]

  [all …]
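init_wr_hdr() fills the common T4 work-request header: opcode, flags and wrid are set, the reserved words are cleared, and the total request length is carried in 16-byte units (len16). A sketch of that pattern; the field names and widths here are hypothetical simplifications of the real union t4_wr layout:

    #include <stdint.h>
    #include <string.h>

    struct sketch_wr_hdr {
        uint8_t  opcode;
        uint8_t  flags;
        uint16_t wrid;
        uint32_t r1[3];   /* reserved, must be zero */
        uint8_t  len16;   /* total WR length in 16-byte units */
    };

    static void init_wr_hdr(struct sketch_wr_hdr *h, uint16_t wrid,
                            uint8_t opcode, uint8_t flags, size_t wr_bytes)
    {
        h->opcode = opcode;
        h->flags  = flags;
        h->wrid   = wrid;
        memset(h->r1, 0, sizeof(h->r1));
        h->len16  = (uint8_t)((wr_bytes + 15) / 16);  /* round up to 16B units */
    }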
|
| /linux/drivers/infiniband/sw/rxe/ |
| rxe_mw.c |

  rxe_check_bind_mw():
     50  static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,   [argument]
     83      if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) {
    117      if (unlikely(wqe->wr.wr.mw.length > mr->ibmr.length)) {
    123      if (unlikely((wqe->wr.wr.mw.addr < mr->ibmr.iova) ||
    124          ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >

  rxe_do_bind_mw():
    135  static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,   [argument]
    138      u32 key = wqe->wr.wr.mw.rkey & 0xff;
    143      mw->addr = wqe->wr.wr.mw.addr;
    144      mw->length = wqe->wr.wr.mw.length;

  rxe_bind_mw():
    164  int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)   [argument]

  [all …]
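rxe_check_bind_mw() validates a memory-window bind before rxe_do_bind_mw() commits it: the window must be non-empty, no larger than the parent MR, and fully contained in the MR's [iova, iova + length) range. A sketch of just those range checks, with stand-in types:

    #include <stdint.h>

    struct sketch_mr   { uint64_t iova; uint64_t length; };
    struct sketch_bind { uint64_t addr; uint64_t length; };

    static int check_bind_mw(const struct sketch_mr *mr,
                             const struct sketch_bind *b)
    {
        if (!mr || b->length == 0)
            return -1;                       /* nothing to bind */
        if (b->length > mr->length)
            return -1;                       /* window larger than MR */
        if (b->addr < mr->iova ||
            b->addr + b->length > mr->iova + mr->length)
            return -1;                       /* window outside MR range */
        return 0;
    }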
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ |
| ktls_txrx.c |

  mlx5e_ktls_build_static_params():
     74  mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,   [argument]
     80      struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
     81      struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
     86  #define STATIC_PARAMS_DS_CNT DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS)
     97      fill_static_params(&wqe->params, crypto_info, key_id, resync_tcp_sn);

  mlx5e_ktls_build_progress_params():
    117  mlx5e_ktls_build_progress_params(struct mlx5e_set_tls_progress_params_wqe *wqe,   [argument]
    123      struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
    128  #define PROGRESS_PARAMS_DS_CNT DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS)
    136      fill_progress_params(&wqe->params, tis_tir_num, next_record_tcp_sn);
|
| /linux/drivers/infiniband/hw/mthca/ |
| mthca_srq.c |

  wqe_to_link():
     92  static inline int *wqe_to_link(void *wqe)   [argument]
     94      return (int *) (wqe + offsetof(struct mthca_next_seg, imm));

  mthca_alloc_srq_buf():
    158  void *wqe;   [local]
    185  next = wqe = get_wqe(srq, i);
    188  *wqe_to_link(wqe) = i + 1;
    191  *wqe_to_link(wqe) = -1;
    195  for (scatter = wqe + sizeof (struct mthca_next_seg);
    196      (void *) scatter < wqe + (1 << srq->wqe_shift);

  mthca_tavor_post_srq_recv():
    495  void *wqe;   [local]
    504  wqe = get_wqe(srq, ind);

  [all …]
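mthca keeps unposted SRQ WQEs on an intrusive free list: wqe_to_link() aliases the imm field of a WQE's first segment, and mthca_alloc_srq_buf() chains each WQE to the next by index, terminating the chain with -1. A sketch of that free-list construction; the WQE size, count, and segment layout are hypothetical:

    #include <stddef.h>
    #include <stdint.h>

    #define WQE_SHIFT 6                      /* 64-byte WQEs, hypothetical */
    #define NUM_WQES  16

    struct sketch_next_seg { uint32_t nda_op, ee_nds, flags; int32_t imm; };

    static uint8_t buf[NUM_WQES << WQE_SHIFT];

    static void *get_wqe(int i) { return buf + ((size_t)i << WQE_SHIFT); }

    /* Reuse the imm slot of an unposted WQE as the free-list link. */
    static int32_t *wqe_to_link(void *wqe)
    {
        return (int32_t *)((uint8_t *)wqe +
                           offsetof(struct sketch_next_seg, imm));
    }

    static void build_free_list(void)
    {
        for (int i = 0; i < NUM_WQES; ++i)
            *wqe_to_link(get_wqe(i)) = (i < NUM_WQES - 1) ? i + 1 : -1;
    }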
|
| mthca_qp.c |

  mthca_tavor_post_send():
   1630  void *wqe;   [local]
   1666  wqe = get_send_wqe(qp, ind);
   1668  qp->sq.last = wqe;
   1670  ((struct mthca_next_seg *) wqe)->nda_op = 0;
   1671  ((struct mthca_next_seg *) wqe)->ee_nds = 0;
   1672  ((struct mthca_next_seg *) wqe)->flags =
   1680  ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;
   1682  wqe += sizeof (struct mthca_next_seg);
   1690  set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
   1692  wqe += sizeof (struct mthca_raddr_seg);

  [all …]
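mthca_tavor_post_send() builds a send WQE with a moving cursor: the common next segment is written first, then optional segments (here a remote-address segment) are appended by bumping the pointer by each segment's size. A sketch of that cursor pattern with hypothetical segment layouts:

    #include <stdint.h>
    #include <string.h>

    struct next_seg  { uint32_t nda_op, ee_nds, flags, imm; };
    struct raddr_seg { uint64_t raddr; uint32_t rkey, reserved; };

    /* Returns the total number of WQE bytes written at start. */
    static size_t build_rdma_wqe(void *start, uint64_t raddr, uint32_t rkey)
    {
        uint8_t *wqe = start;

        memset(wqe, 0, sizeof(struct next_seg));   /* nda_op = ee_nds = 0 */
        wqe += sizeof(struct next_seg);

        struct raddr_seg rs = { .raddr = raddr, .rkey = rkey };
        memcpy(wqe, &rs, sizeof(rs));              /* append raddr segment */
        wqe += sizeof(rs);

        return (size_t)(wqe - (uint8_t *)start);
    }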
|
| /linux/drivers/infiniband/hw/hns/ |
| hns_roce_trace.h |

     49  TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len,
     51  TP_ARGS(qpn, idx, wqe, len, id, type),
     55  __array(u32, wqe,
     68  __entry->wqe[i] = le32_to_cpu(((__le32 *)wqe)[i]);
     74  __print_array(__entry->wqe, __entry->len,
     79  TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,
     81  TP_ARGS(qpn, idx, wqe, len, id, type));
     83  TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,
     85  TP_ARGS(qpn, idx, wqe, len, id, type));
     87  TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,

  [all …]
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/ |
| en_tx.c |

  mlx5e_tx_flush():
    354  struct mlx5e_tx_wqe *wqe;   [local]
    367  wqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
    368  mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);

  mlx5e_sq_xmit_wqe():
    426      struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)   [argument]
    440  cseg = &wqe->ctrl;
    441  eseg = &wqe->eth;
    442  dseg = wqe->data;

  mlx5e_tx_mpwqe_same_eseg():
    489  return !memcmp(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);

  mlx5e_tx_mpwqe_session_start():
    496  struct mlx5e_tx_wqe *wqe;   [local]
    500  wqe = MLX5E_TX_FETCH_WQE(sq, pi);

  [all …]
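The memcmp in mlx5e_tx_mpwqe_same_eseg() gates multi-packet WQE reuse: packets may share the open MPWQE session only while their Ethernet/offload segment is byte-identical to the one the session was started with. A sketch of that test; the segment length and struct here are simplified assumptions, not the real mlx5 layout:

    #include <stdbool.h>
    #include <string.h>

    #define ESEG_LEN 16   /* hypothetical prefix length compared */

    struct eseg    { unsigned char bytes[ESEG_LEN]; };
    struct session { struct eseg eseg; int pkts; };

    static bool same_eseg(const struct session *s, const struct eseg *e)
    {
        return memcmp(&s->eseg, e, ESEG_LEN) == 0;
    }

    /* A packet whose eseg differs closes the session and starts a new WQE. */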
|
| /linux/drivers/net/ethernet/huawei/hinic/ |
| hinic_hw_cmdq.c |

     54  #define CMDQ_WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe))   [argument]

  cmdq_prepare_wqe_ctrl():
    177  static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped,   [argument]
    191      wqe_lcmd = &wqe->wqe_lcmd;
    197      wqe_scmd = &wqe->direct_wqe.wqe_scmd;
    209      CMDQ_WQE_HEADER(wqe)->header_info =
    218      saved_data = CMDQ_WQE_HEADER(wqe)->saved_data;
    222      CMDQ_WQE_HEADER(wqe)->saved_data |=
    225      CMDQ_WQE_HEADER(wqe)->saved_data = saved_data;

  cmdq_set_direct_wqe_data():
    234  static void cmdq_set_direct_wqe_data(struct hinic_cmdq_direct_wqe *wqe,   [argument]
    237      struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd;

  [all …]
|
| hinic_hw_qp.h |

    178  void hinic_sq_prepare_wqe(struct hinic_sq *sq, struct hinic_sq_wqe *wqe,
    190      struct hinic_sq_wqe *wqe, struct sk_buff *skb,
    203  void hinic_sq_get_sges(struct hinic_sq_wqe *wqe, struct hinic_sge *sges,
    210      struct hinic_rq_wqe *wqe, struct sk_buff *skb);
    224  void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *wqe,
    228      struct hinic_rq_wqe *wqe, struct hinic_sge *sge);
|
| /linux/drivers/crypto/marvell/octeontx2/ |
| otx2_cptvf_main.c |

  cleanup_tasklet_work():
    138  if (!lfs->lf[i].wqe)
    141      tasklet_kill(&lfs->lf[i].wqe->work);
    142  kfree(lfs->lf[i].wqe);
    143  lfs->lf[i].wqe = NULL;

  init_tasklet_work():
    149  struct otx2_cptlf_wqe *wqe;   [local]
    153  wqe = kzalloc_obj(struct otx2_cptlf_wqe);
    154  if (!wqe) {
    159  tasklet_init(&wqe->work, cptlf_work_handler, (u64) wqe);
    160  wqe->lfs = lfs;
    161  wqe->lf_num = i;

  [all …]
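The OcteonTX2 code above pairs per-LF allocation and tasklet setup with a teardown that kills pending work before freeing and clears the pointer, so a second cleanup pass is a no-op. A userspace sketch of that pairing, with work_init()/work_kill() as stand-ins for tasklet_init()/tasklet_kill():

    #include <stdint.h>
    #include <stdlib.h>

    struct work { void (*fn)(uintptr_t); uintptr_t data; };

    static void work_init(struct work *w, void (*fn)(uintptr_t), uintptr_t data)
    { w->fn = fn; w->data = data; }
    static void work_kill(struct work *w)
    { w->fn = NULL; /* cancel/wait for a pending run, elided here */ }

    struct lf_wqe { struct work work; int lf_num; };
    struct lf     { struct lf_wqe *wqe; };

    static void handler(uintptr_t data)
    { (void)(struct lf_wqe *)data; /* process completions for this LF */ }

    static int init_all(struct lf *lf, int n)
    {
        for (int i = 0; i < n; ++i) {
            struct lf_wqe *wqe = calloc(1, sizeof(*wqe));
            if (!wqe)
                return -1;          /* caller unwinds via cleanup_all() */
            work_init(&wqe->work, handler, (uintptr_t)wqe);
            wqe->lf_num = i;
            lf[i].wqe = wqe;
        }
        return 0;
    }

    static void cleanup_all(struct lf *lf, int n)
    {
        for (int i = 0; i < n; ++i) {
            if (!lf[i].wqe)
                continue;
            work_kill(&lf[i].wqe->work);  /* no handler may run past here */
            free(lf[i].wqe);
            lf[i].wqe = NULL;             /* makes cleanup idempotent */
        }
    }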
|
| /linux/include/rdma/ |
| rdmavt_qp.h |

  rvt_put_swqe():
    587  static inline void rvt_put_swqe(struct rvt_swqe *wqe)   [argument]
    591      for (i = 0; i < wqe->wr.num_sge; i++) {
    592          struct rvt_sge *sge = &wqe->sg_list[i];

  rvt_qp_wqe_reserve():
    608      struct rvt_swqe *wqe)   [argument]

    652  u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);

  rvt_put_qp_swqe():
    750  static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)   [argument]
    752      rvt_put_swqe(wqe);
    754      rdma_destroy_ah_attr(wqe->ud_wr.attr);

  rvt_qp_complete_swqe():
    832      struct rvt_swqe *wqe,   [argument]
    839      int flags = wqe->wr.send_flags;

  [all …]
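rvt_put_swqe() drops the per-SGE memory-region references taken when the send WQE was posted; rvt_put_qp_swqe() additionally releases the UD AH attribute. A sketch of the reference-drop loop, with mr_put() standing in for rdmavt's actual refcount release and simplified types:

    #include <stdatomic.h>

    struct sketch_mr   { atomic_int refcount; };
    struct sketch_sge  { struct sketch_mr *mr; };
    struct sketch_swqe { int num_sge; struct sketch_sge sg_list[4]; };

    static void mr_put(struct sketch_mr *mr)
    {
        atomic_fetch_sub(&mr->refcount, 1);  /* release one posted reference */
    }

    static void put_swqe(struct sketch_swqe *wqe)
    {
        for (int i = 0; i < wqe->num_sge; i++)
            mr_put(wqe->sg_list[i].mr);
    }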
|