/freebsd/contrib/ofed/libcxgb4/
qp.c:
     45  static void copy_wr_to_sq(struct t4_wq *wq, union t4_wr *wqe, u8 len16)
     51  src = &wqe->flits[0];
     81  static void copy_wr_to_rq(struct t4_wq *wq, union t4_recv_wr *wqe, u8 len16)
     87  src = &wqe->flits[0];
    158  static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
    168  wqe->send.sendop_pkd = htobe32(
    171  wqe->send.sendop_pkd = htobe32(
    173  wqe->send.stag_inv = 0;
    174  wqe->send.r3 = 0;
    175  wqe->send.r4 = 0;
    [all …]

t4.h:
    142  static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
    145  wqe->send.opcode = (u8)opcode;
    146  wqe->send.flags = flags;
    147  wqe->send.wrid = wrid;
    148  wqe->send.r1[0] = 0;
    149  wqe->send.r1[1] = 0;
    150  wqe->send.r1[2] = 0;
    151  wqe->send.len16 = len16;
    458  static void copy_wqe_to_udb(volatile u32 *udb_offset, void *wqe)
    463  src = (u64 *)wqe;
    [all …]

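The two hits at t4.h:458/463 are the tail of a doorbell fast path: the work request is copied to the BAR2 user doorbell region as 64-bit flits. A minimal sketch of that copy, with all names assumed (not the libcxgb4 API; the real copy_wqe_to_udb() also orders the stores with a memory barrier):

#include <stdint.h>

static void
copy_wqe_to_udb_sketch(volatile uint64_t *udb, const void *wqe)
{
        const uint64_t *src = wqe;
        int i;

        for (i = 0; i < 8; i++)         /* 64 bytes == 8 flits */
                udb[i] = src[i];
}
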
/freebsd/sys/dev/irdma/ |
irdma_uda.c:
     54  __le64 *wqe;                                 (irdma_sc_access_ah() local)
     57  wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
     58  if (!wqe)
     61  set_64bit_val(wqe, IRDMA_BYTE_0,
     74  set_64bit_val(wqe, IRDMA_BYTE_40,
     77  set_64bit_val(wqe, IRDMA_BYTE_32,
     81  set_64bit_val(wqe, IRDMA_BYTE_56,
     84  set_64bit_val(wqe, IRDMA_BYTE_48,
     88  set_64bit_val(wqe, IRDMA_BYTE_32,
     91  set_64bit_val(wqe, IRDMA_BYTE_48,
    [all …]

irdma_uk.c:
     48  irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge,
     52  set_64bit_val(wqe, offset,
     54  set_64bit_val(wqe, offset + IRDMA_BYTE_8,
     59  set_64bit_val(wqe, offset, 0);
     60  set_64bit_val(wqe, offset + IRDMA_BYTE_8,
     73  irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
     77  set_64bit_val(wqe, offset,
     79  set_64bit_val(wqe, offset + IRDMA_BYTE_8,
     83  set_64bit_val(wqe, offset, 0);
     84  set_64bit_val(wqe, offset + IRDMA_BYTE_8, 0);
    [all …]

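Nearly every irdma hit in this section goes through set_64bit_val(). A hedged sketch of what those calls appear to do (assumed semantics, not the driver's actual definition): the WQE is an array of little-endian 64-bit words addressed by byte offset (IRDMA_BYTE_8 == 8, and so on), so a fragment is two stores, the SGE address at 'offset' and a length/key word 8 bytes later:

#include <stdint.h>
#include <sys/endian.h>         /* htole64(); use <endian.h> on Linux */

static inline void
set_64bit_val_sketch(uint64_t *wqe, uint32_t byte_off, uint64_t val)
{
        wqe[byte_off >> 3] = htole64(val);
}
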
irdma_ctrl.c:
    241  __le64 *wqe;                                 (irdma_sc_add_arp_cache_entry() local)
    244  wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
    245  if (!wqe)
    247  set_64bit_val(wqe, IRDMA_BYTE_8, info->reach_max);
    249  set_64bit_val(wqe, IRDMA_BYTE_16, irdma_mac_to_u64(info->mac_addr));
    258  set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
    260  irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ARP_CACHE_ENTRY WQE", wqe,
    279  __le64 *wqe;                                 (irdma_sc_del_arp_cache_entry() local)
    282  wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
    283  if (!wqe)
    [all …]

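The ARP-cache hits show a consistent posting order: payload words first, the header word at byte 24 last. A hedged sketch of that ordering (the barrier and the placement of the valid/polarity bit in the header are assumptions):

#include <stdint.h>

static void
post_cqp_wqe_sketch(volatile uint64_t *wqe, uint64_t payload0,
    uint64_t payload1, uint64_t hdr)
{
        wqe[1] = payload0;      /* byte 8 */
        wqe[2] = payload1;      /* byte 16 */
        /* order payload before header so the device never sees a
         * half-built WQE */
        __atomic_thread_fence(__ATOMIC_RELEASE);
        wqe[3] = hdr;           /* byte 24: valid bit flips here */
}
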
irdma_puda.c:
    121  __le64 *wqe;                                 (irdma_puda_post_recvbuf() local)
    128  wqe = qp->qp_uk.rq_base[wqe_idx].elem;
    130  get_64bit_val(wqe, IRDMA_BYTE_24, &offset24);
    134  set_64bit_val(wqe, IRDMA_BYTE_16, 0);
    135  set_64bit_val(wqe, 0, buf->mem.pa);
    137  set_64bit_val(wqe, IRDMA_BYTE_8,
    140  set_64bit_val(wqe, IRDMA_BYTE_8,
    146  set_64bit_val(wqe, IRDMA_BYTE_24, offset24);
    466  __le64 *wqe;                                 (irdma_puda_send() local)
    482  wqe = irdma_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
    [all …]

/freebsd/contrib/ofed/libirdma/ |
irdma_uk.c:
     48  irdma_set_fragment(__le64 *wqe, u32 offset, struct ibv_sge *sge,
     52  set_64bit_val(wqe, offset,
     54  set_64bit_val(wqe, offset + IRDMA_BYTE_8,
     59  set_64bit_val(wqe, offset, 0);
     60  set_64bit_val(wqe, offset + IRDMA_BYTE_8,
     73  irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
     77  set_64bit_val(wqe, offset,
     79  set_64bit_val(wqe, offset + IRDMA_BYTE_8,
     83  set_64bit_val(wqe, offset, 0);
     84  set_64bit_val(wqe, offset + IRDMA_BYTE_8, 0);
    [all …]

/freebsd/sys/dev/mlx5/mlx5_en/ |
mlx5_en_tx.c:
     58  struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);   (mlx5e_send_nop() local)
     60  memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));
     62  wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
     63  wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
     65  wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
     67  wqe->ctrl.fm_ce_se = 0;
     70  memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));
    352  mlx5e_get_vxlan_header_size(const struct mbuf *mb, struct mlx5e_tx_wqe *wqe,
             uint8_t cs_mask, uint8_t opcode)          (argument)
    397  wqe …                                        (in mlx5e_get_vxlan_header_size())
    550  struct mlx5_wqe_dump_seg *wqe;               (mlx5e_sq_dump_xmit() local)
    689  struct mlx5e_tx_wqe *wqe;                    (mlx5e_sq_xmit() local)
    [all …]

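The same three-field control-segment setup recurs in every mlx5 snippet in this section (mlx5_en_tx.c, mlx5_en_iq.c, mlx5_en_rl.c, the TLS paths). A hedged sketch with an abridged struct; the constants mirror what the snippets suggest (MLX5_OPCODE_NOP == 0x00, MLX5_WQE_CTRL_CQ_UPDATE == 0x08) and should be treated as assumptions, not the canonical headers:

#include <stdint.h>
#include <sys/endian.h>         /* htobe32() */

struct mlx5_ctrl_seg_sketch {
        uint32_t opmod_idx_opcode;      /* (pc << 8) | opcode, big-endian */
        uint32_t qpn_ds;                /* (sqn << 8) | ds_cnt, big-endian */
        uint8_t  fm_ce_se;              /* completion/fence flags */
};

static void
fill_nop_ctrl_sketch(struct mlx5_ctrl_seg_sketch *cs, uint16_t pc,
    uint32_t sqn, uint8_t ds_cnt, int want_cqe)
{
        cs->opmod_idx_opcode = htobe32(((uint32_t)pc << 8) | 0x00);
        cs->qpn_ds = htobe32((sqn << 8) | ds_cnt);
        cs->fm_ce_se = want_cqe ? 0x08 : 0;
}
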
mlx5_en_hw_tls_rx.c:
    142  struct mlx5e_tx_umr_wqe *wqe;                (mlx5e_tls_rx_send_static_parameters() local)
    151  wqe = mlx5_wq_cyc_get_wqe(&iq->wq, pi);
    153  memset(wqe, 0, sizeof(*wqe));
    155  wqe->ctrl.opmod_idx_opcode = cpu_to_be32((iq->pc << 8) |
    157  wqe->ctrl.qpn_ds = cpu_to_be32((iq->sqn << 8) | ds_cnt);
    158  wqe->ctrl.imm = cpu_to_be32(ptag->tirn << 8);
    159  wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE | MLX5_FENCE_MODE_INITIATOR_SMALL;
    162  wqe->umr.flags = 0x80; /* inline data */
    163  wqe->umr.bsf_octowords =
    167  memcpy(wqe + 1, MLX5_ADDR_OF(sw_tls_rx_cntx, ptag->crypto_params, param),
    [all …]

mlx5_en_hw_tls.c:
    574  struct mlx5e_tx_umr_wqe *wqe;                (mlx5e_tls_send_static_parameters() local)
    578  wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
    580  memset(wqe, 0, sizeof(*wqe));
    582  wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) |
    584  wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
    585  wqe->ctrl.imm = cpu_to_be32(ptag->tisn << 8);
    588  wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE | MLX5_FENCE_MODE_INITIATOR_SMALL;
    590  wqe->ctrl.fm_ce_se = MLX5_FENCE_MODE_INITIATOR_SMALL;
    593  wqe->umr.flags = 0x80; /* inline data */
    594  wqe->umr.bsf_octowords = cpu_to_be16(MLX5_FLD_SZ_BYTES(sw_tls_cntx, param) / 16);
    [all …]

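Both TLS paths build a UMR WQE the same way: zero it, fill the control and UMR segments, mark the data as inline (flags 0x80), then place the parameter block directly after the fixed header, which is what 'wqe + 1' addresses. A hedged sketch of that layout with assumed types and sizes:

#include <stdint.h>
#include <string.h>

/* Abridged stand-ins for the control + UMR segment header. */
struct umr_wqe_hdr_sketch {
        uint8_t ctrl[16];       /* control segment */
        uint8_t umr[48];        /* UMR segment; flags assumed in byte 0 */
};

/* 'wqe' must point into a ring slot large enough for header + params. */
static void
build_inline_umr_sketch(struct umr_wqe_hdr_sketch *wqe,
    const void *params, size_t params_len)
{
        memset(wqe, 0, sizeof(*wqe));
        wqe->umr[0] = 0x80;                     /* inline data, per the snippets */
        memcpy(wqe + 1, params, params_len);    /* payload right after header */
}
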
mlx5_en_rx.c:
     36  struct mlx5e_rx_wqe *wqe, u16 ix)            (mlx5e_alloc_rx_wqe() argument)
     82  wqe->data[0].addr = cpu_to_be64(segs[0].ds_addr);
     83  wqe->data[0].byte_count = cpu_to_be32(segs[0].ds_len |
     86  wqe->data[i].addr = cpu_to_be64(segs[i].ds_addr);
     87  wqe->data[i].byte_count = cpu_to_be32(segs[i].ds_len);
     90  wqe->data[i].addr = 0;
     91  wqe->data[i].byte_count = 0;
    113  struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, rq->wq.head);   (mlx5e_post_rx_wqes() local)
    115  if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, rq->wq.head))) {
    119  mlx5_wq_ll_push(&rq->wq, be16_to_cpu(wqe->next.next_wqe_index));
    [all …]

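mlx5e_alloc_rx_wqe() above turns a DMA segment list into big-endian scatter entries and zeroes the rest of the array, which per the snippet terminates the scatter list. A hedged sketch with stand-in types (the real code also ORs start padding into the first entry's byte count):

#include <stdint.h>
#include <sys/endian.h>         /* htobe64()/htobe32() */

struct rx_seg_sketch {
        uint64_t addr;          /* big-endian DMA address */
        uint32_t byte_count;    /* big-endian length */
};

static void
fill_rx_scatter_sketch(struct rx_seg_sketch *data, int max_segs,
    const uint64_t *seg_addr, const uint32_t *seg_len, int nsegs)
{
        int i;

        for (i = 0; i < nsegs; i++) {
                data[i].addr = htobe64(seg_addr[i]);
                data[i].byte_count = htobe32(seg_len[i]);
        }
        for (; i < max_segs; i++) {     /* terminate the scatter list */
                data[i].addr = 0;
                data[i].byte_count = 0;
        }
}
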
mlx5_en_iq.c:
    107  struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&iq->wq, pi);   (mlx5e_iq_send_nop() local)
    111  memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));
    113  wqe->ctrl.opmod_idx_opcode = cpu_to_be32((iq->pc << 8) | MLX5_OPCODE_NOP);
    114  wqe->ctrl.qpn_ds = cpu_to_be32((iq->sqn << 8) | ds_cnt);
    115  wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
    118  memcpy(iq->doorbell.d32, &wqe->ctrl, sizeof(iq->doorbell.d32));

mlx5_en_rl.c:
    430  struct mlx5e_tx_qos_remap_wqe *wqe;          (mlx5e_rl_post_sq_remap_wqe() local)
    439  wqe = mlx5_wq_cyc_get_wqe(&iq->wq, pi);
    441  memset(wqe, 0, sizeof(*wqe));
    443  wqe->qos_remap.qos_handle = cpu_to_be32(scq_handle);
    444  wqe->qos_remap.queue_handle = cpu_to_be32(sq_handle);
    446  wqe->ctrl.opmod_idx_opcode = cpu_to_be32((iq->pc << 8) |
    448  wqe->ctrl.qpn_ds = cpu_to_be32((iq->sqn << 8) | ds_cnt);
    449  wqe->ctrl.imm = cpu_to_be32(iq->priv->tisn[0] << 8);
    450  wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE | MLX5_FENCE_MODE_INITIATOR_SMALL;
    453  memcpy(iq->doorbell.d32, &wqe->ctrl, sizeof(iq->doorbell.d32));

/freebsd/sys/dev/cxgbe/iw_cxgbe/ |
qp.c:
    408  static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
    420  wqe->send.sendop_pkd = cpu_to_be32(
    423  wqe->send.sendop_pkd = cpu_to_be32(
    425  wqe->send.stag_inv = 0;
    429  wqe->send.sendop_pkd = cpu_to_be32(
    432  wqe->send.sendop_pkd = cpu_to_be32(
    434  wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
    440  wqe->send.r3 = 0;
    441  wqe->send.r4 = 0;
    446  ret = build_immd(sq, wqe->send.u.immd_src, wr,
    [all …]

t4.h:
    131  static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
    134  wqe->send.opcode = (u8)opcode;
    135  wqe->send.flags = flags;
    136  wqe->send.wrid = wrid;
    137  wqe->send.r1[0] = 0;
    138  wqe->send.r1[1] = 0;
    139  wqe->send.r1[2] = 0;
    140  wqe->send.len16 = len16;
    482  t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe, u8 wc)
    487  if (wc && inc == 1 && wq->sq.bar2_qid == 0 && wqe) {
    [all …]

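t4_ring_sq_db() above picks between two doorbell styles. A hedged sketch of the decision, with all names and the copy path assumed: when write-combining is usable and exactly one WQE was posted, the whole 64-byte WQE is pushed through the WC doorbell window; otherwise only the producer-index increment is written to the doorbell register:

#include <stdint.h>

static void
ring_sq_db_sketch(volatile uint64_t *db, volatile uint64_t *wc_win,
    const uint64_t *wqe, uint16_t inc)
{
        if (wc_win != NULL && inc == 1 && wqe != NULL) {
                int i;

                /* one WC burst carries the whole 64-byte WQE */
                for (i = 0; i < 8; i++)
                        wc_win[i] = wqe[i];
        } else {
                *db = inc;      /* plain doorbell: just the PIDX increment */
        }
}
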
/freebsd/sys/dev/bnxt/bnxt_re/ |
ib_verbs.c:
    538  struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;   (bnxt_re_legacy_create_fence_wqe() local)
    544  memset(wqe, 0, sizeof(*wqe));
    545  wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
    546  wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
    547  wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
    548  wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
    549  wqe->bind.zero_based = false;
    550  wqe->bind.parent_l_key = ib_mr->lkey;
    551  wqe->bind.va = (u64)fence->va;
    552  wqe->bind.length = fence->size;
    [all …]

qplib_fp.c:
    766  struct bnxt_qplib_swqe *wqe)                 (bnxt_qplib_post_srq_recv() argument)
    790  i < wqe->num_sge; i++, hw_sge++) {
    791  hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
    792  hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
    793  hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
    795  srqe->wqe_type = wqe->type;
    796  srqe->flags = wqe->flags;
    797  srqe->wqe_size = wqe->num_sge +
    799  if (!wqe->num_sge)
    802  srq->swq[next].wr_id = wqe->wr_id;
    [all …]

/freebsd/contrib/ofed/libmlx4/ |
qp.c:
     76  uint32_t *wqe = get_send_wqe(qp, n);         (stamp_send_wqe() local)
     78  int ds = (((struct mlx4_wqe_ctrl_seg *)wqe)->fence_size & 0x3f) << 2;
     81  wqe[i] = 0xffffffff;
    218  void *wqe;                                   (mlx4_post_send() local)
    252  ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
    268  wqe += sizeof *ctrl;
    280  set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
    282  wqe += sizeof (struct mlx4_wqe_raddr_seg);
    284  set_atomic_seg(wqe, wr);
    285  wqe += sizeof (struct mlx4_wqe_atomic_seg);
    [all …]

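stamp_send_wqe() above is libmlx4's defense against ring wrap: a released send WQE is overwritten with 0xffffffff so a stale ownership bit cannot fool the HCA. The size comes from the control segment's fence_size field, whose low 6 bits count 16-byte units, hence the << 2 to get 32-bit words. A hedged sketch (the real loop strides through the WQE rather than stamping every word):

#include <stdint.h>

static void
stamp_send_wqe_sketch(uint32_t *wqe, uint8_t fence_size)
{
        int ds = (fence_size & 0x3f) << 2;      /* size in 32-bit words */
        int i;

        for (i = 0; i < ds; i++)
                wqe[i] = 0xffffffff;
}
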
/freebsd/sys/dev/mthca/ |
mthca_srq.c:
     92  static inline int *wqe_to_link(void *wqe)
     94  return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
    152  void *wqe;                                   (mthca_alloc_srq_buf() local)
    179  next = wqe = get_wqe(srq, i);
    182  *wqe_to_link(wqe) = i + 1;
    185  *wqe_to_link(wqe) = -1;
    189  for (scatter = wqe + sizeof (struct mthca_next_seg);
    190  (void *) scatter < wqe + (1 << srq->wqe_shift);
    489  void *wqe;                                   (mthca_tavor_post_srq_recv() local)
    498  wqe = get_wqe(srq, ind);
    [all …]

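wqe_to_link() above threads the SRQ free list through the WQE buffers themselves: each idle WQE stores the next free index in its otherwise-unused 'imm' slot, and -1 terminates the chain. A hedged sketch with assumed names:

#include <stddef.h>

static inline int *
wqe_link_sketch(void *wqe, size_t imm_off)
{
        return (int *)((char *)wqe + imm_off);
}

static void
init_srq_free_list_sketch(char *buf, int wqe_shift, int nwqe,
    size_t imm_off)
{
        int i;

        for (i = 0; i < nwqe; i++) {
                void *wqe = buf + ((size_t)i << wqe_shift);

                /* chain 0 -> 1 -> ... -> nwqe-1, ending with -1 */
                *wqe_link_sketch(wqe, imm_off) =
                    (i < nwqe - 1) ? i + 1 : -1;
        }
}
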
mthca_qp.c:
   1613  void *wqe;                                   (mthca_tavor_post_send() local)
   1649  wqe = get_send_wqe(qp, ind);
   1651  qp->sq.last = wqe;
   1653  ((struct mthca_next_seg *) wqe)->nda_op = 0;
   1654  ((struct mthca_next_seg *) wqe)->ee_nds = 0;
   1655  ((struct mthca_next_seg *) wqe)->flags =
   1663  ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;
   1665  wqe += sizeof (struct mthca_next_seg);
   1673  set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
   1675  wqe += sizeof (struct mthca_raddr_seg);
    [all …]

mthca_cq.c:
    126  __be32 wqe;                                  (struct member)
    140  __be32 wqe;                                  (struct member)
    312  mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));   (in mthca_cq_clean())
    388  be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe),  (in handle_error_cqe())
    477  cqe->wqe = new_wqe;
    511  be32_to_cpu(cqe->wqe));                      (in mthca_poll_one())
    540  wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)
    546  u32 wqe = be32_to_cpu(cqe->wqe);             (mthca_poll_one() local)
    548  wqe_index = wqe >> srq->wqe_shift;
    550  mthca_free_srq_wqe(srq, wqe);
    [all …]

/freebsd/sys/dev/mlx4/mlx4_ib/ |
mlx4_ib_qp.c:
    218  __be32 *wqe;                                 (stamp_send_wqe() local)
    233  wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
    234  *wqe = stamp;
    240  wqe = buf + i;
    241  *wqe = cpu_to_be32(0xffffffff);
    250  void *wqe;                                   (post_nop_wqe() local)
    253  ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
    257  struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
    266  inl = wqe + s;
   2299  void *wqe, unsigned *mlx_seg_len)            (build_sriov_qp0_header() argument)
    [all …]

/freebsd/sys/ofed/include/rdma/ |
rdmavt_qp.h:
    498  struct rvt_swqe *wqe)                        (rvt_qp_wqe_reserve() argument)
    500  wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
    521  struct rvt_swqe *wqe)                        (rvt_qp_wqe_unreserve() argument)
    523  if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
    524  wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;

/freebsd/sys/dev/mlx5/mlx5_accel/ |
mlx5_ipsec_rxtx.c:
     76  mlx5e_accel_ipsec_handle_tx_wqe(struct mbuf *mb, struct mlx5e_tx_wqe *wqe,   (argument)
     79  wqe->eth.flow_table_metadata = cpu_to_be32(

ipsec.h:
    240  void mlx5e_accel_ipsec_handle_tx_wqe(struct mbuf *mb, struct mlx5e_tx_wqe *wqe,
    249  mlx5e_accel_ipsec_handle_tx(struct mbuf *mb, struct mlx5e_tx_wqe *wqe)   (argument)
    256  mlx5e_accel_ipsec_handle_tx_wqe(mb, wqe, tag);