Searched refs:qp (Results 1 – 25 of 252) sorted by relevance

/freebsd/sys/dev/nvmf/host/
nvmf_qpair.c
27 struct nvmf_qpair *qp; member
50 nvmf_allocate_request(struct nvmf_host_qpair *qp, void *sqe, in nvmf_allocate_request() argument
63 mtx_lock(&qp->lock); in nvmf_allocate_request()
64 nq = qp->qp; in nvmf_allocate_request()
66 mtx_unlock(&qp->lock); in nvmf_allocate_request()
70 qp->allocating++; in nvmf_allocate_request()
71 MPASS(qp->allocating != 0); in nvmf_allocate_request()
72 mtx_unlock(&qp->lock); in nvmf_allocate_request()
74 req->qp = qp; in nvmf_allocate_request()
83 mtx_lock(&qp->lock); in nvmf_allocate_request()
[all …]
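
The nvmf_qpair.c hits above circle one pattern: nvmf_allocate_request() takes the qpair lock, bails out if the underlying transport qpair (qp->qp) is already gone, and otherwise bumps an allocating counter that pins the qpair while the request is built. A minimal userland sketch of that guard, with hypothetical type and field names and pthreads standing in for FreeBSD's mtx(9):

#include <assert.h>
#include <pthread.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the driver's types. */
struct transport_qpair;

struct host_qpair {
	pthread_mutex_t lock;		/* mtx(9) in the driver */
	struct transport_qpair *qp;	/* NULL once shut down */
	unsigned allocating;		/* requests being built right now */
};

struct request {
	struct host_qpair *qp;
};

/*
 * Shaped like nvmf_allocate_request(): fail if the transport qpair
 * is gone, otherwise pin it via the counter while we allocate.
 */
static struct request *
allocate_request(struct host_qpair *qp)
{
	struct request *req;

	pthread_mutex_lock(&qp->lock);
	if (qp->qp == NULL) {			/* qpair already destroyed */
		pthread_mutex_unlock(&qp->lock);
		return (NULL);
	}
	qp->allocating++;
	assert(qp->allocating != 0);		/* MPASS() in the driver */
	pthread_mutex_unlock(&qp->lock);

	req = calloc(1, sizeof(*req));
	if (req != NULL)
		req->qp = qp;

	pthread_mutex_lock(&qp->lock);
	qp->allocating--;			/* unpin */
	pthread_mutex_unlock(&qp->lock);
	return (req);
}

As far as these fragments show, the counter exists so teardown can wait for in-flight allocations to drain before the transport qpair is freed.
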
/freebsd/sys/dev/nvmf/controller/
nvmft_qpair.c
26 struct nvmf_qpair *qp; member
45 static int _nvmft_send_generic_error(struct nvmft_qpair *qp,
52 struct nvmft_qpair *qp = arg; in nvmft_qpair_error() local
53 struct nvmft_controller *ctrlr = qp->ctrlr; in nvmft_qpair_error()
64 nvmft_printf(ctrlr, "error %d on %s\n", error, qp->name); in nvmft_qpair_error()
65 nvmft_controller_error(ctrlr, qp, error); in nvmft_qpair_error()
71 struct nvmft_qpair *qp = arg; in nvmft_receive_capsule() local
72 struct nvmft_controller *ctrlr = qp->ctrlr; in nvmft_receive_capsule()
79 qp->name, le16toh(cmd->cid), cmd->opc); in nvmft_receive_capsule()
86 _nvmft_send_generic_error(qp, nc, sc_status); in nvmft_receive_capsule()
[all …]
/freebsd/contrib/nvi/common/
seq.c
38 SEQ *lastqp, *qp; in seq_set() local
48 if ((qp = in seq_set()
59 free(qp->output); in seq_set()
60 qp->olen = olen; in seq_set()
61 qp->output = p; in seq_set()
66 CALLOC(sp, qp, 1, sizeof(SEQ)); in seq_set()
67 if (qp == NULL) { in seq_set()
74 qp->name = NULL; in seq_set()
75 else if ((qp->name = v_wstrdup(sp, name, nlen)) == NULL) { in seq_set()
79 qp->nlen = nlen; in seq_set()
[all …]
/freebsd/sys/dev/ntb/
ntb_transport.c
108 struct ntb_transport_qp *qp; member
130 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
140 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
265 #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) argument
274 static int ntb_process_tx(struct ntb_transport_qp *qp,
277 static int ntb_process_rxc(struct ntb_transport_qp *qp);
278 static void ntb_memcpy_rx(struct ntb_transport_qp *qp,
280 static inline void ntb_rx_copy_callback(struct ntb_transport_qp *qp,
282 static void ntb_complete_rxc(struct ntb_transport_qp *qp);
348 int rc, i, db_count, spad_count, qp, qpu, qpo, qpt; ntb_transport_attach() local
597 struct ntb_transport_qp *qp; ntb_transport_init_queue() local
657 ntb_transport_free_queue(struct ntb_transport_qp * qp) ntb_transport_free_queue() argument
706 struct ntb_transport_qp *qp; ntb_transport_create_queue() local
745 ntb_transport_link_up(struct ntb_transport_qp * qp) ntb_transport_link_up() argument
773 ntb_transport_tx_enqueue(struct ntb_transport_qp * qp,void * cb,void * data,unsigned int len) ntb_transport_tx_enqueue() argument
813 struct ntb_transport_qp *qp = entry->qp; ntb_tx_copy_callback() local
862 ntb_async_tx(struct ntb_transport_qp * qp,struct ntb_queue_entry * entry) ntb_async_tx() argument
879 ntb_process_tx(struct ntb_transport_qp * qp,struct ntb_queue_entry * entry) ntb_process_tx() argument
920 struct ntb_transport_qp *qp = arg; ntb_transport_rxc_db() local
940 ntb_process_rxc(struct ntb_transport_qp * qp) ntb_process_rxc() argument
1009 ntb_memcpy_rx(struct ntb_transport_qp * qp,struct ntb_queue_entry * entry,void * offset) ntb_memcpy_rx() argument
1029 ntb_rx_copy_callback(struct ntb_transport_qp * qp,void * data) ntb_rx_copy_callback() argument
1039 ntb_complete_rxc(struct ntb_transport_qp * qp) ntb_complete_rxc() argument
1090 struct ntb_transport_qp *qp; ntb_transport_doorbell_callback() local
1135 struct ntb_transport_qp *qp; ntb_transport_link_work() local
1329 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; ntb_transport_setup_qp_mw() local
1378 struct ntb_transport_qp *qp = arg; ntb_qp_link_work() local
1410 struct ntb_transport_qp *qp; ntb_transport_link_cleanup() local
1441 ntb_qp_link_down(struct ntb_transport_qp * qp) ntb_qp_link_down() argument
1448 ntb_qp_link_down_reset(struct ntb_transport_qp * qp) ntb_qp_link_down_reset() argument
1466 ntb_qp_link_cleanup(struct ntb_transport_qp * qp) ntb_qp_link_cleanup() argument
1486 ntb_transport_link_down(struct ntb_transport_qp * qp) ntb_transport_link_down() argument
1514 ntb_transport_link_query(struct ntb_transport_qp * qp) ntb_transport_link_query() argument
1529 ntb_transport_link_speed(struct ntb_transport_qp * qp) ntb_transport_link_speed() argument
1558 ntb_send_link_down(struct ntb_transport_qp * qp) ntb_send_link_down() argument
1648 ntb_transport_qp_num(struct ntb_transport_qp * qp) ntb_transport_qp_num() argument
1663 ntb_transport_max_size(struct ntb_transport_qp * qp) ntb_transport_max_size() argument
1670 ntb_transport_tx_free_entry(struct ntb_transport_qp * qp) ntb_transport_tx_free_entry() argument
[all …]
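
One detail worth pulling out of the ntb_transport.c entry: QP_TO_MW(nt, qp) maps a queue pair to a memory window with a plain modulo, striping queue pairs round-robin across however many windows the hardware exposes. A standalone toy version (the mw_count of 2 is just an assumed example):

#include <stdio.h>

/* Standalone version of ntb_transport.c's QP_TO_MW() idea. */
#define QP_TO_MW(mw_count, qp)	((qp) % (mw_count))

int
main(void)
{
	unsigned mw_count = 2;	/* assumed: two memory windows */
	unsigned qp;

	/* Eight queue pairs alternate between windows 0 and 1. */
	for (qp = 0; qp < 8; qp++)
		printf("qp %u -> mw %u\n", qp, QP_TO_MW(mw_count, qp));
	return (0);
}
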
/freebsd/sys/dev/mthca/
mthca_qp.c
196 static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp) in is_sqp() argument
198 return qp->qpn >= dev->qp_table.sqp_start && in is_sqp()
199 qp->qpn <= dev->qp_table.sqp_start + 3; in is_sqp()
202 static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp) in is_qp0() argument
204 return qp->qpn >= dev->qp_table.sqp_start && in is_qp0()
205 qp->qpn <= dev->qp_table.sqp_start + 1; in is_qp0()
208 static void *get_recv_wqe(struct mthca_qp *qp, int n) in get_recv_wqe() argument
210 if (qp->is_direct) in get_recv_wqe()
211 return qp->queue.direct.buf + (n << qp->rq.wqe_shift); in get_recv_wqe()
213 return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf + in get_recv_wqe()
[all …]
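
The mthca_qp.c fragment shows get_recv_wqe() resolving WQE n in one of two ways: a direct contiguous buffer indexed by n << wqe_shift, or a page list where that same byte offset is split into a page index and an in-page offset. A self-contained sketch of the arithmetic, assuming 4 KiB pages and hypothetical struct names:

#include <stddef.h>

#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(PAGE_SIZE - 1)

/* Hypothetical reduction of the mthca queue layout. */
struct buf_page {
	void *buf;
};

struct wq {
	int		is_direct;
	void		*direct_buf;		/* one contiguous buffer */
	struct buf_page	*page_list;		/* else an array of pages */
	int		wqe_shift;		/* log2 of the WQE stride */
};

/*
 * Same arithmetic as get_recv_wqe(): compute the byte offset of
 * WQE n, then resolve it directly or through the page list.
 */
static void *
get_wqe(struct wq *q, int n)
{
	size_t off = (size_t)n << q->wqe_shift;

	if (q->is_direct)
		return ((char *)q->direct_buf + off);
	return ((char *)q->page_list[off >> PAGE_SHIFT].buf +
	    (off & PAGE_MASK));
}
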
/freebsd/contrib/ofed/libirdma/
irdma_uk.c
92 static inline u64 irdma_nop_hdr(struct irdma_qp_uk *qp){ in irdma_nop_hdr() argument
95 FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); in irdma_nop_hdr()
103 irdma_nop_1(struct irdma_qp_uk *qp) in irdma_nop_1() argument
108 if (!qp->sq_ring.head) in irdma_nop_1()
111 wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring); in irdma_nop_1()
112 wqe = qp->sq_base[wqe_idx].elem; in irdma_nop_1()
114 qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA; in irdma_nop_1()
123 set_64bit_val(wqe, IRDMA_BYTE_24, irdma_nop_hdr(qp)); in irdma_nop_1()
134 irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx) in irdma_clr_wqes() argument
140 wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size; in irdma_clr_wqes()
[all …]
/freebsd/sys/dev/irdma/
irdma_uk.c
92 static inline u64 irdma_nop_hdr(struct irdma_qp_uk *qp){ in irdma_nop_hdr() argument
95 FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); in irdma_nop_hdr()
103 irdma_nop_1(struct irdma_qp_uk *qp) in irdma_nop_1() argument
108 if (!qp->sq_ring.head) in irdma_nop_1()
111 wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring); in irdma_nop_1()
112 wqe = qp->sq_base[wqe_idx].elem; in irdma_nop_1()
114 qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA; in irdma_nop_1()
123 set_64bit_val(wqe, IRDMA_BYTE_24, irdma_nop_hdr(qp)); in irdma_nop_1()
134 irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx) in irdma_clr_wqes() argument
140 wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size; in irdma_clr_wqes()
[all …]
/freebsd/sys/dev/mlx4/mlx4_ib/
mlx4_ib_qp.c
87 struct mlx4_ib_qp qp; member
128 return container_of(mqp, struct mlx4_ib_sqp, qp); in to_msqp()
131 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_tunnel_qp() argument
136 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && in is_tunnel_qp()
137 qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + in is_tunnel_qp()
141 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_sqp() argument
148 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_sqp()
149 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3); in is_sqp()
155 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] || in is_sqp()
156 qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) { in is_sqp()
[all …]
/freebsd/sys/dev/nvmf/
nvmf_tcp.c
39 struct nvmf_tcp_qpair *qp; member
64 struct nvmf_qpair qp; member
133 #define TQP(qp) ((struct nvmf_tcp_qpair *)(qp)) argument
174 tcp_alloc_command_buffer(struct nvmf_tcp_qpair *qp, in tcp_alloc_command_buffer() argument
181 cb->qp = qp; in tcp_alloc_command_buffer()
264 nvmf_tcp_write_pdu(struct nvmf_tcp_qpair *qp, struct mbuf *m) in nvmf_tcp_write_pdu() argument
266 struct socket *so = qp->so; in nvmf_tcp_write_pdu()
269 mbufq_enqueue(&qp->tx_pdus, m); in nvmf_tcp_write_pdu()
272 cv_signal(&qp->tx_cv); in nvmf_tcp_write_pdu()
277 nvmf_tcp_report_error(struct nvmf_tcp_qpair *qp, uint16_t fes, uint32_t fei, in nvmf_tcp_report_error() argument
[all …]
/freebsd/lib/libnvmf/
nvmf_tcp.c
26 struct nvmf_tcp_qpair *qp; member
62 struct nvmf_qpair qp; member
82 #define TQP(qp) ((struct nvmf_tcp_qpair *)(qp)) argument
93 tcp_alloc_command_buffer(struct nvmf_tcp_qpair *qp, void *data, in tcp_alloc_command_buffer() argument
100 cb->qp = qp; in tcp_alloc_command_buffer()
109 LIST_INSERT_HEAD(&qp->rx_buffers, cb, link); in tcp_alloc_command_buffer()
111 LIST_INSERT_HEAD(&qp->tx_buffers, cb, link); in tcp_alloc_command_buffer()
116 tcp_find_command_buffer(struct nvmf_tcp_qpair *qp, uint16_t cid, uint16_t ttag, in tcp_find_command_buffer() argument
122 list = receive ? &qp->rx_buffers : &qp->tx_buffers; in tcp_find_command_buffer()
131 tcp_purge_command_buffer(struct nvmf_tcp_qpair *qp, uint16_t cid, uint16_t ttag, in tcp_purge_command_buffer() argument
[all …]
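
The libnvmf nvmf_tcp.c matches outline the command-buffer bookkeeping: tcp_alloc_command_buffer() links each buffer onto a per-qpair rx or tx list, and tcp_find_command_buffer() walks the chosen list by CID and transfer tag. A hedged reconstruction of the lookup with <sys/queue.h>, field names assumed:

#include <sys/queue.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Assumed shape of a command buffer and its list linkage. */
struct command_buffer {
	uint16_t cid;				/* command identifier */
	uint16_t ttag;				/* transfer tag */
	LIST_ENTRY(command_buffer) link;
};

LIST_HEAD(cb_list, command_buffer);

struct tcp_qpair {
	struct cb_list rx_buffers;
	struct cb_list tx_buffers;
};

/*
 * Mirrors the shape of tcp_find_command_buffer(): pick the rx or tx
 * list, then scan for a matching CID and transfer tag.
 */
static struct command_buffer *
find_command_buffer(struct tcp_qpair *qp, uint16_t cid, uint16_t ttag,
    bool receive)
{
	struct cb_list *list;
	struct command_buffer *cb;

	list = receive ? &qp->rx_buffers : &qp->tx_buffers;
	LIST_FOREACH(cb, list, link) {
		if (cb->cid == cid && cb->ttag == ttag)
			return (cb);
	}
	return (NULL);
}
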
nvmf_host.c
47 struct nvmf_qpair *qp; in nvmf_connect() local
52 qp = NULL; in nvmf_connect()
86 qp = nvmf_allocate_qpair(na, params); in nvmf_connect()
87 if (qp == NULL) in nvmf_connect()
100 cc = nvmf_allocate_command(qp, &cmd); in nvmf_connect()
127 error = nvmf_receive_capsule(qp, &rc); in nvmf_connect()
163 qp->nq_flow_control = false; in nvmf_connect()
165 qp->nq_flow_control = true; in nvmf_connect()
166 qp->nq_sqhd = sqhd; in nvmf_connect()
167 qp->nq_sqtail = sqhd; in nvmf_connect()
[all …]
/freebsd/sys/crypto/ccp/
ccp_hardware.c
123 ccp_queue_write_tail(struct ccp_queue *qp) in ccp_queue_write_tail() argument
125 ccp_write_queue_4(qp->cq_softc, qp->cq_qindex, CMD_Q_TAIL_LO_BASE, in ccp_queue_write_tail()
126 ((uint32_t)qp->desc_ring_bus_addr) + (Q_DESC_SIZE * qp->cq_tail)); in ccp_queue_write_tail()
134 ccp_queue_lsb_entry(struct ccp_queue *qp, unsigned lsb_entry) in ccp_queue_lsb_entry() argument
136 return ((qp->private_lsb * LSB_REGION_LENGTH + lsb_entry)); in ccp_queue_lsb_entry()
144 ccp_queue_lsb_address(struct ccp_queue *qp, unsigned lsb_entry) in ccp_queue_lsb_address() argument
146 return (ccp_queue_lsb_entry(qp, lsb_entry) * LSB_ENTRY_SIZE); in ccp_queue_lsb_address()
213 struct ccp_queue *qp; in ccp_hw_attach_queue() local
220 qp = &sc->queues[queue]; in ccp_hw_attach_queue()
232 if (qp->lsb_mask == 0) { in ccp_hw_attach_queue()
[all …]
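
The ccp_hardware.c entry shows how the tail doorbell is rung: ccp_queue_write_tail() writes the bus address of the current tail descriptor, i.e. the ring's base bus address plus the tail index times the descriptor size. The arithmetic in isolation (descriptor size assumed, not taken from the driver):

#include <stdint.h>

#define Q_DESC_SIZE	32u	/* assumed size; not taken from the driver */

/*
 * Hypothetical reduction of ccp_queue_write_tail(): the doorbell
 * value is the bus address of the tail descriptor.
 */
static uint32_t
queue_tail_doorbell(uint32_t desc_ring_bus_addr, uint32_t tail)
{
	return (desc_ring_bus_addr + Q_DESC_SIZE * tail);
}
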
ccp.c
140 struct ccp_queue *qp; in ccp_initialize_queues() local
144 qp = &sc->queues[i]; in ccp_initialize_queues()
146 qp->cq_softc = sc; in ccp_initialize_queues()
147 qp->cq_qindex = i; in ccp_initialize_queues()
148 mtx_init(&qp->cq_lock, "ccp queue", NULL, MTX_DEF); in ccp_initialize_queues()
150 qp->cq_sg_crp = sglist_alloc(32, M_WAITOK); in ccp_initialize_queues()
152 qp->cq_sg_ulptx = sglist_alloc(34, M_WAITOK); in ccp_initialize_queues()
153 qp->cq_sg_dst = sglist_alloc(2, M_WAITOK); in ccp_initialize_queues()
160 struct ccp_queue *qp; in ccp_free_queues() local
164 qp = &sc->queues[i]; in ccp_free_queues()
[all …]
/freebsd/sys/dev/qlnx/qlnxe/
ecore_roce.c
218 static void ecore_rdma_copy_gids(struct ecore_rdma_qp *qp, __le32 *src_gid, in ecore_rdma_copy_gids() argument
222 if (qp->roce_mode == ROCE_V2_IPV4) { in ecore_rdma_copy_gids()
228 src_gid[3] = OSAL_CPU_TO_LE32(qp->sgid.ipv4_addr); in ecore_rdma_copy_gids()
229 dst_gid[3] = OSAL_CPU_TO_LE32(qp->dgid.ipv4_addr); in ecore_rdma_copy_gids()
234 for (i = 0; i < OSAL_ARRAY_SIZE(qp->sgid.dwords); i++) { in ecore_rdma_copy_gids()
235 src_gid[i] = OSAL_CPU_TO_LE32(qp->sgid.dwords[i]); in ecore_rdma_copy_gids()
236 dst_gid[i] = OSAL_CPU_TO_LE32(qp->dgid.dwords[i]); in ecore_rdma_copy_gids()
391 struct ecore_rdma_qp *qp) in ecore_roce_sp_create_responder() argument
403 if (!qp->has_resp) in ecore_roce_sp_create_responder()
406 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "qp_idx = %08x\n", qp->qp_idx); in ecore_roce_sp_create_responder()
[all …]
/freebsd/contrib/ofed/libmlx4/
qp.c
59 static void *get_recv_wqe(struct mlx4_qp *qp, int n) in get_recv_wqe() argument
61 return qp->buf.buf + qp->rq.offset + (n << qp->rq.wqe_shift); in get_recv_wqe()
64 static void *get_send_wqe(struct mlx4_qp *qp, int n) in get_send_wqe() argument
66 return qp->buf.buf + qp->sq.offset + (n << qp->sq.wqe_shift); in get_send_wqe()
74 static void stamp_send_wqe(struct mlx4_qp *qp, int n) in stamp_send_wqe() argument
76 uint32_t *wqe = get_send_wqe(qp, n); in stamp_send_wqe()
84 void mlx4_init_qp_indices(struct mlx4_qp *qp) in mlx4_init_qp_indices() argument
86 qp->sq.head = 0; in mlx4_init_qp_indices()
87 qp->sq.tail = 0; in mlx4_init_qp_indices()
88 qp->rq.head = 0; in mlx4_init_qp_indices()
[all …]
verbs.c
351 int mlx4_bind_mw(struct ibv_qp *qp, struct ibv_mw *mw, in mlx4_bind_mw() argument
369 ret = mlx4_post_send(qp, &wr, &bad_wr); in mlx4_bind_mw()
758 struct mlx4_qp *qp) in mlx4_cmd_create_qp_ex() argument
774 ret = ibv_cmd_create_qp_ex2(context, &qp->verbs_qp, in mlx4_cmd_create_qp_ex()
775 sizeof(qp->verbs_qp), attr, in mlx4_cmd_create_qp_ex()
798 struct mlx4_qp *qp; in mlx4_create_qp_ex() local
821 qp = calloc(1, sizeof *qp); in mlx4_create_qp_ex()
822 if (!qp) in mlx4_create_qp_ex()
826 attr->cap.max_send_wr = qp->sq.wqe_cnt = 0; in mlx4_create_qp_ex()
828 mlx4_calc_sq_wqe_size(&attr->cap, attr->qp_type, qp); in mlx4_create_qp_ex()
[all …]
/freebsd/sys/dev/bnxt/bnxt_re/
qplib_fp.c
49 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
51 static void bnxt_re_legacy_cancel_phantom_processing(struct bnxt_qplib_qp *qp) in bnxt_re_legacy_cancel_phantom_processing() argument
53 qp->sq.condition = false; in bnxt_re_legacy_cancel_phantom_processing()
54 qp->sq.legacy_send_phantom = false; in bnxt_re_legacy_cancel_phantom_processing()
55 qp->sq.single = false; in bnxt_re_legacy_cancel_phantom_processing()
58 static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp) in __bnxt_qplib_add_flush_qp() argument
62 scq = qp->scq; in __bnxt_qplib_add_flush_qp()
63 rcq = qp->rcq; in __bnxt_qplib_add_flush_qp()
65 if (!qp->sq.flushed) { in __bnxt_qplib_add_flush_qp()
68 qp); in __bnxt_qplib_add_flush_qp()
[all …]
/freebsd/sys/dev/qlnx/qlnxr/
qlnxr_cm.c
41 struct qlnxr_qp *qp, in qlnxr_store_gsi_qp_cq() argument
49 dev->gsi_qp = qp; in qlnxr_store_gsi_qp_cq()
67 struct qlnxr_qp *qp = dev->gsi_qp; in qlnxr_ll2_complete_tx_packet() local
76 spin_lock_irqsave(&qp->q_lock, flags); in qlnxr_ll2_complete_tx_packet()
78 qlnxr_inc_sw_gsi_cons(&qp->sq); in qlnxr_ll2_complete_tx_packet()
80 spin_unlock_irqrestore(&qp->q_lock, flags); in qlnxr_ll2_complete_tx_packet()
97 struct qlnxr_qp *qp = NULL; in qlnxr_ll2_complete_rx_packet() local
113 qp = dev->gsi_qp; in qlnxr_ll2_complete_rx_packet()
123 spin_lock_irqsave(&qp->q_lock, flags); in qlnxr_ll2_complete_rx_packet()
125 qp->rqe_wr_id[qp->rq.gsi_cons].rc = in qlnxr_ll2_complete_rx_packet()
[all …]
qlnxr_verbs.c
839 struct qlnxr_qp *qp, in get_gid_info() argument
850 &dev->sgid_tbl[qp->sgid_idx].raw[0], in get_gid_info()
2118 struct qlnxr_qp *qp) in qlnxr_copy_rq_uresp() argument
2125 QL_DPRINT12(ha, "enter qp->srq = %p\n", qp->srq); in qlnxr_copy_rq_uresp()
2127 if (qp->srq) in qlnxr_copy_rq_uresp()
2144 uresp->rq_icid = qp->icid; in qlnxr_copy_rq_uresp()
2153 struct qlnxr_qp *qp) in qlnxr_copy_sq_uresp() argument
2165 uresp->sq_icid = qp->icid; in qlnxr_copy_sq_uresp()
2168 uresp->sq_icid = qp->icid + 1; in qlnxr_copy_sq_uresp()
2176 struct qlnxr_qp *qp, in qlnxr_copy_qp_uresp() argument
[all …]
/freebsd/contrib/ofed/libmlx5/
verbs.c
824 struct mlx5_qp *qp) in mlx5_calc_send_wqe() argument
842 qp->max_tso_header = attr->max_tso_header; in mlx5_calc_send_wqe()
861 struct mlx5_qp *qp) in mlx5_calc_rcv_wqe() argument
871 if (qp->wq_sig) in mlx5_calc_rcv_wqe()
884 struct mlx5_qp *qp) in mlx5_calc_sq_size() argument
893 wqe_size = mlx5_calc_send_wqe(ctx, attr, qp); in mlx5_calc_sq_size()
904 qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) - in mlx5_calc_sq_size()
906 attr->cap.max_inline_data = qp->max_inline_data; in mlx5_calc_sq_size()
918 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; in mlx5_calc_sq_size()
919 if (qp … in mlx5_calc_sq_size()
967 mlx5_calc_rq_size(struct mlx5_context * ctx,struct ibv_qp_init_attr_ex * attr,struct mlx5_qp * qp) mlx5_calc_rq_size() argument
1008 mlx5_calc_wq_size(struct mlx5_context * ctx,struct ibv_qp_init_attr_ex * attr,struct mlx5_qp * qp) mlx5_calc_wq_size() argument
1030 map_uuar(struct ibv_context * context,struct mlx5_qp * qp,int uuar_index) map_uuar() argument
1051 mlx5_alloc_qp_buf(struct ibv_context * context,struct ibv_qp_init_attr_ex * attr,struct mlx5_qp * qp,int size) mlx5_alloc_qp_buf() argument
1148 mlx5_free_qp_buf(struct mlx5_qp * qp) mlx5_free_qp_buf() argument
1172 mlx5_cmd_create_rss_qp(struct ibv_context * context,struct ibv_qp_init_attr_ex * attr,struct mlx5_qp * qp) mlx5_cmd_create_rss_qp() argument
1204 mlx5_cmd_create_qp_ex(struct ibv_context * context,struct ibv_qp_init_attr_ex * attr,struct mlx5_create_qp * cmd,struct mlx5_qp * qp,struct mlx5_create_qp_resp_ex * resp) mlx5_cmd_create_qp_ex() argument
1251 struct mlx5_qp *qp; create_qp() local
1437 struct ibv_qp *qp; mlx5_create_qp() local
1451 mlx5_lock_cqs(struct ibv_qp * qp) mlx5_lock_cqs() argument
1473 mlx5_unlock_cqs(struct ibv_qp * qp) mlx5_unlock_cqs() argument
1497 struct mlx5_qp *qp = to_mqp(ibqp); mlx5_destroy_qp() local
1550 struct mlx5_qp *qp = to_mqp(ibqp); mlx5_query_qp() local
1573 mlx5_modify_qp(struct ibv_qp * qp,struct ibv_qp_attr * attr,int attr_mask) mlx5_modify_qp() argument
1759 mlx5_attach_mcast(struct ibv_qp * qp,const union ibv_gid * gid,uint16_t lid) mlx5_attach_mcast() argument
1764 mlx5_detach_mcast(struct ibv_qp * qp,const union ibv_gid * gid,uint16_t lid) mlx5_detach_mcast() argument
[all …]
qp.c
61 static void *get_recv_wqe(struct mlx5_qp *qp, int n) in get_recv_wqe() argument
63 return qp->buf.buf + qp->rq.offset + (n << qp->rq.wqe_shift); in get_recv_wqe()
93 int mlx5_copy_to_recv_wqe(struct mlx5_qp *qp, int idx, void *buf, int size) in mlx5_copy_to_recv_wqe() argument
96 int max = 1 << (qp->rq.wqe_shift - 4); in mlx5_copy_to_recv_wqe()
98 scat = get_recv_wqe(qp, idx); in mlx5_copy_to_recv_wqe()
99 if (unlikely(qp->wq_sig)) in mlx5_copy_to_recv_wqe()
105 int mlx5_copy_to_send_wqe(struct mlx5_qp *qp, int idx, void *buf, int size) in mlx5_copy_to_send_wqe() argument
112 idx &= (qp->sq.wqe_cnt - 1); in mlx5_copy_to_send_wqe()
113 ctrl = mlx5_get_send_wqe(qp, idx); in mlx5_copy_to_send_wqe()
114 if (qp->ibv_qp->qp_type != IBV_QPT_RC) { in mlx5_copy_to_send_wqe()
[all …]
/freebsd/sys/dev/mlx5/mlx5_ib/
mlx5_ib_qp.c
100 static void *get_wqe(struct mlx5_ib_qp *qp, int offset) in get_wqe() argument
102 return mlx5_buf_offset(&qp->buf, offset); in get_wqe()
105 static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n) in get_recv_wqe() argument
107 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); in get_recv_wqe()
110 void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n) in mlx5_get_send_wqe() argument
112 return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE)); in mlx5_get_send_wqe()
132 int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index, in mlx5_ib_read_user_wqe() argument
136 struct ib_device *ibdev = qp->ibqp.device; in mlx5_ib_read_user_wqe()
138 struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq; in mlx5_ib_read_user_wqe()
148 qp->ibqp.qp_type); in mlx5_ib_read_user_wqe()
[all …]
/freebsd/sys/ofed/drivers/infiniband/core/
ib_verbs.c
787 struct ib_qp *qp = context; in __ib_shared_qp_event_handler() local
790 spin_lock_irqsave(&qp->device->event_handler_lock, flags); in __ib_shared_qp_event_handler()
791 list_for_each_entry(event->element.qp, &qp->open_list, open_list) in __ib_shared_qp_event_handler()
792 if (event->element.qp->event_handler) in __ib_shared_qp_event_handler()
793 event->element.qp->event_handler(event, event->element.qp->qp_context); in __ib_shared_qp_event_handler()
794 spin_unlock_irqrestore(&qp->device->event_handler_lock, flags); in __ib_shared_qp_event_handler()
797 static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp) in __ib_insert_xrcd_qp() argument
800 list_add(&qp->xrcd_list, &xrcd->tgt_qp_list); in __ib_insert_xrcd_qp()
808 struct ib_qp *qp; in __ib_open_qp() local
811 qp = kzalloc(sizeof *qp, GFP_KERNEL); in __ib_open_qp()
[all …]
/freebsd/sys/dev/mlx5/mlx5_fpga/
mlx5fpga_conn.c
108 if (unlikely(conn->qp.rq.pc - conn->qp.rq.cc >= conn->qp.rq.size)) { in mlx5_fpga_conn_post_recv()
113 ix = conn->qp.rq.pc & (conn->qp.rq.size - 1); in mlx5_fpga_conn_post_recv()
114 data = mlx5_wq_cyc_get_wqe(&conn->qp.wq.rq, ix); in mlx5_fpga_conn_post_recv()
119 conn->qp.rq.pc++; in mlx5_fpga_conn_post_recv()
120 conn->qp.rq.bufs[ix] = buf; in mlx5_fpga_conn_post_recv()
124 *conn->qp.wq.rq.db = cpu_to_be32(conn->qp.rq.pc & 0xffff); in mlx5_fpga_conn_post_recv()
133 *conn->qp.wq.sq.db = cpu_to_be32(conn->qp.sq.pc); in mlx5_fpga_conn_notify_hw()
147 ix = conn->qp.sq.pc & (conn->qp.sq.size - 1); in mlx5_fpga_conn_post_send()
149 ctrl = mlx5_wq_cyc_get_wqe(&conn->qp.wq.sq, ix); in mlx5_fpga_conn_post_send()
164 ctrl->opmod_idx_opcode = cpu_to_be32(((conn->qp.sq.pc & 0xffff) << 8) | in mlx5_fpga_conn_post_send()
[all …]
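
The mlx5fpga_conn.c hits are a textbook power-of-two ring: the receive queue counts as full when the producer counter (pc) runs a whole size ahead of the consumer counter (cc), and the slot index is the counter masked with size - 1. A minimal sketch with hypothetical types, assuming size is a power of two:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical reduction of the conn->qp.rq/sq bookkeeping. */
struct ring {
	uint32_t pc;	/* producer counter, increments forever */
	uint32_t cc;	/* consumer counter */
	uint32_t size;	/* slot count; must be a power of two */
};

/*
 * Full when the producer is a whole ring ahead of the consumer;
 * unsigned wraparound keeps the subtraction correct.
 */
static bool
ring_full(const struct ring *r)
{
	return (r->pc - r->cc >= r->size);
}

/* Slot index for the next post, as in mlx5_fpga_conn_post_recv(). */
static uint32_t
ring_index(const struct ring *r)
{
	return (r->pc & (r->size - 1));
}

Because the counters are free-running, no slot is wasted telling full apart from empty, which is the usual payoff of this scheme.
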
/freebsd/contrib/ncurses/ncurses/tinfo/
comp_parse.c
404 ENTRY *qp, *rp, *lastread = 0; in _nc_resolve_uses2() local
415 for_entry_list(qp) { in _nc_resolve_uses2()
418 for_entry_list2(rp, qp->next) { in _nc_resolve_uses2()
419 if (qp > rp in _nc_resolve_uses2()
420 && check_collisions(qp->tterm.term_names, in _nc_resolve_uses2()
426 (void) fprintf(stderr, "and\t%s\n", qp->tterm.term_names); in _nc_resolve_uses2()
428 qp->tterm.term_names)) { in _nc_resolve_uses2()
446 for_entry_list(qp) { in _nc_resolve_uses2()
447 for (i = 0; i < qp->nuses; i++) { in _nc_resolve_uses2()
449 char *child = _nc_first_name(qp->tterm.term_names); in _nc_resolve_uses2()
[all …]
