| /linux/drivers/infiniband/ulp/iser/ |
| iser_verbs.c |
|   235  unsigned int max_send_wr, cq_size;  in iser_create_ib_conn_res() local
|   250  cq_size = max_send_wr + ISER_QP_MAX_RECV_DTOS;  in iser_create_ib_conn_res()
|   251  ib_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_SOFTIRQ);  in iser_create_ib_conn_res()
|   256  ib_conn->cq_size = cq_size;  in iser_create_ib_conn_res()
|   285  ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);  in iser_create_ib_conn_res()
|   382  ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);  in iser_free_ib_conn_res()
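The iser entries (and the isert ones below) size a shared completion queue by summing the depths of every work queue that can post completions to it, so the CQ can never overflow. A minimal user-space sketch of that budgeting; the send and receive depths are assumed placeholder values, not the driver's real max_send_wr and ISER_QP_MAX_RECV_DTOS:

```c
#include <assert.h>
#include <stdio.h>

/* Assumed placeholder depths; the driver derives max_send_wr from the
 * session limits and ISER_QP_MAX_RECV_DTOS is a driver constant. */
#define SEND_QUEUE_DEPTH  512u
#define RECV_QUEUE_DEPTH  128u

/* A CQ shared by a send queue and a receive queue must hold one completion
 * for every work request that can be outstanding on either queue. */
static unsigned int cq_depth_for(unsigned int send_depth, unsigned int recv_depth)
{
    return send_depth + recv_depth;
}

int main(void)
{
    unsigned int cq_size = cq_depth_for(SEND_QUEUE_DEPTH, RECV_QUEUE_DEPTH);

    assert(cq_size >= SEND_QUEUE_DEPTH && cq_size >= RECV_QUEUE_DEPTH);
    printf("cq_size = %u completions\n", cq_size);
    return 0;
}
```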
|
| iscsi_iser.h | 371  u32 cq_size;  member
|
| /linux/drivers/net/ethernet/mellanox/mlxbf_gige/ |
| mlxbf_gige_rx.c |
|   122  size_t wq_size, cq_size;  in mlxbf_gige_rx_init() local
|   156  cq_size = MLXBF_GIGE_RX_CQE_SZ * priv->rx_q_entries;  in mlxbf_gige_rx_init()
|   157  priv->rx_cqe_base = dma_alloc_coherent(priv->dev, cq_size,  in mlxbf_gige_rx_init()
|
| /linux/drivers/net/ethernet/amd/pds_core/ |
| core.c |
|   153  dma_free_coherent(dev, qcq->cq_size,  in pdsc_qcq_free()
|   239  qcq->q_size + qcq->cq_size,  in pdsc_qcq_alloc()
|   272  qcq->cq_size = PDS_PAGE_SIZE + (num_descs * cq_desc_size);  in pdsc_qcq_alloc()
|   273  qcq->cq_base = dma_alloc_coherent(dev, qcq->cq_size,  in pdsc_qcq_alloc()
|
| debugfs.c | 122  debugfs_create_x32("cq_size", 0400, qcq_dentry, &qcq->cq_size);  in pdsc_debugfs_add_qcq()
|
| /linux/drivers/net/ethernet/microsoft/mana/ |
| hw_channel.c |
|   399  u32 eq_size, cq_size;  in mana_hwc_create_cq() local
|   406  cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);  in mana_hwc_create_cq()
|   407  if (cq_size < MANA_MIN_QSIZE)  in mana_hwc_create_cq()
|   408  cq_size = MANA_MIN_QSIZE;  in mana_hwc_create_cq()
|   421  err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event,  in mana_hwc_create_cq()
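The hw_channel entry rounds the raw CQ byte size up to a power of two and then enforces a floor. roundup_pow_of_two() is a kernel helper, so the sketch below uses a user-space stand-in; GDMA_CQE_SIZE, the queue depth, and the minimum are assumed example values, not the driver's constants:

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed example values; the driver uses GDMA_CQE_SIZE, the requested
 * queue depth, and MANA_MIN_QSIZE. */
#define EXAMPLE_CQE_SIZE   64u
#define EXAMPLE_Q_DEPTH    100u
#define EXAMPLE_MIN_QSIZE  4096u

/* User-space stand-in for the kernel's roundup_pow_of_two(). */
static uint32_t round_up_pow2(uint32_t v)
{
    uint32_t p = 1;

    while (p < v)
        p <<= 1;
    return p;
}

int main(void)
{
    uint32_t cq_size = round_up_pow2(EXAMPLE_CQE_SIZE * EXAMPLE_Q_DEPTH);

    if (cq_size < EXAMPLE_MIN_QSIZE)
        cq_size = EXAMPLE_MIN_QSIZE;

    printf("cq_size = %u bytes\n", cq_size);  /* 6400 rounds up to 8192 here */
    return 0;
}
```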
|
| mana_en.c |
|   1545  req.cq_size = cq_spec->queue_size;  in mana_create_wq_obj()
|   2382  u32 cq_size;  in mana_create_txq() local
|   2401  cq_size = apc->tx_queue_size * COMP_ENTRY_SIZE;  in mana_create_txq()
|   2435  spec.queue_size = cq_size;  in mana_create_txq()
|   2592  struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)  in mana_alloc_rx_wqe()
|   2603  *cq_size = 0;  in mana_alloc_rx_wqe()
|   2625  *cq_size += COMP_ENTRY_SIZE;  in mana_alloc_rx_wqe()
|   2693  u32 cq_size, rq_size;  in mana_create_rxq()
|   2717  err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);  in mana_create_rxq()
|   2722  cq_size  in mana_create_rxq()
|   2589  mana_alloc_rx_wqe(struct mana_port_context *apc, struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)  mana_alloc_rx_wqe() argument
|   2690  u32 cq_size, rq_size;  mana_create_rxq() local
|   [all...]
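mana_alloc_rx_wqe() starts *cq_size at zero and grows it by one completion entry for every receive WQE it prepares, so the RX CQ ends up exactly large enough for the buffers actually posted. A sketch of that accumulation, with the buffer count and COMP_ENTRY_SIZE chosen only for illustration:

```c
#include <stdio.h>

/* Assumed example values; the driver uses COMP_ENTRY_SIZE and the number of
 * receive buffers configured for the queue. */
#define EXAMPLE_COMP_ENTRY_SIZE  64u
#define EXAMPLE_NUM_RX_BUFFERS   1024u

int main(void)
{
    unsigned int cq_size = 0;

    /* One completion entry is reserved per posted receive WQE, mirroring
     * "*cq_size += COMP_ENTRY_SIZE" inside the buffer-setup loop. */
    for (unsigned int i = 0; i < EXAMPLE_NUM_RX_BUFFERS; i++)
        cq_size += EXAMPLE_COMP_ENTRY_SIZE;

    printf("rx cq_size = %u bytes\n", cq_size);
    return 0;
}
```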
| /linux/include/uapi/rdma/ |
| irdma-abi.h | 108  __u32 cq_size;  member
|
| /linux/drivers/infiniband/hw/irdma/ |
| uk.c |
|   1042  void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)  in irdma_uk_cq_resize() argument
|   1045  cq->cq_size = cq_size;  in irdma_uk_cq_resize()
|   1046  IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);  in irdma_uk_cq_resize()
|   1762  cq->cq_size = info->cq_size;  in irdma_uk_cq_init()
|   1767  IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);  in irdma_uk_cq_init()
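irdma_uk_cq_resize() takes the new buffer and size, records them on the CQ, and re-initialises the CQ ring. The sketch below illustrates the shape of such a resize; it assumes (as an illustration only, not the driver's IRDMA_RING_INIT macro) that ring init means storing the size and resetting head and tail:

```c
#include <stdio.h>

/* Illustrative ring bookkeeping, loosely modelled on a CQ ring. */
struct example_ring {
    unsigned int head;
    unsigned int tail;
    unsigned int size;
};

struct example_cq {
    void *cq_base;
    int cq_size;
    struct example_ring cq_ring;
};

/* Assumed behaviour of the ring-init step: record the size and start empty. */
static void example_ring_init(struct example_ring *ring, unsigned int size)
{
    ring->size = size;
    ring->head = 0;
    ring->tail = 0;
}

/* Shape of the resize: point at the new buffer, then rebuild the ring state. */
static void example_cq_resize(struct example_cq *cq, void *cq_base, int cq_size)
{
    cq->cq_base = cq_base;
    cq->cq_size = cq_size;
    example_ring_init(&cq->cq_ring, (unsigned int)cq->cq_size);
}

int main(void)
{
    static char buf[4096];
    struct example_cq cq = { 0 };

    example_cq_resize(&cq, buf, 256);
    printf("ring size %u, head %u, tail %u\n",
           cq.cq_ring.size, cq.cq_ring.head, cq.cq_ring.tail);
    return 0;
}
```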
|
| verbs.c |
|   2066  info.cq_size = max(entries, 4);  in irdma_resize_cq()
|   2068  if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)  in irdma_resize_cq()
|   2105  rsize = info.cq_size * sizeof(struct irdma_cqe);  in irdma_resize_cq()
|   2155  ibcq->cqe = info.cq_size - 1;  in irdma_resize_cq()
|   2478  #define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)  in irdma_create_cq()
|   2518  ukinfo->cq_size = max(entries, 4);  in irdma_create_cq()
|   2524  iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;  in irdma_create_cq()
|   2605  ukinfo->cq_size = entries;  in irdma_create_cq()
|   2608  rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_extended_cqe);  in irdma_create_cq()
|   2610  rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);  in irdma_create_cq()
|   [all …]
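The irdma verbs entries clamp the requested entry count to a minimum of 4, pick a CQE stride (regular or extended), and multiply to get the ring size in bytes; the user-visible ibcq->cqe is then reported relative to that internal cq_size. A hedged sketch of the arithmetic with made-up CQE widths; the real struct irdma_cqe and irdma_extended_cqe layouts are not reproduced here:

```c
#include <stdbool.h>
#include <stdio.h>

/* Made-up CQE widths for illustration only; the driver uses
 * sizeof(struct irdma_cqe) and sizeof(struct irdma_extended_cqe). */
#define EXAMPLE_CQE_BYTES      32u
#define EXAMPLE_EXT_CQE_BYTES  64u
#define MIN_CQ_ENTRIES          4u

static unsigned int max_u(unsigned int a, unsigned int b)
{
    return a > b ? a : b;
}

int main(void)
{
    unsigned int entries = 3;        /* caller-requested depth */
    bool extended_cqes = false;      /* feature selection, assumed for the example */

    unsigned int cq_size = max_u(entries, MIN_CQ_ENTRIES);
    unsigned int stride = extended_cqes ? EXAMPLE_EXT_CQE_BYTES : EXAMPLE_CQE_BYTES;
    unsigned int rsize = cq_size * stride;   /* bytes backing the CQ ring */

    printf("cq_size=%u entries, ring=%u bytes, reported cqe=%u\n",
           cq_size, rsize, cq_size - 1);
    return 0;
}
```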
|
| /linux/io_uring/ |
| register.c |
|   633  size_t cq_size;  in io_register_resize_rings() local
|   636  cq_size = sizeof(struct io_uring_cqe);  in io_register_resize_rings()
|   641  cq_size <<= 1;  in io_register_resize_rings()
|   645  memcpy(&n.rings->cqes[index & dst_mask], &o.rings->cqes[index & src_mask], cq_size);  in io_register_resize_rings()
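The io_uring resize path picks the CQE width (doubled when 32-byte CQEs are in use) and then copies each live entry from the old ring to the new one, masking the same logical index into each ring's own power-of-two size. A user-space sketch of that masked copy between two rings of different sizes; the CQE layout here is invented for illustration and is not the io_uring ABI:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for a CQE; a CQE32-style setup would double this. */
struct example_cqe {
    uint64_t user_data;
    int32_t  res;
    uint32_t flags;
};

int main(void)
{
    struct example_cqe old_ring[8];   /* old CQ ring, 8 entries  */
    struct example_cqe new_ring[16];  /* new CQ ring, 16 entries */
    const uint32_t src_mask = 8 - 1;
    const uint32_t dst_mask = 16 - 1;
    size_t cq_size = sizeof(struct example_cqe);  /* bytes copied per entry */

    /* Pretend logical indices 5..12 are the live entries (they wrap the old ring). */
    uint32_t head = 5, tail = 13;

    for (uint32_t i = 0; i < 8; i++)
        old_ring[i] = (struct example_cqe){ .user_data = i, .res = 0, .flags = 0 };

    /* Copy each live CQE: the same logical index is masked into each ring,
     * mirroring memcpy(&n.rings->cqes[index & dst_mask],
     *                  &o.rings->cqes[index & src_mask], cq_size). */
    for (uint32_t index = head; index != tail; index++)
        memcpy(&new_ring[index & dst_mask], &old_ring[index & src_mask], cq_size);

    printf("copied %u CQEs of %zu bytes each\n", (unsigned)(tail - head), cq_size);
    return 0;
}
```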
|
| /linux/drivers/infiniband/ulp/isert/ |
| ib_isert.h | 184  u32 cq_size;  member
|
| ib_isert.c |
|   103  u32 cq_size = ISERT_QP_MAX_REQ_DTOS + ISERT_QP_MAX_RECV_DTOS + 2;  in isert_create_qp() local
|   109  isert_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_WORKQUEUE);  in isert_create_qp()
|   115  isert_conn->cq_size = cq_size;  in isert_create_qp()
|   137  ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size);  in isert_create_qp()
|   407  ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size);  in isert_destroy_qp()
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/fpga/ |
| conn.c |
|   411  static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)  in mlx5_fpga_conn_create_cq() argument
|   431  cq_size = roundup_pow_of_two(cq_size);  in mlx5_fpga_conn_create_cq()
|   432  MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size));  in mlx5_fpga_conn_create_cq()
|   462  MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));  in mlx5_fpga_conn_create_cq()
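The mlx5 FPGA conn code rounds the requested CQ depth up to a power of two and programs the hardware CQ context with log2 of that depth rather than the raw count. roundup_pow_of_two(), ilog2(), and MLX5_SET() are kernel helpers, so the sketch below uses plain C stand-ins for the arithmetic only:

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's roundup_pow_of_two(). */
static uint32_t round_up_pow2(uint32_t v)
{
    uint32_t p = 1;

    while (p < v)
        p <<= 1;
    return p;
}

/* Stand-in for ilog2() on a power of two: position of the single set bit. */
static uint32_t log2_u32(uint32_t v)
{
    uint32_t log = 0;

    while (v > 1) {
        v >>= 1;
        log++;
    }
    return log;
}

int main(void)
{
    uint32_t cq_size = round_up_pow2(1000);    /* requested depth rounds up to 1024 */
    uint32_t log_cq_size = log2_u32(cq_size);  /* value written into the CQ context */

    printf("cq_size=%u -> log_cq_size=%u\n", cq_size, log_cq_size);  /* 1024 -> 10 */
    return 0;
}
```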
|
| /linux/drivers/infiniband/ulp/srpt/ |
| ib_srpt.h | 304  u32 cq_size;  member
|
| /linux/drivers/net/ethernet/pensando/ionic/ |
| ionic_debugfs.c | 133  debugfs_create_x32("cq_size", 0400, qcq_dentry, &qcq->cq_size);  in ionic_debugfs_add_qcq()
|
| ionic_lif.c |
|   440  dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);  in ionic_qcq_free()
|   698  new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);  in ionic_qcq_alloc()
|   699  new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,  in ionic_qcq_alloc()
|   734  dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);  in ionic_qcq_alloc()
|   827  memset(qcq->cq_base, 0, qcq->cq_size);  in ionic_qcq_sanitize()
|   2922  swap(a->cq_size, b->cq_size);  in ionic_swap_queues()
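The ionic allocation (and the pds_core one above) sizes the completion ring as one PAGE_SIZE of slack plus num_descs descriptor-sized entries, allocates it coherently, and later sanitizes it by zeroing the whole cq_size region. A sketch of that size calculation and re-zeroing, assuming a 4 KiB page and a descriptor width chosen only for illustration:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Assumed values for illustration; the driver uses PAGE_SIZE and the
 * device's completion descriptor size. */
#define EXAMPLE_PAGE_SIZE     4096u
#define EXAMPLE_CQ_DESC_SIZE  16u

int main(void)
{
    unsigned int num_descs = 1024;

    /* An extra PAGE_SIZE plus the descriptor array itself, mirroring
     * cq_size = PAGE_SIZE + (num_descs * cq_desc_size). */
    size_t cq_size = EXAMPLE_PAGE_SIZE + (size_t)num_descs * EXAMPLE_CQ_DESC_SIZE;

    void *cq_base = calloc(1, cq_size);   /* dma_alloc_coherent() in the driver */
    if (!cq_base)
        return 1;

    memset(cq_base, 0, cq_size);          /* the "sanitize" step re-zeroes the ring */
    printf("cq_size = %zu bytes\n", cq_size);

    free(cq_base);
    return 0;
}
```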
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/ |
| dr_send.c |
|   1206  int cq_size;  in mlx5dr_send_ring_alloc() local
|   1214  cq_size = QUEUE_SIZE + 1;  in mlx5dr_send_ring_alloc()
|   1215  dmn->send_ring->cq = dr_create_cq(dmn->mdev, dmn->uar, cq_size);  in mlx5dr_send_ring_alloc()
|
| /linux/drivers/dma/ |
| hisi_dma.c |
|   586  size_t cq_size = sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth;  in hisi_dma_alloc_qps_mem() local
|   598  chan->cq = dmam_alloc_coherent(dev, cq_size, &chan->cq_dma,  in hisi_dma_alloc_qps_mem()
|
| /linux/include/linux/qed/ |
| qed_rdma_if.h | 257  u32 cq_size;  member
|
| /linux/drivers/infiniband/hw/mlx4/ |
| mad.c |
|   1979  int ret, cq_size;  in create_pv_resources() local
|   2006  cq_size = 2 * nmbr_bufs;  in create_pv_resources()
|   2008  cq_size *= 2;  in create_pv_resources()
|   2010  cq_attr.cqe = cq_size;  in create_pv_resources()
|
| /linux/drivers/net/ethernet/brocade/bna/ |
| bna_tx_rx.c |
|   2138  u32 cq_size, hq_size, dq_size;  in bna_rx_res_req() local
|   2149  cq_size = cq_depth * BFI_CQ_WI_SIZE;  in bna_rx_res_req()
|   2150  cq_size = ALIGN(cq_size, PAGE_SIZE);  in bna_rx_res_req()
|   2151  cpage_count = SIZE_TO_PAGES(cq_size);  in bna_rx_res_req()
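The bna entry converts a CQ depth into bytes, rounds the byte count up to a whole page, and then derives how many pages to request. ALIGN() and SIZE_TO_PAGES() are kernel-side macros; the sketch below reimplements the same arithmetic with assumed sizes:

```c
#include <stdio.h>

/* Assumed sizes for illustration; the driver uses BFI_CQ_WI_SIZE and the
 * kernel's PAGE_SIZE. */
#define EXAMPLE_CQ_WI_SIZE  64u
#define EXAMPLE_PAGE_SIZE   4096u

/* Round x up to the next multiple of a (a must be a power of two). */
#define EXAMPLE_ALIGN(x, a)       (((x) + (a) - 1u) & ~((a) - 1u))
/* How many whole pages are needed to hold x bytes. */
#define EXAMPLE_SIZE_TO_PAGES(x)  (((x) + EXAMPLE_PAGE_SIZE - 1u) / EXAMPLE_PAGE_SIZE)

int main(void)
{
    unsigned int cq_depth = 300;

    unsigned int cq_size = cq_depth * EXAMPLE_CQ_WI_SIZE;        /* 19200 bytes */
    cq_size = EXAMPLE_ALIGN(cq_size, EXAMPLE_PAGE_SIZE);         /* rounds to 20480 */
    unsigned int cpage_count = EXAMPLE_SIZE_TO_PAGES(cq_size);   /* 5 pages */

    printf("cq_size=%u bytes, pages=%u\n", cq_size, cpage_count);
    return 0;
}
```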
|
| /linux/drivers/accel/habanalabs/common/ |
| habanalabs_ioctl.c | 644  info.cq_size = hdev->captured_err_info.undef_opcode.cq_size;  in undefined_opcode_info()
|
| /linux/drivers/infiniband/core/ |
| mad.c |
|   3197  int ret, cq_size;  in ib_mad_port_open() local
|   3220  cq_size = mad_sendq_size + mad_recvq_size;  in ib_mad_port_open()
|   3223  cq_size *= 2;  in ib_mad_port_open()
|   3232  port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,  in ib_mad_port_open()
|
| /linux/drivers/infiniband/hw/cxgb4/ |
| t4.h | 832  u64 cq_size;  member
|