| /linux/drivers/net/ethernet/microsoft/mana/ |
| hw_channel.c |
    389  static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,  in mana_hwc_create_cq() argument
    402  eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);  in mana_hwc_create_cq()
    406  cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);  in mana_hwc_create_cq()
    429  comp_buf = kzalloc_objs(*comp_buf, q_depth);  in mana_hwc_create_cq()
    437  hwc_cq->queue_depth = q_depth;  in mana_hwc_create_cq()
    450  static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,  in mana_hwc_alloc_dma_buf() argument
    464  dma_buf = kzalloc_flex(*dma_buf, reqs, q_depth);  in mana_hwc_alloc_dma_buf()
    468  dma_buf->num_reqs = q_depth;  in mana_hwc_alloc_dma_buf()
    470  buf_size = MANA_PAGE_ALIGN(q_depth * max_msg_size);  in mana_hwc_alloc_dma_buf()
    483  for (i = 0; i < q_depth; i++) {  in mana_hwc_alloc_dma_buf()
    [all …]
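
The MANA host-channel hits above size the EQ and CQ buffers by rounding entry_size * q_depth up to a power of two (lines 402 and 406). A minimal userspace sketch of that sizing rule, with illustrative entry sizes standing in for the driver's GDMA_EQE_SIZE/GDMA_CQE_SIZE constants:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative entry sizes; the real GDMA_EQE_SIZE/GDMA_CQE_SIZE are
     * defined in the driver headers. */
    #define EQE_SIZE 16u
    #define CQE_SIZE 64u

    /* Round v up to the next power of two (v > 0), mirroring the kernel's
     * roundup_pow_of_two(). */
    static uint32_t roundup_pow_of_two_u32(uint32_t v)
    {
        v--;
        v |= v >> 1;
        v |= v >> 2;
        v |= v >> 4;
        v |= v >> 8;
        v |= v >> 16;
        return v + 1;
    }

    int main(void)
    {
        uint16_t q_depth = 100;   /* arbitrary example depth */
        uint32_t eq_size = roundup_pow_of_two_u32(EQE_SIZE * q_depth);
        uint32_t cq_size = roundup_pow_of_two_u32(CQE_SIZE * q_depth);

        printf("eq_size=%u cq_size=%u\n", eq_size, cq_size);  /* 2048, 8192 */
        return 0;
    }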
|
| /linux/drivers/net/ethernet/huawei/hinic/ |
| hinic_hw_wq.c |
    34   #define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size)
    504  u16 wqebb_size, u32 wq_page_size, u16 q_depth,  in hinic_wq_allocate() argument
    523  if (q_depth & (q_depth - 1)) {  in hinic_wq_allocate()
    547  wq->q_depth = q_depth;  in hinic_wq_allocate()
    564  atomic_set(&wq->delta, q_depth);  in hinic_wq_allocate()
    565  wq->mask = q_depth - 1;  in hinic_wq_allocate()
    602  u16 q_depth, u16 max_wqe_size)  in hinic_wqs_cmdq_alloc() argument
    620  if (q_depth & (q_depth - 1)) {  in hinic_wqs_cmdq_alloc()
    650  wq[i].q_depth = q_depth;  in hinic_wqs_cmdq_alloc()
    668  atomic_set(&wq[i].delta, q_depth);  in hinic_wqs_cmdq_alloc()
    [all …]
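
hinic rejects any q_depth that is not a power of two (the q_depth & (q_depth - 1) tests at lines 523 and 620), which is what lets it derive an index mask as q_depth - 1. A hypothetical standalone version of that setup:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical cut-down of the hinic WQ bookkeeping shown above. */
    struct wq {
        uint16_t q_depth;
        uint16_t mask;      /* q_depth - 1, only valid for power-of-two depths */
        uint16_t prod_idx;
    };

    static bool wq_init(struct wq *wq, uint16_t q_depth)
    {
        /* Power-of-two test as in hinic_wq_allocate(); the zero guard is
         * added for this standalone sketch. */
        if (q_depth == 0 || (q_depth & (q_depth - 1)))
            return false;

        wq->q_depth = q_depth;
        wq->mask = q_depth - 1;   /* wrap indices with prod_idx & mask */
        wq->prod_idx = 0;
        return true;
    }

    int main(void)
    {
        struct wq wq;

        printf("depth 256: %s\n", wq_init(&wq, 256) ? "ok" : "rejected");
        printf("depth 100: %s\n", wq_init(&wq, 100) ? "ok" : "rejected");
        return 0;
    }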
|
| hinic_hw_wq.h |
    30   u16 q_depth;  member
    80   u16 q_depth, u16 max_wqe_size);
    91   u16 wqebb_size, u32 wq_page_size, u16 q_depth,
|
| hinic_hw_cmdq.c |
    363  if (next_prod_idx >= wq->q_depth) {  in cmdq_sync_cmd_direct_resp()
    365  next_prod_idx -= wq->q_depth;  in cmdq_sync_cmd_direct_resp()
    442  if (next_prod_idx >= wq->q_depth) {  in cmdq_set_arm_bit()
    444  next_prod_idx -= wq->q_depth;  in cmdq_set_arm_bit()
    749  cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth));  in init_cmdq()
    754  wq->q_depth));  in init_cmdq()
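
The cmdq hits show the other wrap strategy: the producer index is advanced and then wrapped by subtraction rather than by masking. A sketch of that idiom, which also works for non-power-of-two depths so long as a single increment never exceeds q_depth:

    #include <stdint.h>
    #include <stdio.h>

    /* Wrap-by-subtraction as in cmdq_sync_cmd_direct_resp(): valid even
     * for non-power-of-two depths, provided num_wqebbs <= q_depth. */
    static uint16_t advance_prod_idx(uint16_t prod_idx, uint16_t num_wqebbs,
                                     uint16_t q_depth)
    {
        uint16_t next_prod_idx = prod_idx + num_wqebbs;

        if (next_prod_idx >= q_depth)
            next_prod_idx -= q_depth;
        return next_prod_idx;
    }

    int main(void)
    {
        /* depth 300 (not a power of two): 298 + 4 wraps to 2 */
        printf("%u\n", advance_prod_idx(298, 4, 300));
        return 0;
    }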
|
| hinic_hw_qp.c |
    224  skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb);  in alloc_sq_skb_arr()
    252  skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb);  in alloc_rq_skb_arr()
    324  cqe_size = wq->q_depth * sizeof(*rq->cqe);  in alloc_rq_cqe()
    329  cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma);  in alloc_rq_cqe()
    334  for (i = 0; i < wq->q_depth; i++) {  in alloc_rq_cqe()
    367  for (i = 0; i < wq->q_depth; i++)  in free_rq_cqe()
|
| /linux/drivers/net/ethernet/amazon/ena/ |
| ena_eth_com.c |
    15   head_masked = io_cq->head & (io_cq->q_depth - 1);  in ena_com_get_next_rx_cdesc()
    40   tail_masked = io_sq->tail & (io_sq->q_depth - 1);  in get_sq_desc_regular_queue()
    55   dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);  in ena_com_write_bounce_buffer_to_dev()
    83   if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))  in ena_com_write_bounce_buffer_to_dev()
    217  if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))  in ena_com_sq_update_tail()
    226  idx &= (io_cq->q_depth - 1);  in ena_com_rx_cdesc_idx_to_ptr()
    266  head_masked = io_cq->head & (io_cq->q_depth - 1);  in ena_com_cdesc_rx_pkt_get()
    551  u16 q_depth = io_cq->q_depth;  in ena_com_rx_pkt() local
    583  if (unlikely(ena_buf[i].req_id >= q_depth))  in ena_com_rx_pkt()
|
| ena_eth_com.h |
    82   return io_sq->q_depth - 1 - cnt;  in ena_com_free_q_entries()
    196  if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))  in ena_com_cq_inc_head()
    207  masked_head = io_cq->head & (io_cq->q_depth - 1);  in ena_com_tx_comp_req_id_get()
    225  if (unlikely(*req_id >= io_cq->q_depth)) {  in ena_com_tx_comp_req_id_get()
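
ena_com_free_q_entries() at line 82 computes free ring slots from free-running counters, keeping one slot reserved so a full ring never looks empty. A rough userspace model of that accounting, with field names taken from the snippets above:

    #include <stdint.h>
    #include <stdio.h>

    /* Rough model of the ENA ring accounting: tail and next_to_comp are
     * free-running 16-bit counters, so their difference is the in-flight
     * count even across wraparound; one slot stays reserved so "full"
     * never looks like "empty". */
    struct io_ring {
        uint16_t q_depth;       /* power of two */
        uint16_t tail;          /* bumped on submission, never masked */
        uint16_t next_to_comp;  /* bumped on completion */
    };

    static uint16_t free_entries(const struct io_ring *r)
    {
        uint16_t in_flight = (uint16_t)(r->tail - r->next_to_comp);

        return r->q_depth - 1 - in_flight;
    }

    int main(void)
    {
        /* 10 requests in flight on a 1024-deep ring -> 1013 free */
        struct io_ring r = { .q_depth = 1024, .tail = 65533, .next_to_comp = 65523 };

        printf("free=%u\n", free_entries(&r));
        return 0;
    }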
|
| ena_com.c |
    97   u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);  in ena_com_admin_init_sq()
    119  u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);  in ena_com_admin_init_cq()
    141  ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;  in ena_com_admin_init_aenq()
    150  aenq->head = aenq->q_depth;  in ena_com_admin_init_aenq()
    160  aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;  in ena_com_admin_init_aenq()
    186  if (unlikely(command_id >= admin_queue->q_depth)) {  in get_comp_ctxt()
    189  command_id, admin_queue->q_depth);  in get_comp_ctxt()
    222  queue_size_mask = admin_queue->q_depth - 1;  in __ena_com_submit_admin_cmd()
    228  if (cnt >= admin_queue->q_depth) {  in __ena_com_submit_admin_cmd()
    272  size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);  in ena_com_init_comp_ctxt()
    [all …]
|
| ena_com.h |
    129  u16 q_depth;  member
    171  u16 q_depth;  member
    228  u16 q_depth;  member
    255  u16 q_depth;  member
|
| /linux/drivers/net/ethernet/huawei/hinic3/ |
| hinic3_queue_common.c |
    9    void hinic3_queue_pages_init(struct hinic3_queue_pages *qpages, u32 q_depth,  in hinic3_queue_pages_init() argument
    14   elem_per_page = min(page_size / elem_size, q_depth);  in hinic3_queue_pages_init()
    18   qpages->num_pages = max(q_depth / elem_per_page, 1);  in hinic3_queue_pages_init()
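
hinic3_queue_pages_init() derives how many queue elements fit in a page and how many pages the queue needs. A small sketch of that arithmetic, assuming elem_size is no larger than page_size:

    #include <stdint.h>
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    /* Rough model of hinic3_queue_pages_init(): elem_per_page is capped at
     * q_depth so a shallow queue still lands in a single page. Assumes
     * elem_size <= page_size. */
    struct queue_pages {
        uint32_t elem_per_page;
        uint32_t num_pages;
    };

    static void queue_pages_init(struct queue_pages *qp, uint32_t q_depth,
                                 uint32_t page_size, uint32_t elem_size)
    {
        qp->elem_per_page = MIN(page_size / elem_size, q_depth);
        qp->num_pages = MAX(q_depth / qp->elem_per_page, 1u);
    }

    int main(void)
    {
        struct queue_pages qp;

        queue_pages_init(&qp, 1024, 4096, 32);   /* 128 elems/page -> 8 pages */
        printf("elem_per_page=%u num_pages=%u\n", qp.elem_per_page, qp.num_pages);
        return 0;
    }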
|
| hinic3_rx.c |
    80   rxq->q_depth = nic_dev->q_params.rq_depth;  in hinic3_alloc_rxqs()
    127  for (i = 0; i < rxq->q_depth; i++) {  in rq_associate_cqes()
    196  u32 q_depth)  in hinic3_free_rx_buffers() argument
    202  for (i = 0; i < q_depth; i++) {  in hinic3_free_rx_buffers()
    514  rxq->q_depth = rq_depth;  in hinic3_configure_rxqs()
    515  rxq->delta = rxq->q_depth;  in hinic3_configure_rxqs()
    516  rxq->q_mask = rxq->q_depth - 1;  in hinic3_configure_rxqs()
|
| hinic3_cmdq.c |
    515  if (next_prod_idx >= wq->q_depth) {  in cmdq_sync_cmd_direct_resp()
    517  next_prod_idx -= wq->q_depth;  in cmdq_sync_cmd_direct_resp()
    617  cmdq->cmd_infos = kzalloc_objs(*cmdq->cmd_infos, cmdq->wq.q_depth);  in init_cmdq()
    815  for (i = 0; i < cmdq->wq.q_depth; i++)  in hinic3_cmdq_reset_all_cmd_buf()
|
| hinic3_tx.c |
    59   txq->q_depth = nic_dev->q_params.sq_depth;  in hinic3_alloc_txqs()
    441  if (unlikely(curr_pi + wqebb_cnt >= sq->wq.q_depth))  in hinic3_get_and_update_sq_owner()
    738  txq->q_depth = sq_depth;  in hinic3_configure_txqs()
|
| /linux/drivers/net/ethernet/brocade/bna/ |
| bnad.c |
    79   for (i = 0; i < ccb->q_depth; i++) {  in bnad_cq_cleanup()
    92   u32 q_depth, u32 index)  in bnad_tx_buff_unmap() argument
    115  BNA_QE_INDX_INC(index, q_depth);  in bnad_tx_buff_unmap()
    127  BNA_QE_INDX_INC(index, q_depth);  in bnad_tx_buff_unmap()
    144  for (i = 0; i < tcb->q_depth; i++) {  in bnad_txq_cleanup()
    148  bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);  in bnad_txq_cleanup()
    163  u32 wis, unmap_wis, hw_cons, cons, q_depth;  in bnad_txcmpl_process() local
    175  q_depth = tcb->q_depth;  in bnad_txcmpl_process()
    177  wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);  in bnad_txcmpl_process()
    178  BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));  in bnad_txcmpl_process()
    [all …]
|
| bna_types.h |
    428  u32 q_depth;  member
    559  u32 q_depth;  member
    575  int q_depth;  member
    623  u32 q_depth;  member
|
| /linux/drivers/nvme/host/ |
| pci.c |
    34   #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
    35   #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))
    304  u32 q_depth;  member
    376  u32 q_depth;  member
    718  if (next_tail == nvmeq->q_depth)  in nvme_write_sq_db()
    735  if (++nvmeq->sq_tail == nvmeq->q_depth)  in nvme_sq_copy_cmd()
    1570 if (tmp == nvmeq->q_depth) {  in nvme_update_cq_head()
    1735 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);  in adapter_alloc_cq()
    1764 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);  in adapter_alloc_sq()
    2055 int q_depth = dev->q_depth;  in nvme_cmb_qdepth() local
    [all …]
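
The nvme-pci macros size the submission queue as q_depth << sqes, where sqes is the log2 of the entry size, and report a zero-based qsize of q_depth - 1 to the controller (lines 1735 and 1764). A sketch of that math, assuming the standard 64-byte command (sqes = 6) and 16-byte completion entry rather than the driver's sizeof(struct nvme_completion):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the nvme-pci sizing math above; the 16-byte completion
     * entry size is assumed here, not taken from the kernel headers. */
    struct nvme_queue_model {
        uint32_t q_depth;
        uint8_t  sqes;   /* log2 of the submission entry size */
    };

    int main(void)
    {
        struct nvme_queue_model q = { .q_depth = 1024, .sqes = 6 };
        uint32_t sq_bytes = q.q_depth << q.sqes;       /* 64 KiB */
        uint32_t cq_bytes = q.q_depth * 16u;           /* 16 KiB */
        uint16_t qsize = (uint16_t)(q.q_depth - 1);    /* zero-based, as at lines 1735/1764 */

        printf("sq=%u cq=%u qsize=%u\n", sq_bytes, cq_bytes, qsize);
        return 0;
    }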
|
| /linux/drivers/net/ethernet/marvell/octeon_ep/ |
| octep_ctrl_mbox.c |
    228  u32 pi, ci, r_sz, buf_sz, q_depth;  in octep_ctrl_mbox_recv() local
    240  q_depth = octep_ctrl_mbox_circq_depth(pi, ci, q->sz);  in octep_ctrl_mbox_recv()
    241  if (q_depth < mbox_hdr_sz) {  in octep_ctrl_mbox_recv()
|
| /linux/drivers/crypto/hisilicon/sec2/ |
| sec_crypto.c |
    389  u16 q_depth = res->depth;  in sec_alloc_civ_resource() local
    392  res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),  in sec_alloc_civ_resource()
    397  for (i = 1; i < q_depth; i++) {  in sec_alloc_civ_resource()
    414  u16 q_depth = res->depth;  in sec_alloc_aiv_resource() local
    417  res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),  in sec_alloc_aiv_resource()
    422  for (i = 1; i < q_depth; i++) {  in sec_alloc_aiv_resource()
    439  u16 q_depth = res->depth;  in sec_alloc_mac_resource() local
    442  res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1,  in sec_alloc_mac_resource()
    447  for (i = 1; i < q_depth; i++) {  in sec_alloc_mac_resource()
    476  u16 q_depth = res->depth;  in sec_alloc_pbuf_resource() local
    [all …]
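
The sec2 allocators grab one DMA-coherent block sized for the whole queue and slice it into per-request buffers (the for (i = 1; ...) loops above). A userspace model of that slicing, with calloc standing in for dma_alloc_coherent() and an illustrative IV size:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define IV_SIZE 16u   /* illustrative per-request IV size, not the driver's constant */

    /* One large allocation for the whole queue, sliced into per-request
     * buffers; request i gets base + i * IV_SIZE. */
    struct req_slot {
        uint8_t *c_ivin;
    };

    int main(void)
    {
        uint16_t q_depth = 8;
        uint8_t *base = calloc(q_depth, IV_SIZE);
        struct req_slot *slots = calloc(q_depth, sizeof(*slots));

        if (!base || !slots)
            return 1;

        slots[0].c_ivin = base;
        for (uint16_t i = 1; i < q_depth; i++)   /* same loop shape as sec_alloc_civ_resource() */
            slots[i].c_ivin = slots[i - 1].c_ivin + IV_SIZE;

        printf("slot 5 offset = %td\n", slots[5].c_ivin - base);  /* 80 */
        free(slots);
        free(base);
        return 0;
    }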
|
| /linux/drivers/crypto/hisilicon/zip/ |
| zip_crypto.c |
    464  u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;  in hisi_zip_create_req_q() local
    470  req_q->size = q_depth;  in hisi_zip_create_req_q()
    515  u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;  in hisi_zip_create_sgl_pool() local
    523  tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, q_depth << 1,  in hisi_zip_create_sgl_pool()
|
| /linux/drivers/net/ethernet/fungible/funcore/ |
| fun_dev.h |
    69   unsigned int q_depth; /* max queue depth supported by device */  member
|
| fun_queue.c |
    89   if (sq_depth > fdev->q_depth)  in fun_sq_create()
    138  if (cq_depth > fdev->q_depth)  in fun_cq_create()
|
| /linux/tools/testing/selftests/ublk/ |
| kublk.h |
    172  int q_depth;  member
    547  return ublk_queue_idx_in_thread(t, q) * q->q_depth + tag;  in ublk_batch_io_buf_idx()
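
ublk_batch_io_buf_idx() at line 547 flattens (queue slot within thread, tag) into a single buffer index, so each queue owns a contiguous block of q_depth slots. A hypothetical standalone version of that mapping:

    #include <stdio.h>

    /* Flat buffer index: queue_idx_in_thread selects the block of q_depth
     * slots, tag selects the slot within it. */
    static int batch_io_buf_idx(int queue_idx_in_thread, int q_depth, int tag)
    {
        return queue_idx_in_thread * q_depth + tag;
    }

    int main(void)
    {
        /* queue 2 within the thread, depth 128, tag 5 -> slot 261 */
        printf("%d\n", batch_io_buf_idx(2, 128, 5));
        return 0;
    }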
|
| kublk.c |
    417  return __ublk_queue_cmd_buf_sz(q->q_depth);  in ublk_queue_cmd_buf_sz()
    423  int nr_ios = q->q_depth;  in ublk_queue_deinit()
    461  q->q_depth = depth;  in ublk_queue_init()
    481  for (i = 0; i < q->q_depth; i++) {  in ublk_queue_init()
    807  for (i = 0; i < q->q_depth; i++) {  in ublk_submit_fetch_commands()
    856  ublk_assert(tag < q->q_depth);  in ublk_handle_uring_cmd()
|
| /linux/drivers/scsi/mpi3mr/ |
| mpi3mr_os.c |
    1018 int q_depth)  in mpi3mr_change_queue_depth() argument
    1025 q_depth = 1;  in mpi3mr_change_queue_depth()
    1026 if (q_depth > shost->can_queue)  in mpi3mr_change_queue_depth()
    1027 q_depth = shost->can_queue;  in mpi3mr_change_queue_depth()
    1028 else if (!q_depth)  in mpi3mr_change_queue_depth()
    1029 q_depth = MPI3MR_DEFAULT_SDEV_QD;  in mpi3mr_change_queue_depth()
    1030 retval = scsi_change_queue_depth(sdev, q_depth);  in mpi3mr_change_queue_depth()
    1076 mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);  in mpi3mr_update_sdev()
    1256 tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);  in mpi3mr_update_tgtdev()
    1421 tg->fw_qd = tgtdev->q_depth;  in mpi3mr_update_tgtdev()
    [all …]
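
mpi3mr_change_queue_depth() clamps the requested depth to the host's can_queue and falls back to a default when zero is requested (lines 1026-1029). A sketch of the clamping, with an illustrative default value:

    #include <stdio.h>

    #define DEFAULT_SDEV_QD 32   /* stand-in for MPI3MR_DEFAULT_SDEV_QD */

    /* Clamping as in mpi3mr_change_queue_depth(): cap at can_queue, fall
     * back to a default when zero is requested. */
    static int clamp_queue_depth(int q_depth, int can_queue)
    {
        if (q_depth > can_queue)
            q_depth = can_queue;
        else if (!q_depth)
            q_depth = DEFAULT_SDEV_QD;
        return q_depth;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               clamp_queue_depth(0, 1024),      /* -> 32 */
               clamp_queue_depth(4096, 1024),   /* -> 1024 */
               clamp_queue_depth(64, 1024));    /* -> 64 */
        return 0;
    }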
|
| /linux/drivers/block/ |
| ublk_drv.c |
    233  int q_depth;  member
    289  struct ublk_io ios[] __counted_by(q_depth);
    2282 for (i = 0; i < ubq->q_depth; i++) {  in ublk_queue_reinit()
    2406 for (j = 0; j < ubq->q_depth; j++) {  in ublk_check_and_reset_active_ref()
    2625 for (i = 0; i < ubq->q_depth; i++) {  in ublk_abort_queue()
    2783 if (WARN_ON_ONCE(pdu->tag >= ubq->q_depth))  in ublk_uring_cmd_cancel_fn()
    2799 return ubq->nr_io_ready == ubq->q_depth;  in ublk_queue_ready()
    2816 for (i = 0; i < ubq->q_depth; i++)  in ublk_cancel_queue()
    2920 for (j = 0; j < ubq->q_depth; j++)  in ublk_queue_reset_io_flags()
    4051 for (i = 0; i < ubq->q_depth; i++) {  in __ublk_deinit_queue()
    [all …]
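
ublk_drv keeps per-queue I/O state in a flexible array member annotated __counted_by(q_depth) (line 289), letting bounds-checking compilers tie the array length to the member. A portable userspace model of the same allocation pattern, with the attribute itself omitted:

    #include <stdio.h>
    #include <stdlib.h>

    struct io_model {
        int tag;
    };

    /* Flexible array member sized by q_depth; in the driver the array
     * carries __counted_by(q_depth). */
    struct queue_model {
        int q_depth;
        struct io_model ios[];
    };

    static struct queue_model *queue_alloc(int q_depth)
    {
        struct queue_model *q =
            malloc(sizeof(*q) + (size_t)q_depth * sizeof(q->ios[0]));

        if (q)
            q->q_depth = q_depth;
        return q;
    }

    int main(void)
    {
        struct queue_model *q = queue_alloc(128);

        if (!q)
            return 1;
        for (int i = 0; i < q->q_depth; i++)   /* loop bound mirrors ublk_abort_queue() */
            q->ios[i].tag = i;
        printf("last tag=%d\n", q->ios[q->q_depth - 1].tag);
        free(q);
        return 0;
    }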
|