/linux/drivers/net/ethernet/microsoft/mana/

hw_channel.c
  357  static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,  [in mana_hwc_create_cq(), argument]
  370  eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);  [in mana_hwc_create_cq()]
  374  cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);  [in mana_hwc_create_cq()]
  397  comp_buf = kcalloc(q_depth, sizeof(*comp_buf), GFP_KERNEL);  [in mana_hwc_create_cq()]
  405  hwc_cq->queue_depth = q_depth;  [in mana_hwc_create_cq()]
  418  static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,  [in mana_hwc_alloc_dma_buf(), argument]
  432  dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);  [in mana_hwc_alloc_dma_buf()]
  436  dma_buf->num_reqs = q_depth;  [in mana_hwc_alloc_dma_buf()]
  438  buf_size = MANA_PAGE_ALIGN(q_depth * max_msg_size);  [in mana_hwc_alloc_dma_buf()]
  450  for (i = 0; i < q_depth; i++) {  [in mana_hwc_alloc_dma_buf()]
  [all …]

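The hw_channel.c hits above show the HWC sizing its event and completion queues by rounding (entry size * q_depth) up to a power of two before allocating them. A minimal userspace sketch of that sizing rule follows; the entry-size constants and the helper are stand-ins, not the MANA/GDMA definitions.

#include <stddef.h>
#include <stdint.h>

/* Stand-in entry sizes; the real GDMA_EQE_SIZE / GDMA_CQE_SIZE come from the MANA headers. */
#define EQE_SIZE 16u
#define CQE_SIZE 64u

/* Round v (v > 0) up to the next power of two, like the kernel's roundup_pow_of_two(). */
static size_t roundup_pow2(size_t v)
{
    size_t p = 1;

    while (p < v)
        p <<= 1;
    return p;
}

/* Buffer sizes for a queue pair of q_depth entries, as mana_hwc_create_cq() computes them. */
static void hwc_queue_sizes(uint16_t q_depth, size_t *eq_size, size_t *cq_size)
{
    *eq_size = roundup_pow2((size_t)EQE_SIZE * q_depth);
    *cq_size = roundup_pow2((size_t)CQE_SIZE * q_depth);
}

Rounding up to a power of two lets hardware and driver wrap queue indices with a mask instead of a divide.
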
/linux/drivers/net/ethernet/huawei/hinic/

hinic_hw_wq.c
  34   #define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size)
  504  u16 wqebb_size, u32 wq_page_size, u16 q_depth,  [in hinic_wq_allocate(), argument]
  523  if (q_depth & (q_depth - 1)) {  [in hinic_wq_allocate()]
  547  wq->q_depth = q_depth;  [in hinic_wq_allocate()]
  564  atomic_set(&wq->delta, q_depth);  [in hinic_wq_allocate()]
  565  wq->mask = q_depth - 1;  [in hinic_wq_allocate()]
  602  u16 q_depth, u16 max_wqe_size)  [in hinic_wqs_cmdq_alloc(), argument]
  620  if (q_depth & (q_depth - 1)) {  [in hinic_wqs_cmdq_alloc()]
  650  wq[i].q_depth = q_depth;  [in hinic_wqs_cmdq_alloc()]
  668  atomic_set(&wq[i].delta, q_depth);  [in hinic_wqs_cmdq_alloc()]
  [all …]

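hinic_wq_allocate() and hinic_wqs_cmdq_alloc() both reject a q_depth that is not a power of two and then derive the ring mask and the free-entry counter from it. A hedged sketch of that validation, with illustrative names:

#include <stdint.h>
#include <errno.h>

struct wq_sketch {
    uint16_t q_depth;  /* number of WQEBBs in the ring */
    uint16_t mask;     /* q_depth - 1, valid only for power-of-two depths */
    int      delta;    /* free entries, starts at q_depth */
};

/* Mirror of the "q_depth & (q_depth - 1)" check seen in hinic_wq_allocate(). */
static int wq_init_depth(struct wq_sketch *wq, uint16_t q_depth)
{
    if (q_depth == 0 || (q_depth & (q_depth - 1)))
        return -EINVAL;            /* depth must be a non-zero power of two */

    wq->q_depth = q_depth;
    wq->mask = q_depth - 1;        /* index & mask replaces index % q_depth */
    wq->delta = q_depth;           /* whole ring is free initially */
    return 0;
}
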
hinic_hw_wq.h
  30   u16 q_depth;  [member]
  80   u16 q_depth, u16 max_wqe_size);
  91   u16 wqebb_size, u32 wq_page_size, u16 q_depth,

hinic_hw_cmdq.c
  363  if (next_prod_idx >= wq->q_depth) {  [in cmdq_sync_cmd_direct_resp()]
  365  next_prod_idx -= wq->q_depth;  [in cmdq_sync_cmd_direct_resp()]
  442  if (next_prod_idx >= wq->q_depth) {  [in cmdq_set_arm_bit()]
  444  next_prod_idx -= wq->q_depth;  [in cmdq_set_arm_bit()]
  749  cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth));  [in init_cmdq()]
  754  wq->q_depth));  [in init_cmdq()]

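The cmdq hits show the producer index being wrapped by subtracting q_depth once it runs past the end of the ring, which also works when the depth is not a power of two. A small sketch of that wrap, assuming the index never advances by more than q_depth in one step (the name is illustrative):

#include <stdint.h>

/*
 * Advance a producer index by num_wqebbs and wrap it, as the hinic cmdq does
 * with "if (next_prod_idx >= wq->q_depth) next_prod_idx -= wq->q_depth;".
 * Valid for any q_depth as long as num_wqebbs <= q_depth.
 */
static uint16_t cmdq_advance_pi(uint16_t prod_idx, uint16_t num_wqebbs,
                                uint16_t q_depth)
{
    uint32_t next = (uint32_t)prod_idx + num_wqebbs;

    if (next >= q_depth)
        next -= q_depth;
    return (uint16_t)next;
}
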
hinic_hw_qp.c
  224  skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb);  [in alloc_sq_skb_arr()]
  252  skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb);  [in alloc_rq_skb_arr()]
  324  cqe_size = wq->q_depth * sizeof(*rq->cqe);  [in alloc_rq_cqe()]
  329  cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma);  [in alloc_rq_cqe()]
  334  for (i = 0; i < wq->q_depth; i++) {  [in alloc_rq_cqe()]
  367  for (i = 0; i < wq->q_depth; i++)  [in free_rq_cqe()]

/linux/drivers/net/ethernet/amazon/ena/

ena_eth_com.c
  15   head_masked = io_cq->head & (io_cq->q_depth - 1);  [in ena_com_get_next_rx_cdesc()]
  40   tail_masked = io_sq->tail & (io_sq->q_depth - 1);  [in get_sq_desc_regular_queue()]
  55   dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);  [in ena_com_write_bounce_buffer_to_dev()]
  83   if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))  [in ena_com_write_bounce_buffer_to_dev()]
  217  if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))  [in ena_com_sq_update_tail()]
  226  idx &= (io_cq->q_depth - 1);  [in ena_com_rx_cdesc_idx_to_ptr()]
  266  head_masked = io_cq->head & (io_cq->q_depth - 1);  [in ena_com_cdesc_rx_pkt_get()]
  551  u16 q_depth = io_cq->q_depth;  [in ena_com_rx_pkt(), local]
  583  if (unlikely(ena_buf[i].req_id >= q_depth))  [in ena_com_rx_pkt()]

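ena_eth_com.c keeps head and tail as free-running counters and masks them with q_depth - 1 on every access, so the queue depth must be a power of two; it also bounds-checks the req_id the device reports against q_depth before using it as an array index. A minimal sketch of both patterns (names are illustrative, not the ena_com API):

#include <stdint.h>
#include <stdbool.h>

/* Map a free-running head/tail counter onto a power-of-two ring. */
static inline uint16_t ring_masked(uint16_t idx, uint16_t q_depth)
{
    return idx & (q_depth - 1);   /* valid only when q_depth is a power of two */
}

/* Reject a request id reported by the device that cannot index the ring. */
static inline bool req_id_valid(uint16_t req_id, uint16_t q_depth)
{
    return req_id < q_depth;      /* mirrors the unlikely(req_id >= q_depth) checks */
}
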
ena_eth_com.h
  82   return io_sq->q_depth - 1 - cnt;  [in ena_com_free_q_entries()]
  196  if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))  [in ena_com_cq_inc_head()]
  207  masked_head = io_cq->head & (io_cq->q_depth - 1);  [in ena_com_tx_comp_req_id_get()]
  225  if (unlikely(*req_id >= io_cq->q_depth)) {  [in ena_com_tx_comp_req_id_get()]

ena_com.c
  91   u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);  [in ena_com_admin_init_sq()]
  113  u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);  [in ena_com_admin_init_cq()]
  135  ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;  [in ena_com_admin_init_aenq()]
  144  aenq->head = aenq->q_depth;  [in ena_com_admin_init_aenq()]
  154  aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;  [in ena_com_admin_init_aenq()]
  180  if (unlikely(command_id >= admin_queue->q_depth)) {  [in get_comp_ctxt()]
  183  command_id, admin_queue->q_depth);  [in get_comp_ctxt()]
  216  queue_size_mask = admin_queue->q_depth - 1;  [in __ena_com_submit_admin_cmd()]
  222  if (cnt >= admin_queue->q_depth) {  [in __ena_com_submit_admin_cmd()]
  266  size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);  [in ena_com_init_comp_ctxt()]
  [all …]

ena_com.h
  129  u16 q_depth;  [member]
  171  u16 q_depth;  [member]
  220  u16 q_depth;  [member]
  247  u16 q_depth;  [member]

/linux/drivers/net/ethernet/brocade/bna/

bnad.c
  78   for (i = 0; i < ccb->q_depth; i++) {  [in bnad_cq_cleanup()]
  91   u32 q_depth, u32 index)  [in bnad_tx_buff_unmap(), argument]
  114  BNA_QE_INDX_INC(index, q_depth);  [in bnad_tx_buff_unmap()]
  126  BNA_QE_INDX_INC(index, q_depth);  [in bnad_tx_buff_unmap()]
  143  for (i = 0; i < tcb->q_depth; i++) {  [in bnad_txq_cleanup()]
  147  bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);  [in bnad_txq_cleanup()]
  162  u32 wis, unmap_wis, hw_cons, cons, q_depth;  [in bnad_txcmpl_process(), local]
  174  q_depth = tcb->q_depth;  [in bnad_txcmpl_process()]
  176  wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);  [in bnad_txcmpl_process()]
  177  BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));  [in bnad_txcmpl_process()]
  [all …]

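bnad.c steps through rings with BNA_QE_INDX_INC(index, q_depth) and works out how many work items the hardware completed with BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth). The sketch below shows what such macros typically reduce to for a power-of-two depth; the macro names are stand-ins, not the bna header definitions.

#include <stdint.h>

/* Advance a ring index by one, wrapping at q_depth (q_depth is a power of two). */
#define QE_INDX_INC(idx, q_depth)  ((idx) = ((idx) + 1) & ((q_depth) - 1))

/* Number of entries between old_idx and new_idx on a ring of q_depth entries. */
#define Q_INDEX_CHANGE(old_idx, new_idx, q_depth) \
    (((new_idx) - (old_idx)) & ((q_depth) - 1))

/* Completions the hardware has produced since the driver's consumer index. */
static uint32_t txcmpl_entries(uint32_t cons, uint32_t hw_cons, uint32_t q_depth)
{
    return Q_INDEX_CHANGE(cons, hw_cons, q_depth);
}
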
bna_types.h
  428  u32 q_depth;  [member]
  559  u32 q_depth;  [member]
  575  int q_depth;  [member]
  623  u32 q_depth;  [member]

bfa_msgq.c
  516  msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);  [in bfa_msgq_init()]
  518  msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth);  [in bfa_msgq_init()]

bfi.h
  413  u16 q_depth;  /* Total num of entries in the queue */  [member]

bna_tx_rx.c
  2385  q0->rcb->q_depth = rx_cfg->q0_depth;  [in bna_rx_create()]
  2386  q0->q_depth = rx_cfg->q0_depth;  [in bna_rx_create()]
  2412  q1->rcb->q_depth = rx_cfg->q1_depth;  [in bna_rx_create()]
  2413  q1->q_depth = rx_cfg->q1_depth;  [in bna_rx_create()]
  2444  rxp->cq.ccb->q_depth = cq_depth;  [in bna_rx_create()]
  3466  txq->tcb->q_depth = tx_cfg->txq_depth;  [in bna_tx_create()]

/linux/drivers/nvme/host/

pci.c
  35    #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
  36    #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))
  131   u32 q_depth;  [member]
  203   u32 q_depth;  [member]
  482   if (next_tail == nvmeq->q_depth)  [in nvme_write_sq_db()]
  499   if (++nvmeq->sq_tail == nvmeq->q_depth)  [in nvme_sq_copy_cmd()]
  1143  if (tmp == nvmeq->q_depth) {  [in nvme_update_cq_head()]
  1300  c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);  [in adapter_alloc_cq()]
  1329  c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);  [in adapter_alloc_sq()]
  1608  int q_depth = dev->q_depth;  [in nvme_cmb_qdepth(), local]
  [all …]

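In pci.c the submission queue occupies q_depth << sqes bytes (sqes is the log2 of the SQ entry size) and the completion queue q_depth * sizeof(struct nvme_completion); the create-queue commands are sent qsize = q_depth - 1 because NVMe encodes queue sizes as zero-based values. A hedged sketch of that arithmetic, with a stand-in struct rather than the driver's nvme_queue:

#include <stddef.h>
#include <stdint.h>

#define NVME_CQE_SIZE 16u  /* a completion queue entry is 16 bytes per the NVMe spec */

struct nvme_queue_sketch {
    uint32_t q_depth;  /* number of entries */
    uint8_t  sqes;     /* log2 of the SQ entry size, 6 for the standard 64-byte SQE */
};

static size_t sq_bytes(const struct nvme_queue_sketch *q)
{
    return (size_t)q->q_depth << q->sqes;        /* SQ_SIZE(): entries * 2^sqes */
}

static size_t cq_bytes(const struct nvme_queue_sketch *q)
{
    return (size_t)q->q_depth * NVME_CQE_SIZE;   /* CQ_SIZE() */
}

/* Value placed in the create-queue command: queue sizes are zero-based on the wire. */
static uint16_t qsize_field(const struct nvme_queue_sketch *q)
{
    return (uint16_t)(q->q_depth - 1);
}
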
/linux/drivers/net/ethernet/marvell/octeon_ep/

octep_ctrl_mbox.c
  228  u32 pi, ci, r_sz, buf_sz, q_depth;  [in octep_ctrl_mbox_recv(), local]
  240  q_depth = octep_ctrl_mbox_circq_depth(pi, ci, q->sz);  [in octep_ctrl_mbox_recv()]
  241  if (q_depth < mbox_hdr_sz) {  [in octep_ctrl_mbox_recv()]

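octep_ctrl_mbox_recv() first works out how many bytes sit in the circular mailbox from the producer and consumer offsets and bails out if even a full message header is not available yet. The sketch below shows one common way such a depth helper is written, assuming the ring size is a power of two; the names are illustrative, not the octep_ctrl_mbox helpers.

#include <stdint.h>

/*
 * Bytes currently queued in a circular buffer, given producer (pi) and
 * consumer (ci) offsets. Assumes sz is a power of two so the wrap can be
 * handled with a mask; a non-power-of-two size would need a modulo.
 */
static uint32_t circq_depth(uint32_t pi, uint32_t ci, uint32_t sz)
{
    return (pi - ci) & (sz - 1);
}

/* The receive path gives up when fewer bytes than a message header are queued. */
static int have_full_hdr(uint32_t pi, uint32_t ci, uint32_t sz, uint32_t hdr_sz)
{
    return circq_depth(pi, ci, sz) >= hdr_sz;
}
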
/linux/drivers/crypto/hisilicon/zip/

zip_crypto.c
  446  u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;  [in hisi_zip_create_req_q(), local]
  452  req_q->size = q_depth;  [in hisi_zip_create_req_q()]
  498  u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;  [in hisi_zip_create_sgl_pool(), local]
  506  tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, q_depth << 1,  [in hisi_zip_create_sgl_pool()]

/linux/drivers/block/

ublk_drv.c
  138   int q_depth;  [member]
  681   return __ublk_queue_cmd_buf_size(ubq->q_depth);  [in ublk_queue_cmd_buf_size()]
  1271  for (i = 0; i < ubq->q_depth; i++) {  [in ublk_timeout()]
  1279  if (nr_inflight == ubq->q_depth) {  [in ublk_timeout()]
  1436  for (i = 0; i < ubq->q_depth; i++) {  [in ublk_abort_queue()]
  1522  if (WARN_ON_ONCE(pdu->tag >= ubq->q_depth))  [in ublk_uring_cmd_cancel_fn()]
  1543  return ubq->nr_io_ready == ubq->q_depth;  [in ublk_queue_ready()]
  1550  for (i = 0; i < ubq->q_depth; i++)  [in ublk_cancel_queue()]
  1768  if (tag >= ubq->q_depth)  [in __ublk_ch_uring_cmd()]
  1985  if (tag >= ubq->q_depth)  [in ublk_check_and_get_req()]
  [all …]

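The ublk_drv.c hits size the per-queue command buffer from q_depth, iterate all q_depth tags when aborting or cancelling, bounds-check tags coming from userspace, and only treat a queue as ready once nr_io_ready equals q_depth. A small sketch of the ready and bounds checks, using an illustrative struct rather than the driver's ublk_queue:

#include <stdbool.h>
#include <stdint.h>

struct ubq_sketch {
    int q_depth;       /* number of I/O slots (tags) in the queue */
    int nr_io_ready;   /* slots for which userspace has posted a fetch command */
};

/* A tag coming from userspace must index one of the q_depth slots. */
static bool ubq_tag_valid(const struct ubq_sketch *ubq, uint16_t tag)
{
    return tag < (uint16_t)ubq->q_depth;
}

/* The queue can start serving block I/O only once every slot is armed. */
static bool ubq_ready(const struct ubq_sketch *ubq)
{
    return ubq->nr_io_ready == ubq->q_depth;
}
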
/linux/drivers/net/ethernet/fungible/funcore/

fun_dev.h
  69   unsigned int q_depth;  /* max queue depth supported by device */  [member]

fun_dev.c
  783  fdev->q_depth = NVME_CAP_MQES(fdev->cap_reg) + 1;  [in fun_dev_enable()]
  813  fdev->q_depth, fdev->db_stride, fdev->max_qid,  [in fun_dev_enable()]

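fun_dev_enable() derives the device's maximum queue depth from the MQES field of its NVMe-style capability register, adding one because MQES is zero-based. A hedged sketch of that computation; the macro mirrors the NVMe CAP layout (MQES in bits 15:0) but is a stand-in for the kernel's NVME_CAP_MQES():

#include <stdint.h>

/* MQES occupies bits 15:0 of the NVMe CAP register and is zero-based. */
#define CAP_MQES(cap)  ((uint16_t)((cap) & 0xffff))

/* Maximum usable queue depth, as fun_dev_enable() computes it. */
static unsigned int fun_max_q_depth(uint64_t cap_reg)
{
    return (unsigned int)CAP_MQES(cap_reg) + 1;
}
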
/linux/drivers/scsi/mpi3mr/

mpi3mr_os.c
  1007  int q_depth)  [in mpi3mr_change_queue_depth(), argument]
  1014  q_depth = 1;  [in mpi3mr_change_queue_depth()]
  1015  if (q_depth > shost->can_queue)  [in mpi3mr_change_queue_depth()]
  1016  q_depth = shost->can_queue;  [in mpi3mr_change_queue_depth()]
  1017  else if (!q_depth)  [in mpi3mr_change_queue_depth()]
  1018  q_depth = MPI3MR_DEFAULT_SDEV_QD;  [in mpi3mr_change_queue_depth()]
  1019  retval = scsi_change_queue_depth(sdev, q_depth);  [in mpi3mr_change_queue_depth()]
  1065  mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);  [in mpi3mr_update_sdev()]
  1158  tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);  [in mpi3mr_update_tgtdev()]
  1313  tg->fw_qd = tgtdev->q_depth;  [in mpi3mr_update_tgtdev()]
  [all …]

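mpi3mr_change_queue_depth() clamps the requested depth to the host's can_queue limit and substitutes a driver default when 0 is passed, before handing the result to scsi_change_queue_depth(). A standalone sketch of that clamping policy; the default value here is a stand-in for MPI3MR_DEFAULT_SDEV_QD:

#define DEFAULT_SDEV_QD 32  /* stand-in for MPI3MR_DEFAULT_SDEV_QD */

/* Clamp a requested per-device queue depth against the host's limit. */
static int clamp_sdev_queue_depth(int q_depth, int can_queue)
{
    if (q_depth > can_queue)
        q_depth = can_queue;       /* never exceed what the host can queue */
    else if (!q_depth)
        q_depth = DEFAULT_SDEV_QD; /* 0 means "use the driver default" */
    return q_depth;
}
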
mpi3mr.h
  755  u16 q_depth;  [member]

/linux/drivers/crypto/hisilicon/

debugfs.c
  281  u32 *e_id, u32 *q_id, u16 q_depth)  [in q_dump_param_parse(), argument]
  307  if (ret || *e_id >= q_depth) {  [in q_dump_param_parse()]
  308  dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1);  [in q_dump_param_parse()]

/linux/drivers/net/ethernet/fungible/funeth/

funeth_main.c
  1781  fp->sq_depth = min(SQ_DEPTH, fdev->q_depth);  [in fun_create_netdev()]
  1782  fp->cq_depth = min(CQ_DEPTH, fdev->q_depth);  [in fun_create_netdev()]
  1783  fp->rq_depth = min_t(unsigned int, RQ_DEPTH, fdev->q_depth);  [in fun_create_netdev()]

/linux/drivers/scsi/bfa/

bfa_defs_svc.h
  958  u16 q_depth;  /* SCSI Queue depth */  [member]