
Searched refs: q_depth (Results 1 – 22 of 22) sorted by relevance

/linux/drivers/net/ethernet/huawei/hinic/
hinic_hw_wq.c
34 #define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size)
504 u16 wqebb_size, u32 wq_page_size, u16 q_depth, in hinic_wq_allocate() argument
523 if (q_depth & (q_depth - 1)) { in hinic_wq_allocate()
547 wq->q_depth = q_depth; in hinic_wq_allocate()
564 atomic_set(&wq->delta, q_depth); in hinic_wq_allocate()
565 wq->mask = q_depth - 1; in hinic_wq_allocate()
602 u16 q_depth, u16 max_wqe_size) in hinic_wqs_cmdq_alloc() argument
620 if (q_depth & (q_depth - 1)) { in hinic_wqs_cmdq_alloc()
650 wq[i].q_depth = q_depth; in hinic_wqs_cmdq_alloc()
668 atomic_set(&wq[i].delta, q_depth); in hinic_wqs_cmdq_alloc()
[all …]
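
The hinic hits above show the standard ring-buffer convention: the work-queue depth must be a power of two so indices can wrap with a bitwise AND instead of a division. A minimal sketch of that check and the mask it enables, using illustrative names rather than the driver's own types:

    #include <stdint.h>
    #include <stdbool.h>

    struct wq_sketch {
        uint16_t q_depth; /* number of entries; must be a power of two */
        uint16_t mask;    /* q_depth - 1; idx & mask == idx % q_depth */
    };

    static bool wq_init(struct wq_sketch *wq, uint16_t q_depth)
    {
        /* A power of two has exactly one bit set, so x & (x - 1) == 0. */
        if (q_depth == 0 || (q_depth & (q_depth - 1)))
            return false; /* reject non-power-of-two depths */
        wq->q_depth = q_depth;
        wq->mask = q_depth - 1;
        return true;
    }
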
hinic_hw_wq.h
30 u16 q_depth; member
80 u16 q_depth, u16 max_wqe_size);
91 u16 wqebb_size, u32 wq_page_size, u16 q_depth,
hinic_hw_cmdq.c
363 if (next_prod_idx >= wq->q_depth) { in cmdq_sync_cmd_direct_resp()
365 next_prod_idx -= wq->q_depth; in cmdq_sync_cmd_direct_resp()
442 if (next_prod_idx >= wq->q_depth) { in cmdq_set_arm_bit()
444 next_prod_idx -= wq->q_depth; in cmdq_set_arm_bit()
749 cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth)); in init_cmdq()
754 wq->q_depth)); in init_cmdq()
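
The cmdq hits show the other common wrap strategy: the producer index is advanced and folded back with a single conditional subtraction. A sketch with hypothetical names:

    #include <stdint.h>

    static uint16_t advance_prod_idx(uint16_t prod_idx, uint16_t num_wqebbs,
                                     uint16_t q_depth)
    {
        uint32_t next = (uint32_t)prod_idx + num_wqebbs;

        /* One subtraction suffices because prod_idx < q_depth and
         * num_wqebbs <= q_depth, so next < 2 * q_depth. */
        if (next >= q_depth)
            next -= q_depth;
        return (uint16_t)next;
    }
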
hinic_hw_qp.c
224 skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb); in alloc_sq_skb_arr()
252 skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb); in alloc_rq_skb_arr()
324 cqe_size = wq->q_depth * sizeof(*rq->cqe); in alloc_rq_cqe()
329 cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma); in alloc_rq_cqe()
334 for (i = 0; i < wq->q_depth; i++) { in alloc_rq_cqe()
367 for (i = 0; i < wq->q_depth; i++) in free_rq_cqe()
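
The qp hits size every per-descriptor side array (saved skb pointers, receive CQEs) as q_depth elements, one slot per ring entry. A sketch, with calloc standing in for the kernel's vzalloc/DMA allocations:

    #include <stdlib.h>
    #include <stdint.h>

    struct rq_sketch {
        uint16_t q_depth;
        void   **saved_buf; /* one bookkeeping slot per ring entry */
    };

    static int rq_alloc_bookkeeping(struct rq_sketch *rq)
    {
        rq->saved_buf = calloc(rq->q_depth, sizeof(*rq->saved_buf));
        return rq->saved_buf ? 0 : -1;
    }
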
/linux/drivers/net/ethernet/amazon/ena/
ena_eth_com.c
15 head_masked = io_cq->head & (io_cq->q_depth - 1); in ena_com_get_next_rx_cdesc()
40 tail_masked = io_sq->tail & (io_sq->q_depth - 1); in get_sq_desc_regular_queue()
55 dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1); in ena_com_write_bounce_buffer_to_dev()
83 if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) in ena_com_write_bounce_buffer_to_dev()
217 if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) in ena_com_sq_update_tail()
226 idx &= (io_cq->q_depth - 1); in ena_com_rx_cdesc_idx_to_ptr()
266 head_masked = io_cq->head & (io_cq->q_depth - 1); in ena_com_cdesc_rx_pkt_get()
551 u16 q_depth = io_cq->q_depth; in ena_com_rx_pkt() local
583 if (unlikely(ena_buf[i].req_id >= q_depth)) in ena_com_rx_pkt()
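
The ena data-path hits use free-running head/tail counters that are only masked down to an array slot at access time; the 16-bit counters wrap naturally. A sketch of the idiom:

    #include <stdint.h>

    struct ring_sketch {
        uint16_t q_depth; /* power of two */
        uint16_t head;    /* free-running; never reset, only masked */
    };

    static uint16_t ring_slot(const struct ring_sketch *r)
    {
        return r->head & (r->q_depth - 1); /* cheap modulo */
    }
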
ena_eth_com.h
82 return io_sq->q_depth - 1 - cnt; in ena_com_free_q_entries()
196 if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0)) in ena_com_cq_inc_head()
207 masked_head = io_cq->head & (io_cq->q_depth - 1); in ena_com_tx_comp_req_id_get()
225 if (unlikely(*req_id >= io_cq->q_depth)) { in ena_com_tx_comp_req_id_get()
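
ena_com_free_q_entries() reports q_depth - 1 - cnt: with free-running counters, cnt = tail - head is the number of in-flight entries, and one slot is held back, a common way to keep a full ring distinguishable from an empty one. Sketch:

    #include <stdint.h>

    static uint16_t free_q_entries(uint16_t q_depth, uint16_t tail,
                                   uint16_t head)
    {
        uint16_t cnt = tail - head; /* in-flight entries (mod 2^16) */
        return q_depth - 1 - cnt;   /* usable capacity is depth - 1 */
    }
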
ena_com.c
97 u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth); in ena_com_admin_init_sq()
119 u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth); in ena_com_admin_init_cq()
141 ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; in ena_com_admin_init_aenq()
150 aenq->head = aenq->q_depth; in ena_com_admin_init_aenq()
160 aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK; in ena_com_admin_init_aenq()
186 if (unlikely(command_id >= admin_queue->q_depth)) { in get_comp_ctxt()
189 command_id, admin_queue->q_depth); in get_comp_ctxt()
222 queue_size_mask = admin_queue->q_depth - 1; in __ena_com_submit_admin_cmd()
228 if (cnt >= admin_queue->q_depth) { in __ena_com_submit_admin_cmd()
272 size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx); in ena_com_init_comp_ctxt()
[all …]
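
On the admin path, q_depth bounds both the completion-context lookup (command_id must be below q_depth) and submission (the queue is full once the outstanding count reaches q_depth). A sketch of those two guards, with illustrative types:

    #include <stdint.h>
    #include <stdbool.h>

    struct admin_q_sketch {
        uint16_t q_depth;
        uint16_t head, tail; /* free-running counters */
    };

    static bool admin_cmd_id_valid(const struct admin_q_sketch *aq,
                                   uint16_t command_id)
    {
        return command_id < aq->q_depth;
    }

    static bool admin_q_has_room(const struct admin_q_sketch *aq)
    {
        uint16_t cnt = aq->tail - aq->head; /* outstanding commands */
        return cnt < aq->q_depth;
    }
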
ena_com.h
129 u16 q_depth; member
171 u16 q_depth; member
228 u16 q_depth; member
255 u16 q_depth; member
/linux/drivers/net/ethernet/brocade/bna/
bnad.c
79 for (i = 0; i < ccb->q_depth; i++) { in bnad_cq_cleanup()
92 u32 q_depth, u32 index) in bnad_tx_buff_unmap() argument
115 BNA_QE_INDX_INC(index, q_depth); in bnad_tx_buff_unmap()
127 BNA_QE_INDX_INC(index, q_depth); in bnad_tx_buff_unmap()
144 for (i = 0; i < tcb->q_depth; i++) { in bnad_txq_cleanup()
148 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i); in bnad_txq_cleanup()
163 u32 wis, unmap_wis, hw_cons, cons, q_depth; in bnad_txcmpl_process() local
175 q_depth = tcb->q_depth; in bnad_txcmpl_process()
177 wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth); in bnad_txcmpl_process()
178 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth))); in bnad_txcmpl_process()
[all …]
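
bnad_txcmpl_process() measures how far the hardware consumer has advanced past the driver's consumer with BNA_Q_INDEX_CHANGE; assuming the macro is the usual mask-based distance (q_depth a power of two), it reduces to:

    #include <stdint.h>

    static uint32_t q_index_change(uint32_t old_idx, uint32_t new_idx,
                                   uint32_t q_depth)
    {
        return (new_idx - old_idx) & (q_depth - 1);
    }
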
bna_types.h
428 u32 q_depth; member
559 u32 q_depth; member
575 int q_depth; member
623 u32 q_depth; member
bfa_msgq.c
516 msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth); in bfa_msgq_init()
518 msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth); in bfa_msgq_init()
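
The msgq hits convert the depth with htons() before handing it to firmware, i.e. the q_depth field consumed by the device is big-endian regardless of host order. Sketch:

    #include <stdint.h>
    #include <arpa/inet.h>

    struct msgq_cfg_sketch {
        uint16_t q_depth; /* big-endian as consumed by firmware */
    };

    static void msgq_cfg_set_depth(struct msgq_cfg_sketch *cfg,
                                   uint16_t depth)
    {
        cfg->q_depth = htons(depth); /* CPU order -> big-endian */
    }
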
bfi.h
413 u16 q_depth; /* Total num of entries in the queue */ member
bna_tx_rx.c
2385 q0->rcb->q_depth = rx_cfg->q0_depth; in bna_rx_create()
2386 q0->q_depth = rx_cfg->q0_depth; in bna_rx_create()
2412 q1->rcb->q_depth = rx_cfg->q1_depth; in bna_rx_create()
2413 q1->q_depth = rx_cfg->q1_depth; in bna_rx_create()
2444 rxp->cq.ccb->q_depth = cq_depth; in bna_rx_create()
3466 txq->tcb->q_depth = tx_cfg->txq_depth; in bna_tx_create()
/linux/drivers/nvme/host/
pci.c
34 #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
35 #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))
160 u32 q_depth; member
232 u32 q_depth; member
574 if (next_tail == nvmeq->q_depth) in nvme_write_sq_db()
591 if (++nvmeq->sq_tail == nvmeq->q_depth) in nvme_sq_copy_cmd()
1414 if (tmp == nvmeq->q_depth) { in nvme_update_cq_head()
1573 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_cq()
1602 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_sq()
1892 int q_depth = dev->q_depth; in nvme_cmb_qdepth() local
[all …]
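
The NVMe hits combine three q_depth roles: SQ bytes scale with the entry-size shift (q_depth << sqes), CQ bytes with sizeof(struct nvme_completion), and the qsize reported to the controller is zero-based, hence q_depth - 1. A sketch:

    #include <stddef.h>
    #include <stdint.h>

    struct nvmeq_sketch {
        uint32_t q_depth; /* entries */
        uint8_t  sqes;    /* log2 of SQ entry size, e.g. 6 for 64 bytes */
    };

    static size_t sq_size_bytes(const struct nvmeq_sketch *q)
    {
        return (size_t)q->q_depth << q->sqes; /* mirrors SQ_SIZE() */
    }

    static uint16_t zero_based_qsize(const struct nvmeq_sketch *q)
    {
        return (uint16_t)(q->q_depth - 1); /* as sent in create SQ/CQ */
    }
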
/linux/drivers/net/ethernet/marvell/octeon_ep/
octep_ctrl_mbox.c
228 u32 pi, ci, r_sz, buf_sz, q_depth; in octep_ctrl_mbox_recv() local
240 q_depth = octep_ctrl_mbox_circq_depth(pi, ci, q->sz); in octep_ctrl_mbox_recv()
241 if (q_depth < mbox_hdr_sz) { in octep_ctrl_mbox_recv()
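
octep_ctrl_mbox_circq_depth(pi, ci, sz) computes how much data sits between the producer and consumer indices of the mailbox ring; assuming sz is a power of two, the mask form is:

    #include <stdint.h>

    static uint32_t circq_depth(uint32_t pi, uint32_t ci, uint32_t sz)
    {
        return (pi - ci) & (sz - 1); /* bytes available to the reader */
    }
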
/linux/drivers/net/ethernet/fungible/funcore/
fun_dev.h
69 unsigned int q_depth; /* max queue depth supported by device */ member
fun_dev.c
783 fdev->q_depth = NVME_CAP_MQES(fdev->cap_reg) + 1; in fun_dev_enable()
813 fdev->q_depth, fdev->db_stride, fdev->max_qid, in fun_dev_enable()
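
fun_dev_enable() derives q_depth from the NVMe CAP register: CAP.MQES is zero-based (maximum queue entries minus one), so the usable depth is MQES + 1. Sketch, with the field extraction written out:

    #include <stdint.h>

    #define CAP_MQES(cap) ((uint16_t)((cap) & 0xffff)) /* CAP bits 15:0 */

    static uint32_t device_q_depth(uint64_t cap_reg)
    {
        return (uint32_t)CAP_MQES(cap_reg) + 1; /* zero-based -> count */
    }
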
/linux/tools/testing/selftests/ublk/
kublk.c
407 return __ublk_queue_cmd_buf_sz(q->q_depth); in ublk_queue_cmd_buf_sz()
413 int nr_ios = q->q_depth; in ublk_queue_deinit()
445 q->q_depth = depth; in ublk_queue_init()
463 for (i = 0; i < q->q_depth; i++) { in ublk_queue_init()
741 for (i = 0; i < q->q_depth; i++) { in ublk_handle_uring_cmd()
788 assert(tag < q->q_depth); in ublk_handle_cqe()
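
The ublk selftest sizes its per-queue command buffer and per-io state from q_depth and iterates tags 0..q_depth-1. A sketch of a depth-proportional, page-rounded buffer size (the page rounding is an assumption about __ublk_queue_cmd_buf_sz, not a quote of it):

    #include <stddef.h>
    #include <unistd.h>

    static size_t queue_cmd_buf_sz(int q_depth, size_t desc_sz)
    {
        size_t sz = (size_t)q_depth * desc_sz;
        size_t pg = (size_t)sysconf(_SC_PAGESIZE);
        return (sz + pg - 1) & ~(pg - 1); /* round up to a whole page */
    }
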
kublk.h
163 int q_depth; member
/linux/drivers/scsi/mpi3mr/
mpi3mr_os.c
1018 int q_depth) in mpi3mr_change_queue_depth() argument
1025 q_depth = 1; in mpi3mr_change_queue_depth()
1026 if (q_depth > shost->can_queue) in mpi3mr_change_queue_depth()
1027 q_depth = shost->can_queue; in mpi3mr_change_queue_depth()
1028 else if (!q_depth) in mpi3mr_change_queue_depth()
1029 q_depth = MPI3MR_DEFAULT_SDEV_QD; in mpi3mr_change_queue_depth()
1030 retval = scsi_change_queue_depth(sdev, q_depth); in mpi3mr_change_queue_depth()
1076 mpi3mr_change_queue_depth(sdev, tgtdev->q_depth); in mpi3mr_update_sdev()
1169 tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth); in mpi3mr_update_tgtdev()
1332 tg->fw_qd = tgtdev->q_depth; in mpi3mr_update_tgtdev()
[all …]
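
mpi3mr_change_queue_depth() clamps the requested depth before passing it to scsi_change_queue_depth(): anything above the host's can_queue is capped, and zero falls back to a default (the hit at 1025 also forces depth 1 under a condition outside the excerpt). A sketch of that policy, with an illustrative default constant:

    #define DEFAULT_SDEV_QD 32 /* stand-in for MPI3MR_DEFAULT_SDEV_QD */

    static int clamp_queue_depth(int q_depth, int can_queue)
    {
        if (q_depth > can_queue)
            return can_queue;
        if (q_depth == 0)
            return DEFAULT_SDEV_QD;
        return q_depth;
    }
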
mpi3mr.h
764 u16 q_depth; member
/linux/drivers/scsi/bfa/
bfa_defs_svc.h
958 u16 q_depth; /* SCSI Queue depth */ member