Searched refs:q_depth (Results 1 – 8 of 8) sorted by relevance
398  uint16_t q_depth,  in mana_hwc_create_cq() argument
410  eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);  in mana_hwc_create_cq()
414  cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);  in mana_hwc_create_cq()
437  comp_buf = mallocarray(q_depth, sizeof(struct gdma_comp),  in mana_hwc_create_cq()
442  hwc_cq->queue_depth = q_depth;  in mana_hwc_create_cq()
456  mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, uint16_t q_depth,  in mana_hwc_alloc_dma_buf() argument
471  q_depth * sizeof(struct hwc_work_request),  in mana_hwc_alloc_dma_buf()
474  dma_buf->num_reqs = q_depth;  in mana_hwc_alloc_dma_buf()
476  buf_size = ALIGN(q_depth * max_msg_size, PAGE_SIZE);  in mana_hwc_alloc_dma_buf()
489  for (i = 0; i < q_depth; i++) {  in mana_hwc_alloc_dma_buf()
[all …]
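The mana_hwc_create_cq() hits size the EQ and CQ by rounding the depth-scaled buffer size up to a power of two. A minimal sketch of that sizing step; the GDMA_EQE_SIZE/GDMA_CQE_SIZE values are assumed for illustration, and roundup_pow_of_two() is reimplemented here rather than taken from the kernel:

#include <stdint.h>

#define GDMA_EQE_SIZE 16   /* assumed entry sizes, for illustration only */
#define GDMA_CQE_SIZE 64

static uint32_t
roundup_pow_of_two(uint32_t v)
{
	v--;                   /* assumes v > 0 */
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	return (v + 1);
}

/* A depth of 100 yields a 100*16 = 1600-byte EQ request, rounded to 2048. */
static void
size_hwc_queues(uint16_t q_depth, uint32_t *eq_size, uint32_t *cq_size)
{
	*eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
	*cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
}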
43  head_masked = io_cq->head & (io_cq->q_depth - 1);  in ena_com_get_next_rx_cdesc()
68  tail_masked = io_sq->tail & (io_sq->q_depth - 1);  in get_sq_desc_regular_queue()
83  dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);  in ena_com_write_bounce_buffer_to_dev()
113  if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))  in ena_com_write_bounce_buffer_to_dev()
246  if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))  in ena_com_sq_update_reqular_queue_tail()
263  idx &= (io_cq->q_depth - 1);  in ena_com_rx_cdesc_idx_to_ptr()
312  head_masked = io_cq->head & (io_cq->q_depth - 1);  in ena_com_cdesc_rx_pkt_get()
604  u16 q_depth = io_cq->q_depth;  in ena_com_rx_pkt() local
639  if (unlikely(ena_buf[i].req_id >= q_depth))  in ena_com_rx_pkt()
113  return io_sq->q_depth - 1 - cnt;  in ena_com_free_q_entries()
228  if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))  in ena_com_cq_inc_head()
241  masked_head = io_cq->head & (io_cq->q_depth - 1);  in ena_com_tx_comp_req_id_get()
269  if (unlikely(*req_id >= io_cq->q_depth)) {  in ena_com_tx_comp_req_id_get()
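Nearly every hit in these two ena blocks depends on q_depth being a power of two, so "counter & (q_depth - 1)" wraps a free-running head or tail into a slot index without a modulo. A self-contained sketch of the pattern, with struct and function names of my own choosing:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical ring; q_depth must be a power of two for the mask to work. */
struct io_ring {
	uint16_t head;      /* free-running consumer counter */
	uint16_t tail;      /* free-running producer counter */
	uint16_t q_depth;   /* number of slots, power of two */
};

/* Wrap a free-running counter into a slot index, replacing "% q_depth". */
static inline uint16_t
ring_slot(const struct io_ring *r, uint16_t counter)
{
	return (counter & (r->q_depth - 1));
}

/*
 * Counters are never masked in place, so tail - head is the occupancy even
 * across 16-bit wraparound; the -1 keeps one slot in reserve (the same
 * arithmetic as ena_com_free_q_entries() above).
 */
static inline uint16_t
ring_free_entries(const struct io_ring *r)
{
	uint16_t cnt = (uint16_t)(r->tail - r->head);

	return (r->q_depth - 1 - cnt);
}

/* Bound-check a completion's req_id before using it as an array index,
 * mirroring the req_id >= q_depth checks in ena_com_rx_pkt() and
 * ena_com_tx_comp_req_id_get(). */
static inline bool
req_id_valid(const struct io_ring *r, uint16_t req_id)
{
	return (req_id < r->q_depth);
}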
133  u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);  in ena_com_admin_init_sq()
156  u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);  in ena_com_admin_init_cq()
179  ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;  in ena_com_admin_init_aenq()
191  aenq->head = aenq->q_depth;  in ena_com_admin_init_aenq()
201  aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;  in ena_com_admin_init_aenq()
228  if (unlikely(command_id >= admin_queue->q_depth)) {  in get_comp_ctxt()
231  command_id, admin_queue->q_depth);  in get_comp_ctxt()
266  queue_size_mask = admin_queue->q_depth - 1;  in __ena_com_submit_admin_cmd()
272  if (unlikely(cnt >= admin_queue->q_depth)) {  in __ena_com_submit_admin_cmd()
318  size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);  in ena_com_init_comp_ctxt()
[all …]
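In the admin path the same depth sizes the SQ/CQ, masks the producer index, bounds command_id lookups, and caps the number of outstanding commands. A compressed sketch of the submit-side guard seen in the __ena_com_submit_admin_cmd() hits; the struct layout here is illustrative, not ena-com's actual one:

#include <errno.h>
#include <stdint.h>

/* Illustrative admin queue; the real ena-com struct carries far more. */
struct admin_queue_sketch {
	uint16_t sq_tail;    /* free-running producer counter */
	uint16_t comp_head;  /* free-running completion counter */
	uint16_t q_depth;
};

/*
 * Refuse to submit when q_depth commands are already outstanding,
 * otherwise place the command at the masked tail slot.
 */
static int
submit_admin_cmd(struct admin_queue_sketch *aq)
{
	uint16_t queue_size_mask = aq->q_depth - 1;
	uint16_t cnt = (uint16_t)(aq->sq_tail - aq->comp_head);
	uint16_t slot;

	if (cnt >= aq->q_depth)
		return (-ENOSPC);            /* admin queue is full */

	slot = aq->sq_tail & queue_size_mask;
	/* ... copy the command descriptor into SQ entry slot ... */
	(void)slot;
	aq->sq_tail++;
	return (0);
}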
149  u16 q_depth;  member
192  u16 q_depth;  member
251  u16 q_depth;  member
284  u16 q_depth;  member
270 for (i = 0; i < admin_queue->q_depth; i++) { \
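The init hits allocate one completion context per possible in-flight command and walk all q_depth of them, which is also why get_comp_ctxt() rejects any command_id >= q_depth before indexing. A minimal sketch, with the context fields assumed:

#include <stdint.h>
#include <stdlib.h>

/* Assumed minimal context; the real struct ena_comp_ctx holds more state. */
struct comp_ctx_sketch {
	uint8_t occupied;
	int     status;
};

/*
 * One context per possible in-flight command: command_id indexes this
 * array directly, so its length must be exactly q_depth.
 */
static struct comp_ctx_sketch *
init_comp_ctxt(uint16_t q_depth)
{
	size_t size = (size_t)q_depth * sizeof(struct comp_ctx_sketch);
	struct comp_ctx_sketch *ctx = malloc(size);
	uint16_t i;

	if (ctx == NULL)
		return (NULL);
	for (i = 0; i < q_depth; i++) {      /* mirrors the macro's loop */
		ctx[i].occupied = 0;
		ctx[i].status = 0;
	}
	return (ctx);
}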
122 uint64_t q_depth; member
3579  tgtdev->q_depth = dev_pg0->QueueDepth;  in mpi3mr_update_device()
3624  tg->fw_qd = tgtdev->q_depth;  in mpi3mr_update_device()
3625  tg->modified_qd = tgtdev->q_depth;  in mpi3mr_update_device()
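The mpi3mr hits capture the firmware-reported depth from device page 0 and seed the throttle group's baseline and working depths with it. Roughly, with any struct shape beyond the visible fields assumed:

#include <stdint.h>

/* Assumed minimal shapes; the mpi3mr structs carry much more state. */
struct dev_pg0_sketch      { uint16_t QueueDepth; };
struct throttle_grp_sketch { uint16_t fw_qd; uint16_t modified_qd; };
struct target_dev_sketch {
	uint16_t q_depth;
	struct throttle_grp_sketch *tg;
};

/*
 * Record the firmware-reported depth and seed the throttle group, as the
 * mpi3mr_update_device() hits do: fw_qd keeps the firmware baseline while
 * modified_qd is the working value that throttling may later lower.
 */
static void
update_device_qd(struct target_dev_sketch *tgtdev,
    const struct dev_pg0_sketch *dev_pg0)
{
	tgtdev->q_depth = dev_pg0->QueueDepth;
	if (tgtdev->tg != NULL) {
		tgtdev->tg->fw_qd = tgtdev->q_depth;
		tgtdev->tg->modified_qd = tgtdev->q_depth;
	}
}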