/linux/drivers/net/ethernet/fungible/funcore/

fun_queue.c
   126  u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr,  in fun_cq_create() (argument)
   138  if (cq_depth > fdev->q_depth)  in fun_cq_create()
   146  cq_depth - 1, dma_addr, tailroom,  in fun_cq_create()
   292  max = funq->cq_depth - 1;  in __fun_process_cq()
   305  if (++funq->cq_head == funq->cq_depth) {  in __fun_process_cq()
   366  funq->cqes = fun_alloc_ring_mem(funq->fdev->dev, funq->cq_depth,  in fun_alloc_cqes()
   388  fun_free_ring_mem(dev, funq->cq_depth, 1 << funq->cqe_size_log2, false,  in fun_free_queue()
   441  funq->cq_depth = req->cq_depth;  in fun_alloc_queue()

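The __fun_process_cq() hits above show the usual completion-queue consume loop: process at most cq_depth - 1 entries per pass, and on wraparound reset the head and flip the expected phase bit. A minimal sketch of that pattern; the struct and field names here are illustrative, not the driver's exact layout:

#include <stdbool.h>

/* Illustrative ring state; fields loosely mirror the funq ones. */
struct cq {
    unsigned int cq_head;   /* next entry to consume */
    unsigned int cq_depth;  /* total entries in the ring */
    bool cq_phase;          /* phase value marking entries of the current lap */
};

/* Advance past one consumed entry, wrapping and flipping the phase so
 * entries left over from the previous lap are not mistaken for new ones. */
static void cq_advance(struct cq *cq)
{
    if (++cq->cq_head == cq->cq_depth) {
        cq->cq_head = 0;
        cq->cq_phase = !cq->cq_phase;
    }
}
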
fun_dev.c
   230  .cq_depth = areq->cq_depth,  in fun_enable_admin_queue()
   243  areq->cq_depth < AQA_MIN_QUEUE_SIZE ||  in fun_enable_admin_queue()
   244  areq->cq_depth > AQA_MAX_QUEUE_SIZE)  in fun_enable_admin_queue()
   270  (funq->cq_depth - 1) << AQA_ACQS_SHIFT,  in fun_enable_admin_queue()

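fun_enable_admin_queue() validates the requested admin CQ depth against AQA_MIN_QUEUE_SIZE/AQA_MAX_QUEUE_SIZE and then programs it zero-based (depth - 1) into a bit field of an admin-queue-attributes register. A hedged sketch of that encoding; the shift values below are illustrative, not the device's actual register layout:

#include <stdint.h>

/* Illustrative field positions, not the real AQA layout. */
#define AQA_ASQS_SHIFT 0    /* admin SQ size field */
#define AQA_ACQS_SHIFT 16   /* admin CQ size field */

/* Queue sizes are programmed zero-based: a depth of 64 is written as 63. */
static uint32_t build_aqa(uint32_t sq_depth, uint32_t cq_depth)
{
    return ((sq_depth - 1) << AQA_ASQS_SHIFT) |
           ((cq_depth - 1) << AQA_ACQS_SHIFT);
}
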
fun_dev.h
    92  u16 cq_depth;  (member)

/linux/tools/testing/selftests/ublk/

kublk.c
    34  int cq_depth, unsigned flags)  in ublk_setup_ring()
    40  p.cq_entries = cq_depth;  in ublk_setup_ring()
   521  int ring_depth = dev->tgt.sq_depth, cq_depth = dev->tgt.cq_depth;  in ublk_thread_init()
   526  cq_depth += dev->dev_info.queue_depth * 2;  in ublk_thread_init()
   528  ret = ublk_setup_ring(&t->ring, ring_depth, cq_depth,  in ublk_thread_init()
  1719  dev->tgt.cq_depth = depth;  in __cmd_create_help()
    33  ublk_setup_ring(struct io_uring *r, int depth, int cq_depth, unsigned flags)  (argument)
   520  int ring_depth = dev->tgt.sq_depth, cq_depth = dev->tgt.cq_depth;  (local)

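ublk_thread_init() sizes the completion ring independently of the submission ring (padding it with queue_depth * 2 extra CQEs), and ublk_setup_ring() passes that through io_uring's cq_entries parameter. A minimal liburing sketch of that call; note that p.cq_entries is only honoured when IORING_SETUP_CQSIZE is set, which the visible snippet implies but does not show:

#include <liburing.h>

/* Create a ring whose CQ can be larger than its SQ. */
static int setup_ring(struct io_uring *ring, unsigned sq_depth,
                      unsigned cq_depth, unsigned flags)
{
    struct io_uring_params p = { 0 };

    /* Ask the kernel for cq_depth CQEs instead of the default
     * (twice the SQ size). */
    p.flags = flags | IORING_SETUP_CQSIZE;
    p.cq_entries = cq_depth;

    return io_uring_queue_init_params(sq_depth, ring, &p);
}
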
null.c
    42  dev->tgt.sq_depth = dev->tgt.cq_depth = 2 * info->queue_depth;  in ublk_null_tgt_init()

kublk.h
   167  unsigned int cq_depth;
   161  unsigned int cq_depth;  (member)

stripe.c
   351  dev->tgt.cq_depth = mul * dev->dev_info.queue_depth * conf->nr_files;  in ublk_stripe_tgt_init()

/linux/drivers/net/ethernet/fungible/funeth/

funeth_main.c
   508  err = alloc_rxqs(netdev, rxqs, qset->nrxqs, qset->cq_depth,  in fun_alloc_rings()
   841  .cq_depth = fp->cq_depth,  in funeth_open()
  1630  .cq_depth = fp->cq_depth,  in fun_change_num_queues()
  1770  fp->cq_depth = min(CQ_DEPTH, fdev->q_depth);  in fun_create_netdev()
  1989  .cq_depth = ADMIN_CQ_DEPTH,  in funeth_probe()

/linux/drivers/net/ethernet/brocade/bna/

bna_tx_rx.c
  2141  u32 cq_depth;  in bna_rx_res_req() (local)
  2147  cq_depth = roundup_pow_of_two(dq_depth + hq_depth);  in bna_rx_res_req()
  2149  cq_size = cq_depth * BFI_CQ_WI_SIZE;  in bna_rx_res_req()
  2277  u32 cq_depth, i;  in bna_rx_create() (local)
  2437  cq_depth = rx_cfg->q0_depth +  in bna_rx_create()
  2443  cq_depth = roundup_pow_of_two(cq_depth);  in bna_rx_create()
  2444  rxp->cq.ccb->q_depth = cq_depth;  in bna_rx_create()

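bna_rx_res_req() sizes the CQ to hold a completion for every outstanding data-queue and header-queue entry, then rounds up to a power of two so the ring index can wrap with a simple mask. A portable sketch of that rounding (the kernel provides its own roundup_pow_of_two() helper; this reimplementation is only for illustration):

#include <stdint.h>

/* Round up to the next power of two by smearing the top set bit
 * downward, then adding one. Undefined for n == 0, like the kernel
 * helper. */
static uint32_t roundup_pow_of_two32(uint32_t n)
{
    n--;
    n |= n >> 1;
    n |= n >> 2;
    n |= n >> 4;
    n |= n >> 8;
    n |= n >> 16;
    return n + 1;
}

/* e.g. dq_depth = 448, hq_depth = 128 -> cq_depth = 1024 */
static uint32_t size_cq(uint32_t dq_depth, uint32_t hq_depth)
{
    return roundup_pow_of_two32(dq_depth + hq_depth);
}
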
/linux/drivers/net/ethernet/amazon/ena/

ena_admin_defs.h
   340  u16 cq_depth;  (member)

ena_com.c
  1350  create_cmd.cq_depth = io_cq->q_depth;  in ena_com_create_io_cq()

/linux/kernel/locking/

lockdep.c
  1744  unsigned int cq_depth;  in __bfs() (local)
  1832  cq_depth = __cq_get_elem_count(cq);  in __bfs()
  1833  if (max_bfs_queue_depth < cq_depth)  in __bfs()
  1834  max_bfs_queue_depth = cq_depth;  in __bfs()

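Here cq_depth has nothing to do with device completion queues: __bfs() samples the element count of its circular work queue after each step and records a high-water mark, a diagnostic for how much of the fixed-size queue a dependency-graph search actually uses. The pattern in isolation, with names mirroring lockdep's:

/* High-water-mark tracking, as in __bfs(). */
static unsigned int max_bfs_queue_depth;

static void note_queue_depth(unsigned int cq_depth)
{
    /* Remember the deepest the BFS queue has ever been; useful for
     * judging whether its fixed capacity is generous enough. */
    if (max_bfs_queue_depth < cq_depth)
        max_bfs_queue_depth = cq_depth;
}
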
/linux/drivers/infiniband/hw/hns/

hns_roce_hw_v2.h
  1211  __le32 cq_depth;  (member)

hns_roce_hw_v2.c
  3785  return (hr_reg_read(cqe, CQE_OWNER) ^ !!(n & hr_cq->cq_depth)) ? cqe :  in get_sw_cqe_v2()
  3868  hr_reg_write(cq_context, CQC_SHIFT, ilog2(hr_cq->cq_depth));  in hns_roce_v2_write_cqc()
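
get_sw_cqe_v2() implements a lap-parity ownership test: with a power-of-two cq_depth, n & cq_depth isolates the consumer-index bit just above the ring-index bits, which flips once per full lap, and a CQE reads as valid when its hardware-written owner bit disagrees with that parity (hns_roce_v2_write_cqc() correspondingly programs the depth into the context as ilog2(cq_depth)). A hedged sketch of the test; the struct and field names are illustrative, not the driver's CQE layout:

#include <stdbool.h>
#include <stdint.h>

struct cqe {
    uint8_t owner;   /* toggled by hardware once per lap */
    /* ... payload ... */
};

static bool cqe_is_fresh(const struct cqe *cqe, uint32_t n,
                         uint32_t cq_depth /* power of two */)
{
    /* Flips 0 -> 1 -> 0 ... each time the consumer index n completes
     * a full lap around the ring. */
    uint32_t lap_parity = !!(n & cq_depth);

    /* Fresh entry: the owner bit hardware wrote for this lap differs
     * from the parity of laps the consumer has completed. */
    return (cqe->owner ^ lap_parity) != 0;
}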