/linux/tools/include/io_uring/

  mini_liburing.h
     24  struct io_uring_cqe *cqes;  (member)
     49  struct io_uring_cqe *cqes;  (member)
     98  cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);  in io_uring_mmap()
    111  cq->cqes = ptr + p->cq_off.cqes;  in io_uring_mmap()
    173  *cqe_ptr = &cq->cqes[head & mask];  in io_uring_wait_cqe()
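A minimal userspace sketch of the consumption pattern these hits trace: the CQE array lives at cq_off.cqes inside the CQ mapping (line 111), the ring size is derived as on line 98, and entries are read at head & mask as on line 173. Struct and field names here are illustrative except for io_uring_cqe itself; the acquire load uses the GCC/Clang __atomic builtin.

  #include <linux/io_uring.h>
  #include <stddef.h>

  struct app_cq {
          unsigned *khead;            /* consumer head, advanced by us    */
          unsigned *ktail;            /* producer tail, written by kernel */
          unsigned ring_mask;         /* entries - 1 (power of two)       */
          struct io_uring_cqe *cqes;  /* ptr + cq_off.cqes after mmap     */
  };

  /* Return the next unseen completion, or NULL if the ring is empty. */
  static struct io_uring_cqe *cq_peek(struct app_cq *cq)
  {
          unsigned head = *cq->khead;
          /* acquire pairs with the kernel's release store of the tail */
          unsigned tail = __atomic_load_n(cq->ktail, __ATOMIC_ACQUIRE);

          return head == tail ? NULL : &cq->cqes[head & cq->ring_mask];
  }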
/linux/drivers/infiniband/hw/cxgb4/

  restrack.c
    334  struct t4_cqe *cqes)  in fill_hwcqes()  (argument)
    339  if (fill_cqe(msg, cqes, idx, "hwcq_idx"))  in fill_hwcqes()
    342  if (fill_cqe(msg, cqes + 1, idx, "hwcq_idx"))  in fill_hwcqes()
    351  struct t4_cqe *cqes)  in fill_swcqes()  (argument)
    359  if (fill_cqe(msg, cqes, idx, "swcq_idx"))  in fill_swcqes()
    364  if (fill_cqe(msg, cqes + 1, idx, "swcq_idx"))  in fill_swcqes()
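The restrack hits show the same two-step dump in both the hardware- and software-CQ paths: the CQE at the index and its immediate neighbor are emitted under one key. A sketch of that pattern; fill_cqe()'s signature is assumed from the call sites above, and the non-zero-on-full-message return convention is an assumption too.

  static int dump_cqe_pair(struct sk_buff *msg, struct t4_cqe *cqes,
                           u16 idx, const char *key)
  {
          if (fill_cqe(msg, cqes, idx, key))      /* CQE at idx          */
                  return -EMSGSIZE;
          if (fill_cqe(msg, cqes + 1, idx, key))  /* its neighbor, idx+1 */
                  return -EMSGSIZE;
          return 0;
  }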
/linux/drivers/net/ethernet/fungible/funeth/

  funeth_rx.c
    353  q->next_cqe_info = cqe_to_info(q->cqes);  in advance_cq()
    644  q->cqes = fun_alloc_ring_mem(q->dma_dev, ncqe, FUNETH_CQE_SIZE, 0,  in fun_rxq_create_sw()
    647  if (!q->cqes)  in fun_rxq_create_sw()
    665  dma_free_coherent(q->dma_dev, ncqe * FUNETH_CQE_SIZE, q->cqes,  in fun_rxq_create_sw()
    686  q->cqes, q->cq_dma_addr);  in fun_rxq_free_sw()
    722  q->next_cqe_info = cqe_to_info(q->cqes);  in fun_rxq_create_dev()

  funeth_txrx.h
    167  void *cqes;  /* base of CQ descriptor ring */  (member)
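funeth keeps the CQ descriptor ring in DMA-coherent memory: allocated in fun_rxq_create_sw() via the driver's fun_alloc_ring_mem() wrapper, then released with dma_free_coherent() on the error path and in fun_rxq_free_sw(). A sketch of that lifecycle using the plain DMA API instead of the wrapper; everything except the dma_* calls is a made-up name.

  #include <linux/dma-mapping.h>

  static int cq_ring_alloc(struct device *dma_dev, unsigned int ncqe,
                           size_t cqe_size, void **cqes, dma_addr_t *dma)
  {
          /* one physically contiguous, coherent block for all CQEs */
          *cqes = dma_alloc_coherent(dma_dev, ncqe * cqe_size, dma,
                                     GFP_KERNEL);
          return *cqes ? 0 : -ENOMEM;
  }

  static void cq_ring_free(struct device *dma_dev, unsigned int ncqe,
                           size_t cqe_size, void *cqes, dma_addr_t dma)
  {
          dma_free_coherent(dma_dev, ncqe * cqe_size, cqes, dma);
  }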
/linux/drivers/net/ethernet/broadcom/

  cnic.c
   1432  struct kcqe *cqes[], u32 num_cqes)  in cnic_reply_bnx2x_kcqes()  (argument)
   1441  cqes, num_cqes);  in cnic_reply_bnx2x_kcqes()
   1559  struct kcqe *cqes[1];  in cnic_bnx2x_iscsi_init2()  (local)
   1589  cqes[0] = (struct kcqe *) &kcqe;  in cnic_bnx2x_iscsi_init2()
   1590  cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);  in cnic_bnx2x_iscsi_init2()
   1888  struct kcqe *cqes[1];  in cnic_bnx2x_iscsi_ofld1()  (local)
   1941  cqes[0] = (struct kcqe *) &kcqe;  in cnic_bnx2x_iscsi_ofld1()
   1942  cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);  in cnic_bnx2x_iscsi_ofld1()
   2006  struct kcqe *cqes[1];  in cnic_bnx2x_iscsi_destroy()  (local)
   2039  cqes[0] = (struct kcqe *) &kcqe;  in cnic_bnx2x_iscsi_destroy()
   [all …]

  cnic_if.h
    370  void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
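Each iSCSI handler in the cnic.c hits repeats one idiom: a KCQE built on the stack is wrapped in a one-element pointer array, because the indicate_kcqes() callback in cnic_if.h takes an array of struct kcqe pointers plus a count. A hedged sketch of that idiom; the helper name is invented.

  static void reply_one_kcqe(struct cnic_dev *dev, int ulp_type,
                             struct kcqe *kcqe)
  {
          struct kcqe *cqes[1];  /* callback ABI wants kcqe *[] + count */

          cqes[0] = kcqe;
          cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
  }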
/linux/drivers/nvme/host/

  apple.c
    135  struct nvme_completion *cqes;  (member)
    571  struct nvme_completion *hcqe = &q->cqes[q->cq_head];  in apple_nvme_cqe_pending()
    589  struct nvme_completion *cqe = &q->cqes[idx];  in apple_nvme_handle_cqe()
    977  memset(q->cqes, 0, depth * sizeof(struct nvme_completion));  in apple_nvme_init_queue()
   1310  q->cqes = dmam_alloc_coherent(anv->dev,  in apple_nvme_queue_alloc()
   1313  if (!q->cqes)  in apple_nvme_queue_alloc()

  pci.c
    198  struct nvme_completion *cqes;  (member)
   1083  struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];  in nvme_cqe_pending()
   1107  struct nvme_completion *cqe = &nvmeq->cqes[idx];  in nvme_handle_cqe()
   1539  (void *)nvmeq->cqes, nvmeq->cq_dma_addr);  in nvme_free_queue()
   1664  nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),  in nvme_alloc_queue()
   1666  if (!nvmeq->cqes)  in nvme_alloc_queue()
   1684  dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,  in nvme_alloc_queue()
   1713  memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));  in nvme_init_queue()
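Both drivers detect new completions the same way (apple.c line 571, pci.c line 1083): the entry at cq_head is valid only while bit 0 of its status word, the phase tag, matches the phase the queue expects for the current lap. A standalone sketch of that test, not lifted from either driver:

  #include <linux/nvme.h>

  static bool cqe_pending(const struct nvme_completion *cqes,
                          u16 cq_head, u8 cq_phase)
  {
          /* The controller inverts the phase tag each time it wraps the
           * queue, so entries left over from the previous lap mismatch. */
          return (le16_to_cpu(READ_ONCE(cqes[cq_head].status)) & 1) ==
                 cq_phase;
  }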
/linux/drivers/net/ethernet/mellanox/mlx5/core/

  en_stats.c
    290  s->tx_xdp_cqes += xdpsq_red_stats->cqes;  in mlx5e_stats_grp_sw_update_stats_xdp_red()
    302  s->rx_xdp_tx_cqe += xdpsq_stats->cqes;  in mlx5e_stats_grp_sw_update_stats_xdpsq()
    313  s->tx_xsk_cqes += xsksq_stats->cqes;  in mlx5e_stats_grp_sw_update_stats_xsksq()
    451  s->tx_cqes += sq_stats->cqes;  in mlx5e_stats_grp_sw_update_stats_sq()
   2146  { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
   2158  { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
   2168  { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
   2198  { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
   2222  { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
   2299  { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },

  en_stats.h
    444  u64 cqes ____cacheline_aligned_in_smp;
    457  u64 cqes ____cacheline_aligned_in_smp;

  en_tx.c
    877  stats->cqes += i;  in mlx5e_poll_tx_cq()
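These hits trace mlx5's two-level counter scheme: each queue owns a cacheline-aligned cqes counter (en_stats.h) that the CQ poll loop bumps locklessly after consuming i completions (en_tx.c line 877), and the slow-path update in en_stats.c folds the per-queue counters into software totals. A sketch of the shape, with made-up names:

  #include <linux/cache.h>
  #include <linux/types.h>

  struct sq_stats {
          /* own cacheline: the queue is the only writer, so updates
           * never bounce lines between queues (no false sharing) */
          u64 cqes ____cacheline_aligned_in_smp;
  };

  /* fast path, per queue: called after polling a batch of i CQEs */
  static void sq_account_cqes(struct sq_stats *stats, int i)
  {
          stats->cqes += i;      /* single writer, no atomics needed */
  }

  /* slow path: fold every queue's counter into a software total */
  static u64 sum_tx_cqes(const struct sq_stats *per_sq, int num_sqs)
  {
          u64 total = 0;

          while (num_sqs--)
                  total += per_sq[num_sqs].cqes;
          return total;
  }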
/linux/drivers/nvme/target/

  passthru.c
    134  id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);  in nvmet_passthru_override_id_ctrl()

  admin-cmd.c
    737  id->cqes = (0x4 << 4) | 0x4;  in nvmet_execute_identify_ctrl()
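The CQES byte in Identify Controller packs two log2 sizes: bits 3:0 give the required (minimum) CQ entry size and bits 7:4 the maximum, so the (0x4 << 4) | 0x4 written by nvmet_execute_identify_ctrl() advertises exactly 16-byte entries, and the min_t() in the passthru override clamps a passed-through controller's value so it never exceeds 0x44. A worked decode as a standalone userspace program:

  #include <stdio.h>

  int main(void)
  {
          unsigned char cqes = (0x4 << 4) | 0x4;  /* 0x44 */

          printf("min CQ entry: %u bytes\n", 1u << (cqes & 0xf)); /* 16 */
          printf("max CQ entry: %u bytes\n", 1u << (cqes >> 4));  /* 16 */
          return 0;
  }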
/linux/tools/include/uapi/linux/

  io_uring.h
    460  __u32 cqes;  (member)
/linux/include/linux/

  io_uring_types.h
    191  struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;  (member)

  nvme.h
    366  __u8 cqes;  (member)
/linux/io_uring/

  register.c
    547  n.rings->cqes[dst_head] = o.rings->cqes[src_head];  in io_register_resize_rings()

  io_uring.c
    799  ctx->cqe_cached = &rings->cqes[off];  in io_cqe_cache_refill()
   2650  off = struct_size(rings, cqes, cq_entries);  in rings_size()
   3598  p->cq_off.cqes = offsetof(struct io_rings, cqes);  in io_uring_fill_params()
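rings_size() has to account for the flexible cqes[] array declared at the tail of struct io_rings (io_uring_types.h line 191): struct_size() from <linux/overflow.h> is the overflow-safe spelling of sizeof(*rings) + cq_entries * sizeof(rings->cqes[0]), saturating to SIZE_MAX instead of wrapping. A minimal sketch of the cqes part of that computation:

  #include <linux/overflow.h>

  static size_t cq_ring_bytes(struct io_rings *rings,
                              unsigned int cq_entries)
  {
          /* header + cq_entries CQEs, saturated on overflow */
          return struct_size(rings, cqes, cq_entries);
  }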
/linux/include/uapi/linux/

  io_uring.h
    521  __u32 cqes;  (member)