/linux/drivers/scsi/snic/

vnic_cq.c
    34  unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,  [svnic_cq_init() argument]
    46  iowrite32(cq_head, &cq->ctrl->cq_head);  [in svnic_cq_init()]
    61  iowrite32(0, &cq->ctrl->cq_head);  [in svnic_cq_clean()]

vnic_cq.h
    19  u32 cq_head; /* 0x20 */  [member]
    91  unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,

/linux/drivers/scsi/fnic/

vnic_cq.c
    41  unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,  [vnic_cq_init() argument]
    53  iowrite32(cq_head, &cq->ctrl->cq_head);  [in vnic_cq_init()]
    68  iowrite32(0, &cq->ctrl->cq_head);  [in vnic_cq_clean()]

vnic_cq.h
    31  u32 cq_head; /* 0x20 */  [member]
   103  unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,

/linux/drivers/net/ethernet/cisco/enic/

vnic_cq.c
    39  unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,  [vnic_cq_init() argument]
    51  iowrite32(cq_head, &cq->ctrl->cq_head);  [in vnic_cq_init()]
    68  iowrite32(0, &cq->ctrl->cq_head);  [in vnic_cq_clean()]

vnic_cq.h
    22  u32 cq_head; /* 0x20 */  [member]
   104  unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,

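The snic, fnic, and enic hits above are three copies of the same Cisco VNIC helper: the driver programs the adapter's completion-queue head and tail through a memory-mapped control block, and writes the head back to zero when the queue is cleaned. A minimal sketch of that pattern follows; the register struct is trimmed to the fields these matches touch (the real layout in vnic_cq.h has more registers and padding), and the function names are illustrative rather than the actual vnic_cq_init()/vnic_cq_clean().

#include <linux/io.h>
#include <linux/types.h>

/* Trimmed, illustrative register block; only the fields the matches touch. */
struct example_cq_ctrl {
        u32 cq_head;            /* commented as 0x20 in the vnic_cq.h matches */
        u32 pad;
        u32 cq_tail;
};

/* Seed the hardware's head/tail indices, as the *_cq_init() matches do. */
static void example_cq_init(struct example_cq_ctrl __iomem *ctrl,
                            unsigned int cq_head, unsigned int cq_tail)
{
        iowrite32(cq_head, &ctrl->cq_head);
        iowrite32(cq_tail, &ctrl->cq_tail);
}

/* On clean-up the head is simply written back to zero (*_cq_clean()). */
static void example_cq_clean(struct example_cq_ctrl __iomem *ctrl)
{
        iowrite32(0, &ctrl->cq_head);
}
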
/linux/io_uring/

fdinfo.c
   102  unsigned int cq_head = READ_ONCE(r->cq.head);  [io_uring_show_fdinfo() local]
   128  seq_printf(m, "CqHead:\t%u\n", cq_head);  [in io_uring_show_fdinfo()]
   164  seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);  [in io_uring_show_fdinfo()]
   165  cq_entries = min(cq_tail - cq_head, ctx->cq_entries);  [in io_uring_show_fdinfo()]
   167  unsigned int entry = i + cq_head;  [in io_uring_show_fdinfo()]

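Here cq_head and cq_tail are the ring's free-running 32-bit counters, so the number of completions waiting to be reaped is just the unsigned difference, clamped to the ring size before the entries are dumped. A standalone sketch of that arithmetic, with illustrative names rather than the io_rings structures themselves:

#include <stdint.h>

/* Pending CQEs from free-running head/tail counters: unsigned subtraction
 * stays correct across 32-bit wraparound, and the result is clamped so we
 * never walk more slots than the ring holds. */
static uint32_t pending_cqes(uint32_t cq_head, uint32_t cq_tail,
                             uint32_t cq_entries)
{
        uint32_t pending = cq_tail - cq_head;

        return pending < cq_entries ? pending : cq_entries;
}

/* The i-th pending entry lives at (cq_head + i) masked by the ring-size
 * mask, which is how the fdinfo dump indexes into the CQE array. */
static uint32_t cqe_index(uint32_t cq_head, uint32_t i, uint32_t cq_mask)
{
        return (cq_head + i) & cq_mask;
}
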
/linux/drivers/net/ethernet/marvell/octeontx2/nic/

otx2_txrx.c
    50  cq->cq_head = (status >> 20) & 0xFFFFF;  [in otx2_nix_cq_op_status()]
    51  if (cq->cq_tail < cq->cq_head)  [in otx2_nix_cq_op_status()]
    52  cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) +  [in otx2_nix_cq_op_status()]
    55  cq->pend_cqe = cq->cq_tail - cq->cq_head;  [in otx2_nix_cq_op_status()]
    64  cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head);  [in otx2_get_next_cqe()]
    68  cq->cq_head++;  [in otx2_get_next_cqe()]
    69  cq->cq_head &= (cq->cqe_cnt - 1);  [in otx2_get_next_cqe()]
   409  cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);  [in otx2_rx_napi_handler()]
   416  cq->cq_head++;  [in otx2_rx_napi_handler()]
   417  cq->cq_head &= (cq->cqe_cnt - 1);  [in otx2_rx_napi_handler()]

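Unlike the io_uring counters, the head here is an absolute slot index unpacked from a hardware status word (bits 20..39 in the match at line 50), so the pending count has to handle wraparound explicitly; the head is then advanced with a power-of-two mask as entries are consumed. A sketch of both steps, assuming cqe_cnt is a power of two as the masking in the matches implies:

#include <stdint.h>

/* Pending completions when head/tail are absolute ring indices rather than
 * free-running counters: the wrapped case must be handled by hand. */
static uint32_t otx2_style_pending(uint32_t cq_head, uint32_t cq_tail,
                                   uint32_t cqe_cnt)
{
        if (cq_tail < cq_head)
                return (cqe_cnt - cq_head) + cq_tail;
        return cq_tail - cq_head;
}

/* Consume one slot: cqe_cnt is a power of two, so wrapping is a mask. */
static uint32_t otx2_style_next_head(uint32_t cq_head, uint32_t cqe_cnt)
{
        return (cq_head + 1) & (cqe_cnt - 1);
}
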
/linux/drivers/dma/

hisi_dma.c
   152  u32 cq_head;  [member]
   472  chan->cq_head = 0;  [in hisi_dma_free_chan_resources()]
   738  cqe = chan->cq + chan->cq_head;  [in hisi_dma_irq()]
   741  chan->cq_head = (chan->cq_head + 1) % hdma_dev->chan_depth;  [in hisi_dma_irq()]
   743  chan->qp_num, chan->cq_head);  [in hisi_dma_irq()]

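The interrupt handler indexes the current CQE with cq_head and advances it with a modulo on the channel depth, which works even when the depth is not a power of two; the truncated call at line 743 then passes the new head along, presumably to report it back to the device. A short sketch of that consume-and-advance step, with a hypothetical CQE layout:

#include <stdint.h>

/* Hypothetical CQE: just enough state for the sketch. */
struct example_cqe {
        uint32_t valid;         /* set by hardware when the entry is posted */
};

/* Look at cq[cq_head]; if the hardware has posted it, consume it and move
 * the head forward modulo the channel depth (no power-of-two requirement). */
static uint32_t consume_one_cqe(struct example_cqe *cq, uint32_t cq_head,
                                uint32_t chan_depth)
{
        struct example_cqe *cqe = &cq[cq_head];

        if (!cqe->valid)
                return cq_head;         /* nothing new to reap */

        cqe->valid = 0;
        return (cq_head + 1) % chan_depth;
}
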
/linux/drivers/net/ethernet/fungible/funeth/

funeth_rx.c
   350  if (unlikely(q->cq_head == q->cq_mask)) {  [in advance_cq()]
   351  q->cq_head = 0;  [in advance_cq()]
   355  q->cq_head++;  [in advance_cq()]
   515  u32 cq_db_val = q->cq_head;  [in fun_rxq_napi_poll()]
   716  q->cq_head = 0;  [in fun_rxq_create_dev()]

funeth_txrx.h
   170  unsigned int cq_head; /* CQ head index */  [member]

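advance_cq() takes yet another approach to wrapping: the head is compared against cq_mask and reset to zero explicitly instead of being masked after every increment, and the NAPI poll loop later uses the current head directly as the CQ doorbell value. A minimal sketch of that advance, assuming cq_mask is the ring size minus one as the name suggests:

#include <stdint.h>

/* Advance the CQ head, wrapping explicitly at the last slot; the caller
 * later uses the head value itself as the doorbell payload (cq_db_val in
 * the matches above). */
static uint32_t advance_cq_head(uint32_t cq_head, uint32_t cq_mask)
{
        if (cq_head == cq_mask)
                return 0;               /* last slot reached: wrap */
        return cq_head + 1;
}
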
/linux/drivers/nvme/host/

apple.c
   145  u16 cq_head;  [member]
   571  struct nvme_completion *hcqe = &q->cqes[q->cq_head];  [in apple_nvme_cqe_pending()]
   609  u32 tmp = q->cq_head + 1;  [in apple_nvme_update_cq_head()]
   612  q->cq_head = 0;  [in apple_nvme_update_cq_head()]
   615  q->cq_head = tmp;  [in apple_nvme_update_cq_head()]
   632  apple_nvme_handle_cqe(q, iob, q->cq_head);  [in apple_nvme_poll_cq()]
   637  writel(q->cq_head, q->cq_db);  [in apple_nvme_poll_cq()]
   973  q->cq_head = 0;  [in apple_nvme_init_queue()]

pci.c
   207  u16 cq_head;  [member]
  1084  struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];  [in nvme_cqe_pending()]
  1091  u16 head = nvmeq->cq_head;  [in nvme_ring_cq_doorbell()]
  1141  u32 tmp = nvmeq->cq_head + 1;  [in nvme_update_cq_head()]
  1144  nvmeq->cq_head = 0;  [in nvme_update_cq_head()]
  1147  nvmeq->cq_head = tmp;  [in nvme_update_cq_head()]
  1163  nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head);  [in nvme_poll_cq()]
  1676  nvmeq->cq_head = 0;  [in nvme_alloc_queue()]
  1711  nvmeq->cq_head = 0;  [in nvme_init_queue()]

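Both NVMe drivers follow the same shape: *_cqe_pending() peeks at the CQE under cq_head, *_update_cq_head() wraps it back to zero when the next index reaches the queue depth, and the poll loop finally writes the head to the CQ doorbell (writel(q->cq_head, q->cq_db) in the apple.c match). In NVMe the "is this entry new?" test is a phase bit in the CQE status word, which the driver expects to flip each time the head wraps; the phase handling is not visible in the matched lines, so the sketch below labels it as the standard NVMe convention rather than a quote of either driver.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative per-queue state; the real drivers keep this in their
 * nvme_queue / apple_nvme_queue structures. */
struct example_cq {
        uint16_t *status;       /* status word of each CQE slot; bit 0 = phase */
        uint32_t  depth;        /* number of CQE slots */
        uint32_t  cq_head;      /* next slot to look at */
        uint8_t   cq_phase;     /* expected phase of a freshly posted entry */
};

/* A slot is pending when its phase bit matches the phase we expect for the
 * current lap around the ring (NVMe phase-tag convention). */
static bool cqe_pending(const struct example_cq *q)
{
        return (q->status[q->cq_head] & 1) == q->cq_phase;
}

/* Advance the head; after a wrap, freshly posted entries carry the opposite
 * phase, so the expected phase flips with it. */
static void update_cq_head(struct example_cq *q)
{
        uint32_t tmp = q->cq_head + 1;

        if (tmp == q->depth) {
                q->cq_head = 0;
                q->cq_phase ^= 1;
        } else {
                q->cq_head = tmp;
        }
}

Only the head index ends up in the hardware doorbell afterwards (the writel() match above); the phase stays purely driver-side state.
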
/linux/drivers/infiniband/hw/irdma/

uk.c
  1495  u32 cq_head;  [irdma_uk_clean_cq() local]
  1498  cq_head = cq->cq_ring.head;  [in irdma_uk_clean_cq()]
  1502  cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;  [in irdma_uk_clean_cq()]
  1504  cqe = cq->cq_base[cq_head].buf;  [in irdma_uk_clean_cq()]
  1518  cq_head = (cq_head + 1) % cq->cq_ring.size;  [in irdma_uk_clean_cq()]
  1519  if (!cq_head)  [in irdma_uk_clean_cq()]

verbs.h
   118  u16 cq_head;  [member]

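irdma_uk_clean_cq() walks the ring starting from the software head, picking the extended or normal CQE layout as appropriate, and advances with a modulo; the `if (!cq_head)` at 1519 is the wrap detection, which in this kind of scan is typically where the expected validity polarity flips. The sketch below shows that scan shape for scrubbing entries of a queue being torn down; the CQE layout and the scrubbing action are hypothetical, and only the head-advance/wrap logic mirrors the matched lines.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical CQE: a validity polarity bit plus the owning queue pointer. */
struct example_cqe {
        uint8_t  valid;         /* polarity written by hardware */
        void    *qp_ctx;        /* completion's owning queue */
};

/* Scan every already-posted entry starting at cq_head and scrub the ones
 * that belong to the queue being destroyed; stop at the first entry whose
 * polarity does not match, i.e. one not yet posted on this lap. */
static void clean_cq(struct example_cqe *ring, uint32_t size,
                     uint32_t cq_head, uint8_t polarity, const void *dying_qp)
{
        for (;;) {
                struct example_cqe *cqe = &ring[cq_head];

                if (cqe->valid != polarity)
                        break;                  /* not posted yet */
                if (cqe->qp_ctx == dying_qp)
                        cqe->qp_ctx = NULL;     /* scrub the stale entry */

                cq_head = (cq_head + 1) % size;
                if (!cq_head)
                        polarity ^= 1;          /* wrapped: expected polarity flips */
        }
}
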
/linux/drivers/crypto/hisilicon/

qm.c
   905  if (qp->qp_status.cq_head == qp->cq_depth - 1) {  [in qm_cq_head_update()]
   907  qp->qp_status.cq_head = 0;  [in qm_cq_head_update()]
   909  qp->qp_status.cq_head++;  [in qm_cq_head_update()]
   915  struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;  [in qm_poll_req_cb()]
   923  cqe = qp->cqe + qp->qp_status.cq_head;  [in qm_poll_req_cb()]
   925  qp->qp_status.cq_head, 0);  [in qm_poll_req_cb()]
   932  qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);  [in qm_poll_req_cb()]
  1134  qp_status->cq_head = 0;  [in qm_init_qp_status()]
  2442  struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;  [in hisi_qm_is_q_updated()]
  2449  cqe = qp->cqe + qp->qp_status.cq_head;  [in hisi_qm_is_q_updated()]

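qm_cq_head_update() wraps when the head reaches cq_depth - 1 (checked before the increment), and the matches from qm_poll_req_cb() suggest the CQ doorbell is rung with the current head after each completion and once more after the queue is drained, with a different trailing flag (the 0 vs 1 at lines 925 and 932). What that flag means is not visible here, so the sketch below only models the drain-loop shape and stubs out the pieces the matches do not show.

#include <stdbool.h>
#include <stdint.h>

struct example_qp {
        uint16_t cq_head;
        uint16_t cq_depth;
};

/* Stubs standing in for pieces not visible in the matches. */
static bool cqe_ready(struct example_qp *qp)        { (void)qp; return false; }  /* e.g. a phase check */
static void handle_cqe(struct example_qp *qp)       { (void)qp; }                /* run the request callback */
static void ring_cq_doorbell(struct example_qp *qp, int flag) { (void)qp; (void)flag; }

/* Drain ready entries, advancing and reporting the head per entry (flag 0),
 * then ring the doorbell one final time with the other flag once done. */
static void poll_cq(struct example_qp *qp)
{
        while (cqe_ready(qp)) {
                handle_cqe(qp);
                if (qp->cq_head == qp->cq_depth - 1)
                        qp->cq_head = 0;        /* wrap at the last slot */
                else
                        qp->cq_head++;
                ring_cq_doorbell(qp, 0);
        }
        ring_cq_doorbell(qp, 1);
}
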
/linux/drivers/net/ethernet/cavium/thunder/

nicvf_main.c
   958  u64 cq_head;  [nicvf_poll() local]
   971  cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,  [in nicvf_poll()]
   975  cq->cq_idx, cq_head);  [in nicvf_poll()]