
Searched for refs:cq_head (results 1 – 17 of 17), sorted by relevance

/linux/drivers/scsi/snic/
vnic_cq.c:
  34  unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,  in svnic_cq_init() argument
  46  iowrite32(cq_head, &cq->ctrl->cq_head);  in svnic_cq_init()
  61  iowrite32(0, &cq->ctrl->cq_head);  in svnic_cq_clean()
vnic_cq.h:
  19  u32 cq_head; /* 0x20 */  member
  91  unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
/linux/drivers/scsi/fnic/
vnic_cq.c:
  41  unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,  in vnic_cq_init() argument
  53  iowrite32(cq_head, &cq->ctrl->cq_head);  in vnic_cq_init()
  68  iowrite32(0, &cq->ctrl->cq_head);  in vnic_cq_clean()
vnic_cq.h:
   31  u32 cq_head; /* 0x20 */  member
  103  unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
/linux/drivers/net/ethernet/cisco/enic/
vnic_cq.c:
  39  unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,  in vnic_cq_init() argument
  51  iowrite32(cq_head, &cq->ctrl->cq_head);  in vnic_cq_init()
  68  iowrite32(0, &cq->ctrl->cq_head);  in vnic_cq_clean()
vnic_cq.h:
   22  u32 cq_head; /* 0x20 */  member
  104  unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
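The snic, fnic and enic hits above are three copies of the same Cisco vNIC pattern: cq_head is a 32-bit register in the memory-mapped CQ control block (the /* 0x20 */ comments mark its offset), programmed with iowrite32() by the *_cq_init() functions and written back to 0 by the *_cq_clean() helpers. Below is a minimal userspace sketch of that init/clean flow; the cq_ctrl_model struct and mock_iowrite32() helper are illustrative stand-ins, not the drivers' actual definitions.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the device's CQ control registers; only the two fields the
 * search hits touch are modeled here. */
struct cq_ctrl_model {
    uint32_t cq_head;
    uint32_t cq_tail;
};

/* Stand-in for iowrite32(): a plain volatile store instead of an MMIO write. */
static void mock_iowrite32(uint32_t val, volatile uint32_t *reg)
{
    *reg = val;
}

/* Mirrors what the *_cq_init() functions do with their cq_head/cq_tail
 * arguments: program the device's initial ring positions. */
static void cq_init_model(volatile struct cq_ctrl_model *ctrl,
                          uint32_t cq_head, uint32_t cq_tail)
{
    mock_iowrite32(cq_head, &ctrl->cq_head);
    mock_iowrite32(cq_tail, &ctrl->cq_tail);  /* assumed; only the head write appears in the hits */
}

/* Mirrors the *_cq_clean() hits: the head register goes back to 0. */
static void cq_clean_model(volatile struct cq_ctrl_model *ctrl)
{
    mock_iowrite32(0, &ctrl->cq_head);
}

int main(void)
{
    struct cq_ctrl_model ctrl = { 0 };

    cq_init_model(&ctrl, 0, 0);
    cq_clean_model(&ctrl);
    printf("cq_head=%u cq_tail=%u\n", ctrl.cq_head, ctrl.cq_tail);
    return 0;
}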
/linux/io_uring/
fdinfo.c:
  102  unsigned int cq_head = READ_ONCE(r->cq.head);  in io_uring_show_fdinfo() local
  128  seq_printf(m, "CqHead:\t%u\n", cq_head);  in io_uring_show_fdinfo()
  164  seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);  in io_uring_show_fdinfo()
  165  cq_entries = min(cq_tail - cq_head, ctx->cq_entries);  in io_uring_show_fdinfo()
  167  unsigned int entry = i + cq_head;  in io_uring_show_fdinfo()
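The fdinfo.c hits show how io_uring reports completion-ring state: cq.head and cq.tail are free-running unsigned counters, so the number of pending CQEs is simply cq_tail - cq_head (unsigned arithmetic keeps this correct across 32-bit wrap-around), clamped to the ring size before individual entries are dumped, and each entry's array slot is the free-running index masked down to the ring. A small sketch of that arithmetic with made-up values; it models the index math only, not the io_uring structures:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t ring_entries = 256;        /* example CQ ring size (power of two) */
    uint32_t cq_head = 0xfffffff0u;     /* counters just about to wrap ...     */
    uint32_t cq_tail = 0x00000010u;     /* ... tail has already wrapped        */

    /* Pending CQEs: unsigned subtraction is wrap-safe. */
    uint32_t pending = cq_tail - cq_head;                   /* 32 */
    uint32_t to_dump = pending < ring_entries ? pending : ring_entries;

    printf("CQEs: %u (dumping %u)\n", pending, to_dump);

    for (uint32_t i = 0; i < to_dump; i++) {
        uint32_t entry = i + cq_head;                       /* free-running index */
        uint32_t slot = entry & (ring_entries - 1);         /* slot in cqes[]     */
        printf("entry 0x%08x -> slot %u\n", entry, slot);
    }
    return 0;
}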
/linux/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_txrx.c:
   50  cq->cq_head = (status >> 20) & 0xFFFFF;  in otx2_nix_cq_op_status()
   51  if (cq->cq_tail < cq->cq_head)  in otx2_nix_cq_op_status()
   52  cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) +  in otx2_nix_cq_op_status()
   55  cq->pend_cqe = cq->cq_tail - cq->cq_head;  in otx2_nix_cq_op_status()
   64  cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head);  in otx2_get_next_cqe()
   68  cq->cq_head++;  in otx2_get_next_cqe()
   69  cq->cq_head &= (cq->cqe_cnt - 1);  in otx2_get_next_cqe()
  409  cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);  in otx2_rx_napi_handler()
  416  cq->cq_head++;  in otx2_rx_napi_handler()
  417  cq->cq_head &= (cq->cqe_cnt - 1);  in otx2_rx_napi_handler()
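The octeontx2 hits combine two pieces: otx2_nix_cq_op_status() extracts the 20-bit head field from the hardware status value and computes the number of pending CQEs, with a special case for the tail having wrapped past the head, while otx2_get_next_cqe() and otx2_rx_napi_handler() advance the software cq_head by incrementing and masking with cqe_cnt - 1, since the ring size is a power of two. A standalone model of just that arithmetic, with illustrative values:

#include <stdint.h>
#include <stdio.h>

/* Models the pending-CQE computation in otx2_nix_cq_op_status(). */
static uint32_t pend_cqe(uint32_t cq_head, uint32_t cq_tail, uint32_t cqe_cnt)
{
    if (cq_tail < cq_head)                      /* tail wrapped past head */
        return (cqe_cnt - cq_head) + cq_tail;
    return cq_tail - cq_head;
}

int main(void)
{
    uint32_t cqe_cnt = 1024;    /* ring size, a power of two */
    uint32_t cq_head = 1000;
    uint32_t cq_tail = 8;       /* hardware tail has wrapped */
    uint32_t pending = pend_cqe(cq_head, cq_tail, cqe_cnt);

    printf("pending CQEs: %u\n", pending);      /* (1024 - 1000) + 8 = 32 */

    /* Models the head advance in otx2_get_next_cqe(): increment, then
     * mask to stay inside the power-of-two ring. */
    while (pending--) {
        /* process CQE_ADDR(cq, cq_head) here */
        cq_head++;
        cq_head &= cqe_cnt - 1;
    }
    printf("cq_head after processing: %u\n", cq_head);  /* 8, caught up with the tail */
    return 0;
}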
/linux/drivers/dma/
hisi_dma.c:
  152  u32 cq_head;  member
  472  chan->cq_head = 0;  in hisi_dma_free_chan_resources()
  738  cqe = chan->cq + chan->cq_head;  in hisi_dma_irq()
  741  chan->cq_head = (chan->cq_head + 1) % hdma_dev->chan_depth;  in hisi_dma_irq()
  743  chan->qp_num, chan->cq_head);  in hisi_dma_irq()
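hisi_dma keeps cq_head as a plain software index into the channel's CQE ring and advances it in the interrupt handler with a modulo on the queue depth, which need not be a power of two, so no mask trick applies; the index is also reset to 0 when channel resources are freed. A minimal sketch of that consume-and-advance step; the structures are reduced to what the arithmetic needs and are not the driver's real types:

#include <stdint.h>
#include <stdio.h>

struct cqe_model { uint32_t data; };    /* placeholder for the real CQE */

struct chan_model {
    struct cqe_model *cq;   /* CQE ring */
    uint32_t cq_head;       /* software consumer index */
    uint32_t depth;         /* queue depth; not required to be a power of two */
};

/* Models the step in hisi_dma_irq(): pick up the CQE at cq_head, then
 * advance the index modulo the queue depth. */
static struct cqe_model *consume_one(struct chan_model *chan)
{
    struct cqe_model *cqe = chan->cq + chan->cq_head;

    chan->cq_head = (chan->cq_head + 1) % chan->depth;
    return cqe;
}

int main(void)
{
    struct cqe_model ring[30] = { { 0 } };  /* depth of 30: deliberately not a power of two */
    struct chan_model chan = { .cq = ring, .cq_head = 29, .depth = 30 };

    consume_one(&chan);
    printf("cq_head wrapped to %u\n", chan.cq_head);    /* 0 */
    return 0;
}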
/linux/drivers/net/ethernet/fungible/funeth/
funeth_rx.c:
  350  if (unlikely(q->cq_head == q->cq_mask)) {  in advance_cq()
  351  q->cq_head = 0;  in advance_cq()
  355  q->cq_head++;  in advance_cq()
  515  u32 cq_db_val = q->cq_head;  in fun_rxq_napi_poll()
  716  q->cq_head = 0;  in fun_rxq_create_dev()
funeth_txrx.h:
  170  unsigned int cq_head; /* CQ head index */  member
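funeth's advance_cq() wraps the head by comparing it against cq_mask (ring size minus one) instead of masking after an increment, and fun_rxq_napi_poll() later uses the current cq_head directly as the value written to the CQ doorbell. The sketch below models only those two hits; the real advance_cq() does additional bookkeeping on the wrap path (the lines between 351 and 355 are elided from the hit), which is omitted here.

#include <stdint.h>
#include <stdio.h>

struct rxq_model {
    unsigned int cq_head;   /* CQ head index */
    unsigned int cq_mask;   /* ring size - 1 */
};

/* Models the advance_cq() hits: compare-against-mask wrap rather than
 * increment-then-mask. The extra work the driver does on wrap is omitted. */
static void advance_cq_model(struct rxq_model *q)
{
    if (q->cq_head == q->cq_mask)
        q->cq_head = 0;
    else
        q->cq_head++;
}

int main(void)
{
    struct rxq_model q = { .cq_head = 254, .cq_mask = 255 };

    advance_cq_model(&q);   /* 255 */
    advance_cq_model(&q);   /* wraps to 0 */

    /* As in fun_rxq_napi_poll(): the head index doubles as the doorbell
     * value reported back to the device. */
    uint32_t cq_db_val = q.cq_head;
    printf("cq_head=%u, doorbell value=%u\n", q.cq_head, cq_db_val);
    return 0;
}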
/linux/drivers/nvme/host/
apple.c:
  145  u16 cq_head;  member
  571  struct nvme_completion *hcqe = &q->cqes[q->cq_head];  in apple_nvme_cqe_pending()
  609  u32 tmp = q->cq_head + 1;  in apple_nvme_update_cq_head()
  612  q->cq_head = 0;  in apple_nvme_update_cq_head()
  615  q->cq_head = tmp;  in apple_nvme_update_cq_head()
  632  apple_nvme_handle_cqe(q, iob, q->cq_head);  in apple_nvme_poll_cq()
  637  writel(q->cq_head, q->cq_db);  in apple_nvme_poll_cq()
  973  q->cq_head = 0;  in apple_nvme_init_queue()
pci.c:
   207  u16 cq_head;  member
  1084  struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];  in nvme_cqe_pending()
  1091  u16 head = nvmeq->cq_head;  in nvme_ring_cq_doorbell()
  1141  u32 tmp = nvmeq->cq_head + 1;  in nvme_update_cq_head()
  1144  nvmeq->cq_head = 0;  in nvme_update_cq_head()
  1147  nvmeq->cq_head = tmp;  in nvme_update_cq_head()
  1163  nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head);  in nvme_poll_cq()
  1676  nvmeq->cq_head = 0;  in nvme_alloc_queue()
  1711  nvmeq->cq_head = 0;  in nvme_init_queue()
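The apple.c and pci.c hits follow the NVMe completion-queue convention: cq_head is the driver's consumer index, a new CQE is detected by checking the phase tag of the entry at cq_head, the index wraps to 0 when it reaches the queue depth (both update helpers also flip the phase they expect at that point), and the updated head is written to the CQ head doorbell so the controller may reuse those slots. The reduced sketch below models that loop; the CQE layout and helper names are simplified stand-ins, not the drivers' definitions.

#include <stdint.h>
#include <stdio.h>

struct cqe_model {
    uint16_t status;        /* bit 0 carries the NVMe phase tag */
    uint16_t command_id;
};

struct cq_model {
    struct cqe_model *cqes;
    uint16_t cq_head;       /* consumer index */
    uint16_t q_depth;
    uint8_t cq_phase;       /* phase value the driver expects for new CQEs */
};

/* Analogous to nvme_cqe_pending()/apple_nvme_cqe_pending(): a CQE is new
 * when its phase tag matches the phase the driver currently expects. */
static int cqe_pending(const struct cq_model *q)
{
    return (q->cqes[q->cq_head].status & 1) == q->cq_phase;
}

/* Analogous to nvme_update_cq_head(): wrap at q_depth and flip the expected
 * phase so stale entries from the previous lap are not re-read. */
static void update_cq_head(struct cq_model *q)
{
    uint32_t tmp = q->cq_head + 1;

    if (tmp == q->q_depth) {
        q->cq_head = 0;
        q->cq_phase ^= 1;
    } else {
        q->cq_head = tmp;
    }
}

int main(void)
{
    struct cqe_model ring[4] = {
        { .status = 1, .command_id = 7 },   /* one completion posted by the "device" */
    };
    struct cq_model q = { .cqes = ring, .q_depth = 4, .cq_phase = 1 };

    while (cqe_pending(&q)) {
        printf("completion for command %u\n", ring[q.cq_head].command_id);
        update_cq_head(&q);
    }
    /* A real driver would now write cq_head to the CQ doorbell, as the
     * hit at apple.c:637 shows: writel(q->cq_head, q->cq_db). */
    printf("cq_head=%u cq_phase=%u\n", q.cq_head, q.cq_phase);
    return 0;
}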
/linux/drivers/infiniband/hw/irdma/
uk.c:
  1495  u32 cq_head;  in irdma_uk_clean_cq() local
  1498  cq_head = cq->cq_ring.head;  in irdma_uk_clean_cq()
  1502  cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;  in irdma_uk_clean_cq()
  1504  cqe = cq->cq_base[cq_head].buf;  in irdma_uk_clean_cq()
  1518  cq_head = (cq_head + 1) % cq->cq_ring.size;  in irdma_uk_clean_cq()
  1519  if (!cq_head)  in irdma_uk_clean_cq()
verbs.h:
  118  u16 cq_head;  member
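irdma_uk_clean_cq() walks the completion ring starting at the current head, reading either an extended or a base CQE depending on how the CQ was created, and advances the index with a modulo on the ring size, noting when it wraps back to 0. The stripped-down model below shows only that walk shape; the CQE layouts, the extended flag, and the single-lap stop condition are simplifications, not the function's real types or exit logic.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct base_cqe_model { uint64_t buf[4]; }; /* placeholder CQE layouts */
struct ext_cqe_model  { uint64_t buf[8]; };

struct cq_model {
    void *cq_base;      /* base of the CQE array (either layout) */
    uint32_t head;
    uint32_t size;
    bool extended;      /* stand-in for the driver's CQE-size choice */
};

static void walk_cq_once(struct cq_model *cq)
{
    uint32_t cq_head = cq->head;
    uint32_t seen = 0;

    do {
        uint64_t *cqe;

        if (cq->extended)
            cqe = ((struct ext_cqe_model *)cq->cq_base)[cq_head].buf;
        else
            cqe = ((struct base_cqe_model *)cq->cq_base)[cq_head].buf;
        (void)cqe;      /* the real function inspects/patches the CQE here */
        seen++;

        cq_head = (cq_head + 1) % cq->size;     /* modulo wrap, as in the hit */
    } while (cq_head != cq->head);              /* simplified stop: one full lap */

    printf("visited %u of %u entries\n", seen, cq->size);
}

int main(void)
{
    struct base_cqe_model ring[8] = { 0 };
    struct cq_model cq = { .cq_base = ring, .head = 5, .size = 8, .extended = false };

    walk_cq_once(&cq);
    return 0;
}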
/linux/drivers/crypto/hisilicon/
qm.c:
   905  if (qp->qp_status.cq_head == qp->cq_depth - 1) {  in qm_cq_head_update()
   907  qp->qp_status.cq_head = 0;  in qm_cq_head_update()
   909  qp->qp_status.cq_head++;  in qm_cq_head_update()
   915  struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;  in qm_poll_req_cb()
   923  cqe = qp->cqe + qp->qp_status.cq_head;  in qm_poll_req_cb()
   925  qp->qp_status.cq_head, 0);  in qm_poll_req_cb()
   932  qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);  in qm_poll_req_cb()
  1134  qp_status->cq_head = 0;  in qm_init_qp_status()
  2442  struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;  in hisi_qm_is_q_updated()
  2449  cqe = qp->cqe + qp->qp_status.cq_head;  in hisi_qm_is_q_updated()
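The HiSilicon QM driver keeps cq_head inside the per-queue qp_status, wraps it by comparing against cq_depth - 1, and reports the consumed position back to the hardware through the CQ doorbell (line 932 shows the qm_db() call with QM_DOORBELL_CMD_CQ). The sketch below models that update loop; the phase flip on wrap is an assumption about the line elided between 905 and 907, and the doorbell is only described in a comment.

#include <stdint.h>
#include <stdio.h>

struct qp_status_model {
    uint16_t cq_head;   /* consumer index into the qp's CQE array */
    uint8_t cqc_phase;  /* phase value expected next (assumed field) */
};

/* Models qm_cq_head_update(): wrap by comparing against the last slot.
 * The phase flip is an assumption about the elided line in the hit. */
static void cq_head_update(struct qp_status_model *st, uint16_t cq_depth)
{
    if (st->cq_head == cq_depth - 1) {
        st->cqc_phase = !st->cqc_phase;
        st->cq_head = 0;
    } else {
        st->cq_head++;
    }
}

int main(void)
{
    uint16_t cq_depth = 4;
    struct qp_status_model st = { .cq_head = 0, .cqc_phase = 1 };

    for (int i = 0; i < 5; i++) {
        /* qm_poll_req_cb() would inspect qp->cqe + st.cq_head here, run the
         * request callback, advance the head ... */
        cq_head_update(&st, cq_depth);
        /* ... and ring the doorbell with the new position, roughly as the
         * hit shows: qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, cq_head, prio). */
        printf("cq_head=%u phase=%u\n", st.cq_head, st.cqc_phase);
    }
    return 0;
}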
/linux/drivers/net/ethernet/cavium/thunder/
nicvf_main.c:
  958  u64 cq_head;  in nicvf_poll() local
  971  cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,  in nicvf_poll()
  975  cq->cq_idx, cq_head);  in nicvf_poll()