Home
last modified time | relevance | path

Searched refs: q_no (Results 1 – 24 of 24) sorted by relevance

/linux/drivers/net/ethernet/cavium/liquidio/
H A Dcn23xx_vf_device.c54 u32 q_no; in cn23xx_vf_reset_io_queues() local
57 for (q_no = 0; q_no < num_queues; q_no++) { in cn23xx_vf_reset_io_queues()
60 CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)); in cn23xx_vf_reset_io_queues()
62 octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), in cn23xx_vf_reset_io_queues()
67 for (q_no = 0; q_no < num_queues; q_no++) { in cn23xx_vf_reset_io_queues()
69 CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)); in cn23xx_vf_reset_io_queues()
74 oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no))); in cn23xx_vf_reset_io_queues()
80 q_no); in cn23xx_vf_reset_io_queues()
85 octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), in cn23xx_vf_reset_io_queues()
89 oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no))); in cn23xx_vf_reset_io_queues()
[all …]
H A Dcn23xx_pf_device.c180 u32 q_no, srn, ern; in cn23xx_reset_io_queues() local
190 for (q_no = srn; q_no < ern; q_no++) { in cn23xx_reset_io_queues()
192 d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); in cn23xx_reset_io_queues()
194 octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64); in cn23xx_reset_io_queues()
198 for (q_no = srn; q_no < ern; q_no++) { in cn23xx_reset_io_queues()
200 CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); in cn23xx_reset_io_queues()
205 oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no))); in cn23xx_reset_io_queues()
210 q_no); in cn23xx_reset_io_queues()
215 octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), in cn23xx_reset_io_queues()
219 oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no))); in cn23xx_reset_io_queues()
[all …]
H A Docteon_droq.c194 int octeon_delete_droq(struct octeon_device *oct, u32 q_no) in octeon_delete_droq() argument
196 struct octeon_droq *droq = oct->droq[q_no]; in octeon_delete_droq()
198 dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no); in octeon_delete_droq()
208 oct->io_qmask.oq &= ~(1ULL << q_no); in octeon_delete_droq()
209 vfree(oct->droq[q_no]); in octeon_delete_droq()
210 oct->droq[q_no] = NULL; in octeon_delete_droq()
218 u32 q_no, in octeon_init_droq() argument
228 dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no); in octeon_init_droq()
230 droq = oct->droq[q_no]; in octeon_init_droq()
234 droq->q_no = q_no; in octeon_init_droq()
[all …]
H A Docteon_droq.h248 u32 q_no; member
338 u32 q_no,
350 int octeon_delete_droq(struct octeon_device *oct_dev, u32 q_no);
365 u32 q_no,
376 int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no);
400 int octeon_create_droq(struct octeon_device *oct, u32 q_no,
410 int octeon_enable_irq(struct octeon_device *oct, u32 q_no);
H A Dlio_core.c174 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; in liquidio_set_feature()
441 struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no]; in octeon_schedule_rxq_oom_work()
452 int q_no = wk->ctxul; in octnet_poll_check_rxq_oom_status() local
453 struct octeon_droq *droq = oct->droq[q_no]; in octnet_poll_check_rxq_oom_status()
467 int q, q_no; in setup_rx_oom_poll_fn() local
470 q_no = lio->linfo.rxpciq[q].s.q_no; in setup_rx_oom_poll_fn()
471 wq = &lio->rxq_status_wq[q_no]; in setup_rx_oom_poll_fn()
482 wq->wk.ctxul = q_no; in setup_rx_oom_poll_fn()
494 int q_no; in cleanup_rx_oom_poll_fn() local
496 for (q_no = 0; q_no < oct->num_oqs; q_no++) { in cleanup_rx_oom_poll_fn()
[all …]
H A Docteon_mailbox.c65 mbox->mbox_req.q_no = mbox->q_no; in octeon_mbox_read()
77 mbox->mbox_resp.q_no = mbox->q_no; in octeon_mbox_read()
134 struct octeon_mbox *mbox = oct->mbox[mbox_cmd->q_no]; in octeon_mbox_write()
262 mbox->q_no); in octeon_mbox_process_cmd()
263 pcie_flr(oct->sriov_info.dpiring_to_vfpcidev_lut[mbox->q_no]); in octeon_mbox_process_cmd()
355 int octeon_mbox_cancel(struct octeon_device *oct, int q_no) in octeon_mbox_cancel() argument
357 struct octeon_mbox *mbox = oct->mbox[q_no]; in octeon_mbox_cancel()
H A Docteon_device.c901 txpciq.s.q_no = iq_no; in octeon_setup_instr_queues()
962 u32 q_no; in octeon_set_io_queues_off() local
968 for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) { in octeon_set_io_queues_off()
970 oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)); in octeon_set_io_queues_off()
976 oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); in octeon_set_io_queues_off()
982 q_no); in octeon_set_io_queues_off()
988 CN23XX_SLI_IQ_PKT_CONTROL64(q_no), in octeon_set_io_queues_off()
992 oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); in octeon_set_io_queues_off()
995 "unable to reset qno %u\n", q_no); in octeon_set_io_queues_off()
1005 u32 q_no, in octeon_set_droq_pkt_op() argument
[all …]
H A Docteon_mailbox.h68 u32 q_no; member
91 u32 q_no; member
123 int octeon_mbox_cancel(struct octeon_device *oct, int q_no);
H A Dlio_ethtool.c483 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; in lio_send_queue_count_update()
718 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; in octnet_gpio_access()
744 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; in octnet_id_active()
787 sc->iq_no = lio->linfo.txpciq[0].s.q_no; in octnet_mdio45_access()
1074 lio->txq = lio->linfo.txpciq[0].s.q_no; in lio_23xx_reconfigure_queue_count()
1075 lio->rxq = lio->linfo.rxpciq[0].s.q_no; in lio_23xx_reconfigure_queue_count()
1397 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; in lio_set_pauseparam()
1762 j = lio->linfo.txpciq[vj].s.q_no; in lio_vf_get_ethtool_stats()
1804 j = lio->linfo.rxpciq[vj].s.q_no; in lio_vf_get_ethtool_stats()
2021 sc->iq_no = lio->linfo.txpciq[0].s.q_no; in octnet_get_intrmod_cfg()
[all …]
H A Dcn66xx_regs.h473 #define CN6XXX_DPI_DMA_ENG_ENB(q_no) \ argument
474 (CN6XXX_DPI_DMA_ENG0_ENB + ((q_no) * 8))
478 #define CN6XXX_DPI_DMA_ENG_BUF(q_no) \ argument
479 (CN6XXX_DPI_DMA_ENG0_BUF + ((q_no) * 8))
H A Docteon_nic.h85 u32 q_no; member
112 static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no) in octnet_iq_is_full() argument
114 return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending) in octnet_iq_is_full()
115 >= (oct->instr_queue[q_no]->max_count - 2)); in octnet_iq_is_full()
H A Dlio_main.c155 int q_no; in octeon_droq_bh() local
161 for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) { in octeon_droq_bh()
162 if (!(oct->io_qmask.oq & BIT_ULL(q_no))) in octeon_droq_bh()
164 reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no], in octeon_droq_bh()
166 lio_enable_irq(oct->droq[q_no], NULL); in octeon_droq_bh()
172 int adjusted_q_no = q_no + oct->sriov_info.pf_srn; in octeon_droq_bh()
465 lio->oct_dev->num_iqs].s.q_no; in check_txq_status()
631 sc->iq_no = lio->linfo.txpciq[0].s.q_no; in lio_sync_octeon_time()
1170 sc->iq_no = lio->linfo.txpciq[0].s.q_no; in send_rx_ctrl_cmd()
1291 lio->linfo.rxpciq[j].s.q_no); in liquidio_stop_nic_module()
[all …]
H A Dlio_vf_main.c623 sc->iq_no = lio->linfo.txpciq[0].s.q_no; in send_rx_ctrl_cmd()
732 lio->linfo.rxpciq[j].s.q_no); in liquidio_stop_nic_module()
1055 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; in liquidio_set_uc_list()
1104 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; in liquidio_set_mcast_list()
1147 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; in liquidio_set_mac()
1189 iq_no = lio->linfo.txpciq[i].s.q_no; in liquidio_get_stats64()
1205 oq_no = lio->linfo.rxpciq[i].s.q_no; in liquidio_get_stats64()
1369 sc->iq_no = ndata->q_no; in send_nic_timestamp_pkt()
1418 iq_no = lio->linfo.txpciq[q_idx].s.q_no; in liquidio_xmit()
1445 ndata.q_no = iq_no; in liquidio_xmit()
[all …]
H A Dliquidio_common.h728 u64 q_no:8; member
744 u64 q_no:8;
756 u64 q_no:8; member
760 u64 q_no:8;
H A Docteon_device.h844 int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no);
846 int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no);
858 void octeon_set_droq_pkt_op(struct octeon_device *oct, u32 q_no, u32 enable);
H A Dcn23xx_pf_regs.h558 #define CN23XX_DPI_DMA_REQQ_CTL(q_no) \ argument
559 (CN23XX_DPI_DMA_REQQ0_CTL + ((q_no) * 8))
H A Docteon_nic.c90 return octeon_send_command(oct, ndata->q_no, ring_doorbell, &ndata->cmd, in octnet_send_nic_data_pkt()
H A Drequest_manager.c53 u32 iq_no = (u32)txpciq.s.q_no; in octeon_init_instr_queue()
198 u32 iq_no = (u32)txpciq.s.q_no; in octeon_setup_iq()
H A Docteon_network.h577 qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no; in wake_txqs()
/linux/drivers/net/ethernet/marvell/octeon_ep/
H A Doctep_rx.c52 oq->q_no); in octep_oq_fill_ring_buffers()
101 oq->q_no); in octep_oq_refill()
125 static int octep_setup_oq(struct octep_device *oct, int q_no) in octep_setup_oq() argument
133 oct->oq[q_no] = oq; in octep_setup_oq()
138 oq->q_no = q_no; in octep_setup_oq()
139 oq->stats = &oct->stats_oq[q_no]; in octep_setup_oq()
160 "Failed to allocate DMA memory for OQ-%d !!\n", q_no); in octep_setup_oq()
167 "Failed to allocate buffer info for OQ-%d\n", q_no); in octep_setup_oq()
175 if (oct->hw_ops.setup_oq_regs(oct, q_no)) in octep_setup_oq()
193 oct->oq[q_no] = NULL; in octep_setup_oq()
[all …]
H A Doctep_main.c569 netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no); in octep_update_pkt()
825 netif_stop_subqueue(iq->netdev, iq->q_no); in octep_iq_full_check()
839 netif_start_subqueue(iq->netdev, iq->q_no); in octep_iq_full_check()
870 u16 q_no, wi; in octep_start_xmit() local
875 q_no = skb_get_queue_mapping(skb); in octep_start_xmit()
876 if (q_no >= oct->num_iqs) { in octep_start_xmit()
877 netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no); in octep_start_xmit()
878 q_no = q_no % oct->num_iqs; in octep_start_xmit()
881 iq = oct->iq[q_no]; in octep_start_xmit()
/linux/drivers/net/ethernet/marvell/octeon_ep_vf/
H A Doctep_vf_rx.c52 oq->q_no); in octep_vf_oq_fill_ring_buffers()
101 oq->q_no); in octep_vf_oq_refill()
125 static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no) in octep_vf_setup_oq() argument
133 oct->oq[q_no] = oq; in octep_vf_setup_oq()
138 oq->q_no = q_no; in octep_vf_setup_oq()
139 oq->stats = &oct->stats_oq[q_no]; in octep_vf_setup_oq()
160 "Failed to allocate DMA memory for OQ-%d !!\n", q_no); in octep_vf_setup_oq()
168 "Failed to allocate buffer info for OQ-%d\n", q_no); in octep_vf_setup_oq()
176 if (oct->hw_ops.setup_oq_regs(oct, q_no)) in octep_vf_setup_oq()
194 oct->oq[q_no] = NULL; in octep_vf_setup_oq()
[all …]
H A Doctep_vf_main.c302 netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no); in octep_vf_update_pkt()
581 ret = netif_subqueue_maybe_stop(iq->netdev, iq->q_no, IQ_INSTR_SPACE(iq), in octep_vf_iq_full_check()
625 u16 q_no, wi; in octep_vf_start_xmit() local
630 q_no = skb_get_queue_mapping(skb); in octep_vf_start_xmit()
631 if (q_no >= oct->num_iqs) { in octep_vf_start_xmit()
632 netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no); in octep_vf_start_xmit()
633 q_no = q_no % oct->num_iqs; in octep_vf_start_xmit()
636 iq = oct->iq[q_no]; in octep_vf_start_xmit()
/linux/drivers/scsi/
H A Dadvansys.c244 #define ASC_QNO_TO_QADDR(q_no) ((ASC_QADR_BEG)+((int)(q_no) << 6)) argument
248 uchar q_no; member
292 uchar q_no; member
344 uchar q_no; member
6636 scsiq->q_no = (uchar)(_val >> 8); in _AscCopyLramScsiDoneQ()
7871 static int AscPutReadyQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no) in AscPutReadyQueue() argument
7893 q_addr = ASC_QNO_TO_QADDR(q_no); in AscPutReadyQueue()
7909 q_no << 8) | (ushort)QS_READY)); in AscPutReadyQueue()
7914 AscPutReadySgListQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no) in AscPutReadySgListQueue() argument
7944 q_addr = ASC_QNO_TO_QADDR(q_no); in AscPutReadySgListQueue()
[all …]