/linux/drivers/net/ethernet/cavium/liquidio/
cn23xx_vf_device.c
  cn23xx_vf_reset_io_queues():
    54   u32 q_no;    (local)
    57   for (q_no = 0; q_no < num_queues; q_no++) {
    60   CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
    62   octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
    67   for (q_no = 0; q_no < num_queues; q_no++) {
    69   CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
    74   oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)));
    80   q_no);
    85   octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
    89   oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)));
    [all …]
cn23xx_pf_device.c
  cn23xx_reset_io_queues():
    349   u32 q_no, srn, ern;    (local)
    359   for (q_no = srn; q_no < ern; q_no++) {
    361   d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
    363   octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64);
    367   for (q_no = srn; q_no < ern; q_no++) {
    369   CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
    374   oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
    379   q_no);
    384   octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
    388   oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
    [all …]
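Both of the reset paths above follow the same pattern: loop over the queues, read each queue's PKT_CONTROL CSR, OR in the reset bit, write it back, then loop again and poll until the bit drops. The sketch below is a minimal, self-contained C rendering of that read-modify-write-and-poll flow; the register macro, bit position, csr_read64()/csr_write64() helpers, and the fake in-memory CSR array are stand-ins, not the real CN23XX definitions.

#include <stdint.h>
#include <stdio.h>

/* Stand-in register map and accessors (hypothetical, not the real
 * CN23XX layout): one 64-bit control CSR per input queue. */
#define IQ_PKT_CONTROL64(q_no)  ((uint64_t)(q_no))
#define IQ_PKT_CONTROL_RST      (1ULL << 0)
#define MAX_QUEUES              64

static uint64_t fake_csr[MAX_QUEUES];

static uint64_t csr_read64(uint64_t addr)
{
	return fake_csr[addr];
}

static void csr_write64(uint64_t addr, uint64_t val)
{
	/* crude hardware model: the reset bit self-clears on write */
	fake_csr[addr] = val & ~IQ_PKT_CONTROL_RST;
}

/* Read-modify-write the reset bit into every queue's control CSR,
 * then poll each queue until the bit drops, with a bounded retry. */
static int reset_io_queues(uint32_t num_queues)
{
	uint64_t d64;
	uint32_t q_no;
	int loops;

	for (q_no = 0; q_no < num_queues; q_no++) {
		d64 = csr_read64(IQ_PKT_CONTROL64(q_no));
		d64 |= IQ_PKT_CONTROL_RST;
		csr_write64(IQ_PKT_CONTROL64(q_no), d64);
	}

	for (q_no = 0; q_no < num_queues; q_no++) {
		loops = 1000;
		while ((csr_read64(IQ_PKT_CONTROL64(q_no)) & IQ_PKT_CONTROL_RST) &&
		       --loops)
			;	/* a real driver would relax/delay here */
		if (!loops) {
			fprintf(stderr, "queue %u did not leave reset\n", q_no);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	return reset_io_queues(4);
}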
octeon_droq.c
  octeon_delete_droq():
    194   int octeon_delete_droq(struct octeon_device *oct, u32 q_no)    (argument)
    196   struct octeon_droq *droq = oct->droq[q_no];
    198   dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
    208   oct->io_qmask.oq &= ~(1ULL << q_no);
    209   vfree(oct->droq[q_no]);
    210   oct->droq[q_no] = NULL;
  octeon_init_droq():
    218   u32 q_no,    (argument)
    228   dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
    230   droq = oct->droq[q_no];
    234   droq->q_no = q_no;
    [all …]
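The octeon_droq.c matches show the bookkeeping around an output queue's lifetime: each live queue owns a slot in oct->droq[] and a bit in the 64-bit io_qmask.oq, and the delete path clears the bit, frees the queue, and NULLs the slot. A stripped-down userspace sketch of that bookkeeping follows; the structure and function names are hypothetical simplifications of struct octeon_device/struct octeon_droq, not the driver's real types.

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define MAX_OQS 64

/* Only the fields needed to show the q_no bookkeeping. */
struct droq {
	uint32_t q_no;
	/* descriptor ring, buffers, ... elided */
};

struct device_state {
	struct droq *droq[MAX_OQS];
	uint64_t oq_mask;	/* bit q_no set => queue q_no exists */
};

/* Mirrors the delete path: drop the queue from the active mask,
 * free its state, and clear the slot so later lookups see NULL. */
static void delete_droq(struct device_state *dev, uint32_t q_no)
{
	struct droq *droq = dev->droq[q_no];

	if (!droq)
		return;

	dev->oq_mask &= ~(1ULL << q_no);
	free(droq);
	dev->droq[q_no] = NULL;
}

static int init_droq(struct device_state *dev, uint32_t q_no)
{
	struct droq *droq;

	if (dev->oq_mask & (1ULL << q_no))
		return -1;	/* already initialised */

	droq = calloc(1, sizeof(*droq));
	if (!droq)
		return -1;

	droq->q_no = q_no;
	dev->droq[q_no] = droq;
	dev->oq_mask |= 1ULL << q_no;
	return 0;
}

int main(void)
{
	struct device_state dev = { { 0 }, 0 };

	init_droq(&dev, 3);
	printf("mask after init:   0x%llx\n", (unsigned long long)dev.oq_mask);
	delete_droq(&dev, 3);
	printf("mask after delete: 0x%llx\n", (unsigned long long)dev.oq_mask);
	return 0;
}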
octeon_droq.h
    248   u32 q_no;    (member)
    338   u32 q_no,
    350   int octeon_delete_droq(struct octeon_device *oct_dev, u32 q_no);
    365   u32 q_no,
    376   int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no);
    400   int octeon_create_droq(struct octeon_device *oct, u32 q_no,
    410   int octeon_enable_irq(struct octeon_device *oct, u32 q_no);
lio_core.c
  liquidio_set_feature():
    176   nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  octeon_schedule_rxq_oom_work():
    443   struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no];
  octnet_poll_check_rxq_oom_status():
    454   int q_no = wk->ctxul;    (local)
    455   struct octeon_droq *droq = oct->droq[q_no];
  setup_rx_oom_poll_fn():
    469   int q, q_no;    (local)
    472   q_no = lio->linfo.rxpciq[q].s.q_no;
    473   wq = &lio->rxq_status_wq[q_no];
    484   wq->wk.ctxul = q_no;
  cleanup_rx_oom_poll_fn():
    496   int q_no;    (local)
    498   for (q_no = 0; q_no < oct->num_oqs; q_no++) {
    [all …]
octeon_mailbox.c
  octeon_mbox_read():
    65    mbox->mbox_req.q_no = mbox->q_no;
    77    mbox->mbox_resp.q_no = mbox->q_no;
  octeon_mbox_write():
    134   struct octeon_mbox *mbox = oct->mbox[mbox_cmd->q_no];
  octeon_mbox_process_cmd():
    262   mbox->q_no);
    263   pcie_flr(oct->sriov_info.dpiring_to_vfpcidev_lut[mbox->q_no]);
  octeon_mbox_cancel():
    355   int octeon_mbox_cancel(struct octeon_device *oct, int q_no)    (argument)
    357   struct octeon_mbox *mbox = oct->mbox[q_no];
octeon_device.c
  octeon_setup_instr_queues():
    901   txpciq.s.q_no = iq_no;
  octeon_set_io_queues_off():
    962   u32 q_no;    (local)
    968   for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) {
    970   oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
    976   oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
    982   q_no);
    988   CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
    992   oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
    995   "unable to reset qno %u\n", q_no);
  octeon_set_droq_pkt_op():
    1005  u32 q_no,    (argument)
    [all …]
octeon_mailbox.h
    68    u32 q_no;    (member)
    91    u32 q_no;    (member)
    123   int octeon_mbox_cancel(struct octeon_device *oct, int q_no);
lio_ethtool.c
  lio_send_queue_count_update():
    483   nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  octnet_gpio_access():
    718   nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  octnet_id_active():
    744   nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  octnet_mdio45_access():
    787   sc->iq_no = lio->linfo.txpciq[0].s.q_no;
  lio_23xx_reconfigure_queue_count():
    1074  lio->txq = lio->linfo.txpciq[0].s.q_no;
    1075  lio->rxq = lio->linfo.rxpciq[0].s.q_no;
  lio_set_pauseparam():
    1397  nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  lio_vf_get_ethtool_stats():
    1762  j = lio->linfo.txpciq[vj].s.q_no;
    1804  j = lio->linfo.rxpciq[vj].s.q_no;
  octnet_get_intrmod_cfg():
    2021  sc->iq_no = lio->linfo.txpciq[0].s.q_no;
    [all …]
octeon_nic.h
    85    u32 q_no;    (member)
  octnet_iq_is_full():
    112   static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no)    (argument)
    114   return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending)
    115   >= (oct->instr_queue[q_no]->max_count - 2));
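octnet_iq_is_full() above reports instruction queue q_no as full once instr_pending comes within two slots of max_count, keeping a little headroom for the command about to be posted. Below is a plain C11 sketch of the same check, with standard atomics standing in for the kernel's atomic_t and a simplified queue structure that is not the real struct octeon_instr_queue.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for the fields octnet_iq_is_full() looks at. */
struct instr_queue {
	atomic_uint instr_pending;	/* descriptors posted, not yet retired */
	uint32_t max_count;		/* ring size */
};

/* Full once pending use reaches max_count - 2, i.e. keep two slots free. */
static bool iq_is_full(struct instr_queue *iq)
{
	return atomic_load(&iq->instr_pending) >= iq->max_count - 2;
}

int main(void)
{
	struct instr_queue iq = { .max_count = 512 };

	atomic_store(&iq.instr_pending, 510);
	printf("510 pending of 512: %s\n", iq_is_full(&iq) ? "full" : "ok");
	atomic_store(&iq.instr_pending, 400);
	printf("400 pending of 512: %s\n", iq_is_full(&iq) ? "full" : "ok");
	return 0;
}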
cn66xx_regs.h
    473   #define CN6XXX_DPI_DMA_ENG_ENB(q_no) \    (argument)
    474   (CN6XXX_DPI_DMA_ENG0_ENB + ((q_no) * 8))
    478   #define CN6XXX_DPI_DMA_ENG_BUF(q_no) \    (argument)
    479   (CN6XXX_DPI_DMA_ENG0_BUF + ((q_no) * 8))
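These two macros compute per-engine register addresses as a base offset plus an 8-byte stride, one 64-bit register per DMA engine. The sketch below shows the same shape with made-up base offsets; the names and values are not the real CN6XXX register map.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical base offsets and stride. */
#define DMA_ENG0_ENB   0x0080ULL
#define DMA_ENG0_BUF   0x0880ULL
#define ENG_STRIDE     8ULL

/* Same shape as CN6XXX_DPI_DMA_ENG_ENB()/_BUF(): engine N's 64-bit
 * register lives at base + N * 8. */
#define DMA_ENG_ENB(q_no)  (DMA_ENG0_ENB + ((uint64_t)(q_no) * ENG_STRIDE))
#define DMA_ENG_BUF(q_no)  (DMA_ENG0_BUF + ((uint64_t)(q_no) * ENG_STRIDE))

int main(void)
{
	for (int q = 0; q < 4; q++)
		printf("engine %d: ENB @ 0x%llx, BUF @ 0x%llx\n", q,
		       (unsigned long long)DMA_ENG_ENB(q),
		       (unsigned long long)DMA_ENG_BUF(q));
	return 0;
}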
lio_main.c
  octeon_droq_bh():
    155   int q_no;    (local)
    161   for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
    162   if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
    164   reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
    166   lio_enable_irq(oct->droq[q_no], NULL);
    172   int adjusted_q_no = q_no + oct->sriov_info.pf_srn;
  check_txq_status():
    465   lio->oct_dev->num_iqs].s.q_no;
  lio_sync_octeon_time():
    630   sc->iq_no = lio->linfo.txpciq[0].s.q_no;
  send_rx_ctrl_cmd():
    1168  sc->iq_no = lio->linfo.txpciq[0].s.q_no;
  liquidio_stop_nic_module():
    1289  lio->linfo.rxpciq[j].s.q_no);
    [all …]
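octeon_droq_bh() walks every possible output queue index but only services the queues whose bit is set in io_qmask.oq, accumulating a reschedule flag and re-enabling each queue's interrupt afterwards. A self-contained sketch of that mask-gated loop, with the per-queue work stubbed out and no claim to match the real packet-processing code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_OUTPUT_QUEUES 64

/* Stand-in per-queue processing; returns true if more work remains,
 * mirroring the reschedule accumulation in octeon_droq_bh(). */
static bool process_queue(int q_no)
{
	printf("servicing OQ %d\n", q_no);
	return false;
}

/* Walk only the queues whose bit is set in the active-queue mask. */
static bool service_active_queues(uint64_t oq_mask)
{
	bool reschedule = false;

	for (int q_no = 0; q_no < MAX_OUTPUT_QUEUES; q_no++) {
		if (!(oq_mask & (1ULL << q_no)))
			continue;
		reschedule |= process_queue(q_no);
		/* the real driver re-enables this queue's interrupt here */
	}
	return reschedule;
}

int main(void)
{
	/* queues 0, 1 and 5 active */
	service_active_queues((1ULL << 0) | (1ULL << 1) | (1ULL << 5));
	return 0;
}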
lio_vf_main.c
  send_rx_ctrl_cmd():
    622   sc->iq_no = lio->linfo.txpciq[0].s.q_no;
  liquidio_stop_nic_module():
    731   lio->linfo.rxpciq[j].s.q_no);
  liquidio_set_uc_list():
    1054  nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  liquidio_set_mcast_list():
    1103  nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  liquidio_set_mac():
    1146  nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  liquidio_get_stats64():
    1188  iq_no = lio->linfo.txpciq[i].s.q_no;
    1204  oq_no = lio->linfo.rxpciq[i].s.q_no;
  send_nic_timestamp_pkt():
    1379  sc->iq_no = ndata->q_no;
  liquidio_xmit():
    1428  iq_no = lio->linfo.txpciq[q_idx].s.q_no;
    1455  ndata.q_no = iq_no;
    [all …]
liquidio_common.h
    728   u64 q_no:8;    (member)
    744   u64 q_no:8;
    756   u64 q_no:8;    (member)
    760   u64 q_no:8;
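These liquidio_common.h matches are 8-bit q_no fields packed into 64-bit command and queue descriptor words. A small illustration of that packing style is below; the field names and widths here are invented for the example, and, as with the real header (which carries separate big- and little-endian layouts), bitfield placement within the word depends on the compiler and endianness.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: a 64-bit word with an 8-bit queue-number field
 * alongside other control bits. Layout is implementation-defined. */
union txpciq_example {
	uint64_t u64;
	struct {
		uint64_t q_no:8;
		uint64_t pkind:6;
		uint64_t reserved:50;
	} s;
};

int main(void)
{
	union txpciq_example txpciq = { .u64 = 0 };

	txpciq.s.q_no = 17;
	printf("q_no=%u raw=0x%llx\n", (unsigned)txpciq.s.q_no,
	       (unsigned long long)txpciq.u64);
	return 0;
}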
octeon_device.h
    851   int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no);
    853   int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no);
    865   void octeon_set_droq_pkt_op(struct octeon_device *oct, u32 q_no, u32 enable);
/linux/drivers/net/ethernet/marvell/octeon_ep/
octep_tx.c
  octep_iq_process_completions():
    91    if (unlikely(__netif_subqueue_stopped(iq->netdev, iq->q_no)) &&
    94    netif_wake_subqueue(iq->netdev, iq->q_no);
  octep_iq_free_pending():
    146   netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));
  octep_setup_iq():
    175   static int octep_setup_iq(struct octep_device *oct, int q_no)    (argument)
    184   oct->iq[q_no] = iq;
    189   iq->q_no = q_no;
    193   iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no);
    201   "Failed to allocate DMA memory for IQ-%d\n", q_no);
    213   q_no);
    222   "Failed to allocate buff info for IQ-%d\n", q_no);
    [all …]
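octep_iq_process_completions() wakes the netdev Tx subqueue for iq->q_no once enough descriptors have completed after the queue was stopped. The toy model below keeps that shape with a plain flag in place of netif_stop_subqueue()/netif_wake_subqueue(); the structure, the wake threshold, and the numbers are arbitrary stand-ins rather than the driver's actual policy.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy per-queue Tx flow control: the queue is stopped when the ring
 * fills and woken once completions free enough descriptors. */
struct tx_queue {
	uint16_t q_no;
	uint32_t max_count;
	uint32_t pending;	/* descriptors in flight */
	bool stopped;
};

#define WAKE_THRESHOLD(iq)  ((iq)->max_count / 2)

static void process_completions(struct tx_queue *iq, uint32_t completed)
{
	iq->pending -= completed;

	/* wake the netdev subqueue once enough space is back */
	if (iq->stopped && iq->max_count - iq->pending >= WAKE_THRESHOLD(iq)) {
		iq->stopped = false;
		printf("waking subqueue %u\n", iq->q_no);
	}
}

int main(void)
{
	struct tx_queue iq = { .q_no = 2, .max_count = 1024,
			       .pending = 1024, .stopped = true };

	process_completions(&iq, 600);
	return 0;
}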
octep_rx.c
  octep_oq_fill_ring_buffers():
    50    oq->q_no);
  octep_oq_refill():
    99    oq->q_no);
  octep_setup_oq():
    123   static int octep_setup_oq(struct octep_device *oct, int q_no)    (argument)
    131   oct->oq[q_no] = oq;
    136   oq->q_no = q_no;
    157   "Failed to allocate DMA memory for OQ-%d !!\n", q_no);
    164   "Failed to allocate buffer info for OQ-%d\n", q_no);
    172   oct->hw_ops.setup_oq_regs(oct, q_no);
    186   oct->oq[q_no] = NULL;
  octep_free_oq():
    228   int q_no = oq->q_no;    (local)
    [all …]
octep_cnxk_pf.c
  cnxk_reset_iq():
    127   static int cnxk_reset_iq(struct octep_device *oct, int q_no)    (argument)
    132   dev_dbg(&oct->pdev->dev, "Reset PF IQ-%d\n", q_no);
    135   q_no += conf->pf_ring_cfg.srn;
    138   octep_write_csr64(oct, CNXK_SDP_R_IN_ENABLE(q_no), val);
    141   octep_write_csr64(oct, CNXK_SDP_R_IN_CNTS(q_no), val);
    142   octep_write_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(q_no), val);
    143   octep_write_csr64(oct, CNXK_SDP_R_IN_PKT_CNT(q_no), val);
    144   octep_write_csr64(oct, CNXK_SDP_R_IN_BYTE_CNT(q_no), val);
    145   octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_BADDR(q_no), val);
    146   octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_RSIZE(q_no), val);
    [all …]
octep_cn9k_pf.c
  cn93_reset_iq():
    107   static int cn93_reset_iq(struct octep_device *oct, int q_no)    (argument)
    112   dev_dbg(&oct->pdev->dev, "Reset PF IQ-%d\n", q_no);
    115   q_no += conf->pf_ring_cfg.srn;
    118   octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(q_no), val);
    121   octep_write_csr64(oct, CN93_SDP_R_IN_CNTS(q_no), val);
    122   octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(q_no), val);
    123   octep_write_csr64(oct, CN93_SDP_R_IN_PKT_CNT(q_no), val);
    124   octep_write_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(q_no), val);
    125   octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(q_no), val);
    126   octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(q_no), val);
    [all …]
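Both cnxk_reset_iq() and cn93_reset_iq() first translate the driver-relative queue index into an absolute hardware ring by adding the PF's starting ring number (srn), then clear the ring's CSRs one by one. The sketch below mirrors that index translation and CSR walk under assumed register names, strides, and values; none of them are the real SDP register map.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-ring register windows with a 0x10000 stride. */
#define SDP_R_IN_ENABLE(ring)       (0x00010000ULL + ((uint64_t)(ring) * 0x10000ULL))
#define SDP_R_IN_INT_LEVELS(ring)   (0x00010010ULL + ((uint64_t)(ring) * 0x10000ULL))
#define SDP_R_IN_INSTR_DBELL(ring)  (0x00010020ULL + ((uint64_t)(ring) * 0x10000ULL))

static void csr_write64(uint64_t addr, uint64_t val)
{
	printf("  write 0x%016llx <- 0x%llx\n",
	       (unsigned long long)addr, (unsigned long long)val);
}

/* Sketch of the reset flow: disable the ring, clear its state, and
 * write all-ones to the doorbell to flush any stale count. */
static void reset_iq(int q_no, int srn)
{
	int ring = q_no + srn;	/* driver-relative index -> absolute ring */

	printf("Reset PF IQ-%d (hardware ring %d)\n", q_no, ring);
	csr_write64(SDP_R_IN_ENABLE(ring), 0);
	csr_write64(SDP_R_IN_INT_LEVELS(ring), 0);
	csr_write64(SDP_R_IN_INSTR_DBELL(ring), 0xFFFFFFFFULL);
}

int main(void)
{
	reset_iq(0, 8);		/* PF queue 0 when the PF's rings start at 8 */
	return 0;
}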
octep_main.c
  octep_enable_ioq_irq():
    567   netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
  octep_iq_full_check():
    810   netif_stop_subqueue(iq->netdev, iq->q_no);
    824   netif_start_subqueue(iq->netdev, iq->q_no);
  octep_start_xmit():
    855   u16 q_no, wi;    (local)
    860   q_no = skb_get_queue_mapping(skb);
    861   if (q_no >= oct->num_iqs) {
    862   netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no);
    863   q_no = q_no % oct->num_iqs;
    866   iq = oct->iq[q_no];
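octep_start_xmit() takes q_no from skb_get_queue_mapping() but still guards against an out-of-range value by logging and folding the index back into the configured queue count before dereferencing oct->iq[]. A standalone sketch of that guard, with a hypothetical helper name in place of the driver's inline logic:

#include <stdint.h>
#include <stdio.h>

/* Fold an untrusted queue-mapping value into the valid range. */
static uint16_t pick_tx_queue(uint16_t mapping, uint16_t num_iqs)
{
	if (mapping >= num_iqs) {
		fprintf(stderr, "invalid queue_mapping=%u, folding into %u queues\n",
			mapping, num_iqs);
		mapping = mapping % num_iqs;
	}
	return mapping;
}

int main(void)
{
	printf("queue %u\n", pick_tx_queue(3, 8));	/* in range */
	printf("queue %u\n", pick_tx_queue(9, 8));	/* folded to 1 */
	return 0;
}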
/linux/drivers/net/ethernet/marvell/octeon_ep_vf/
octep_vf_tx.c
  octep_vf_iq_process_completions():
    90    netif_subqueue_completed_wake(iq->netdev, iq->q_no, compl_pkts,
  octep_vf_iq_free_pending():
    145   netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));
  octep_vf_setup_iq():
    174   static int octep_vf_setup_iq(struct octep_vf_device *oct, int q_no)    (argument)
    183   oct->iq[q_no] = iq;
    188   iq->q_no = q_no;
    192   iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no);
    200   "Failed to allocate DMA memory for IQ-%d\n", q_no);
    212   q_no);
    221   "Failed to allocate buff info for IQ-%d\n", q_no);
    237   oct->hw_ops.setup_iq_regs(oct, q_no);
    [all …]
octep_vf_cn9k.c
  cn93_vf_reset_iq():
    81    static void cn93_vf_reset_iq(struct octep_vf_device *oct, int q_no)    (argument)
    85    dev_dbg(&oct->pdev->dev, "Reset VF IQ-%d\n", q_no);
    88    octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(q_no), val);
    91    octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q_no), val);
    92    octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_PKT_CNT(q_no), val);
    93    octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_BYTE_CNT(q_no), val);
    94    octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(q_no), val);
    95    octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(q_no), val);
    98    octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(q_no), val);
    100   val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CNTS(q_no));
    [all …]
octep_vf_cnxk.c
  cnxk_vf_reset_iq():
    84    static void cnxk_vf_reset_iq(struct octep_vf_device *oct, int q_no)    (argument)
    88    dev_dbg(&oct->pdev->dev, "Reset VF IQ-%d\n", q_no);
    91    octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(q_no), val);
    94    octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q_no), val);
    95    octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_PKT_CNT(q_no), val);
    96    octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_BYTE_CNT(q_no), val);
    97    octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(q_no), val);
    98    octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(q_no), val);
    101   octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(q_no), val);
    103   val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(q_no));
    [all …]
octep_vf_rx.c
  octep_vf_oq_fill_ring_buffers():
    50    oq->q_no);
  octep_vf_oq_refill():
    99    oq->q_no);
  octep_vf_setup_oq():
    123   static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no)    (argument)
    131   oct->oq[q_no] = oq;
    136   oq->q_no = q_no;
    157   "Failed to allocate DMA memory for OQ-%d !!\n", q_no);
    165   "Failed to allocate buffer info for OQ-%d\n", q_no);
    173   oct->hw_ops.setup_oq_regs(oct, q_no);
    187   oct->oq[q_no] = NULL;
  octep_vf_free_oq():
    229   int q_no = oq->q_no;    (local)
    [all …]
octep_vf_main.c
  octep_vf_enable_ioq_irq():
    300   netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
  octep_vf_iq_full_check():
    565   ret = netif_subqueue_maybe_stop(iq->netdev, iq->q_no, IQ_INSTR_SPACE(iq),
  octep_vf_start_xmit():
    609   u16 q_no, wi;    (local)
    614   q_no = skb_get_queue_mapping(skb);
    615   if (q_no >= oct->num_iqs) {
    616   netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no);
    617   q_no = q_no % oct->num_iqs;
    620   iq = oct->iq[q_no];