Lines matching refs: vsi
(Identifier cross-reference for vsi in the FreeBSD iflib-based ixl(4) driver. The leading number on each line is the source line number; the trailing annotation names the enclosing function and, for declarations, whether vsi is a local or an argument there.)
40 struct ixl_vsi *vsi = &pf->vsi; in ixl_configure_tx_itr() local
41 struct ixl_tx_queue *que = vsi->tx_queues; in ixl_configure_tx_itr()
43 vsi->tx_itr_setting = pf->tx_itr; in ixl_configure_tx_itr()
45 for (int i = 0; i < vsi->num_tx_queues; i++, que++) { in ixl_configure_tx_itr()
49 vsi->tx_itr_setting); in ixl_configure_tx_itr()
50 txr->itr = vsi->tx_itr_setting; in ixl_configure_tx_itr()
59 struct ixl_vsi *vsi = &pf->vsi; in ixl_configure_rx_itr() local
60 struct ixl_rx_queue *que = vsi->rx_queues; in ixl_configure_rx_itr()
62 vsi->rx_itr_setting = pf->rx_itr; in ixl_configure_rx_itr()
64 for (int i = 0; i < vsi->num_rx_queues; i++, que++) { in ixl_configure_rx_itr()
68 vsi->rx_itr_setting); in ixl_configure_rx_itr()
69 rxr->itr = vsi->rx_itr_setting; in ixl_configure_rx_itr()
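Lines 40-50 and 59-69 are the paired ITR (interrupt throttling rate) setup routines: each copies the PF-wide sysctl value into the VSI, then walks every queue, programming the hardware throttling register and caching the value in the software ring. A sketch of the TX side with the elided lines filled in; the register macro I40E_PFINT_ITRN and the IXL_TX_ITR index are assumptions based on the i40e register naming, and the RX variant is symmetric over vsi->rx_queues:

static void
ixl_configure_tx_itr(struct ixl_pf *pf)
{
        struct i40e_hw *hw = &pf->hw;
        struct ixl_vsi *vsi = &pf->vsi;
        struct ixl_tx_queue *que = vsi->tx_queues;

        vsi->tx_itr_setting = pf->tx_itr;

        for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
                struct tx_ring *txr = &que->txr;

                /* Program the per-queue TX throttling register
                 * (macro assumed from i40e naming)... */
                wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
                    vsi->tx_itr_setting);
                /* ...and cache the value in the software ring (line 50). */
                txr->itr = vsi->tx_itr_setting;
        }
}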
79 struct ixl_vsi *vsi = &pf->vsi; in ixl_intr() local
80 struct ixl_rx_queue *que = vsi->rx_queues; in ixl_intr()
86 if (vsi->shared->isc_intr == IFLIB_INTR_LEGACY) in ixl_intr()
96 iflib_iov_intr_deferred(vsi->ctx); in ixl_intr()
100 iflib_admin_intr_deferred(vsi->ctx); in ixl_intr()
223 iflib_iov_intr_deferred(pf->vsi.ctx); in ixl_msix_adminq()
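Lines 79-100 are the shared legacy/MSI interrupt handler: with a single vector the driver must demultiplex VF events, admin-queue events, and queue work from the ICR0 cause register; line 223 shows the dedicated MSI-X admin-queue handler doing the same VF deferral. A minimal sketch of the dispatch, assuming the standard I40E_PFINT_ICR0_* cause masks and the usual filter-routine return codes; the exact legacy-ack write is a guess from the i40e programming model:

static int
ixl_intr(void *arg)
{
        struct ixl_pf *pf = arg;
        struct i40e_hw *hw = &pf->hw;
        struct ixl_vsi *vsi = &pf->vsi;
        struct ixl_rx_queue *que = vsi->rx_queues;
        u32 icr0;

        ++que->irqs;

        /* True INTx needs the pending-bit array cleared up front;
         * MSI does not (line 86). */
        if (vsi->shared->isc_intr == IFLIB_INTR_LEGACY)
                wr32(hw, I40E_PFINT_DYN_CTL0,
                    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK);

        icr0 = rd32(hw, I40E_PFINT_ICR0);

#ifdef PCI_IOV
        if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
                iflib_iov_intr_deferred(vsi->ctx);      /* line 96 */
#endif
        if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
                iflib_admin_intr_deferred(vsi->ctx);    /* line 100 */

        /* Hand RX/TX work to the iflib task only if queue 0 fired. */
        if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK)
                return (FILTER_SCHEDULE_THREAD);
        return (FILTER_HANDLED);
}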
245 struct ixl_vsi *vsi = &pf->vsi; in ixl_configure_queue_intr_msix() local
249 for (int i = 0; i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) { in ixl_configure_queue_intr_msix()
283 struct ixl_vsi *vsi = &pf->vsi; in ixl_configure_legacy() local
286 vsi->rx_queues[0].rxr.itr = vsi->rx_itr_setting; in ixl_configure_legacy()
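The loop at line 249 maps each queue pair to its own MSI-X vector; vector 0 is reserved for the admin queue, which is why the queue vectors start at 1. In legacy mode (lines 283-286) everything shares vector 0, so only queue 0's cached ITR is seeded and the single-vector registers are programmed instead. A condensed sketch of the loop body; the LNKLSTN/QINT field encodings follow the i40e register layout but are reconstructed, not taken from the listing:

        for (int i = 0, vector = 1;
            i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) {
                /* Head of this vector's interrupt linked list is RX queue i. */
                wr32(hw, I40E_PFINT_LNKLSTN(i),
                    (i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
                    (I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
                /* RX cause: raise this vector, then chain to TX queue i. */
                wr32(hw, I40E_QINT_RQCTL(i), I40E_QINT_RQCTL_CAUSE_ENA_MASK |
                    (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
                    (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
                    (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT));
                /* TX cause: same vector, end of the list. */
                wr32(hw, I40E_QINT_TQCTL(i), I40E_QINT_TQCTL_CAUSE_ENA_MASK |
                    (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
                    (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT));
        }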
322 struct ixl_vsi *vsi = &pf->vsi; in ixl_free_pci_resources() local
323 device_t dev = iflib_get_dev(vsi->ctx); in ixl_free_pci_resources()
324 struct ixl_rx_queue *rx_que = vsi->rx_queues; in ixl_free_pci_resources()
333 iflib_irq_free(vsi->ctx, &vsi->irq); in ixl_free_pci_resources()
335 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) in ixl_free_pci_resources()
336 iflib_irq_free(vsi->ctx, &rx_que->que_irq); in ixl_free_pci_resources()
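Teardown (lines 322-336) releases interrupts in the reverse of setup order, and through iflib rather than bus_teardown_intr() directly: first the admin-queue/legacy vector, then each RX queue's vector. A sketch built almost entirely from the listed lines, with the PCI BAR release the full function presumably also performs elided:

static void
ixl_free_pci_resources(struct ixl_pf *pf)
{
        struct ixl_vsi *vsi = &pf->vsi;
        device_t dev = iflib_get_dev(vsi->ctx);
        struct ixl_rx_queue *rx_que = vsi->rx_queues;

        /* Admin-queue / legacy vector (line 333). */
        iflib_irq_free(vsi->ctx, &vsi->irq);

        /* One vector per RX queue in MSI-X mode (lines 335-336). */
        for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
                iflib_irq_free(vsi->ctx, &rx_que->que_irq);

        /* BAR release via bus_release_resource(dev, ...) elided. */
        (void)dev;
}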
351 struct ixl_vsi *vsi = &pf->vsi; in ixl_setup_interface() local
352 if_ctx_t ctx = vsi->ctx; in ixl_setup_interface()
360 vsi->shared->isc_max_frame_size = in ixl_setup_interface()
386 ixl_add_ifmedia(vsi->media, hw->phy.phy_types); in ixl_setup_interface()
391 ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL); in ixl_setup_interface()
392 ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO); in ixl_setup_interface()
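ixl_setup_interface() (lines 351-392) wires the VSI to its iflib context: it derives the maximum frame size from the MTU plus Ethernet overhead (line 360), then builds the media list from the PHY capability bitmap and makes autoselect both available and the default. A sketch of those two steps, assuming ifp comes from iflib_get_ifp(ctx) and the standard Ethernet header macros:

        if_t ifp = iflib_get_ifp(ctx);

        /* Line 360: MTU plus L2 overhead, including a VLAN tag. */
        vsi->shared->isc_max_frame_size = if_getmtu(ifp) +
            ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;

        /* Lines 386-392: advertise what the PHY reports, default to auto. */
        ixl_add_ifmedia(vsi->media, hw->phy.phy_types);
        ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
        ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);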
404 device_t dev = iflib_get_dev(pf->vsi.ctx); in ixl_link_event()
419 (if_getflags(pf->vsi.ifp) & IFF_UP) && in ixl_link_event()
436 ixl_initialize_vsi(struct ixl_vsi *vsi) in ixl_initialize_vsi() argument
438 struct ixl_pf *pf = vsi->back; in ixl_initialize_vsi()
439 if_softc_ctx_t scctx = iflib_get_softc_ctx(vsi->ctx); in ixl_initialize_vsi()
440 struct ixl_tx_queue *tx_que = vsi->tx_queues; in ixl_initialize_vsi()
441 struct ixl_rx_queue *rx_que = vsi->rx_queues; in ixl_initialize_vsi()
442 device_t dev = iflib_get_dev(vsi->ctx); in ixl_initialize_vsi()
443 struct i40e_hw *hw = vsi->hw; in ixl_initialize_vsi()
449 ctxt.seid = vsi->seid; in ixl_initialize_vsi()
490 if (if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWTAGGING) in ixl_initialize_vsi()
504 vsi->vsi_num = ctxt.vsi_number; in ixl_initialize_vsi()
505 bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info)); in ixl_initialize_vsi()
516 for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) { in ixl_initialize_vsi()
531 tctx.rdylist = vsi->info.qs_handle[0]; in ixl_initialize_vsi()
537 if (vsi->enable_head_writeback) { in ixl_initialize_vsi()
564 ixl_init_tx_ring(vsi, tx_que); in ixl_initialize_vsi()
566 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) { in ixl_initialize_vsi()
571 rxr->mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx); in ixl_initialize_vsi()
609 wr32(vsi->hw, I40E_QRX_TAIL(i), 0); in ixl_initialize_vsi()
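ixl_initialize_vsi() (lines 436-609) is the bring-up core: it fills an i40e_vsi_context and pushes it through the admin queue (lines 449-505), programs each TX queue's HMC context, including the ready-list handle at line 531 and the optional head-writeback mode at line 537, initializes the rings, sizes RX buffers from iflib (line 571), and finally zeroes every RX tail register (line 609). A sketch of the context-update step; the queue-mapping fields are elided, and while i40e_aq_update_vsi_params() is a real shared-code call, its use here is inferred from the surrounding lines rather than shown in them:

        struct i40e_vsi_context ctxt;
        enum i40e_status_code err;

        memset(&ctxt, 0, sizeof(ctxt));
        ctxt.seid = vsi->seid;          /* line 449 */
        ctxt.pf_num = hw->pf_id;

        /* Line 490: keep VLAN tags in the RX descriptor when the
         * stack wants hardware tagging, strip nothing otherwise. */
        if (if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWTAGGING)
                ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
                    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
        else
                ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
                    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

        err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
        if (err) {
                device_printf(dev, "VSI update failed: %d\n", (int)err);
                return ((int)err);
        }

        /* Lines 504-505: remember what the firmware actually built. */
        vsi->vsi_num = ctxt.vsi_number;
        bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));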
622 struct ixl_vsi *vsi = que->vsi; in ixl_set_queue_rx_itr() local
623 struct ixl_pf *pf = (struct ixl_pf *)vsi->back; in ixl_set_queue_rx_itr()
624 struct i40e_hw *hw = vsi->hw; in ixl_set_queue_rx_itr()
674 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC) in ixl_set_queue_rx_itr()
675 vsi->rx_itr_setting = pf->rx_itr; in ixl_set_queue_rx_itr()
677 if (rxr->itr != vsi->rx_itr_setting) { in ixl_set_queue_rx_itr()
678 rxr->itr = vsi->rx_itr_setting; in ixl_set_queue_rx_itr()
695 struct ixl_vsi *vsi = que->vsi; in ixl_set_queue_tx_itr() local
696 struct ixl_pf *pf = (struct ixl_pf *)vsi->back; in ixl_set_queue_tx_itr()
697 struct i40e_hw *hw = vsi->hw; in ixl_set_queue_tx_itr()
748 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC) in ixl_set_queue_tx_itr()
749 vsi->tx_itr_setting = pf->tx_itr; in ixl_set_queue_tx_itr()
751 if (txr->itr != vsi->tx_itr_setting) { in ixl_set_queue_tx_itr()
752 txr->itr = vsi->tx_itr_setting; in ixl_set_queue_tx_itr()
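ixl_set_queue_rx_itr()/ixl_set_queue_tx_itr() (lines 622-752) adapt the throttling rate per queue when dynamic ITR is enabled; the listed lines are the static fallback taken when it is not. The IXL_ITR_DYNAMIC bit still set in the cached value means dynamic mode was just switched off, so the setting snaps back to the PF-wide value, and the register write is skipped unless the value actually changed, avoiding redundant MMIO in a hot path. The RX half, with the register macro and the queue-index field que->rxr.me assumed:

        /* Lines 674-678; the TX path (748-752) is identical in shape. */
        if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
                vsi->rx_itr_setting = pf->rx_itr;

        if (rxr->itr != vsi->rx_itr_setting) {
                rxr->itr = vsi->rx_itr_setting;
                wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, que->rxr.me),
                    rxr->itr);
        }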
778 val = rd32(tx_que->vsi->hw, tx_que->txr.tail); in ixl_sysctl_qtx_tail_handler()
800 val = rd32(rx_que->vsi->hw, rx_que->rxr.tail); in ixl_sysctl_qrx_tail_handler()
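Lines 778 and 800 are the bodies of read-only sysctl handlers that expose each queue's hardware tail pointer for debugging. A sketch of the TX variant in the standard SYSCTL_HANDLER_ARGS shape; only the rd32() line is confirmed by the listing:

static int
ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
{
        struct ixl_tx_queue *tx_que = (struct ixl_tx_queue *)arg1;
        u32 val;

        if (tx_que == NULL)
                return (EINVAL);

        /* Line 778: read the live tail register through the VSI's hw. */
        val = rd32(tx_que->vsi->hw, tx_que->txr.tail);
        return (sysctl_handle_int(oidp, &val, 0, req));
}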
811 struct ixl_vsi *vsi = &pf->vsi; in ixl_add_hw_stats() local
812 device_t dev = iflib_get_dev(vsi->ctx); in ixl_add_hw_stats()
824 sysctl_ctx_init(&vsi->sysctl_ctx); in ixl_add_hw_stats()
825 ixl_vsi_add_sysctls(vsi, "pf", true); in ixl_add_hw_stats()
834 struct ixl_vsi *vsi = &pf->vsi; in ixl_set_rss_hlut() local
835 device_t dev = iflib_get_dev(vsi->ctx); in ixl_set_rss_hlut()
853 que_id = que_id % vsi->num_rx_queues; in ixl_set_rss_hlut()
855 que_id = i % vsi->num_rx_queues; in ixl_set_rss_hlut()
862 status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf)); in ixl_set_rss_hlut()
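ixl_set_rss_hlut() (lines 834-862) fills the RSS hash lookup table so every entry points at a valid RX queue: with the kernel RSS option the entry follows the global indirection table, folded into the configured queue count by the modulo at line 853; without it, queues are assigned round-robin (line 855). The table is then pushed to firmware over the admin queue (line 862). A sketch, assuming the table size comes from the hardware capability field rss_table_size (512 on these parts):

        u8 hlut_buf[512];
        u32 que_id;
        enum i40e_status_code status;

        for (int i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
#ifdef RSS
                /* Map this entry's RSS bucket onto a real queue. */
                que_id = rss_get_indirection_to_bucket(i);
                que_id = que_id % vsi->num_rx_queues;
#else
                que_id = i % vsi->num_rx_queues;
#endif
                hlut_buf[i] = (u8)que_id;
        }

        status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE,
            hlut_buf, sizeof(hlut_buf));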
875 ixl_enable_rings(struct ixl_vsi *vsi) in ixl_enable_rings() argument
877 struct ixl_pf *pf = vsi->back; in ixl_enable_rings()
880 for (int i = 0; i < vsi->num_tx_queues; i++) in ixl_enable_rings()
883 for (int i = 0; i < vsi->num_rx_queues; i++) in ixl_enable_rings()
890 ixl_disable_rings(struct ixl_pf *pf, struct ixl_vsi *vsi, struct ixl_pf_qtag *qtag) in ixl_disable_rings() argument
894 for (int i = 0; i < vsi->num_tx_queues; i++) in ixl_disable_rings()
897 for (int i = 0; i < vsi->num_rx_queues; i++) in ixl_disable_rings()
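ixl_enable_rings()/ixl_disable_rings() (lines 875-897) walk the TX then the RX queues and delegate per-ring start/stop to helpers; note the disable variant takes the queue tag explicitly (line 890), presumably so it can operate on queue allocations other than the PF's own. A sketch of the enable side; the helper names follow the driver's conventions but are assumptions here:

int
ixl_enable_rings(struct ixl_vsi *vsi)
{
        struct ixl_pf *pf = vsi->back;
        int error = 0;

        /* Lines 880-884: the last helper's status is what gets returned. */
        for (int i = 0; i < vsi->num_tx_queues; i++)
                error = ixl_enable_tx_ring(pf, &pf->qtag, i);
        for (int i = 0; i < vsi->num_rx_queues; i++)
                error = ixl_enable_rx_ring(pf, &pf->qtag, i);

        return (error);
}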
904 ixl_enable_intr(struct ixl_vsi *vsi) in ixl_enable_intr() argument
906 struct i40e_hw *hw = vsi->hw; in ixl_enable_intr()
907 struct ixl_rx_queue *que = vsi->rx_queues; in ixl_enable_intr()
909 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) { in ixl_enable_intr()
910 for (int i = 0; i < vsi->num_rx_queues; i++, que++) in ixl_enable_intr()
917 ixl_disable_rings_intr(struct ixl_vsi *vsi) in ixl_disable_rings_intr() argument
919 struct i40e_hw *hw = vsi->hw; in ixl_disable_rings_intr()
920 struct ixl_rx_queue *que = vsi->rx_queues; in ixl_disable_rings_intr()
922 for (int i = 0; i < vsi->num_rx_queues; i++, que++) in ixl_disable_rings_intr()
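ixl_enable_intr()/ixl_disable_rings_intr() (lines 904-922) gate the per-queue interrupts: enabling is per-vector only under MSI-X, since with legacy/MSI there is just interrupt 0 to turn on, while disabling always walks the RX queues. A sketch of the enable side; ixl_enable_queue() and ixl_enable_intr0() are driver helpers whose use here is reconstructed:

void
ixl_enable_intr(struct ixl_vsi *vsi)
{
        struct i40e_hw *hw = vsi->hw;
        struct ixl_rx_queue *que = vsi->rx_queues;

        if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
                /* Lines 909-910: one vector per RX queue. */
                for (int i = 0; i < vsi->num_rx_queues; i++, que++)
                        ixl_enable_queue(hw, que->rxr.me);
        } else {
                /* Single shared vector in legacy/MSI mode. */
                ixl_enable_intr0(hw);
        }
}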
934 ixl_if_stop(pf->vsi.ctx); in ixl_prepare_for_reset()
953 struct ixl_vsi *vsi = &pf->vsi; in ixl_rebuild_hw_structs_after_reset() local
970 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) { in ixl_rebuild_hw_structs_after_reset()
991 error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_tx_queues, &pf->qtag); in ixl_rebuild_hw_structs_after_reset()
1024 ixl_del_default_hw_filters(vsi); in ixl_rebuild_hw_structs_after_reset()
1027 i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL); in ixl_rebuild_hw_structs_after_reset()
1046 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) { in ixl_rebuild_hw_structs_after_reset()
1052 iflib_request_reset(vsi->ctx); in ixl_rebuild_hw_structs_after_reset()
1053 iflib_admin_intr_deferred(vsi->ctx); in ixl_rebuild_hw_structs_after_reset()
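The reset pair (lines 934-1053) stops the interface, then rebuilds hardware state from scratch: reprogram interrupts for the active mode (lines 970, 1046), re-carve a contiguous queue range from the queue manager (line 991), drop the stale default filters, and re-enable broadcast (lines 1024-1027). If any step fails, the driver does not limp along; it funnels into an error path that requests a full iflib reinit. A sketch of that funnel; the label name is invented, but the fallback is exactly what lines 1052-1053 show:

        /* ... admin queue, HMC, queue and filter rebuild steps ... */
        return (0);

err_out:        /* hypothetical label for the shared error path */
        device_printf(dev, "rebuild after reset failed, "
            "requesting iflib reset\n");
        iflib_request_reset(vsi->ctx);
        iflib_admin_intr_deferred(vsi->ctx);
        return (error);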