Lines matching refs: vsi
80 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);
117 struct ice_vsi *vsi = NULL; in ice_check_for_hang_subtask() local
124 if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) { in ice_check_for_hang_subtask()
125 vsi = pf->vsi[v]; in ice_check_for_hang_subtask()
129 if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state)) in ice_check_for_hang_subtask()
132 if (!(vsi->netdev && netif_carrier_ok(vsi->netdev))) in ice_check_for_hang_subtask()
135 hw = &vsi->back->hw; in ice_check_for_hang_subtask()
137 ice_for_each_txq(vsi, i) { in ice_check_for_hang_subtask()
138 struct ice_tx_ring *tx_ring = vsi->tx_rings[i]; in ice_check_for_hang_subtask()
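The hang-check subtask above begins by locating the PF's main VSI with a scan over pf->vsi[]. A minimal sketch of that lookup pattern, using only fields visible in this listing (pf->num_alloc_vsi is taken from the allocation at line 5034; the function name is illustrative):

static struct ice_vsi *sketch_find_pf_vsi(struct ice_pf *pf)
{
        int v;

        /* scan the PF's VSI table for the one backing the netdev */
        for (v = 0; v < pf->num_alloc_vsi; v++)
                if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF)
                        return pf->vsi[v];

        return NULL;    /* no PF VSI allocated yet */
}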
185 struct ice_vsi *vsi; in ice_init_mac_fltr() local
188 vsi = ice_get_main_vsi(pf); in ice_init_mac_fltr()
189 if (!vsi) in ice_init_mac_fltr()
192 perm_addr = vsi->port_info->mac.perm_addr; in ice_init_mac_fltr()
193 return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI); in ice_init_mac_fltr()
209 struct ice_vsi *vsi = np->vsi; in ice_add_mac_to_sync_list() local
211 if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr, in ice_add_mac_to_sync_list()
231 struct ice_vsi *vsi = np->vsi; in ice_add_mac_to_unsync_list() local
241 if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr, in ice_add_mac_to_unsync_list()
254 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi) in ice_vsi_fltr_changed() argument
256 return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) || in ice_vsi_fltr_changed()
257 test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); in ice_vsi_fltr_changed()
266 static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m) in ice_set_promisc() argument
270 if (vsi->type != ICE_VSI_PF) in ice_set_promisc()
273 if (ice_vsi_has_non_zero_vlans(vsi)) { in ice_set_promisc()
275 status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, in ice_set_promisc()
278 status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_set_promisc()
284 netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n", in ice_set_promisc()
285 vsi->vsi_num, promisc_m); in ice_set_promisc()
295 static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m) in ice_clear_promisc() argument
299 if (vsi->type != ICE_VSI_PF) in ice_clear_promisc()
302 if (ice_vsi_has_non_zero_vlans(vsi)) { in ice_clear_promisc()
304 status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, in ice_clear_promisc()
307 status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_clear_promisc()
311 netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n", in ice_clear_promisc()
312 vsi->vsi_num, promisc_m); in ice_clear_promisc()
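ice_set_promisc() and ice_clear_promisc() form a matched pair; the sync path at lines 395-405 toggles them from the netdev's IFF_ALLMULTI flag. A hedged sketch of that toggle, reconstructing the control flow between the listed lines (the flag restore on failure follows lines 398 and 405):

        /* keep hardware multicast-promiscuous state in step with
         * IFF_ALLMULTI; on failure, undo the cached flag change
         */
        if (vsi->current_netdev_flags & IFF_ALLMULTI) {
                err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
                if (err)
                        vsi->current_netdev_flags &= ~IFF_ALLMULTI;
        } else {
                err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
                if (err)
                        vsi->current_netdev_flags |= IFF_ALLMULTI;
        }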
322 static int ice_vsi_sync_fltr(struct ice_vsi *vsi) in ice_vsi_sync_fltr() argument
324 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); in ice_vsi_sync_fltr()
325 struct device *dev = ice_pf_to_dev(vsi->back); in ice_vsi_sync_fltr()
326 struct net_device *netdev = vsi->netdev; in ice_vsi_sync_fltr()
328 struct ice_pf *pf = vsi->back; in ice_vsi_sync_fltr()
333 if (!vsi->netdev) in ice_vsi_sync_fltr()
336 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) in ice_vsi_sync_fltr()
339 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; in ice_vsi_sync_fltr()
340 vsi->current_netdev_flags = vsi->netdev->flags; in ice_vsi_sync_fltr()
342 INIT_LIST_HEAD(&vsi->tmp_sync_list); in ice_vsi_sync_fltr()
343 INIT_LIST_HEAD(&vsi->tmp_unsync_list); in ice_vsi_sync_fltr()
345 if (ice_vsi_fltr_changed(vsi)) { in ice_vsi_sync_fltr()
346 clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); in ice_vsi_sync_fltr()
347 clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); in ice_vsi_sync_fltr()
360 err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list); in ice_vsi_sync_fltr()
361 ice_fltr_free_list(dev, &vsi->tmp_unsync_list); in ice_vsi_sync_fltr()
370 err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list); in ice_vsi_sync_fltr()
371 ice_fltr_free_list(dev, &vsi->tmp_sync_list); in ice_vsi_sync_fltr()
384 vsi->state)) { in ice_vsi_sync_fltr()
387 vsi->vsi_num); in ice_vsi_sync_fltr()
395 if (vsi->current_netdev_flags & IFF_ALLMULTI) { in ice_vsi_sync_fltr()
396 err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS); in ice_vsi_sync_fltr()
398 vsi->current_netdev_flags &= ~IFF_ALLMULTI; in ice_vsi_sync_fltr()
403 err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS); in ice_vsi_sync_fltr()
405 vsi->current_netdev_flags |= IFF_ALLMULTI; in ice_vsi_sync_fltr()
412 test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) { in ice_vsi_sync_fltr()
413 clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state); in ice_vsi_sync_fltr()
414 if (vsi->current_netdev_flags & IFF_PROMISC) { in ice_vsi_sync_fltr()
416 if (!ice_is_dflt_vsi_in_use(vsi->port_info)) { in ice_vsi_sync_fltr()
417 err = ice_set_dflt_vsi(vsi); in ice_vsi_sync_fltr()
420 err, vsi->vsi_num); in ice_vsi_sync_fltr()
421 vsi->current_netdev_flags &= in ice_vsi_sync_fltr()
426 vlan_ops->dis_rx_filtering(vsi); in ice_vsi_sync_fltr()
433 err = ice_set_promisc(vsi, in ice_vsi_sync_fltr()
440 if (ice_is_vsi_dflt_vsi(vsi)) { in ice_vsi_sync_fltr()
441 err = ice_clear_dflt_vsi(vsi); in ice_vsi_sync_fltr()
444 err, vsi->vsi_num); in ice_vsi_sync_fltr()
445 vsi->current_netdev_flags |= in ice_vsi_sync_fltr()
449 if (vsi->netdev->features & in ice_vsi_sync_fltr()
451 vlan_ops->ena_rx_filtering(vsi); in ice_vsi_sync_fltr()
457 if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) { in ice_vsi_sync_fltr()
458 err = ice_clear_promisc(vsi, in ice_vsi_sync_fltr()
462 err, vsi->vsi_num); in ice_vsi_sync_fltr()
470 set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state); in ice_vsi_sync_fltr()
474 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); in ice_vsi_sync_fltr()
475 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); in ice_vsi_sync_fltr()
477 clear_bit(ICE_CFG_BUSY, vsi->state); in ice_vsi_sync_fltr()
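ice_vsi_sync_fltr() serializes against other configuration paths by bit-spinning on ICE_CFG_BUSY (lines 336 and 477 above; the VLAN rx_add/kill_vid handlers use the same bracket), and batches MAC changes through vsi->tmp_sync_list/tmp_unsync_list before applying and freeing them in one shot. A sketch of that skeleton; the usleep_range() backoff and the __dev_uc_sync()/__dev_mc_sync() fill step are assumptions about the elided body:

static int sketch_sync_fltr(struct ice_vsi *vsi, struct device *dev)
{
        int err;

        /* take the per-VSI config "lock": spin until the bit was clear */
        while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
                usleep_range(1000, 2000);       /* assumed backoff */

        INIT_LIST_HEAD(&vsi->tmp_sync_list);
        INIT_LIST_HEAD(&vsi->tmp_unsync_list);

        /* netdev core fills both lists through the sync/unsync
         * callbacks above (presumably __dev_uc_sync()/__dev_mc_sync();
         * not shown) */

        /* removals first, then additions; each list is freed either way */
        err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
        ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
        if (!err) {
                err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
                ice_fltr_free_list(dev, &vsi->tmp_sync_list);
        }

        clear_bit(ICE_CFG_BUSY, vsi->state);    /* release */
        return err;
}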
495 if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) && in ice_sync_fltr_subtask()
496 ice_vsi_sync_fltr(pf->vsi[v])) { in ice_sync_fltr_subtask()
514 if (pf->vsi[v]) in ice_pf_dis_all_vsi()
515 ice_dis_vsi(pf->vsi[v], locked); in ice_pf_dis_all_vsi()
535 struct ice_vsi *vsi; in ice_prepare_for_reset() local
566 vsi = ice_get_main_vsi(pf); in ice_prepare_for_reset()
567 if (!vsi) in ice_prepare_for_reset()
573 vsi->orig_rss_size = 0; in ice_prepare_for_reset()
577 vsi->old_ena_tc = vsi->all_enatc; in ice_prepare_for_reset()
578 vsi->old_numtc = vsi->all_numtc; in ice_prepare_for_reset()
580 ice_remove_q_channels(vsi, true); in ice_prepare_for_reset()
585 vsi->old_ena_tc = 0; in ice_prepare_for_reset()
586 vsi->all_enatc = 0; in ice_prepare_for_reset()
587 vsi->old_numtc = 0; in ice_prepare_for_reset()
588 vsi->all_numtc = 0; in ice_prepare_for_reset()
589 vsi->req_txq = 0; in ice_prepare_for_reset()
590 vsi->req_rxq = 0; in ice_prepare_for_reset()
592 memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt)); in ice_prepare_for_reset()
596 if (vsi->netdev) in ice_prepare_for_reset()
597 netif_device_detach(vsi->netdev); in ice_prepare_for_reset()
746 static void ice_print_topo_conflict(struct ice_vsi *vsi) in ice_print_topo_conflict() argument
748 switch (vsi->port_info->phy.link_info.topo_media_conflict) { in ice_print_topo_conflict()
754 …netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not … in ice_print_topo_conflict()
757 if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags)) in ice_print_topo_conflict()
758 …netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet … in ice_print_topo_conflict()
760 …netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was d… in ice_print_topo_conflict()
772 void ice_print_link_msg(struct ice_vsi *vsi, bool isup) in ice_print_link_msg() argument
783 if (!vsi) in ice_print_link_msg()
786 if (vsi->current_isup == isup) in ice_print_link_msg()
789 vsi->current_isup = isup; in ice_print_link_msg()
792 netdev_info(vsi->netdev, "NIC Link is Down\n"); in ice_print_link_msg()
796 switch (vsi->port_info->phy.link_info.link_speed) { in ice_print_link_msg()
835 switch (vsi->port_info->fc.current_mode) { in ice_print_link_msg()
854 switch (vsi->port_info->phy.link_info.fec_info) { in ice_print_link_msg()
868 if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) in ice_print_link_msg()
881 status = ice_aq_get_phy_caps(vsi->port_info, false, in ice_print_link_msg()
884 netdev_info(vsi->netdev, "Get phy capability failed.\n"); in ice_print_link_msg()
900 …netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s,… in ice_print_link_msg()
902 ice_print_topo_conflict(vsi); in ice_print_link_msg()
910 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up) in ice_vsi_link_event() argument
912 if (!vsi) in ice_vsi_link_event()
915 if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev) in ice_vsi_link_event()
918 if (vsi->type == ICE_VSI_PF) { in ice_vsi_link_event()
919 if (link_up == netif_carrier_ok(vsi->netdev)) in ice_vsi_link_event()
923 netif_carrier_on(vsi->netdev); in ice_vsi_link_event()
924 netif_tx_wake_all_queues(vsi->netdev); in ice_vsi_link_event()
926 netif_carrier_off(vsi->netdev); in ice_vsi_link_event()
927 netif_tx_stop_all_queues(vsi->netdev); in ice_vsi_link_event()
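For the PF VSI, a link event reduces to a carrier/queue toggle on the netdev, guarded at line 919 so repeated events in the same state are no-ops. Sketch of the reconstructed branch:

        if (link_up == netif_carrier_ok(vsi->netdev))
                return;         /* already in the requested state */

        if (link_up) {
                netif_carrier_on(vsi->netdev);
                netif_tx_wake_all_queues(vsi->netdev);
        } else {
                netif_carrier_off(vsi->netdev);
                netif_tx_stop_all_queues(vsi->netdev);
        }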
1104 struct ice_vsi *vsi; in ice_link_event() local
1132 vsi = ice_get_main_vsi(pf); in ice_link_event()
1133 if (!vsi || !vsi->port_info) in ice_link_event()
1140 ice_set_link(vsi, false); in ice_link_event()
1156 ice_vsi_link_event(vsi, link_up); in ice_link_event()
1157 ice_print_link_msg(vsi, link_up); in ice_link_event()
1189 if (pf->vsi[i] && pf->vsi[i]->netdev) in ice_watchdog_subtask()
1190 ice_update_vsi_stats(pf->vsi[i]); in ice_watchdog_subtask()
1955 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) in ice_force_phys_link_state() argument
1963 if (!vsi || !vsi->port_info || !vsi->back) in ice_force_phys_link_state()
1965 if (vsi->type != ICE_VSI_PF) in ice_force_phys_link_state()
1968 dev = ice_pf_to_dev(vsi->back); in ice_force_phys_link_state()
1970 pi = vsi->port_info; in ice_force_phys_link_state()
1980 vsi->vsi_num, retcode); in ice_force_phys_link_state()
2006 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); in ice_force_phys_link_state()
2009 vsi->vsi_num, retcode); in ice_force_phys_link_state()
2200 static int ice_configure_phy(struct ice_vsi *vsi) in ice_configure_phy() argument
2202 struct device *dev = ice_pf_to_dev(vsi->back); in ice_configure_phy()
2203 struct ice_port_info *pi = vsi->port_info; in ice_configure_phy()
2207 struct ice_pf *pf = vsi->back; in ice_configure_phy()
2214 ice_print_topo_conflict(vsi); in ice_configure_phy()
2221 return ice_force_phys_link_state(vsi, true); in ice_configure_phy()
2232 vsi->vsi_num, err); in ice_configure_phy()
2253 vsi->vsi_num, err); in ice_configure_phy()
2269 vsi->back->state)) { in ice_configure_phy()
2309 vsi->vsi_num, err); in ice_configure_phy()
2327 struct ice_vsi *vsi; in ice_check_media_subtask() local
2334 vsi = ice_get_main_vsi(pf); in ice_check_media_subtask()
2335 if (!vsi) in ice_check_media_subtask()
2339 pi = vsi->port_info; in ice_check_media_subtask()
2353 if (test_bit(ICE_VSI_DOWN, vsi->state) && in ice_check_media_subtask()
2354 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) in ice_check_media_subtask()
2357 err = ice_configure_phy(vsi); in ice_check_media_subtask()
2562 static int ice_vsi_ena_irq(struct ice_vsi *vsi) in ice_vsi_ena_irq() argument
2564 struct ice_hw *hw = &vsi->back->hw; in ice_vsi_ena_irq()
2567 ice_for_each_q_vector(vsi, i) in ice_vsi_ena_irq()
2568 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); in ice_vsi_ena_irq()
2579 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) in ice_vsi_req_irq_msix() argument
2581 int q_vectors = vsi->num_q_vectors; in ice_vsi_req_irq_msix()
2582 struct ice_pf *pf = vsi->back; in ice_vsi_req_irq_msix()
2591 struct ice_q_vector *q_vector = vsi->q_vectors[vector]; in ice_vsi_req_irq_msix()
2609 if (vsi->type == ICE_VSI_CTRL && vsi->vf) in ice_vsi_req_irq_msix()
2610 err = devm_request_irq(dev, irq_num, vsi->irq_handler, in ice_vsi_req_irq_msix()
2614 err = devm_request_irq(dev, irq_num, vsi->irq_handler, in ice_vsi_req_irq_msix()
2617 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", in ice_vsi_req_irq_msix()
2636 err = ice_set_cpu_rx_rmap(vsi); in ice_vsi_req_irq_msix()
2638 netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n", in ice_vsi_req_irq_msix()
2639 vsi->vsi_num, ERR_PTR(err)); in ice_vsi_req_irq_msix()
2643 vsi->irqs_ready = true; in ice_vsi_req_irq_msix()
2648 irq_num = vsi->q_vectors[vector]->irq.virq; in ice_vsi_req_irq_msix()
2652 devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]); in ice_vsi_req_irq_msix()
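ice_vsi_req_irq_msix() uses the classic partial-failure unwind: on error, walk back over the vectors whose IRQs were already requested and devm_free_irq() each (lines 2648-2652). A hedged sketch of the loop shape; the label name and the exact while form are assumptions:

free_q_irqs:
        /* undo only what succeeded: vector counts how many IRQs
         * were requested before the failure */
        while (vector--) {
                irq_num = vsi->q_vectors[vector]->irq.virq;
                devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
        }
        return err;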
2663 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) in ice_xdp_alloc_setup_rings() argument
2665 struct device *dev = ice_pf_to_dev(vsi->back); in ice_xdp_alloc_setup_rings()
2669 ice_for_each_xdp_txq(vsi, i) { in ice_xdp_alloc_setup_rings()
2670 u16 xdp_q_idx = vsi->alloc_txq + i; in ice_xdp_alloc_setup_rings()
2686 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx]; in ice_xdp_alloc_setup_rings()
2687 xdp_ring->vsi = vsi; in ice_xdp_alloc_setup_rings()
2690 xdp_ring->count = vsi->num_tx_desc; in ice_xdp_alloc_setup_rings()
2691 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); in ice_xdp_alloc_setup_rings()
2706 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) { in ice_xdp_alloc_setup_rings()
2707 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu); in ice_xdp_alloc_setup_rings()
2708 vsi->xdp_rings[i]->ring_stats = NULL; in ice_xdp_alloc_setup_rings()
2709 ice_free_tx_ring(vsi->xdp_rings[i]); in ice_xdp_alloc_setup_rings()
2720 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) in ice_vsi_assign_bpf_prog() argument
2725 old_prog = xchg(&vsi->xdp_prog, prog); in ice_vsi_assign_bpf_prog()
2726 ice_for_each_rxq(vsi, i) in ice_vsi_assign_bpf_prog()
2727 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); in ice_vsi_assign_bpf_prog()
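ice_vsi_assign_bpf_prog() publishes the new program with xchg() and then mirrors the pointer into every Rx ring via WRITE_ONCE(), so the data path can read it locklessly. The old program has to be released once no ring can still see it; the bpf_prog_put() below is an assumption about the elided tail of the function:

        old_prog = xchg(&vsi->xdp_prog, prog);
        ice_for_each_rxq(vsi, i)
                WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);

        if (old_prog)
                bpf_prog_put(old_prog); /* assumed: drop ref on old prog */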
2733 static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid) in ice_xdp_ring_from_qid() argument
2739 return vsi->xdp_rings[qid % vsi->num_xdp_txq]; in ice_xdp_ring_from_qid()
2741 q_vector = vsi->rx_rings[qid]->q_vector; in ice_xdp_ring_from_qid()
2756 void ice_map_xdp_rings(struct ice_vsi *vsi) in ice_map_xdp_rings() argument
2758 int xdp_rings_rem = vsi->num_xdp_txq; in ice_map_xdp_rings()
2762 ice_for_each_q_vector(vsi, v_idx) { in ice_map_xdp_rings()
2763 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; in ice_map_xdp_rings()
2767 vsi->num_q_vectors - v_idx); in ice_map_xdp_rings()
2768 q_base = vsi->num_xdp_txq - xdp_rings_rem; in ice_map_xdp_rings()
2771 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id]; in ice_map_xdp_rings()
2780 ice_for_each_rxq(vsi, q_idx) { in ice_map_xdp_rings()
2781 vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi, in ice_map_xdp_rings()
2783 ice_tx_xsk_pool(vsi, q_idx); in ice_map_xdp_rings()
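ice_map_xdp_rings() spreads num_xdp_txq rings across the q_vectors by recomputing an even share of the remainder at each vector (lines 2758-2768). A worked sketch of the arithmetic, with DIV_ROUND_UP assumed for the per-vector share:

        int xdp_rings_rem = vsi->num_xdp_txq;

        ice_for_each_q_vector(vsi, v_idx) {
                /* even share of what's left over the vectors left;
                 * e.g. 4 rings over 3 vectors maps as 2, 1, 1 */
                int per_v = DIV_ROUND_UP(xdp_rings_rem,
                                         vsi->num_q_vectors - v_idx);
                int q_base = vsi->num_xdp_txq - xdp_rings_rem;

                /* attach rings [q_base, q_base + per_v) to this
                 * vector (attach step not shown) */

                xdp_rings_rem -= per_v;
        }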
2795 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog, in ice_prepare_xdp_rings() argument
2799 struct ice_pf *pf = vsi->back; in ice_prepare_xdp_rings()
2804 .q_count = vsi->num_xdp_txq, in ice_prepare_xdp_rings()
2806 .vsi_map = vsi->txq_map, in ice_prepare_xdp_rings()
2807 .vsi_map_offset = vsi->alloc_txq, in ice_prepare_xdp_rings()
2814 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, in ice_prepare_xdp_rings()
2815 sizeof(*vsi->xdp_rings), GFP_KERNEL); in ice_prepare_xdp_rings()
2816 if (!vsi->xdp_rings) in ice_prepare_xdp_rings()
2819 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode; in ice_prepare_xdp_rings()
2824 netdev_warn(vsi->netdev, in ice_prepare_xdp_rings()
2827 if (ice_xdp_alloc_setup_rings(vsi)) in ice_prepare_xdp_rings()
2837 ice_map_xdp_rings(vsi); in ice_prepare_xdp_rings()
2842 for (i = 0; i < vsi->tc_cfg.numtc; i++) in ice_prepare_xdp_rings()
2843 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq; in ice_prepare_xdp_rings()
2845 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, in ice_prepare_xdp_rings()
2862 if (!ice_is_xdp_ena_vsi(vsi)) in ice_prepare_xdp_rings()
2863 ice_vsi_assign_bpf_prog(vsi, prog); in ice_prepare_xdp_rings()
2867 ice_for_each_xdp_txq(vsi, i) in ice_prepare_xdp_rings()
2868 if (vsi->xdp_rings[i]) { in ice_prepare_xdp_rings()
2869 kfree_rcu(vsi->xdp_rings[i], rcu); in ice_prepare_xdp_rings()
2870 vsi->xdp_rings[i] = NULL; in ice_prepare_xdp_rings()
2875 ice_for_each_xdp_txq(vsi, i) { in ice_prepare_xdp_rings()
2876 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); in ice_prepare_xdp_rings()
2877 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; in ice_prepare_xdp_rings()
2881 devm_kfree(dev, vsi->xdp_rings); in ice_prepare_xdp_rings()
2893 int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type) in ice_destroy_xdp_rings() argument
2896 struct ice_pf *pf = vsi->back; in ice_destroy_xdp_rings()
2905 ice_for_each_q_vector(vsi, v_idx) { in ice_destroy_xdp_rings()
2906 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; in ice_destroy_xdp_rings()
2919 ice_for_each_xdp_txq(vsi, i) { in ice_destroy_xdp_rings()
2920 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); in ice_destroy_xdp_rings()
2921 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; in ice_destroy_xdp_rings()
2925 ice_for_each_xdp_txq(vsi, i) in ice_destroy_xdp_rings()
2926 if (vsi->xdp_rings[i]) { in ice_destroy_xdp_rings()
2927 if (vsi->xdp_rings[i]->desc) { in ice_destroy_xdp_rings()
2929 ice_free_tx_ring(vsi->xdp_rings[i]); in ice_destroy_xdp_rings()
2931 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu); in ice_destroy_xdp_rings()
2932 vsi->xdp_rings[i]->ring_stats = NULL; in ice_destroy_xdp_rings()
2933 kfree_rcu(vsi->xdp_rings[i], rcu); in ice_destroy_xdp_rings()
2934 vsi->xdp_rings[i] = NULL; in ice_destroy_xdp_rings()
2937 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings); in ice_destroy_xdp_rings()
2938 vsi->xdp_rings = NULL; in ice_destroy_xdp_rings()
2946 ice_vsi_assign_bpf_prog(vsi, NULL); in ice_destroy_xdp_rings()
2951 for (i = 0; i < vsi->tc_cfg.numtc; i++) in ice_destroy_xdp_rings()
2952 max_txqs[i] = vsi->num_txq; in ice_destroy_xdp_rings()
2955 vsi->num_xdp_txq = 0; in ice_destroy_xdp_rings()
2957 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, in ice_destroy_xdp_rings()
2965 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi) in ice_vsi_rx_napi_schedule() argument
2969 ice_for_each_rxq(vsi, i) { in ice_vsi_rx_napi_schedule()
2970 struct ice_rx_ring *rx_ring = vsi->rx_rings[i]; in ice_vsi_rx_napi_schedule()
2984 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi) in ice_vsi_determine_xdp_res() argument
2986 u16 avail = ice_get_avail_txq_count(vsi->back); in ice_vsi_determine_xdp_res()
2992 if (vsi->type == ICE_VSI_SF) in ice_vsi_determine_xdp_res()
2993 avail = vsi->alloc_txq; in ice_vsi_determine_xdp_res()
2995 vsi->num_xdp_txq = min_t(u16, avail, cpus); in ice_vsi_determine_xdp_res()
2997 if (vsi->num_xdp_txq < cpus) in ice_vsi_determine_xdp_res()
3007 static int ice_max_xdp_frame_size(struct ice_vsi *vsi) in ice_max_xdp_frame_size() argument
3009 if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) in ice_max_xdp_frame_size()
3022 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, in ice_xdp_setup_prog() argument
3025 unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; in ice_xdp_setup_prog()
3030 if (frame_size > ice_max_xdp_frame_size(vsi)) { in ice_xdp_setup_prog()
3038 if (ice_is_xdp_ena_vsi(vsi) == !!prog || in ice_xdp_setup_prog()
3039 test_bit(ICE_VSI_REBUILD_PENDING, vsi->state)) { in ice_xdp_setup_prog()
3040 ice_vsi_assign_bpf_prog(vsi, prog); in ice_xdp_setup_prog()
3044 if_running = netif_running(vsi->netdev) && in ice_xdp_setup_prog()
3045 !test_and_set_bit(ICE_VSI_DOWN, vsi->state); in ice_xdp_setup_prog()
3049 ret = ice_down(vsi); in ice_xdp_setup_prog()
3056 if (!ice_is_xdp_ena_vsi(vsi) && prog) { in ice_xdp_setup_prog()
3057 xdp_ring_err = ice_vsi_determine_xdp_res(vsi); in ice_xdp_setup_prog()
3061 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog, in ice_xdp_setup_prog()
3066 xdp_features_set_redirect_target(vsi->netdev, true); in ice_xdp_setup_prog()
3068 xdp_ring_err = ice_realloc_zc_buf(vsi, true); in ice_xdp_setup_prog()
3071 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { in ice_xdp_setup_prog()
3072 xdp_features_clear_redirect_target(vsi->netdev); in ice_xdp_setup_prog()
3073 xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL); in ice_xdp_setup_prog()
3077 xdp_ring_err = ice_realloc_zc_buf(vsi, false); in ice_xdp_setup_prog()
3083 ret = ice_up(vsi); in ice_xdp_setup_prog()
3086 ice_vsi_rx_napi_schedule(vsi); in ice_xdp_setup_prog()
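ice_xdp_setup_prog() brackets the ring surgery with a conditional down/up: the interface is only taken down if it was actually running, recorded in if_running (lines 3044-3049), and brought back up afterwards (line 3083). Sketch of the bracket; the second guard is an assumption about the elided code:

        if_running = netif_running(vsi->netdev) &&
                     !test_and_set_bit(ICE_VSI_DOWN, vsi->state);

        if (if_running)
                ret = ice_down(vsi);

        /* ... attach or detach XDP rings and the program ... */

        if (if_running)
                ret = ice_up(vsi);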
3113 struct ice_vsi *vsi = np->vsi; in ice_xdp() local
3116 if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) { in ice_xdp()
3121 mutex_lock(&vsi->xdp_state_lock); in ice_xdp()
3125 ret = ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); in ice_xdp()
3128 ret = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id); in ice_xdp()
3134 mutex_unlock(&vsi->xdp_state_lock); in ice_xdp()
3580 static void ice_set_ops(struct ice_vsi *vsi) in ice_set_ops() argument
3582 struct net_device *netdev = vsi->netdev; in ice_set_ops()
3596 if (vsi->type != ICE_VSI_PF) in ice_set_ops()
3795 struct ice_vsi *vsi = np->vsi; in ice_vlan_rx_add_vid() local
3803 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) in ice_vlan_rx_add_vid()
3809 if (vsi->current_netdev_flags & IFF_ALLMULTI) { in ice_vlan_rx_add_vid()
3810 ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_add_vid()
3817 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); in ice_vlan_rx_add_vid()
3823 ret = vlan_ops->add_vlan(vsi, &vlan); in ice_vlan_rx_add_vid()
3831 if ((vsi->current_netdev_flags & IFF_ALLMULTI) && in ice_vlan_rx_add_vid()
3832 ice_vsi_num_non_zero_vlans(vsi) == 1) { in ice_vlan_rx_add_vid()
3833 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_add_vid()
3835 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_add_vid()
3840 clear_bit(ICE_CFG_BUSY, vsi->state); in ice_vlan_rx_add_vid()
3857 struct ice_vsi *vsi = np->vsi; in ice_vlan_rx_kill_vid() local
3865 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) in ice_vlan_rx_kill_vid()
3868 ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_kill_vid()
3872 vsi->vsi_num); in ice_vlan_rx_kill_vid()
3873 vsi->current_netdev_flags |= IFF_ALLMULTI; in ice_vlan_rx_kill_vid()
3876 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); in ice_vlan_rx_kill_vid()
3882 ret = vlan_ops->del_vlan(vsi, &vlan); in ice_vlan_rx_kill_vid()
3889 if (vsi->current_netdev_flags & IFF_ALLMULTI) in ice_vlan_rx_kill_vid()
3890 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_kill_vid()
3893 if (!ice_vsi_has_non_zero_vlans(vsi)) { in ice_vlan_rx_kill_vid()
3898 if (vsi->current_netdev_flags & IFF_ALLMULTI) { in ice_vlan_rx_kill_vid()
3899 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_kill_vid()
3902 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_kill_vid()
3908 clear_bit(ICE_CFG_BUSY, vsi->state); in ice_vlan_rx_kill_vid()
3929 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi) in ice_tc_indir_block_unregister() argument
3931 struct ice_netdev_priv *np = netdev_priv(vsi->netdev); in ice_tc_indir_block_unregister()
3943 static int ice_tc_indir_block_register(struct ice_vsi *vsi) in ice_tc_indir_block_register() argument
3947 if (!vsi || !vsi->netdev) in ice_tc_indir_block_register()
3950 np = netdev_priv(vsi->netdev); in ice_tc_indir_block_register()
4159 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked) in ice_vsi_recfg_qs() argument
4161 struct ice_pf *pf = vsi->back; in ice_vsi_recfg_qs()
4175 vsi->req_txq = (u16)new_tx; in ice_vsi_recfg_qs()
4177 vsi->req_rxq = (u16)new_rx; in ice_vsi_recfg_qs()
4180 if (!netif_running(vsi->netdev)) { in ice_vsi_recfg_qs()
4181 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); in ice_vsi_recfg_qs()
4188 ice_vsi_close(vsi); in ice_vsi_recfg_qs()
4189 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); in ice_vsi_recfg_qs()
4194 if (vsi->tc_cfg.ena_tc & BIT(i)) in ice_vsi_recfg_qs()
4195 netdev_set_tc_queue(vsi->netdev, in ice_vsi_recfg_qs()
4196 vsi->tc_cfg.tc_info[i].netdev_tc, in ice_vsi_recfg_qs()
4197 vsi->tc_cfg.tc_info[i].qcount_tx, in ice_vsi_recfg_qs()
4198 vsi->tc_cfg.tc_info[i].qoffset); in ice_vsi_recfg_qs()
4201 ice_vsi_open(vsi); in ice_vsi_recfg_qs()
4221 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_set_safe_mode_vlan_cfg() local
4226 if (!vsi) in ice_set_safe_mode_vlan_cfg()
4234 ctxt->info = vsi->info; in ice_set_safe_mode_vlan_cfg()
4252 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); in ice_set_safe_mode_vlan_cfg()
4254 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n", in ice_set_safe_mode_vlan_cfg()
4257 vsi->info.sec_flags = ctxt->info.sec_flags; in ice_set_safe_mode_vlan_cfg()
4258 vsi->info.sw_flags2 = ctxt->info.sw_flags2; in ice_set_safe_mode_vlan_cfg()
4259 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags; in ice_set_safe_mode_vlan_cfg()
4461 pf->vsi[pf->ctrl_vsi_idx] = NULL; in ice_init_fdir()
4469 struct ice_vsi *vsi = ice_get_ctrl_vsi(pf); in ice_deinit_fdir() local
4471 if (!vsi) in ice_deinit_fdir()
4474 ice_vsi_manage_fdir(vsi, false); in ice_deinit_fdir()
4475 ice_vsi_release(vsi); in ice_deinit_fdir()
4477 pf->vsi[pf->ctrl_vsi_idx] = NULL; in ice_deinit_fdir()
4692 static int ice_register_netdev(struct ice_vsi *vsi) in ice_register_netdev() argument
4696 if (!vsi || !vsi->netdev) in ice_register_netdev()
4699 err = register_netdev(vsi->netdev); in ice_register_netdev()
4703 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); in ice_register_netdev()
4704 netif_carrier_off(vsi->netdev); in ice_register_netdev()
4705 netif_tx_stop_all_queues(vsi->netdev); in ice_register_netdev()
4710 static void ice_unregister_netdev(struct ice_vsi *vsi) in ice_unregister_netdev() argument
4712 if (!vsi || !vsi->netdev) in ice_unregister_netdev()
4715 unregister_netdev(vsi->netdev); in ice_unregister_netdev()
4716 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); in ice_unregister_netdev()
4725 static int ice_cfg_netdev(struct ice_vsi *vsi) in ice_cfg_netdev() argument
4731 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, in ice_cfg_netdev()
4732 vsi->alloc_rxq); in ice_cfg_netdev()
4736 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); in ice_cfg_netdev()
4737 vsi->netdev = netdev; in ice_cfg_netdev()
4739 np->vsi = vsi; in ice_cfg_netdev()
4742 ice_set_ops(vsi); in ice_cfg_netdev()
4744 if (vsi->type == ICE_VSI_PF) { in ice_cfg_netdev()
4745 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back)); in ice_cfg_netdev()
4746 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); in ice_cfg_netdev()
4753 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); in ice_cfg_netdev()
4760 static void ice_decfg_netdev(struct ice_vsi *vsi) in ice_decfg_netdev() argument
4762 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); in ice_decfg_netdev()
4763 free_netdev(vsi->netdev); in ice_decfg_netdev()
4764 vsi->netdev = NULL; in ice_decfg_netdev()
4957 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_init_link() local
4959 if (vsi) in ice_init_link()
4960 ice_configure_phy(vsi); in ice_init_link()
4972 struct ice_vsi *vsi; in ice_init_pf_sw() local
4994 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); in ice_init_pf_sw()
4995 if (!vsi) { in ice_init_pf_sw()
5010 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_deinit_pf_sw() local
5012 if (!vsi) in ice_deinit_pf_sw()
5015 ice_vsi_release(vsi); in ice_deinit_pf_sw()
5034 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), in ice_alloc_vsis()
5036 if (!pf->vsi) in ice_alloc_vsis()
5042 devm_kfree(dev, pf->vsi); in ice_alloc_vsis()
5055 devm_kfree(ice_pf_to_dev(pf), pf->vsi); in ice_dealloc_vsis()
5056 pf->vsi = NULL; in ice_dealloc_vsis()
5152 struct ice_vsi *vsi; in ice_load() local
5157 vsi = ice_get_main_vsi(pf); in ice_load()
5160 INIT_LIST_HEAD(&vsi->ch_list); in ice_load()
5162 err = ice_cfg_netdev(vsi); in ice_load()
5167 ice_dcbnl_setup(vsi); in ice_load()
5177 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); in ice_load()
5179 err = ice_register_netdev(vsi); in ice_load()
5183 err = ice_tc_indir_block_register(vsi); in ice_load()
5187 ice_napi_add(vsi); in ice_load()
5201 ice_tc_indir_block_unregister(vsi); in ice_load()
5203 ice_unregister_netdev(vsi); in ice_load()
5208 ice_decfg_netdev(vsi); in ice_load()
5220 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_unload() local
5226 ice_tc_indir_block_unregister(vsi); in ice_unload()
5227 ice_unregister_netdev(vsi); in ice_unload()
5229 ice_decfg_netdev(vsi); in ice_unload()
5431 struct ice_vsi *vsi; in ice_setup_mc_magic_wake() local
5438 vsi = ice_get_main_vsi(pf); in ice_setup_mc_magic_wake()
5439 if (!vsi) in ice_setup_mc_magic_wake()
5443 if (vsi->netdev) in ice_setup_mc_magic_wake()
5444 ether_addr_copy(mac_addr, vsi->netdev->dev_addr); in ice_setup_mc_magic_wake()
5446 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); in ice_setup_mc_magic_wake()
5548 if (pf->vsi[v]) in ice_prepare_for_shutdown()
5549 pf->vsi[v]->vsi_num = 0; in ice_prepare_for_shutdown()
5581 if (!pf->vsi[v]) in ice_reinit_interrupt_scheme()
5584 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5587 ice_vsi_map_rings_to_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5589 ice_vsi_set_napi_queues(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5604 if (pf->vsi[v]) { in ice_reinit_interrupt_scheme()
5606 ice_vsi_clear_napi_queues(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5608 ice_vsi_free_q_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5672 if (!pf->vsi[v]) in ice_suspend()
5675 ice_vsi_clear_napi_queues(pf->vsi[v]); in ice_suspend()
5677 ice_vsi_free_q_vectors(pf->vsi[v]); in ice_suspend()
6030 struct ice_vsi *vsi = np->vsi; in ice_set_mac_address() local
6031 struct ice_pf *pf = vsi->back; in ice_set_mac_address()
6064 err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); in ice_set_mac_address()
6071 err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); in ice_set_mac_address()
6096 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", in ice_set_mac_address()
6116 struct ice_vsi *vsi = np->vsi; in ice_set_rx_mode() local
6118 if (!vsi || ice_is_switchdev_running(vsi->back)) in ice_set_rx_mode()
6125 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); in ice_set_rx_mode()
6126 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); in ice_set_rx_mode()
6127 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags); in ice_set_rx_mode()
6132 ice_service_task_schedule(vsi->back); in ice_set_rx_mode()
6145 struct ice_vsi *vsi = np->vsi; in ice_set_tx_maxrate() local
6157 q_handle = vsi->tx_rings[queue_index]->q_handle; in ice_set_tx_maxrate()
6158 tc = ice_dcb_get_tc(vsi, queue_index); in ice_set_tx_maxrate()
6160 vsi = ice_locate_vsi_using_queue(vsi, queue_index); in ice_set_tx_maxrate()
6161 if (!vsi) { in ice_set_tx_maxrate()
6169 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc, in ice_set_tx_maxrate()
6172 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, in ice_set_tx_maxrate()
6320 if (ice_is_dvm_ena(&np->vsi->back->hw)) { in ice_fix_features()
6353 !ice_vsi_has_non_zero_vlans(np->vsi)) { in ice_fix_features()
6370 ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype) in ice_set_rx_rings_vlan_proto() argument
6374 ice_for_each_alloc_rxq(vsi, i) in ice_set_rx_rings_vlan_proto()
6375 vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype; in ice_set_rx_rings_vlan_proto()
6388 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features) in ice_set_vlan_offload_features() argument
6395 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); in ice_set_vlan_offload_features()
6408 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype); in ice_set_vlan_offload_features()
6410 strip_err = vlan_ops->dis_stripping(vsi); in ice_set_vlan_offload_features()
6413 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype); in ice_set_vlan_offload_features()
6415 insert_err = vlan_ops->dis_insertion(vsi); in ice_set_vlan_offload_features()
6420 ice_set_rx_rings_vlan_proto(vsi, enable_stripping ? in ice_set_vlan_offload_features()
6435 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features) in ice_set_vlan_filtering_features() argument
6437 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); in ice_set_vlan_filtering_features()
6446 !ice_is_eswitch_mode_switchdev(vsi->back)) in ice_set_vlan_filtering_features()
6447 err = vlan_ops->ena_rx_filtering(vsi); in ice_set_vlan_filtering_features()
6449 err = vlan_ops->dis_rx_filtering(vsi); in ice_set_vlan_filtering_features()
6467 struct ice_vsi *vsi = np->vsi; in ice_set_vlan_features() local
6475 dev_err(ice_pf_to_dev(vsi->back), in ice_set_vlan_features()
6480 err = ice_set_vlan_offload_features(vsi, features); in ice_set_vlan_features()
6489 err = ice_set_vlan_filtering_features(vsi, features); in ice_set_vlan_features()
6502 static int ice_set_loopback(struct ice_vsi *vsi, bool ena) in ice_set_loopback() argument
6504 bool if_running = netif_running(vsi->netdev); in ice_set_loopback()
6507 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { in ice_set_loopback()
6508 ret = ice_down(vsi); in ice_set_loopback()
6510 netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n"); in ice_set_loopback()
6514 ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL); in ice_set_loopback()
6516 netdev_err(vsi->netdev, "Failed to toggle loopback state\n"); in ice_set_loopback()
6518 ret = ice_up(vsi); in ice_set_loopback()
6533 struct ice_vsi *vsi = np->vsi; in ice_set_features() local
6534 struct ice_pf *pf = vsi->back; in ice_set_features()
6555 ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH)); in ice_set_features()
6567 dev_err(ice_pf_to_dev(vsi->back), in ice_set_features()
6572 ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS)); in ice_set_features()
6573 ret = ice_down_up(vsi); in ice_set_features()
6581 ice_vsi_manage_fdir(vsi, ena); in ice_set_features()
6582 ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi); in ice_set_features()
6598 ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK)); in ice_set_features()
6607 static int ice_vsi_vlan_setup(struct ice_vsi *vsi) in ice_vsi_vlan_setup() argument
6611 err = ice_set_vlan_offload_features(vsi, vsi->netdev->features); in ice_vsi_vlan_setup()
6615 err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features); in ice_vsi_vlan_setup()
6619 return ice_vsi_add_vlan_zero(vsi); in ice_vsi_vlan_setup()
6628 int ice_vsi_cfg_lan(struct ice_vsi *vsi) in ice_vsi_cfg_lan() argument
6632 if (vsi->netdev && vsi->type == ICE_VSI_PF) { in ice_vsi_cfg_lan()
6633 ice_set_rx_mode(vsi->netdev); in ice_vsi_cfg_lan()
6635 err = ice_vsi_vlan_setup(vsi); in ice_vsi_cfg_lan()
6639 ice_vsi_cfg_dcb_rings(vsi); in ice_vsi_cfg_lan()
6641 err = ice_vsi_cfg_lan_txqs(vsi); in ice_vsi_cfg_lan()
6642 if (!err && ice_is_xdp_ena_vsi(vsi)) in ice_vsi_cfg_lan()
6643 err = ice_vsi_cfg_xdp_txqs(vsi); in ice_vsi_cfg_lan()
6645 err = ice_vsi_cfg_rxqs(vsi); in ice_vsi_cfg_lan()
6774 static void ice_napi_enable_all(struct ice_vsi *vsi) in ice_napi_enable_all() argument
6778 if (!vsi->netdev) in ice_napi_enable_all()
6781 ice_for_each_q_vector(vsi, q_idx) { in ice_napi_enable_all()
6782 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; in ice_napi_enable_all()
6797 static int ice_up_complete(struct ice_vsi *vsi) in ice_up_complete() argument
6799 struct ice_pf *pf = vsi->back; in ice_up_complete()
6802 ice_vsi_cfg_msix(vsi); in ice_up_complete()
6808 err = ice_vsi_start_all_rx_rings(vsi); in ice_up_complete()
6812 clear_bit(ICE_VSI_DOWN, vsi->state); in ice_up_complete()
6813 ice_napi_enable_all(vsi); in ice_up_complete()
6814 ice_vsi_ena_irq(vsi); in ice_up_complete()
6816 if (vsi->port_info && in ice_up_complete()
6817 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && in ice_up_complete()
6818 ((vsi->netdev && (vsi->type == ICE_VSI_PF || in ice_up_complete()
6819 vsi->type == ICE_VSI_SF)))) { in ice_up_complete()
6820 ice_print_link_msg(vsi, true); in ice_up_complete()
6821 netif_tx_start_all_queues(vsi->netdev); in ice_up_complete()
6822 netif_carrier_on(vsi->netdev); in ice_up_complete()
6829 ice_update_eth_stats(vsi); in ice_up_complete()
6831 if (vsi->type == ICE_VSI_PF) in ice_up_complete()
6841 int ice_up(struct ice_vsi *vsi) in ice_up() argument
6845 err = ice_vsi_cfg_lan(vsi); in ice_up()
6847 err = ice_up_complete(vsi); in ice_up()
6883 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, in ice_update_vsi_tx_ring_stats() argument
6901 vsi->tx_restart += ring->ring_stats->tx_stats.restart_q; in ice_update_vsi_tx_ring_stats()
6902 vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy; in ice_update_vsi_tx_ring_stats()
6903 vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize; in ice_update_vsi_tx_ring_stats()
6911 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) in ice_update_vsi_ring_stats() argument
6915 struct ice_pf *pf = vsi->back; in ice_update_vsi_ring_stats()
6924 vsi->tx_restart = 0; in ice_update_vsi_ring_stats()
6925 vsi->tx_busy = 0; in ice_update_vsi_ring_stats()
6926 vsi->tx_linearize = 0; in ice_update_vsi_ring_stats()
6927 vsi->rx_buf_failed = 0; in ice_update_vsi_ring_stats()
6928 vsi->rx_page_failed = 0; in ice_update_vsi_ring_stats()
6933 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings, in ice_update_vsi_ring_stats()
6934 vsi->num_txq); in ice_update_vsi_ring_stats()
6937 ice_for_each_rxq(vsi, i) { in ice_update_vsi_ring_stats()
6938 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]); in ice_update_vsi_ring_stats()
6947 vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed; in ice_update_vsi_ring_stats()
6948 vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed; in ice_update_vsi_ring_stats()
6952 if (ice_is_xdp_ena_vsi(vsi)) in ice_update_vsi_ring_stats()
6953 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings, in ice_update_vsi_ring_stats()
6954 vsi->num_xdp_txq); in ice_update_vsi_ring_stats()
6958 net_stats = &vsi->net_stats; in ice_update_vsi_ring_stats()
6959 stats_prev = &vsi->net_stats_prev; in ice_update_vsi_ring_stats()
6985 void ice_update_vsi_stats(struct ice_vsi *vsi) in ice_update_vsi_stats() argument
6987 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; in ice_update_vsi_stats()
6988 struct ice_eth_stats *cur_es = &vsi->eth_stats; in ice_update_vsi_stats()
6989 struct ice_pf *pf = vsi->back; in ice_update_vsi_stats()
6991 if (test_bit(ICE_VSI_DOWN, vsi->state) || in ice_update_vsi_stats()
6996 ice_update_vsi_ring_stats(vsi); in ice_update_vsi_stats()
6999 ice_update_eth_stats(vsi); in ice_update_vsi_stats()
7007 if (vsi->type == ICE_VSI_PF) { in ice_update_vsi_stats()
7181 struct ice_vsi *vsi = np->vsi; in ice_get_stats64() local
7183 vsi_stats = &vsi->net_stats; in ice_get_stats64()
7185 if (!vsi->num_txq || !vsi->num_rxq) in ice_get_stats64()
7193 if (!test_bit(ICE_VSI_DOWN, vsi->state)) in ice_get_stats64()
7194 ice_update_vsi_ring_stats(vsi); in ice_get_stats64()
7217 static void ice_napi_disable_all(struct ice_vsi *vsi) in ice_napi_disable_all() argument
7221 if (!vsi->netdev) in ice_napi_disable_all()
7224 ice_for_each_q_vector(vsi, q_idx) { in ice_napi_disable_all()
7225 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; in ice_napi_disable_all()
7239 static void ice_vsi_dis_irq(struct ice_vsi *vsi) in ice_vsi_dis_irq() argument
7241 struct ice_pf *pf = vsi->back; in ice_vsi_dis_irq()
7249 if (vsi->rx_rings) { in ice_vsi_dis_irq()
7250 ice_for_each_rxq(vsi, i) { in ice_vsi_dis_irq()
7251 if (vsi->rx_rings[i]) { in ice_vsi_dis_irq()
7254 reg = vsi->rx_rings[i]->reg_idx; in ice_vsi_dis_irq()
7263 ice_for_each_q_vector(vsi, i) { in ice_vsi_dis_irq()
7264 if (!vsi->q_vectors[i]) in ice_vsi_dis_irq()
7266 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); in ice_vsi_dis_irq()
7272 if (vsi->type == ICE_VSI_VF) in ice_vsi_dis_irq()
7275 ice_for_each_q_vector(vsi, i) in ice_vsi_dis_irq()
7276 synchronize_irq(vsi->q_vectors[i]->irq.virq); in ice_vsi_dis_irq()
7285 int ice_down(struct ice_vsi *vsi) in ice_down() argument
7289 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state)); in ice_down()
7291 if (vsi->netdev) { in ice_down()
7292 vlan_err = ice_vsi_del_vlan_zero(vsi); in ice_down()
7293 ice_ptp_link_change(vsi->back, false); in ice_down()
7294 netif_carrier_off(vsi->netdev); in ice_down()
7295 netif_tx_disable(vsi->netdev); in ice_down()
7298 ice_vsi_dis_irq(vsi); in ice_down()
7300 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0); in ice_down()
7302 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n", in ice_down()
7303 vsi->vsi_num, tx_err); in ice_down()
7304 if (!tx_err && vsi->xdp_rings) { in ice_down()
7305 tx_err = ice_vsi_stop_xdp_tx_rings(vsi); in ice_down()
7307 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n", in ice_down()
7308 vsi->vsi_num, tx_err); in ice_down()
7311 rx_err = ice_vsi_stop_all_rx_rings(vsi); in ice_down()
7313 netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n", in ice_down()
7314 vsi->vsi_num, rx_err); in ice_down()
7316 ice_napi_disable_all(vsi); in ice_down()
7318 ice_for_each_txq(vsi, i) in ice_down()
7319 ice_clean_tx_ring(vsi->tx_rings[i]); in ice_down()
7321 if (vsi->xdp_rings) in ice_down()
7322 ice_for_each_xdp_txq(vsi, i) in ice_down()
7323 ice_clean_tx_ring(vsi->xdp_rings[i]); in ice_down()
7325 ice_for_each_rxq(vsi, i) in ice_down()
7326 ice_clean_rx_ring(vsi->rx_rings[i]); in ice_down()
7329 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", in ice_down()
7330 vsi->vsi_num, vsi->vsw->sw_id); in ice_down()
7341 int ice_down_up(struct ice_vsi *vsi) in ice_down_up() argument
7346 if (test_and_set_bit(ICE_VSI_DOWN, vsi->state)) in ice_down_up()
7349 ret = ice_down(vsi); in ice_down_up()
7353 ret = ice_up(vsi); in ice_down_up()
7355 …netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to … in ice_down_up()
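ice_down_up() is the generic form of that down/up bracket, used by feature toggles such as the RXFCS change at line 6573: mark the VSI down, bailing out if it already was, then cycle ice_down() + ice_up(). A sketch assuming only the signatures shown above:

int sketch_down_up(struct ice_vsi *vsi)
{
        int ret;

        /* if the VSI was already down there is nothing to cycle */
        if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
                return 0;

        ret = ice_down(vsi);
        if (ret)
                return ret;

        return ice_up(vsi);
}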
7368 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) in ice_vsi_setup_tx_rings() argument
7372 if (!vsi->num_txq) { in ice_vsi_setup_tx_rings()
7373 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n", in ice_vsi_setup_tx_rings()
7374 vsi->vsi_num); in ice_vsi_setup_tx_rings()
7378 ice_for_each_txq(vsi, i) { in ice_vsi_setup_tx_rings()
7379 struct ice_tx_ring *ring = vsi->tx_rings[i]; in ice_vsi_setup_tx_rings()
7384 if (vsi->netdev) in ice_vsi_setup_tx_rings()
7385 ring->netdev = vsi->netdev; in ice_vsi_setup_tx_rings()
7400 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) in ice_vsi_setup_rx_rings() argument
7404 if (!vsi->num_rxq) { in ice_vsi_setup_rx_rings()
7405 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n", in ice_vsi_setup_rx_rings()
7406 vsi->vsi_num); in ice_vsi_setup_rx_rings()
7410 ice_for_each_rxq(vsi, i) { in ice_vsi_setup_rx_rings()
7411 struct ice_rx_ring *ring = vsi->rx_rings[i]; in ice_vsi_setup_rx_rings()
7416 if (vsi->netdev) in ice_vsi_setup_rx_rings()
7417 ring->netdev = vsi->netdev; in ice_vsi_setup_rx_rings()
7434 int ice_vsi_open_ctrl(struct ice_vsi *vsi) in ice_vsi_open_ctrl() argument
7437 struct ice_pf *pf = vsi->back; in ice_vsi_open_ctrl()
7443 err = ice_vsi_setup_tx_rings(vsi); in ice_vsi_open_ctrl()
7447 err = ice_vsi_setup_rx_rings(vsi); in ice_vsi_open_ctrl()
7451 err = ice_vsi_cfg_lan(vsi); in ice_vsi_open_ctrl()
7457 err = ice_vsi_req_irq_msix(vsi, int_name); in ice_vsi_open_ctrl()
7461 ice_vsi_cfg_msix(vsi); in ice_vsi_open_ctrl()
7463 err = ice_vsi_start_all_rx_rings(vsi); in ice_vsi_open_ctrl()
7467 clear_bit(ICE_VSI_DOWN, vsi->state); in ice_vsi_open_ctrl()
7468 ice_vsi_ena_irq(vsi); in ice_vsi_open_ctrl()
7473 ice_down(vsi); in ice_vsi_open_ctrl()
7475 ice_vsi_free_rx_rings(vsi); in ice_vsi_open_ctrl()
7477 ice_vsi_free_tx_rings(vsi); in ice_vsi_open_ctrl()
7490 int ice_vsi_open(struct ice_vsi *vsi) in ice_vsi_open() argument
7493 struct ice_pf *pf = vsi->back; in ice_vsi_open()
7497 err = ice_vsi_setup_tx_rings(vsi); in ice_vsi_open()
7501 err = ice_vsi_setup_rx_rings(vsi); in ice_vsi_open()
7505 err = ice_vsi_cfg_lan(vsi); in ice_vsi_open()
7510 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); in ice_vsi_open()
7511 err = ice_vsi_req_irq_msix(vsi, int_name); in ice_vsi_open()
7515 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); in ice_vsi_open()
7517 if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) { in ice_vsi_open()
7519 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); in ice_vsi_open()
7523 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); in ice_vsi_open()
7527 ice_vsi_set_napi_queues(vsi); in ice_vsi_open()
7530 err = ice_up_complete(vsi); in ice_vsi_open()
7537 ice_down(vsi); in ice_vsi_open()
7539 ice_vsi_free_irq(vsi); in ice_vsi_open()
7541 ice_vsi_free_rx_rings(vsi); in ice_vsi_open()
7543 ice_vsi_free_tx_rings(vsi); in ice_vsi_open()
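ice_vsi_open()'s error path is the usual layered goto unwind, releasing resources in reverse order of acquisition (lines 7537-7543). The label names below are assumptions; the ordering is exactly what the excerpt shows:

err_up_complete:
        ice_down(vsi);
        ice_vsi_free_irq(vsi);          /* undo ice_vsi_req_irq_msix() */
err_setup_rx:
        ice_vsi_free_rx_rings(vsi);     /* undo ice_vsi_setup_rx_rings() */
err_setup_tx:
        ice_vsi_free_tx_rings(vsi);     /* undo ice_vsi_setup_tx_rings() */
        return err;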
7556 if (!pf->vsi) in ice_vsi_release_all()
7560 if (!pf->vsi[i]) in ice_vsi_release_all()
7563 if (pf->vsi[i]->type == ICE_VSI_CHNL) in ice_vsi_release_all()
7566 err = ice_vsi_release(pf->vsi[i]); in ice_vsi_release_all()
7569 i, err, pf->vsi[i]->vsi_num); in ice_vsi_release_all()
7586 struct ice_vsi *vsi = pf->vsi[i]; in ice_vsi_rebuild_by_type() local
7588 if (!vsi || vsi->type != type) in ice_vsi_rebuild_by_type()
7592 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT); in ice_vsi_rebuild_by_type()
7595 err, vsi->idx, ice_vsi_type_str(type)); in ice_vsi_rebuild_by_type()
7600 err = ice_replay_vsi(&pf->hw, vsi->idx); in ice_vsi_rebuild_by_type()
7603 err, vsi->idx, ice_vsi_type_str(type)); in ice_vsi_rebuild_by_type()
7610 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); in ice_vsi_rebuild_by_type()
7613 err = ice_ena_vsi(vsi, false); in ice_vsi_rebuild_by_type()
7616 err, vsi->idx, ice_vsi_type_str(type)); in ice_vsi_rebuild_by_type()
7620 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx, in ice_vsi_rebuild_by_type()
7637 struct ice_vsi *vsi = pf->vsi[i]; in ice_update_pf_netdev_link() local
7639 if (!vsi || vsi->type != ICE_VSI_PF) in ice_update_pf_netdev_link()
7642 ice_get_link_status(pf->vsi[i]->port_info, &link_up); in ice_update_pf_netdev_link()
7644 netif_carrier_on(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7645 netif_tx_wake_all_queues(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7647 netif_carrier_off(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7648 netif_tx_stop_all_queues(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7665 struct ice_vsi *vsi = ice_get_main_vsi(pf); in ice_rebuild() local
7808 if (vsi && vsi->netdev) in ice_rebuild()
7809 netif_device_attach(vsi->netdev); in ice_rebuild()
7858 struct ice_vsi *vsi = np->vsi; in ice_change_mtu() local
7859 struct ice_pf *pf = vsi->back; in ice_change_mtu()
7869 prog = vsi->xdp_prog; in ice_change_mtu()
7871 int frame_size = ice_max_xdp_frame_size(vsi); in ice_change_mtu()
7903 err = ice_down_up(vsi); in ice_change_mtu()
7922 struct ice_pf *pf = np->vsi->back; in ice_eth_ioctl()
7984 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) in ice_set_rss_lut() argument
7987 struct ice_hw *hw = &vsi->back->hw; in ice_set_rss_lut()
7993 params.vsi_handle = vsi->idx; in ice_set_rss_lut()
7995 params.lut_type = vsi->rss_lut_type; in ice_set_rss_lut()
8000 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n", in ice_set_rss_lut()
8013 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed) in ice_set_rss_key() argument
8015 struct ice_hw *hw = &vsi->back->hw; in ice_set_rss_key()
8021 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); in ice_set_rss_key()
8023 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n", in ice_set_rss_key()
8037 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) in ice_get_rss_lut() argument
8040 struct ice_hw *hw = &vsi->back->hw; in ice_get_rss_lut()
8046 params.vsi_handle = vsi->idx; in ice_get_rss_lut()
8048 params.lut_type = vsi->rss_lut_type; in ice_get_rss_lut()
8053 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n", in ice_get_rss_lut()
8066 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed) in ice_get_rss_key() argument
8068 struct ice_hw *hw = &vsi->back->hw; in ice_get_rss_key()
8074 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); in ice_get_rss_key()
8076 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n", in ice_get_rss_key()
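The four RSS accessors above share one shape: resolve hw from vsi->back, issue the corresponding ice_aq_{get,set}_rss_* admin command against vsi->idx, and log on failure. A hedged caller-side sketch using the signatures in the listing (the wrapper name is illustrative; the seed buffer is sized as struct ice_aqc_get_set_rss_keys per the casts at lines 8021 and 8074):

static int sketch_program_rss(struct ice_vsi *vsi, u8 *lut, u16 lut_size,
                              u8 *seed)
{
        int err;

        /* LUT first, then the hash key; both are thin wrappers over
         * AQ commands keyed by vsi->idx */
        err = ice_set_rss_lut(vsi, lut, lut_size);
        if (err)
                return err;

        return ice_set_rss_key(vsi, seed);
}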
8089 int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc) in ice_set_rss_hfunc() argument
8091 struct ice_hw *hw = &vsi->back->hw; in ice_set_rss_hfunc()
8096 if (hfunc == vsi->rss_hfunc) in ice_set_rss_hfunc()
8108 ctx->info.q_opt_rss = vsi->info.q_opt_rss; in ice_set_rss_hfunc()
8112 ctx->info.q_opt_tc = vsi->info.q_opt_tc; in ice_set_rss_hfunc()
8113 ctx->info.q_opt_flags = vsi->info.q_opt_rss; in ice_set_rss_hfunc()
8115 err = ice_update_vsi(hw, vsi->idx, ctx, NULL); in ice_set_rss_hfunc()
8117 dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n", in ice_set_rss_hfunc()
8118 vsi->vsi_num, err); in ice_set_rss_hfunc()
8120 vsi->info.q_opt_rss = ctx->info.q_opt_rss; in ice_set_rss_hfunc()
8121 vsi->rss_hfunc = hfunc; in ice_set_rss_hfunc()
8122 netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n", in ice_set_rss_hfunc()
8132 return ice_set_rss_cfg_symm(hw, vsi, symm); in ice_set_rss_hfunc()
8151 struct ice_vsi *vsi = np->vsi; in ice_bridge_getlink() local
8152 struct ice_pf *pf = vsi->back; in ice_bridge_getlink()
8168 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) in ice_vsi_update_bridge_mode() argument
8171 struct ice_hw *hw = &vsi->back->hw; in ice_vsi_update_bridge_mode()
8175 vsi_props = &vsi->info; in ice_vsi_update_bridge_mode()
8181 ctxt->info = vsi->info; in ice_vsi_update_bridge_mode()
8191 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); in ice_vsi_update_bridge_mode()
8193 …dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\… in ice_vsi_update_bridge_mode()
8223 struct ice_pf *pf = np->vsi->back; in ice_bridge_setlink()
8247 if (!pf->vsi[v]) in ice_bridge_setlink()
8249 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); in ice_bridge_setlink()
8283 struct ice_vsi *vsi = np->vsi; in ice_tx_timeout() local
8284 struct ice_pf *pf = vsi->back; in ice_tx_timeout()
8300 ice_for_each_txq(vsi, i) in ice_tx_timeout()
8301 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) in ice_tx_timeout()
8302 if (txqueue == vsi->tx_rings[i]->q_index) { in ice_tx_timeout()
8303 tx_ring = vsi->tx_rings[i]; in ice_tx_timeout()
8321 rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue]))); in ice_tx_timeout()
8326 vsi->vsi_num, txqueue, tx_ring->next_to_clean, in ice_tx_timeout()
8329 ice_prep_tx_hang_report(pf, tx_ring, vsi->vsi_num, head, intr); in ice_tx_timeout()
8349 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); in ice_tx_timeout()
8369 struct ice_vsi *vsi = np->vsi; in ice_setup_tc_cls_flower() local
8376 return ice_add_cls_flower(filter_dev, vsi, cls_flower); in ice_setup_tc_cls_flower()
8378 return ice_del_cls_flower(vsi, cls_flower); in ice_setup_tc_cls_flower()
8397 return ice_setup_tc_cls_flower(np, np->vsi->netdev, in ice_setup_tc_block_cb()
8414 ice_validate_mqprio_qopt(struct ice_vsi *vsi, in ice_validate_mqprio_qopt() argument
8418 struct ice_pf *pf = vsi->back; in ice_validate_mqprio_qopt()
8425 if (vsi->type != ICE_VSI_PF) in ice_validate_mqprio_qopt()
8434 vsi->ch_rss_size = 0; in ice_validate_mqprio_qopt()
8436 speed = ice_get_link_speed_kbps(vsi); in ice_validate_mqprio_qopt()
8525 if (vsi->num_rxq < in ice_validate_mqprio_qopt()
8528 if (vsi->num_txq < in ice_validate_mqprio_qopt()
8539 vsi->ch_rss_size = max_rss_q_cnt; in ice_validate_mqprio_qopt()
8549 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi) in ice_add_vsi_to_fdir() argument
8556 if (!(vsi->num_gfltr || vsi->num_bfltr)) in ice_add_vsi_to_fdir()
8577 prof->vsi_h[0], vsi->idx, in ice_add_vsi_to_fdir()
8582 vsi->idx, flow); in ice_add_vsi_to_fdir()
8590 prof->vsi_h[prof->cnt] = vsi->idx; in ice_add_vsi_to_fdir()
8594 dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx, in ice_add_vsi_to_fdir()
8599 dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx); in ice_add_vsi_to_fdir()
8615 struct ice_vsi *vsi; in ice_add_channel() local
8622 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch); in ice_add_channel()
8623 if (!vsi || vsi->type != ICE_VSI_CHNL) { in ice_add_channel()
8628 ice_add_vsi_to_fdir(pf, vsi); in ice_add_channel()
8631 ch->vsi_num = vsi->vsi_num; in ice_add_channel()
8632 ch->info.mapping_flags = vsi->info.mapping_flags; in ice_add_channel()
8633 ch->ch_vsi = vsi; in ice_add_channel()
8635 vsi->ch = ch; in ice_add_channel()
8637 memcpy(&ch->info.q_mapping, &vsi->info.q_mapping, in ice_add_channel()
8638 sizeof(vsi->info.q_mapping)); in ice_add_channel()
8639 memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping, in ice_add_channel()
8640 sizeof(vsi->info.tc_mapping)); in ice_add_channel()
8652 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch) in ice_chnl_cfg_res() argument
8662 tx_ring = vsi->tx_rings[ch->base_q + i]; in ice_chnl_cfg_res()
8663 rx_ring = vsi->rx_rings[ch->base_q + i]; in ice_chnl_cfg_res()
8698 ice_flush(&vsi->back->hw); in ice_chnl_cfg_res()
8710 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch) in ice_cfg_chnl_all_res() argument
8715 ice_chnl_cfg_res(vsi, ch); in ice_cfg_chnl_all_res()
8730 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi, in ice_setup_hw_channel() argument
8736 ch->base_q = vsi->next_base_q; in ice_setup_hw_channel()
8746 ice_cfg_chnl_all_res(vsi, ch); in ice_setup_hw_channel()
8751 vsi->next_base_q = vsi->next_base_q + ch->num_rxq; in ice_setup_hw_channel()
8768 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi, in ice_setup_channel() argument
8775 if (vsi->type != ICE_VSI_PF) { in ice_setup_channel()
8776 dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type); in ice_setup_channel()
8783 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL); in ice_setup_channel()
8800 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate) in ice_set_bw_limit() argument
8804 err = ice_set_min_bw_limit(vsi, min_tx_rate); in ice_set_bw_limit()
8808 return ice_set_max_bw_limit(vsi, max_tx_rate); in ice_set_bw_limit()
8819 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch) in ice_create_q_channel() argument
8821 struct ice_pf *pf = vsi->back; in ice_create_q_channel()
8833 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) { in ice_create_q_channel()
8835 vsi->cnt_q_avail, ch->num_txq); in ice_create_q_channel()
8839 if (!ice_setup_channel(pf, vsi, ch)) { in ice_create_q_channel()
8857 vsi->cnt_q_avail -= ch->num_txq; in ice_create_q_channel()
8920 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr) in ice_remove_q_channels() argument
8923 struct ice_pf *pf = vsi->back; in ice_remove_q_channels()
8931 if (vsi->netdev->features & NETIF_F_NTUPLE) { in ice_remove_q_channels()
8935 ice_fdir_del_all_fltrs(vsi); in ice_remove_q_channels()
8940 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { in ice_remove_q_channels()
8955 tx_ring = vsi->tx_rings[ch->base_q + i]; in ice_remove_q_channels()
8956 rx_ring = vsi->rx_rings[ch->base_q + i]; in ice_remove_q_channels()
8984 vsi->tc_map_vsi[i] = NULL; in ice_remove_q_channels()
8987 vsi->all_enatc = 0; in ice_remove_q_channels()
8988 vsi->all_numtc = 0; in ice_remove_q_channels()
9003 struct ice_vsi *vsi; in ice_rebuild_channels() local
9029 vsi = pf->vsi[i]; in ice_rebuild_channels()
9030 if (!vsi || vsi->type != ICE_VSI_CHNL) in ice_rebuild_channels()
9033 type = vsi->type; in ice_rebuild_channels()
9036 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT); in ice_rebuild_channels()
9039 ice_vsi_type_str(type), vsi->idx, err); in ice_rebuild_channels()
9046 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); in ice_rebuild_channels()
9049 err = ice_replay_vsi(&pf->hw, vsi->idx); in ice_rebuild_channels()
9052 ice_vsi_type_str(type), err, vsi->idx); in ice_rebuild_channels()
9057 ice_vsi_type_str(type), vsi->idx); in ice_rebuild_channels()
9062 main_vsi->tc_map_vsi[tc_idx++] = vsi; in ice_rebuild_channels()
9111 static int ice_create_q_channels(struct ice_vsi *vsi) in ice_create_q_channels() argument
9113 struct ice_pf *pf = vsi->back; in ice_create_q_channels()
9118 if (!(vsi->all_enatc & BIT(i))) in ice_create_q_channels()
9127 ch->num_rxq = vsi->mqprio_qopt.qopt.count[i]; in ice_create_q_channels()
9128 ch->num_txq = vsi->mqprio_qopt.qopt.count[i]; in ice_create_q_channels()
9129 ch->base_q = vsi->mqprio_qopt.qopt.offset[i]; in ice_create_q_channels()
9130 ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i]; in ice_create_q_channels()
9131 ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i]; in ice_create_q_channels()
9141 ret = ice_create_q_channel(vsi, ch); in ice_create_q_channels()
9148 list_add_tail(&ch->list, &vsi->ch_list); in ice_create_q_channels()
9149 vsi->tc_map_vsi[i] = ch->ch_vsi; in ice_create_q_channels()
9156 ice_remove_q_channels(vsi, false); in ice_create_q_channels()
9170 struct ice_vsi *vsi = np->vsi; in ice_setup_tc_mqprio_qdisc() local
9171 struct ice_pf *pf = vsi->back; in ice_setup_tc_mqprio_qdisc()
9184 vsi->ch_rss_size = 0; in ice_setup_tc_mqprio_qdisc()
9185 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); in ice_setup_tc_mqprio_qdisc()
9202 ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt); in ice_setup_tc_mqprio_qdisc()
9208 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); in ice_setup_tc_mqprio_qdisc()
9214 if (vsi->netdev->features & NETIF_F_HW_TC) in ice_setup_tc_mqprio_qdisc()
9224 if (ena_tc_qdisc == vsi->tc_cfg.ena_tc && in ice_setup_tc_mqprio_qdisc()
9229 ice_dis_vsi(vsi, true); in ice_setup_tc_mqprio_qdisc()
9232 ice_remove_q_channels(vsi, true); in ice_setup_tc_mqprio_qdisc()
9235 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf), in ice_setup_tc_mqprio_qdisc()
9237 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf), in ice_setup_tc_mqprio_qdisc()
9247 offset = vsi->mqprio_qopt.qopt.offset[i]; in ice_setup_tc_mqprio_qdisc()
9248 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; in ice_setup_tc_mqprio_qdisc()
9249 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; in ice_setup_tc_mqprio_qdisc()
9251 vsi->req_txq = offset + qcount_tx; in ice_setup_tc_mqprio_qdisc()
9252 vsi->req_rxq = offset + qcount_rx; in ice_setup_tc_mqprio_qdisc()
9258 vsi->orig_rss_size = vsi->rss_size; in ice_setup_tc_mqprio_qdisc()
9264 cur_txq = vsi->num_txq; in ice_setup_tc_mqprio_qdisc()
9265 cur_rxq = vsi->num_rxq; in ice_setup_tc_mqprio_qdisc()
9268 ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); in ice_setup_tc_mqprio_qdisc()
9272 vsi->req_txq = cur_txq; in ice_setup_tc_mqprio_qdisc()
9273 vsi->req_rxq = cur_rxq; in ice_setup_tc_mqprio_qdisc()
9275 if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) { in ice_setup_tc_mqprio_qdisc()
9281 vsi->all_numtc = num_tcf; in ice_setup_tc_mqprio_qdisc()
9282 vsi->all_enatc = ena_tc_qdisc; in ice_setup_tc_mqprio_qdisc()
9283 ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc); in ice_setup_tc_mqprio_qdisc()
9286 vsi->vsi_num); in ice_setup_tc_mqprio_qdisc()
9291 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; in ice_setup_tc_mqprio_qdisc()
9292 u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0]; in ice_setup_tc_mqprio_qdisc()
9302 ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate); in ice_setup_tc_mqprio_qdisc()
9305 max_tx_rate, min_tx_rate, vsi->vsi_num); in ice_setup_tc_mqprio_qdisc()
9308 max_tx_rate, min_tx_rate, vsi->vsi_num); in ice_setup_tc_mqprio_qdisc()
9312 ret = ice_create_q_channels(vsi); in ice_setup_tc_mqprio_qdisc()
9321 if (vsi->ch_rss_size) in ice_setup_tc_mqprio_qdisc()
9322 ice_vsi_cfg_rss_lut_key(vsi); in ice_setup_tc_mqprio_qdisc()
9327 vsi->all_numtc = 0; in ice_setup_tc_mqprio_qdisc()
9328 vsi->all_enatc = 0; in ice_setup_tc_mqprio_qdisc()
9331 ice_ena_vsi(vsi, true); in ice_setup_tc_mqprio_qdisc()
9343 struct ice_pf *pf = np->vsi->back; in ice_setup_tc()
9430 vlan_dev_real_dev(netdev) == np->vsi->netdev)) in ice_indr_setup_tc_block()
9517 struct ice_pf *pf = np->vsi->back; in ice_open()
9539 struct ice_vsi *vsi = np->vsi; in ice_open_internal() local
9540 struct ice_pf *pf = vsi->back; in ice_open_internal()
9551 pi = vsi->port_info; in ice_open_internal()
9572 err = ice_configure_phy(vsi); in ice_open_internal()
9580 ice_set_link(vsi, false); in ice_open_internal()
9583 err = ice_vsi_open(vsi); in ice_open_internal()
9586 vsi->vsi_num, vsi->vsw->sw_id); in ice_open_internal()
9607 struct ice_vsi *vsi = np->vsi; in ice_stop() local
9608 struct ice_pf *pf = vsi->back; in ice_stop()
9615 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { in ice_stop()
9616 int link_err = ice_force_phys_link_state(vsi, false); in ice_stop()
9620 netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n", in ice_stop()
9621 vsi->vsi_num); in ice_stop()
9623 netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", in ice_stop()
9624 vsi->vsi_num, link_err); in ice_stop()
9626 ice_vsi_close(vsi); in ice_stop()
9631 ice_vsi_close(vsi); in ice_stop()