Lines matching refs: vf
(Cross-reference listing: each entry gives the source line number, the matching line, the enclosing function, and whether vf is a local variable or a function argument there. The functions belong to the Intel ice driver's VF library, apparently drivers/net/ethernet/intel/ice/ice_vf_lib.c.)

27 	struct ice_vf *vf;  in ice_get_vf_by_id()  local
30 hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) { in ice_get_vf_by_id()
31 if (vf->vf_id == vf_id) { in ice_get_vf_by_id()
34 if (kref_get_unless_zero(&vf->refcnt)) in ice_get_vf_by_id()
35 found = vf; in ice_get_vf_by_id()
57 struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt); in ice_release_vf() local
59 pci_dev_put(vf->vfdev); in ice_release_vf()
61 vf->vf_ops->free(vf); in ice_release_vf()
75 void ice_put_vf(struct ice_vf *vf) in ice_put_vf() argument
77 kref_put(&vf->refcnt, ice_release_vf); in ice_put_vf()
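The ice_get_vf_by_id()/ice_put_vf() pair above implements a kref-protected lookup: the RCU hash walk only returns a VF whose refcount could still be raised, and ice_release_vf() (pci_dev_put() plus vf_ops->free()) runs when the last reference is dropped. A minimal caller sketch, using only functions visible in this listing; the wrapper name and error code are illustrative:

/* Illustrative only: look up a VF by ID, use it, then drop the
 * reference taken by ice_get_vf_by_id(). */
static int example_use_vf(struct ice_pf *pf, u16 vf_id)
{
	struct ice_vf *vf;

	vf = ice_get_vf_by_id(pf, vf_id);	/* kref_get_unless_zero() under RCU */
	if (!vf)
		return -ENOENT;			/* assumed error choice */

	/* ... operate on the VF ... */

	ice_put_vf(vf);				/* kref_put(&vf->refcnt, ice_release_vf) */
	return 0;
}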
107 struct ice_vf *vf; in ice_get_num_vfs() local
112 ice_for_each_vf_rcu(pf, bkt, vf) in ice_get_num_vfs()
123 struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf) in ice_get_vf_vsi() argument
125 if (vf->lan_vsi_idx == ICE_NO_VSI) in ice_get_vf_vsi()
128 return vf->pf->vsi[vf->lan_vsi_idx]; in ice_get_vf_vsi()
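ice_get_vf_vsi() returns NULL whenever the VF's LAN VSI index has been invalidated (ICE_NO_VSI), so callers must NULL-check before dereferencing, as the reset paths later in this listing do. A hedged sketch; the function name, error code, and num_rxq field are assumptions for illustration:

/* Illustrative: ice_get_vf_vsi() can return NULL, so check before use. */
static int example_vf_num_rxq(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (!vsi)			/* lan_vsi_idx == ICE_NO_VSI, e.g. mid-rebuild */
		return -EINVAL;		/* assumed error choice */

	return vsi->num_rxq;		/* field assumed from struct ice_vsi */
}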
143 bool ice_is_vf_disabled(struct ice_vf *vf) in ice_is_vf_disabled() argument
145 struct ice_pf *pf = vf->pf; in ice_is_vf_disabled()
148 test_bit(ICE_VF_STATE_DIS, vf->vf_states)); in ice_is_vf_disabled()
158 static void ice_wait_on_vf_reset(struct ice_vf *vf) in ice_wait_on_vf_reset() argument
163 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) in ice_wait_on_vf_reset()
177 int ice_check_vf_ready_for_cfg(struct ice_vf *vf) in ice_check_vf_ready_for_cfg() argument
179 ice_wait_on_vf_reset(vf); in ice_check_vf_ready_for_cfg()
181 if (ice_is_vf_disabled(vf)) in ice_check_vf_ready_for_cfg()
184 if (ice_check_vf_init(vf)) in ice_check_vf_ready_for_cfg()
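Taken together, ice_wait_on_vf_reset(), ice_is_vf_disabled() and ice_check_vf_init() form the gate that ice_check_vf_ready_for_cfg() applies before any per-VF configuration: wait out a pending reset, then reject disabled or never-initialized VFs. A hedged reconstruction of that control flow; the specific return codes are assumptions, since the excerpt elides them:

/* Sketch of the readiness gate suggested by the excerpt above. */
int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
{
	ice_wait_on_vf_reset(vf);	/* poll until ICE_VF_STATE_INIT or a timeout */

	if (ice_is_vf_disabled(vf))
		return -EINVAL;		/* assumed; PF in reset or VF marked disabled */

	if (ice_check_vf_init(vf))
		return -EBUSY;		/* assumed; VF never reached the INIT state */

	return 0;
}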
200 static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr) in ice_trigger_vf_reset() argument
203 clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); in ice_trigger_vf_reset()
208 clear_bit(ICE_VF_STATE_INIT, vf->vf_states); in ice_trigger_vf_reset()
216 vf->vf_ops->clear_mbx_register(vf); in ice_trigger_vf_reset()
218 vf->vf_ops->trigger_reset_register(vf, is_vflr); in ice_trigger_vf_reset()
221 static void ice_vf_clear_counters(struct ice_vf *vf) in ice_vf_clear_counters() argument
223 struct ice_vsi *vsi = ice_get_vf_vsi(vf); in ice_vf_clear_counters()
228 vf->num_mac = 0; in ice_vf_clear_counters()
229 memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events)); in ice_vf_clear_counters()
230 memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events)); in ice_vf_clear_counters()
240 static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf) in ice_vf_pre_vsi_rebuild() argument
243 if (vf->vf_ops->irq_close) in ice_vf_pre_vsi_rebuild()
244 vf->vf_ops->irq_close(vf); in ice_vf_pre_vsi_rebuild()
246 ice_vf_clear_counters(vf); in ice_vf_pre_vsi_rebuild()
247 vf->vf_ops->clear_reset_trigger(vf); in ice_vf_pre_vsi_rebuild()
259 static int ice_vf_reconfig_vsi(struct ice_vf *vf) in ice_vf_reconfig_vsi() argument
261 struct ice_vsi *vsi = ice_get_vf_vsi(vf); in ice_vf_reconfig_vsi()
262 struct ice_pf *pf = vf->pf; in ice_vf_reconfig_vsi()
277 vf->vf_id, err); in ice_vf_reconfig_vsi()
293 static int ice_vf_rebuild_vsi(struct ice_vf *vf) in ice_vf_rebuild_vsi() argument
295 struct ice_vsi *vsi = ice_get_vf_vsi(vf); in ice_vf_rebuild_vsi()
296 struct ice_pf *pf = vf->pf; in ice_vf_rebuild_vsi()
303 vf->vf_id); in ice_vf_rebuild_vsi()
322 static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi) in ice_vf_rebuild_host_vlan_cfg() argument
325 struct device *dev = ice_pf_to_dev(vf->pf); in ice_vf_rebuild_host_vlan_cfg()
328 if (ice_vf_is_port_vlan_ena(vf)) { in ice_vf_rebuild_host_vlan_cfg()
329 err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info); in ice_vf_rebuild_host_vlan_cfg()
332 vf->vf_id, err); in ice_vf_rebuild_host_vlan_cfg()
336 err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info); in ice_vf_rebuild_host_vlan_cfg()
342 vf->vf_id, err); in ice_vf_rebuild_host_vlan_cfg()
350 ice_vf_is_port_vlan_ena(vf) ? in ice_vf_rebuild_host_vlan_cfg()
351 ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err); in ice_vf_rebuild_host_vlan_cfg()
358 vf->vf_id, vsi->idx, err); in ice_vf_rebuild_host_vlan_cfg()
370 static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf) in ice_vf_rebuild_host_tx_rate_cfg() argument
372 struct device *dev = ice_pf_to_dev(vf->pf); in ice_vf_rebuild_host_tx_rate_cfg()
373 struct ice_vsi *vsi = ice_get_vf_vsi(vf); in ice_vf_rebuild_host_tx_rate_cfg()
379 if (vf->min_tx_rate) { in ice_vf_rebuild_host_tx_rate_cfg()
380 err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000); in ice_vf_rebuild_host_tx_rate_cfg()
383 vf->min_tx_rate, vf->vf_id, err); in ice_vf_rebuild_host_tx_rate_cfg()
388 if (vf->max_tx_rate) { in ice_vf_rebuild_host_tx_rate_cfg()
389 err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000); in ice_vf_rebuild_host_tx_rate_cfg()
392 vf->max_tx_rate, vf->vf_id, err); in ice_vf_rebuild_host_tx_rate_cfg()
404 static void ice_vf_set_host_trust_cfg(struct ice_vf *vf) in ice_vf_set_host_trust_cfg() argument
406 assign_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps, vf->trusted); in ice_vf_set_host_trust_cfg()
416 static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) in ice_vf_rebuild_host_mac_cfg() argument
418 struct device *dev = ice_pf_to_dev(vf->pf); in ice_vf_rebuild_host_mac_cfg()
419 struct ice_vsi *vsi = ice_get_vf_vsi(vf); in ice_vf_rebuild_host_mac_cfg()
426 if (ice_is_eswitch_mode_switchdev(vf->pf)) in ice_vf_rebuild_host_mac_cfg()
433 vf->vf_id, status); in ice_vf_rebuild_host_mac_cfg()
437 vf->num_mac++; in ice_vf_rebuild_host_mac_cfg()
439 if (is_valid_ether_addr(vf->hw_lan_addr)) { in ice_vf_rebuild_host_mac_cfg()
440 status = ice_fltr_add_mac(vsi, vf->hw_lan_addr, in ice_vf_rebuild_host_mac_cfg()
444 &vf->hw_lan_addr[0], vf->vf_id, in ice_vf_rebuild_host_mac_cfg()
448 vf->num_mac++; in ice_vf_rebuild_host_mac_cfg()
450 ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr); in ice_vf_rebuild_host_mac_cfg()
493 static void ice_vf_rebuild_host_cfg(struct ice_vf *vf) in ice_vf_rebuild_host_cfg() argument
495 struct device *dev = ice_pf_to_dev(vf->pf); in ice_vf_rebuild_host_cfg()
496 struct ice_vsi *vsi = ice_get_vf_vsi(vf); in ice_vf_rebuild_host_cfg()
501 ice_vf_set_host_trust_cfg(vf); in ice_vf_rebuild_host_cfg()
503 if (ice_vf_rebuild_host_mac_cfg(vf)) in ice_vf_rebuild_host_cfg()
505 vf->vf_id); in ice_vf_rebuild_host_cfg()
507 if (ice_vf_rebuild_host_vlan_cfg(vf, vsi)) in ice_vf_rebuild_host_cfg()
509 vf->vf_id); in ice_vf_rebuild_host_cfg()
511 if (ice_vf_rebuild_host_tx_rate_cfg(vf)) in ice_vf_rebuild_host_cfg()
513 vf->vf_id); in ice_vf_rebuild_host_cfg()
515 if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk)) in ice_vf_rebuild_host_cfg()
517 vf->vf_id); in ice_vf_rebuild_host_cfg()
527 static void ice_set_vf_state_qs_dis(struct ice_vf *vf) in ice_set_vf_state_qs_dis() argument
530 bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF); in ice_set_vf_state_qs_dis()
531 bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF); in ice_set_vf_state_qs_dis()
532 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); in ice_set_vf_state_qs_dis()
542 static void ice_vf_set_initialized(struct ice_vf *vf) in ice_vf_set_initialized() argument
544 ice_set_vf_state_qs_dis(vf); in ice_vf_set_initialized()
545 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); in ice_vf_set_initialized()
546 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); in ice_vf_set_initialized()
547 clear_bit(ICE_VF_STATE_DIS, vf->vf_states); in ice_vf_set_initialized()
548 set_bit(ICE_VF_STATE_INIT, vf->vf_states); in ice_vf_set_initialized()
549 memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps)); in ice_vf_set_initialized()
559 static void ice_vf_post_vsi_rebuild(struct ice_vf *vf) in ice_vf_post_vsi_rebuild() argument
561 ice_vf_rebuild_host_cfg(vf); in ice_vf_post_vsi_rebuild()
562 ice_vf_set_initialized(vf); in ice_vf_post_vsi_rebuild()
564 vf->vf_ops->post_vsi_rebuild(vf); in ice_vf_post_vsi_rebuild()
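The rebuild helpers above are only meaningful in a fixed order, which the reset paths further down in this listing (ice_reset_all_vfs and ice_reset_vf) follow: tear down IRQs and counters, rebuild or reconfigure the VSI, then restore host configuration and mark the VF initialized. A sketch of that ordering; the helpers are static in the excerpt, so this is file-internal pseudo-usage, not a public API:

static int example_rebuild_vf(struct ice_vf *vf)
{
	int err;

	ice_vf_pre_vsi_rebuild(vf);	/* vf_ops->irq_close, clear counters, clear reset trigger */

	err = ice_vf_rebuild_vsi(vf);	/* the single-VF reset path uses ice_vf_reconfig_vsi() here */
	if (err)
		return err;

	ice_vf_post_vsi_rebuild(vf);	/* host cfg, ice_vf_set_initialized(), vf_ops->post_vsi_rebuild */
	return 0;
}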
578 struct ice_vf *vf; in ice_is_any_vf_in_unicast_promisc() local
582 ice_for_each_vf_rcu(pf, bkt, vf) { in ice_is_any_vf_in_unicast_promisc()
584 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) { in ice_is_any_vf_in_unicast_promisc()
605 ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi, in ice_vf_get_promisc_masks() argument
608 if (ice_vf_is_port_vlan_ena(vf) || in ice_vf_get_promisc_masks()
626 ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi) in ice_vf_clear_all_promisc_modes() argument
628 struct ice_pf *pf = vf->pf; in ice_vf_clear_all_promisc_modes()
632 ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m); in ice_vf_clear_all_promisc_modes()
633 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) { in ice_vf_clear_all_promisc_modes()
638 ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m); in ice_vf_clear_all_promisc_modes()
642 dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n"); in ice_vf_clear_all_promisc_modes()
644 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); in ice_vf_clear_all_promisc_modes()
645 dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n"); in ice_vf_clear_all_promisc_modes()
649 if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) { in ice_vf_clear_all_promisc_modes()
650 ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m); in ice_vf_clear_all_promisc_modes()
652 dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n"); in ice_vf_clear_all_promisc_modes()
654 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); in ice_vf_clear_all_promisc_modes()
655 dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n"); in ice_vf_clear_all_promisc_modes()
668 ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) in ice_vf_set_vsi_promisc() argument
673 if (ice_vf_is_port_vlan_ena(vf)) in ice_vf_set_vsi_promisc()
675 ice_vf_get_port_vlan_id(vf)); in ice_vf_set_vsi_promisc()
683 vf->vf_id, status); in ice_vf_set_vsi_promisc()
697 ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) in ice_vf_clear_vsi_promisc() argument
702 if (ice_vf_is_port_vlan_ena(vf)) in ice_vf_clear_vsi_promisc()
704 ice_vf_get_port_vlan_id(vf)); in ice_vf_clear_vsi_promisc()
712 vf->vf_id, status); in ice_vf_clear_vsi_promisc()
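ice_vf_get_promisc_masks() picks VLAN-qualified or plain promiscuous masks depending on whether a port VLAN is active, and the set/clear helpers then program the VSI and log failures per VF. A hedged sketch of how a caller combines them; the function name is illustrative, and the state-bit update mirrors ice_vf_clear_all_promisc_modes() above:

/* Illustrative: enable or disable unicast promiscuous mode for one VF. */
static int example_set_uc_promisc(struct ice_vf *vf, struct ice_vsi *vsi, bool ena)
{
	u8 ucast_m, mcast_m;
	int err;

	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);

	err = ena ? ice_vf_set_vsi_promisc(vf, vsi, ucast_m)
		  : ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
	if (err)
		return err;

	assign_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states, ena);
	return 0;
}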
734 struct ice_vf *vf; in ice_reset_all_vfs() local
744 ice_for_each_vf(pf, bkt, vf) in ice_reset_all_vfs()
745 ice_mbx_clear_malvf(&vf->mbx_info); in ice_reset_all_vfs()
754 ice_for_each_vf(pf, bkt, vf) in ice_reset_all_vfs()
755 ice_trigger_vf_reset(vf, true, true); in ice_reset_all_vfs()
761 ice_for_each_vf(pf, bkt, vf) { in ice_reset_all_vfs()
762 if (!vf->vf_ops->poll_reset_status(vf)) { in ice_reset_all_vfs()
767 dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id); in ice_reset_all_vfs()
773 ice_for_each_vf(pf, bkt, vf) { in ice_reset_all_vfs()
774 mutex_lock(&vf->cfg_lock); in ice_reset_all_vfs()
776 ice_eswitch_detach_vf(pf, vf); in ice_reset_all_vfs()
777 vf->driver_caps = 0; in ice_reset_all_vfs()
778 ice_vc_set_default_allowlist(vf); in ice_reset_all_vfs()
780 ice_vf_fdir_exit(vf); in ice_reset_all_vfs()
781 ice_vf_fdir_init(vf); in ice_reset_all_vfs()
785 if (vf->ctrl_vsi_idx != ICE_NO_VSI) in ice_reset_all_vfs()
786 ice_vf_ctrl_invalidate_vsi(vf); in ice_reset_all_vfs()
788 ice_vf_pre_vsi_rebuild(vf); in ice_reset_all_vfs()
789 ice_vf_rebuild_vsi(vf); in ice_reset_all_vfs()
790 ice_vf_post_vsi_rebuild(vf); in ice_reset_all_vfs()
792 ice_eswitch_attach_vf(pf, vf); in ice_reset_all_vfs()
794 mutex_unlock(&vf->cfg_lock); in ice_reset_all_vfs()
807 static void ice_notify_vf_reset(struct ice_vf *vf) in ice_notify_vf_reset() argument
809 struct ice_hw *hw = &vf->pf->hw; in ice_notify_vf_reset()
815 if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) && in ice_notify_vf_reset()
816 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) || in ice_notify_vf_reset()
817 test_bit(ICE_VF_STATE_DIS, vf->vf_states)) in ice_notify_vf_reset()
822 ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, in ice_notify_vf_reset()
841 int ice_reset_vf(struct ice_vf *vf, u32 flags) in ice_reset_vf() argument
843 struct ice_pf *pf = vf->pf; in ice_reset_vf()
856 ice_notify_vf_reset(vf); in ice_reset_vf()
860 vf->vf_id); in ice_reset_vf()
865 mutex_lock(&vf->cfg_lock); in ice_reset_vf()
867 lockdep_assert_held(&vf->cfg_lock); in ice_reset_vf()
880 if (ice_is_vf_disabled(vf)) { in ice_reset_vf()
881 vsi = ice_get_vf_vsi(vf); in ice_reset_vf()
887 ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); in ice_reset_vf()
893 vf->vf_id); in ice_reset_vf()
898 set_bit(ICE_VF_STATE_DIS, vf->vf_states); in ice_reset_vf()
899 ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false); in ice_reset_vf()
901 vsi = ice_get_vf_vsi(vf); in ice_reset_vf()
907 ice_dis_vf_qs(vf); in ice_reset_vf()
913 NULL, vf->vf_ops->reset_type, vf->vf_id, NULL); in ice_reset_vf()
918 rsd = vf->vf_ops->poll_reset_status(vf); in ice_reset_vf()
924 dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id); in ice_reset_vf()
926 vf->driver_caps = 0; in ice_reset_vf()
927 ice_vc_set_default_allowlist(vf); in ice_reset_vf()
932 ice_vf_clear_all_promisc_modes(vf, vsi); in ice_reset_vf()
934 ice_vf_fdir_exit(vf); in ice_reset_vf()
935 ice_vf_fdir_init(vf); in ice_reset_vf()
939 if (vf->ctrl_vsi_idx != ICE_NO_VSI) in ice_reset_vf()
940 ice_vf_ctrl_vsi_release(vf); in ice_reset_vf()
942 ice_vf_pre_vsi_rebuild(vf); in ice_reset_vf()
944 if (ice_vf_reconfig_vsi(vf)) { in ice_reset_vf()
946 vf->vf_id); in ice_reset_vf()
951 ice_vf_post_vsi_rebuild(vf); in ice_reset_vf()
952 vsi = ice_get_vf_vsi(vf); in ice_reset_vf()
958 ice_eswitch_update_repr(&vf->repr_id, vsi); in ice_reset_vf()
961 ice_mbx_clear_malvf(&vf->mbx_info); in ice_reset_vf()
970 mutex_unlock(&vf->cfg_lock); in ice_reset_vf()
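ice_reset_vf() takes a flags argument: the excerpt shows ICE_VF_RESET_VFLR being forwarded to ice_trigger_vf_reset(), and the early ice_notify_vf_reset() call plus the cfg_lock handling suggest notify and lock flags as well. A hedged caller sketch; ICE_VF_RESET_NOTIFY, ICE_VF_RESET_LOCK and pf->vfs.table_lock are assumed names not visible in this listing, and the iteration follows the ice_for_each_vf pattern used in ice_reset_all_vfs above:

/* Illustrative: reset every VF after a PF-level event, letting
 * ice_reset_vf() notify each VF and take its cfg_lock itself. */
static void example_reset_vfs(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	mutex_lock(&pf->vfs.table_lock);	/* assumed lock guarding the VF table */
	ice_for_each_vf(pf, bkt, vf)
		ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
	mutex_unlock(&pf->vfs.table_lock);
}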
979 void ice_set_vf_state_dis(struct ice_vf *vf) in ice_set_vf_state_dis() argument
981 ice_set_vf_state_qs_dis(vf); in ice_set_vf_state_dis()
982 vf->vf_ops->clear_reset_state(vf); in ice_set_vf_state_dis()
991 void ice_initialize_vf_entry(struct ice_vf *vf) in ice_initialize_vf_entry() argument
993 struct ice_pf *pf = vf->pf; in ice_initialize_vf_entry()
999 vf->spoofchk = true; in ice_initialize_vf_entry()
1000 ice_vc_set_default_allowlist(vf); in ice_initialize_vf_entry()
1001 ice_virtchnl_set_dflt_ops(vf); in ice_initialize_vf_entry()
1004 vf->num_msix = vfs->num_msix_per; in ice_initialize_vf_entry()
1005 vf->num_vf_qs = vfs->num_qps_per; in ice_initialize_vf_entry()
1010 ice_vf_ctrl_invalidate_vsi(vf); in ice_initialize_vf_entry()
1011 ice_vf_fdir_init(vf); in ice_initialize_vf_entry()
1014 ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info); in ice_initialize_vf_entry()
1016 mutex_init(&vf->cfg_lock); in ice_initialize_vf_entry()
1023 void ice_dis_vf_qs(struct ice_vf *vf) in ice_dis_vf_qs() argument
1025 struct ice_vsi *vsi = ice_get_vf_vsi(vf); in ice_dis_vf_qs()
1030 ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); in ice_dis_vf_qs()
1032 ice_set_vf_state_qs_dis(vf); in ice_dis_vf_qs()
1063 int ice_check_vf_init(struct ice_vf *vf) in ice_check_vf_init() argument
1065 struct ice_pf *pf = vf->pf; in ice_check_vf_init()
1067 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { in ice_check_vf_init()
1069 vf->vf_id); in ice_check_vf_init()
1079 struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf) in ice_vf_get_port_info() argument
1081 return vf->pf->hw.port_info; in ice_vf_get_port_info()
1180 bool ice_is_vf_trusted(struct ice_vf *vf) in ice_is_vf_trusted() argument
1182 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); in ice_is_vf_trusted()
1192 bool ice_vf_has_no_qs_ena(struct ice_vf *vf) in ice_vf_has_no_qs_ena() argument
1194 return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) && in ice_vf_has_no_qs_ena()
1195 !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF)); in ice_vf_has_no_qs_ena()
1202 bool ice_is_vf_link_up(struct ice_vf *vf) in ice_is_vf_link_up() argument
1204 struct ice_port_info *pi = ice_vf_get_port_info(vf); in ice_is_vf_link_up()
1206 if (ice_check_vf_init(vf)) in ice_is_vf_link_up()
1209 if (ice_vf_has_no_qs_ena(vf)) in ice_is_vf_link_up()
1211 else if (vf->link_forced) in ice_is_vf_link_up()
1212 return vf->link_up; in ice_is_vf_link_up()
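ice_is_vf_link_up() combines the helpers above: an uninitialized VF or one with no enabled queues reports link down, a forced link reports the forced value, and otherwise the port's PHY status decides. A hedged reconstruction; the final branch is elided from the excerpt, so the link_info field and ICE_AQ_LINK_UP test are assumptions based on the driver's port_info layout:

bool ice_is_vf_link_up(struct ice_vf *vf)
{
	struct ice_port_info *pi = ice_vf_get_port_info(vf);

	if (ice_check_vf_init(vf))
		return false;			/* VF never reached INIT */

	if (ice_vf_has_no_qs_ena(vf))
		return false;			/* no Tx/Rx queues enabled */
	else if (vf->link_forced)
		return vf->link_up;		/* administrator-forced state */
	else
		return pi->phy.link_info.link_info & ICE_AQ_LINK_UP;	/* assumed */
}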
1222 void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf) in ice_vf_ctrl_invalidate_vsi() argument
1224 vf->ctrl_vsi_idx = ICE_NO_VSI; in ice_vf_ctrl_invalidate_vsi()
1231 void ice_vf_ctrl_vsi_release(struct ice_vf *vf) in ice_vf_ctrl_vsi_release() argument
1233 ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]); in ice_vf_ctrl_vsi_release()
1234 ice_vf_ctrl_invalidate_vsi(vf); in ice_vf_ctrl_vsi_release()
1244 struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf) in ice_vf_ctrl_vsi_setup() argument
1247 struct ice_pf *pf = vf->pf; in ice_vf_ctrl_vsi_setup()
1251 params.port_info = ice_vf_get_port_info(vf); in ice_vf_ctrl_vsi_setup()
1252 params.vf = vf; in ice_vf_ctrl_vsi_setup()
1258 ice_vf_ctrl_invalidate_vsi(vf); in ice_vf_ctrl_vsi_setup()
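ice_vf_ctrl_vsi_setup() allocates a dedicated control VSI for the VF (invalidating ctrl_vsi_idx again on failure), and ice_vf_ctrl_vsi_release() frees it and resets the index to ICE_NO_VSI; in the ice driver this control VSI backs the VF's Flow Director rules. A minimal lifecycle sketch, assuming only the functions shown above; the wrapper name and error code are illustrative:

/* Illustrative lifecycle of a VF control VSI. */
static int example_ctrl_vsi_cycle(struct ice_vf *vf)
{
	struct ice_vsi *ctrl_vsi;

	ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
	if (!ctrl_vsi)
		return -ENOMEM;			/* assumed error choice */

	/* ... program Flow Director filters via the control VSI ... */

	ice_vf_ctrl_vsi_release(vf);		/* frees the VSI, ctrl_vsi_idx = ICE_NO_VSI */
	return 0;
}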
1273 int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi) in ice_vf_init_host_cfg() argument
1276 struct ice_pf *pf = vf->pf; in ice_vf_init_host_cfg()
1286 vf->vf_id); in ice_vf_init_host_cfg()
1294 vf->vf_id); in ice_vf_init_host_cfg()
1302 vf->vf_id, err); in ice_vf_init_host_cfg()
1306 vf->num_mac = 1; in ice_vf_init_host_cfg()
1308 err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk); in ice_vf_init_host_cfg()
1311 vf->vf_id); in ice_vf_init_host_cfg()
1322 void ice_vf_invalidate_vsi(struct ice_vf *vf) in ice_vf_invalidate_vsi() argument
1324 vf->lan_vsi_idx = ICE_NO_VSI; in ice_vf_invalidate_vsi()
1334 void ice_vf_vsi_release(struct ice_vf *vf) in ice_vf_vsi_release() argument
1336 struct ice_vsi *vsi = ice_get_vf_vsi(vf); in ice_vf_vsi_release()
1342 ice_vf_invalidate_vsi(vf); in ice_vf_vsi_release()
1362 struct ice_vf *vf; in ice_get_vf_ctrl_vsi() local
1366 ice_for_each_vf_rcu(pf, bkt, vf) { in ice_get_vf_ctrl_vsi()
1367 if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) { in ice_get_vf_ctrl_vsi()
1368 ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx]; in ice_get_vf_ctrl_vsi()