Lines matching refs:vf (identifier cross-reference into the Intel ice driver's SR-IOV code, ice_sriov.c)
28 struct ice_vf *vf; in ice_free_vf_entries() local
37 hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) { in ice_free_vf_entries()
38 hash_del_rcu(&vf->entry); in ice_free_vf_entries()
39 ice_put_vf(vf); in ice_free_vf_entries()
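
The teardown above walks the VF hashtable with the _safe iterator so the current entry can be unlinked mid-walk, then drops the table's reference. A minimal user-space model of that pattern (a singly linked list and a plain counter stand in for the kernel hashtable and kref; all names here are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct vf_entry {
        int vf_id;
        int refcnt;                 /* models struct kref */
        struct vf_entry *next;
    };

    static void put_vf(struct vf_entry *vf)
    {
        if (--vf->refcnt == 0)      /* models ice_put_vf() */
            free(vf);
    }

    static void free_vf_entries(struct vf_entry **head)
    {
        struct vf_entry *vf = *head, *tmp;

        while (vf) {
            tmp = vf->next;         /* save next first, like _safe */
            vf->next = NULL;        /* models hash_del_rcu() */
            put_vf(vf);
            vf = tmp;
        }
        *head = NULL;
    }

    int main(void)
    {
        struct vf_entry *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct vf_entry *vf = calloc(1, sizeof(*vf));
            if (!vf)
                break;
            vf->vf_id = i;
            vf->refcnt = 1;         /* table's reference, models kref_init */
            vf->next = head;
            head = vf;
        }
        free_vf_entries(&head);
        return 0;
    }
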
47 static void ice_free_vf_res(struct ice_vf *vf) in ice_free_vf_res() argument
49 struct ice_pf *pf = vf->pf; in ice_free_vf_res()
55 clear_bit(ICE_VF_STATE_INIT, vf->vf_states); in ice_free_vf_res()
56 ice_vf_fdir_exit(vf); in ice_free_vf_res()
58 if (vf->ctrl_vsi_idx != ICE_NO_VSI) in ice_free_vf_res()
59 ice_vf_ctrl_vsi_release(vf); in ice_free_vf_res()
62 if (vf->lan_vsi_idx != ICE_NO_VSI) { in ice_free_vf_res()
63 ice_vf_vsi_release(vf); in ice_free_vf_res()
64 vf->num_mac = 0; in ice_free_vf_res()
67 last_vector_idx = vf->first_vector_idx + vf->num_msix - 1; in ice_free_vf_res()
70 memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events)); in ice_free_vf_res()
71 memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events)); in ice_free_vf_res()
74 for (i = vf->first_vector_idx; i <= last_vector_idx; i++) { in ice_free_vf_res()
79 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); in ice_free_vf_res()
80 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); in ice_free_vf_res()
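
The vector range in ice_free_vf_res() is inclusive on both ends: the VF owns num_msix vectors starting at first_vector_idx, so the last index is first + num - 1. A tiny sketch with illustrative values:

    #include <stdio.h>

    int main(void)
    {
        int first_vector_idx = 64, num_msix = 17;   /* illustrative */
        int last_vector_idx = first_vector_idx + num_msix - 1;

        /* per-vector cleanup runs over this inclusive range */
        for (int i = first_vector_idx; i <= last_vector_idx; i++)
            printf("clean vector %d\n", i);
        return 0;
    }
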
87 static void ice_dis_vf_mappings(struct ice_vf *vf) in ice_dis_vf_mappings() argument
89 struct ice_pf *pf = vf->pf; in ice_dis_vf_mappings()
96 vsi = ice_get_vf_vsi(vf); in ice_dis_vf_mappings()
101 wr32(hw, VPINT_ALLOC(vf->vf_id), 0); in ice_dis_vf_mappings()
102 wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0); in ice_dis_vf_mappings()
104 first = vf->first_vector_idx; in ice_dis_vf_mappings()
105 last = first + vf->num_msix - 1; in ice_dis_vf_mappings()
115 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0); in ice_dis_vf_mappings()
120 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0); in ice_dis_vf_mappings()
155 struct ice_vf *vf; in ice_free_vfs() local
175 ice_for_each_vf(pf, bkt, vf) { in ice_free_vfs()
176 mutex_lock(&vf->cfg_lock); in ice_free_vfs()
178 ice_eswitch_detach_vf(pf, vf); in ice_free_vfs()
179 ice_dis_vf_qs(vf); in ice_free_vfs()
181 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { in ice_free_vfs()
183 ice_dis_vf_mappings(vf); in ice_free_vfs()
184 set_bit(ICE_VF_STATE_DIS, vf->vf_states); in ice_free_vfs()
185 ice_free_vf_res(vf); in ice_free_vfs()
191 reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; in ice_free_vfs()
192 bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; in ice_free_vfs()
197 list_del(&vf->mbx_info.list_entry); in ice_free_vfs()
199 mutex_unlock(&vf->cfg_lock); in ice_free_vfs()
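
The reg_idx/bit_idx math in ice_free_vfs() (and again in ice_process_vflr_event() below) locates a VF's bit in the per-VF reset-status array: the absolute VF id selects a 32-bit register by division and a bit within it by modulo. A runnable sketch, with illustrative values:

    #include <stdio.h>

    int main(void)
    {
        unsigned int vf_base_id = 64, vf_id = 5;    /* illustrative */
        unsigned int abs_id = vf_base_id + vf_id;
        unsigned int reg_idx = abs_id / 32;
        unsigned int bit_idx = abs_id % 32;

        printf("VF %u -> VFLR status reg %u, bit %u (mask 0x%08x)\n",
               vf_id, reg_idx, bit_idx, 1u << bit_idx);
        return 0;
    }
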
221 static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf) in ice_vf_vsi_setup() argument
224 struct ice_pf *pf = vf->pf; in ice_vf_vsi_setup()
228 params.port_info = ice_vf_get_port_info(vf); in ice_vf_vsi_setup()
229 params.vf = vf; in ice_vf_vsi_setup()
236 ice_vf_invalidate_vsi(vf); in ice_vf_vsi_setup()
240 vf->lan_vsi_idx = vsi->idx; in ice_vf_vsi_setup()
254 static void ice_ena_vf_msix_mappings(struct ice_vf *vf) in ice_ena_vf_msix_mappings() argument
258 struct ice_pf *pf = vf->pf; in ice_ena_vf_msix_mappings()
264 pf_based_first_msix = vf->first_vector_idx; in ice_ena_vf_msix_mappings()
265 pf_based_last_msix = (pf_based_first_msix + vf->num_msix) - 1; in ice_ena_vf_msix_mappings()
270 (device_based_first_msix + vf->num_msix) - 1; in ice_ena_vf_msix_mappings()
271 device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id; in ice_ena_vf_msix_mappings()
276 wr32(hw, VPINT_ALLOC(vf->vf_id), reg); in ice_ena_vf_msix_mappings()
281 wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg); in ice_ena_vf_msix_mappings()
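
ice_ena_vf_msix_mappings() works in two index spaces: the PF tracks vectors in its own numbering, while the hardware mapping registers take device-based indices. A sketch of the translation; hw_base (the PF's offset into the device vector space) is a hypothetical stand-in for the part of the computation not shown in this listing:

    #include <stdio.h>

    int main(void)
    {
        int first_vector_idx = 64, num_msix = 17;   /* illustrative */
        int vf_id = 5, vf_base_id = 64, hw_base = 16;

        int pf_based_first  = first_vector_idx;
        int pf_based_last   = pf_based_first + num_msix - 1;
        int dev_based_first = pf_based_first + hw_base;
        int dev_based_last  = dev_based_first + num_msix - 1;
        int dev_based_vf_id = vf_id + vf_base_id;

        printf("PF space %d..%d -> device space %d..%d, abs VF %d\n",
               pf_based_first, pf_based_last,
               dev_based_first, dev_based_last, dev_based_vf_id);
        return 0;
    }
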
300 static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq) in ice_ena_vf_q_mappings() argument
302 struct device *dev = ice_pf_to_dev(vf->pf); in ice_ena_vf_q_mappings()
303 struct ice_vsi *vsi = ice_get_vf_vsi(vf); in ice_ena_vf_q_mappings()
304 struct ice_hw *hw = &vf->pf->hw; in ice_ena_vf_q_mappings()
311 wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M); in ice_ena_vf_q_mappings()
321 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg); in ice_ena_vf_q_mappings()
327 wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M); in ice_ena_vf_q_mappings()
337 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg); in ice_ena_vf_q_mappings()
347 static void ice_ena_vf_mappings(struct ice_vf *vf) in ice_ena_vf_mappings() argument
349 struct ice_vsi *vsi = ice_get_vf_vsi(vf); in ice_ena_vf_mappings()
354 ice_ena_vf_msix_mappings(vf); in ice_ena_vf_mappings()
355 ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq); in ice_ena_vf_mappings()
363 void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector) in ice_calc_vf_reg_idx() argument
365 if (!vf || !q_vector) in ice_calc_vf_reg_idx()
370 q_vector->reg_idx = vf->first_vector_idx + q_vector->vf_reg_idx; in ice_calc_vf_reg_idx()
536 static void ice_sriov_free_irqs(struct ice_pf *pf, struct ice_vf *vf) in ice_sriov_free_irqs() argument
539 int bm_i = pf->sriov_irq_size - vf->first_vector_idx - vf->num_msix; in ice_sriov_free_irqs()
541 bitmap_clear(pf->sriov_irq_bm, bm_i, vf->num_msix); in ice_sriov_free_irqs()
542 vf->first_vector_idx = 0; in ice_sriov_free_irqs()
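
The index math in ice_sriov_free_irqs() is reversed because SR-IOV vectors are tracked from the end of the PF's interrupt space: the bitmap offset is size - first - count. A sketch where a byte array stands in for the kernel bitmap:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        int sriov_irq_size = 256;                   /* illustrative */
        int first_vector_idx = 200, num_msix = 8;
        unsigned char bm[256];

        memset(bm, 1, sizeof(bm));                  /* all bits "set" */
        int bm_i = sriov_irq_size - first_vector_idx - num_msix;
        memset(&bm[bm_i], 0, num_msix);             /* models bitmap_clear() */
        printf("cleared bits %d..%d\n", bm_i, bm_i + num_msix - 1);
        return 0;
    }
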
552 static int ice_init_vf_vsi_res(struct ice_vf *vf) in ice_init_vf_vsi_res() argument
554 struct ice_pf *pf = vf->pf; in ice_init_vf_vsi_res()
558 vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix); in ice_init_vf_vsi_res()
559 if (vf->first_vector_idx < 0) in ice_init_vf_vsi_res()
562 vsi = ice_vf_vsi_setup(vf); in ice_init_vf_vsi_res()
566 err = ice_vf_init_host_cfg(vf, vsi); in ice_init_vf_vsi_res()
573 ice_vf_vsi_release(vf); in ice_init_vf_vsi_res()
585 struct ice_vf *vf; in ice_start_vfs() local
591 ice_for_each_vf(pf, bkt, vf) { in ice_start_vfs()
592 vf->vf_ops->clear_reset_trigger(vf); in ice_start_vfs()
594 retval = ice_init_vf_vsi_res(vf); in ice_start_vfs()
597 vf->vf_id, retval); in ice_start_vfs()
601 retval = ice_eswitch_attach_vf(pf, vf); in ice_start_vfs()
604 vf->vf_id, retval); in ice_start_vfs()
605 ice_vf_vsi_release(vf); in ice_start_vfs()
609 set_bit(ICE_VF_STATE_INIT, vf->vf_states); in ice_start_vfs()
610 ice_ena_vf_mappings(vf); in ice_start_vfs()
611 wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); in ice_start_vfs()
619 ice_for_each_vf(pf, bkt, vf) { in ice_start_vfs()
623 ice_dis_vf_mappings(vf); in ice_start_vfs()
624 ice_vf_vsi_release(vf); in ice_start_vfs()
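
ice_start_vfs() uses a start-or-unwind shape: bring VFs up in order, and on the first failure tear down only those already started (the second ice_for_each_vf() loop). A generic sketch of that control flow; init_vf/teardown_vf are hypothetical stand-ins:

    #include <stdio.h>

    static int init_vf(int id)      { return id == 3 ? -1 : 0; } /* VF 3 fails */
    static void teardown_vf(int id) { printf("teardown VF %d\n", id); }

    int main(void)
    {
        int num_vfs = 5, started = 0;

        for (int id = 0; id < num_vfs; id++) {
            if (init_vf(id) < 0) {
                for (int j = 0; j < started; j++)
                    teardown_vf(j);     /* unwind only what came up */
                return 1;
            }
            started++;
        }
        return 0;
    }
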
638 static void ice_sriov_free_vf(struct ice_vf *vf) in ice_sriov_free_vf() argument
640 mutex_destroy(&vf->cfg_lock); in ice_sriov_free_vf()
642 kfree_rcu(vf, rcu); in ice_sriov_free_vf()
649 static void ice_sriov_clear_reset_state(struct ice_vf *vf) in ice_sriov_clear_reset_state() argument
651 struct ice_hw *hw = &vf->pf->hw; in ice_sriov_clear_reset_state()
657 wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_INPROGRESS); in ice_sriov_clear_reset_state()
664 static void ice_sriov_clear_mbx_register(struct ice_vf *vf) in ice_sriov_clear_mbx_register() argument
666 struct ice_pf *pf = vf->pf; in ice_sriov_clear_mbx_register()
668 wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0); in ice_sriov_clear_mbx_register()
669 wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0); in ice_sriov_clear_mbx_register()
679 static void ice_sriov_trigger_reset_register(struct ice_vf *vf, bool is_vflr) in ice_sriov_trigger_reset_register() argument
681 struct ice_pf *pf = vf->pf; in ice_sriov_trigger_reset_register()
689 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id; in ice_sriov_trigger_reset_register()
696 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id)); in ice_sriov_trigger_reset_register()
698 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg); in ice_sriov_trigger_reset_register()
715 dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id); in ice_sriov_trigger_reset_register()
726 static bool ice_sriov_poll_reset_status(struct ice_vf *vf) in ice_sriov_poll_reset_status() argument
728 struct ice_pf *pf = vf->pf; in ice_sriov_poll_reset_status()
737 reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id)); in ice_sriov_poll_reset_status()
751 static void ice_sriov_clear_reset_trigger(struct ice_vf *vf) in ice_sriov_clear_reset_trigger() argument
753 struct ice_hw *hw = &vf->pf->hw; in ice_sriov_clear_reset_trigger()
756 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id)); in ice_sriov_clear_reset_trigger()
758 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg); in ice_sriov_clear_reset_trigger()
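
ice_sriov_clear_reset_trigger() is a classic read-modify-write on a device register: read VPGEN_VFRTRIG, clear the software-reset bit, write it back. A sketch with a mock register; the bit position of the mask is hypothetical here:

    #include <stdio.h>
    #include <stdint.h>

    #define VFSWR_M (1u << 0)               /* hypothetical bit position */

    static uint32_t mock_reg = 0x00000001;  /* trigger currently set */

    static uint32_t rd32(void)       { return mock_reg; }
    static void     wr32(uint32_t v) { mock_reg = v; }

    int main(void)
    {
        uint32_t reg = rd32();
        reg &= ~VFSWR_M;                    /* clear only the reset bit */
        wr32(reg);
        printf("VPGEN_VFRTRIG now 0x%08x\n", mock_reg);
        return 0;
    }
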
766 static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf) in ice_sriov_post_vsi_rebuild() argument
768 ice_ena_vf_mappings(vf); in ice_sriov_post_vsi_rebuild()
769 wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); in ice_sriov_post_vsi_rebuild()
802 struct ice_vf *vf; in ice_create_vf_entries() local
812 vf = kzalloc(sizeof(*vf), GFP_KERNEL); in ice_create_vf_entries()
813 if (!vf) { in ice_create_vf_entries()
817 kref_init(&vf->refcnt); in ice_create_vf_entries()
819 vf->pf = pf; in ice_create_vf_entries()
820 vf->vf_id = vf_id; in ice_create_vf_entries()
823 vf->vf_ops = &ice_sriov_vf_ops; in ice_create_vf_entries()
825 ice_initialize_vf_entry(vf); in ice_create_vf_entries()
830 vf->vfdev = vfdev; in ice_create_vf_entries()
831 vf->vf_sw_id = pf->first_sw; in ice_create_vf_entries()
835 hash_add_rcu(vfs->table, &vf->entry, vf_id); in ice_create_vf_entries()
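
ice_create_vf_entries() follows the usual create-then-publish order: allocate zeroed, take the initial reference, fill in identity fields, and only then make the entry visible in the table. A user-space model (an array stands in for the hashtable; the atomic counter models the kref):

    #include <stdlib.h>
    #include <stdatomic.h>

    struct vf {
        atomic_int refcnt;              /* models struct kref */
        int vf_id;
        void *pf;
    };

    static struct vf *table[8];         /* models vfs->table */

    static struct vf *create_vf_entry(void *pf, int vf_id)
    {
        struct vf *vf = calloc(1, sizeof(*vf));
        if (!vf)
            return NULL;
        atomic_init(&vf->refcnt, 1);    /* models kref_init() */
        vf->pf = pf;
        vf->vf_id = vf_id;
        table[vf_id % 8] = vf;          /* publish last, models hash_add_rcu() */
        return vf;
    }

    int main(void)
    {
        return create_vf_entry(NULL, 5) ? 0 : 1;
    }
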
1064 struct ice_vf *vf; in ice_sriov_set_msix_vec_count() local
1093 vf = ice_get_vf_by_id(pf, id); in ice_sriov_set_msix_vec_count()
1095 if (!vf) in ice_sriov_set_msix_vec_count()
1098 vsi = ice_get_vf_vsi(vf); in ice_sriov_set_msix_vec_count()
1100 ice_put_vf(vf); in ice_sriov_set_msix_vec_count()
1104 prev_msix = vf->num_msix; in ice_sriov_set_msix_vec_count()
1105 prev_queues = vf->num_vf_qs; in ice_sriov_set_msix_vec_count()
1108 ice_put_vf(vf); in ice_sriov_set_msix_vec_count()
1112 ice_dis_vf_mappings(vf); in ice_sriov_set_msix_vec_count()
1113 ice_sriov_free_irqs(pf, vf); in ice_sriov_set_msix_vec_count()
1116 ice_sriov_remap_vectors(pf, vf->vf_id); in ice_sriov_set_msix_vec_count()
1118 vf->num_msix = msix_vec_count; in ice_sriov_set_msix_vec_count()
1119 vf->num_vf_qs = queues; in ice_sriov_set_msix_vec_count()
1120 vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix); in ice_sriov_set_msix_vec_count()
1121 if (vf->first_vector_idx < 0) in ice_sriov_set_msix_vec_count()
1135 vf->vf_id, vf->num_msix, vf->num_vf_qs); in ice_sriov_set_msix_vec_count()
1137 ice_ena_vf_mappings(vf); in ice_sriov_set_msix_vec_count()
1138 ice_put_vf(vf); in ice_sriov_set_msix_vec_count()
1145 vf->num_msix, vf->vf_id, prev_msix); in ice_sriov_set_msix_vec_count()
1147 vf->num_msix = prev_msix; in ice_sriov_set_msix_vec_count()
1148 vf->num_vf_qs = prev_queues; in ice_sriov_set_msix_vec_count()
1149 vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix); in ice_sriov_set_msix_vec_count()
1150 if (vf->first_vector_idx < 0) { in ice_sriov_set_msix_vec_count()
1151 ice_put_vf(vf); in ice_sriov_set_msix_vec_count()
1162 ice_ena_vf_mappings(vf); in ice_sriov_set_msix_vec_count()
1163 ice_put_vf(vf); in ice_sriov_set_msix_vec_count()
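
ice_sriov_set_msix_vec_count() saves the previous vector/queue counts before trying the new layout, and restores them if the new allocation fails. A sketch of that rollback shape; alloc_irqs is a hypothetical stand-in for ice_sriov_get_irqs():

    #include <stdio.h>

    static int alloc_irqs(int n) { return n > 32 ? -1 : 100; }

    int main(void)
    {
        int num_msix = 16, num_vf_qs = 16, first_vector_idx;
        int prev_msix = num_msix, prev_queues = num_vf_qs;

        num_msix = 64;                      /* requested new count */
        num_vf_qs = 64;
        first_vector_idx = alloc_irqs(num_msix);
        if (first_vector_idx < 0) {
            /* fall back to the previous configuration */
            num_msix = prev_msix;
            num_vf_qs = prev_queues;
            first_vector_idx = alloc_irqs(num_msix);
            printf("fell back to %d vectors, %d queues\n",
                   num_msix, num_vf_qs);
        }
        return first_vector_idx < 0;
    }
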
1214 struct ice_vf *vf; in ice_process_vflr_event() local
1223 ice_for_each_vf(pf, bkt, vf) { in ice_process_vflr_event()
1226 reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; in ice_process_vflr_event()
1227 bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; in ice_process_vflr_event()
1232 ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK); in ice_process_vflr_event()
1251 struct ice_vf *vf; in ice_get_vf_from_pfq() local
1255 ice_for_each_vf_rcu(pf, bkt, vf) { in ice_get_vf_from_pfq()
1259 vsi = ice_get_vf_vsi(vf); in ice_get_vf_from_pfq()
1267 if (kref_get_unless_zero(&vf->refcnt)) in ice_get_vf_from_pfq()
1268 found = vf; in ice_get_vf_from_pfq()
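
The lookup in ice_get_vf_from_pfq() may only take a reference while the count is still non-zero; otherwise the entry is already being freed. This models kref_get_unless_zero() with a C11 compare-exchange loop:

    #include <stdbool.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static bool get_unless_zero(atomic_int *refcnt)
    {
        int old = atomic_load(refcnt);

        while (old != 0)    /* CAS failure reloads 'old' for the retry */
            if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
                return true;
        return false;       /* count hit zero: entry is dying */
    }

    int main(void)
    {
        atomic_int refcnt;

        atomic_init(&refcnt, 1);
        printf("got ref: %d\n", get_unless_zero(&refcnt));  /* 1 */
        atomic_init(&refcnt, 0);
        printf("got ref: %d\n", get_unless_zero(&refcnt));  /* 0 */
        return 0;
    }
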
1303 struct ice_vf *vf; in ice_vf_lan_overflow_event() local
1311 vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue)); in ice_vf_lan_overflow_event()
1312 if (!vf) in ice_vf_lan_overflow_event()
1315 ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK); in ice_vf_lan_overflow_event()
1316 ice_put_vf(vf); in ice_vf_lan_overflow_event()
1333 struct ice_vf *vf; in ice_set_vf_spoofchk() local
1338 vf = ice_get_vf_by_id(pf, vf_id); in ice_set_vf_spoofchk()
1339 if (!vf) in ice_set_vf_spoofchk()
1342 ret = ice_check_vf_ready_for_cfg(vf); in ice_set_vf_spoofchk()
1346 vf_vsi = ice_get_vf_vsi(vf); in ice_set_vf_spoofchk()
1349 vf->lan_vsi_idx, vf->vf_id); in ice_set_vf_spoofchk()
1356 vf_vsi->type, vf_vsi->vsi_num, vf->vf_id); in ice_set_vf_spoofchk()
1361 if (ena == vf->spoofchk) { in ice_set_vf_spoofchk()
1370 ena ? "ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret); in ice_set_vf_spoofchk()
1372 vf->spoofchk = ena; in ice_set_vf_spoofchk()
1375 ice_put_vf(vf); in ice_set_vf_spoofchk()
1391 struct ice_vf *vf; in ice_get_vf_cfg() local
1394 vf = ice_get_vf_by_id(pf, vf_id); in ice_get_vf_cfg()
1395 if (!vf) in ice_get_vf_cfg()
1398 ret = ice_check_vf_ready_for_cfg(vf); in ice_get_vf_cfg()
1402 ivi->vf = vf_id; in ice_get_vf_cfg()
1403 ether_addr_copy(ivi->mac, vf->hw_lan_addr); in ice_get_vf_cfg()
1406 ivi->vlan = ice_vf_get_port_vlan_id(vf); in ice_get_vf_cfg()
1407 ivi->qos = ice_vf_get_port_vlan_prio(vf); in ice_get_vf_cfg()
1408 if (ice_vf_is_port_vlan_ena(vf)) in ice_get_vf_cfg()
1409 ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf)); in ice_get_vf_cfg()
1411 ivi->trusted = vf->trusted; in ice_get_vf_cfg()
1412 ivi->spoofchk = vf->spoofchk; in ice_get_vf_cfg()
1413 if (!vf->link_forced) in ice_get_vf_cfg()
1415 else if (vf->link_up) in ice_get_vf_cfg()
1419 ivi->max_tx_rate = vf->max_tx_rate; in ice_get_vf_cfg()
1420 ivi->min_tx_rate = vf->min_tx_rate; in ice_get_vf_cfg()
1423 ice_put_vf(vf); in ice_get_vf_cfg()
1439 struct ice_vf *vf; in __ice_set_vf_mac() local
1448 vf = ice_get_vf_by_id(pf, vf_id); in __ice_set_vf_mac()
1449 if (!vf) in __ice_set_vf_mac()
1453 if (ether_addr_equal(vf->dev_lan_addr, mac) && in __ice_set_vf_mac()
1454 ether_addr_equal(vf->hw_lan_addr, mac)) { in __ice_set_vf_mac()
1459 ret = ice_check_vf_ready_for_cfg(vf); in __ice_set_vf_mac()
1463 mutex_lock(&vf->cfg_lock); in __ice_set_vf_mac()
1468 ether_addr_copy(vf->dev_lan_addr, mac); in __ice_set_vf_mac()
1469 ether_addr_copy(vf->hw_lan_addr, mac); in __ice_set_vf_mac()
1472 vf->pf_set_mac = false; in __ice_set_vf_mac()
1474 vf->vf_id); in __ice_set_vf_mac()
1477 vf->pf_set_mac = true; in __ice_set_vf_mac()
1482 ice_reset_vf(vf, ICE_VF_RESET_NOTIFY); in __ice_set_vf_mac()
1483 mutex_unlock(&vf->cfg_lock); in __ice_set_vf_mac()
1486 ice_put_vf(vf); in __ice_set_vf_mac()
1515 struct ice_vf *vf; in ice_set_vf_trust() local
1518 vf = ice_get_vf_by_id(pf, vf_id); in ice_set_vf_trust()
1519 if (!vf) in ice_set_vf_trust()
1527 ret = ice_check_vf_ready_for_cfg(vf); in ice_set_vf_trust()
1532 if (trusted == vf->trusted) { in ice_set_vf_trust()
1537 mutex_lock(&vf->cfg_lock); in ice_set_vf_trust()
1539 vf->trusted = trusted; in ice_set_vf_trust()
1540 ice_reset_vf(vf, ICE_VF_RESET_NOTIFY); in ice_set_vf_trust()
1544 mutex_unlock(&vf->cfg_lock); in ice_set_vf_trust()
1547 ice_put_vf(vf); in ice_set_vf_trust()
1562 struct ice_vf *vf; in ice_set_vf_link_state() local
1565 vf = ice_get_vf_by_id(pf, vf_id); in ice_set_vf_link_state()
1566 if (!vf) in ice_set_vf_link_state()
1569 ret = ice_check_vf_ready_for_cfg(vf); in ice_set_vf_link_state()
1575 vf->link_forced = false; in ice_set_vf_link_state()
1578 vf->link_forced = true; in ice_set_vf_link_state()
1579 vf->link_up = true; in ice_set_vf_link_state()
1582 vf->link_forced = true; in ice_set_vf_link_state()
1583 vf->link_up = false; in ice_set_vf_link_state()
1590 ice_vc_notify_vf_link_state(vf); in ice_set_vf_link_state()
1593 ice_put_vf(vf); in ice_set_vf_link_state()
1603 struct ice_vf *vf; in ice_calc_all_vfs_min_tx_rate() local
1608 ice_for_each_vf_rcu(pf, bkt, vf) in ice_calc_all_vfs_min_tx_rate()
1609 rate += vf->min_tx_rate; in ice_calc_all_vfs_min_tx_rate()
1628 ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate) in ice_min_tx_rate_oversubscribed() argument
1630 struct ice_vsi *vsi = ice_get_vf_vsi(vf); in ice_min_tx_rate_oversubscribed()
1638 all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf); in ice_min_tx_rate_oversubscribed()
1641 all_vfs_min_tx_rate -= vf->min_tx_rate; in ice_min_tx_rate_oversubscribed()
1644 …dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d… in ice_min_tx_rate_oversubscribed()
1645 min_tx_rate, vf->vf_id, in ice_min_tx_rate_oversubscribed()
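
The oversubscription check sums every VF's guaranteed minimum, subtracts this VF's current setting (since the request replaces it), and compares against link speed in Mbps. A runnable sketch with illustrative numbers:

    #include <stdio.h>

    int main(void)
    {
        int link_speed_mbps = 25000;                    /* illustrative */
        int min_rates[] = { 5000, 8000, 4000 };         /* all VFs */
        int this_vf = 1, requested = 20000;

        int all_vfs_min = 0;
        for (size_t i = 0; i < sizeof(min_rates) / sizeof(min_rates[0]); i++)
            all_vfs_min += min_rates[i];
        all_vfs_min -= min_rates[this_vf];              /* drop old rate */

        if (all_vfs_min + requested > link_speed_mbps)
            printf("min_tx_rate of %d Mbps would oversubscribe by %d Mbps\n",
                   requested, all_vfs_min + requested - link_speed_mbps);
        return 0;
    }
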
1668 struct ice_vf *vf; in ice_set_vf_bw() local
1673 vf = ice_get_vf_by_id(pf, vf_id); in ice_set_vf_bw()
1674 if (!vf) in ice_set_vf_bw()
1677 ret = ice_check_vf_ready_for_cfg(vf); in ice_set_vf_bw()
1681 vsi = ice_get_vf_vsi(vf); in ice_set_vf_bw()
1693 if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) { in ice_set_vf_bw()
1698 if (vf->min_tx_rate != (unsigned int)min_tx_rate) { in ice_set_vf_bw()
1702 vf->vf_id); in ice_set_vf_bw()
1706 vf->min_tx_rate = min_tx_rate; in ice_set_vf_bw()
1709 if (vf->max_tx_rate != (unsigned int)max_tx_rate) { in ice_set_vf_bw()
1713 vf->vf_id); in ice_set_vf_bw()
1717 vf->max_tx_rate = max_tx_rate; in ice_set_vf_bw()
1721 ice_put_vf(vf); in ice_set_vf_bw()
1737 struct ice_vf *vf; in ice_get_vf_stats() local
1740 vf = ice_get_vf_by_id(pf, vf_id); in ice_get_vf_stats()
1741 if (!vf) in ice_get_vf_stats()
1744 ret = ice_check_vf_ready_for_cfg(vf); in ice_get_vf_stats()
1748 vsi = ice_get_vf_vsi(vf); in ice_get_vf_stats()
1771 ice_put_vf(vf); in ice_get_vf_stats()
1819 struct ice_vf *vf; in ice_set_vf_port_vlan() local
1836 vf = ice_get_vf_by_id(pf, vf_id); in ice_set_vf_port_vlan()
1837 if (!vf) in ice_set_vf_port_vlan()
1840 ret = ice_check_vf_ready_for_cfg(vf); in ice_set_vf_port_vlan()
1844 if (ice_vf_get_port_vlan_prio(vf) == qos && in ice_set_vf_port_vlan()
1845 ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto && in ice_set_vf_port_vlan()
1846 ice_vf_get_port_vlan_id(vf) == vlan_id) { in ice_set_vf_port_vlan()
1854 mutex_lock(&vf->cfg_lock); in ice_set_vf_port_vlan()
1856 vf->port_vlan_info = ICE_VLAN(local_vlan_proto, vlan_id, qos); in ice_set_vf_port_vlan()
1857 if (ice_vf_is_port_vlan_ena(vf)) in ice_set_vf_port_vlan()
1863 ice_reset_vf(vf, ICE_VF_RESET_NOTIFY); in ice_set_vf_port_vlan()
1864 mutex_unlock(&vf->cfg_lock); in ice_set_vf_port_vlan()
1867 ice_put_vf(vf); in ice_set_vf_port_vlan()
1875 void ice_print_vf_rx_mdd_event(struct ice_vf *vf) in ice_print_vf_rx_mdd_event() argument
1877 struct ice_pf *pf = vf->pf; in ice_print_vf_rx_mdd_event()
1883 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id, in ice_print_vf_rx_mdd_event()
1884 vf->dev_lan_addr, in ice_print_vf_rx_mdd_event()
1893 void ice_print_vf_tx_mdd_event(struct ice_vf *vf) in ice_print_vf_tx_mdd_event() argument
1895 struct ice_pf *pf = vf->pf; in ice_print_vf_tx_mdd_event()
1901 vf->mdd_tx_events.count, pf->hw.pf_id, vf->vf_id, in ice_print_vf_tx_mdd_event()
1902 vf->dev_lan_addr, in ice_print_vf_tx_mdd_event()
1915 struct ice_vf *vf; in ice_print_vfs_mdd_events() local
1929 ice_for_each_vf(pf, bkt, vf) { in ice_print_vfs_mdd_events()
1931 if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) { in ice_print_vfs_mdd_events()
1932 vf->mdd_rx_events.last_printed = in ice_print_vfs_mdd_events()
1933 vf->mdd_rx_events.count; in ice_print_vfs_mdd_events()
1934 ice_print_vf_rx_mdd_event(vf); in ice_print_vfs_mdd_events()
1938 if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) { in ice_print_vfs_mdd_events()
1939 vf->mdd_tx_events.last_printed = in ice_print_vfs_mdd_events()
1940 vf->mdd_tx_events.count; in ice_print_vfs_mdd_events()
1941 ice_print_vf_tx_mdd_event(vf); in ice_print_vfs_mdd_events()
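
ice_print_vfs_mdd_events() deduplicates by watermark: an event is logged only when its running count differs from the count at the last print, and the watermark is then advanced. A minimal sketch:

    #include <stdio.h>

    struct mdd_events { unsigned int count, last_printed; };

    static void maybe_print(struct mdd_events *ev, const char *dir)
    {
        if (ev->count != ev->last_printed) {
            ev->last_printed = ev->count;   /* advance the watermark */
            printf("%u %s MDD events\n", ev->count, dir);
        }
    }

    int main(void)
    {
        struct mdd_events rx = { .count = 3, .last_printed = 0 };

        maybe_print(&rx, "Rx");     /* prints */
        maybe_print(&rx, "Rx");     /* silent: nothing new */
        return 0;
    }
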
1956 struct ice_vf *vf; in ice_restore_all_vfs_msi_state() local
1959 ice_for_each_vf(pf, bkt, vf) in ice_restore_all_vfs_msi_state()
1960 pci_restore_msi_state(vf->vfdev); in ice_restore_all_vfs_msi_state()