xref: /linux/drivers/net/ethernet/intel/ice/ice_vf_lib.c (revision af2d6148d2a159e1a0862bce5a2c88c1618a2b27)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2022, Intel Corporation. */
3 
4 #include "ice_vf_lib_private.h"
5 #include "ice.h"
6 #include "ice_lib.h"
7 #include "ice_fltr.h"
8 #include "ice_virtchnl_allowlist.h"
9 
10 /* Public functions which may be accessed by all driver files */
11 
12 /**
13  * ice_get_vf_by_id - Get pointer to VF by ID
14  * @pf: the PF private structure
15  * @vf_id: the VF ID to locate
16  *
17  * Locate and return a pointer to the VF structure associated with a given ID.
18  * Returns NULL if the ID does not have a valid VF structure associated with
19  * it.
20  *
21  * This function takes a reference to the VF, which must be released by
22  * calling ice_put_vf() once the caller is finished accessing the VF structure
23  * returned.
24  */
25 struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
26 {
27 	struct ice_vf *vf;
28 
29 	rcu_read_lock();
30 	hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
31 		if (vf->vf_id == vf_id) {
32 			struct ice_vf *found;
33 
34 			if (kref_get_unless_zero(&vf->refcnt))
35 				found = vf;
36 			else
37 				found = NULL;
38 
39 			rcu_read_unlock();
40 			return found;
41 		}
42 	}
43 	rcu_read_unlock();
44 
45 	return NULL;
46 }
47 
/**
 * ice_release_vf - Release VF associated with a refcount
 * @ref: the kref decremented to zero
 *
 * Callback function for kref_put to release a VF once its reference count has
 * hit zero.
 */
static void ice_release_vf(struct kref *ref)
{
	struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);

	/* Drop the held PCI device reference first: the free() callback
	 * below releases the ice_vf memory itself, after which vf->vfdev
	 * may no longer be accessed.
	 */
	pci_dev_put(vf->vfdev);

	vf->vf_ops->free(vf);
}
63 
/**
 * ice_put_vf - Release a reference to a VF
 * @vf: the VF structure to decrease reference count on
 *
 * Decrease the reference count for a VF, and free the entry if it is no
 * longer in use.
 *
 * This must be called after ice_get_vf_by_id() once the reference to the VF
 * structure is no longer used. Otherwise, the VF structure will never be
 * freed.
 */
void ice_put_vf(struct ice_vf *vf)
{
	/* ice_release_vf() runs only when this was the last reference */
	kref_put(&vf->refcnt, ice_release_vf);
}
79 
/**
 * ice_has_vfs - Return true if the PF has any associated VFs
 * @pf: the PF private structure
 *
 * Return whether or not the PF has any allocated VFs.
 *
 * Note that this function only guarantees that there are no VFs at the point
 * of calling it. It does not guarantee that no more VFs will be added.
 */
bool ice_has_vfs(struct ice_pf *pf)
{
	/* A simple check that the hash table is not empty does not require
	 * the mutex or rcu_read_lock.
	 */
	return !hash_empty(pf->vfs.table);
}
96 
97 /**
98  * ice_get_num_vfs - Get number of allocated VFs
99  * @pf: the PF private structure
100  *
101  * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
102  * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
103  * the output of this function.
104  */
105 u16 ice_get_num_vfs(struct ice_pf *pf)
106 {
107 	struct ice_vf *vf;
108 	unsigned int bkt;
109 	u16 num_vfs = 0;
110 
111 	rcu_read_lock();
112 	ice_for_each_vf_rcu(pf, bkt, vf)
113 		num_vfs++;
114 	rcu_read_unlock();
115 
116 	return num_vfs;
117 }
118 
119 /**
120  * ice_get_vf_vsi - get VF's VSI based on the stored index
121  * @vf: VF used to get VSI
122  */
123 struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
124 {
125 	if (vf->lan_vsi_idx == ICE_NO_VSI)
126 		return NULL;
127 
128 	return vf->pf->vsi[vf->lan_vsi_idx];
129 }
130 
131 /**
132  * ice_is_vf_disabled
133  * @vf: pointer to the VF info
134  *
135  * If the PF has been disabled, there is no need resetting VF until PF is
136  * active again. Similarly, if the VF has been disabled, this means something
137  * else is resetting the VF, so we shouldn't continue.
138  *
139  * Returns true if the caller should consider the VF as disabled whether
140  * because that single VF is explicitly disabled or because the PF is
141  * currently disabled.
142  */
143 bool ice_is_vf_disabled(struct ice_vf *vf)
144 {
145 	struct ice_pf *pf = vf->pf;
146 
147 	return (test_bit(ICE_VF_DIS, pf->state) ||
148 		test_bit(ICE_VF_STATE_DIS, vf->vf_states));
149 }
150 
151 /**
152  * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
153  * @vf: The VF being resseting
154  *
155  * The max poll time is about ~800ms, which is about the maximum time it takes
156  * for a VF to be reset and/or a VF driver to be removed.
157  */
158 static void ice_wait_on_vf_reset(struct ice_vf *vf)
159 {
160 	int i;
161 
162 	for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
163 		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
164 			break;
165 		msleep(ICE_MAX_VF_RESET_SLEEP_MS);
166 	}
167 }
168 
169 /**
170  * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
171  * @vf: VF to check if it's ready to be configured/queried
172  *
173  * The purpose of this function is to make sure the VF is not in reset, not
174  * disabled, and initialized so it can be configured and/or queried by a host
175  * administrator.
176  */
177 int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
178 {
179 	ice_wait_on_vf_reset(vf);
180 
181 	if (ice_is_vf_disabled(vf))
182 		return -EINVAL;
183 
184 	if (ice_check_vf_init(vf))
185 		return -EBUSY;
186 
187 	return 0;
188 }
189 
/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 *
 * This function only starts the reset; it does not poll for completion.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
	/* Inform VF that it is no longer active, as a warning */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * when it's safe again to access VF's VSI.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
	 * needs to clear them in the case of VFR/VFLR. If this is done for
	 * PFR, it can mess up VF resets because the VF driver may already
	 * have started cleanup by the time we get here.
	 */
	if (!is_pfr)
		vf->vf_ops->clear_mbx_register(vf);

	/* Hand off to the device-specific reset trigger */
	vf->vf_ops->trigger_reset_register(vf, is_vflr);
}
220 
/**
 * ice_vf_clear_counters - reset cached per-VF bookkeeping counters
 * @vf: VF to clear the counters for
 *
 * Zero the cached MAC/VLAN filter counts and the MDD event statistics.
 * Used on the VF reset path before the VSI is rebuilt.
 */
static void ice_vf_clear_counters(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	/* The VSI may already be invalidated during teardown */
	if (vsi)
		vsi->num_vlan = 0;

	vf->num_mac = 0;
	vf->num_mac_lldp = 0;
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}
233 
/**
 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
 * @vf: VF to perform pre VSI rebuild tasks
 *
 * These tasks are items that don't need to be amortized since they are most
 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
 */
static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
{
	/* Close any IRQ mapping now; irq_close is optional per VF ops */
	if (vf->vf_ops->irq_close)
		vf->vf_ops->irq_close(vf);

	ice_vf_clear_counters(vf);
	/* Acknowledge the completed reset so HW can accept a new trigger */
	vf->vf_ops->clear_reset_trigger(vf);
}
250 
251 /**
252  * ice_vf_reconfig_vsi - Reconfigure a VF VSI with the device
253  * @vf: VF to reconfigure the VSI for
254  *
255  * This is called when a single VF is being reset (i.e. VVF, VFLR, host VF
256  * configuration change, etc).
257  *
258  * It brings the VSI down and then reconfigures it with the hardware.
259  */
260 static int ice_vf_reconfig_vsi(struct ice_vf *vf)
261 {
262 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
263 	struct ice_pf *pf = vf->pf;
264 	int err;
265 
266 	if (WARN_ON(!vsi))
267 		return -EINVAL;
268 
269 	vsi->flags = ICE_VSI_FLAG_NO_INIT;
270 
271 	ice_vsi_decfg(vsi);
272 	ice_fltr_remove_all(vsi);
273 
274 	err = ice_vsi_cfg(vsi);
275 	if (err) {
276 		dev_err(ice_pf_to_dev(pf),
277 			"Failed to reconfigure the VF%u's VSI, error %d\n",
278 			vf->vf_id, err);
279 		return err;
280 	}
281 
282 	return 0;
283 }
284 
285 /**
286  * ice_vf_rebuild_vsi - rebuild the VF's VSI
287  * @vf: VF to rebuild the VSI for
288  *
289  * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
290  * host, PFR, CORER, etc.).
291  *
292  * It reprograms the VSI configuration back into hardware.
293  */
294 static int ice_vf_rebuild_vsi(struct ice_vf *vf)
295 {
296 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
297 	struct ice_pf *pf = vf->pf;
298 
299 	if (WARN_ON(!vsi))
300 		return -EINVAL;
301 
302 	if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) {
303 		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
304 			vf->vf_id);
305 		return -EIO;
306 	}
307 	/* vsi->idx will remain the same in this case so don't update
308 	 * vf->lan_vsi_idx
309 	 */
310 	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
311 
312 	return 0;
313 }
314 
/**
 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
 * @vf: VF to add MAC filters for
 * @vsi: Pointer to VSI
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
 *
 * Return: 0 on success (a failure to enable Rx VLAN filtering is only
 * warned about, not propagated), otherwise a negative error code.
 */
static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vf->pf);
	int err;

	if (ice_vf_is_port_vlan_ena(vf)) {
		/* Re-apply the host-configured port VLAN and its filter */
		err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
		if (err) {
			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
				vf->vf_id, err);
			return err;
		}

		err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
	} else {
		/* clear possible previous port vlan config */
		err = ice_vsi_clear_port_vlan(vsi);
		if (err) {
			dev_err(dev, "failed to clear port VLAN via VSI parameters for VF %u, error %d\n",
				vf->vf_id, err);
			return err;
		}
		err = ice_vsi_add_vlan_zero(vsi);
	}

	/* err here is from add_vlan() or ice_vsi_add_vlan_zero() above */
	if (err) {
		dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
			ice_vf_is_port_vlan_ena(vf) ?
			ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
		return err;
	}

	err = vlan_ops->ena_rx_filtering(vsi);
	if (err)
		dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
			 vf->vf_id, vsi->idx, err);

	return 0;
}
363 
364 /**
365  * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
366  * @vf: VF to re-apply the configuration for
367  *
368  * Called after a VF VSI has been re-added/rebuild during reset. The PF driver
369  * needs to re-apply the host configured Tx rate limiting configuration.
370  */
371 static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
372 {
373 	struct device *dev = ice_pf_to_dev(vf->pf);
374 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
375 	int err;
376 
377 	if (WARN_ON(!vsi))
378 		return -EINVAL;
379 
380 	if (vf->min_tx_rate) {
381 		err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
382 		if (err) {
383 			dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
384 				vf->min_tx_rate, vf->vf_id, err);
385 			return err;
386 		}
387 	}
388 
389 	if (vf->max_tx_rate) {
390 		err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
391 		if (err) {
392 			dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
393 				vf->max_tx_rate, vf->vf_id, err);
394 			return err;
395 		}
396 	}
397 
398 	return 0;
399 }
400 
/**
 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
 * @vf: VF to configure trust setting for
 *
 * Mirror the host-administered vf->trusted flag into the VF capability
 * bitmap so the privilege survives a reset.
 */
static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
{
	assign_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps, vf->trusted);
}
409 
/**
 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
 * @vf: VF to add MAC filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
 *
 * Return: 0 on success (including the switchdev no-op case), otherwise a
 * negative error from the filter add.
 */
static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	u8 broadcast[ETH_ALEN];
	int status;

	if (WARN_ON(!vsi))
		return -EINVAL;

	/* In switchdev mode MAC filters are managed via the representor */
	if (ice_is_eswitch_mode_switchdev(vf->pf))
		return 0;

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
			vf->vf_id, status);
		return status;
	}

	vf->num_mac++;

	/* hw_lan_addr may be all-zero if no address was ever assigned */
	if (is_valid_ether_addr(vf->hw_lan_addr)) {
		status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
					  ICE_FWD_TO_VSI);
		if (status) {
			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
				&vf->hw_lan_addr[0], vf->vf_id,
				status);
			return status;
		}
		vf->num_mac++;

		/* keep the effective address in sync with the hardware one */
		ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
	}

	return 0;
}
456 
457 /**
458  * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
459  * @vsi: Pointer to VSI
460  *
461  * This function moves VSI into corresponding scheduler aggregator node
462  * based on cached value of "aggregator node info" per VSI
463  */
464 static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
465 {
466 	struct ice_pf *pf = vsi->back;
467 	struct device *dev;
468 	int status;
469 
470 	if (!vsi->agg_node)
471 		return;
472 
473 	dev = ice_pf_to_dev(pf);
474 	if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
475 		dev_dbg(dev,
476 			"agg_id %u already has reached max_num_vsis %u\n",
477 			vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
478 		return;
479 	}
480 
481 	status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
482 				     vsi->idx, vsi->tc_cfg.ena_tc);
483 	if (status)
484 		dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
485 			vsi->idx, vsi->agg_node->agg_id);
486 	else
487 		vsi->agg_node->num_vsis++;
488 }
489 
490 /**
491  * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
492  * @vf: VF to rebuild host configuration on
493  */
494 static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
495 {
496 	struct device *dev = ice_pf_to_dev(vf->pf);
497 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
498 
499 	if (WARN_ON(!vsi))
500 		return;
501 
502 	ice_vf_set_host_trust_cfg(vf);
503 
504 	if (ice_vf_rebuild_host_mac_cfg(vf))
505 		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
506 			vf->vf_id);
507 
508 	if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
509 		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
510 			vf->vf_id);
511 
512 	if (ice_vf_rebuild_host_tx_rate_cfg(vf))
513 		dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
514 			vf->vf_id);
515 
516 	if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
517 		dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
518 			vf->vf_id);
519 
520 	/* rebuild aggregator node config for main VF VSI */
521 	ice_vf_rebuild_aggregator_node_cfg(vsi);
522 }
523 
/**
 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 * @vf: pointer to the VF structure
 *
 * Clears the per-queue Tx/Rx enable bitmaps and the aggregate queues-enabled
 * state bit. Does not touch hardware; bookkeeping only.
 */
static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
	/* Clear Rx/Tx enabled queues flag */
	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}
535 
/**
 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
 * @vf: VF to set in initialized state
 *
 * After this function the VF will be ready to receive/handle the
 * VIRTCHNL_OP_GET_VF_RESOURCES message
 */
static void ice_vf_set_initialized(struct ice_vf *vf)
{
	ice_set_vf_state_qs_dis(vf);
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
	/* INIT is set last, after all other state has been cleared */
	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
	/* forget any VLAN v2 capabilities negotiated before the reset */
	memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
}
552 
/**
 * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild
 * @vf: the VF being reset
 *
 * Perform reset tasks which must occur after the VSI has been re-created or
 * rebuilt during a VF reset.
 */
static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
{
	/* restore host configuration, then mark the VF ready for virtchnl */
	ice_vf_rebuild_host_cfg(vf);
	ice_vf_set_initialized(vf);

	/* device-specific post-rebuild hook */
	vf->vf_ops->post_vsi_rebuild(vf);
}
567 
568 /**
569  * ice_is_any_vf_in_unicast_promisc - check if any VF(s)
570  * are in unicast promiscuous mode
571  * @pf: PF structure for accessing VF(s)
572  *
573  * Return false if no VF(s) are in unicast promiscuous mode,
574  * else return true
575  */
576 bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
577 {
578 	bool is_vf_promisc = false;
579 	struct ice_vf *vf;
580 	unsigned int bkt;
581 
582 	rcu_read_lock();
583 	ice_for_each_vf_rcu(pf, bkt, vf) {
584 		/* found a VF that has promiscuous mode configured */
585 		if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
586 			is_vf_promisc = true;
587 			break;
588 		}
589 	}
590 	rcu_read_unlock();
591 
592 	return is_vf_promisc;
593 }
594 
595 /**
596  * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes
597  * @vf: the VF pointer
598  * @vsi: the VSI to configure
599  * @ucast_m: promiscuous mask to apply to unicast
600  * @mcast_m: promiscuous mask to apply to multicast
601  *
602  * Decide which mask should be used for unicast and multicast filter,
603  * based on presence of VLANs
604  */
605 void
606 ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
607 			 u8 *ucast_m, u8 *mcast_m)
608 {
609 	if (ice_vf_is_port_vlan_ena(vf) ||
610 	    ice_vsi_has_non_zero_vlans(vsi)) {
611 		*mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
612 		*ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
613 	} else {
614 		*mcast_m = ICE_MCAST_PROMISC_BITS;
615 		*ucast_m = ICE_UCAST_PROMISC_BITS;
616 	}
617 }
618 
/**
 * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI
 * @vf: the VF pointer
 * @vsi: the VSI to configure
 *
 * Clear all promiscuous/allmulticast filters for a VF
 *
 * Return: 0 on success; otherwise the error of the last clear attempt
 * (an earlier unicast failure may be overwritten by the multicast result).
 */
static int
ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_pf *pf = vf->pf;
	u8 ucast_m, mcast_m;
	int ret = 0;

	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
		/* Without true promiscuous support, unicast promisc is
		 * implemented via the default VSI; otherwise clear the
		 * per-VSI promiscuous filters directly.
		 */
		if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
			if (ice_is_dflt_vsi_in_use(vsi->port_info))
				ret = ice_clear_dflt_vsi(vsi);
		} else {
			ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
		}

		if (ret) {
			dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n");
		} else {
			clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
			dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n");
		}
	}

	if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
		ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
		if (ret) {
			dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n");
		} else {
			clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
			dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n");
		}
	}
	return ret;
}
661 
662 /**
663  * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
664  * @vf: the VF to configure
665  * @vsi: the VF's VSI
666  * @promisc_m: the promiscuous mode to enable
667  */
668 int
669 ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
670 {
671 	struct ice_hw *hw = &vsi->back->hw;
672 	int status;
673 
674 	if (ice_vf_is_port_vlan_ena(vf))
675 		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
676 						  ice_vf_get_port_vlan_id(vf));
677 	else if (ice_vsi_has_non_zero_vlans(vsi))
678 		status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
679 	else
680 		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
681 
682 	if (status && status != -EEXIST) {
683 		dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
684 			vf->vf_id, status);
685 		return status;
686 	}
687 
688 	return 0;
689 }
690 
691 /**
692  * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI
693  * @vf: the VF to configure
694  * @vsi: the VF's VSI
695  * @promisc_m: the promiscuous mode to disable
696  */
697 int
698 ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
699 {
700 	struct ice_hw *hw = &vsi->back->hw;
701 	int status;
702 
703 	if (ice_vf_is_port_vlan_ena(vf))
704 		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
705 						    ice_vf_get_port_vlan_id(vf));
706 	else if (ice_vsi_has_non_zero_vlans(vsi))
707 		status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
708 	else
709 		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
710 
711 	if (status && status != -ENOENT) {
712 		dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
713 			vf->vf_id, status);
714 		return status;
715 	}
716 
717 	return 0;
718 }
719 
720 /**
721  * ice_reset_vf_mbx_cnt - reset VF mailbox message count
722  * @vf: pointer to the VF structure
723  *
724  * This function clears the VF mailbox message count, and should be called on
725  * VF reset.
726  */
727 static void ice_reset_vf_mbx_cnt(struct ice_vf *vf)
728 {
729 	struct ice_pf *pf = vf->pf;
730 
731 	if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
732 		ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
733 	else
734 		ice_mbx_clear_malvf(&vf->mbx_info);
735 }
736 
/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 *
 * Reset all VFs at once, in response to a PF or other device reset.
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise it must perform
 * these resets in a serialized fashion.
 */
void ice_reset_all_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!ice_has_vfs(pf))
		return;

	/* table_lock keeps the set of VFs stable for the whole sequence */
	mutex_lock(&pf->vfs.table_lock);

	/* clear all malicious info if the VFs are getting reset */
	ice_for_each_vf(pf, bkt, vf)
		ice_reset_vf_mbx_cnt(vf);

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
		mutex_unlock(&pf->vfs.table_lock);
		return;
	}

	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, bkt, vf)
		ice_trigger_vf_reset(vf, true, true);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Now that we've triggered all of the VFs, iterate
	 * the table again and wait for each VF to complete.
	 */
	ice_for_each_vf(pf, bkt, vf) {
		if (!vf->vf_ops->poll_reset_status(vf)) {
			/* Display a warning if at least one VF didn't manage
			 * to reset in time, but continue on with the
			 * operation.
			 */
			dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
			break;
		}
	}

	/* free VF resources to begin resetting the VSI state */
	ice_for_each_vf(pf, bkt, vf) {
		/* per-VF lock serializes against virtchnl handling */
		mutex_lock(&vf->cfg_lock);

		ice_eswitch_detach_vf(pf, vf);
		vf->driver_caps = 0;
		ice_vc_set_default_allowlist(vf);

		ice_vf_fdir_exit(vf);
		ice_vf_fdir_init(vf);
		/* clean VF control VSI when resetting VFs since it should be
		 * setup only when VF creates its first FDIR rule.
		 */
		if (vf->ctrl_vsi_idx != ICE_NO_VSI)
			ice_vf_ctrl_invalidate_vsi(vf);

		ice_vf_pre_vsi_rebuild(vf);
		ice_vf_rebuild_vsi(vf);
		ice_vf_post_vsi_rebuild(vf);

		ice_eswitch_attach_vf(pf, vf);

		mutex_unlock(&vf->cfg_lock);
	}

	ice_flush(hw);
	clear_bit(ICE_VF_DIS, pf->state);

	mutex_unlock(&pf->vfs.table_lock);
}
820 
821 /**
822  * ice_notify_vf_reset - Notify VF of a reset event
823  * @vf: pointer to the VF structure
824  */
825 static void ice_notify_vf_reset(struct ice_vf *vf)
826 {
827 	struct ice_hw *hw = &vf->pf->hw;
828 	struct virtchnl_pf_event pfe;
829 
830 	/* Bail out if VF is in disabled state, neither initialized, nor active
831 	 * state - otherwise proceed with notifications
832 	 */
833 	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
834 	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
835 	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
836 		return;
837 
838 	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
839 	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
840 	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
841 			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
842 			      NULL);
843 }
844 
/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
 * @flags: flags controlling behavior of the reset
 *
 * Flags:
 *   ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event
 *   ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
 *   ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
 *
 * Returns 0 if the VF is currently in reset, if resets are disabled, or if
 * the VF resets successfully. Returns an error code if the VF fails to
 * rebuild.
 */
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
	struct ice_pf *pf = vf->pf;
	struct ice_lag *lag;
	struct ice_vsi *vsi;
	u8 act_prt, pri_prt;
	struct device *dev;
	int err = 0;
	bool rsd;

	dev = ice_pf_to_dev(pf);
	act_prt = ICE_LAG_INVALID_PORT;
	pri_prt = pf->hw.port_info->lport;

	if (flags & ICE_VF_RESET_NOTIFY)
		ice_notify_vf_reset(vf);

	if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
			vf->vf_id);
		return 0;
	}

	/* Either take cfg_lock ourselves or require the caller to hold it */
	if (flags & ICE_VF_RESET_LOCK)
		mutex_lock(&vf->cfg_lock);
	else
		lockdep_assert_held(&vf->cfg_lock);

	/* If this VF is bonded and a non-primary port is active, move its
	 * scheduler nodes to the primary port for the duration of the reset;
	 * they are moved back at out_unlock.
	 */
	lag = pf->lag;
	mutex_lock(&pf->lag_mutex);
	if (lag && lag->bonded && lag->primary) {
		act_prt = lag->active_port;
		if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
		    lag->upper_netdev)
			ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
		else
			act_prt = ICE_LAG_INVALID_PORT;
	}

	if (ice_is_vf_disabled(vf)) {
		vsi = ice_get_vf_vsi(vf);
		if (!vsi) {
			dev_dbg(dev, "VF is already removed\n");
			err = -EINVAL;
			goto out_unlock;
		}
		ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);

		if (ice_vsi_is_rx_queue_active(vsi))
			ice_vsi_stop_all_rx_rings(vsi);

		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
			vf->vf_id);
		goto out_unlock;
	}

	/* Set VF disable bit state here, before triggering reset */
	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
	ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);

	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EIO;
		goto out_unlock;
	}

	ice_dis_vf_qs(vf);

	/* Call Disable LAN Tx queue AQ whether or not queues are
	 * enabled. This is needed for successful completion of VFR.
	 */
	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
			NULL, vf->vf_ops->reset_type, vf->vf_id, NULL);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	rsd = vf->vf_ops->poll_reset_status(vf);

	/* Display a warning if VF didn't manage to reset in time, but need to
	 * continue on with the operation.
	 */
	if (!rsd)
		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);

	vf->driver_caps = 0;
	ice_vc_set_default_allowlist(vf);

	/* disable promiscuous modes in case they were enabled
	 * ignore any error if disabling process failed
	 */
	ice_vf_clear_all_promisc_modes(vf, vsi);

	ice_vf_fdir_exit(vf);
	ice_vf_fdir_init(vf);
	/* clean VF control VSI when resetting VF since it should be setup
	 * only when VF creates its first FDIR rule.
	 */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	ice_vf_pre_vsi_rebuild(vf);

	if (ice_vf_reconfig_vsi(vf)) {
		dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
			vf->vf_id);
		err = -EFAULT;
		goto out_unlock;
	}

	ice_vf_post_vsi_rebuild(vf);
	/* re-fetch the VSI: the rebuild may have changed the mapping */
	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EINVAL;
		goto out_unlock;
	}

	ice_eswitch_update_repr(&vf->repr_id, vsi);

	/* if the VF has been reset allow it to come up again */
	ice_reset_vf_mbx_cnt(vf);

out_unlock:
	/* restore the LAG scheduler nodes to the previously active port */
	if (lag && lag->bonded && lag->primary &&
	    act_prt != ICE_LAG_INVALID_PORT)
		ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
	mutex_unlock(&pf->lag_mutex);

	if (flags & ICE_VF_RESET_LOCK)
		mutex_unlock(&vf->cfg_lock);

	return err;
}
992 
/**
 * ice_set_vf_state_dis - Set VF state to disabled
 * @vf: pointer to the VF structure
 *
 * Disable the VF's queues (bookkeeping only) and clear any device-specific
 * reset state via the VF ops.
 */
void ice_set_vf_state_dis(struct ice_vf *vf)
{
	ice_set_vf_state_qs_dis(vf);
	vf->vf_ops->clear_reset_state(vf);
}
1002 
1003 /* Private functions only accessed from other virtualization files */
1004 
1005 /**
1006  * ice_initialize_vf_entry - Initialize a VF entry
1007  * @vf: pointer to the VF structure
1008  */
1009 void ice_initialize_vf_entry(struct ice_vf *vf)
1010 {
1011 	struct ice_pf *pf = vf->pf;
1012 	struct ice_vfs *vfs;
1013 
1014 	vfs = &pf->vfs;
1015 
1016 	/* assign default capabilities */
1017 	vf->spoofchk = true;
1018 	ice_vc_set_default_allowlist(vf);
1019 	ice_virtchnl_set_dflt_ops(vf);
1020 
1021 	/* set default number of MSI-X */
1022 	vf->num_msix = vfs->num_msix_per;
1023 	vf->num_vf_qs = vfs->num_qps_per;
1024 
1025 	/* set default RSS hash configuration */
1026 	vf->rss_hashcfg = ICE_DEFAULT_RSS_HASHCFG;
1027 
1028 	/* ctrl_vsi_idx will be set to a valid value only when iAVF
1029 	 * creates its first fdir rule.
1030 	 */
1031 	ice_vf_ctrl_invalidate_vsi(vf);
1032 	ice_vf_fdir_init(vf);
1033 
1034 	/* Initialize mailbox info for this VF */
1035 	if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
1036 		ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
1037 	else
1038 		ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);
1039 
1040 	mutex_init(&vf->cfg_lock);
1041 }
1042 
/**
 * ice_deinitialize_vf_entry - Undo VF entry initialization
 * @vf: pointer to the VF structure
 *
 * Remove the VF's mailbox tracking info from the list it was added to by
 * ice_mbx_init_vf_info() during ice_initialize_vf_entry(). When the
 * ICE_F_MBX_LIMIT feature is supported no list entry was created, so
 * there is nothing to remove.
 */
void ice_deinitialize_vf_entry(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
		list_del(&vf->mbx_info.list_entry);
}
1050 
1051 /**
1052  * ice_dis_vf_qs - Disable the VF queues
1053  * @vf: pointer to the VF structure
1054  */
1055 void ice_dis_vf_qs(struct ice_vf *vf)
1056 {
1057 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1058 
1059 	if (WARN_ON(!vsi))
1060 		return;
1061 
1062 	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
1063 	ice_vsi_stop_all_rx_rings(vsi);
1064 	ice_set_vf_state_qs_dis(vf);
1065 }
1066 
1067 /**
1068  * ice_err_to_virt_err - translate errors for VF return code
1069  * @err: error return code
1070  */
1071 enum virtchnl_status_code ice_err_to_virt_err(int err)
1072 {
1073 	switch (err) {
1074 	case 0:
1075 		return VIRTCHNL_STATUS_SUCCESS;
1076 	case -EINVAL:
1077 	case -ENODEV:
1078 		return VIRTCHNL_STATUS_ERR_PARAM;
1079 	case -ENOMEM:
1080 		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
1081 	case -EALREADY:
1082 	case -EBUSY:
1083 	case -EIO:
1084 	case -ENOSPC:
1085 		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1086 	default:
1087 		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
1088 	}
1089 }
1090 
1091 /**
1092  * ice_check_vf_init - helper to check if VF init complete
1093  * @vf: the pointer to the VF to check
1094  */
1095 int ice_check_vf_init(struct ice_vf *vf)
1096 {
1097 	struct ice_pf *pf = vf->pf;
1098 
1099 	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
1100 		dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
1101 			vf->vf_id);
1102 		return -EBUSY;
1103 	}
1104 	return 0;
1105 }
1106 
1107 /**
1108  * ice_vf_get_port_info - Get the VF's port info structure
1109  * @vf: VF used to get the port info structure for
1110  */
1111 struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
1112 {
1113 	return vf->pf->hw.port_info;
1114 }
1115 
1116 /**
1117  * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior
1118  * @vsi: the VSI to configure
1119  * @enable: whether to enable or disable the spoof checking
1120  *
1121  * Configure a VSI to enable (or disable) spoof checking behavior.
1122  */
1123 static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
1124 {
1125 	struct ice_vsi_ctx *ctx;
1126 	int err;
1127 
1128 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1129 	if (!ctx)
1130 		return -ENOMEM;
1131 
1132 	ctx->info.sec_flags = vsi->info.sec_flags;
1133 	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
1134 
1135 	if (enable)
1136 		ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1137 	else
1138 		ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1139 
1140 	err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
1141 	if (err)
1142 		dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n",
1143 			enable ? "ON" : "OFF", vsi->vsi_num, err);
1144 	else
1145 		vsi->info.sec_flags = ctx->info.sec_flags;
1146 
1147 	kfree(ctx);
1148 
1149 	return err;
1150 }
1151 
1152 /**
1153  * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI
1154  * @vsi: VSI to enable Tx spoof checking for
1155  */
1156 static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
1157 {
1158 	struct ice_vsi_vlan_ops *vlan_ops;
1159 	int err = 0;
1160 
1161 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1162 
1163 	/* Allow VF with VLAN 0 only to send all tagged traffic */
1164 	if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
1165 		err = vlan_ops->ena_tx_filtering(vsi);
1166 		if (err)
1167 			return err;
1168 	}
1169 
1170 	return ice_cfg_mac_antispoof(vsi, true);
1171 }
1172 
1173 /**
1174  * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI
1175  * @vsi: VSI to disable Tx spoof checking for
1176  */
1177 static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi)
1178 {
1179 	struct ice_vsi_vlan_ops *vlan_ops;
1180 	int err;
1181 
1182 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1183 
1184 	err = vlan_ops->dis_tx_filtering(vsi);
1185 	if (err)
1186 		return err;
1187 
1188 	return ice_cfg_mac_antispoof(vsi, false);
1189 }
1190 
1191 /**
1192  * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI
1193  * @vsi: VSI associated to the VF
1194  * @enable: whether to enable or disable the spoof checking
1195  */
1196 int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable)
1197 {
1198 	int err;
1199 
1200 	if (enable)
1201 		err = ice_vsi_ena_spoofchk(vsi);
1202 	else
1203 		err = ice_vsi_dis_spoofchk(vsi);
1204 
1205 	return err;
1206 }
1207 
1208 /**
1209  * ice_is_vf_trusted
1210  * @vf: pointer to the VF info
1211  */
1212 bool ice_is_vf_trusted(struct ice_vf *vf)
1213 {
1214 	return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1215 }
1216 
1217 /**
1218  * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
1219  * @vf: the VF to check
1220  *
1221  * Returns true if the VF has no Rx and no Tx queues enabled and returns false
1222  * otherwise
1223  */
1224 bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
1225 {
1226 	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
1227 		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
1228 }
1229 
1230 /**
1231  * ice_is_vf_link_up - check if the VF's link is up
1232  * @vf: VF to check if link is up
1233  */
1234 bool ice_is_vf_link_up(struct ice_vf *vf)
1235 {
1236 	struct ice_port_info *pi = ice_vf_get_port_info(vf);
1237 
1238 	if (ice_check_vf_init(vf))
1239 		return false;
1240 
1241 	if (ice_vf_has_no_qs_ena(vf))
1242 		return false;
1243 	else if (vf->link_forced)
1244 		return vf->link_up;
1245 	else
1246 		return pi->phy.link_info.link_info &
1247 			ICE_AQ_LINK_UP;
1248 }
1249 
1250 /**
1251  * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
1252  * @vf: VF that control VSI is being invalidated on
1253  */
1254 void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
1255 {
1256 	vf->ctrl_vsi_idx = ICE_NO_VSI;
1257 }
1258 
1259 /**
1260  * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
1261  * @vf: VF that control VSI is being released on
1262  */
1263 void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
1264 {
1265 	ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
1266 	ice_vf_ctrl_invalidate_vsi(vf);
1267 }
1268 
1269 /**
1270  * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
1271  * @vf: VF to setup control VSI for
1272  *
1273  * Returns pointer to the successfully allocated VSI struct on success,
1274  * otherwise returns NULL on failure.
1275  */
1276 struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
1277 {
1278 	struct ice_vsi_cfg_params params = {};
1279 	struct ice_pf *pf = vf->pf;
1280 	struct ice_vsi *vsi;
1281 
1282 	params.type = ICE_VSI_CTRL;
1283 	params.port_info = ice_vf_get_port_info(vf);
1284 	params.vf = vf;
1285 	params.flags = ICE_VSI_FLAG_INIT;
1286 
1287 	vsi = ice_vsi_setup(pf, &params);
1288 	if (!vsi) {
1289 		dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
1290 		ice_vf_ctrl_invalidate_vsi(vf);
1291 	}
1292 
1293 	return vsi;
1294 }
1295 
1296 /**
1297  * ice_vf_init_host_cfg - Initialize host admin configuration
1298  * @vf: VF to initialize
1299  * @vsi: the VSI created at initialization
1300  *
1301  * Initialize the VF host configuration. Called during VF creation to setup
1302  * VLAN 0, add the VF VSI broadcast filter, and setup spoof checking. It
1303  * should only be called during VF creation.
1304  */
1305 int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
1306 {
1307 	struct ice_vsi_vlan_ops *vlan_ops;
1308 	struct ice_pf *pf = vf->pf;
1309 	u8 broadcast[ETH_ALEN];
1310 	struct device *dev;
1311 	int err;
1312 
1313 	dev = ice_pf_to_dev(pf);
1314 
1315 	err = ice_vsi_add_vlan_zero(vsi);
1316 	if (err) {
1317 		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
1318 			 vf->vf_id);
1319 		return err;
1320 	}
1321 
1322 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1323 	err = vlan_ops->ena_rx_filtering(vsi);
1324 	if (err) {
1325 		dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
1326 			 vf->vf_id);
1327 		return err;
1328 	}
1329 
1330 	eth_broadcast_addr(broadcast);
1331 	err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
1332 	if (err) {
1333 		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n",
1334 			vf->vf_id, err);
1335 		return err;
1336 	}
1337 
1338 	vf->num_mac = 1;
1339 
1340 	err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
1341 	if (err) {
1342 		dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
1343 			 vf->vf_id);
1344 		return err;
1345 	}
1346 
1347 	return 0;
1348 }
1349 
1350 /**
1351  * ice_vf_invalidate_vsi - invalidate vsi_idx to remove VSI access
1352  * @vf: VF to remove access to VSI for
1353  */
1354 void ice_vf_invalidate_vsi(struct ice_vf *vf)
1355 {
1356 	vf->lan_vsi_idx = ICE_NO_VSI;
1357 }
1358 
1359 /**
1360  * ice_vf_vsi_release - Release the VF VSI and invalidate indexes
1361  * @vf: pointer to the VF structure
1362  *
1363  * Release the VF associated with this VSI and then invalidate the VSI
1364  * indexes.
1365  */
1366 void ice_vf_vsi_release(struct ice_vf *vf)
1367 {
1368 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1369 
1370 	if (WARN_ON(!vsi))
1371 		return;
1372 
1373 	ice_vsi_release(vsi);
1374 	ice_vf_invalidate_vsi(vf);
1375 }
1376 
1377 /**
1378  * ice_get_vf_ctrl_vsi - Get first VF control VSI pointer
1379  * @pf: the PF private structure
1380  * @vsi: pointer to the VSI
1381  *
1382  * Return first found VF control VSI other than the vsi
1383  * passed by parameter. This function is used to determine
1384  * whether new resources have to be allocated for control VSI
1385  * or they can be shared with existing one.
1386  *
1387  * Return found VF control VSI pointer other itself. Return
1388  * NULL Otherwise.
1389  *
1390  */
1391 struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
1392 {
1393 	struct ice_vsi *ctrl_vsi = NULL;
1394 	struct ice_vf *vf;
1395 	unsigned int bkt;
1396 
1397 	rcu_read_lock();
1398 	ice_for_each_vf_rcu(pf, bkt, vf) {
1399 		if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
1400 			ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
1401 			break;
1402 		}
1403 	}
1404 
1405 	rcu_read_unlock();
1406 	return ctrl_vsi;
1407 }
1408 
1409 /**
1410  * ice_vf_update_mac_lldp_num - update the VF's number of LLDP addresses
1411  * @vf: a VF to add the address to
1412  * @vsi: the corresponding VSI
1413  * @incr: is the rule added or removed
1414  */
1415 void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi,
1416 				bool incr)
1417 {
1418 	bool lldp_by_fw = test_bit(ICE_FLAG_FW_LLDP_AGENT, vsi->back->flags);
1419 	bool was_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw;
1420 	bool is_ena;
1421 
1422 	if (WARN_ON(!vsi)) {
1423 		vf->num_mac_lldp = 0;
1424 		return;
1425 	}
1426 
1427 	vf->num_mac_lldp += incr ? 1 : -1;
1428 	is_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw;
1429 
1430 	if (was_ena != is_ena)
1431 		ice_vsi_cfg_sw_lldp(vsi, false, is_ena);
1432 }
1433