xref: /linux/drivers/net/ethernet/intel/ice/ice_vf_lib.c (revision 4eca0ef49af9b2b0c52ef2b58e045ab34629796b)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2022, Intel Corporation. */
3 
4 #include "ice_vf_lib_private.h"
5 #include "ice.h"
6 #include "ice_lib.h"
7 #include "ice_fltr.h"
8 #include "ice_virtchnl_allowlist.h"
9 
10 /* Public functions which may be accessed by all driver files */
11 
12 /**
13  * ice_get_vf_by_id - Get pointer to VF by ID
14  * @pf: the PF private structure
15  * @vf_id: the VF ID to locate
16  *
17  * Locate and return a pointer to the VF structure associated with a given ID.
18  * Returns NULL if the ID does not have a valid VF structure associated with
19  * it.
20  *
21  * This function takes a reference to the VF, which must be released by
22  * calling ice_put_vf() once the caller is finished accessing the VF structure
23  * returned.
24  */
25 struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
26 {
27 	struct ice_vf *vf;
28 
29 	rcu_read_lock();
30 	hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
31 		if (vf->vf_id == vf_id) {
32 			struct ice_vf *found;
33 
34 			if (kref_get_unless_zero(&vf->refcnt))
35 				found = vf;
36 			else
37 				found = NULL;
38 
39 			rcu_read_unlock();
40 			return found;
41 		}
42 	}
43 	rcu_read_unlock();
44 
45 	return NULL;
46 }
47 
/**
 * ice_release_vf - Release VF associated with a refcount
 * @ref: the kref decremented to zero
 *
 * Callback function for kref_put to release a VF once its reference count has
 * hit zero.
 */
static void ice_release_vf(struct kref *ref)
{
	struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);

	/* drop the reference on the VF's PCI device before freeing the VF */
	pci_dev_put(vf->vfdev);

	/* variant-specific teardown and free of the VF structure itself */
	vf->vf_ops->free(vf);
}
63 
/**
 * ice_put_vf - Release a reference to a VF
 * @vf: the VF structure to decrease reference count on
 *
 * Decrease the reference count for a VF, and free the entry if it is no
 * longer in use.
 *
 * This must be called after ice_get_vf_by_id() once the reference to the VF
 * structure is no longer used. Otherwise, the VF structure will never be
 * freed.
 */
void ice_put_vf(struct ice_vf *vf)
{
	/* ice_release_vf() frees the VF when the last reference is dropped */
	kref_put(&vf->refcnt, ice_release_vf);
}
79 
80 /**
81  * ice_has_vfs - Return true if the PF has any associated VFs
82  * @pf: the PF private structure
83  *
84  * Return whether or not the PF has any allocated VFs.
85  *
86  * Note that this function only guarantees that there are no VFs at the point
87  * of calling it. It does not guarantee that no more VFs will be added.
88  */
89 bool ice_has_vfs(struct ice_pf *pf)
90 {
91 	/* A simple check that the hash table is not empty does not require
92 	 * the mutex or rcu_read_lock.
93 	 */
94 	return !hash_empty(pf->vfs.table);
95 }
96 
97 /**
98  * ice_get_num_vfs - Get number of allocated VFs
99  * @pf: the PF private structure
100  *
101  * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
102  * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
103  * the output of this function.
104  */
105 u16 ice_get_num_vfs(struct ice_pf *pf)
106 {
107 	struct ice_vf *vf;
108 	unsigned int bkt;
109 	u16 num_vfs = 0;
110 
111 	rcu_read_lock();
112 	ice_for_each_vf_rcu(pf, bkt, vf)
113 		num_vfs++;
114 	rcu_read_unlock();
115 
116 	return num_vfs;
117 }
118 
119 /**
120  * ice_get_vf_vsi - get VF's VSI based on the stored index
121  * @vf: VF used to get VSI
122  */
123 struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
124 {
125 	if (vf->lan_vsi_idx == ICE_NO_VSI)
126 		return NULL;
127 
128 	return vf->pf->vsi[vf->lan_vsi_idx];
129 }
130 
131 /**
132  * ice_is_vf_disabled
133  * @vf: pointer to the VF info
134  *
135  * If the PF has been disabled, there is no need resetting VF until PF is
136  * active again. Similarly, if the VF has been disabled, this means something
137  * else is resetting the VF, so we shouldn't continue.
138  *
139  * Returns true if the caller should consider the VF as disabled whether
140  * because that single VF is explicitly disabled or because the PF is
141  * currently disabled.
142  */
143 bool ice_is_vf_disabled(struct ice_vf *vf)
144 {
145 	struct ice_pf *pf = vf->pf;
146 
147 	return (test_bit(ICE_VF_DIS, pf->state) ||
148 		test_bit(ICE_VF_STATE_DIS, vf->vf_states));
149 }
150 
/**
 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
 * @vf: the VF being reset
 *
 * The max poll time is about ~800ms, which is about the maximum time it takes
 * for a VF to be reset and/or a VF driver to be removed.
 */
static void ice_wait_on_vf_reset(struct ice_vf *vf)
{
	int i;

	for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
		/* the VF returns to the INIT state once reset completes */
		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
			break;
		msleep(ICE_MAX_VF_RESET_SLEEP_MS);
	}
}
168 
169 /**
170  * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
171  * @vf: VF to check if it's ready to be configured/queried
172  *
173  * The purpose of this function is to make sure the VF is not in reset, not
174  * disabled, and initialized so it can be configured and/or queried by a host
175  * administrator.
176  */
177 int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
178 {
179 	ice_wait_on_vf_reset(vf);
180 
181 	if (ice_is_vf_disabled(vf))
182 		return -EINVAL;
183 
184 	if (ice_check_vf_init(vf))
185 		return -EBUSY;
186 
187 	return 0;
188 }
189 
/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
	/* Inform VF that it is no longer active, as a warning */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * when it's safe again to access VF's VSI.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
	 * needs to clear them in the case of VFR/VFLR. If this is done for
	 * PFR, it can mess up VF resets because the VF driver may already
	 * have started cleanup by the time we get here.
	 */
	if (!is_pfr)
		vf->vf_ops->clear_mbx_register(vf);

	/* start the actual reset via the variant-specific register write */
	vf->vf_ops->trigger_reset_register(vf, is_vflr);
}
220 
/**
 * ice_vf_clear_counters - reset counters and event stats tracked for this VF
 * @vf: the VF to clear counters for
 *
 * Zero the VLAN and MAC filter counts along with the MDD (malicious driver
 * detection) event statistics, in preparation for VF reset.
 */
static void ice_vf_clear_counters(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	/* the VF may have no VSI assigned at this point */
	if (vsi)
		vsi->num_vlan = 0;

	vf->num_mac = 0;
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}
232 
/**
 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
 * @vf: VF to perform pre VSI rebuild tasks
 *
 * These tasks are items that don't need to be amortized since they are most
 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
 */
static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
{
	/* Close any IRQ mapping now; irq_close is optional per VF variant */
	if (vf->vf_ops->irq_close)
		vf->vf_ops->irq_close(vf);

	ice_vf_clear_counters(vf);
	/* acknowledge the completed reset in hardware */
	vf->vf_ops->clear_reset_trigger(vf);
}
249 
250 /**
251  * ice_vf_recreate_vsi - Release and re-create the VF's VSI
252  * @vf: VF to recreate the VSI for
253  *
254  * This is only called when a single VF is being reset (i.e. VVF, VFLR, host
255  * VF configuration change, etc)
256  *
257  * It releases and then re-creates a new VSI.
258  */
259 static int ice_vf_recreate_vsi(struct ice_vf *vf)
260 {
261 	struct ice_pf *pf = vf->pf;
262 	int err;
263 
264 	ice_vf_vsi_release(vf);
265 
266 	err = vf->vf_ops->create_vsi(vf);
267 	if (err) {
268 		dev_err(ice_pf_to_dev(pf),
269 			"Failed to recreate the VF%u's VSI, error %d\n",
270 			vf->vf_id, err);
271 		return err;
272 	}
273 
274 	return 0;
275 }
276 
/**
 * ice_vf_rebuild_vsi - rebuild the VF's VSI
 * @vf: VF to rebuild the VSI for
 *
 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
 * host, PFR, CORER, etc.).
 *
 * It reprograms the VSI configuration back into hardware.
 */
static int ice_vf_rebuild_vsi(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_pf *pf = vf->pf;

	/* the VF is expected to still own a VSI in the all-VF reset flow */
	if (WARN_ON(!vsi))
		return -EINVAL;

	if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) {
		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
			vf->vf_id);
		return -EIO;
	}
	/* vsi->idx will remain the same in this case so don't update
	 * vf->lan_vsi_idx
	 */
	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
	vf->lan_vsi_num = vsi->vsi_num;

	return 0;
}
307 
/**
 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
 * @vf: VF to add MAC filters for
 * @vsi: Pointer to VSI
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
 */
static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vf->pf);
	int err;

	if (ice_vf_is_port_vlan_ena(vf)) {
		/* program the port VLAN into the VSI, then add its filter */
		err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
		if (err) {
			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
				vf->vf_id, err);
			return err;
		}

		err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
	} else {
		err = ice_vsi_add_vlan_zero(vsi);
	}

	if (err) {
		dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
			ice_vf_is_port_vlan_ena(vf) ?
			ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
		return err;
	}

	/* a failure to enable Rx VLAN filtering is intentionally not fatal
	 * to the rebuild; warn and report success regardless
	 */
	err = vlan_ops->ena_rx_filtering(vsi);
	if (err)
		dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
			 vf->vf_id, vsi->idx, err);

	return 0;
}
349 
/**
 * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
 * @vf: VF to re-apply the configuration for
 *
 * Called after a VF VSI has been re-added/rebuild during reset. The PF driver
 * needs to re-apply the host configured Tx rate limiting configuration.
 */
static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	int err;

	if (WARN_ON(!vsi))
		return -EINVAL;

	/* rates are stored in Mbps; scale by 1000 for the scheduler API */
	if (vf->min_tx_rate) {
		err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
		if (err) {
			dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
				vf->min_tx_rate, vf->vf_id, err);
			return err;
		}
	}

	if (vf->max_tx_rate) {
		err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
		if (err) {
			dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
				vf->max_tx_rate, vf->vf_id, err);
			return err;
		}
	}

	return 0;
}
386 
387 /**
388  * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
389  * @vf: VF to configure trust setting for
390  */
391 static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
392 {
393 	assign_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps, vf->trusted);
394 }
395 
/**
 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
 * @vf: VF to add MAC filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
 */
static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	u8 broadcast[ETH_ALEN];
	int status;

	if (WARN_ON(!vsi))
		return -EINVAL;

	/* host MAC filters are not added in switchdev mode
	 * NOTE(review): presumably traffic is steered via the representor
	 * path instead - confirm against the eswitch code
	 */
	if (ice_is_eswitch_mode_switchdev(vf->pf))
		return 0;

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
			vf->vf_id, status);
		return status;
	}

	vf->num_mac++;

	/* re-add the VF's unicast address only if one has been assigned */
	if (is_valid_ether_addr(vf->hw_lan_addr)) {
		status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
					  ICE_FWD_TO_VSI);
		if (status) {
			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
				&vf->hw_lan_addr[0], vf->vf_id,
				status);
			return status;
		}
		vf->num_mac++;

		ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
	}

	return 0;
}
442 
443 /**
444  * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
445  * @vsi: Pointer to VSI
446  *
447  * This function moves VSI into corresponding scheduler aggregator node
448  * based on cached value of "aggregator node info" per VSI
449  */
450 static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
451 {
452 	struct ice_pf *pf = vsi->back;
453 	struct device *dev;
454 	int status;
455 
456 	if (!vsi->agg_node)
457 		return;
458 
459 	dev = ice_pf_to_dev(pf);
460 	if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
461 		dev_dbg(dev,
462 			"agg_id %u already has reached max_num_vsis %u\n",
463 			vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
464 		return;
465 	}
466 
467 	status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
468 				     vsi->idx, vsi->tc_cfg.ena_tc);
469 	if (status)
470 		dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
471 			vsi->idx, vsi->agg_node->agg_id);
472 	else
473 		vsi->agg_node->num_vsis++;
474 }
475 
/**
 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
 * @vf: VF to rebuild host configuration on
 *
 * Re-applies trust, MAC, VLAN, Tx rate, spoof-check and aggregator settings.
 * Each step only logs on failure; the rebuild continues regardless.
 */
static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_vf_set_host_trust_cfg(vf);

	if (ice_vf_rebuild_host_mac_cfg(vf))
		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_tx_rate_cfg(vf))
		dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
			vf->vf_id);

	if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
		dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
			vf->vf_id);

	/* rebuild aggregator node config for main VF VSI */
	ice_vf_rebuild_aggregator_node_cfg(vsi);
}
509 
510 /**
511  * ice_set_vf_state_qs_dis - Set VF queues state to disabled
512  * @vf: pointer to the VF structure
513  */
514 static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
515 {
516 	/* Clear Rx/Tx enabled queues flag */
517 	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
518 	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
519 	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
520 }
521 
522 /**
523  * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
524  * @vf: VF to set in initialized state
525  *
526  * After this function the VF will be ready to receive/handle the
527  * VIRTCHNL_OP_GET_VF_RESOURCES message
528  */
529 static void ice_vf_set_initialized(struct ice_vf *vf)
530 {
531 	ice_set_vf_state_qs_dis(vf);
532 	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
533 	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
534 	clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
535 	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
536 	memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
537 }
538 
/**
 * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild
 * @vf: the VF being reset
 *
 * Perform reset tasks which must occur after the VSI has been re-created or
 * rebuilt during a VF reset.
 */
static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
{
	/* restore host-administered settings, then re-open the VF for
	 * VIRTCHNL communication
	 */
	ice_vf_rebuild_host_cfg(vf);
	ice_vf_set_initialized(vf);

	/* variant-specific finalization (SR-IOV vs Scalable IOV) */
	vf->vf_ops->post_vsi_rebuild(vf);
}
553 
554 /**
555  * ice_is_any_vf_in_unicast_promisc - check if any VF(s)
556  * are in unicast promiscuous mode
557  * @pf: PF structure for accessing VF(s)
558  *
559  * Return false if no VF(s) are in unicast promiscuous mode,
560  * else return true
561  */
562 bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
563 {
564 	bool is_vf_promisc = false;
565 	struct ice_vf *vf;
566 	unsigned int bkt;
567 
568 	rcu_read_lock();
569 	ice_for_each_vf_rcu(pf, bkt, vf) {
570 		/* found a VF that has promiscuous mode configured */
571 		if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
572 			is_vf_promisc = true;
573 			break;
574 		}
575 	}
576 	rcu_read_unlock();
577 
578 	return is_vf_promisc;
579 }
580 
581 /**
582  * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes
583  * @vf: the VF pointer
584  * @vsi: the VSI to configure
585  * @ucast_m: promiscuous mask to apply to unicast
586  * @mcast_m: promiscuous mask to apply to multicast
587  *
588  * Decide which mask should be used for unicast and multicast filter,
589  * based on presence of VLANs
590  */
591 void
592 ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
593 			 u8 *ucast_m, u8 *mcast_m)
594 {
595 	if (ice_vf_is_port_vlan_ena(vf) ||
596 	    ice_vsi_has_non_zero_vlans(vsi)) {
597 		*mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
598 		*ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
599 	} else {
600 		*mcast_m = ICE_MCAST_PROMISC_BITS;
601 		*ucast_m = ICE_UCAST_PROMISC_BITS;
602 	}
603 }
604 
/**
 * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI
 * @vf: the VF pointer
 * @vsi: the VSI to configure
 *
 * Clear all promiscuous/allmulticast filters for a VF
 *
 * NOTE: the return value reflects only the last attempted operation; an
 * earlier unicast failure may be overwritten by a later multicast result.
 * The only caller in this file ignores the return value.
 */
static int
ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_pf *pf = vf->pf;
	u8 ucast_m, mcast_m;
	int ret = 0;

	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
		/* without true promiscuous support, unicast promiscuous is
		 * implemented via the default VSI mechanism instead
		 */
		if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
			if (ice_is_dflt_vsi_in_use(vsi->port_info))
				ret = ice_clear_dflt_vsi(vsi);
		} else {
			ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
		}

		if (ret) {
			dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n");
		} else {
			clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
			dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n");
		}
	}

	if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
		ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
		if (ret) {
			dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n");
		} else {
			clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
			dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n");
		}
	}
	return ret;
}
647 
648 /**
649  * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
650  * @vf: the VF to configure
651  * @vsi: the VF's VSI
652  * @promisc_m: the promiscuous mode to enable
653  */
654 int
655 ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
656 {
657 	struct ice_hw *hw = &vsi->back->hw;
658 	int status;
659 
660 	if (ice_vf_is_port_vlan_ena(vf))
661 		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
662 						  ice_vf_get_port_vlan_id(vf));
663 	else if (ice_vsi_has_non_zero_vlans(vsi))
664 		status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
665 	else
666 		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
667 
668 	if (status && status != -EEXIST) {
669 		dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
670 			vf->vf_id, status);
671 		return status;
672 	}
673 
674 	return 0;
675 }
676 
677 /**
678  * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI
679  * @vf: the VF to configure
680  * @vsi: the VF's VSI
681  * @promisc_m: the promiscuous mode to disable
682  */
683 int
684 ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
685 {
686 	struct ice_hw *hw = &vsi->back->hw;
687 	int status;
688 
689 	if (ice_vf_is_port_vlan_ena(vf))
690 		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
691 						    ice_vf_get_port_vlan_id(vf));
692 	else if (ice_vsi_has_non_zero_vlans(vsi))
693 		status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
694 	else
695 		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
696 
697 	if (status && status != -ENOENT) {
698 		dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
699 			vf->vf_id, status);
700 		return status;
701 	}
702 
703 	return 0;
704 }
705 
/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 *
 * Reset all VFs at once, in response to a PF or other device reset.
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise it must perform
 * these resets in a serialized fashion.
 */
void ice_reset_all_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!ice_has_vfs(pf))
		return;

	/* hold the table lock for the whole operation; per-VF cfg_lock is
	 * taken inside it below
	 */
	mutex_lock(&pf->vfs.table_lock);

	/* clear all malicious info if the VFs are getting reset */
	ice_for_each_vf(pf, bkt, vf)
		ice_mbx_clear_malvf(&vf->mbx_info);

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
		mutex_unlock(&pf->vfs.table_lock);
		return;
	}

	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, bkt, vf)
		ice_trigger_vf_reset(vf, true, true);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Now that we've triggered all of the VFs, iterate
	 * the table again and wait for each VF to complete.
	 */
	ice_for_each_vf(pf, bkt, vf) {
		if (!vf->vf_ops->poll_reset_status(vf)) {
			/* Display a warning if at least one VF didn't manage
			 * to reset in time, but continue on with the
			 * operation. Note this stops polling the remaining
			 * VFs as well.
			 */
			dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
			break;
		}
	}

	/* free VF resources to begin resetting the VSI state */
	ice_for_each_vf(pf, bkt, vf) {
		mutex_lock(&vf->cfg_lock);

		vf->driver_caps = 0;
		ice_vc_set_default_allowlist(vf);

		ice_vf_fdir_exit(vf);
		ice_vf_fdir_init(vf);
		/* clean VF control VSI when resetting VFs since it should be
		 * setup only when VF creates its first FDIR rule.
		 */
		if (vf->ctrl_vsi_idx != ICE_NO_VSI)
			ice_vf_ctrl_invalidate_vsi(vf);

		ice_vf_pre_vsi_rebuild(vf);
		ice_vf_rebuild_vsi(vf);
		ice_vf_post_vsi_rebuild(vf);

		mutex_unlock(&vf->cfg_lock);
	}

	if (ice_is_eswitch_mode_switchdev(pf))
		if (ice_eswitch_rebuild(pf))
			dev_warn(dev, "eswitch rebuild failed\n");

	ice_flush(hw);
	clear_bit(ICE_VF_DIS, pf->state);

	mutex_unlock(&pf->vfs.table_lock);
}
790 
791 /**
792  * ice_notify_vf_reset - Notify VF of a reset event
793  * @vf: pointer to the VF structure
794  */
795 static void ice_notify_vf_reset(struct ice_vf *vf)
796 {
797 	struct ice_hw *hw = &vf->pf->hw;
798 	struct virtchnl_pf_event pfe;
799 
800 	/* Bail out if VF is in disabled state, neither initialized, nor active
801 	 * state - otherwise proceed with notifications
802 	 */
803 	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
804 	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
805 	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
806 		return;
807 
808 	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
809 	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
810 	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
811 			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
812 			      NULL);
813 }
814 
/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
 * @flags: flags controlling behavior of the reset
 *
 * Flags:
 *   ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event
 *   ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
 *   ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
 *
 * Returns 0 if the VF is currently in reset, if resets are disabled, or if
 * the VF resets successfully. Returns an error code if the VF fails to
 * rebuild.
 */
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
	struct ice_pf *pf = vf->pf;
	struct ice_lag *lag;
	struct ice_vsi *vsi;
	u8 act_prt, pri_prt;
	struct device *dev;
	int err = 0;
	bool rsd;

	dev = ice_pf_to_dev(pf);
	act_prt = ICE_LAG_INVALID_PORT;
	pri_prt = pf->hw.port_info->lport;

	if (flags & ICE_VF_RESET_NOTIFY)
		ice_notify_vf_reset(vf);

	if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
			vf->vf_id);
		return 0;
	}

	/* If this is a primary bonded interface with a different active
	 * port, temporarily move the VF scheduling nodes back to the
	 * primary port for the duration of the reset; they are restored
	 * at out_unlock below.
	 */
	lag = pf->lag;
	mutex_lock(&pf->lag_mutex);
	if (lag && lag->bonded && lag->primary) {
		act_prt = lag->active_port;
		if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
		    lag->upper_netdev)
			ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
		else
			act_prt = ICE_LAG_INVALID_PORT;
	}

	/* either take cfg_lock here or require the caller to hold it */
	if (flags & ICE_VF_RESET_LOCK)
		mutex_lock(&vf->cfg_lock);
	else
		lockdep_assert_held(&vf->cfg_lock);

	if (ice_is_vf_disabled(vf)) {
		vsi = ice_get_vf_vsi(vf);
		if (!vsi) {
			dev_dbg(dev, "VF is already removed\n");
			err = -EINVAL;
			goto out_unlock;
		}
		ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);

		if (ice_vsi_is_rx_queue_active(vsi))
			ice_vsi_stop_all_rx_rings(vsi);

		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
			vf->vf_id);
		goto out_unlock;
	}

	/* Set VF disable bit state here, before triggering reset */
	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
	ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);

	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EIO;
		goto out_unlock;
	}

	ice_dis_vf_qs(vf);

	/* Call Disable LAN Tx queue AQ whether or not queues are
	 * enabled. This is needed for successful completion of VFR.
	 */
	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
			NULL, vf->vf_ops->reset_type, vf->vf_id, NULL);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	rsd = vf->vf_ops->poll_reset_status(vf);

	/* Display a warning if VF didn't manage to reset in time, but need to
	 * continue on with the operation.
	 */
	if (!rsd)
		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);

	vf->driver_caps = 0;
	ice_vc_set_default_allowlist(vf);

	/* disable promiscuous modes in case they were enabled
	 * ignore any error if disabling process failed
	 */
	ice_vf_clear_all_promisc_modes(vf, vsi);

	ice_vf_fdir_exit(vf);
	ice_vf_fdir_init(vf);
	/* clean VF control VSI when resetting VF since it should be setup
	 * only when VF creates its first FDIR rule.
	 */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	ice_vf_pre_vsi_rebuild(vf);

	if (ice_vf_recreate_vsi(vf)) {
		dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
			vf->vf_id);
		err = -EFAULT;
		goto out_unlock;
	}

	ice_vf_post_vsi_rebuild(vf);
	/* re-fetch: the VSI was released and recreated above */
	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EINVAL;
		goto out_unlock;
	}

	ice_eswitch_update_repr(vsi);

	/* if the VF has been reset allow it to come up again */
	ice_mbx_clear_malvf(&vf->mbx_info);

out_unlock:
	/* release locks in reverse acquisition order */
	if (flags & ICE_VF_RESET_LOCK)
		mutex_unlock(&vf->cfg_lock);

	if (lag && lag->bonded && lag->primary &&
	    act_prt != ICE_LAG_INVALID_PORT)
		ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
	mutex_unlock(&pf->lag_mutex);

	return err;
}
962 
/**
 * ice_set_vf_state_dis - Set VF state to disabled
 * @vf: pointer to the VF structure
 *
 * Mark all of the VF's queues disabled and clear its reset state via the
 * variant-specific callback.
 */
void ice_set_vf_state_dis(struct ice_vf *vf)
{
	ice_set_vf_state_qs_dis(vf);
	vf->vf_ops->clear_reset_state(vf);
}
972 
973 /* Private functions only accessed from other virtualization files */
974 
975 /**
976  * ice_initialize_vf_entry - Initialize a VF entry
977  * @vf: pointer to the VF structure
978  */
979 void ice_initialize_vf_entry(struct ice_vf *vf)
980 {
981 	struct ice_pf *pf = vf->pf;
982 	struct ice_vfs *vfs;
983 
984 	vfs = &pf->vfs;
985 
986 	/* assign default capabilities */
987 	vf->spoofchk = true;
988 	vf->num_vf_qs = vfs->num_qps_per;
989 	ice_vc_set_default_allowlist(vf);
990 	ice_virtchnl_set_dflt_ops(vf);
991 
992 	/* ctrl_vsi_idx will be set to a valid value only when iAVF
993 	 * creates its first fdir rule.
994 	 */
995 	ice_vf_ctrl_invalidate_vsi(vf);
996 	ice_vf_fdir_init(vf);
997 
998 	/* Initialize mailbox info for this VF */
999 	ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);
1000 
1001 	mutex_init(&vf->cfg_lock);
1002 }
1003 
/**
 * ice_dis_vf_qs - Disable the VF queues
 * @vf: pointer to the VF structure
 *
 * Stop the VF's Tx and Rx rings and record the queues-disabled state.
 */
void ice_dis_vf_qs(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
	ice_vsi_stop_all_rx_rings(vsi);
	/* clear the software queue-enabled tracking to match hardware */
	ice_set_vf_state_qs_dis(vf);
}
1019 
/**
 * ice_err_to_virt_err - translate errors for VF return code
 * @err: error return code
 *
 * Map a kernel errno value onto the closest virtchnl status code for
 * reporting back to a VF. Unknown errors map to NOT_SUPPORTED.
 */
enum virtchnl_status_code ice_err_to_virt_err(int err)
{
	switch (err) {
	case 0:
		return VIRTCHNL_STATUS_SUCCESS;
	case -EINVAL:
	case -ENODEV:
		return VIRTCHNL_STATUS_ERR_PARAM;
	case -ENOMEM:
		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
	case -EALREADY:
	case -EBUSY:
	case -EIO:
	case -ENOSPC:
		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
	default:
		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
	}
}
1043 
1044 /**
1045  * ice_check_vf_init - helper to check if VF init complete
1046  * @vf: the pointer to the VF to check
1047  */
1048 int ice_check_vf_init(struct ice_vf *vf)
1049 {
1050 	struct ice_pf *pf = vf->pf;
1051 
1052 	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
1053 		dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
1054 			vf->vf_id);
1055 		return -EBUSY;
1056 	}
1057 	return 0;
1058 }
1059 
1060 /**
1061  * ice_vf_get_port_info - Get the VF's port info structure
1062  * @vf: VF used to get the port info structure for
1063  */
1064 struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
1065 {
1066 	return vf->pf->hw.port_info;
1067 }
1068 
1069 /**
1070  * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior
1071  * @vsi: the VSI to configure
1072  * @enable: whether to enable or disable the spoof checking
1073  *
1074  * Configure a VSI to enable (or disable) spoof checking behavior.
1075  */
1076 static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
1077 {
1078 	struct ice_vsi_ctx *ctx;
1079 	int err;
1080 
1081 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1082 	if (!ctx)
1083 		return -ENOMEM;
1084 
1085 	ctx->info.sec_flags = vsi->info.sec_flags;
1086 	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
1087 
1088 	if (enable)
1089 		ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1090 	else
1091 		ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1092 
1093 	err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
1094 	if (err)
1095 		dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n",
1096 			enable ? "ON" : "OFF", vsi->vsi_num, err);
1097 	else
1098 		vsi->info.sec_flags = ctx->info.sec_flags;
1099 
1100 	kfree(ctx);
1101 
1102 	return err;
1103 }
1104 
1105 /**
1106  * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI
1107  * @vsi: VSI to enable Tx spoof checking for
1108  */
1109 static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
1110 {
1111 	struct ice_vsi_vlan_ops *vlan_ops;
1112 	int err = 0;
1113 
1114 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1115 
1116 	/* Allow VF with VLAN 0 only to send all tagged traffic */
1117 	if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
1118 		err = vlan_ops->ena_tx_filtering(vsi);
1119 		if (err)
1120 			return err;
1121 	}
1122 
1123 	return ice_cfg_mac_antispoof(vsi, true);
1124 }
1125 
1126 /**
1127  * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI
1128  * @vsi: VSI to disable Tx spoof checking for
1129  */
1130 static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi)
1131 {
1132 	struct ice_vsi_vlan_ops *vlan_ops;
1133 	int err;
1134 
1135 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1136 
1137 	err = vlan_ops->dis_tx_filtering(vsi);
1138 	if (err)
1139 		return err;
1140 
1141 	return ice_cfg_mac_antispoof(vsi, false);
1142 }
1143 
1144 /**
1145  * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI
1146  * @vsi: VSI associated to the VF
1147  * @enable: whether to enable or disable the spoof checking
1148  */
1149 int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable)
1150 {
1151 	int err;
1152 
1153 	if (enable)
1154 		err = ice_vsi_ena_spoofchk(vsi);
1155 	else
1156 		err = ice_vsi_dis_spoofchk(vsi);
1157 
1158 	return err;
1159 }
1160 
1161 /**
1162  * ice_is_vf_trusted
1163  * @vf: pointer to the VF info
1164  */
1165 bool ice_is_vf_trusted(struct ice_vf *vf)
1166 {
1167 	return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1168 }
1169 
1170 /**
1171  * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
1172  * @vf: the VF to check
1173  *
1174  * Returns true if the VF has no Rx and no Tx queues enabled and returns false
1175  * otherwise
1176  */
1177 bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
1178 {
1179 	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
1180 		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
1181 }
1182 
1183 /**
1184  * ice_is_vf_link_up - check if the VF's link is up
1185  * @vf: VF to check if link is up
1186  */
1187 bool ice_is_vf_link_up(struct ice_vf *vf)
1188 {
1189 	struct ice_port_info *pi = ice_vf_get_port_info(vf);
1190 
1191 	if (ice_check_vf_init(vf))
1192 		return false;
1193 
1194 	if (ice_vf_has_no_qs_ena(vf))
1195 		return false;
1196 	else if (vf->link_forced)
1197 		return vf->link_up;
1198 	else
1199 		return pi->phy.link_info.link_info &
1200 			ICE_AQ_LINK_UP;
1201 }
1202 
1203 /**
1204  * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
1205  * @vf: VF that control VSI is being invalidated on
1206  */
1207 void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
1208 {
1209 	vf->ctrl_vsi_idx = ICE_NO_VSI;
1210 }
1211 
1212 /**
1213  * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
1214  * @vf: VF that control VSI is being released on
1215  */
1216 void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
1217 {
1218 	ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
1219 	ice_vf_ctrl_invalidate_vsi(vf);
1220 }
1221 
1222 /**
1223  * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
1224  * @vf: VF to setup control VSI for
1225  *
1226  * Returns pointer to the successfully allocated VSI struct on success,
1227  * otherwise returns NULL on failure.
1228  */
1229 struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
1230 {
1231 	struct ice_vsi_cfg_params params = {};
1232 	struct ice_pf *pf = vf->pf;
1233 	struct ice_vsi *vsi;
1234 
1235 	params.type = ICE_VSI_CTRL;
1236 	params.pi = ice_vf_get_port_info(vf);
1237 	params.vf = vf;
1238 	params.flags = ICE_VSI_FLAG_INIT;
1239 
1240 	vsi = ice_vsi_setup(pf, &params);
1241 	if (!vsi) {
1242 		dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
1243 		ice_vf_ctrl_invalidate_vsi(vf);
1244 	}
1245 
1246 	return vsi;
1247 }
1248 
1249 /**
1250  * ice_vf_init_host_cfg - Initialize host admin configuration
1251  * @vf: VF to initialize
1252  * @vsi: the VSI created at initialization
1253  *
1254  * Initialize the VF host configuration. Called during VF creation to setup
1255  * VLAN 0, add the VF VSI broadcast filter, and setup spoof checking. It
1256  * should only be called during VF creation.
1257  */
1258 int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
1259 {
1260 	struct ice_vsi_vlan_ops *vlan_ops;
1261 	struct ice_pf *pf = vf->pf;
1262 	u8 broadcast[ETH_ALEN];
1263 	struct device *dev;
1264 	int err;
1265 
1266 	dev = ice_pf_to_dev(pf);
1267 
1268 	err = ice_vsi_add_vlan_zero(vsi);
1269 	if (err) {
1270 		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
1271 			 vf->vf_id);
1272 		return err;
1273 	}
1274 
1275 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1276 	err = vlan_ops->ena_rx_filtering(vsi);
1277 	if (err) {
1278 		dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
1279 			 vf->vf_id);
1280 		return err;
1281 	}
1282 
1283 	eth_broadcast_addr(broadcast);
1284 	err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
1285 	if (err) {
1286 		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n",
1287 			vf->vf_id, err);
1288 		return err;
1289 	}
1290 
1291 	vf->num_mac = 1;
1292 
1293 	err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
1294 	if (err) {
1295 		dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
1296 			 vf->vf_id);
1297 		return err;
1298 	}
1299 
1300 	return 0;
1301 }
1302 
1303 /**
1304  * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
1305  * @vf: VF to remove access to VSI for
1306  */
1307 void ice_vf_invalidate_vsi(struct ice_vf *vf)
1308 {
1309 	vf->lan_vsi_idx = ICE_NO_VSI;
1310 	vf->lan_vsi_num = ICE_NO_VSI;
1311 }
1312 
1313 /**
1314  * ice_vf_vsi_release - Release the VF VSI and invalidate indexes
1315  * @vf: pointer to the VF structure
1316  *
1317  * Release the VF associated with this VSI and then invalidate the VSI
1318  * indexes.
1319  */
1320 void ice_vf_vsi_release(struct ice_vf *vf)
1321 {
1322 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1323 
1324 	if (WARN_ON(!vsi))
1325 		return;
1326 
1327 	ice_vsi_release(vsi);
1328 	ice_vf_invalidate_vsi(vf);
1329 }
1330 
1331 /**
1332  * ice_get_vf_ctrl_vsi - Get first VF control VSI pointer
1333  * @pf: the PF private structure
1334  * @vsi: pointer to the VSI
1335  *
1336  * Return first found VF control VSI other than the vsi
1337  * passed by parameter. This function is used to determine
1338  * whether new resources have to be allocated for control VSI
1339  * or they can be shared with existing one.
1340  *
1341  * Return found VF control VSI pointer other itself. Return
1342  * NULL Otherwise.
1343  *
1344  */
1345 struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
1346 {
1347 	struct ice_vsi *ctrl_vsi = NULL;
1348 	struct ice_vf *vf;
1349 	unsigned int bkt;
1350 
1351 	rcu_read_lock();
1352 	ice_for_each_vf_rcu(pf, bkt, vf) {
1353 		if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
1354 			ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
1355 			break;
1356 		}
1357 	}
1358 
1359 	rcu_read_unlock();
1360 	return ctrl_vsi;
1361 }
1362