xref: /linux/drivers/net/ethernet/intel/ice/ice_vf_lib.c (revision f98e51585f2ca00efbd16e27ed1d94a5e5520703)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2022, Intel Corporation. */
3 
4 #include "ice_vf_lib_private.h"
5 #include "ice.h"
6 #include "ice_lib.h"
7 #include "ice_fltr.h"
8 #include "ice_virtchnl_allowlist.h"
9 
10 /* Public functions which may be accessed by all driver files */
11 
12 /**
13  * ice_get_vf_by_id - Get pointer to VF by ID
14  * @pf: the PF private structure
15  * @vf_id: the VF ID to locate
16  *
17  * Locate and return a pointer to the VF structure associated with a given ID.
18  * Returns NULL if the ID does not have a valid VF structure associated with
19  * it.
20  *
21  * This function takes a reference to the VF, which must be released by
22  * calling ice_put_vf() once the caller is finished accessing the VF structure
23  * returned.
24  */
25 struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
26 {
27 	struct ice_vf *vf;
28 
29 	rcu_read_lock();
30 	hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
31 		if (vf->vf_id == vf_id) {
32 			struct ice_vf *found;
33 
34 			if (kref_get_unless_zero(&vf->refcnt))
35 				found = vf;
36 			else
37 				found = NULL;
38 
39 			rcu_read_unlock();
40 			return found;
41 		}
42 	}
43 	rcu_read_unlock();
44 
45 	return NULL;
46 }
47 
/**
 * ice_release_vf - Release VF associated with a refcount
 * @ref: the kref decremented to zero
 *
 * Callback function for kref_put to release a VF once its reference count has
 * hit zero.
 */
static void ice_release_vf(struct kref *ref)
{
	struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);

	/* The implementation-specific free op releases the VF memory itself;
	 * after this call the vf pointer must not be touched.
	 */
	vf->vf_ops->free(vf);
}
61 
/**
 * ice_put_vf - Release a reference to a VF
 * @vf: the VF structure to decrease reference count on
 *
 * Decrease the reference count for a VF, and free the entry if it is no
 * longer in use.
 *
 * This must be called after ice_get_vf_by_id() once the reference to the VF
 * structure is no longer used. Otherwise, the VF structure will never be
 * freed.
 */
void ice_put_vf(struct ice_vf *vf)
{
	/* ice_release_vf() frees the VF when the last reference drops */
	kref_put(&vf->refcnt, ice_release_vf);
}
77 
/**
 * ice_has_vfs - Return true if the PF has any associated VFs
 * @pf: the PF private structure
 *
 * Return whether or not the PF has any allocated VFs.
 *
 * Note that this function only guarantees that there are no VFs at the point
 * of calling it. It does not guarantee that no more VFs will be added.
 */
bool ice_has_vfs(struct ice_pf *pf)
{
	/* A simple check that the hash table is not empty does not require
	 * the mutex or rcu_read_lock.
	 */
	return !hash_empty(pf->vfs.table);
}
94 
95 /**
96  * ice_get_num_vfs - Get number of allocated VFs
97  * @pf: the PF private structure
98  *
99  * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
100  * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
101  * the output of this function.
102  */
103 u16 ice_get_num_vfs(struct ice_pf *pf)
104 {
105 	struct ice_vf *vf;
106 	unsigned int bkt;
107 	u16 num_vfs = 0;
108 
109 	rcu_read_lock();
110 	ice_for_each_vf_rcu(pf, bkt, vf)
111 		num_vfs++;
112 	rcu_read_unlock();
113 
114 	return num_vfs;
115 }
116 
117 /**
118  * ice_get_vf_vsi - get VF's VSI based on the stored index
119  * @vf: VF used to get VSI
120  */
121 struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
122 {
123 	if (vf->lan_vsi_idx == ICE_NO_VSI)
124 		return NULL;
125 
126 	return vf->pf->vsi[vf->lan_vsi_idx];
127 }
128 
129 /**
130  * ice_is_vf_disabled
131  * @vf: pointer to the VF info
132  *
133  * If the PF has been disabled, there is no need resetting VF until PF is
134  * active again. Similarly, if the VF has been disabled, this means something
135  * else is resetting the VF, so we shouldn't continue.
136  *
137  * Returns true if the caller should consider the VF as disabled whether
138  * because that single VF is explicitly disabled or because the PF is
139  * currently disabled.
140  */
141 bool ice_is_vf_disabled(struct ice_vf *vf)
142 {
143 	struct ice_pf *pf = vf->pf;
144 
145 	return (test_bit(ICE_VF_DIS, pf->state) ||
146 		test_bit(ICE_VF_STATE_DIS, vf->vf_states));
147 }
148 
/**
 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
 * @vf: The VF being reset
 *
 * The max poll time is about ~800ms, which is about the maximum time it takes
 * for a VF to be reset and/or a VF driver to be removed.
 */
static void ice_wait_on_vf_reset(struct ice_vf *vf)
{
	int i;

	/* ICE_VF_STATE_INIT is set once the VF's reset/rebuild completes */
	for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
			break;
		msleep(ICE_MAX_VF_RESET_SLEEP_MS);
	}
}
166 
167 /**
168  * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
169  * @vf: VF to check if it's ready to be configured/queried
170  *
171  * The purpose of this function is to make sure the VF is not in reset, not
172  * disabled, and initialized so it can be configured and/or queried by a host
173  * administrator.
174  */
175 int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
176 {
177 	ice_wait_on_vf_reset(vf);
178 
179 	if (ice_is_vf_disabled(vf))
180 		return -EINVAL;
181 
182 	if (ice_check_vf_init(vf))
183 		return -EBUSY;
184 
185 	return 0;
186 }
187 
188 /**
189  * ice_check_vf_ready_for_reset - check if VF is ready to be reset
190  * @vf: VF to check if it's ready to be reset
191  *
192  * The purpose of this function is to ensure that the VF is not in reset,
193  * disabled, and is both initialized and active, thus enabling us to safely
194  * initialize another reset.
195  */
196 int ice_check_vf_ready_for_reset(struct ice_vf *vf)
197 {
198 	int ret;
199 
200 	ret = ice_check_vf_ready_for_cfg(vf);
201 	if (!ret && !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
202 		ret = -EAGAIN;
203 
204 	return ret;
205 }
206 
/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
	/* Inform VF that it is no longer active, as a warning */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * when it's safe again to access VF's VSI.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
	 * needs to clear them in the case of VFR/VFLR. If this is done for
	 * PFR, it can mess up VF resets because the VF driver may already
	 * have started cleanup by the time we get here.
	 */
	if (!is_pfr)
		vf->vf_ops->clear_mbx_register(vf);

	/* Kick off the HW reset via the implementation-specific register op;
	 * the state bits above must be cleared before this point.
	 */
	vf->vf_ops->trigger_reset_register(vf, is_vflr);
}
237 
/**
 * ice_vf_clear_counters - reset per-VF bookkeeping counters
 * @vf: VF whose counters should be cleared
 *
 * Clears the VF's MAC/VLAN filter counts and MDD event statistics. Called
 * while rebuilding a VF during reset, before the host configuration is
 * re-applied.
 */
static void ice_vf_clear_counters(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	/* The VSI may already be gone during teardown */
	if (vsi)
		vsi->num_vlan = 0;

	vf->num_mac = 0;
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}
249 
/**
 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
 * @vf: VF to perform pre VSI rebuild tasks
 *
 * These tasks are items that don't need to be amortized since they are most
 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
 */
static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
{
	/* Close any IRQ mapping now; irq_close is optional per implementation */
	if (vf->vf_ops->irq_close)
		vf->vf_ops->irq_close(vf);

	ice_vf_clear_counters(vf);
	/* Acknowledge the completed HW reset so a new one can be detected */
	vf->vf_ops->clear_reset_trigger(vf);
}
266 
267 /**
268  * ice_vf_recreate_vsi - Release and re-create the VF's VSI
269  * @vf: VF to recreate the VSI for
270  *
271  * This is only called when a single VF is being reset (i.e. VVF, VFLR, host
272  * VF configuration change, etc)
273  *
274  * It releases and then re-creates a new VSI.
275  */
276 static int ice_vf_recreate_vsi(struct ice_vf *vf)
277 {
278 	struct ice_pf *pf = vf->pf;
279 	int err;
280 
281 	ice_vf_vsi_release(vf);
282 
283 	err = vf->vf_ops->create_vsi(vf);
284 	if (err) {
285 		dev_err(ice_pf_to_dev(pf),
286 			"Failed to recreate the VF%u's VSI, error %d\n",
287 			vf->vf_id, err);
288 		return err;
289 	}
290 
291 	return 0;
292 }
293 
/**
 * ice_vf_rebuild_vsi - rebuild the VF's VSI
 * @vf: VF to rebuild the VSI for
 *
 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
 * host, PFR, CORER, etc.).
 *
 * It reprograms the VSI configuration back into hardware.
 */
static int ice_vf_rebuild_vsi(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_pf *pf = vf->pf;

	if (WARN_ON(!vsi))
		return -EINVAL;

	if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) {
		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
			vf->vf_id);
		return -EIO;
	}
	/* vsi->idx will remain the same in this case so don't update
	 * vf->lan_vsi_idx
	 */
	/* The HW VSI number may change across a rebuild; refresh the cache */
	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
	vf->lan_vsi_num = vsi->vsi_num;

	return 0;
}
324 
/**
 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
 * @vf: VF to add MAC filters for
 * @vsi: Pointer to VSI
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
 *
 * Returns 0 on success (a failure to enable Rx VLAN filtering only warns),
 * a negative error code if the VLAN filter itself could not be added.
 */
static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vf->pf);
	int err;

	if (ice_vf_is_port_vlan_ena(vf)) {
		/* Port VLAN must be programmed into the VSI before the
		 * matching VLAN filter is added.
		 */
		err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
		if (err) {
			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
				vf->vf_id, err);
			return err;
		}

		err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
	} else {
		err = ice_vsi_add_vlan_zero(vsi);
	}

	if (err) {
		dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
			ice_vf_is_port_vlan_ena(vf) ?
			ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
		return err;
	}

	/* Best effort: Rx VLAN filtering failure is non-fatal */
	err = vlan_ops->ena_rx_filtering(vsi);
	if (err)
		dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
			 vf->vf_id, vsi->idx, err);

	return 0;
}
366 
/**
 * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
 * @vf: VF to re-apply the configuration for
 *
 * Called after a VF VSI has been re-added/rebuild during reset. The PF driver
 * needs to re-apply the host configured Tx rate limiting configuration.
 *
 * Rates are stored in Mbps and converted to Kbps for the scheduler API.
 */
static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	int err;

	if (WARN_ON(!vsi))
		return -EINVAL;

	/* 0 means "no limit configured"; only re-apply explicit settings */
	if (vf->min_tx_rate) {
		err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
		if (err) {
			dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
				vf->min_tx_rate, vf->vf_id, err);
			return err;
		}
	}

	if (vf->max_tx_rate) {
		err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
		if (err) {
			dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
				vf->max_tx_rate, vf->vf_id, err);
			return err;
		}
	}

	return 0;
}
403 
/**
 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
 * @vf: VF to configure trust setting for
 *
 * Mirrors vf->trusted into the privilege capability bit so the host-set
 * trust mode survives a VF reset.
 */
static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
{
	assign_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps, vf->trusted);
}
412 
/**
 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
 * @vf: VF to add MAC filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
 */
static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	u8 broadcast[ETH_ALEN];
	int status;

	if (WARN_ON(!vsi))
		return -EINVAL;

	/* In switchdev mode MAC filters are managed by the representor path */
	if (ice_is_eswitch_mode_switchdev(vf->pf))
		return 0;

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
			vf->vf_id, status);
		return status;
	}

	vf->num_mac++;

	/* Re-add the unicast address only if one was previously assigned */
	if (is_valid_ether_addr(vf->hw_lan_addr)) {
		status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
					  ICE_FWD_TO_VSI);
		if (status) {
			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
				&vf->hw_lan_addr[0], vf->vf_id,
				status);
			return status;
		}
		vf->num_mac++;

		/* The effective address tracks the hardware address again */
		ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
	}

	return 0;
}
459 
460 /**
461  * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
462  * @vsi: Pointer to VSI
463  *
464  * This function moves VSI into corresponding scheduler aggregator node
465  * based on cached value of "aggregator node info" per VSI
466  */
467 static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
468 {
469 	struct ice_pf *pf = vsi->back;
470 	struct device *dev;
471 	int status;
472 
473 	if (!vsi->agg_node)
474 		return;
475 
476 	dev = ice_pf_to_dev(pf);
477 	if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
478 		dev_dbg(dev,
479 			"agg_id %u already has reached max_num_vsis %u\n",
480 			vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
481 		return;
482 	}
483 
484 	status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
485 				     vsi->idx, vsi->tc_cfg.ena_tc);
486 	if (status)
487 		dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
488 			vsi->idx, vsi->agg_node->agg_id);
489 	else
490 		vsi->agg_node->num_vsis++;
491 }
492 
/**
 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
 * @vf: VF to rebuild host configuration on
 *
 * Re-applies every host-administered setting (trust, MAC, VLAN, Tx rate,
 * spoof checking, aggregator node) after the VF VSI has been rebuilt.
 * Individual failures are logged but do not abort the remaining steps.
 */
static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_vf_set_host_trust_cfg(vf);

	if (ice_vf_rebuild_host_mac_cfg(vf))
		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_tx_rate_cfg(vf))
		dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
			vf->vf_id);

	if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
		dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
			vf->vf_id);

	/* rebuild aggregator node config for main VF VSI */
	ice_vf_rebuild_aggregator_node_cfg(vsi);
}
526 
527 /**
528  * ice_set_vf_state_qs_dis - Set VF queues state to disabled
529  * @vf: pointer to the VF structure
530  */
531 static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
532 {
533 	/* Clear Rx/Tx enabled queues flag */
534 	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
535 	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
536 	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
537 }
538 
/**
 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
 * @vf: VF to set in initialized state
 *
 * After this function the VF will be ready to receive/handle the
 * VIRTCHNL_OP_GET_VF_RESOURCES message
 */
static void ice_vf_set_initialized(struct ice_vf *vf)
{
	/* Start from a clean queue/promiscuous state */
	ice_set_vf_state_qs_dis(vf);
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
	/* VF must renegotiate its VLAN V2 capabilities after reset */
	memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
}
555 
/**
 * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild
 * @vf: the VF being reset
 *
 * Perform reset tasks which must occur after the VSI has been re-created or
 * rebuilt during a VF reset.
 */
static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
{
	/* Re-apply host config before marking the VF usable again */
	ice_vf_rebuild_host_cfg(vf);
	ice_vf_set_initialized(vf);

	/* Implementation-specific finishing touches (e.g. enabling the VF) */
	vf->vf_ops->post_vsi_rebuild(vf);
}
570 
571 /**
572  * ice_is_any_vf_in_unicast_promisc - check if any VF(s)
573  * are in unicast promiscuous mode
574  * @pf: PF structure for accessing VF(s)
575  *
576  * Return false if no VF(s) are in unicast promiscuous mode,
577  * else return true
578  */
579 bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
580 {
581 	bool is_vf_promisc = false;
582 	struct ice_vf *vf;
583 	unsigned int bkt;
584 
585 	rcu_read_lock();
586 	ice_for_each_vf_rcu(pf, bkt, vf) {
587 		/* found a VF that has promiscuous mode configured */
588 		if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
589 			is_vf_promisc = true;
590 			break;
591 		}
592 	}
593 	rcu_read_unlock();
594 
595 	return is_vf_promisc;
596 }
597 
598 /**
599  * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes
600  * @vf: the VF pointer
601  * @vsi: the VSI to configure
602  * @ucast_m: promiscuous mask to apply to unicast
603  * @mcast_m: promiscuous mask to apply to multicast
604  *
605  * Decide which mask should be used for unicast and multicast filter,
606  * based on presence of VLANs
607  */
608 void
609 ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
610 			 u8 *ucast_m, u8 *mcast_m)
611 {
612 	if (ice_vf_is_port_vlan_ena(vf) ||
613 	    ice_vsi_has_non_zero_vlans(vsi)) {
614 		*mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
615 		*ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
616 	} else {
617 		*mcast_m = ICE_MCAST_PROMISC_BITS;
618 		*ucast_m = ICE_UCAST_PROMISC_BITS;
619 	}
620 }
621 
/**
 * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI
 * @vf: the VF pointer
 * @vsi: the VSI to configure
 *
 * Clear all promiscuous/allmulticast filters for a VF
 *
 * Returns the status of the last clear operation attempted; earlier
 * failures are logged but do not stop the multicast cleanup.
 */
static int
ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_pf *pf = vf->pf;
	u8 ucast_m, mcast_m;
	int ret = 0;

	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
		/* Without true promiscuous support, unicast promiscuous mode
		 * was emulated via the default VSI; undo whichever was used.
		 */
		if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
			if (ice_is_dflt_vsi_in_use(vsi->port_info))
				ret = ice_clear_dflt_vsi(vsi);
		} else {
			ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
		}

		if (ret) {
			dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n");
		} else {
			clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
			dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n");
		}
	}

	if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
		ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
		if (ret) {
			dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n");
		} else {
			clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
			dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n");
		}
	}
	return ret;
}
664 
665 /**
666  * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
667  * @vf: the VF to configure
668  * @vsi: the VF's VSI
669  * @promisc_m: the promiscuous mode to enable
670  */
671 int
672 ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
673 {
674 	struct ice_hw *hw = &vsi->back->hw;
675 	int status;
676 
677 	if (ice_vf_is_port_vlan_ena(vf))
678 		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
679 						  ice_vf_get_port_vlan_id(vf));
680 	else if (ice_vsi_has_non_zero_vlans(vsi))
681 		status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
682 	else
683 		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
684 
685 	if (status && status != -EEXIST) {
686 		dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
687 			vf->vf_id, status);
688 		return status;
689 	}
690 
691 	return 0;
692 }
693 
694 /**
695  * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI
696  * @vf: the VF to configure
697  * @vsi: the VF's VSI
698  * @promisc_m: the promiscuous mode to disable
699  */
700 int
701 ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
702 {
703 	struct ice_hw *hw = &vsi->back->hw;
704 	int status;
705 
706 	if (ice_vf_is_port_vlan_ena(vf))
707 		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
708 						    ice_vf_get_port_vlan_id(vf));
709 	else if (ice_vsi_has_non_zero_vlans(vsi))
710 		status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
711 	else
712 		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
713 
714 	if (status && status != -ENOENT) {
715 		dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
716 			vf->vf_id, status);
717 		return status;
718 	}
719 
720 	return 0;
721 }
722 
/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 *
 * Reset all VFs at once, in response to a PF or other device reset.
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise it must perform
 * these resets in a serialized fashion.
 */
void ice_reset_all_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!ice_has_vfs(pf))
		return;

	/* Hold the table lock for the whole sequence so VFs cannot be
	 * added or removed mid-reset.
	 */
	mutex_lock(&pf->vfs.table_lock);

	/* clear all malicious info if the VFs are getting reset */
	ice_for_each_vf(pf, bkt, vf)
		ice_mbx_clear_malvf(&vf->mbx_info);

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
		mutex_unlock(&pf->vfs.table_lock);
		return;
	}

	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, bkt, vf)
		ice_trigger_vf_reset(vf, true, true);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Now that we've triggered all of the VFs, iterate
	 * the table again and wait for each VF to complete.
	 */
	ice_for_each_vf(pf, bkt, vf) {
		if (!vf->vf_ops->poll_reset_status(vf)) {
			/* Display a warning if at least one VF didn't manage
			 * to reset in time, but continue on with the
			 * operation.
			 */
			dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
			break;
		}
	}

	/* free VF resources to begin resetting the VSI state */
	ice_for_each_vf(pf, bkt, vf) {
		mutex_lock(&vf->cfg_lock);

		/* VF must renegotiate its capabilities after reset */
		vf->driver_caps = 0;
		ice_vc_set_default_allowlist(vf);

		ice_vf_fdir_exit(vf);
		ice_vf_fdir_init(vf);
		/* clean VF control VSI when resetting VFs since it should be
		 * setup only when VF creates its first FDIR rule.
		 */
		if (vf->ctrl_vsi_idx != ICE_NO_VSI)
			ice_vf_ctrl_invalidate_vsi(vf);

		ice_vf_pre_vsi_rebuild(vf);
		ice_vf_rebuild_vsi(vf);
		ice_vf_post_vsi_rebuild(vf);

		mutex_unlock(&vf->cfg_lock);
	}

	if (ice_is_eswitch_mode_switchdev(pf))
		if (ice_eswitch_rebuild(pf))
			dev_warn(dev, "eswitch rebuild failed\n");

	ice_flush(hw);
	clear_bit(ICE_VF_DIS, pf->state);

	mutex_unlock(&pf->vfs.table_lock);
}
807 
808 /**
809  * ice_notify_vf_reset - Notify VF of a reset event
810  * @vf: pointer to the VF structure
811  */
812 static void ice_notify_vf_reset(struct ice_vf *vf)
813 {
814 	struct ice_hw *hw = &vf->pf->hw;
815 	struct virtchnl_pf_event pfe;
816 
817 	/* Bail out if VF is in disabled state, neither initialized, nor active
818 	 * state - otherwise proceed with notifications
819 	 */
820 	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
821 	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
822 	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
823 		return;
824 
825 	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
826 	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
827 	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
828 			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
829 			      NULL);
830 }
831 
/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
 * @flags: flags controlling behavior of the reset
 *
 * Flags:
 *   ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event
 *   ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
 *   ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
 *
 * Returns 0 if the VF is currently in reset, if resets are disabled, or if
 * the VF resets successfully. Returns an error code if the VF fails to
 * rebuild.
 */
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int err = 0;
	bool rsd;

	dev = ice_pf_to_dev(pf);

	if (flags & ICE_VF_RESET_NOTIFY)
		ice_notify_vf_reset(vf);

	if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
			vf->vf_id);
		return 0;
	}

	if (ice_is_vf_disabled(vf)) {
		/* Another reset is in progress or the VF is being removed;
		 * just make sure its queues are quiesced and report success.
		 */
		vsi = ice_get_vf_vsi(vf);
		if (!vsi) {
			dev_dbg(dev, "VF is already removed\n");
			return -EINVAL;
		}
		ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);

		if (ice_vsi_is_rx_queue_active(vsi))
			ice_vsi_stop_all_rx_rings(vsi);

		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
			vf->vf_id);
		return 0;
	}

	/* The caller either asks us to take cfg_lock or must already hold it */
	if (flags & ICE_VF_RESET_LOCK)
		mutex_lock(&vf->cfg_lock);
	else
		lockdep_assert_held(&vf->cfg_lock);

	/* Set VF disable bit state here, before triggering reset */
	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
	ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);

	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EIO;
		goto out_unlock;
	}

	ice_dis_vf_qs(vf);

	/* Call Disable LAN Tx queue AQ whether or not queues are
	 * enabled. This is needed for successful completion of VFR.
	 */
	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
			NULL, vf->vf_ops->reset_type, vf->vf_id, NULL);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	rsd = vf->vf_ops->poll_reset_status(vf);

	/* Display a warning if VF didn't manage to reset in time, but need to
	 * continue on with the operation.
	 */
	if (!rsd)
		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);

	/* VF must renegotiate its capabilities after reset */
	vf->driver_caps = 0;
	ice_vc_set_default_allowlist(vf);

	/* disable promiscuous modes in case they were enabled
	 * ignore any error if disabling process failed
	 */
	ice_vf_clear_all_promisc_modes(vf, vsi);

	ice_vf_fdir_exit(vf);
	ice_vf_fdir_init(vf);
	/* clean VF control VSI when resetting VF since it should be setup
	 * only when VF creates its first FDIR rule.
	 */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	ice_vf_pre_vsi_rebuild(vf);

	if (ice_vf_recreate_vsi(vf)) {
		dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
			vf->vf_id);
		err = -EFAULT;
		goto out_unlock;
	}

	ice_vf_post_vsi_rebuild(vf);
	/* Re-fetch the VSI: recreate_vsi replaced it with a new one */
	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EINVAL;
		goto out_unlock;
	}

	ice_eswitch_update_repr(vsi);

	/* if the VF has been reset allow it to come up again */
	ice_mbx_clear_malvf(&vf->mbx_info);

out_unlock:
	if (flags & ICE_VF_RESET_LOCK)
		mutex_unlock(&vf->cfg_lock);

	return err;
}
958 
/**
 * ice_set_vf_state_dis - Set VF state to disabled
 * @vf: pointer to the VF structure
 *
 * Marks the VF's queues disabled and clears any pending reset state via the
 * implementation-specific op.
 */
void ice_set_vf_state_dis(struct ice_vf *vf)
{
	ice_set_vf_state_qs_dis(vf);
	vf->vf_ops->clear_reset_state(vf);
}
968 
969 /* Private functions only accessed from other virtualization files */
970 
/**
 * ice_initialize_vf_entry - Initialize a VF entry
 * @vf: pointer to the VF structure
 *
 * Sets the default capabilities, virtchnl state, FDIR bookkeeping, mailbox
 * info and the per-VF configuration lock for a freshly allocated VF.
 */
void ice_initialize_vf_entry(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vfs *vfs;

	vfs = &pf->vfs;

	/* assign default capabilities */
	vf->spoofchk = true;
	vf->num_vf_qs = vfs->num_qps_per;
	ice_vc_set_default_allowlist(vf);
	ice_virtchnl_set_dflt_ops(vf);

	/* ctrl_vsi_idx will be set to a valid value only when iAVF
	 * creates its first fdir rule.
	 */
	ice_vf_ctrl_invalidate_vsi(vf);
	ice_vf_fdir_init(vf);

	/* Initialize mailbox info for this VF */
	ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);

	mutex_init(&vf->cfg_lock);
}
999 
/**
 * ice_dis_vf_qs - Disable the VF queues
 * @vf: pointer to the VF structure
 *
 * Stops the VF's Tx and Rx rings and clears the queue-enabled tracking
 * state so the driver and hardware agree the queues are down.
 */
void ice_dis_vf_qs(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
	ice_vsi_stop_all_rx_rings(vsi);
	ice_set_vf_state_qs_dis(vf);
}
1015 
/**
 * ice_err_to_virt_err - translate errors for VF return code
 * @err: error return code
 *
 * Maps a kernel errno to the closest virtchnl status code so errors can
 * be reported back to the VF over the mailbox.
 */
enum virtchnl_status_code ice_err_to_virt_err(int err)
{
	switch (err) {
	case 0:
		return VIRTCHNL_STATUS_SUCCESS;
	case -EINVAL:
	case -ENODEV:
		return VIRTCHNL_STATUS_ERR_PARAM;
	case -ENOMEM:
		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
	case -EALREADY:
	case -EBUSY:
	case -EIO:
	case -ENOSPC:
		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
	default:
		/* Anything unrecognized is reported as unsupported */
		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
	}
}
1039 
1040 /**
1041  * ice_check_vf_init - helper to check if VF init complete
1042  * @vf: the pointer to the VF to check
1043  */
1044 int ice_check_vf_init(struct ice_vf *vf)
1045 {
1046 	struct ice_pf *pf = vf->pf;
1047 
1048 	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
1049 		dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
1050 			vf->vf_id);
1051 		return -EBUSY;
1052 	}
1053 	return 0;
1054 }
1055 
1056 /**
1057  * ice_vf_get_port_info - Get the VF's port info structure
1058  * @vf: VF used to get the port info structure for
1059  */
1060 struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
1061 {
1062 	return vf->pf->hw.port_info;
1063 }
1064 
1065 /**
1066  * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior
1067  * @vsi: the VSI to configure
1068  * @enable: whether to enable or disable the spoof checking
1069  *
1070  * Configure a VSI to enable (or disable) spoof checking behavior.
1071  */
1072 static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
1073 {
1074 	struct ice_vsi_ctx *ctx;
1075 	int err;
1076 
1077 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1078 	if (!ctx)
1079 		return -ENOMEM;
1080 
1081 	ctx->info.sec_flags = vsi->info.sec_flags;
1082 	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
1083 
1084 	if (enable)
1085 		ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1086 	else
1087 		ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1088 
1089 	err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
1090 	if (err)
1091 		dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n",
1092 			enable ? "ON" : "OFF", vsi->vsi_num, err);
1093 	else
1094 		vsi->info.sec_flags = ctx->info.sec_flags;
1095 
1096 	kfree(ctx);
1097 
1098 	return err;
1099 }
1100 
1101 /**
1102  * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI
1103  * @vsi: VSI to enable Tx spoof checking for
1104  */
1105 static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
1106 {
1107 	struct ice_vsi_vlan_ops *vlan_ops;
1108 	int err = 0;
1109 
1110 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1111 
1112 	/* Allow VF with VLAN 0 only to send all tagged traffic */
1113 	if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
1114 		err = vlan_ops->ena_tx_filtering(vsi);
1115 		if (err)
1116 			return err;
1117 	}
1118 
1119 	return ice_cfg_mac_antispoof(vsi, true);
1120 }
1121 
1122 /**
1123  * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI
1124  * @vsi: VSI to disable Tx spoof checking for
1125  */
1126 static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi)
1127 {
1128 	struct ice_vsi_vlan_ops *vlan_ops;
1129 	int err;
1130 
1131 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1132 
1133 	err = vlan_ops->dis_tx_filtering(vsi);
1134 	if (err)
1135 		return err;
1136 
1137 	return ice_cfg_mac_antispoof(vsi, false);
1138 }
1139 
1140 /**
1141  * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI
1142  * @vsi: VSI associated to the VF
1143  * @enable: whether to enable or disable the spoof checking
1144  */
1145 int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable)
1146 {
1147 	int err;
1148 
1149 	if (enable)
1150 		err = ice_vsi_ena_spoofchk(vsi);
1151 	else
1152 		err = ice_vsi_dis_spoofchk(vsi);
1153 
1154 	return err;
1155 }
1156 
1157 /**
1158  * ice_is_vf_trusted
1159  * @vf: pointer to the VF info
1160  */
1161 bool ice_is_vf_trusted(struct ice_vf *vf)
1162 {
1163 	return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1164 }
1165 
1166 /**
1167  * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
1168  * @vf: the VF to check
1169  *
1170  * Returns true if the VF has no Rx and no Tx queues enabled and returns false
1171  * otherwise
1172  */
1173 bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
1174 {
1175 	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
1176 		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
1177 }
1178 
1179 /**
1180  * ice_is_vf_link_up - check if the VF's link is up
1181  * @vf: VF to check if link is up
1182  */
1183 bool ice_is_vf_link_up(struct ice_vf *vf)
1184 {
1185 	struct ice_port_info *pi = ice_vf_get_port_info(vf);
1186 
1187 	if (ice_check_vf_init(vf))
1188 		return false;
1189 
1190 	if (ice_vf_has_no_qs_ena(vf))
1191 		return false;
1192 	else if (vf->link_forced)
1193 		return vf->link_up;
1194 	else
1195 		return pi->phy.link_info.link_info &
1196 			ICE_AQ_LINK_UP;
1197 }
1198 
/**
 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
 * @vf: VF that control VSI is being invalidated on
 */
void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
{
	/* ICE_NO_VSI marks the index unused; it stays that way until the VF
	 * installs its first FDIR rule (see ice_initialize_vf_entry()).
	 */
	vf->ctrl_vsi_idx = ICE_NO_VSI;
}
1207 
1208 /**
1209  * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
1210  * @vf: VF that control VSI is being released on
1211  */
1212 void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
1213 {
1214 	ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
1215 	ice_vf_ctrl_invalidate_vsi(vf);
1216 }
1217 
1218 /**
1219  * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
1220  * @vf: VF to setup control VSI for
1221  *
1222  * Returns pointer to the successfully allocated VSI struct on success,
1223  * otherwise returns NULL on failure.
1224  */
1225 struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
1226 {
1227 	struct ice_vsi_cfg_params params = {};
1228 	struct ice_pf *pf = vf->pf;
1229 	struct ice_vsi *vsi;
1230 
1231 	params.type = ICE_VSI_CTRL;
1232 	params.pi = ice_vf_get_port_info(vf);
1233 	params.vf = vf;
1234 	params.flags = ICE_VSI_FLAG_INIT;
1235 
1236 	vsi = ice_vsi_setup(pf, &params);
1237 	if (!vsi) {
1238 		dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
1239 		ice_vf_ctrl_invalidate_vsi(vf);
1240 	}
1241 
1242 	return vsi;
1243 }
1244 
1245 /**
1246  * ice_vf_init_host_cfg - Initialize host admin configuration
1247  * @vf: VF to initialize
1248  * @vsi: the VSI created at initialization
1249  *
1250  * Initialize the VF host configuration. Called during VF creation to setup
1251  * VLAN 0, add the VF VSI broadcast filter, and setup spoof checking. It
1252  * should only be called during VF creation.
1253  */
1254 int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
1255 {
1256 	struct ice_vsi_vlan_ops *vlan_ops;
1257 	struct ice_pf *pf = vf->pf;
1258 	u8 broadcast[ETH_ALEN];
1259 	struct device *dev;
1260 	int err;
1261 
1262 	dev = ice_pf_to_dev(pf);
1263 
1264 	err = ice_vsi_add_vlan_zero(vsi);
1265 	if (err) {
1266 		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
1267 			 vf->vf_id);
1268 		return err;
1269 	}
1270 
1271 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1272 	err = vlan_ops->ena_rx_filtering(vsi);
1273 	if (err) {
1274 		dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
1275 			 vf->vf_id);
1276 		return err;
1277 	}
1278 
1279 	eth_broadcast_addr(broadcast);
1280 	err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
1281 	if (err) {
1282 		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n",
1283 			vf->vf_id, err);
1284 		return err;
1285 	}
1286 
1287 	vf->num_mac = 1;
1288 
1289 	err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
1290 	if (err) {
1291 		dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
1292 			 vf->vf_id);
1293 		return err;
1294 	}
1295 
1296 	return 0;
1297 }
1298 
/**
 * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
 * @vf: VF to remove access to VSI for
 */
void ice_vf_invalidate_vsi(struct ice_vf *vf)
{
	/* ICE_NO_VSI marks both the software index and the HW VSI number
	 * as unused so stale lookups cannot reach a freed VSI.
	 */
	vf->lan_vsi_idx = ICE_NO_VSI;
	vf->lan_vsi_num = ICE_NO_VSI;
}
1308 
/**
 * ice_vf_vsi_release - Release the VF VSI and invalidate indexes
 * @vf: pointer to the VF structure
 *
 * Release the VF associated with this VSI and then invalidate the VSI
 * indexes.
 */
void ice_vf_vsi_release(struct ice_vf *vf)
{
	struct ice_vsi *vf_vsi;

	vf_vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vf_vsi))
		return;

	ice_vsi_release(vf_vsi);
	ice_vf_invalidate_vsi(vf);
}
1326 
1327 /**
1328  * ice_get_vf_ctrl_vsi - Get first VF control VSI pointer
1329  * @pf: the PF private structure
1330  * @vsi: pointer to the VSI
1331  *
1332  * Return first found VF control VSI other than the vsi
1333  * passed by parameter. This function is used to determine
1334  * whether new resources have to be allocated for control VSI
1335  * or they can be shared with existing one.
1336  *
1337  * Return found VF control VSI pointer other itself. Return
1338  * NULL Otherwise.
1339  *
1340  */
1341 struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
1342 {
1343 	struct ice_vsi *ctrl_vsi = NULL;
1344 	struct ice_vf *vf;
1345 	unsigned int bkt;
1346 
1347 	rcu_read_lock();
1348 	ice_for_each_vf_rcu(pf, bkt, vf) {
1349 		if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
1350 			ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
1351 			break;
1352 		}
1353 	}
1354 
1355 	rcu_read_unlock();
1356 	return ctrl_vsi;
1357 }
1358