xref: /linux/drivers/net/ethernet/intel/ice/ice_vf_lib.c (revision 3fd6c59042dbba50391e30862beac979491145fe)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2022, Intel Corporation. */
3 
4 #include "ice_vf_lib_private.h"
5 #include "ice.h"
6 #include "ice_lib.h"
7 #include "ice_fltr.h"
8 #include "ice_virtchnl_allowlist.h"
9 
10 /* Public functions which may be accessed by all driver files */
11 
12 /**
13  * ice_get_vf_by_id - Get pointer to VF by ID
14  * @pf: the PF private structure
15  * @vf_id: the VF ID to locate
16  *
17  * Locate and return a pointer to the VF structure associated with a given ID.
18  * Returns NULL if the ID does not have a valid VF structure associated with
19  * it.
20  *
21  * This function takes a reference to the VF, which must be released by
22  * calling ice_put_vf() once the caller is finished accessing the VF structure
23  * returned.
24  */
ice_get_vf_by_id(struct ice_pf * pf,u16 vf_id)25 struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
26 {
27 	struct ice_vf *vf;
28 
29 	rcu_read_lock();
30 	hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
31 		if (vf->vf_id == vf_id) {
32 			struct ice_vf *found;
33 
34 			if (kref_get_unless_zero(&vf->refcnt))
35 				found = vf;
36 			else
37 				found = NULL;
38 
39 			rcu_read_unlock();
40 			return found;
41 		}
42 	}
43 	rcu_read_unlock();
44 
45 	return NULL;
46 }
47 
48 /**
49  * ice_release_vf - Release VF associated with a refcount
50  * @ref: the kref decremented to zero
51  *
52  * Callback function for kref_put to release a VF once its reference count has
53  * hit zero.
54  */
ice_release_vf(struct kref * ref)55 static void ice_release_vf(struct kref *ref)
56 {
57 	struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);
58 
59 	pci_dev_put(vf->vfdev);
60 
61 	vf->vf_ops->free(vf);
62 }
63 
64 /**
65  * ice_put_vf - Release a reference to a VF
66  * @vf: the VF structure to decrease reference count on
67  *
68  * Decrease the reference count for a VF, and free the entry if it is no
69  * longer in use.
70  *
71  * This must be called after ice_get_vf_by_id() once the reference to the VF
72  * structure is no longer used. Otherwise, the VF structure will never be
73  * freed.
74  */
ice_put_vf(struct ice_vf * vf)75 void ice_put_vf(struct ice_vf *vf)
76 {
77 	kref_put(&vf->refcnt, ice_release_vf);
78 }
79 
80 /**
81  * ice_has_vfs - Return true if the PF has any associated VFs
82  * @pf: the PF private structure
83  *
84  * Return whether or not the PF has any allocated VFs.
85  *
86  * Note that this function only guarantees that there are no VFs at the point
87  * of calling it. It does not guarantee that no more VFs will be added.
88  */
ice_has_vfs(struct ice_pf * pf)89 bool ice_has_vfs(struct ice_pf *pf)
90 {
91 	/* A simple check that the hash table is not empty does not require
92 	 * the mutex or rcu_read_lock.
93 	 */
94 	return !hash_empty(pf->vfs.table);
95 }
96 
97 /**
98  * ice_get_num_vfs - Get number of allocated VFs
99  * @pf: the PF private structure
100  *
101  * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
102  * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
103  * the output of this function.
104  */
ice_get_num_vfs(struct ice_pf * pf)105 u16 ice_get_num_vfs(struct ice_pf *pf)
106 {
107 	struct ice_vf *vf;
108 	unsigned int bkt;
109 	u16 num_vfs = 0;
110 
111 	rcu_read_lock();
112 	ice_for_each_vf_rcu(pf, bkt, vf)
113 		num_vfs++;
114 	rcu_read_unlock();
115 
116 	return num_vfs;
117 }
118 
119 /**
120  * ice_get_vf_vsi - get VF's VSI based on the stored index
121  * @vf: VF used to get VSI
122  */
ice_get_vf_vsi(struct ice_vf * vf)123 struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
124 {
125 	if (vf->lan_vsi_idx == ICE_NO_VSI)
126 		return NULL;
127 
128 	return vf->pf->vsi[vf->lan_vsi_idx];
129 }
130 
131 /**
132  * ice_is_vf_disabled
133  * @vf: pointer to the VF info
134  *
135  * If the PF has been disabled, there is no need resetting VF until PF is
136  * active again. Similarly, if the VF has been disabled, this means something
137  * else is resetting the VF, so we shouldn't continue.
138  *
139  * Returns true if the caller should consider the VF as disabled whether
140  * because that single VF is explicitly disabled or because the PF is
141  * currently disabled.
142  */
ice_is_vf_disabled(struct ice_vf * vf)143 bool ice_is_vf_disabled(struct ice_vf *vf)
144 {
145 	struct ice_pf *pf = vf->pf;
146 
147 	return (test_bit(ICE_VF_DIS, pf->state) ||
148 		test_bit(ICE_VF_STATE_DIS, vf->vf_states));
149 }
150 
151 /**
152  * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
153  * @vf: The VF being resseting
154  *
155  * The max poll time is about ~800ms, which is about the maximum time it takes
156  * for a VF to be reset and/or a VF driver to be removed.
157  */
ice_wait_on_vf_reset(struct ice_vf * vf)158 static void ice_wait_on_vf_reset(struct ice_vf *vf)
159 {
160 	int i;
161 
162 	for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
163 		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
164 			break;
165 		msleep(ICE_MAX_VF_RESET_SLEEP_MS);
166 	}
167 }
168 
169 /**
170  * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
171  * @vf: VF to check if it's ready to be configured/queried
172  *
173  * The purpose of this function is to make sure the VF is not in reset, not
174  * disabled, and initialized so it can be configured and/or queried by a host
175  * administrator.
176  */
ice_check_vf_ready_for_cfg(struct ice_vf * vf)177 int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
178 {
179 	ice_wait_on_vf_reset(vf);
180 
181 	if (ice_is_vf_disabled(vf))
182 		return -EINVAL;
183 
184 	if (ice_check_vf_init(vf))
185 		return -EBUSY;
186 
187 	return 0;
188 }
189 
/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
	/* Inform VF that it is no longer active, as a warning */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * when it's safe again to access VF's VSI.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
	 * needs to clear them in the case of VFR/VFLR. If this is done for
	 * PFR, it can mess up VF resets because the VF driver may already
	 * have started cleanup by the time we get here.
	 */
	if (!is_pfr)
		vf->vf_ops->clear_mbx_register(vf);

	/* Kick off the reset in hardware; completion is polled by the caller
	 * via vf_ops->poll_reset_status().
	 */
	vf->vf_ops->trigger_reset_register(vf, is_vflr);
}
220 
ice_vf_clear_counters(struct ice_vf * vf)221 static void ice_vf_clear_counters(struct ice_vf *vf)
222 {
223 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
224 
225 	if (vsi)
226 		vsi->num_vlan = 0;
227 
228 	vf->num_mac = 0;
229 	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
230 	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
231 }
232 
/**
 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
 * @vf: VF to perform pre VSI rebuild tasks
 *
 * These tasks are items that don't need to be amortized since they are most
 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
 */
static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
{
	/* Close any IRQ mapping now; irq_close is optional per-implementation */
	if (vf->vf_ops->irq_close)
		vf->vf_ops->irq_close(vf);

	/* Zero filter/MDD counters, then acknowledge the reset trigger so the
	 * hardware can accept a new one later.
	 */
	ice_vf_clear_counters(vf);
	vf->vf_ops->clear_reset_trigger(vf);
}
249 
250 /**
251  * ice_vf_reconfig_vsi - Reconfigure a VF VSI with the device
252  * @vf: VF to reconfigure the VSI for
253  *
254  * This is called when a single VF is being reset (i.e. VVF, VFLR, host VF
255  * configuration change, etc).
256  *
257  * It brings the VSI down and then reconfigures it with the hardware.
258  */
ice_vf_reconfig_vsi(struct ice_vf * vf)259 static int ice_vf_reconfig_vsi(struct ice_vf *vf)
260 {
261 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
262 	struct ice_pf *pf = vf->pf;
263 	int err;
264 
265 	if (WARN_ON(!vsi))
266 		return -EINVAL;
267 
268 	vsi->flags = ICE_VSI_FLAG_NO_INIT;
269 
270 	ice_vsi_decfg(vsi);
271 	ice_fltr_remove_all(vsi);
272 
273 	err = ice_vsi_cfg(vsi);
274 	if (err) {
275 		dev_err(ice_pf_to_dev(pf),
276 			"Failed to reconfigure the VF%u's VSI, error %d\n",
277 			vf->vf_id, err);
278 		return err;
279 	}
280 
281 	return 0;
282 }
283 
284 /**
285  * ice_vf_rebuild_vsi - rebuild the VF's VSI
286  * @vf: VF to rebuild the VSI for
287  *
288  * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
289  * host, PFR, CORER, etc.).
290  *
291  * It reprograms the VSI configuration back into hardware.
292  */
ice_vf_rebuild_vsi(struct ice_vf * vf)293 static int ice_vf_rebuild_vsi(struct ice_vf *vf)
294 {
295 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
296 	struct ice_pf *pf = vf->pf;
297 
298 	if (WARN_ON(!vsi))
299 		return -EINVAL;
300 
301 	if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) {
302 		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
303 			vf->vf_id);
304 		return -EIO;
305 	}
306 	/* vsi->idx will remain the same in this case so don't update
307 	 * vf->lan_vsi_idx
308 	 */
309 	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
310 
311 	return 0;
312 }
313 
/**
 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
 * @vf: VF to add MAC filters for
 * @vsi: Pointer to VSI
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
 *
 * Return: 0 on success (including when Rx VLAN filtering could not be
 * enabled — that failure is only warned about), otherwise the first error
 * encountered while programming the VLAN configuration.
 */
static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vf->pf);
	int err;

	if (ice_vf_is_port_vlan_ena(vf)) {
		/* Restore the host-assigned port VLAN, then re-add its filter */
		err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
		if (err) {
			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
				vf->vf_id, err);
			return err;
		}

		err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
	} else {
		/* clear possible previous port vlan config */
		err = ice_vsi_clear_port_vlan(vsi);
		if (err) {
			dev_err(dev, "failed to clear port VLAN via VSI parameters for VF %u, error %d\n",
				vf->vf_id, err);
			return err;
		}
		err = ice_vsi_add_vlan_zero(vsi);
	}

	/* err here is from add_vlan() or ice_vsi_add_vlan_zero() above */
	if (err) {
		dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
			ice_vf_is_port_vlan_ena(vf) ?
			ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
		return err;
	}

	/* Best effort: a failure to enable Rx VLAN filtering is logged but
	 * does not fail the rebuild.
	 */
	err = vlan_ops->ena_rx_filtering(vsi);
	if (err)
		dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
			 vf->vf_id, vsi->idx, err);

	return 0;
}
362 
363 /**
364  * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
365  * @vf: VF to re-apply the configuration for
366  *
367  * Called after a VF VSI has been re-added/rebuild during reset. The PF driver
368  * needs to re-apply the host configured Tx rate limiting configuration.
369  */
ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf * vf)370 static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
371 {
372 	struct device *dev = ice_pf_to_dev(vf->pf);
373 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
374 	int err;
375 
376 	if (WARN_ON(!vsi))
377 		return -EINVAL;
378 
379 	if (vf->min_tx_rate) {
380 		err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
381 		if (err) {
382 			dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
383 				vf->min_tx_rate, vf->vf_id, err);
384 			return err;
385 		}
386 	}
387 
388 	if (vf->max_tx_rate) {
389 		err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
390 		if (err) {
391 			dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
392 				vf->max_tx_rate, vf->vf_id, err);
393 			return err;
394 		}
395 	}
396 
397 	return 0;
398 }
399 
400 /**
401  * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
402  * @vf: VF to configure trust setting for
403  */
ice_vf_set_host_trust_cfg(struct ice_vf * vf)404 static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
405 {
406 	assign_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps, vf->trusted);
407 }
408 
409 /**
410  * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
411  * @vf: VF to add MAC filters for
412  *
413  * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
414  * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
415  */
ice_vf_rebuild_host_mac_cfg(struct ice_vf * vf)416 static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
417 {
418 	struct device *dev = ice_pf_to_dev(vf->pf);
419 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
420 	u8 broadcast[ETH_ALEN];
421 	int status;
422 
423 	if (WARN_ON(!vsi))
424 		return -EINVAL;
425 
426 	if (ice_is_eswitch_mode_switchdev(vf->pf))
427 		return 0;
428 
429 	eth_broadcast_addr(broadcast);
430 	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
431 	if (status) {
432 		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
433 			vf->vf_id, status);
434 		return status;
435 	}
436 
437 	vf->num_mac++;
438 
439 	if (is_valid_ether_addr(vf->hw_lan_addr)) {
440 		status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
441 					  ICE_FWD_TO_VSI);
442 		if (status) {
443 			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
444 				&vf->hw_lan_addr[0], vf->vf_id,
445 				status);
446 			return status;
447 		}
448 		vf->num_mac++;
449 
450 		ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
451 	}
452 
453 	return 0;
454 }
455 
456 /**
457  * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
458  * @vsi: Pointer to VSI
459  *
460  * This function moves VSI into corresponding scheduler aggregator node
461  * based on cached value of "aggregator node info" per VSI
462  */
ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi * vsi)463 static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
464 {
465 	struct ice_pf *pf = vsi->back;
466 	struct device *dev;
467 	int status;
468 
469 	if (!vsi->agg_node)
470 		return;
471 
472 	dev = ice_pf_to_dev(pf);
473 	if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
474 		dev_dbg(dev,
475 			"agg_id %u already has reached max_num_vsis %u\n",
476 			vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
477 		return;
478 	}
479 
480 	status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
481 				     vsi->idx, vsi->tc_cfg.ena_tc);
482 	if (status)
483 		dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
484 			vsi->idx, vsi->agg_node->agg_id);
485 	else
486 		vsi->agg_node->num_vsis++;
487 }
488 
/**
 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
 * @vf: VF to rebuild host configuration on
 *
 * Re-applies every host-administered setting (trust, MAC filters, VLANs,
 * Tx rate limits, spoof checking, aggregator placement) after the VF's VSI
 * has been rebuilt. Each step is best-effort: a failure is logged but does
 * not stop the remaining steps.
 */
static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_vf_set_host_trust_cfg(vf);

	if (ice_vf_rebuild_host_mac_cfg(vf))
		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_tx_rate_cfg(vf))
		dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
			vf->vf_id);

	if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
		dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
			vf->vf_id);

	/* rebuild aggregator node config for main VF VSI */
	ice_vf_rebuild_aggregator_node_cfg(vsi);
}
522 
523 /**
524  * ice_set_vf_state_qs_dis - Set VF queues state to disabled
525  * @vf: pointer to the VF structure
526  */
ice_set_vf_state_qs_dis(struct ice_vf * vf)527 static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
528 {
529 	/* Clear Rx/Tx enabled queues flag */
530 	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
531 	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
532 	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
533 }
534 
/**
 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
 * @vf: VF to set in initialized state
 *
 * After this function the VF will be ready to receive/handle the
 * VIRTCHNL_OP_GET_VF_RESOURCES message
 */
static void ice_vf_set_initialized(struct ice_vf *vf)
{
	/* Start from a clean queue/promiscuous state before flipping the
	 * DIS/INIT bits; INIT is set last so the VF only becomes visible as
	 * initialized once everything else is reset.
	 */
	ice_set_vf_state_qs_dis(vf);
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
	/* VLAN v2 capabilities are renegotiated after every reset */
	memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
}
551 
/**
 * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild
 * @vf: the VF being reset
 *
 * Perform reset tasks which must occur after the VSI has been re-created or
 * rebuilt during a VF reset.
 */
static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
{
	/* Restore host-admin settings, then mark the VF initialized so it
	 * may resume VIRTCHNL communication.
	 */
	ice_vf_rebuild_host_cfg(vf);
	ice_vf_set_initialized(vf);

	/* Implementation-specific post-rebuild work (e.g. HW enablement) */
	vf->vf_ops->post_vsi_rebuild(vf);
}
566 
567 /**
568  * ice_is_any_vf_in_unicast_promisc - check if any VF(s)
569  * are in unicast promiscuous mode
570  * @pf: PF structure for accessing VF(s)
571  *
572  * Return false if no VF(s) are in unicast promiscuous mode,
573  * else return true
574  */
ice_is_any_vf_in_unicast_promisc(struct ice_pf * pf)575 bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
576 {
577 	bool is_vf_promisc = false;
578 	struct ice_vf *vf;
579 	unsigned int bkt;
580 
581 	rcu_read_lock();
582 	ice_for_each_vf_rcu(pf, bkt, vf) {
583 		/* found a VF that has promiscuous mode configured */
584 		if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
585 			is_vf_promisc = true;
586 			break;
587 		}
588 	}
589 	rcu_read_unlock();
590 
591 	return is_vf_promisc;
592 }
593 
594 /**
595  * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes
596  * @vf: the VF pointer
597  * @vsi: the VSI to configure
598  * @ucast_m: promiscuous mask to apply to unicast
599  * @mcast_m: promiscuous mask to apply to multicast
600  *
601  * Decide which mask should be used for unicast and multicast filter,
602  * based on presence of VLANs
603  */
604 void
ice_vf_get_promisc_masks(struct ice_vf * vf,struct ice_vsi * vsi,u8 * ucast_m,u8 * mcast_m)605 ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
606 			 u8 *ucast_m, u8 *mcast_m)
607 {
608 	if (ice_vf_is_port_vlan_ena(vf) ||
609 	    ice_vsi_has_non_zero_vlans(vsi)) {
610 		*mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
611 		*ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
612 	} else {
613 		*mcast_m = ICE_MCAST_PROMISC_BITS;
614 		*ucast_m = ICE_UCAST_PROMISC_BITS;
615 	}
616 }
617 
/**
 * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI
 * @vf: the VF pointer
 * @vsi: the VSI to configure
 *
 * Clear all promiscuous/allmulticast filters for a VF
 *
 * Return: 0 on success; otherwise the last failure from clearing either the
 * unicast or the multicast promiscuous state (a unicast failure may be
 * overwritten by a later multicast result).
 */
static int
ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_pf *pf = vf->pf;
	u8 ucast_m, mcast_m;
	int ret = 0;

	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
		/* Without true-promisc support, unicast promiscuous mode was
		 * implemented via the default-VSI mechanism; undo whichever
		 * was in effect.
		 */
		if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
			if (ice_is_dflt_vsi_in_use(vsi->port_info))
				ret = ice_clear_dflt_vsi(vsi);
		} else {
			ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
		}

		if (ret) {
			dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n");
		} else {
			clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
			dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n");
		}
	}

	if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
		ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
		if (ret) {
			dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n");
		} else {
			clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
			dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n");
		}
	}
	return ret;
}
660 
661 /**
662  * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
663  * @vf: the VF to configure
664  * @vsi: the VF's VSI
665  * @promisc_m: the promiscuous mode to enable
666  */
667 int
ice_vf_set_vsi_promisc(struct ice_vf * vf,struct ice_vsi * vsi,u8 promisc_m)668 ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
669 {
670 	struct ice_hw *hw = &vsi->back->hw;
671 	int status;
672 
673 	if (ice_vf_is_port_vlan_ena(vf))
674 		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
675 						  ice_vf_get_port_vlan_id(vf));
676 	else if (ice_vsi_has_non_zero_vlans(vsi))
677 		status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
678 	else
679 		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
680 
681 	if (status && status != -EEXIST) {
682 		dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
683 			vf->vf_id, status);
684 		return status;
685 	}
686 
687 	return 0;
688 }
689 
690 /**
691  * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI
692  * @vf: the VF to configure
693  * @vsi: the VF's VSI
694  * @promisc_m: the promiscuous mode to disable
695  */
696 int
ice_vf_clear_vsi_promisc(struct ice_vf * vf,struct ice_vsi * vsi,u8 promisc_m)697 ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
698 {
699 	struct ice_hw *hw = &vsi->back->hw;
700 	int status;
701 
702 	if (ice_vf_is_port_vlan_ena(vf))
703 		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
704 						    ice_vf_get_port_vlan_id(vf));
705 	else if (ice_vsi_has_non_zero_vlans(vsi))
706 		status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
707 	else
708 		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
709 
710 	if (status && status != -ENOENT) {
711 		dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
712 			vf->vf_id, status);
713 		return status;
714 	}
715 
716 	return 0;
717 }
718 
719 /**
720  * ice_reset_vf_mbx_cnt - reset VF mailbox message count
721  * @vf: pointer to the VF structure
722  *
723  * This function clears the VF mailbox message count, and should be called on
724  * VF reset.
725  */
ice_reset_vf_mbx_cnt(struct ice_vf * vf)726 static void ice_reset_vf_mbx_cnt(struct ice_vf *vf)
727 {
728 	struct ice_pf *pf = vf->pf;
729 
730 	if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
731 		ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
732 	else
733 		ice_mbx_clear_malvf(&vf->mbx_info);
734 }
735 
/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 *
 * Reset all VFs at once, in response to a PF or other device reset.
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise it must perform
 * these resets in a serialized fashion.
 */
void ice_reset_all_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!ice_has_vfs(pf))
		return;

	/* table_lock is held for the whole operation; per-VF cfg_lock is
	 * taken inside the rebuild loop below.
	 */
	mutex_lock(&pf->vfs.table_lock);

	/* clear all malicious info if the VFs are getting reset */
	ice_for_each_vf(pf, bkt, vf)
		ice_reset_vf_mbx_cnt(vf);

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
		mutex_unlock(&pf->vfs.table_lock);
		return;
	}

	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, bkt, vf)
		ice_trigger_vf_reset(vf, true, true);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Now that we've triggered all of the VFs, iterate
	 * the table again and wait for each VF to complete.
	 */
	ice_for_each_vf(pf, bkt, vf) {
		if (!vf->vf_ops->poll_reset_status(vf)) {
			/* Display a warning if at least one VF didn't manage
			 * to reset in time, but continue on with the
			 * operation.
			 */
			dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
			break;
		}
	}

	/* free VF resources to begin resetting the VSI state */
	ice_for_each_vf(pf, bkt, vf) {
		mutex_lock(&vf->cfg_lock);

		ice_eswitch_detach_vf(pf, vf);
		vf->driver_caps = 0;
		ice_vc_set_default_allowlist(vf);

		ice_vf_fdir_exit(vf);
		ice_vf_fdir_init(vf);
		/* clean VF control VSI when resetting VFs since it should be
		 * setup only when VF creates its first FDIR rule.
		 */
		if (vf->ctrl_vsi_idx != ICE_NO_VSI)
			ice_vf_ctrl_invalidate_vsi(vf);

		ice_vf_pre_vsi_rebuild(vf);
		ice_vf_rebuild_vsi(vf);
		ice_vf_post_vsi_rebuild(vf);

		ice_eswitch_attach_vf(pf, vf);

		mutex_unlock(&vf->cfg_lock);
	}

	/* flush pending register writes, then re-enable VF resets */
	ice_flush(hw);
	clear_bit(ICE_VF_DIS, pf->state);

	mutex_unlock(&pf->vfs.table_lock);
}
819 
820 /**
821  * ice_notify_vf_reset - Notify VF of a reset event
822  * @vf: pointer to the VF structure
823  */
ice_notify_vf_reset(struct ice_vf * vf)824 static void ice_notify_vf_reset(struct ice_vf *vf)
825 {
826 	struct ice_hw *hw = &vf->pf->hw;
827 	struct virtchnl_pf_event pfe;
828 
829 	/* Bail out if VF is in disabled state, neither initialized, nor active
830 	 * state - otherwise proceed with notifications
831 	 */
832 	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
833 	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
834 	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
835 		return;
836 
837 	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
838 	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
839 	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
840 			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
841 			      NULL);
842 }
843 
/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
 * @flags: flags controlling behavior of the reset
 *
 * Flags:
 *   ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event
 *   ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
 *   ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
 *
 * Returns 0 if the VF is currently in reset, if resets are disabled, or if
 * the VF resets successfully. Returns an error code if the VF fails to
 * rebuild.
 */
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
	struct ice_pf *pf = vf->pf;
	struct ice_lag *lag;
	struct ice_vsi *vsi;
	u8 act_prt, pri_prt;
	struct device *dev;
	int err = 0;
	bool rsd;

	dev = ice_pf_to_dev(pf);
	act_prt = ICE_LAG_INVALID_PORT;
	pri_prt = pf->hw.port_info->lport;

	if (flags & ICE_VF_RESET_NOTIFY)
		ice_notify_vf_reset(vf);

	if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
			vf->vf_id);
		return 0;
	}

	/* Either we take cfg_lock here, or the caller must already hold it */
	if (flags & ICE_VF_RESET_LOCK)
		mutex_lock(&vf->cfg_lock);
	else
		lockdep_assert_held(&vf->cfg_lock);

	/* When bonded and this PF is the primary with a non-primary active
	 * port, temporarily move the VF's scheduler nodes to the primary
	 * port for the duration of the reset; moved back at out_unlock.
	 */
	lag = pf->lag;
	mutex_lock(&pf->lag_mutex);
	if (lag && lag->bonded && lag->primary) {
		act_prt = lag->active_port;
		if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
		    lag->upper_netdev)
			ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
		else
			act_prt = ICE_LAG_INVALID_PORT;
	}

	if (ice_is_vf_disabled(vf)) {
		vsi = ice_get_vf_vsi(vf);
		if (!vsi) {
			dev_dbg(dev, "VF is already removed\n");
			err = -EINVAL;
			goto out_unlock;
		}
		/* Someone else is resetting (or the PF is disabled): just
		 * make sure the rings are stopped and report success.
		 */
		ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);

		if (ice_vsi_is_rx_queue_active(vsi))
			ice_vsi_stop_all_rx_rings(vsi);

		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
			vf->vf_id);
		goto out_unlock;
	}

	/* Set VF disable bit state here, before triggering reset */
	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
	ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);

	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EIO;
		goto out_unlock;
	}

	ice_dis_vf_qs(vf);

	/* Call Disable LAN Tx queue AQ whether or not queues are
	 * enabled. This is needed for successful completion of VFR.
	 */
	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
			NULL, vf->vf_ops->reset_type, vf->vf_id, NULL);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	rsd = vf->vf_ops->poll_reset_status(vf);

	/* Display a warning if VF didn't manage to reset in time, but need to
	 * continue on with the operation.
	 */
	if (!rsd)
		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);

	vf->driver_caps = 0;
	ice_vc_set_default_allowlist(vf);

	/* disable promiscuous modes in case they were enabled
	 * ignore any error if disabling process failed
	 */
	ice_vf_clear_all_promisc_modes(vf, vsi);

	ice_vf_fdir_exit(vf);
	ice_vf_fdir_init(vf);
	/* clean VF control VSI when resetting VF since it should be setup
	 * only when VF creates its first FDIR rule.
	 */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	ice_vf_pre_vsi_rebuild(vf);

	if (ice_vf_reconfig_vsi(vf)) {
		dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
			vf->vf_id);
		err = -EFAULT;
		goto out_unlock;
	}

	/* Re-fetch the VSI pointer: the rebuild path may have replaced it */
	ice_vf_post_vsi_rebuild(vf);
	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EINVAL;
		goto out_unlock;
	}

	ice_eswitch_update_repr(&vf->repr_id, vsi);

	/* if the VF has been reset allow it to come up again */
	ice_reset_vf_mbx_cnt(vf);

out_unlock:
	/* Undo the temporary LAG port move, if one was done above */
	if (lag && lag->bonded && lag->primary &&
	    act_prt != ICE_LAG_INVALID_PORT)
		ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
	mutex_unlock(&pf->lag_mutex);

	if (flags & ICE_VF_RESET_LOCK)
		mutex_unlock(&vf->cfg_lock);

	return err;
}
991 
/**
 * ice_set_vf_state_dis - Set VF state to disabled
 * @vf: pointer to the VF structure
 *
 * Mark all of the VF's queue state as disabled, then invoke the VF-type
 * specific clear_reset_state operation so the device no longer reports a
 * pending reset for this VF.
 */
void ice_set_vf_state_dis(struct ice_vf *vf)
{
	ice_set_vf_state_qs_dis(vf);
	/* clear_reset_state is implemented per VF type (SR-IOV, etc.) */
	vf->vf_ops->clear_reset_state(vf);
}
1001 
1002 /* Private functions only accessed from other virtualization files */
1003 
1004 /**
1005  * ice_initialize_vf_entry - Initialize a VF entry
1006  * @vf: pointer to the VF structure
1007  */
ice_initialize_vf_entry(struct ice_vf * vf)1008 void ice_initialize_vf_entry(struct ice_vf *vf)
1009 {
1010 	struct ice_pf *pf = vf->pf;
1011 	struct ice_vfs *vfs;
1012 
1013 	vfs = &pf->vfs;
1014 
1015 	/* assign default capabilities */
1016 	vf->spoofchk = true;
1017 	ice_vc_set_default_allowlist(vf);
1018 	ice_virtchnl_set_dflt_ops(vf);
1019 
1020 	/* set default number of MSI-X */
1021 	vf->num_msix = vfs->num_msix_per;
1022 	vf->num_vf_qs = vfs->num_qps_per;
1023 
1024 	/* ctrl_vsi_idx will be set to a valid value only when iAVF
1025 	 * creates its first fdir rule.
1026 	 */
1027 	ice_vf_ctrl_invalidate_vsi(vf);
1028 	ice_vf_fdir_init(vf);
1029 
1030 	/* Initialize mailbox info for this VF */
1031 	if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
1032 		ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
1033 	else
1034 		ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);
1035 
1036 	mutex_init(&vf->cfg_lock);
1037 }
1038 
/**
 * ice_dis_vf_qs - Disable the VF queues
 * @vf: pointer to the VF structure
 *
 * Stop the VF VSI's LAN Tx rings and all of its Rx rings, then mark the
 * VF's queue state as disabled. A missing VSI indicates a driver bug, so
 * only warn and bail out in that case.
 */
void ice_dis_vf_qs(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	/* Tx rings are stopped without triggering a VSI reset */
	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
	ice_vsi_stop_all_rx_rings(vsi);
	ice_set_vf_state_qs_dis(vf);
}
1054 
1055 /**
1056  * ice_err_to_virt_err - translate errors for VF return code
1057  * @err: error return code
1058  */
ice_err_to_virt_err(int err)1059 enum virtchnl_status_code ice_err_to_virt_err(int err)
1060 {
1061 	switch (err) {
1062 	case 0:
1063 		return VIRTCHNL_STATUS_SUCCESS;
1064 	case -EINVAL:
1065 	case -ENODEV:
1066 		return VIRTCHNL_STATUS_ERR_PARAM;
1067 	case -ENOMEM:
1068 		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
1069 	case -EALREADY:
1070 	case -EBUSY:
1071 	case -EIO:
1072 	case -ENOSPC:
1073 		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1074 	default:
1075 		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
1076 	}
1077 }
1078 
1079 /**
1080  * ice_check_vf_init - helper to check if VF init complete
1081  * @vf: the pointer to the VF to check
1082  */
ice_check_vf_init(struct ice_vf * vf)1083 int ice_check_vf_init(struct ice_vf *vf)
1084 {
1085 	struct ice_pf *pf = vf->pf;
1086 
1087 	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
1088 		dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
1089 			vf->vf_id);
1090 		return -EBUSY;
1091 	}
1092 	return 0;
1093 }
1094 
/**
 * ice_vf_get_port_info - Get the VF's port info structure
 * @vf: VF used to get the port info structure for
 *
 * Return: pointer to the port info structure of the PF that owns this VF.
 */
struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
{
	return vf->pf->hw.port_info;
}
1103 
1104 /**
1105  * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior
1106  * @vsi: the VSI to configure
1107  * @enable: whether to enable or disable the spoof checking
1108  *
1109  * Configure a VSI to enable (or disable) spoof checking behavior.
1110  */
ice_cfg_mac_antispoof(struct ice_vsi * vsi,bool enable)1111 static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
1112 {
1113 	struct ice_vsi_ctx *ctx;
1114 	int err;
1115 
1116 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1117 	if (!ctx)
1118 		return -ENOMEM;
1119 
1120 	ctx->info.sec_flags = vsi->info.sec_flags;
1121 	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
1122 
1123 	if (enable)
1124 		ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1125 	else
1126 		ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1127 
1128 	err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
1129 	if (err)
1130 		dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n",
1131 			enable ? "ON" : "OFF", vsi->vsi_num, err);
1132 	else
1133 		vsi->info.sec_flags = ctx->info.sec_flags;
1134 
1135 	kfree(ctx);
1136 
1137 	return err;
1138 }
1139 
1140 /**
1141  * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI
1142  * @vsi: VSI to enable Tx spoof checking for
1143  */
ice_vsi_ena_spoofchk(struct ice_vsi * vsi)1144 static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
1145 {
1146 	struct ice_vsi_vlan_ops *vlan_ops;
1147 	int err = 0;
1148 
1149 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1150 
1151 	/* Allow VF with VLAN 0 only to send all tagged traffic */
1152 	if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
1153 		err = vlan_ops->ena_tx_filtering(vsi);
1154 		if (err)
1155 			return err;
1156 	}
1157 
1158 	return ice_cfg_mac_antispoof(vsi, true);
1159 }
1160 
1161 /**
1162  * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI
1163  * @vsi: VSI to disable Tx spoof checking for
1164  */
ice_vsi_dis_spoofchk(struct ice_vsi * vsi)1165 static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi)
1166 {
1167 	struct ice_vsi_vlan_ops *vlan_ops;
1168 	int err;
1169 
1170 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1171 
1172 	err = vlan_ops->dis_tx_filtering(vsi);
1173 	if (err)
1174 		return err;
1175 
1176 	return ice_cfg_mac_antispoof(vsi, false);
1177 }
1178 
1179 /**
1180  * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI
1181  * @vsi: VSI associated to the VF
1182  * @enable: whether to enable or disable the spoof checking
1183  */
ice_vsi_apply_spoofchk(struct ice_vsi * vsi,bool enable)1184 int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable)
1185 {
1186 	int err;
1187 
1188 	if (enable)
1189 		err = ice_vsi_ena_spoofchk(vsi);
1190 	else
1191 		err = ice_vsi_dis_spoofchk(vsi);
1192 
1193 	return err;
1194 }
1195 
/**
 * ice_is_vf_trusted - check if the VF has the privileged capability
 * @vf: pointer to the VF info
 *
 * Return: true if ICE_VIRTCHNL_VF_CAP_PRIVILEGE is set in the VF's
 * capability flags, false otherwise.
 */
bool ice_is_vf_trusted(struct ice_vf *vf)
{
	return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
}
1204 
1205 /**
1206  * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
1207  * @vf: the VF to check
1208  *
1209  * Returns true if the VF has no Rx and no Tx queues enabled and returns false
1210  * otherwise
1211  */
ice_vf_has_no_qs_ena(struct ice_vf * vf)1212 bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
1213 {
1214 	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
1215 		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
1216 }
1217 
1218 /**
1219  * ice_is_vf_link_up - check if the VF's link is up
1220  * @vf: VF to check if link is up
1221  */
ice_is_vf_link_up(struct ice_vf * vf)1222 bool ice_is_vf_link_up(struct ice_vf *vf)
1223 {
1224 	struct ice_port_info *pi = ice_vf_get_port_info(vf);
1225 
1226 	if (ice_check_vf_init(vf))
1227 		return false;
1228 
1229 	if (ice_vf_has_no_qs_ena(vf))
1230 		return false;
1231 	else if (vf->link_forced)
1232 		return vf->link_up;
1233 	else
1234 		return pi->phy.link_info.link_info &
1235 			ICE_AQ_LINK_UP;
1236 }
1237 
/**
 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
 * @vf: VF that control VSI is being invalidated on
 *
 * Set ctrl_vsi_idx to ICE_NO_VSI so callers that test it (e.g. before
 * releasing the control VSI) can see that no control VSI is allocated.
 */
void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
{
	vf->ctrl_vsi_idx = ICE_NO_VSI;
}
1246 
/**
 * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
 * @vf: VF that control VSI is being released on
 *
 * Release the control VSI and then mark ctrl_vsi_idx as invalid. The
 * caller must ensure vf->ctrl_vsi_idx is valid (not ICE_NO_VSI) before
 * calling, since it is used to index pf->vsi[] unconditionally here.
 */
void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
{
	ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
	ice_vf_ctrl_invalidate_vsi(vf);
}
1256 
1257 /**
1258  * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
1259  * @vf: VF to setup control VSI for
1260  *
1261  * Returns pointer to the successfully allocated VSI struct on success,
1262  * otherwise returns NULL on failure.
1263  */
ice_vf_ctrl_vsi_setup(struct ice_vf * vf)1264 struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
1265 {
1266 	struct ice_vsi_cfg_params params = {};
1267 	struct ice_pf *pf = vf->pf;
1268 	struct ice_vsi *vsi;
1269 
1270 	params.type = ICE_VSI_CTRL;
1271 	params.port_info = ice_vf_get_port_info(vf);
1272 	params.vf = vf;
1273 	params.flags = ICE_VSI_FLAG_INIT;
1274 
1275 	vsi = ice_vsi_setup(pf, &params);
1276 	if (!vsi) {
1277 		dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
1278 		ice_vf_ctrl_invalidate_vsi(vf);
1279 	}
1280 
1281 	return vsi;
1282 }
1283 
/**
 * ice_vf_init_host_cfg - Initialize host admin configuration
 * @vf: VF to initialize
 * @vsi: the VSI created at initialization
 *
 * Initialize the VF host configuration. Called during VF creation to setup
 * VLAN 0, add the VF VSI broadcast filter, and setup spoof checking. It
 * should only be called during VF creation.
 *
 * Return: 0 on success, the negative error code of the first failing step
 * otherwise. Steps that already completed are not rolled back here;
 * presumably the caller tears down the VSI on failure — verify at call
 * sites.
 */
int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops;
	struct ice_pf *pf = vf->pf;
	u8 broadcast[ETH_ALEN];
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);

	err = ice_vsi_add_vlan_zero(vsi);
	if (err) {
		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
			 vf->vf_id);
		return err;
	}

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	err = vlan_ops->ena_rx_filtering(vsi);
	if (err) {
		dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
			 vf->vf_id);
		return err;
	}

	/* forward broadcast frames to this VF's VSI */
	eth_broadcast_addr(broadcast);
	err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (err) {
		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n",
			vf->vf_id, err);
		return err;
	}

	/* the broadcast filter above counts as the VF's first MAC filter */
	vf->num_mac = 1;

	/* apply the default spoofchk setting assigned at VF creation */
	err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
	if (err) {
		dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
			 vf->vf_id);
		return err;
	}

	return 0;
}
1337 
/**
 * ice_vf_invalidate_vsi - invalidate vsi_idx to remove VSI access
 * @vf: VF to remove access to VSI for
 *
 * Set lan_vsi_idx to ICE_NO_VSI so stale accesses to the VF's LAN VSI can
 * be detected after it has been released.
 */
void ice_vf_invalidate_vsi(struct ice_vf *vf)
{
	vf->lan_vsi_idx = ICE_NO_VSI;
}
1346 
/**
 * ice_vf_vsi_release - Release the VF VSI and invalidate indexes
 * @vf: pointer to the VF structure
 *
 * Release the VSI associated with this VF and then invalidate
 * vf->lan_vsi_idx. A missing VSI indicates a driver bug; WARN and return
 * in that case.
 */
void ice_vf_vsi_release(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_vsi_release(vsi);
	ice_vf_invalidate_vsi(vf);
}
1364 
1365 /**
1366  * ice_get_vf_ctrl_vsi - Get first VF control VSI pointer
1367  * @pf: the PF private structure
1368  * @vsi: pointer to the VSI
1369  *
1370  * Return first found VF control VSI other than the vsi
1371  * passed by parameter. This function is used to determine
1372  * whether new resources have to be allocated for control VSI
1373  * or they can be shared with existing one.
1374  *
1375  * Return found VF control VSI pointer other itself. Return
1376  * NULL Otherwise.
1377  *
1378  */
ice_get_vf_ctrl_vsi(struct ice_pf * pf,struct ice_vsi * vsi)1379 struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
1380 {
1381 	struct ice_vsi *ctrl_vsi = NULL;
1382 	struct ice_vf *vf;
1383 	unsigned int bkt;
1384 
1385 	rcu_read_lock();
1386 	ice_for_each_vf_rcu(pf, bkt, vf) {
1387 		if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
1388 			ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
1389 			break;
1390 		}
1391 	}
1392 
1393 	rcu_read_unlock();
1394 	return ctrl_vsi;
1395 }
1396