xref: /linux/drivers/net/ethernet/intel/ice/ice_sriov.c (revision 1e15510b71c99c6e49134d756df91069f7d18141)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_vf_lib_private.h"
6 #include "ice_base.h"
7 #include "ice_lib.h"
8 #include "ice_fltr.h"
9 #include "ice_dcb_lib.h"
10 #include "ice_flow.h"
11 #include "ice_eswitch.h"
12 #include "ice_virtchnl_allowlist.h"
13 #include "ice_flex_pipe.h"
14 #include "ice_vf_vsi_vlan_ops.h"
15 #include "ice_vlan.h"
16 
17 /**
18  * ice_free_vf_entries - Free all VF entries from the hash table
19  * @pf: pointer to the PF structure
20  *
21  * Iterate over the VF hash table, removing and releasing all VF entries.
22  * Called during VF teardown or as cleanup during failed VF initialization.
23  */
24 static void ice_free_vf_entries(struct ice_pf *pf)
25 {
26 	struct ice_vfs *vfs = &pf->vfs;
27 	struct hlist_node *tmp;
28 	struct ice_vf *vf;
29 	unsigned int bkt;
30 
31 	/* Remove all VFs from the hash table and release their main
32 	 * reference. Once all references to the VF are dropped, ice_put_vf()
33 	 * will call ice_release_vf which will remove the VF memory.
34 	 */
35 	lockdep_assert_held(&vfs->table_lock);
36 
37 	hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
38 		hash_del_rcu(&vf->entry);
39 		ice_deinitialize_vf_entry(vf);
40 		ice_put_vf(vf);
41 	}
42 }
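/*
 * The VF lifetime above follows the kernel's standard kref pattern. A
 * minimal standalone sketch of that pattern (hypothetical "obj" type, not
 * the driver's API):
 *
 *	struct obj { struct kref refcnt; };
 *
 *	static void obj_release(struct kref *kref)
 *	{
 *		kfree(container_of(kref, struct obj, refcnt));
 *	}
 *
 *	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);
 *
 *	kref_init(&o->refcnt);			// creator holds one reference
 *	kref_get(&o->refcnt);			// each additional user takes one
 *	kref_put(&o->refcnt, obj_release);	// the final put calls obj_release()
 */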
43 
44 /**
45  * ice_free_vf_res - Free a VF's resources
46  * @vf: pointer to the VF info
47  */
48 static void ice_free_vf_res(struct ice_vf *vf)
49 {
50 	struct ice_pf *pf = vf->pf;
51 	int i, last_vector_idx;
52 
53 	/* First, disable the VF's configuration API to prevent the OS from
54 	 * accessing the VF's VSI after it's freed or invalidated.
55 	 */
56 	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
57 	ice_vf_fdir_exit(vf);
58 	/* free VF control VSI */
59 	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
60 		ice_vf_ctrl_vsi_release(vf);
61 
62 	/* free VSI and disconnect it from the parent uplink */
63 	if (vf->lan_vsi_idx != ICE_NO_VSI) {
64 		ice_vf_vsi_release(vf);
65 		vf->num_mac = 0;
66 	}
67 
68 	last_vector_idx = vf->first_vector_idx + vf->num_msix - 1;
69 
70 	/* clear VF MDD event information */
71 	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
72 	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
73 
74 	/* Disable interrupts so that VF starts in a known state */
75 	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
76 		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
77 		ice_flush(&pf->hw);
78 	}
79 	/* reset some of the state variables keeping track of the resources */
80 	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
81 	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
82 }
83 
84 /**
85  * ice_dis_vf_mappings - disable a VF's MSI-X and queue mappings in hardware
86  * @vf: pointer to the VF structure
87  */
88 static void ice_dis_vf_mappings(struct ice_vf *vf)
89 {
90 	struct ice_pf *pf = vf->pf;
91 	struct ice_vsi *vsi;
92 	struct device *dev;
93 	int first, last, v;
94 	struct ice_hw *hw;
95 
96 	hw = &pf->hw;
97 	vsi = ice_get_vf_vsi(vf);
98 	if (WARN_ON(!vsi))
99 		return;
100 
101 	dev = ice_pf_to_dev(pf);
102 	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
103 	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
104 
105 	first = vf->first_vector_idx;
106 	last = first + vf->num_msix - 1;
107 	for (v = first; v <= last; v++) {
108 		u32 reg;
109 
110 		reg = FIELD_PREP(GLINT_VECT2FUNC_IS_PF_M, 1) |
111 		      FIELD_PREP(GLINT_VECT2FUNC_PF_NUM_M, hw->pf_id);
112 		wr32(hw, GLINT_VECT2FUNC(v), reg);
113 	}
114 
115 	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
116 		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
117 	else
118 		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
119 
120 	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
121 		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
122 	else
123 		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
124 }
125 
126 /**
127  * ice_sriov_free_msix_res - Reset/free any used MSIX resources
128  * @pf: pointer to the PF structure
129  *
130  * Since no MSI-X entries are taken from pf->irq_tracker, just free the
131  * SR-IOV irq bitmap and clear pf->sriov_base_vector.
132  *
133  * Returns 0 on success, and -EINVAL on error.
134  */
135 static int ice_sriov_free_msix_res(struct ice_pf *pf)
136 {
137 	if (!pf)
138 		return -EINVAL;
139 
140 	bitmap_free(pf->sriov_irq_bm);
141 	pf->sriov_irq_size = 0;
142 	pf->sriov_base_vector = 0;
143 
144 	return 0;
145 }
146 
147 /**
148  * ice_free_vfs - Free all VFs
149  * @pf: pointer to the PF structure
150  */
151 void ice_free_vfs(struct ice_pf *pf)
152 {
153 	struct device *dev = ice_pf_to_dev(pf);
154 	struct ice_vfs *vfs = &pf->vfs;
155 	struct ice_hw *hw = &pf->hw;
156 	struct ice_vf *vf;
157 	unsigned int bkt;
158 
159 	if (!ice_has_vfs(pf))
160 		return;
161 
162 	while (test_and_set_bit(ICE_VF_DIS, pf->state))
163 		usleep_range(1000, 2000);
164 
165 	/* Disable IOV before freeing resources. This lets any VF drivers
166 	 * running in the host get themselves cleaned up before we yank
167 	 * the carpet out from underneath their feet.
168 	 */
169 	if (!pci_vfs_assigned(pf->pdev))
170 		pci_disable_sriov(pf->pdev);
171 	else
172 		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
173 
174 	mutex_lock(&vfs->table_lock);
175 
176 	ice_for_each_vf(pf, bkt, vf) {
177 		mutex_lock(&vf->cfg_lock);
178 
179 		ice_eswitch_detach_vf(pf, vf);
180 		ice_dis_vf_qs(vf);
181 
182 		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
183 			/* disable VF qp mappings and set VF disable state */
184 			ice_dis_vf_mappings(vf);
185 			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
186 			ice_free_vf_res(vf);
187 		}
188 
189 		if (!pci_vfs_assigned(pf->pdev)) {
190 			u32 reg_idx, bit_idx;
191 
192 			reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
193 			bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
194 			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
195 		}
196 
197 		mutex_unlock(&vf->cfg_lock);
198 	}
199 
200 	if (ice_sriov_free_msix_res(pf))
201 		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
202 
203 	vfs->num_qps_per = 0;
204 	ice_free_vf_entries(pf);
205 
206 	mutex_unlock(&vfs->table_lock);
207 
208 	clear_bit(ICE_VF_DIS, pf->state);
209 	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
210 }
211 
212 /**
213  * ice_vf_vsi_setup - Set up a VF VSI
214  * @vf: VF to setup VSI for
215  *
216  * Returns pointer to the successfully allocated VSI struct on success,
217  * otherwise returns NULL on failure.
218  */
219 static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
220 {
221 	struct ice_vsi_cfg_params params = {};
222 	struct ice_pf *pf = vf->pf;
223 	struct ice_vsi *vsi;
224 
225 	params.type = ICE_VSI_VF;
226 	params.port_info = ice_vf_get_port_info(vf);
227 	params.vf = vf;
228 	params.flags = ICE_VSI_FLAG_INIT;
229 
230 	vsi = ice_vsi_setup(pf, &params);
231 
232 	if (!vsi) {
233 		dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
234 		ice_vf_invalidate_vsi(vf);
235 		return NULL;
236 	}
237 
238 	vf->lan_vsi_idx = vsi->idx;
239 
240 	return vsi;
241 }
242 
243 
244 /**
245  * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
246  * @vf: VF to enable MSIX mappings for
247  *
248  * Some of the registers need to be indexed/configured using hardware global
249  * device values, while other registers take 0-based values that are relative
250  * to the PF's own vector space.
251  */
252 static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
253 {
254 	int device_based_first_msix, device_based_last_msix;
255 	int pf_based_first_msix, pf_based_last_msix, v;
256 	struct ice_pf *pf = vf->pf;
257 	int device_based_vf_id;
258 	struct ice_hw *hw;
259 	u32 reg;
260 
261 	hw = &pf->hw;
262 	pf_based_first_msix = vf->first_vector_idx;
263 	pf_based_last_msix = (pf_based_first_msix + vf->num_msix) - 1;
264 
265 	device_based_first_msix = pf_based_first_msix +
266 		pf->hw.func_caps.common_cap.msix_vector_first_id;
267 	device_based_last_msix =
268 		(device_based_first_msix + vf->num_msix) - 1;
269 	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
270 
271 	reg = FIELD_PREP(VPINT_ALLOC_FIRST_M, device_based_first_msix) |
272 	      FIELD_PREP(VPINT_ALLOC_LAST_M, device_based_last_msix) |
273 	      VPINT_ALLOC_VALID_M;
274 	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
275 
276 	reg = FIELD_PREP(VPINT_ALLOC_PCI_FIRST_M, device_based_first_msix) |
277 	      FIELD_PREP(VPINT_ALLOC_PCI_LAST_M, device_based_last_msix) |
278 	      VPINT_ALLOC_PCI_VALID_M;
279 	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
280 
281 	/* map each interrupt vector to its VF and PF function */
282 	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
283 		reg = FIELD_PREP(GLINT_VECT2FUNC_VF_NUM_M, device_based_vf_id) |
284 		      FIELD_PREP(GLINT_VECT2FUNC_PF_NUM_M, hw->pf_id);
285 		wr32(hw, GLINT_VECT2FUNC(v), reg);
286 	}
287 
288 	/* Map mailbox interrupt to VF MSI-X vector 0 */
289 	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
290 }
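/*
 * Worked example of the two index spaces above (hypothetical values): with
 * msix_vector_first_id == 128 and a VF owning PF-based vectors 96..100,
 * VPINT_ALLOC/VPINT_ALLOC_PCI are programmed with the device-based range
 * 224..228, while GLINT_VECT2FUNC is still indexed with the PF-based
 * values 96..100.
 */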
291 
292 /**
293  * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
294  * @vf: VF to enable the mappings for
295  * @max_txq: max Tx queues allowed on the VF's VSI
296  * @max_rxq: max Rx queues allowed on the VF's VSI
297  */
298 static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
299 {
300 	struct device *dev = ice_pf_to_dev(vf->pf);
301 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
302 	struct ice_hw *hw = &vf->pf->hw;
303 	u32 reg;
304 
305 	if (WARN_ON(!vsi))
306 		return;
307 
308 	/* set regardless of mapping mode */
309 	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
310 
311 	/* VF Tx queues allocation */
312 	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
313 		/* set the VF PF Tx queue range
314 		 * VFNUMQ value should be set to (number of queues - 1). A value
315 		 * of 0 means 1 queue and a value of 255 means 256 queues
316 		 */
317 		reg = FIELD_PREP(VPLAN_TX_QBASE_VFFIRSTQ_M, vsi->txq_map[0]) |
318 		      FIELD_PREP(VPLAN_TX_QBASE_VFNUMQ_M, max_txq - 1);
319 		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
320 	} else {
321 		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
322 	}
323 
324 	/* set regardless of mapping mode */
325 	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
326 
327 	/* VF Rx queues allocation */
328 	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
329 		/* set the VF PF Rx queue range
330 		 * VFNUMQ value should be set to (number of queues - 1). A value
331 		 * of 0 means 1 queue and a value of 255 means 256 queues
332 		 */
333 		reg = FIELD_PREP(VPLAN_RX_QBASE_VFFIRSTQ_M, vsi->rxq_map[0]) |
334 		      FIELD_PREP(VPLAN_RX_QBASE_VFNUMQ_M, max_rxq - 1);
335 		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
336 	} else {
337 		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
338 	}
339 }
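/*
 * Example of the VFNUMQ encoding described above, which stores
 * (number of queues - 1): a VF with 4 Tx queues starting at PF queue 32
 * would (hypothetical numbers) build its register value as
 *
 *	reg = FIELD_PREP(VPLAN_TX_QBASE_VFFIRSTQ_M, 32) |
 *	      FIELD_PREP(VPLAN_TX_QBASE_VFNUMQ_M, 4 - 1);
 */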
340 
341 /**
342  * ice_ena_vf_mappings - enable VF MSIX and queue mapping
343  * @vf: pointer to the VF structure
344  */
345 static void ice_ena_vf_mappings(struct ice_vf *vf)
346 {
347 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
348 
349 	if (WARN_ON(!vsi))
350 		return;
351 
352 	ice_ena_vf_msix_mappings(vf);
353 	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
354 }
355 
356 /**
357  * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
358  * @vf: VF to calculate the register index for
359  * @q_vector: a q_vector associated to the VF
360  */
361 void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
362 {
363 	if (!vf || !q_vector)
364 		return;
365 
366 	/* always add one to account for the OICR being the first MSIX */
367 	q_vector->vf_reg_idx = q_vector->v_idx + ICE_NONQ_VECS_VF;
368 	q_vector->reg_idx = vf->first_vector_idx + q_vector->vf_reg_idx;
369 }
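/*
 * Example of the index math above (hypothetical values): with
 * first_vector_idx == 96 and the single non-queue (OICR) vector occupying
 * relative index 0, q_vector 0 gets vf_reg_idx == 1 and reg_idx == 97,
 * q_vector 1 gets vf_reg_idx == 2 and reg_idx == 98, and so on.
 */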
370 
371 /**
372  * ice_sriov_set_msix_res - Set any used MSIX resources
373  * @pf: pointer to PF structure
374  * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
375  *
376  * This function allows SR-IOV resources to be taken from the end of the PF's
377  * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
378  * just set the pf->sriov_base_vector and return success.
379  *
380  * If there are not enough resources available, return an error. This should
381  * always be caught by ice_set_per_vf_res().
382  *
383  * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
384  * in the PF's space available for SR-IOV.
385  */
386 static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
387 {
388 	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
389 	int vectors_used = ice_get_max_used_msix_vector(pf);
390 	int sriov_base_vector;
391 
392 	sriov_base_vector = total_vectors - num_msix_needed;
393 
394 	/* make sure we only grab irq_tracker entries from the list end and
395 	 * that we have enough available MSIX vectors
396 	 */
397 	if (sriov_base_vector < vectors_used)
398 		return -EINVAL;
399 
400 	pf->sriov_base_vector = sriov_base_vector;
401 
402 	return 0;
403 }
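/*
 * Worked example (hypothetical sizes): with 1024 total function MSI-X
 * vectors, 64 vectors needed for SR-IOV and 200 vectors already in use,
 * sriov_base_vector = 1024 - 64 = 960, which is above vectors_used (200),
 * so the reservation succeeds.
 */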
404 
405 /**
406  * ice_set_per_vf_res - check if vectors and queues are available
407  * @pf: pointer to the PF structure
408  * @num_vfs: the number of SR-IOV VFs being configured
409  *
410  * First, determine the HW interrupts available from the common pool. If we
411  * allocate fewer VFs, each VF gets more vectors and can enable more queues.
412  * Note that this does not grab any vectors from the SW pool already allocated.
413  * Also note that all vector counts include one for each VF's miscellaneous
414  * interrupt vector (i.e. OICR).
415  *
416  * Minimum VFs - 2 vectors, 1 queue pair
417  * Small VFs - 5 vectors, 4 queue pairs
418  * Medium VFs - 17 vectors, 16 queue pairs
419  *
420  * Second, determine number of queue pairs per VF by starting with a pre-defined
421  * maximum each VF supports. If this is not possible, then we adjust based on
422  * queue pairs available on the device.
423  *
424  * Lastly, set the queue and MSI-X VF variables tracked by the PF so they can
425  * be used by each VF during VF initialization and reset.
426  */
427 static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
428 {
429 	int vectors_used = ice_get_max_used_msix_vector(pf);
430 	u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
431 	int msix_avail_per_vf, msix_avail_for_sriov;
432 	struct device *dev = ice_pf_to_dev(pf);
433 	int err;
434 
435 	lockdep_assert_held(&pf->vfs.table_lock);
436 
437 	if (!num_vfs)
438 		return -EINVAL;
439 
440 	/* determine MSI-X resources per VF */
441 	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
442 		vectors_used;
443 	msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
444 	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
445 		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
446 	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
447 		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
448 	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
449 		num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
450 	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
451 		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
452 	} else {
453 		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
454 			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
455 			num_vfs);
456 		return -ENOSPC;
457 	}
458 
459 	num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
460 			ICE_MAX_RSS_QS_PER_VF);
461 	avail_qs = ice_get_avail_txq_count(pf) / num_vfs;
462 	if (!avail_qs)
463 		num_txq = 0;
464 	else if (num_txq > avail_qs)
465 		num_txq = rounddown_pow_of_two(avail_qs);
466 
467 	num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
468 			ICE_MAX_RSS_QS_PER_VF);
469 	avail_qs = ice_get_avail_rxq_count(pf) / num_vfs;
470 	if (!avail_qs)
471 		num_rxq = 0;
472 	else if (num_rxq > avail_qs)
473 		num_rxq = rounddown_pow_of_two(avail_qs);
474 
475 	if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) {
476 		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
477 			ICE_MIN_QS_PER_VF, num_vfs);
478 		return -ENOSPC;
479 	}
480 
481 	err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
482 	if (err) {
483 		dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n",
484 			num_vfs, err);
485 		return err;
486 	}
487 
488 	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
489 	pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
490 	pf->vfs.num_msix_per = num_msix_per_vf;
491 	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
492 		 num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per);
493 
494 	return 0;
495 }
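/*
 * Worked example of the tiering above (hypothetical counts): with 200
 * MSI-X vectors left for SR-IOV and 16 VFs requested, 200 / 16 = 12
 * vectors fit per VF, which is at least the small tier (5) but below the
 * medium tier (17), so each VF gets 5 vectors; the per-VF queue count is
 * then capped at 5 - 1 = 4 once the OICR vector is subtracted.
 */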
496 
497 /**
498  * ice_sriov_get_irqs - get irqs for the SR-IOV use case
499  * @pf: pointer to PF structure
500  * @needed: number of irqs to get
501  *
502  * Reserve @needed interrupt vectors for SR-IOV and return the first global
503  * irq index of the reserved range. The index is used when accessing
504  * PF-relative registers such as GLINT_VECT2FUNC and GLINT_DYN_CTL, and
505  * becomes the VF's first_vector_idx.
506  *
507  * Only SR-IOV specific vectors are tracked in sriov_irq_bm, and they are
508  * allocated from the end of the global irq index: the first bit in
509  * sriov_irq_bm corresponds to the last irq index, and so on. This simplifies
510  * extending the SR-IOV vector space, since the vectors always sit between
511  * sriov_base_vector and the last irq index, while sriov_base_vector can move.
512  *
513  * Return: first global irq index on success, -ENOENT if there is no room.
514  */
515 static int ice_sriov_get_irqs(struct ice_pf *pf, u16 needed)
516 {
517 	int res = bitmap_find_next_zero_area(pf->sriov_irq_bm,
518 					     pf->sriov_irq_size, 0, needed, 0);
519 	/* conversion from number in bitmap to global irq index */
520 	int index = pf->sriov_irq_size - res - needed;
521 
522 	if (res >= pf->sriov_irq_size || index < pf->sriov_base_vector)
523 		return -ENOENT;
524 
525 	bitmap_set(pf->sriov_irq_bm, res, needed);
526 	return index;
527 }
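/*
 * Worked example of the reversed bitmap above (hypothetical sizes): with
 * sriov_irq_size == 64 and needed == 4, a free area found at bit 10 maps
 * to global index 64 - 10 - 4 = 50, i.e. the VF uses global irqs 50..53;
 * bit 0 of sriov_irq_bm always tracks the last global irq index (63 here).
 */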
528 
529 /**
530  * ice_sriov_free_irqs - free irqs used by the VF
531  * @pf: pointer to PF structure
532  * @vf: pointer to VF structure
533  */
534 static void ice_sriov_free_irqs(struct ice_pf *pf, struct ice_vf *vf)
535 {
536 	/* Move back from first vector index to first index in bitmap */
537 	int bm_i = pf->sriov_irq_size - vf->first_vector_idx - vf->num_msix;
538 
539 	bitmap_clear(pf->sriov_irq_bm, bm_i, vf->num_msix);
540 	vf->first_vector_idx = 0;
541 }
542 
543 /**
544  * ice_init_vf_vsi_res - initialize/setup VF VSI resources
545  * @vf: VF to initialize/setup the VSI for
546  *
547  * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
548  * the VF VSI's broadcast filter. It is only used during initial VF creation.
549  */
550 static int ice_init_vf_vsi_res(struct ice_vf *vf)
551 {
552 	struct ice_pf *pf = vf->pf;
553 	struct ice_vsi *vsi;
554 	int err;
555 
556 	vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
557 	if (vf->first_vector_idx < 0)
558 		return -ENOMEM;
559 
560 	vsi = ice_vf_vsi_setup(vf);
561 	if (!vsi)
562 		return -ENOMEM;
563 
564 	err = ice_vf_init_host_cfg(vf, vsi);
565 	if (err)
566 		goto release_vsi;
567 
568 	return 0;
569 
570 release_vsi:
571 	ice_vf_vsi_release(vf);
572 	return err;
573 }
574 
575 /**
576  * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
577  * @pf: PF the VFs are associated with
578  */
579 static int ice_start_vfs(struct ice_pf *pf)
580 {
581 	struct ice_hw *hw = &pf->hw;
582 	unsigned int bkt, it_cnt;
583 	struct ice_vf *vf;
584 	int retval;
585 
586 	lockdep_assert_held(&pf->vfs.table_lock);
587 
588 	it_cnt = 0;
589 	ice_for_each_vf(pf, bkt, vf) {
590 		vf->vf_ops->clear_reset_trigger(vf);
591 
592 		retval = ice_init_vf_vsi_res(vf);
593 		if (retval) {
594 			dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
595 				vf->vf_id, retval);
596 			goto teardown;
597 		}
598 
599 		retval = ice_eswitch_attach_vf(pf, vf);
600 		if (retval) {
601 			dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d",
602 				vf->vf_id, retval);
603 			ice_vf_vsi_release(vf);
604 			goto teardown;
605 		}
606 
607 		set_bit(ICE_VF_STATE_INIT, vf->vf_states);
608 		ice_ena_vf_mappings(vf);
609 		wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
610 		it_cnt++;
611 	}
612 
613 	ice_flush(hw);
614 	return 0;
615 
616 teardown:
617 	ice_for_each_vf(pf, bkt, vf) {
618 		if (it_cnt == 0)
619 			break;
620 
621 		ice_dis_vf_mappings(vf);
622 		ice_vf_vsi_release(vf);
623 		it_cnt--;
624 	}
625 
626 	return retval;
627 }
628 
629 /**
630  * ice_sriov_free_vf - Free VF memory after all references are dropped
631  * @vf: pointer to VF to free
632  *
633  * Called by ice_put_vf through ice_release_vf once the last reference to a VF
634  * structure has been dropped.
635  */
636 static void ice_sriov_free_vf(struct ice_vf *vf)
637 {
638 	mutex_destroy(&vf->cfg_lock);
639 
640 	kfree_rcu(vf, rcu);
641 }
642 
643 /**
644  * ice_sriov_clear_reset_state - clears VF Reset status register
645  * @vf: the vf to configure
646  */
647 static void ice_sriov_clear_reset_state(struct ice_vf *vf)
648 {
649 	struct ice_hw *hw = &vf->pf->hw;
650 
651 	/* Clear the reset status register so that VF immediately sees that
652 	 * the device is resetting, even if hardware hasn't yet gotten around
653 	 * to clearing VFGEN_RSTAT for us.
654 	 */
655 	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_INPROGRESS);
656 }
657 
658 /**
659  * ice_sriov_clear_mbx_register - clears SRIOV VF's mailbox registers
660  * @vf: the vf to configure
661  */
662 static void ice_sriov_clear_mbx_register(struct ice_vf *vf)
663 {
664 	struct ice_pf *pf = vf->pf;
665 
666 	wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0);
667 	wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0);
668 }
669 
670 /**
671  * ice_sriov_trigger_reset_register - trigger VF reset for SRIOV VF
672  * @vf: pointer to VF structure
673  * @is_vflr: true if reset occurred due to VFLR
674  *
675  * Trigger and cleanup after a VF reset for a SR-IOV VF.
676  */
677 static void ice_sriov_trigger_reset_register(struct ice_vf *vf, bool is_vflr)
678 {
679 	struct ice_pf *pf = vf->pf;
680 	u32 reg, reg_idx, bit_idx;
681 	unsigned int vf_abs_id, i;
682 	struct device *dev;
683 	struct ice_hw *hw;
684 
685 	dev = ice_pf_to_dev(pf);
686 	hw = &pf->hw;
687 	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
688 
689 	/* In the case of a VFLR, HW has already reset the VF and we just need
690 	 * to clean up. Otherwise we must first trigger the reset using the
691 	 * VFRTRIG register.
692 	 */
693 	if (!is_vflr) {
694 		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
695 		reg |= VPGEN_VFRTRIG_VFSWR_M;
696 		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
697 	}
698 
699 	/* clear the VFLR bit in GLGEN_VFLRSTAT */
700 	reg_idx = (vf_abs_id) / 32;
701 	bit_idx = (vf_abs_id) % 32;
702 	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
703 	ice_flush(hw);
704 
705 	wr32(hw, PF_PCI_CIAA,
706 	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
707 	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
708 		reg = rd32(hw, PF_PCI_CIAD);
709 		/* no transactions pending so stop polling */
710 		if ((reg & VF_TRANS_PENDING_M) == 0)
711 			break;
712 
713 		dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
714 		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
715 	}
716 }
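/*
 * Example of the VFLR bit lookup above: GLGEN_VFLRSTAT is an array of
 * 32-bit registers with one bit per absolute VF ID, so a (hypothetical)
 * vf_abs_id of 70 lands in register index 70 / 32 == 2, bit 70 % 32 == 6.
 */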
717 
718 /**
719  * ice_sriov_poll_reset_status - poll SRIOV VF reset status
720  * @vf: pointer to VF structure
721  *
722  * Returns true when reset is successful, else returns false
723  */
724 static bool ice_sriov_poll_reset_status(struct ice_vf *vf)
725 {
726 	struct ice_pf *pf = vf->pf;
727 	unsigned int i;
728 	u32 reg;
729 
730 	for (i = 0; i < 10; i++) {
731 		/* VF reset requires driver to first reset the VF and then
732 		 * poll the status register to make sure that the reset
733 		 * completed successfully.
734 		 */
735 		reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id));
736 		if (reg & VPGEN_VFRSTAT_VFRD_M)
737 			return true;
738 
739 		/* only sleep if the reset is not done */
740 		usleep_range(10, 20);
741 	}
742 	return false;
743 }
744 
745 /**
746  * ice_sriov_clear_reset_trigger - enable VF to access hardware
747  * @vf: VF to enable hardware access for
748  */
749 static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)
750 {
751 	struct ice_hw *hw = &vf->pf->hw;
752 	u32 reg;
753 
754 	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
755 	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
756 	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
757 	ice_flush(hw);
758 }
759 
760 /**
761  * ice_sriov_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
762  * @vf: VF to perform tasks on
763  */
764 static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)
765 {
766 	ice_ena_vf_mappings(vf);
767 	wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
768 }
769 
770 static const struct ice_vf_ops ice_sriov_vf_ops = {
771 	.reset_type = ICE_VF_RESET,
772 	.free = ice_sriov_free_vf,
773 	.clear_reset_state = ice_sriov_clear_reset_state,
774 	.clear_mbx_register = ice_sriov_clear_mbx_register,
775 	.trigger_reset_register = ice_sriov_trigger_reset_register,
776 	.poll_reset_status = ice_sriov_poll_reset_status,
777 	.clear_reset_trigger = ice_sriov_clear_reset_trigger,
778 	.irq_close = NULL,
779 	.post_vsi_rebuild = ice_sriov_post_vsi_rebuild,
780 };
781 
782 /**
783  * ice_create_vf_entries - Allocate and insert VF entries
784  * @pf: pointer to the PF structure
785  * @num_vfs: the number of VFs to allocate
786  *
787  * Allocate new VF entries and insert them into the hash table. Set some
788  * basic default fields for initializing the new VFs.
789  *
790  * After this function exits, the hash table will have num_vfs entries
791  * inserted.
792  *
793  * Returns 0 on success or an integer error code on failure.
794  */
795 static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
796 {
797 	struct pci_dev *pdev = pf->pdev;
798 	struct ice_vfs *vfs = &pf->vfs;
799 	struct pci_dev *vfdev = NULL;
800 	struct ice_vf *vf;
801 	u16 vf_pdev_id;
802 	int err, pos;
803 
804 	lockdep_assert_held(&vfs->table_lock);
805 
806 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
807 	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_pdev_id);
808 
809 	for (u16 vf_id = 0; vf_id < num_vfs; vf_id++) {
810 		vf = kzalloc(sizeof(*vf), GFP_KERNEL);
811 		if (!vf) {
812 			err = -ENOMEM;
813 			goto err_free_entries;
814 		}
815 		kref_init(&vf->refcnt);
816 
817 		vf->pf = pf;
818 		vf->vf_id = vf_id;
819 
820 		/* set sriov vf ops for VFs created during SRIOV flow */
821 		vf->vf_ops = &ice_sriov_vf_ops;
822 
823 		ice_initialize_vf_entry(vf);
824 
825 		do {
826 			vfdev = pci_get_device(pdev->vendor, vf_pdev_id, vfdev);
827 		} while (vfdev && vfdev->physfn != pdev);
828 		vf->vfdev = vfdev;
829 		vf->vf_sw_id = pf->first_sw;
830 
831 		pci_dev_get(vfdev);
832 
833 		hash_add_rcu(vfs->table, &vf->entry, vf_id);
834 	}
835 
836 	/* Decrement of refcount done by pci_get_device() inside the loop does
837 	 * not touch the last iteration's vfdev, so it has to be done manually
838 	 * to balance pci_dev_get() added within the loop.
839 	 */
840 	pci_dev_put(vfdev);
841 
842 	return 0;
843 
844 err_free_entries:
845 	ice_free_vf_entries(pf);
846 	return err;
847 }
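/*
 * The pci_get_device() walk above depends on its reference semantics; a
 * minimal standalone sketch (hypothetical device ID and PF pointer):
 *
 *	struct pci_dev *dev = NULL;
 *
 *	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1889, dev))) {
 *		// each call drops the reference on the previous device and
 *		// returns the next match with a fresh reference held
 *		if (dev->physfn == my_pf_pdev)
 *			break;
 *	}
 *	// if dev is non-NULL the caller owns one reference and must
 *	// eventually drop it with pci_dev_put(dev)
 */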
848 
849 /**
850  * ice_ena_vfs - enable VFs so they are ready to be used
851  * @pf: pointer to the PF structure
852  * @num_vfs: number of VFs to enable
853  */
854 static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
855 {
856 	int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
857 	struct device *dev = ice_pf_to_dev(pf);
858 	struct ice_hw *hw = &pf->hw;
859 	int ret;
860 
861 	pf->sriov_irq_bm = bitmap_zalloc(total_vectors, GFP_KERNEL);
862 	if (!pf->sriov_irq_bm)
863 		return -ENOMEM;
864 	pf->sriov_irq_size = total_vectors;
865 
866 	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
867 	wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
868 	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
869 	set_bit(ICE_OICR_INTR_DIS, pf->state);
870 	ice_flush(hw);
871 
872 	ret = pci_enable_sriov(pf->pdev, num_vfs);
873 	if (ret)
874 		goto err_unroll_intr;
875 
876 	mutex_lock(&pf->vfs.table_lock);
877 
878 	ret = ice_set_per_vf_res(pf, num_vfs);
879 	if (ret) {
880 		dev_err(dev, "Not enough resources for %d VFs, err %d. Try with fewer number of VFs\n",
881 			num_vfs, ret);
882 		goto err_unroll_sriov;
883 	}
884 
885 	ret = ice_create_vf_entries(pf, num_vfs);
886 	if (ret) {
887 		dev_err(dev, "Failed to allocate VF entries for %d VFs\n",
888 			num_vfs);
889 		goto err_unroll_sriov;
890 	}
891 
892 	ret = ice_start_vfs(pf);
893 	if (ret) {
894 		dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
895 		ret = -EAGAIN;
896 		goto err_unroll_vf_entries;
897 	}
898 
899 	clear_bit(ICE_VF_DIS, pf->state);
900 
901 	/* rearm global interrupts */
902 	if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
903 		ice_irq_dynamic_ena(hw, NULL, NULL);
904 
905 	mutex_unlock(&pf->vfs.table_lock);
906 
907 	return 0;
908 
909 err_unroll_vf_entries:
910 	ice_free_vf_entries(pf);
911 err_unroll_sriov:
912 	mutex_unlock(&pf->vfs.table_lock);
913 	pci_disable_sriov(pf->pdev);
914 err_unroll_intr:
915 	/* rearm interrupts here */
916 	ice_irq_dynamic_ena(hw, NULL, NULL);
917 	clear_bit(ICE_OICR_INTR_DIS, pf->state);
918 	bitmap_free(pf->sriov_irq_bm);
919 	return ret;
920 }
921 
922 /**
923  * ice_pci_sriov_ena - Enable or change number of VFs
924  * @pf: pointer to the PF structure
925  * @num_vfs: number of VFs to allocate
926  *
927  * Returns 0 on success and negative on failure
928  */
929 static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
930 {
931 	struct device *dev = ice_pf_to_dev(pf);
932 	int err;
933 
934 	if (!num_vfs) {
935 		ice_free_vfs(pf);
936 		return 0;
937 	}
938 
939 	if (num_vfs > pf->vfs.num_supported) {
940 		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
941 			num_vfs, pf->vfs.num_supported);
942 		return -EOPNOTSUPP;
943 	}
944 
945 	dev_info(dev, "Enabling %d VFs\n", num_vfs);
946 	err = ice_ena_vfs(pf, num_vfs);
947 	if (err) {
948 		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
949 		return err;
950 	}
951 
952 	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
953 	return 0;
954 }
955 
956 /**
957  * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
958  * @pf: PF to enable SR-IOV on
959  */
960 static int ice_check_sriov_allowed(struct ice_pf *pf)
961 {
962 	struct device *dev = ice_pf_to_dev(pf);
963 
964 	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
965 		dev_err(dev, "This device is not capable of SR-IOV\n");
966 		return -EOPNOTSUPP;
967 	}
968 
969 	if (ice_is_safe_mode(pf)) {
970 		dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
971 		return -EOPNOTSUPP;
972 	}
973 
974 	if (!ice_pf_state_is_nominal(pf)) {
975 		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
976 		return -EBUSY;
977 	}
978 
979 	return 0;
980 }
981 
982 /**
983  * ice_sriov_get_vf_total_msix - return number of MSI-X used by VFs
984  * @pdev: pointer to pci_dev struct
985  *
986  * The function is called via sysfs ops
987  */
988 u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev)
989 {
990 	struct ice_pf *pf = pci_get_drvdata(pdev);
991 
992 	return pf->sriov_irq_size - ice_get_max_used_msix_vector(pf);
993 }
994 
995 static int ice_sriov_move_base_vector(struct ice_pf *pf, int move)
996 {
997 	if (pf->sriov_base_vector - move < ice_get_max_used_msix_vector(pf))
998 		return -ENOMEM;
999 
1000 	pf->sriov_base_vector -= move;
1001 	return 0;
1002 }
1003 
1004 static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
1005 {
1006 	u16 vf_ids[ICE_MAX_SRIOV_VFS];
1007 	struct ice_vf *tmp_vf;
1008 	int to_remap = 0, bkt;
1009 
1010 	/* For better irq usage, try to remap the irqs of VFs
1011 	 * that aren't running yet
1012 	 */
1013 	ice_for_each_vf(pf, bkt, tmp_vf) {
1014 		/* skip VF which is changing the number of MSI-X */
1015 		if (restricted_id == tmp_vf->vf_id ||
1016 		    test_bit(ICE_VF_STATE_ACTIVE, tmp_vf->vf_states))
1017 			continue;
1018 
1019 		ice_dis_vf_mappings(tmp_vf);
1020 		ice_sriov_free_irqs(pf, tmp_vf);
1021 
1022 		vf_ids[to_remap] = tmp_vf->vf_id;
1023 		to_remap += 1;
1024 	}
1025 
1026 	for (int i = 0; i < to_remap; i++) {
1027 		tmp_vf = ice_get_vf_by_id(pf, vf_ids[i]);
1028 		if (!tmp_vf)
1029 			continue;
1030 
1031 		tmp_vf->first_vector_idx =
1032 			ice_sriov_get_irqs(pf, tmp_vf->num_msix);
1033 		/* there is no need to rebuild the VSI as we are only changing
1034 		 * the vector indexes, not the number of MSI-X vectors or queues
1035 		 */
1036 		ice_ena_vf_mappings(tmp_vf);
1037 		ice_put_vf(tmp_vf);
1038 	}
1039 }
1040 
1041 /**
1042  * ice_sriov_set_msix_vec_count - set a new MSI-X vector count on a VF
1043  * @vf_dev: pointer to pci_dev struct of VF device
1044  * @msix_vec_count: new value for MSI-X amount on this VF
1045  *
1046  * Set requested MSI-X, queues and registers for @vf_dev.
1047  *
1048  * First do some sanity checks, e.g. that there are any VFs and that the new
1049  * value is valid. Then disable the old mapping (MSI-X and queue registers),
1050  * change the MSI-X and queue counts, rebuild the VSI and enable the new mapping.
1051  *
1052  * If possible (i.e. no driver is bound to the VF), also try to remap the
1053  * other VFs to keep the irq register usage linear.
1054  */
1055 int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
1056 {
1057 	struct pci_dev *pdev = pci_physfn(vf_dev);
1058 	struct ice_pf *pf = pci_get_drvdata(pdev);
1059 	u16 prev_msix, prev_queues, queues;
1060 	bool needs_rebuild = false;
1061 	struct ice_vsi *vsi;
1062 	struct ice_vf *vf;
1063 	int id;
1064 
1065 	if (!ice_get_num_vfs(pf))
1066 		return -ENOENT;
1067 
1068 	if (!msix_vec_count)
1069 		return 0;
1070 
1071 	queues = msix_vec_count;
1072 	/* add 1 MSI-X for OICR */
1073 	msix_vec_count += 1;
1074 
1075 	if (queues > min(ice_get_avail_txq_count(pf),
1076 			 ice_get_avail_rxq_count(pf)))
1077 		return -EINVAL;
1078 
1079 	if (msix_vec_count < ICE_MIN_INTR_PER_VF)
1080 		return -EINVAL;
1081 
1082 	/* Translate the PCI VF's function number (devfn) to its VF ID */
1083 	for (id = 0; id < pci_num_vf(pdev); id++) {
1084 		if (vf_dev->devfn == pci_iov_virtfn_devfn(pdev, id))
1085 			break;
1086 	}
1087 
1088 	if (id == pci_num_vf(pdev))
1089 		return -ENOENT;
1090 
1091 	vf = ice_get_vf_by_id(pf, id);
1092 
1093 	if (!vf)
1094 		return -ENOENT;
1095 
1096 	vsi = ice_get_vf_vsi(vf);
1097 	if (!vsi) {
1098 		ice_put_vf(vf);
1099 		return -ENOENT;
1100 	}
1101 
1102 	prev_msix = vf->num_msix;
1103 	prev_queues = vf->num_vf_qs;
1104 
1105 	if (ice_sriov_move_base_vector(pf, msix_vec_count - prev_msix)) {
1106 		ice_put_vf(vf);
1107 		return -ENOSPC;
1108 	}
1109 
1110 	ice_dis_vf_mappings(vf);
1111 	ice_sriov_free_irqs(pf, vf);
1112 
1113 	/* Remap all VFs besides the one now being configured */
1114 	ice_sriov_remap_vectors(pf, vf->vf_id);
1115 
1116 	vf->num_msix = msix_vec_count;
1117 	vf->num_vf_qs = queues;
1118 	vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
1119 	if (vf->first_vector_idx < 0)
1120 		goto unroll;
1121 
1122 	vsi->req_txq = queues;
1123 	vsi->req_rxq = queues;
1124 
1125 	if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
1126 		/* Try to rebuild with previous values */
1127 		needs_rebuild = true;
1128 		goto unroll;
1129 	}
1130 
1131 	dev_info(ice_pf_to_dev(pf),
1132 		 "Changing VF %d resources to %d vectors and %d queues\n",
1133 		 vf->vf_id, vf->num_msix, vf->num_vf_qs);
1134 
1135 	ice_ena_vf_mappings(vf);
1136 	ice_put_vf(vf);
1137 
1138 	return 0;
1139 
1140 unroll:
1141 	dev_info(ice_pf_to_dev(pf),
1142 		 "Can't set %d vectors on VF %d, falling back to %d\n",
1143 		 vf->num_msix, vf->vf_id, prev_msix);
1144 
1145 	vf->num_msix = prev_msix;
1146 	vf->num_vf_qs = prev_queues;
1147 	vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
1148 	if (vf->first_vector_idx < 0) {
1149 		ice_put_vf(vf);
1150 		return -EINVAL;
1151 	}
1152 
1153 	if (needs_rebuild) {
1154 		vsi->req_txq = prev_queues;
1155 		vsi->req_rxq = prev_queues;
1156 
1157 		ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
1158 	}
1159 
1160 	ice_ena_vf_mappings(vf);
1161 	ice_put_vf(vf);
1162 
1163 	return -EINVAL;
1164 }
1165 
1166 /**
1167  * ice_sriov_configure - Enable or change number of VFs via sysfs
1168  * @pdev: pointer to a pci_dev structure
1169  * @num_vfs: number of VFs to allocate or 0 to free VFs
1170  *
1171  * This function is called when the user updates the number of VFs in sysfs. On
1172  * success return whatever num_vfs was set to by the caller. Return negative on
1173  * failure.
1174  */
1175 int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
1176 {
1177 	struct ice_pf *pf = pci_get_drvdata(pdev);
1178 	struct device *dev = ice_pf_to_dev(pf);
1179 	int err;
1180 
1181 	err = ice_check_sriov_allowed(pf);
1182 	if (err)
1183 		return err;
1184 
1185 	if (!num_vfs) {
1186 		if (!pci_vfs_assigned(pdev)) {
1187 			ice_free_vfs(pf);
1188 			return 0;
1189 		}
1190 
1191 		dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
1192 		return -EBUSY;
1193 	}
1194 
1195 	err = ice_pci_sriov_ena(pf, num_vfs);
1196 	if (err)
1197 		return err;
1198 
1199 	return num_vfs;
1200 }
1201 
1202 /**
1203  * ice_process_vflr_event - Free VF resources via IRQ calls
1204  * @pf: pointer to the PF structure
1205  *
1206  * Called from the VFLR IRQ handler to free up VF resources and state
1207  * variables.
1208  */
1209 void ice_process_vflr_event(struct ice_pf *pf)
1210 {
1211 	struct ice_hw *hw = &pf->hw;
1212 	struct ice_vf *vf;
1213 	unsigned int bkt;
1214 	u32 reg;
1215 
1216 	if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
1217 	    !ice_has_vfs(pf))
1218 		return;
1219 
1220 	mutex_lock(&pf->vfs.table_lock);
1221 	ice_for_each_vf(pf, bkt, vf) {
1222 		u32 reg_idx, bit_idx;
1223 
1224 		reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
1225 		bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
1226 		/* read the GLGEN_VFLRSTAT register to find out which VFs got a VFLR */
1227 		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
1228 		if (reg & BIT(bit_idx))
1229 			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
1230 			ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
1231 	}
1232 	mutex_unlock(&pf->vfs.table_lock);
1233 }
1234 
1235 /**
1236  * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
1237  * @pf: PF used to index all VFs
1238  * @pfq: queue index relative to the PF's function space
1239  *
1240  * If no VF is found that owns the pfq then return NULL, otherwise return a
1241  * pointer to the VF that owns the pfq.
1242  *
1243  * If this function returns non-NULL, it acquires a reference count of the VF
1244  * structure. The caller is responsible for calling ice_put_vf() to drop this
1245  * reference.
1246  */
1247 static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
1248 {
1249 	struct ice_vf *vf;
1250 	unsigned int bkt;
1251 
1252 	rcu_read_lock();
1253 	ice_for_each_vf_rcu(pf, bkt, vf) {
1254 		struct ice_vsi *vsi;
1255 		u16 rxq_idx;
1256 
1257 		vsi = ice_get_vf_vsi(vf);
1258 		if (!vsi)
1259 			continue;
1260 
1261 		ice_for_each_rxq(vsi, rxq_idx)
1262 			if (vsi->rxq_map[rxq_idx] == pfq) {
1263 				struct ice_vf *found;
1264 
1265 				if (kref_get_unless_zero(&vf->refcnt))
1266 					found = vf;
1267 				else
1268 					found = NULL;
1269 				rcu_read_unlock();
1270 				return found;
1271 			}
1272 	}
1273 	rcu_read_unlock();
1274 
1275 	return NULL;
1276 }
1277 
1278 /**
1279  * ice_globalq_to_pfq - convert from global queue index to PF space queue index
1280  * @pf: PF used for conversion
1281  * @globalq: global queue index used to convert to PF space queue index
1282  */
1283 static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
1284 {
1285 	return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
1286 }
1287 
1288 /**
1289  * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
1290  * @pf: PF that the LAN overflow event happened on
1291  * @event: structure holding the event information for the LAN overflow event
1292  *
1293  * Determine if the LAN overflow event was caused by a VF queue. If it was not
1294  * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a
1295  * reset on the offending VF.
1296  */
1297 void
1298 ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1299 {
1300 	u32 gldcb_rtctq, queue;
1301 	struct ice_vf *vf;
1302 
1303 	gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
1304 	dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
1305 
1306 	/* event returns device global Rx queue number */
1307 	queue = FIELD_GET(GLDCB_RTCTQ_RXQNUM_M, gldcb_rtctq);
1308 
1309 	vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
1310 	if (!vf)
1311 		return;
1312 
1313 	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
1314 	ice_put_vf(vf);
1315 }
1316 
1317 /**
1318  * ice_set_vf_spoofchk - enable/disable VF spoof checking
1319  * @netdev: network interface device structure
1320  * @vf_id: VF identifier
1321  * @ena: flag to enable or disable feature
1322  *
1323  * Enable or disable VF spoof checking
1324  */
1325 int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
1326 {
1327 	struct ice_netdev_priv *np = netdev_priv(netdev);
1328 	struct ice_pf *pf = np->vsi->back;
1329 	struct ice_vsi *vf_vsi;
1330 	struct device *dev;
1331 	struct ice_vf *vf;
1332 	int ret;
1333 
1334 	dev = ice_pf_to_dev(pf);
1335 
1336 	vf = ice_get_vf_by_id(pf, vf_id);
1337 	if (!vf)
1338 		return -EINVAL;
1339 
1340 	ret = ice_check_vf_ready_for_cfg(vf);
1341 	if (ret)
1342 		goto out_put_vf;
1343 
1344 	vf_vsi = ice_get_vf_vsi(vf);
1345 	if (!vf_vsi) {
1346 		netdev_err(netdev, "VSI %d for VF %d is null\n",
1347 			   vf->lan_vsi_idx, vf->vf_id);
1348 		ret = -EINVAL;
1349 		goto out_put_vf;
1350 	}
1351 
1352 	if (vf_vsi->type != ICE_VSI_VF) {
1353 		netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
1354 			   vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
1355 		ret = -ENODEV;
1356 		goto out_put_vf;
1357 	}
1358 
1359 	if (ena == vf->spoofchk) {
1360 		dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
1361 		ret = 0;
1362 		goto out_put_vf;
1363 	}
1364 
1365 	ret = ice_vsi_apply_spoofchk(vf_vsi, ena);
1366 	if (ret)
1367 		dev_err(dev, "Failed to set spoofchk %s for VF %d VSI %d\n error %d\n",
1368 			ena ? "ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret);
1369 	else
1370 		vf->spoofchk = ena;
1371 
1372 out_put_vf:
1373 	ice_put_vf(vf);
1374 	return ret;
1375 }
1376 
1377 /**
1378  * ice_get_vf_cfg - return VF configuration
1379  * @netdev: network interface device structure
1380  * @vf_id: VF identifier
1381  * @ivi: VF configuration structure
1382  *
1383  * return VF configuration
1384  */
1385 int
1386 ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
1387 {
1388 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
1389 	struct ice_vf *vf;
1390 	int ret;
1391 
1392 	vf = ice_get_vf_by_id(pf, vf_id);
1393 	if (!vf)
1394 		return -EINVAL;
1395 
1396 	ret = ice_check_vf_ready_for_cfg(vf);
1397 	if (ret)
1398 		goto out_put_vf;
1399 
1400 	ivi->vf = vf_id;
1401 	ether_addr_copy(ivi->mac, vf->hw_lan_addr);
1402 
1403 	/* VF configuration for VLAN and applicable QoS */
1404 	ivi->vlan = ice_vf_get_port_vlan_id(vf);
1405 	ivi->qos = ice_vf_get_port_vlan_prio(vf);
1406 	if (ice_vf_is_port_vlan_ena(vf))
1407 		ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf));
1408 
1409 	ivi->trusted = vf->trusted;
1410 	ivi->spoofchk = vf->spoofchk;
1411 	if (!vf->link_forced)
1412 		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
1413 	else if (vf->link_up)
1414 		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
1415 	else
1416 		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
1417 	ivi->max_tx_rate = vf->max_tx_rate;
1418 	ivi->min_tx_rate = vf->min_tx_rate;
1419 
1420 out_put_vf:
1421 	ice_put_vf(vf);
1422 	return ret;
1423 }
1424 
1425 /**
1426  * __ice_set_vf_mac - program VF MAC address
1427  * @pf: PF to configure
1428  * @vf_id: VF identifier
1429  * @mac: MAC address
1430  *
1431  * program VF MAC address
1432  * Return: zero on success or an error code on failure
1433  */
1434 int __ice_set_vf_mac(struct ice_pf *pf, u16 vf_id, const u8 *mac)
1435 {
1436 	struct device *dev;
1437 	struct ice_vf *vf;
1438 	int ret;
1439 
1440 	dev = ice_pf_to_dev(pf);
1441 	if (is_multicast_ether_addr(mac)) {
1442 		dev_err(dev, "%pM not a valid unicast address\n", mac);
1443 		return -EINVAL;
1444 	}
1445 
1446 	vf = ice_get_vf_by_id(pf, vf_id);
1447 	if (!vf)
1448 		return -EINVAL;
1449 
1450 	/* nothing left to do, unicast MAC already set */
1451 	if (ether_addr_equal(vf->dev_lan_addr, mac) &&
1452 	    ether_addr_equal(vf->hw_lan_addr, mac)) {
1453 		ret = 0;
1454 		goto out_put_vf;
1455 	}
1456 
1457 	ret = ice_check_vf_ready_for_cfg(vf);
1458 	if (ret)
1459 		goto out_put_vf;
1460 
1461 	mutex_lock(&vf->cfg_lock);
1462 
1463 	/* VF is notified of its new MAC via the PF's response to the
1464 	 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
1465 	 */
1466 	ether_addr_copy(vf->dev_lan_addr, mac);
1467 	ether_addr_copy(vf->hw_lan_addr, mac);
1468 	if (is_zero_ether_addr(mac)) {
1469 		/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
1470 		vf->pf_set_mac = false;
1471 		dev_info(dev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
1472 			 vf->vf_id);
1473 	} else {
1474 		/* PF will add MAC rule for the VF */
1475 		vf->pf_set_mac = true;
1476 		dev_info(dev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
1477 			 mac, vf_id);
1478 	}
1479 
1480 	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
1481 	mutex_unlock(&vf->cfg_lock);
1482 
1483 out_put_vf:
1484 	ice_put_vf(vf);
1485 	return ret;
1486 }
1487 
1488 /**
1489  * ice_set_vf_mac - .ndo_set_vf_mac handler
1490  * @netdev: network interface device structure
1491  * @vf_id: VF identifier
1492  * @mac: MAC address
1493  *
1494  * program VF MAC address
1495  * Return: zero on success or an error code on failure
1496  */
1497 int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
1498 {
1499 	return __ice_set_vf_mac(ice_netdev_to_pf(netdev), vf_id, mac);
1500 }
1501 
1502 /**
1503  * ice_set_vf_trust - set whether a VF is trusted
1504  * @netdev: network interface device structure
1505  * @vf_id: VF identifier
1506  * @trusted: Boolean value to enable/disable trusted VF
1507  *
1508  * Enable or disable a given VF as trusted
1509  */
1510 int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
1511 {
1512 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
1513 	struct ice_vf *vf;
1514 	int ret;
1515 
1516 	vf = ice_get_vf_by_id(pf, vf_id);
1517 	if (!vf)
1518 		return -EINVAL;
1519 
1520 	if (ice_is_eswitch_mode_switchdev(pf)) {
1521 		dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
1522 		ret = -EOPNOTSUPP;
		goto out_put_vf;
1523 	}
1524 
1525 	ret = ice_check_vf_ready_for_cfg(vf);
1526 	if (ret)
1527 		goto out_put_vf;
1528 
1529 	/* Check if already trusted */
1530 	if (trusted == vf->trusted) {
1531 		ret = 0;
1532 		goto out_put_vf;
1533 	}
1534 
1535 	mutex_lock(&vf->cfg_lock);
1536 
1537 	vf->trusted = trusted;
1538 	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
1539 	dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
1540 		 vf_id, trusted ? "" : "un");
1541 
1542 	mutex_unlock(&vf->cfg_lock);
1543 
1544 out_put_vf:
1545 	ice_put_vf(vf);
1546 	return ret;
1547 }
1548 
1549 /**
1550  * ice_set_vf_link_state - set a VF's link state
1551  * @netdev: network interface device structure
1552  * @vf_id: VF identifier
1553  * @link_state: required link state
1554  *
1555  * Set VF's link state, irrespective of physical link state status
1556  */
1557 int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
1558 {
1559 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
1560 	struct ice_vf *vf;
1561 	int ret;
1562 
1563 	vf = ice_get_vf_by_id(pf, vf_id);
1564 	if (!vf)
1565 		return -EINVAL;
1566 
1567 	ret = ice_check_vf_ready_for_cfg(vf);
1568 	if (ret)
1569 		goto out_put_vf;
1570 
1571 	switch (link_state) {
1572 	case IFLA_VF_LINK_STATE_AUTO:
1573 		vf->link_forced = false;
1574 		break;
1575 	case IFLA_VF_LINK_STATE_ENABLE:
1576 		vf->link_forced = true;
1577 		vf->link_up = true;
1578 		break;
1579 	case IFLA_VF_LINK_STATE_DISABLE:
1580 		vf->link_forced = true;
1581 		vf->link_up = false;
1582 		break;
1583 	default:
1584 		ret = -EINVAL;
1585 		goto out_put_vf;
1586 	}
1587 
1588 	ice_vc_notify_vf_link_state(vf);
1589 
1590 out_put_vf:
1591 	ice_put_vf(vf);
1592 	return ret;
1593 }
1594 
1595 /**
1596  * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
1597  * @pf: PF associated with VFs
1598  */
1599 static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
1600 {
1601 	struct ice_vf *vf;
1602 	unsigned int bkt;
1603 	int rate = 0;
1604 
1605 	rcu_read_lock();
1606 	ice_for_each_vf_rcu(pf, bkt, vf)
1607 		rate += vf->min_tx_rate;
1608 	rcu_read_unlock();
1609 
1610 	return rate;
1611 }
1612 
1613 /**
1614  * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
1615  * @vf: VF trying to configure min_tx_rate
1616  * @min_tx_rate: min Tx rate in Mbps
1617  *
1618  * Check if the min_tx_rate being passed in will cause oversubscription of the
1619  * total min_tx_rate based on the current link speed and all other VFs'
1620  * configured min_tx_rate.
1621  *
1622  * Return true if the passed min_tx_rate would cause oversubscription, else
1623  * return false
1624  */
1625 static bool
1626 ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
1627 {
1628 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1629 	int all_vfs_min_tx_rate;
1630 	int link_speed_mbps;
1631 
1632 	if (WARN_ON(!vsi))
1633 		return false;
1634 
1635 	link_speed_mbps = ice_get_link_speed_mbps(vsi);
1636 	all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);
1637 
1638 	/* this VF's previous rate is being overwritten */
1639 	all_vfs_min_tx_rate -= vf->min_tx_rate;
1640 
1641 	if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
1642 		dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
1643 			min_tx_rate, vf->vf_id,
1644 			all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
1645 			link_speed_mbps);
1646 		return true;
1647 	}
1648 
1649 	return false;
1650 }
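/*
 * Worked example of the check above (hypothetical rates): on a 10000 Mbps
 * link where all VFs' min_tx_rate sums to 9000 Mbps, 500 of which belongs
 * to this VF, a request for 2000 Mbps evaluates as
 * (9000 - 500) + 2000 = 10500 > 10000 and is rejected as oversubscribed
 * by 500 Mbps.
 */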
1651 
1652 /**
1653  * ice_set_vf_bw - set min/max VF bandwidth
1654  * @netdev: network interface device structure
1655  * @vf_id: VF identifier
1656  * @min_tx_rate: Minimum Tx rate in Mbps
1657  * @max_tx_rate: Maximum Tx rate in Mbps
1658  */
1659 int
1660 ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
1661 	      int max_tx_rate)
1662 {
1663 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
1664 	struct ice_vsi *vsi;
1665 	struct device *dev;
1666 	struct ice_vf *vf;
1667 	int ret;
1668 
1669 	dev = ice_pf_to_dev(pf);
1670 
1671 	vf = ice_get_vf_by_id(pf, vf_id);
1672 	if (!vf)
1673 		return -EINVAL;
1674 
1675 	ret = ice_check_vf_ready_for_cfg(vf);
1676 	if (ret)
1677 		goto out_put_vf;
1678 
1679 	vsi = ice_get_vf_vsi(vf);
1680 	if (!vsi) {
1681 		ret = -EINVAL;
1682 		goto out_put_vf;
1683 	}
1684 
1685 	if (min_tx_rate && ice_is_dcb_active(pf)) {
1686 		dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
1687 		ret = -EOPNOTSUPP;
1688 		goto out_put_vf;
1689 	}
1690 
1691 	if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) {
1692 		ret = -EINVAL;
1693 		goto out_put_vf;
1694 	}
1695 
1696 	if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
1697 		ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
1698 		if (ret) {
1699 			dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
1700 				vf->vf_id);
1701 			goto out_put_vf;
1702 		}
1703 
1704 		vf->min_tx_rate = min_tx_rate;
1705 	}
1706 
1707 	if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
1708 		ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
1709 		if (ret) {
1710 			dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
1711 				vf->vf_id);
1712 			goto out_put_vf;
1713 		}
1714 
1715 		vf->max_tx_rate = max_tx_rate;
1716 	}
1717 
1718 out_put_vf:
1719 	ice_put_vf(vf);
1720 	return ret;
1721 }
1722 
1723 /**
1724  * ice_get_vf_stats - populate some stats for the VF
1725  * @netdev: the netdev of the PF
1726  * @vf_id: the host OS identifier (0-255)
1727  * @vf_stats: pointer to the OS memory to be initialized
1728  */
1729 int ice_get_vf_stats(struct net_device *netdev, int vf_id,
1730 		     struct ifla_vf_stats *vf_stats)
1731 {
1732 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
1733 	struct ice_eth_stats *stats;
1734 	struct ice_vsi *vsi;
1735 	struct ice_vf *vf;
1736 	int ret;
1737 
1738 	vf = ice_get_vf_by_id(pf, vf_id);
1739 	if (!vf)
1740 		return -EINVAL;
1741 
1742 	ret = ice_check_vf_ready_for_cfg(vf);
1743 	if (ret)
1744 		goto out_put_vf;
1745 
1746 	vsi = ice_get_vf_vsi(vf);
1747 	if (!vsi) {
1748 		ret = -EINVAL;
1749 		goto out_put_vf;
1750 	}
1751 
1752 	ice_update_eth_stats(vsi);
1753 	stats = &vsi->eth_stats;
1754 
1755 	memset(vf_stats, 0, sizeof(*vf_stats));
1756 
1757 	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
1758 		stats->rx_multicast;
1759 	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
1760 		stats->tx_multicast;
1761 	vf_stats->rx_bytes   = stats->rx_bytes;
1762 	vf_stats->tx_bytes   = stats->tx_bytes;
1763 	vf_stats->broadcast  = stats->rx_broadcast;
1764 	vf_stats->multicast  = stats->rx_multicast;
1765 	vf_stats->rx_dropped = stats->rx_discards;
1766 	vf_stats->tx_dropped = stats->tx_discards;
1767 
1768 out_put_vf:
1769 	ice_put_vf(vf);
1770 	return ret;
1771 }
1772 
1773 /**
1774  * ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported
1775  * @hw: hardware structure used to check the VLAN mode
1776  * @vlan_proto: VLAN TPID being checked
1777  *
1778  * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q
1779  * and ETH_P_8021AD are supported. If the device is configured in Single VLAN
1780  * Mode (SVM), then only ETH_P_8021Q is supported.
1781  */
1782 static bool
1783 ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto)
1784 {
1785 	bool is_supported = false;
1786 
1787 	switch (vlan_proto) {
1788 	case ETH_P_8021Q:
1789 		is_supported = true;
1790 		break;
1791 	case ETH_P_8021AD:
1792 		if (ice_is_dvm_ena(hw))
1793 			is_supported = true;
1794 		break;
1795 	}
1796 
1797 	return is_supported;
1798 }
1799 
1800 /**
1801  * ice_set_vf_port_vlan - program VF port VLAN ID and/or QoS
1802  * @netdev: network interface device structure
1803  * @vf_id: VF identifier
1804  * @vlan_id: VLAN ID being set
1805  * @qos: priority setting
1806  * @vlan_proto: VLAN protocol
1807  *
1808  * program VF Port VLAN ID and/or QoS
1809  */
1810 int
1811 ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
1812 		     __be16 vlan_proto)
1813 {
1814 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
1815 	u16 local_vlan_proto = ntohs(vlan_proto);
1816 	struct device *dev;
1817 	struct ice_vf *vf;
1818 	int ret;
1819 
1820 	dev = ice_pf_to_dev(pf);
1821 
1822 	if (vlan_id >= VLAN_N_VID || qos > 7) {
1823 		dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
1824 			vf_id, vlan_id, qos);
1825 		return -EINVAL;
1826 	}
1827 
1828 	if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) {
1829 		dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n",
1830 			local_vlan_proto);
1831 		return -EPROTONOSUPPORT;
1832 	}
1833 
1834 	vf = ice_get_vf_by_id(pf, vf_id);
1835 	if (!vf)
1836 		return -EINVAL;
1837 
1838 	ret = ice_check_vf_ready_for_cfg(vf);
1839 	if (ret)
1840 		goto out_put_vf;
1841 
1842 	if (ice_vf_get_port_vlan_prio(vf) == qos &&
1843 	    ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto &&
1844 	    ice_vf_get_port_vlan_id(vf) == vlan_id) {
1845 		/* duplicate request, so just return success */
1846 		dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n",
1847 			vlan_id, qos, local_vlan_proto);
1848 		ret = 0;
1849 		goto out_put_vf;
1850 	}
1851 
1852 	mutex_lock(&vf->cfg_lock);
1853 
1854 	vf->port_vlan_info = ICE_VLAN(local_vlan_proto, vlan_id, qos);
1855 	if (ice_vf_is_port_vlan_ena(vf))
1856 		dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n",
1857 			 vlan_id, qos, local_vlan_proto, vf_id);
1858 	else
1859 		dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
1860 
1861 	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
1862 	mutex_unlock(&vf->cfg_lock);
1863 
1864 out_put_vf:
1865 	ice_put_vf(vf);
1866 	return ret;
1867 }
1868 
1869 /**
1870  * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
1871  * @vf: pointer to the VF structure
1872  */
1873 void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
1874 {
1875 	struct ice_pf *pf = vf->pf;
1876 	struct device *dev;
1877 
1878 	dev = ice_pf_to_dev(pf);
1879 
1880 	dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
1881 		 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
1882 		 vf->dev_lan_addr,
1883 		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
1884 			  ? "on" : "off");
1885 }
1886 
1887 /**
1888  * ice_print_vf_tx_mdd_event - print VF Tx malicious driver detect event
1889  * @vf: pointer to the VF structure
1890  */
1891 void ice_print_vf_tx_mdd_event(struct ice_vf *vf)
1892 {
1893 	struct ice_pf *pf = vf->pf;
1894 	struct device *dev;
1895 
1896 	dev = ice_pf_to_dev(pf);
1897 
1898 	dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
1899 		 vf->mdd_tx_events.count, pf->hw.pf_id, vf->vf_id,
1900 		 vf->dev_lan_addr,
1901 		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
1902 			  ? "on" : "off");
1903 }
1904 
1905 /**
1906  * ice_print_vfs_mdd_events - print VFs malicious driver detect event
1907  * @pf: pointer to the PF structure
1908  *
1909  * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
1910  */
1911 void ice_print_vfs_mdd_events(struct ice_pf *pf)
1912 {
1913 	struct ice_vf *vf;
1914 	unsigned int bkt;
1915 
1916 	/* check that there are pending MDD events to print */
1917 	if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
1918 		return;
1919 
1920 	/* VF MDD event logs are rate limited to one second intervals */
1921 	if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1))
1922 		return;
1923 
1924 	pf->vfs.last_printed_mdd_jiffies = jiffies;
1925 
1926 	mutex_lock(&pf->vfs.table_lock);
1927 	ice_for_each_vf(pf, bkt, vf) {
1928 		/* only print Rx MDD event message if there are new events */
1929 		if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
1930 			vf->mdd_rx_events.last_printed =
1931 							vf->mdd_rx_events.count;
1932 			ice_print_vf_rx_mdd_event(vf);
1933 		}
1934 
1935 		/* only print Tx MDD event message if there are new events */
1936 		if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
1937 			vf->mdd_tx_events.last_printed =
1938 							vf->mdd_tx_events.count;
1939 			ice_print_vf_tx_mdd_event(vf);
1940 		}
1941 	}
1942 	mutex_unlock(&pf->vfs.table_lock);
1943 }
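/*
 * The rate limiting above uses the kernel's wrap-safe jiffies helpers; a
 * minimal sketch of the same pattern (hypothetical 'last' timestamp):
 *
 *	if (time_is_after_jiffies(last + HZ))
 *		return;		// under a second since the last print
 *	last = jiffies;
 */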
1944 
1945 /**
1946  * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
1947  * @pf: pointer to the PF structure
1948  *
1949  * Called when recovering from a PF FLR to restore interrupt capability to
1950  * the VFs.
1951  */
1952 void ice_restore_all_vfs_msi_state(struct ice_pf *pf)
1953 {
1954 	struct ice_vf *vf;
1955 	u32 bkt;
1956 
1957 	ice_for_each_vf(pf, bkt, vf)
1958 		pci_restore_msi_state(vf->vfdev);
1959 }
1960