1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2022, Intel Corporation. */
3
4 #include "ice_vf_lib_private.h"
5 #include "ice.h"
6 #include "ice_lib.h"
7 #include "ice_fltr.h"
8 #include "ice_virtchnl_allowlist.h"
9
10 /* Public functions which may be accessed by all driver files */
11
12 /**
13 * ice_get_vf_by_id - Get pointer to VF by ID
14 * @pf: the PF private structure
15 * @vf_id: the VF ID to locate
16 *
17 * Locate and return a pointer to the VF structure associated with a given ID.
18 * Returns NULL if the ID does not have a valid VF structure associated with
19 * it.
20 *
21 * This function takes a reference to the VF, which must be released by
22 * calling ice_put_vf() once the caller is finished accessing the VF structure
23 * returned.
24 */
ice_get_vf_by_id(struct ice_pf * pf,u16 vf_id)25 struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
26 {
27 struct ice_vf *vf;
28
29 rcu_read_lock();
30 hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
31 if (vf->vf_id == vf_id) {
32 struct ice_vf *found;
33
34 if (kref_get_unless_zero(&vf->refcnt))
35 found = vf;
36 else
37 found = NULL;
38
39 rcu_read_unlock();
40 return found;
41 }
42 }
43 rcu_read_unlock();
44
45 return NULL;
46 }
47
48 /**
49 * ice_release_vf - Release VF associated with a refcount
50 * @ref: the kref decremented to zero
51 *
52 * Callback function for kref_put to release a VF once its reference count has
53 * hit zero.
54 */
ice_release_vf(struct kref * ref)55 static void ice_release_vf(struct kref *ref)
56 {
57 struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);
58
59 pci_dev_put(vf->vfdev);
60
61 vf->vf_ops->free(vf);
62 }
63
64 /**
65 * ice_put_vf - Release a reference to a VF
66 * @vf: the VF structure to decrease reference count on
67 *
68 * Decrease the reference count for a VF, and free the entry if it is no
69 * longer in use.
70 *
71 * This must be called after ice_get_vf_by_id() once the reference to the VF
72 * structure is no longer used. Otherwise, the VF structure will never be
73 * freed.
74 */
ice_put_vf(struct ice_vf * vf)75 void ice_put_vf(struct ice_vf *vf)
76 {
77 kref_put(&vf->refcnt, ice_release_vf);
78 }
79
80 /**
81 * ice_has_vfs - Return true if the PF has any associated VFs
82 * @pf: the PF private structure
83 *
84 * Return whether or not the PF has any allocated VFs.
85 *
86 * Note that this function only guarantees that there are no VFs at the point
87 * of calling it. It does not guarantee that no more VFs will be added.
88 */
ice_has_vfs(struct ice_pf * pf)89 bool ice_has_vfs(struct ice_pf *pf)
90 {
91 /* A simple check that the hash table is not empty does not require
92 * the mutex or rcu_read_lock.
93 */
94 return !hash_empty(pf->vfs.table);
95 }
96
97 /**
98 * ice_get_num_vfs - Get number of allocated VFs
99 * @pf: the PF private structure
100 *
101 * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
102 * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
103 * the output of this function.
104 */
ice_get_num_vfs(struct ice_pf * pf)105 u16 ice_get_num_vfs(struct ice_pf *pf)
106 {
107 struct ice_vf *vf;
108 unsigned int bkt;
109 u16 num_vfs = 0;
110
111 rcu_read_lock();
112 ice_for_each_vf_rcu(pf, bkt, vf)
113 num_vfs++;
114 rcu_read_unlock();
115
116 return num_vfs;
117 }
118
119 /**
120 * ice_get_vf_vsi - get VF's VSI based on the stored index
121 * @vf: VF used to get VSI
122 */
ice_get_vf_vsi(struct ice_vf * vf)123 struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
124 {
125 if (vf->lan_vsi_idx == ICE_NO_VSI)
126 return NULL;
127
128 return vf->pf->vsi[vf->lan_vsi_idx];
129 }
130
131 /**
132 * ice_is_vf_disabled
133 * @vf: pointer to the VF info
134 *
135 * If the PF has been disabled, there is no need resetting VF until PF is
136 * active again. Similarly, if the VF has been disabled, this means something
137 * else is resetting the VF, so we shouldn't continue.
138 *
139 * Returns true if the caller should consider the VF as disabled whether
140 * because that single VF is explicitly disabled or because the PF is
141 * currently disabled.
142 */
ice_is_vf_disabled(struct ice_vf * vf)143 bool ice_is_vf_disabled(struct ice_vf *vf)
144 {
145 struct ice_pf *pf = vf->pf;
146
147 return (test_bit(ICE_VF_DIS, pf->state) ||
148 test_bit(ICE_VF_STATE_DIS, vf->vf_states));
149 }
150
151 /**
152 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
153 * @vf: The VF being resseting
154 *
155 * The max poll time is about ~800ms, which is about the maximum time it takes
156 * for a VF to be reset and/or a VF driver to be removed.
157 */
ice_wait_on_vf_reset(struct ice_vf * vf)158 static void ice_wait_on_vf_reset(struct ice_vf *vf)
159 {
160 int i;
161
162 for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
163 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
164 break;
165 msleep(ICE_MAX_VF_RESET_SLEEP_MS);
166 }
167 }
168
169 /**
170 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
171 * @vf: VF to check if it's ready to be configured/queried
172 *
173 * The purpose of this function is to make sure the VF is not in reset, not
174 * disabled, and initialized so it can be configured and/or queried by a host
175 * administrator.
176 */
ice_check_vf_ready_for_cfg(struct ice_vf * vf)177 int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
178 {
179 ice_wait_on_vf_reset(vf);
180
181 if (ice_is_vf_disabled(vf))
182 return -EINVAL;
183
184 if (ice_check_vf_init(vf))
185 return -EBUSY;
186
187 return 0;
188 }
189
190 /**
191 * ice_trigger_vf_reset - Reset a VF on HW
192 * @vf: pointer to the VF structure
193 * @is_vflr: true if VFLR was issued, false if not
194 * @is_pfr: true if the reset was triggered due to a previous PFR
195 *
196 * Trigger hardware to start a reset for a particular VF. Expects the caller
197 * to wait the proper amount of time to allow hardware to reset the VF before
198 * it cleans up and restores VF functionality.
199 */
ice_trigger_vf_reset(struct ice_vf * vf,bool is_vflr,bool is_pfr)200 static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
201 {
202 /* Inform VF that it is no longer active, as a warning */
203 clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
204
205 /* Disable VF's configuration API during reset. The flag is re-enabled
206 * when it's safe again to access VF's VSI.
207 */
208 clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
209
210 /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
211 * needs to clear them in the case of VFR/VFLR. If this is done for
212 * PFR, it can mess up VF resets because the VF driver may already
213 * have started cleanup by the time we get here.
214 */
215 if (!is_pfr)
216 vf->vf_ops->clear_mbx_register(vf);
217
218 vf->vf_ops->trigger_reset_register(vf, is_vflr);
219 }
220
ice_vf_clear_counters(struct ice_vf * vf)221 static void ice_vf_clear_counters(struct ice_vf *vf)
222 {
223 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
224
225 if (vsi)
226 vsi->num_vlan = 0;
227
228 vf->num_mac = 0;
229 memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
230 memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
231 }
232
233 /**
234 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
235 * @vf: VF to perform pre VSI rebuild tasks
236 *
237 * These tasks are items that don't need to be amortized since they are most
238 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
239 */
ice_vf_pre_vsi_rebuild(struct ice_vf * vf)240 static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
241 {
242 /* Close any IRQ mapping now */
243 if (vf->vf_ops->irq_close)
244 vf->vf_ops->irq_close(vf);
245
246 ice_vf_clear_counters(vf);
247 vf->vf_ops->clear_reset_trigger(vf);
248 }
249
250 /**
251 * ice_vf_reconfig_vsi - Reconfigure a VF VSI with the device
252 * @vf: VF to reconfigure the VSI for
253 *
254 * This is called when a single VF is being reset (i.e. VVF, VFLR, host VF
255 * configuration change, etc).
256 *
257 * It brings the VSI down and then reconfigures it with the hardware.
258 */
ice_vf_reconfig_vsi(struct ice_vf * vf)259 static int ice_vf_reconfig_vsi(struct ice_vf *vf)
260 {
261 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
262 struct ice_pf *pf = vf->pf;
263 int err;
264
265 if (WARN_ON(!vsi))
266 return -EINVAL;
267
268 vsi->flags = ICE_VSI_FLAG_NO_INIT;
269
270 ice_vsi_decfg(vsi);
271 ice_fltr_remove_all(vsi);
272
273 err = ice_vsi_cfg(vsi);
274 if (err) {
275 dev_err(ice_pf_to_dev(pf),
276 "Failed to reconfigure the VF%u's VSI, error %d\n",
277 vf->vf_id, err);
278 return err;
279 }
280
281 return 0;
282 }
283
284 /**
285 * ice_vf_rebuild_vsi - rebuild the VF's VSI
286 * @vf: VF to rebuild the VSI for
287 *
288 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
289 * host, PFR, CORER, etc.).
290 *
291 * It reprograms the VSI configuration back into hardware.
292 */
ice_vf_rebuild_vsi(struct ice_vf * vf)293 static int ice_vf_rebuild_vsi(struct ice_vf *vf)
294 {
295 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
296 struct ice_pf *pf = vf->pf;
297
298 if (WARN_ON(!vsi))
299 return -EINVAL;
300
301 if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) {
302 dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
303 vf->vf_id);
304 return -EIO;
305 }
306 /* vsi->idx will remain the same in this case so don't update
307 * vf->lan_vsi_idx
308 */
309 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
310
311 return 0;
312 }
313
314 /**
315 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
316 * @vf: VF to add MAC filters for
317 * @vsi: Pointer to VSI
318 *
319 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
320 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
321 */
ice_vf_rebuild_host_vlan_cfg(struct ice_vf * vf,struct ice_vsi * vsi)322 static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
323 {
324 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
325 struct device *dev = ice_pf_to_dev(vf->pf);
326 int err;
327
328 if (ice_vf_is_port_vlan_ena(vf)) {
329 err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
330 if (err) {
331 dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
332 vf->vf_id, err);
333 return err;
334 }
335
336 err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
337 } else {
338 /* clear possible previous port vlan config */
339 err = ice_vsi_clear_port_vlan(vsi);
340 if (err) {
341 dev_err(dev, "failed to clear port VLAN via VSI parameters for VF %u, error %d\n",
342 vf->vf_id, err);
343 return err;
344 }
345 err = ice_vsi_add_vlan_zero(vsi);
346 }
347
348 if (err) {
349 dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
350 ice_vf_is_port_vlan_ena(vf) ?
351 ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
352 return err;
353 }
354
355 err = vlan_ops->ena_rx_filtering(vsi);
356 if (err)
357 dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
358 vf->vf_id, vsi->idx, err);
359
360 return 0;
361 }
362
363 /**
364 * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
365 * @vf: VF to re-apply the configuration for
366 *
367 * Called after a VF VSI has been re-added/rebuild during reset. The PF driver
368 * needs to re-apply the host configured Tx rate limiting configuration.
369 */
ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf * vf)370 static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
371 {
372 struct device *dev = ice_pf_to_dev(vf->pf);
373 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
374 int err;
375
376 if (WARN_ON(!vsi))
377 return -EINVAL;
378
379 if (vf->min_tx_rate) {
380 err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
381 if (err) {
382 dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
383 vf->min_tx_rate, vf->vf_id, err);
384 return err;
385 }
386 }
387
388 if (vf->max_tx_rate) {
389 err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
390 if (err) {
391 dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
392 vf->max_tx_rate, vf->vf_id, err);
393 return err;
394 }
395 }
396
397 return 0;
398 }
399
400 /**
401 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
402 * @vf: VF to configure trust setting for
403 */
ice_vf_set_host_trust_cfg(struct ice_vf * vf)404 static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
405 {
406 assign_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps, vf->trusted);
407 }
408
409 /**
410 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
411 * @vf: VF to add MAC filters for
412 *
413 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
414 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
415 */
ice_vf_rebuild_host_mac_cfg(struct ice_vf * vf)416 static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
417 {
418 struct device *dev = ice_pf_to_dev(vf->pf);
419 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
420 u8 broadcast[ETH_ALEN];
421 int status;
422
423 if (WARN_ON(!vsi))
424 return -EINVAL;
425
426 if (ice_is_eswitch_mode_switchdev(vf->pf))
427 return 0;
428
429 eth_broadcast_addr(broadcast);
430 status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
431 if (status) {
432 dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
433 vf->vf_id, status);
434 return status;
435 }
436
437 vf->num_mac++;
438
439 if (is_valid_ether_addr(vf->hw_lan_addr)) {
440 status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
441 ICE_FWD_TO_VSI);
442 if (status) {
443 dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
444 &vf->hw_lan_addr[0], vf->vf_id,
445 status);
446 return status;
447 }
448 vf->num_mac++;
449
450 ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
451 }
452
453 return 0;
454 }
455
456 /**
457 * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
458 * @vsi: Pointer to VSI
459 *
460 * This function moves VSI into corresponding scheduler aggregator node
461 * based on cached value of "aggregator node info" per VSI
462 */
ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi * vsi)463 static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
464 {
465 struct ice_pf *pf = vsi->back;
466 struct device *dev;
467 int status;
468
469 if (!vsi->agg_node)
470 return;
471
472 dev = ice_pf_to_dev(pf);
473 if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
474 dev_dbg(dev,
475 "agg_id %u already has reached max_num_vsis %u\n",
476 vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
477 return;
478 }
479
480 status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
481 vsi->idx, vsi->tc_cfg.ena_tc);
482 if (status)
483 dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
484 vsi->idx, vsi->agg_node->agg_id);
485 else
486 vsi->agg_node->num_vsis++;
487 }
488
489 /**
490 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
491 * @vf: VF to rebuild host configuration on
492 */
ice_vf_rebuild_host_cfg(struct ice_vf * vf)493 static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
494 {
495 struct device *dev = ice_pf_to_dev(vf->pf);
496 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
497
498 if (WARN_ON(!vsi))
499 return;
500
501 ice_vf_set_host_trust_cfg(vf);
502
503 if (ice_vf_rebuild_host_mac_cfg(vf))
504 dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
505 vf->vf_id);
506
507 if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
508 dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
509 vf->vf_id);
510
511 if (ice_vf_rebuild_host_tx_rate_cfg(vf))
512 dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
513 vf->vf_id);
514
515 if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
516 dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
517 vf->vf_id);
518
519 /* rebuild aggregator node config for main VF VSI */
520 ice_vf_rebuild_aggregator_node_cfg(vsi);
521 }
522
523 /**
524 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
525 * @vf: pointer to the VF structure
526 */
ice_set_vf_state_qs_dis(struct ice_vf * vf)527 static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
528 {
529 /* Clear Rx/Tx enabled queues flag */
530 bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
531 bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
532 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
533 }
534
535 /**
536 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
537 * @vf: VF to set in initialized state
538 *
539 * After this function the VF will be ready to receive/handle the
540 * VIRTCHNL_OP_GET_VF_RESOURCES message
541 */
ice_vf_set_initialized(struct ice_vf * vf)542 static void ice_vf_set_initialized(struct ice_vf *vf)
543 {
544 ice_set_vf_state_qs_dis(vf);
545 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
546 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
547 clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
548 set_bit(ICE_VF_STATE_INIT, vf->vf_states);
549 memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
550 }
551
552 /**
553 * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild
554 * @vf: the VF being reset
555 *
556 * Perform reset tasks which must occur after the VSI has been re-created or
557 * rebuilt during a VF reset.
558 */
ice_vf_post_vsi_rebuild(struct ice_vf * vf)559 static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
560 {
561 ice_vf_rebuild_host_cfg(vf);
562 ice_vf_set_initialized(vf);
563
564 vf->vf_ops->post_vsi_rebuild(vf);
565 }
566
567 /**
568 * ice_is_any_vf_in_unicast_promisc - check if any VF(s)
569 * are in unicast promiscuous mode
570 * @pf: PF structure for accessing VF(s)
571 *
572 * Return false if no VF(s) are in unicast promiscuous mode,
573 * else return true
574 */
ice_is_any_vf_in_unicast_promisc(struct ice_pf * pf)575 bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
576 {
577 bool is_vf_promisc = false;
578 struct ice_vf *vf;
579 unsigned int bkt;
580
581 rcu_read_lock();
582 ice_for_each_vf_rcu(pf, bkt, vf) {
583 /* found a VF that has promiscuous mode configured */
584 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
585 is_vf_promisc = true;
586 break;
587 }
588 }
589 rcu_read_unlock();
590
591 return is_vf_promisc;
592 }
593
594 /**
595 * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes
596 * @vf: the VF pointer
597 * @vsi: the VSI to configure
598 * @ucast_m: promiscuous mask to apply to unicast
599 * @mcast_m: promiscuous mask to apply to multicast
600 *
601 * Decide which mask should be used for unicast and multicast filter,
602 * based on presence of VLANs
603 */
604 void
ice_vf_get_promisc_masks(struct ice_vf * vf,struct ice_vsi * vsi,u8 * ucast_m,u8 * mcast_m)605 ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
606 u8 *ucast_m, u8 *mcast_m)
607 {
608 if (ice_vf_is_port_vlan_ena(vf) ||
609 ice_vsi_has_non_zero_vlans(vsi)) {
610 *mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
611 *ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
612 } else {
613 *mcast_m = ICE_MCAST_PROMISC_BITS;
614 *ucast_m = ICE_UCAST_PROMISC_BITS;
615 }
616 }
617
618 /**
619 * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI
620 * @vf: the VF pointer
621 * @vsi: the VSI to configure
622 *
623 * Clear all promiscuous/allmulticast filters for a VF
624 */
625 static int
ice_vf_clear_all_promisc_modes(struct ice_vf * vf,struct ice_vsi * vsi)626 ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi)
627 {
628 struct ice_pf *pf = vf->pf;
629 u8 ucast_m, mcast_m;
630 int ret = 0;
631
632 ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
633 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
634 if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
635 if (ice_is_dflt_vsi_in_use(vsi->port_info))
636 ret = ice_clear_dflt_vsi(vsi);
637 } else {
638 ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
639 }
640
641 if (ret) {
642 dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n");
643 } else {
644 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
645 dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n");
646 }
647 }
648
649 if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
650 ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
651 if (ret) {
652 dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n");
653 } else {
654 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
655 dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n");
656 }
657 }
658 return ret;
659 }
660
661 /**
662 * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
663 * @vf: the VF to configure
664 * @vsi: the VF's VSI
665 * @promisc_m: the promiscuous mode to enable
666 */
667 int
ice_vf_set_vsi_promisc(struct ice_vf * vf,struct ice_vsi * vsi,u8 promisc_m)668 ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
669 {
670 struct ice_hw *hw = &vsi->back->hw;
671 int status;
672
673 if (ice_vf_is_port_vlan_ena(vf))
674 status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
675 ice_vf_get_port_vlan_id(vf));
676 else if (ice_vsi_has_non_zero_vlans(vsi))
677 status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
678 else
679 status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
680
681 if (status && status != -EEXIST) {
682 dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
683 vf->vf_id, status);
684 return status;
685 }
686
687 return 0;
688 }
689
690 /**
691 * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI
692 * @vf: the VF to configure
693 * @vsi: the VF's VSI
694 * @promisc_m: the promiscuous mode to disable
695 */
696 int
ice_vf_clear_vsi_promisc(struct ice_vf * vf,struct ice_vsi * vsi,u8 promisc_m)697 ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
698 {
699 struct ice_hw *hw = &vsi->back->hw;
700 int status;
701
702 if (ice_vf_is_port_vlan_ena(vf))
703 status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
704 ice_vf_get_port_vlan_id(vf));
705 else if (ice_vsi_has_non_zero_vlans(vsi))
706 status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
707 else
708 status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
709
710 if (status && status != -ENOENT) {
711 dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
712 vf->vf_id, status);
713 return status;
714 }
715
716 return 0;
717 }
718
/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 *
 * Reset all VFs at once, in response to a PF or other device reset.
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise it must perform
 * these resets in a serialized fashion.
 */
void ice_reset_all_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!ice_has_vfs(pf))
		return;

	/* table_lock serializes against VF table add/remove for the whole
	 * multi-phase operation below
	 */
	mutex_lock(&pf->vfs.table_lock);

	/* clear all malicious info if the VFs are getting reset */
	ice_for_each_vf(pf, bkt, vf)
		ice_mbx_clear_malvf(&vf->mbx_info);

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
		mutex_unlock(&pf->vfs.table_lock);
		return;
	}

	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, bkt, vf)
		ice_trigger_vf_reset(vf, true, true);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Now that we've triggered all of the VFs, iterate
	 * the table again and wait for each VF to complete.
	 */
	ice_for_each_vf(pf, bkt, vf) {
		if (!vf->vf_ops->poll_reset_status(vf)) {
			/* Display a warning if at least one VF didn't manage
			 * to reset in time, but continue on with the
			 * operation.
			 */
			dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
			break;
		}
	}

	/* free VF resources to begin resetting the VSI state */
	ice_for_each_vf(pf, bkt, vf) {
		/* cfg_lock guards each VF's config against concurrent
		 * virtchnl handling while it is rebuilt
		 */
		mutex_lock(&vf->cfg_lock);

		ice_eswitch_detach_vf(pf, vf);
		vf->driver_caps = 0;
		ice_vc_set_default_allowlist(vf);

		ice_vf_fdir_exit(vf);
		ice_vf_fdir_init(vf);
		/* clean VF control VSI when resetting VFs since it should be
		 * setup only when VF creates its first FDIR rule.
		 */
		if (vf->ctrl_vsi_idx != ICE_NO_VSI)
			ice_vf_ctrl_invalidate_vsi(vf);

		ice_vf_pre_vsi_rebuild(vf);
		ice_vf_rebuild_vsi(vf);
		ice_vf_post_vsi_rebuild(vf);

		ice_eswitch_attach_vf(pf, vf);

		mutex_unlock(&vf->cfg_lock);
	}

	ice_flush(hw);
	clear_bit(ICE_VF_DIS, pf->state);

	mutex_unlock(&pf->vfs.table_lock);
}
802
803 /**
804 * ice_notify_vf_reset - Notify VF of a reset event
805 * @vf: pointer to the VF structure
806 */
ice_notify_vf_reset(struct ice_vf * vf)807 static void ice_notify_vf_reset(struct ice_vf *vf)
808 {
809 struct ice_hw *hw = &vf->pf->hw;
810 struct virtchnl_pf_event pfe;
811
812 /* Bail out if VF is in disabled state, neither initialized, nor active
813 * state - otherwise proceed with notifications
814 */
815 if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
816 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
817 test_bit(ICE_VF_STATE_DIS, vf->vf_states))
818 return;
819
820 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
821 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
822 ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
823 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
824 NULL);
825 }
826
/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
 * @flags: flags controlling behavior of the reset
 *
 * Flags:
 *   ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event
 *   ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
 *   ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
 *
 * Returns 0 if the VF is currently in reset, if resets are disabled, or if
 * the VF resets successfully. Returns an error code if the VF fails to
 * rebuild.
 */
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
	struct ice_pf *pf = vf->pf;
	struct ice_lag *lag;
	struct ice_vsi *vsi;
	u8 act_prt, pri_prt;
	struct device *dev;
	int err = 0;
	bool rsd;

	dev = ice_pf_to_dev(pf);
	act_prt = ICE_LAG_INVALID_PORT;
	pri_prt = pf->hw.port_info->lport;

	if (flags & ICE_VF_RESET_NOTIFY)
		ice_notify_vf_reset(vf);

	if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
			vf->vf_id);
		return 0;
	}

	/* Callers either pass ICE_VF_RESET_LOCK or must hold cfg_lock
	 * themselves already
	 */
	if (flags & ICE_VF_RESET_LOCK)
		mutex_lock(&vf->cfg_lock);
	else
		lockdep_assert_held(&vf->cfg_lock);

	/* When bonded on the primary port, temporarily move the VF's Tx
	 * scheduling nodes back to the primary port for the duration of
	 * the reset; they are restored at out_unlock.
	 */
	lag = pf->lag;
	mutex_lock(&pf->lag_mutex);
	if (lag && lag->bonded && lag->primary) {
		act_prt = lag->active_port;
		if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
		    lag->upper_netdev)
			ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
		else
			act_prt = ICE_LAG_INVALID_PORT;
	}

	if (ice_is_vf_disabled(vf)) {
		/* Something else is already resetting this VF (or the PF is
		 * disabled): just quiesce the rings and report success.
		 */
		vsi = ice_get_vf_vsi(vf);
		if (!vsi) {
			dev_dbg(dev, "VF is already removed\n");
			err = -EINVAL;
			goto out_unlock;
		}
		ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);

		if (ice_vsi_is_rx_queue_active(vsi))
			ice_vsi_stop_all_rx_rings(vsi);

		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
			vf->vf_id);
		goto out_unlock;
	}

	/* Set VF disable bit state here, before triggering reset */
	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
	ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);

	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EIO;
		goto out_unlock;
	}

	ice_dis_vf_qs(vf);

	/* Call Disable LAN Tx queue AQ whether or not queues are
	 * enabled. This is needed for successful completion of VFR.
	 */
	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
			NULL, vf->vf_ops->reset_type, vf->vf_id, NULL);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	rsd = vf->vf_ops->poll_reset_status(vf);

	/* Display a warning if VF didn't manage to reset in time, but need to
	 * continue on with the operation.
	 */
	if (!rsd)
		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);

	vf->driver_caps = 0;
	ice_vc_set_default_allowlist(vf);

	/* disable promiscuous modes in case they were enabled
	 * ignore any error if disabling process failed
	 */
	ice_vf_clear_all_promisc_modes(vf, vsi);

	ice_vf_fdir_exit(vf);
	ice_vf_fdir_init(vf);
	/* clean VF control VSI when resetting VF since it should be setup
	 * only when VF creates its first FDIR rule.
	 */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	ice_vf_pre_vsi_rebuild(vf);

	if (ice_vf_reconfig_vsi(vf)) {
		dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
			vf->vf_id);
		err = -EFAULT;
		goto out_unlock;
	}

	ice_vf_post_vsi_rebuild(vf);
	/* the VSI may have changed during the rebuild; re-read the pointer */
	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EINVAL;
		goto out_unlock;
	}

	ice_eswitch_update_repr(&vf->repr_id, vsi);

	/* if the VF has been reset allow it to come up again */
	ice_mbx_clear_malvf(&vf->mbx_info);

out_unlock:
	/* undo the temporary LAG node move done before the reset, if any */
	if (lag && lag->bonded && lag->primary &&
	    act_prt != ICE_LAG_INVALID_PORT)
		ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
	mutex_unlock(&pf->lag_mutex);

	if (flags & ICE_VF_RESET_LOCK)
		mutex_unlock(&vf->cfg_lock);

	return err;
}
974
975 /**
976 * ice_set_vf_state_dis - Set VF state to disabled
977 * @vf: pointer to the VF structure
978 */
ice_set_vf_state_dis(struct ice_vf * vf)979 void ice_set_vf_state_dis(struct ice_vf *vf)
980 {
981 ice_set_vf_state_qs_dis(vf);
982 vf->vf_ops->clear_reset_state(vf);
983 }
984
985 /* Private functions only accessed from other virtualization files */
986
987 /**
988 * ice_initialize_vf_entry - Initialize a VF entry
989 * @vf: pointer to the VF structure
990 */
ice_initialize_vf_entry(struct ice_vf * vf)991 void ice_initialize_vf_entry(struct ice_vf *vf)
992 {
993 struct ice_pf *pf = vf->pf;
994 struct ice_vfs *vfs;
995
996 vfs = &pf->vfs;
997
998 /* assign default capabilities */
999 vf->spoofchk = true;
1000 ice_vc_set_default_allowlist(vf);
1001 ice_virtchnl_set_dflt_ops(vf);
1002
1003 /* set default number of MSI-X */
1004 vf->num_msix = vfs->num_msix_per;
1005 vf->num_vf_qs = vfs->num_qps_per;
1006
1007 /* ctrl_vsi_idx will be set to a valid value only when iAVF
1008 * creates its first fdir rule.
1009 */
1010 ice_vf_ctrl_invalidate_vsi(vf);
1011 ice_vf_fdir_init(vf);
1012
1013 /* Initialize mailbox info for this VF */
1014 ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);
1015
1016 mutex_init(&vf->cfg_lock);
1017 }
1018
1019 /**
1020 * ice_dis_vf_qs - Disable the VF queues
1021 * @vf: pointer to the VF structure
1022 */
ice_dis_vf_qs(struct ice_vf * vf)1023 void ice_dis_vf_qs(struct ice_vf *vf)
1024 {
1025 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1026
1027 if (WARN_ON(!vsi))
1028 return;
1029
1030 ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
1031 ice_vsi_stop_all_rx_rings(vsi);
1032 ice_set_vf_state_qs_dis(vf);
1033 }
1034
1035 /**
1036 * ice_err_to_virt_err - translate errors for VF return code
1037 * @err: error return code
1038 */
ice_err_to_virt_err(int err)1039 enum virtchnl_status_code ice_err_to_virt_err(int err)
1040 {
1041 switch (err) {
1042 case 0:
1043 return VIRTCHNL_STATUS_SUCCESS;
1044 case -EINVAL:
1045 case -ENODEV:
1046 return VIRTCHNL_STATUS_ERR_PARAM;
1047 case -ENOMEM:
1048 return VIRTCHNL_STATUS_ERR_NO_MEMORY;
1049 case -EALREADY:
1050 case -EBUSY:
1051 case -EIO:
1052 case -ENOSPC:
1053 return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1054 default:
1055 return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
1056 }
1057 }
1058
1059 /**
1060 * ice_check_vf_init - helper to check if VF init complete
1061 * @vf: the pointer to the VF to check
1062 */
ice_check_vf_init(struct ice_vf * vf)1063 int ice_check_vf_init(struct ice_vf *vf)
1064 {
1065 struct ice_pf *pf = vf->pf;
1066
1067 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
1068 dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
1069 vf->vf_id);
1070 return -EBUSY;
1071 }
1072 return 0;
1073 }
1074
/**
 * ice_vf_get_port_info - Get the VF's port info structure
 * @vf: VF used to get the port info structure for
 *
 * Return: pointer to the port_info of the PF that owns this VF; VFs do not
 * carry their own port_info.
 */
struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
{
	return vf->pf->hw.port_info;
}
1083
1084 /**
1085 * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior
1086 * @vsi: the VSI to configure
1087 * @enable: whether to enable or disable the spoof checking
1088 *
1089 * Configure a VSI to enable (or disable) spoof checking behavior.
1090 */
ice_cfg_mac_antispoof(struct ice_vsi * vsi,bool enable)1091 static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
1092 {
1093 struct ice_vsi_ctx *ctx;
1094 int err;
1095
1096 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1097 if (!ctx)
1098 return -ENOMEM;
1099
1100 ctx->info.sec_flags = vsi->info.sec_flags;
1101 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
1102
1103 if (enable)
1104 ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1105 else
1106 ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1107
1108 err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
1109 if (err)
1110 dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n",
1111 enable ? "ON" : "OFF", vsi->vsi_num, err);
1112 else
1113 vsi->info.sec_flags = ctx->info.sec_flags;
1114
1115 kfree(ctx);
1116
1117 return err;
1118 }
1119
1120 /**
1121 * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI
1122 * @vsi: VSI to enable Tx spoof checking for
1123 */
ice_vsi_ena_spoofchk(struct ice_vsi * vsi)1124 static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
1125 {
1126 struct ice_vsi_vlan_ops *vlan_ops;
1127 int err = 0;
1128
1129 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1130
1131 /* Allow VF with VLAN 0 only to send all tagged traffic */
1132 if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
1133 err = vlan_ops->ena_tx_filtering(vsi);
1134 if (err)
1135 return err;
1136 }
1137
1138 return ice_cfg_mac_antispoof(vsi, true);
1139 }
1140
1141 /**
1142 * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI
1143 * @vsi: VSI to disable Tx spoof checking for
1144 */
ice_vsi_dis_spoofchk(struct ice_vsi * vsi)1145 static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi)
1146 {
1147 struct ice_vsi_vlan_ops *vlan_ops;
1148 int err;
1149
1150 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1151
1152 err = vlan_ops->dis_tx_filtering(vsi);
1153 if (err)
1154 return err;
1155
1156 return ice_cfg_mac_antispoof(vsi, false);
1157 }
1158
1159 /**
1160 * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI
1161 * @vsi: VSI associated to the VF
1162 * @enable: whether to enable or disable the spoof checking
1163 */
ice_vsi_apply_spoofchk(struct ice_vsi * vsi,bool enable)1164 int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable)
1165 {
1166 int err;
1167
1168 if (enable)
1169 err = ice_vsi_ena_spoofchk(vsi);
1170 else
1171 err = ice_vsi_dis_spoofchk(vsi);
1172
1173 return err;
1174 }
1175
/**
 * ice_is_vf_trusted - check whether the VF has the privilege capability
 * @vf: pointer to the VF info
 *
 * Return: true if the ICE_VIRTCHNL_VF_CAP_PRIVILEGE bit is set in the VF's
 * capability flags, false otherwise.
 */
bool ice_is_vf_trusted(struct ice_vf *vf)
{
	return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
}
1184
1185 /**
1186 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
1187 * @vf: the VF to check
1188 *
1189 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
1190 * otherwise
1191 */
ice_vf_has_no_qs_ena(struct ice_vf * vf)1192 bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
1193 {
1194 return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
1195 !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
1196 }
1197
1198 /**
1199 * ice_is_vf_link_up - check if the VF's link is up
1200 * @vf: VF to check if link is up
1201 */
ice_is_vf_link_up(struct ice_vf * vf)1202 bool ice_is_vf_link_up(struct ice_vf *vf)
1203 {
1204 struct ice_port_info *pi = ice_vf_get_port_info(vf);
1205
1206 if (ice_check_vf_init(vf))
1207 return false;
1208
1209 if (ice_vf_has_no_qs_ena(vf))
1210 return false;
1211 else if (vf->link_forced)
1212 return vf->link_up;
1213 else
1214 return pi->phy.link_info.link_info &
1215 ICE_AQ_LINK_UP;
1216 }
1217
/**
 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
 * @vf: VF that control VSI is being invalidated on
 *
 * Set the control VSI index to the ICE_NO_VSI sentinel so the VF is
 * treated as having no control VSI.
 */
void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
{
	vf->ctrl_vsi_idx = ICE_NO_VSI;
}
1226
1227 /**
1228 * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
1229 * @vf: VF that control VSI is being released on
1230 */
ice_vf_ctrl_vsi_release(struct ice_vf * vf)1231 void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
1232 {
1233 ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
1234 ice_vf_ctrl_invalidate_vsi(vf);
1235 }
1236
1237 /**
1238 * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
1239 * @vf: VF to setup control VSI for
1240 *
1241 * Returns pointer to the successfully allocated VSI struct on success,
1242 * otherwise returns NULL on failure.
1243 */
ice_vf_ctrl_vsi_setup(struct ice_vf * vf)1244 struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
1245 {
1246 struct ice_vsi_cfg_params params = {};
1247 struct ice_pf *pf = vf->pf;
1248 struct ice_vsi *vsi;
1249
1250 params.type = ICE_VSI_CTRL;
1251 params.port_info = ice_vf_get_port_info(vf);
1252 params.vf = vf;
1253 params.flags = ICE_VSI_FLAG_INIT;
1254
1255 vsi = ice_vsi_setup(pf, ¶ms);
1256 if (!vsi) {
1257 dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
1258 ice_vf_ctrl_invalidate_vsi(vf);
1259 }
1260
1261 return vsi;
1262 }
1263
1264 /**
1265 * ice_vf_init_host_cfg - Initialize host admin configuration
1266 * @vf: VF to initialize
1267 * @vsi: the VSI created at initialization
1268 *
1269 * Initialize the VF host configuration. Called during VF creation to setup
1270 * VLAN 0, add the VF VSI broadcast filter, and setup spoof checking. It
1271 * should only be called during VF creation.
1272 */
ice_vf_init_host_cfg(struct ice_vf * vf,struct ice_vsi * vsi)1273 int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
1274 {
1275 struct ice_vsi_vlan_ops *vlan_ops;
1276 struct ice_pf *pf = vf->pf;
1277 u8 broadcast[ETH_ALEN];
1278 struct device *dev;
1279 int err;
1280
1281 dev = ice_pf_to_dev(pf);
1282
1283 err = ice_vsi_add_vlan_zero(vsi);
1284 if (err) {
1285 dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
1286 vf->vf_id);
1287 return err;
1288 }
1289
1290 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1291 err = vlan_ops->ena_rx_filtering(vsi);
1292 if (err) {
1293 dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
1294 vf->vf_id);
1295 return err;
1296 }
1297
1298 eth_broadcast_addr(broadcast);
1299 err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
1300 if (err) {
1301 dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n",
1302 vf->vf_id, err);
1303 return err;
1304 }
1305
1306 vf->num_mac = 1;
1307
1308 err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
1309 if (err) {
1310 dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
1311 vf->vf_id);
1312 return err;
1313 }
1314
1315 return 0;
1316 }
1317
/**
 * ice_vf_invalidate_vsi - invalidate vsi_idx to remove VSI access
 * @vf: VF to remove access to VSI for
 *
 * Set the LAN VSI index to the ICE_NO_VSI sentinel so the VF is treated
 * as having no LAN VSI.
 */
void ice_vf_invalidate_vsi(struct ice_vf *vf)
{
	vf->lan_vsi_idx = ICE_NO_VSI;
}
1326
/**
 * ice_vf_vsi_release - Release the VF VSI and invalidate indexes
 * @vf: pointer to the VF structure
 *
 * Release the VSI associated with this VF and then invalidate the VF's
 * VSI index. Warns and returns early if the VF has no VSI.
 */
void ice_vf_vsi_release(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	/* the VF is expected to have a VSI at this point */
	if (WARN_ON(!vsi))
		return;

	ice_vsi_release(vsi);
	ice_vf_invalidate_vsi(vf);
}
1344
1345 /**
1346 * ice_get_vf_ctrl_vsi - Get first VF control VSI pointer
1347 * @pf: the PF private structure
1348 * @vsi: pointer to the VSI
1349 *
1350 * Return first found VF control VSI other than the vsi
1351 * passed by parameter. This function is used to determine
1352 * whether new resources have to be allocated for control VSI
1353 * or they can be shared with existing one.
1354 *
1355 * Return found VF control VSI pointer other itself. Return
1356 * NULL Otherwise.
1357 *
1358 */
ice_get_vf_ctrl_vsi(struct ice_pf * pf,struct ice_vsi * vsi)1359 struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
1360 {
1361 struct ice_vsi *ctrl_vsi = NULL;
1362 struct ice_vf *vf;
1363 unsigned int bkt;
1364
1365 rcu_read_lock();
1366 ice_for_each_vf_rcu(pf, bkt, vf) {
1367 if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
1368 ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
1369 break;
1370 }
1371 }
1372
1373 rcu_read_unlock();
1374 return ctrl_vsi;
1375 }
1376