1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2022, Intel Corporation. */
3
4 #include "ice_vf_lib_private.h"
5 #include "ice.h"
6 #include "ice_lib.h"
7 #include "ice_fltr.h"
8 #include "virt/allowlist.h"
9
10 /* Public functions which may be accessed by all driver files */
11
12 /**
13 * ice_get_vf_by_id - Get pointer to VF by ID
14 * @pf: the PF private structure
15 * @vf_id: the VF ID to locate
16 *
17 * Locate and return a pointer to the VF structure associated with a given ID.
18 * Returns NULL if the ID does not have a valid VF structure associated with
19 * it.
20 *
21 * This function takes a reference to the VF, which must be released by
22 * calling ice_put_vf() once the caller is finished accessing the VF structure
23 * returned.
24 */
ice_get_vf_by_id(struct ice_pf * pf,u16 vf_id)25 struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
26 {
27 struct ice_vf *vf;
28
29 rcu_read_lock();
30 hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
31 if (vf->vf_id == vf_id) {
32 struct ice_vf *found;
33
34 if (kref_get_unless_zero(&vf->refcnt))
35 found = vf;
36 else
37 found = NULL;
38
39 rcu_read_unlock();
40 return found;
41 }
42 }
43 rcu_read_unlock();
44
45 return NULL;
46 }
47
/**
 * ice_release_vf - Release VF associated with a refcount
 * @ref: the kref decremented to zero
 *
 * Callback function for kref_put to release a VF once its reference count has
 * hit zero.
 */
static void ice_release_vf(struct kref *ref)
{
	struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);

	/* drop the PCI device reference held in vf->vfdev before freeing
	 * the VF structure itself
	 */
	pci_dev_put(vf->vfdev);

	/* implementation-specific free routine releases the VF memory */
	vf->vf_ops->free(vf);
}
63
/**
 * ice_put_vf - Release a reference to a VF
 * @vf: the VF structure to decrease reference count on
 *
 * Decrease the reference count for a VF, and free the entry if it is no
 * longer in use.
 *
 * This must be called after ice_get_vf_by_id() once the reference to the VF
 * structure is no longer used. Otherwise, the VF structure will never be
 * freed.
 */
void ice_put_vf(struct ice_vf *vf)
{
	/* dropping the last reference invokes ice_release_vf() */
	kref_put(&vf->refcnt, ice_release_vf);
}
79
/**
 * ice_has_vfs - Return true if the PF has any associated VFs
 * @pf: the PF private structure
 *
 * Return whether or not the PF has any allocated VFs.
 *
 * Note that this function only guarantees that there are no VFs at the point
 * of calling it. It does not guarantee that no more VFs will be added.
 */
bool ice_has_vfs(struct ice_pf *pf)
{
	/* A simple check that the hash table is not empty does not require
	 * the mutex or rcu_read_lock. The result is only a snapshot; see
	 * the note in the kernel-doc above.
	 */
	return !hash_empty(pf->vfs.table);
}
96
97 /**
98 * ice_get_num_vfs - Get number of allocated VFs
99 * @pf: the PF private structure
100 *
101 * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
102 * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
103 * the output of this function.
104 */
ice_get_num_vfs(struct ice_pf * pf)105 u16 ice_get_num_vfs(struct ice_pf *pf)
106 {
107 struct ice_vf *vf;
108 unsigned int bkt;
109 u16 num_vfs = 0;
110
111 rcu_read_lock();
112 ice_for_each_vf_rcu(pf, bkt, vf)
113 num_vfs++;
114 rcu_read_unlock();
115
116 return num_vfs;
117 }
118
119 /**
120 * ice_get_vf_vsi - get VF's VSI based on the stored index
121 * @vf: VF used to get VSI
122 */
ice_get_vf_vsi(struct ice_vf * vf)123 struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
124 {
125 if (vf->lan_vsi_idx == ICE_NO_VSI)
126 return NULL;
127
128 return vf->pf->vsi[vf->lan_vsi_idx];
129 }
130
131 /**
132 * ice_is_vf_disabled
133 * @vf: pointer to the VF info
134 *
135 * If the PF has been disabled, there is no need resetting VF until PF is
136 * active again. Similarly, if the VF has been disabled, this means something
137 * else is resetting the VF, so we shouldn't continue.
138 *
139 * Returns true if the caller should consider the VF as disabled whether
140 * because that single VF is explicitly disabled or because the PF is
141 * currently disabled.
142 */
ice_is_vf_disabled(struct ice_vf * vf)143 bool ice_is_vf_disabled(struct ice_vf *vf)
144 {
145 struct ice_pf *pf = vf->pf;
146
147 return (test_bit(ICE_VF_DIS, pf->state) ||
148 test_bit(ICE_VF_STATE_DIS, vf->vf_states));
149 }
150
151 /**
152 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
153 * @vf: The VF being resseting
154 *
155 * The max poll time is about ~800ms, which is about the maximum time it takes
156 * for a VF to be reset and/or a VF driver to be removed.
157 */
ice_wait_on_vf_reset(struct ice_vf * vf)158 static void ice_wait_on_vf_reset(struct ice_vf *vf)
159 {
160 int i;
161
162 for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
163 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
164 break;
165 msleep(ICE_MAX_VF_RESET_SLEEP_MS);
166 }
167 }
168
169 /**
170 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
171 * @vf: VF to check if it's ready to be configured/queried
172 *
173 * The purpose of this function is to make sure the VF is not in reset, not
174 * disabled, and initialized so it can be configured and/or queried by a host
175 * administrator.
176 */
ice_check_vf_ready_for_cfg(struct ice_vf * vf)177 int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
178 {
179 ice_wait_on_vf_reset(vf);
180
181 if (ice_is_vf_disabled(vf))
182 return -EINVAL;
183
184 if (ice_check_vf_init(vf))
185 return -EBUSY;
186
187 return 0;
188 }
189
/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
	/* Inform VF that it is no longer active, as a warning */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * when it's safe again to access VF's VSI.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
	 * needs to clear them in the case of VFR/VFLR. If this is done for
	 * PFR, it can mess up VF resets because the VF driver may already
	 * have started cleanup by the time we get here.
	 */
	if (!is_pfr)
		vf->vf_ops->clear_mbx_register(vf);

	/* kick off the hardware reset; completion is polled by the caller
	 * via vf_ops->poll_reset_status()
	 */
	vf->vf_ops->trigger_reset_register(vf, is_vflr);
}
220
/**
 * ice_vf_clear_counters - reset cached VF filter and event counters
 * @vf: VF to clear the software counters for
 *
 * Zero the software bookkeeping for MAC/VLAN filter counts and MDD event
 * statistics so they start fresh when the VF's VSI is rebuilt.
 */
static void ice_vf_clear_counters(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	/* the VSI may already be invalidated during teardown */
	if (vsi)
		vsi->num_vlan = 0;

	vf->num_mac = 0;
	vf->num_mac_lldp = 0;
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}
233
/**
 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
 * @vf: VF to perform pre VSI rebuild tasks
 *
 * These tasks are items that don't need to be amortized since they are most
 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
 */
static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
{
	/* Close any IRQ mapping now; irq_close is optional per VF backend */
	if (vf->vf_ops->irq_close)
		vf->vf_ops->irq_close(vf);

	ice_vf_clear_counters(vf);
	/* acknowledge the completed reset so the trigger can fire again */
	vf->vf_ops->clear_reset_trigger(vf);
}
250
/**
 * ice_vf_reconfig_vsi - Reconfigure a VF VSI with the device
 * @vf: VF to reconfigure the VSI for
 *
 * This is called when a single VF is being reset (i.e. VVF, VFLR, host VF
 * configuration change, etc).
 *
 * It brings the VSI down and then reconfigures it with the hardware.
 *
 * Returns 0 on success, -EINVAL when the VSI is gone, or the error from
 * ice_vsi_cfg() on failure.
 */
static int ice_vf_reconfig_vsi(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_pf *pf = vf->pf;
	int err;

	if (WARN_ON(!vsi))
		return -EINVAL;

	/* NO_INIT: reuse the existing VSI context rather than allocating a
	 * fresh one (contrast with ice_vf_rebuild_vsi() which uses
	 * ICE_VSI_FLAG_INIT)
	 */
	vsi->flags = ICE_VSI_FLAG_NO_INIT;

	/* tear down must precede filter removal and reconfiguration */
	ice_vsi_decfg(vsi);
	ice_fltr_remove_all(vsi);

	err = ice_vsi_cfg(vsi);
	if (err) {
		dev_err(ice_pf_to_dev(pf),
			"Failed to reconfigure the VF%u's VSI, error %d\n",
			vf->vf_id, err);
		return err;
	}

	return 0;
}
284
/**
 * ice_vf_rebuild_vsi - rebuild the VF's VSI
 * @vf: VF to rebuild the VSI for
 *
 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
 * host, PFR, CORER, etc.).
 *
 * It reprograms the VSI configuration back into hardware.
 *
 * Returns 0 on success, -EINVAL when the VSI is gone, -EIO if the rebuild
 * fails.
 */
static int ice_vf_rebuild_vsi(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_pf *pf = vf->pf;

	if (WARN_ON(!vsi))
		return -EINVAL;

	if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) {
		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
			vf->vf_id);
		return -EIO;
	}
	/* vsi->idx will remain the same in this case so don't update
	 * vf->lan_vsi_idx
	 */
	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);

	return 0;
}
314
/**
 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
 * @vf: VF to rebuild VLAN configuration for
 * @vsi: Pointer to VSI
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
 *
 * Returns 0 on success (a failure to enable Rx VLAN filtering is only
 * warned about, not treated as fatal), or a negative error code otherwise.
 */
static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vf->pf);
	int err;

	if (ice_vf_is_port_vlan_ena(vf)) {
		/* program the port VLAN into the VSI, then add its filter */
		err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
		if (err) {
			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
				vf->vf_id, err);
			return err;
		}

		err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
	} else {
		/* clear possible previous port vlan config */
		err = ice_vsi_clear_port_vlan(vsi);
		if (err) {
			dev_err(dev, "failed to clear port VLAN via VSI parameters for VF %u, error %d\n",
				vf->vf_id, err);
			return err;
		}
		err = ice_vsi_add_vlan_zero(vsi);
	}

	/* shared error check for add_vlan / add_vlan_zero above */
	if (err) {
		dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
			ice_vf_is_port_vlan_ena(vf) ?
			ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
		return err;
	}

	/* best effort: Rx VLAN filtering failure is non-fatal */
	err = vlan_ops->ena_rx_filtering(vsi);
	if (err)
		dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
			 vf->vf_id, vsi->idx, err);

	return 0;
}
363
/**
 * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
 * @vf: VF to re-apply the configuration for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * needs to re-apply the host configured Tx rate limiting configuration.
 *
 * Returns 0 on success or a negative error code from the bandwidth-limit
 * helpers.
 */
static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	int err;

	if (WARN_ON(!vsi))
		return -EINVAL;

	/* rates are stored in Mbps; the scheduler API takes Kbps, hence
	 * the * 1000 (widened to u64 to avoid overflow)
	 */
	if (vf->min_tx_rate) {
		err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
		if (err) {
			dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
				vf->min_tx_rate, vf->vf_id, err);
			return err;
		}
	}

	if (vf->max_tx_rate) {
		err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
		if (err) {
			dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
				vf->max_tx_rate, vf->vf_id, err);
			return err;
		}
	}

	return 0;
}
400
401 /**
402 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
403 * @vf: VF to configure trust setting for
404 */
ice_vf_set_host_trust_cfg(struct ice_vf * vf)405 static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
406 {
407 assign_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps, vf->trusted);
408 }
409
/**
 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
 * @vf: VF to add MAC filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
 *
 * Returns 0 on success or a negative error code from the filter helpers.
 */
static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	u8 broadcast[ETH_ALEN];
	int status;

	if (WARN_ON(!vsi))
		return -EINVAL;

	/* in switchdev mode MAC filters are managed elsewhere; nothing to
	 * re-add here
	 */
	if (ice_is_eswitch_mode_switchdev(vf->pf))
		return 0;

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
			vf->vf_id, status);
		return status;
	}

	/* track the broadcast filter in the VF's MAC count */
	vf->num_mac++;

	if (is_valid_ether_addr(vf->hw_lan_addr)) {
		status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
					  ICE_FWD_TO_VSI);
		if (status) {
			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
				&vf->hw_lan_addr[0], vf->vf_id,
				status);
			return status;
		}
		vf->num_mac++;

		/* the effective (device) address tracks the hardware one
		 * after a rebuild
		 */
		ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
	}

	return 0;
}
456
/**
 * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
 * @vsi: Pointer to VSI
 *
 * This function moves VSI into corresponding scheduler aggregator node
 * based on cached value of "aggregator node info" per VSI.
 *
 * Failures are logged at debug level only; this is best-effort.
 */
static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;

	/* no cached aggregator info means nothing to restore */
	if (!vsi->agg_node)
		return;

	dev = ice_pf_to_dev(pf);
	if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
		dev_dbg(dev,
			"agg_id %u already has reached max_num_vsis %u\n",
			vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
		return;
	}

	status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
				     vsi->idx, vsi->tc_cfg.ena_tc);
	if (status)
		dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
			vsi->idx, vsi->agg_node->agg_id);
	else
		vsi->agg_node->num_vsis++;
}
489
/**
 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
 * @vf: VF to rebuild host configuration on
 *
 * Re-apply every host-administered setting (trust, MAC, VLAN, Tx rate,
 * spoof checking, aggregator placement) after the VF's VSI is rebuilt.
 * Individual failures are logged but do not abort the remaining steps.
 */
static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_vf_set_host_trust_cfg(vf);

	if (ice_vf_rebuild_host_mac_cfg(vf))
		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_tx_rate_cfg(vf))
		dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
			vf->vf_id);

	if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
		dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
			vf->vf_id);

	/* rebuild aggregator node config for main VF VSI */
	ice_vf_rebuild_aggregator_node_cfg(vsi);
}
523
/**
 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 * @vf: pointer to the VF structure
 *
 * Clears the per-queue enabled bitmaps and the aggregate QS_ENA state bit.
 */
static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
	/* Clear Rx/Tx enabled queues flag */
	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}
535
/**
 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
 * @vf: VF to set in initialized state
 *
 * After this function the VF will be ready to receive/handle the
 * VIRTCHNL_OP_GET_VF_RESOURCES message
 */
static void ice_vf_set_initialized(struct ice_vf *vf)
{
	ice_set_vf_state_qs_dis(vf);
	/* reset promiscuous and disabled state before marking INIT */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
	/* VLAN v2 capabilities are renegotiated after a reset */
	memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
}
552
/**
 * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild
 * @vf: the VF being reset
 *
 * Perform reset tasks which must occur after the VSI has been re-created or
 * rebuilt during a VF reset.
 */
static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
{
	/* restore host config first so the VF is marked initialized only
	 * once its filters and limits are back in place
	 */
	ice_vf_rebuild_host_cfg(vf);
	ice_vf_set_initialized(vf);

	/* backend-specific finishing touches */
	vf->vf_ops->post_vsi_rebuild(vf);
}
567
568 /**
569 * ice_is_any_vf_in_unicast_promisc - check if any VF(s)
570 * are in unicast promiscuous mode
571 * @pf: PF structure for accessing VF(s)
572 *
573 * Return false if no VF(s) are in unicast promiscuous mode,
574 * else return true
575 */
ice_is_any_vf_in_unicast_promisc(struct ice_pf * pf)576 bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
577 {
578 bool is_vf_promisc = false;
579 struct ice_vf *vf;
580 unsigned int bkt;
581
582 rcu_read_lock();
583 ice_for_each_vf_rcu(pf, bkt, vf) {
584 /* found a VF that has promiscuous mode configured */
585 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
586 is_vf_promisc = true;
587 break;
588 }
589 }
590 rcu_read_unlock();
591
592 return is_vf_promisc;
593 }
594
595 /**
596 * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes
597 * @vf: the VF pointer
598 * @vsi: the VSI to configure
599 * @ucast_m: promiscuous mask to apply to unicast
600 * @mcast_m: promiscuous mask to apply to multicast
601 *
602 * Decide which mask should be used for unicast and multicast filter,
603 * based on presence of VLANs
604 */
605 void
ice_vf_get_promisc_masks(struct ice_vf * vf,struct ice_vsi * vsi,u8 * ucast_m,u8 * mcast_m)606 ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
607 u8 *ucast_m, u8 *mcast_m)
608 {
609 if (ice_vf_is_port_vlan_ena(vf) ||
610 ice_vsi_has_non_zero_vlans(vsi)) {
611 *mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
612 *ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
613 } else {
614 *mcast_m = ICE_MCAST_PROMISC_BITS;
615 *ucast_m = ICE_UCAST_PROMISC_BITS;
616 }
617 }
618
/**
 * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI
 * @vf: the VF pointer
 * @vsi: the VSI to configure
 *
 * Clear all promiscuous/allmulticast filters for a VF.
 *
 * Returns the result of the last attempted clear operation (0 when neither
 * mode was set or both clears succeeded).
 */
static int
ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_pf *pf = vf->pf;
	u8 ucast_m, mcast_m;
	int ret = 0;

	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
		if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
			/* without true promiscuous support, unicast promisc
			 * was implemented via the default VSI; undo that
			 */
			if (ice_is_dflt_vsi_in_use(vsi->port_info))
				ret = ice_clear_dflt_vsi(vsi);
		} else {
			ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
		}

		if (ret) {
			dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n");
		} else {
			clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
			dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n");
		}
	}

	if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
		ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
		if (ret) {
			dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n");
		} else {
			clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
			dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n");
		}
	}
	/* note: a unicast failure is overwritten if the multicast clear
	 * runs; callers treat this as best-effort anyway
	 */
	return ret;
}
661
/**
 * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
 * @vf: the VF to configure
 * @vsi: the VF's VSI
 * @promisc_m: the promiscuous mode to enable
 *
 * Returns 0 on success (already-enabled, -EEXIST, is treated as success),
 * or a negative error code otherwise.
 */
int
ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
{
	struct ice_hw *hw = &vsi->back->hw;
	int status;

	/* pick the variant matching the VF's VLAN configuration */
	if (ice_vf_is_port_vlan_ena(vf))
		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
						  ice_vf_get_port_vlan_id(vf));
	else if (ice_vsi_has_non_zero_vlans(vsi))
		status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
	else
		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);

	/* -EEXIST just means the mode was already on; not an error */
	if (status && status != -EEXIST) {
		dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
			vf->vf_id, status);
		return status;
	}

	return 0;
}
690
/**
 * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI
 * @vf: the VF to configure
 * @vsi: the VF's VSI
 * @promisc_m: the promiscuous mode to disable
 *
 * Returns 0 on success (already-disabled, -ENOENT, is treated as success),
 * or a negative error code otherwise.
 */
int
ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
{
	struct ice_hw *hw = &vsi->back->hw;
	int status;

	/* pick the variant matching the VF's VLAN configuration */
	if (ice_vf_is_port_vlan_ena(vf))
		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						    ice_vf_get_port_vlan_id(vf));
	else if (ice_vsi_has_non_zero_vlans(vsi))
		status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
	else
		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);

	/* -ENOENT just means the mode was already off; not an error */
	if (status && status != -ENOENT) {
		dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
			vf->vf_id, status);
		return status;
	}

	return 0;
}
719
720 /**
721 * ice_reset_vf_mbx_cnt - reset VF mailbox message count
722 * @vf: pointer to the VF structure
723 *
724 * This function clears the VF mailbox message count, and should be called on
725 * VF reset.
726 */
ice_reset_vf_mbx_cnt(struct ice_vf * vf)727 static void ice_reset_vf_mbx_cnt(struct ice_vf *vf)
728 {
729 struct ice_pf *pf = vf->pf;
730
731 if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
732 ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
733 else
734 ice_mbx_clear_malvf(&vf->mbx_info);
735 }
736
/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 *
 * Reset all VFs at once, in response to a PF or other device reset.
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise it must perform
 * these resets in a serialized fashion.
 */
void ice_reset_all_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!ice_has_vfs(pf))
		return;

	/* hold the table lock for the whole operation so VFs cannot be
	 * added or removed mid-reset
	 */
	mutex_lock(&pf->vfs.table_lock);

	/* clear all malicious info if the VFs are getting reset */
	ice_for_each_vf(pf, bkt, vf)
		ice_reset_vf_mbx_cnt(vf);

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
		mutex_unlock(&pf->vfs.table_lock);
		return;
	}

	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, bkt, vf)
		ice_trigger_vf_reset(vf, true, true);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Now that we've triggered all of the VFs, iterate
	 * the table again and wait for each VF to complete.
	 */
	ice_for_each_vf(pf, bkt, vf) {
		if (!vf->vf_ops->poll_reset_status(vf)) {
			/* Display a warning if at least one VF didn't manage
			 * to reset in time, but continue on with the
			 * operation.
			 */
			dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
			break;
		}
	}

	/* free VF resources to begin resetting the VSI state */
	ice_for_each_vf(pf, bkt, vf) {
		mutex_lock(&vf->cfg_lock);

		ice_eswitch_detach_vf(pf, vf);
		vf->driver_caps = 0;
		ice_vc_set_default_allowlist(vf);

		ice_vf_fdir_exit(vf);
		ice_vf_fdir_init(vf);
		/* clean VF control VSI when resetting VFs since it should be
		 * setup only when VF creates its first FDIR rule.
		 */
		if (vf->ctrl_vsi_idx != ICE_NO_VSI)
			ice_vf_ctrl_invalidate_vsi(vf);

		ice_vf_pre_vsi_rebuild(vf);
		if (ice_vf_rebuild_vsi(vf)) {
			/* skip post-rebuild steps for this VF but keep
			 * going for the rest
			 */
			dev_err(dev, "VF %u VSI rebuild failed, leaving VF disabled\n",
				vf->vf_id);
			mutex_unlock(&vf->cfg_lock);
			continue;
		}
		ice_vf_post_vsi_rebuild(vf);

		ice_eswitch_attach_vf(pf, vf);

		mutex_unlock(&vf->cfg_lock);
	}

	ice_flush(hw);
	clear_bit(ICE_VF_DIS, pf->state);

	mutex_unlock(&pf->vfs.table_lock);
}
825
/**
 * ice_notify_vf_reset - Notify VF of a reset event
 * @vf: pointer to the VF structure
 *
 * Best-effort: the send status of the admin-queue message is ignored.
 */
static void ice_notify_vf_reset(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;
	struct virtchnl_pf_event pfe;

	/* Bail out if VF is in disabled state, neither initialized, nor active
	 * state - otherwise proceed with notifications
	 */
	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
			      NULL);
}
849
/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
 * @flags: flags controlling behavior of the reset
 *
 * Flags:
 *   ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event
 *   ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
 *   ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
 *
 * Lock ordering: vf->cfg_lock is taken (or asserted held) before
 * pf->lag_mutex; keep this order consistent with other call sites.
 *
 * Returns 0 if the VF is currently in reset, if resets are disabled, or if
 * the VF resets successfully. Returns an error code if the VF fails to
 * rebuild.
 */
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int err = 0;
	u8 act_prt;
	bool rsd;

	dev = ice_pf_to_dev(pf);

	if (flags & ICE_VF_RESET_NOTIFY)
		ice_notify_vf_reset(vf);

	if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
			vf->vf_id);
		return 0;
	}

	/* caller either asks us to take cfg_lock or must already hold it */
	if (flags & ICE_VF_RESET_LOCK)
		mutex_lock(&vf->cfg_lock);
	else
		lockdep_assert_held(&vf->cfg_lock);

	/* park LAG activity for the duration of the reset; act_prt is
	 * restored in ice_lag_complete_vf_reset() on every exit path
	 */
	mutex_lock(&pf->lag_mutex);
	act_prt = ice_lag_prepare_vf_reset(pf->lag);

	if (ice_is_vf_disabled(vf)) {
		vsi = ice_get_vf_vsi(vf);
		if (!vsi) {
			dev_dbg(dev, "VF is already removed\n");
			err = -EINVAL;
			goto out_unlock;
		}
		/* quiesce the rings but skip the full reset flow */
		ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);

		if (ice_vsi_is_rx_queue_active(vsi))
			ice_vsi_stop_all_rx_rings(vsi);

		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
			vf->vf_id);
		goto out_unlock;
	}

	/* Set VF disable bit state here, before triggering reset */
	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
	ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);

	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EIO;
		goto out_unlock;
	}

	ice_dis_vf_qs(vf);

	/* Call Disable LAN Tx queue AQ whether or not queues are
	 * enabled. This is needed for successful completion of VFR.
	 */
	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
			NULL, vf->vf_ops->reset_type, vf->vf_id, NULL);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	rsd = vf->vf_ops->poll_reset_status(vf);

	/* Display a warning if VF didn't manage to reset in time, but need to
	 * continue on with the operation.
	 */
	if (!rsd)
		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);

	vf->driver_caps = 0;
	ice_vc_set_default_allowlist(vf);

	/* disable promiscuous modes in case they were enabled
	 * ignore any error if disabling process failed
	 */
	ice_vf_clear_all_promisc_modes(vf, vsi);

	ice_vf_fdir_exit(vf);
	ice_vf_fdir_init(vf);
	/* clean VF control VSI when resetting VF since it should be setup
	 * only when VF creates its first FDIR rule.
	 */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	ice_vf_pre_vsi_rebuild(vf);

	if (ice_vf_reconfig_vsi(vf)) {
		dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
			vf->vf_id);
		err = -EFAULT;
		goto out_unlock;
	}

	ice_vf_post_vsi_rebuild(vf);
	/* re-fetch: the rebuild may have changed the VSI backing pointer */
	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EINVAL;
		goto out_unlock;
	}

	ice_eswitch_update_repr(&vf->repr_id, vsi);

	/* if the VF has been reset allow it to come up again */
	ice_reset_vf_mbx_cnt(vf);

out_unlock:
	ice_lag_complete_vf_reset(pf->lag, act_prt);
	mutex_unlock(&pf->lag_mutex);

	if (flags & ICE_VF_RESET_LOCK)
		mutex_unlock(&vf->cfg_lock);

	return err;
}
984
/**
 * ice_set_vf_state_dis - Set VF state to disabled
 * @vf: pointer to the VF structure
 *
 * Disables the VF's queues and clears any backend-specific reset state.
 */
void ice_set_vf_state_dis(struct ice_vf *vf)
{
	ice_set_vf_state_qs_dis(vf);
	vf->vf_ops->clear_reset_state(vf);
}
994
995 /* Private functions only accessed from other virtualization files */
996
/**
 * ice_initialize_vf_entry - Initialize a VF entry
 * @vf: pointer to the VF structure
 *
 * Set up the default capabilities, resource counts, mailbox tracking, and
 * locking for a freshly allocated VF entry.
 */
void ice_initialize_vf_entry(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vfs *vfs;

	vfs = &pf->vfs;

	/* assign default capabilities */
	vf->spoofchk = true;
	ice_vc_set_default_allowlist(vf);
	ice_virtchnl_set_dflt_ops(vf);

	/* set default number of MSI-X */
	vf->num_msix = vfs->num_msix_per;
	vf->num_vf_qs = vfs->num_qps_per;

	/* set default RSS hash configuration */
	vf->rss_hashcfg = ICE_DEFAULT_RSS_HASHCFG;

	/* ctrl_vsi_idx will be set to a valid value only when iAVF
	 * creates its first fdir rule.
	 */
	ice_vf_ctrl_invalidate_vsi(vf);
	ice_vf_fdir_init(vf);

	/* Initialize mailbox info for this VF; E830 uses a hardware count,
	 * older parts use the software malicious-VF tracking list
	 */
	if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
		ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
	else
		ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);

	mutex_init(&vf->cfg_lock);
}
1034
/**
 * ice_deinitialize_vf_entry - Undo initialization done by ice_initialize_vf_entry()
 * @vf: pointer to the VF structure
 *
 * When ICE_F_MBX_LIMIT is not supported, remove this VF's mailbox info from
 * the list that ice_mbx_init_vf_info() linked it into.
 */
void ice_deinitialize_vf_entry(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
		list_del(&vf->mbx_info.list_entry);
}
1042
1043 /**
1044 * ice_dis_vf_qs - Disable the VF queues
1045 * @vf: pointer to the VF structure
1046 */
ice_dis_vf_qs(struct ice_vf * vf)1047 void ice_dis_vf_qs(struct ice_vf *vf)
1048 {
1049 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1050
1051 if (WARN_ON(!vsi))
1052 return;
1053
1054 ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
1055 ice_vsi_stop_all_rx_rings(vsi);
1056 ice_set_vf_state_qs_dis(vf);
1057 }
1058
1059 /**
1060 * ice_err_to_virt_err - translate errors for VF return code
1061 * @err: error return code
1062 */
ice_err_to_virt_err(int err)1063 enum virtchnl_status_code ice_err_to_virt_err(int err)
1064 {
1065 switch (err) {
1066 case 0:
1067 return VIRTCHNL_STATUS_SUCCESS;
1068 case -EINVAL:
1069 case -ENODEV:
1070 return VIRTCHNL_STATUS_ERR_PARAM;
1071 case -ENOMEM:
1072 return VIRTCHNL_STATUS_ERR_NO_MEMORY;
1073 case -EALREADY:
1074 case -EBUSY:
1075 case -EIO:
1076 case -ENOSPC:
1077 return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1078 default:
1079 return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
1080 }
1081 }
1082
1083 /**
1084 * ice_check_vf_init - helper to check if VF init complete
1085 * @vf: the pointer to the VF to check
1086 */
ice_check_vf_init(struct ice_vf * vf)1087 int ice_check_vf_init(struct ice_vf *vf)
1088 {
1089 struct ice_pf *pf = vf->pf;
1090
1091 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
1092 dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
1093 vf->vf_id);
1094 return -EBUSY;
1095 }
1096 return 0;
1097 }
1098
1099 /**
1100 * ice_vf_get_port_info - Get the VF's port info structure
1101 * @vf: VF used to get the port info structure for
1102 */
ice_vf_get_port_info(struct ice_vf * vf)1103 struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
1104 {
1105 return vf->pf->hw.port_info;
1106 }
1107
1108 /**
1109 * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior
1110 * @vsi: the VSI to configure
1111 * @enable: whether to enable or disable the spoof checking
1112 *
1113 * Configure a VSI to enable (or disable) spoof checking behavior.
1114 */
ice_cfg_mac_antispoof(struct ice_vsi * vsi,bool enable)1115 static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
1116 {
1117 struct ice_vsi_ctx *ctx;
1118 int err;
1119
1120 ctx = kzalloc_obj(*ctx);
1121 if (!ctx)
1122 return -ENOMEM;
1123
1124 ctx->info.sec_flags = vsi->info.sec_flags;
1125 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
1126
1127 if (enable)
1128 ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1129 else
1130 ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1131
1132 err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
1133 if (err)
1134 dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n",
1135 enable ? "ON" : "OFF", vsi->vsi_num, err);
1136 else
1137 vsi->info.sec_flags = ctx->info.sec_flags;
1138
1139 kfree(ctx);
1140
1141 return err;
1142 }
1143
1144 /**
1145 * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI
1146 * @vsi: VSI to enable Tx spoof checking for
1147 */
ice_vsi_ena_spoofchk(struct ice_vsi * vsi)1148 static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
1149 {
1150 struct ice_vsi_vlan_ops *vlan_ops;
1151 int err = 0;
1152
1153 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1154
1155 /* Allow VF with VLAN 0 only to send all tagged traffic */
1156 if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
1157 err = vlan_ops->ena_tx_filtering(vsi);
1158 if (err)
1159 return err;
1160 }
1161
1162 return ice_cfg_mac_antispoof(vsi, true);
1163 }
1164
1165 /**
1166 * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI
1167 * @vsi: VSI to disable Tx spoof checking for
1168 */
ice_vsi_dis_spoofchk(struct ice_vsi * vsi)1169 static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi)
1170 {
1171 struct ice_vsi_vlan_ops *vlan_ops;
1172 int err;
1173
1174 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1175
1176 err = vlan_ops->dis_tx_filtering(vsi);
1177 if (err)
1178 return err;
1179
1180 return ice_cfg_mac_antispoof(vsi, false);
1181 }
1182
1183 /**
1184 * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI
1185 * @vsi: VSI associated to the VF
1186 * @enable: whether to enable or disable the spoof checking
1187 */
ice_vsi_apply_spoofchk(struct ice_vsi * vsi,bool enable)1188 int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable)
1189 {
1190 int err;
1191
1192 if (enable)
1193 err = ice_vsi_ena_spoofchk(vsi);
1194 else
1195 err = ice_vsi_dis_spoofchk(vsi);
1196
1197 return err;
1198 }
1199
1200 /**
1201 * ice_is_vf_trusted
1202 * @vf: pointer to the VF info
1203 */
ice_is_vf_trusted(struct ice_vf * vf)1204 bool ice_is_vf_trusted(struct ice_vf *vf)
1205 {
1206 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1207 }
1208
1209 /**
1210 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
1211 * @vf: the VF to check
1212 *
1213 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
1214 * otherwise
1215 */
ice_vf_has_no_qs_ena(struct ice_vf * vf)1216 bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
1217 {
1218 return bitmap_empty(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
1219 bitmap_empty(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
1220 }
1221
1222 /**
1223 * ice_is_vf_link_up - check if the VF's link is up
1224 * @vf: VF to check if link is up
1225 */
ice_is_vf_link_up(struct ice_vf * vf)1226 bool ice_is_vf_link_up(struct ice_vf *vf)
1227 {
1228 struct ice_port_info *pi = ice_vf_get_port_info(vf);
1229
1230 if (ice_check_vf_init(vf))
1231 return false;
1232
1233 if (ice_vf_has_no_qs_ena(vf))
1234 return false;
1235 else if (vf->link_forced)
1236 return vf->link_up;
1237 else
1238 return pi->phy.link_info.link_info &
1239 ICE_AQ_LINK_UP;
1240 }
1241
1242 /**
1243 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
1244 * @vf: VF that control VSI is being invalidated on
1245 */
ice_vf_ctrl_invalidate_vsi(struct ice_vf * vf)1246 void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
1247 {
1248 vf->ctrl_vsi_idx = ICE_NO_VSI;
1249 }
1250
1251 /**
1252 * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
1253 * @vf: VF that control VSI is being released on
1254 */
ice_vf_ctrl_vsi_release(struct ice_vf * vf)1255 void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
1256 {
1257 ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
1258 ice_vf_ctrl_invalidate_vsi(vf);
1259 }
1260
1261 /**
1262 * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
1263 * @vf: VF to setup control VSI for
1264 *
1265 * Returns pointer to the successfully allocated VSI struct on success,
1266 * otherwise returns NULL on failure.
1267 */
ice_vf_ctrl_vsi_setup(struct ice_vf * vf)1268 struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
1269 {
1270 struct ice_vsi_cfg_params params = {};
1271 struct ice_pf *pf = vf->pf;
1272 struct ice_vsi *vsi;
1273
1274 params.type = ICE_VSI_CTRL;
1275 params.port_info = ice_vf_get_port_info(vf);
1276 params.vf = vf;
1277 params.flags = ICE_VSI_FLAG_INIT;
1278
1279 vsi = ice_vsi_setup(pf, ¶ms);
1280 if (!vsi) {
1281 dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
1282 ice_vf_ctrl_invalidate_vsi(vf);
1283 }
1284
1285 return vsi;
1286 }
1287
1288 /**
1289 * ice_vf_init_host_cfg - Initialize host admin configuration
1290 * @vf: VF to initialize
1291 * @vsi: the VSI created at initialization
1292 *
1293 * Initialize the VF host configuration. Called during VF creation to setup
1294 * VLAN 0, add the VF VSI broadcast filter, and setup spoof checking. It
1295 * should only be called during VF creation.
1296 */
ice_vf_init_host_cfg(struct ice_vf * vf,struct ice_vsi * vsi)1297 int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
1298 {
1299 struct ice_vsi_vlan_ops *vlan_ops;
1300 struct ice_pf *pf = vf->pf;
1301 u8 broadcast[ETH_ALEN];
1302 struct device *dev;
1303 int err;
1304
1305 dev = ice_pf_to_dev(pf);
1306
1307 err = ice_vsi_add_vlan_zero(vsi);
1308 if (err) {
1309 dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
1310 vf->vf_id);
1311 return err;
1312 }
1313
1314 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1315 err = vlan_ops->ena_rx_filtering(vsi);
1316 if (err) {
1317 dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
1318 vf->vf_id);
1319 return err;
1320 }
1321
1322 eth_broadcast_addr(broadcast);
1323 err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
1324 if (err) {
1325 dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n",
1326 vf->vf_id, err);
1327 return err;
1328 }
1329
1330 vf->num_mac = 1;
1331
1332 err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
1333 if (err) {
1334 dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
1335 vf->vf_id);
1336 return err;
1337 }
1338
1339 return 0;
1340 }
1341
1342 /**
1343 * ice_vf_invalidate_vsi - invalidate vsi_idx to remove VSI access
1344 * @vf: VF to remove access to VSI for
1345 */
ice_vf_invalidate_vsi(struct ice_vf * vf)1346 void ice_vf_invalidate_vsi(struct ice_vf *vf)
1347 {
1348 vf->lan_vsi_idx = ICE_NO_VSI;
1349 }
1350
1351 /**
1352 * ice_vf_vsi_release - Release the VF VSI and invalidate indexes
1353 * @vf: pointer to the VF structure
1354 *
1355 * Release the VF associated with this VSI and then invalidate the VSI
1356 * indexes.
1357 */
ice_vf_vsi_release(struct ice_vf * vf)1358 void ice_vf_vsi_release(struct ice_vf *vf)
1359 {
1360 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1361
1362 if (WARN_ON(!vsi))
1363 return;
1364
1365 ice_vsi_release(vsi);
1366 ice_vf_invalidate_vsi(vf);
1367 }
1368
1369 /**
1370 * ice_get_vf_ctrl_vsi - Get first VF control VSI pointer
1371 * @pf: the PF private structure
1372 * @vsi: pointer to the VSI
1373 *
1374 * Return first found VF control VSI other than the vsi
1375 * passed by parameter. This function is used to determine
1376 * whether new resources have to be allocated for control VSI
1377 * or they can be shared with existing one.
1378 *
1379 * Return found VF control VSI pointer other itself. Return
1380 * NULL Otherwise.
1381 *
1382 */
ice_get_vf_ctrl_vsi(struct ice_pf * pf,struct ice_vsi * vsi)1383 struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
1384 {
1385 struct ice_vsi *ctrl_vsi = NULL;
1386 struct ice_vf *vf;
1387 unsigned int bkt;
1388
1389 rcu_read_lock();
1390 ice_for_each_vf_rcu(pf, bkt, vf) {
1391 if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
1392 ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
1393 break;
1394 }
1395 }
1396
1397 rcu_read_unlock();
1398 return ctrl_vsi;
1399 }
1400
1401 /**
1402 * ice_vf_update_mac_lldp_num - update the VF's number of LLDP addresses
1403 * @vf: a VF to add the address to
1404 * @vsi: the corresponding VSI
1405 * @incr: is the rule added or removed
1406 */
ice_vf_update_mac_lldp_num(struct ice_vf * vf,struct ice_vsi * vsi,bool incr)1407 void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi,
1408 bool incr)
1409 {
1410 bool lldp_by_fw = test_bit(ICE_FLAG_FW_LLDP_AGENT, vsi->back->flags);
1411 bool was_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw;
1412 bool is_ena;
1413
1414 if (WARN_ON(!vsi)) {
1415 vf->num_mac_lldp = 0;
1416 return;
1417 }
1418
1419 vf->num_mac_lldp += incr ? 1 : -1;
1420 is_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw;
1421
1422 if (was_ena != is_ena)
1423 ice_vsi_cfg_sw_lldp(vsi, false, is_ena);
1424 }
1425