// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022, Intel Corporation. */

#include "ice_vf_lib_private.h"
#include "ice.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_virtchnl_allowlist.h"

/* Public functions which may be accessed by all driver files */

/**
 * ice_get_vf_by_id - Get pointer to VF by ID
 * @pf: the PF private structure
 * @vf_id: the VF ID to locate
 *
 * Locate and return a pointer to the VF structure associated with a given ID.
 * Returns NULL if the ID does not have a valid VF structure associated with
 * it.
 *
 * This function takes a reference to the VF, which must be released by
 * calling ice_put_vf() once the caller is finished accessing the VF structure
 * returned.
 */
struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
{
	struct ice_vf *vf;

	rcu_read_lock();
	hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
		if (vf->vf_id == vf_id) {
			struct ice_vf *found;

			if (kref_get_unless_zero(&vf->refcnt))
				found = vf;
			else
				found = NULL;

			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();

	return NULL;
}

/**
 * ice_release_vf - Release VF associated with a refcount
 * @ref: the kref decremented to zero
 *
 * Callback function for kref_put to release a VF once its reference count has
 * hit zero.
 */
static void ice_release_vf(struct kref *ref)
{
	struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);

	pci_dev_put(vf->vfdev);

	vf->vf_ops->free(vf);
}

/**
 * ice_put_vf - Release a reference to a VF
 * @vf: the VF structure to decrease reference count on
 *
 * Decrease the reference count for a VF, and free the entry if it is no
 * longer in use.
 *
 * This must be called after ice_get_vf_by_id() once the reference to the VF
 * structure is no longer used. Otherwise, the VF structure will never be
 * freed.
 */
void ice_put_vf(struct ice_vf *vf)
{
	kref_put(&vf->refcnt, ice_release_vf);
}

/**
 * ice_has_vfs - Return true if the PF has any associated VFs
 * @pf: the PF private structure
 *
 * Return whether or not the PF has any allocated VFs.
 *
 * Note that this function only guarantees that there are no VFs at the point
 * of calling it. It does not guarantee that no more VFs will be added.
 */
bool ice_has_vfs(struct ice_pf *pf)
{
	/* A simple check that the hash table is not empty does not require
	 * the mutex or rcu_read_lock.
	 */
	return !hash_empty(pf->vfs.table);
}
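
/* Usage sketch (illustrative, not part of the upstream file): the lookup
 * helpers above are reference counted, so a caller outside of the VF table
 * lock is expected to pair ice_get_vf_by_id() with ice_put_vf(), roughly:
 *
 *	struct ice_vf *vf = ice_get_vf_by_id(pf, vf_id);
 *
 *	if (!vf)
 *		return -EINVAL;
 *	...access the VF, typically under vf->cfg_lock...
 *	ice_put_vf(vf);
 *
 * The surrounding function and its error convention are assumptions made
 * for the example only.
 */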

/**
 * ice_get_num_vfs - Get number of allocated VFs
 * @pf: the PF private structure
 *
 * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
 * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
 * the output of this function.
 */
u16 ice_get_num_vfs(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;
	u16 num_vfs = 0;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf)
		num_vfs++;
	rcu_read_unlock();

	return num_vfs;
}

/**
 * ice_get_vf_vsi - get VF's VSI based on the stored index
 * @vf: VF used to get VSI
 */
struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
{
	if (vf->lan_vsi_idx == ICE_NO_VSI)
		return NULL;

	return vf->pf->vsi[vf->lan_vsi_idx];
}

/**
 * ice_is_vf_disabled
 * @vf: pointer to the VF info
 *
 * If the PF has been disabled, there is no need to reset the VF until the PF
 * is active again. Similarly, if the VF has been disabled, this means
 * something else is resetting the VF, so we shouldn't continue.
 *
 * Returns true if the caller should consider the VF as disabled, whether
 * because that single VF is explicitly disabled or because the PF is
 * currently disabled.
 */
bool ice_is_vf_disabled(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	return (test_bit(ICE_VF_DIS, pf->state) ||
		test_bit(ICE_VF_STATE_DIS, vf->vf_states));
}

/**
 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
 * @vf: the VF being reset
 *
 * The max poll time is about ~800ms, which is about the maximum time it takes
 * for a VF to be reset and/or a VF driver to be removed.
 */
static void ice_wait_on_vf_reset(struct ice_vf *vf)
{
	int i;

	for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
			break;
		msleep(ICE_MAX_VF_RESET_SLEEP_MS);
	}
}

/**
 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
 * @vf: VF to check if it's ready to be configured/queried
 *
 * The purpose of this function is to make sure the VF is not in reset, not
 * disabled, and initialized so it can be configured and/or queried by a host
 * administrator.
 */
int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
{
	ice_wait_on_vf_reset(vf);

	if (ice_is_vf_disabled(vf))
		return -EINVAL;

	if (ice_check_vf_init(vf))
		return -EBUSY;

	return 0;
}
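
/* Illustrative sketch (not part of the upstream file): a host-side
 * configuration path would typically look the VF up, confirm it is ready,
 * and only then apply the change under the VF's cfg_lock:
 *
 *	struct ice_vf *vf = ice_get_vf_by_id(pf, vf_id);
 *	int err;
 *
 *	if (!vf)
 *		return -EINVAL;
 *	err = ice_check_vf_ready_for_cfg(vf);
 *	if (!err) {
 *		mutex_lock(&vf->cfg_lock);
 *		...apply the host configuration...
 *		mutex_unlock(&vf->cfg_lock);
 *	}
 *	ice_put_vf(vf);
 *	return err;
 *
 * The exact call site and locking details are assumptions for the example.
 */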

/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
	/* Inform VF that it is no longer active, as a warning */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * when it's safe again to access VF's VSI.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
	 * needs to clear them in the case of VFR/VFLR. If this is done for
	 * PFR, it can mess up VF resets because the VF driver may already
	 * have started cleanup by the time we get here.
	 */
	if (!is_pfr)
		vf->vf_ops->clear_mbx_register(vf);

	vf->vf_ops->trigger_reset_register(vf, is_vflr);
}

static void ice_vf_clear_counters(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (vsi)
		vsi->num_vlan = 0;

	vf->num_mac = 0;
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}

/**
 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
 * @vf: VF to perform pre VSI rebuild tasks
 *
 * These tasks are items that don't need to be amortized since they are most
 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
 */
static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
{
	/* Close any IRQ mapping now */
	if (vf->vf_ops->irq_close)
		vf->vf_ops->irq_close(vf);

	ice_vf_clear_counters(vf);
	vf->vf_ops->clear_reset_trigger(vf);
}

/**
 * ice_vf_reconfig_vsi - Reconfigure a VF VSI with the device
 * @vf: VF to reconfigure the VSI for
 *
 * This is called when a single VF is being reset (i.e. VFR, VFLR, host VF
 * configuration change, etc.).
 *
 * It brings the VSI down and then reconfigures it with the hardware.
 */
int ice_vf_reconfig_vsi(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_vsi_cfg_params params = {};
	struct ice_pf *pf = vf->pf;
	int err;

	if (WARN_ON(!vsi))
		return -EINVAL;

	params = ice_vsi_to_params(vsi);
	params.flags = ICE_VSI_FLAG_NO_INIT;

	ice_vsi_decfg(vsi);
	ice_fltr_remove_all(vsi);

	err = ice_vsi_cfg(vsi, &params);
	if (err) {
		dev_err(ice_pf_to_dev(pf),
			"Failed to reconfigure the VF%u's VSI, error %d\n",
			vf->vf_id, err);
		return err;
	}

	return 0;
}

/**
 * ice_vf_rebuild_vsi - rebuild the VF's VSI
 * @vf: VF to rebuild the VSI for
 *
 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
 * host, PFR, CORER, etc.).
 *
 * It reprograms the VSI configuration back into hardware.
 */
static int ice_vf_rebuild_vsi(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_pf *pf = vf->pf;

	if (WARN_ON(!vsi))
		return -EINVAL;

	if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) {
		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
			vf->vf_id);
		return -EIO;
	}
	/* vsi->idx will remain the same in this case so don't update
	 * vf->lan_vsi_idx
	 */
	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);

	return 0;
}

/**
 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
 * @vf: VF to rebuild the VLAN configuration for
 * @vsi: Pointer to VSI
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
 */
static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vf->pf);
	int err;

	if (ice_vf_is_port_vlan_ena(vf)) {
		err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
		if (err) {
			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
				vf->vf_id, err);
			return err;
		}

		err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
	} else {
		err = ice_vsi_add_vlan_zero(vsi);
	}

	if (err) {
		dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
			ice_vf_is_port_vlan_ena(vf) ?
			ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
		return err;
	}

	err = vlan_ops->ena_rx_filtering(vsi);
	if (err)
		dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
			 vf->vf_id, vsi->idx, err);

	return 0;
}

/**
 * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
 * @vf: VF to re-apply the configuration for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * needs to re-apply the host configured Tx rate limiting configuration.
 */
static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	int err;

	if (WARN_ON(!vsi))
		return -EINVAL;

	if (vf->min_tx_rate) {
		err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
		if (err) {
			dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
				vf->min_tx_rate, vf->vf_id, err);
			return err;
		}
	}

	if (vf->max_tx_rate) {
		err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
		if (err) {
			dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
				vf->max_tx_rate, vf->vf_id, err);
			return err;
		}
	}

	return 0;
}

/**
 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
 * @vf: VF to configure trust setting for
 */
static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
{
	assign_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps, vf->trusted);
}

/**
 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
 * @vf: VF to add MAC filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
 */
static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	u8 broadcast[ETH_ALEN];
	int status;

	if (WARN_ON(!vsi))
		return -EINVAL;

	if (ice_is_eswitch_mode_switchdev(vf->pf))
		return 0;

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
			vf->vf_id, status);
		return status;
	}

	vf->num_mac++;

	if (is_valid_ether_addr(vf->hw_lan_addr)) {
		status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
					  ICE_FWD_TO_VSI);
		if (status) {
			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
				&vf->hw_lan_addr[0], vf->vf_id,
				status);
			return status;
		}
		vf->num_mac++;

		ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
	}

	return 0;
}

/**
 * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
 * @vsi: Pointer to VSI
 *
 * This function moves VSI into corresponding scheduler aggregator node
 * based on cached value of "aggregator node info" per VSI
 */
static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;

	if (!vsi->agg_node)
		return;

	dev = ice_pf_to_dev(pf);
	if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
		dev_dbg(dev,
			"agg_id %u already has reached max_num_vsis %u\n",
			vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
		return;
	}

	status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
				     vsi->idx, vsi->tc_cfg.ena_tc);
	if (status)
		dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
			vsi->idx, vsi->agg_node->agg_id);
	else
		vsi->agg_node->num_vsis++;
}

/**
 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
 * @vf: VF to rebuild host configuration on
 */
static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_vf_set_host_trust_cfg(vf);

	if (ice_vf_rebuild_host_mac_cfg(vf))
		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_tx_rate_cfg(vf))
		dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
			vf->vf_id);

	if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
		dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
			vf->vf_id);

	/* rebuild aggregator node config for main VF VSI */
	ice_vf_rebuild_aggregator_node_cfg(vsi);
}

/**
 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 * @vf: pointer to the VF structure
 */
static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
	/* Clear Rx/Tx enabled queues flag */
	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}

/**
 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
 * @vf: VF to set in the initialized state
 *
 * After this function the VF will be ready to receive/handle the
 * VIRTCHNL_OP_GET_VF_RESOURCES message
 */
static void ice_vf_set_initialized(struct ice_vf *vf)
{
	ice_set_vf_state_qs_dis(vf);
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
	memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
}

/**
 * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild
 * @vf: the VF being reset
 *
 * Perform reset tasks which must occur after the VSI has been re-created or
 * rebuilt during a VF reset.
 */
static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
{
	ice_vf_rebuild_host_cfg(vf);
	ice_vf_set_initialized(vf);

	vf->vf_ops->post_vsi_rebuild(vf);
}

/**
 * ice_is_any_vf_in_unicast_promisc - check if any VF(s)
 * are in unicast promiscuous mode
 * @pf: PF structure for accessing VF(s)
 *
 * Return false if no VF(s) are in unicast promiscuous mode,
 * else return true
 */
bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
{
	bool is_vf_promisc = false;
	struct ice_vf *vf;
	unsigned int bkt;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		/* found a VF that has promiscuous mode configured */
		if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
			is_vf_promisc = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_vf_promisc;
}

/**
 * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes
 * @vf: the VF pointer
 * @vsi: the VSI to configure
 * @ucast_m: promiscuous mask to apply to unicast
 * @mcast_m: promiscuous mask to apply to multicast
 *
 * Decide which mask should be used for unicast and multicast filter,
 * based on presence of VLANs
 */
void
ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
			 u8 *ucast_m, u8 *mcast_m)
{
	if (ice_vf_is_port_vlan_ena(vf) ||
	    ice_vsi_has_non_zero_vlans(vsi)) {
		*mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
		*ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
	} else {
		*mcast_m = ICE_MCAST_PROMISC_BITS;
		*ucast_m = ICE_UCAST_PROMISC_BITS;
	}
}

/**
 * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI
 * @vf: the VF pointer
 * @vsi: the VSI to configure
 *
 * Clear all promiscuous/allmulticast filters for a VF
 */
static int
ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_pf *pf = vf->pf;
	u8 ucast_m, mcast_m;
	int ret = 0;

	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
		if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
			if (ice_is_dflt_vsi_in_use(vsi->port_info))
				ret = ice_clear_dflt_vsi(vsi);
		} else {
			ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
		}

		if (ret) {
			dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n");
		} else {
			clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
			dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n");
		}
	}

	if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
		ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
		if (ret) {
			dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n");
		} else {
			clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
			dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n");
		}
	}
	return ret;
}
failed\n"); 648 } else { 649 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); 650 dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n"); 651 } 652 } 653 return ret; 654 } 655 656 /** 657 * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI 658 * @vf: the VF to configure 659 * @vsi: the VF's VSI 660 * @promisc_m: the promiscuous mode to enable 661 */ 662 int 663 ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) 664 { 665 struct ice_hw *hw = &vsi->back->hw; 666 int status; 667 668 if (ice_vf_is_port_vlan_ena(vf)) 669 status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 670 ice_vf_get_port_vlan_id(vf)); 671 else if (ice_vsi_has_non_zero_vlans(vsi)) 672 status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m); 673 else 674 status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0); 675 676 if (status && status != -EEXIST) { 677 dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n", 678 vf->vf_id, status); 679 return status; 680 } 681 682 return 0; 683 } 684 685 /** 686 * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI 687 * @vf: the VF to configure 688 * @vsi: the VF's VSI 689 * @promisc_m: the promiscuous mode to disable 690 */ 691 int 692 ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) 693 { 694 struct ice_hw *hw = &vsi->back->hw; 695 int status; 696 697 if (ice_vf_is_port_vlan_ena(vf)) 698 status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 699 ice_vf_get_port_vlan_id(vf)); 700 else if (ice_vsi_has_non_zero_vlans(vsi)) 701 status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m); 702 else 703 status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0); 704 705 if (status && status != -ENOENT) { 706 dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n", 707 vf->vf_id, status); 708 return status; 709 } 710 711 return 0; 712 } 713 714 /** 715 * ice_reset_all_vfs - reset all allocated VFs in one go 716 * @pf: pointer to the PF structure 717 * 718 * Reset all VFs at once, in response to a PF or other device reset. 719 * 720 * First, tell the hardware to reset each VF, then do all the waiting in one 721 * chunk, and finally finish restoring each VF after the wait. This is useful 722 * during PF routines which need to reset all VFs, as otherwise it must perform 723 * these resets in a serialized fashion. 724 */ 725 void ice_reset_all_vfs(struct ice_pf *pf) 726 { 727 struct device *dev = ice_pf_to_dev(pf); 728 struct ice_hw *hw = &pf->hw; 729 struct ice_vf *vf; 730 unsigned int bkt; 731 732 /* If we don't have any VFs, then there is nothing to reset */ 733 if (!ice_has_vfs(pf)) 734 return; 735 736 mutex_lock(&pf->vfs.table_lock); 737 738 /* clear all malicious info if the VFs are getting reset */ 739 ice_for_each_vf(pf, bkt, vf) 740 ice_mbx_clear_malvf(&vf->mbx_info); 741 742 /* If VFs have been disabled, there is no need to reset */ 743 if (test_and_set_bit(ICE_VF_DIS, pf->state)) { 744 mutex_unlock(&pf->vfs.table_lock); 745 return; 746 } 747 748 /* Begin reset on all VFs at once */ 749 ice_for_each_vf(pf, bkt, vf) 750 ice_trigger_vf_reset(vf, true, true); 751 752 /* HW requires some time to make sure it can flush the FIFO for a VF 753 * when it resets it. Now that we've triggered all of the VFs, iterate 754 * the table again and wait for each VF to complete. 

/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 *
 * Reset all VFs at once, in response to a PF or other device reset.
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise the resets
 * would have to be performed one VF at a time.
 */
void ice_reset_all_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!ice_has_vfs(pf))
		return;

	mutex_lock(&pf->vfs.table_lock);

	/* clear all malicious info if the VFs are getting reset */
	ice_for_each_vf(pf, bkt, vf)
		ice_mbx_clear_malvf(&vf->mbx_info);

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
		mutex_unlock(&pf->vfs.table_lock);
		return;
	}

	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, bkt, vf)
		ice_trigger_vf_reset(vf, true, true);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Now that we've triggered all of the VFs, iterate
	 * the table again and wait for each VF to complete.
	 */
	ice_for_each_vf(pf, bkt, vf) {
		if (!vf->vf_ops->poll_reset_status(vf)) {
			/* Display a warning if at least one VF didn't manage
			 * to reset in time, but continue on with the
			 * operation.
			 */
			dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
			break;
		}
	}

	/* free VF resources to begin resetting the VSI state */
	ice_for_each_vf(pf, bkt, vf) {
		mutex_lock(&vf->cfg_lock);

		ice_eswitch_detach(pf, vf);
		vf->driver_caps = 0;
		ice_vc_set_default_allowlist(vf);

		ice_vf_fdir_exit(vf);
		ice_vf_fdir_init(vf);
		/* clean VF control VSI when resetting VFs since it should be
		 * setup only when VF creates its first FDIR rule.
		 */
		if (vf->ctrl_vsi_idx != ICE_NO_VSI)
			ice_vf_ctrl_invalidate_vsi(vf);

		ice_vf_pre_vsi_rebuild(vf);
		ice_vf_rebuild_vsi(vf);
		ice_vf_post_vsi_rebuild(vf);

		ice_eswitch_attach(pf, vf);

		mutex_unlock(&vf->cfg_lock);
	}

	ice_flush(hw);
	clear_bit(ICE_VF_DIS, pf->state);

	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_notify_vf_reset - Notify VF of a reset event
 * @vf: pointer to the VF structure
 */
static void ice_notify_vf_reset(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;
	struct virtchnl_pf_event pfe;

	/* Bail out if the VF is disabled, or is neither in the initialized
	 * nor the active state - otherwise proceed with notifications
	 */
	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
			      NULL);
}

/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
 * @flags: flags controlling behavior of the reset
 *
 * Flags:
 * ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event
 * ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
 * ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
 *
 * Returns 0 if the VF is currently in reset, if resets are disabled, or if
 * the VF resets successfully. Returns an error code if the VF fails to
 * rebuild.
 */
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
	struct ice_pf *pf = vf->pf;
	struct ice_lag *lag;
	struct ice_vsi *vsi;
	u8 act_prt, pri_prt;
	struct device *dev;
	int err = 0;
	bool rsd;

	dev = ice_pf_to_dev(pf);
	act_prt = ICE_LAG_INVALID_PORT;
	pri_prt = pf->hw.port_info->lport;

	if (flags & ICE_VF_RESET_NOTIFY)
		ice_notify_vf_reset(vf);

	if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
			vf->vf_id);
		return 0;
	}

	lag = pf->lag;
	mutex_lock(&pf->lag_mutex);
	if (lag && lag->bonded && lag->primary) {
		act_prt = lag->active_port;
		if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
		    lag->upper_netdev)
			ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
		else
			act_prt = ICE_LAG_INVALID_PORT;
	}

	if (flags & ICE_VF_RESET_LOCK)
		mutex_lock(&vf->cfg_lock);
	else
		lockdep_assert_held(&vf->cfg_lock);

	if (ice_is_vf_disabled(vf)) {
		vsi = ice_get_vf_vsi(vf);
		if (!vsi) {
			dev_dbg(dev, "VF is already removed\n");
			err = -EINVAL;
			goto out_unlock;
		}
		ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);

		if (ice_vsi_is_rx_queue_active(vsi))
			ice_vsi_stop_all_rx_rings(vsi);

		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
			vf->vf_id);
		goto out_unlock;
	}

	/* Set VF disable bit state here, before triggering reset */
	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
	ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);

	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EIO;
		goto out_unlock;
	}

	ice_dis_vf_qs(vf);

	/* Call Disable LAN Tx queue AQ whether or not queues are
	 * enabled. This is needed for successful completion of VFR.
	 */
	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
			NULL, vf->vf_ops->reset_type, vf->vf_id, NULL);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	rsd = vf->vf_ops->poll_reset_status(vf);

	/* Display a warning if the VF didn't manage to reset in time, but
	 * continue on with the operation.
	 */
	if (!rsd)
		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);

	vf->driver_caps = 0;
	ice_vc_set_default_allowlist(vf);

	/* disable promiscuous modes in case they were enabled;
	 * ignore any error if the disabling process failed
	 */
	ice_vf_clear_all_promisc_modes(vf, vsi);

	ice_vf_fdir_exit(vf);
	ice_vf_fdir_init(vf);
	/* clean VF control VSI when resetting VF since it should be setup
	 * only when VF creates its first FDIR rule.
	 */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	ice_vf_pre_vsi_rebuild(vf);

	if (ice_vf_reconfig_vsi(vf)) {
		dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
			vf->vf_id);
		err = -EFAULT;
		goto out_unlock;
	}

	ice_vf_post_vsi_rebuild(vf);
	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EINVAL;
		goto out_unlock;
	}

	ice_eswitch_update_repr(vf->repr_id, vsi);

	/* if the VF has been reset allow it to come up again */
	ice_mbx_clear_malvf(&vf->mbx_info);

out_unlock:
	if (flags & ICE_VF_RESET_LOCK)
		mutex_unlock(&vf->cfg_lock);

	if (lag && lag->bonded && lag->primary &&
	    act_prt != ICE_LAG_INVALID_PORT)
		ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
	mutex_unlock(&pf->lag_mutex);

	return err;
}

/**
 * ice_set_vf_state_dis - Set VF state to disabled
 * @vf: pointer to the VF structure
 */
void ice_set_vf_state_dis(struct ice_vf *vf)
{
	ice_set_vf_state_qs_dis(vf);
	vf->vf_ops->clear_reset_state(vf);
}

/* Private functions only accessed from other virtualization files */

/**
 * ice_initialize_vf_entry - Initialize a VF entry
 * @vf: pointer to the VF structure
 */
void ice_initialize_vf_entry(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vfs *vfs;

	vfs = &pf->vfs;

	/* assign default capabilities */
	vf->spoofchk = true;
	vf->num_vf_qs = vfs->num_qps_per;
	ice_vc_set_default_allowlist(vf);
	ice_virtchnl_set_dflt_ops(vf);

	/* ctrl_vsi_idx will be set to a valid value only when iAVF
	 * creates its first fdir rule.
	 */
	ice_vf_ctrl_invalidate_vsi(vf);
	ice_vf_fdir_init(vf);

	/* Initialize mailbox info for this VF */
	ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);

	mutex_init(&vf->cfg_lock);
}

/**
 * ice_dis_vf_qs - Disable the VF queues
 * @vf: pointer to the VF structure
 */
void ice_dis_vf_qs(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
	ice_vsi_stop_all_rx_rings(vsi);
	ice_set_vf_state_qs_dis(vf);
}

/**
 * ice_err_to_virt_err - translate errors for VF return code
 * @err: error return code
 */
enum virtchnl_status_code ice_err_to_virt_err(int err)
{
	switch (err) {
	case 0:
		return VIRTCHNL_STATUS_SUCCESS;
	case -EINVAL:
	case -ENODEV:
		return VIRTCHNL_STATUS_ERR_PARAM;
	case -ENOMEM:
		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
	case -EALREADY:
	case -EBUSY:
	case -EIO:
	case -ENOSPC:
		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
	default:
		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
	}
}
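
/* Illustrative sketch (not part of the upstream file): virtchnl message
 * handlers typically work with kernel errno values internally and only
 * translate them into a virtchnl status when building the reply for the VF:
 *
 *	int err = ice_handle_some_request(vf, msg);	(hypothetical helper)
 *	enum virtchnl_status_code v_ret = ice_err_to_virt_err(err);
 *
 * v_ret would then be sent back to the VF over the VF mailbox.
 */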

/**
 * ice_check_vf_init - helper to check if VF init complete
 * @vf: the pointer to the VF to check
 */
int ice_check_vf_init(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
		dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
			vf->vf_id);
		return -EBUSY;
	}
	return 0;
}

/**
 * ice_vf_get_port_info - Get the VF's port info structure
 * @vf: VF used to get the port info structure for
 */
struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
{
	return vf->pf->hw.port_info;
}

/**
 * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior
 * @vsi: the VSI to configure
 * @enable: whether to enable or disable the spoof checking
 *
 * Configure a VSI to enable (or disable) spoof checking behavior.
 */
static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
{
	struct ice_vsi_ctx *ctx;
	int err;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->info.sec_flags = vsi->info.sec_flags;
	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);

	if (enable)
		ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
	else
		ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;

	err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
	if (err)
		dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n",
			enable ? "ON" : "OFF", vsi->vsi_num, err);
	else
		vsi->info.sec_flags = ctx->info.sec_flags;

	kfree(ctx);

	return err;
}

/**
 * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI
 * @vsi: VSI to enable Tx spoof checking for
 */
static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops;
	int err = 0;

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);

	/* Allow VF with VLAN 0 only to send all tagged traffic */
	if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
		err = vlan_ops->ena_tx_filtering(vsi);
		if (err)
			return err;
	}

	return ice_cfg_mac_antispoof(vsi, true);
}

/**
 * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI
 * @vsi: VSI to disable Tx spoof checking for
 */
static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops;
	int err;

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);

	err = vlan_ops->dis_tx_filtering(vsi);
	if (err)
		return err;

	return ice_cfg_mac_antispoof(vsi, false);
}

/**
 * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI
 * @vsi: VSI associated to the VF
 * @enable: whether to enable or disable the spoof checking
 */
int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable)
{
	int err;

	if (enable)
		err = ice_vsi_ena_spoofchk(vsi);
	else
		err = ice_vsi_dis_spoofchk(vsi);

	return err;
}

/**
 * ice_is_vf_trusted
 * @vf: pointer to the VF info
 */
bool ice_is_vf_trusted(struct ice_vf *vf)
{
	return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
}

/**
 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
 * @vf: the VF to check
 *
 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
 * otherwise
 */
bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
{
	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
}

/**
 * ice_is_vf_link_up - check if the VF's link is up
 * @vf: VF to check if link is up
 */
bool ice_is_vf_link_up(struct ice_vf *vf)
{
	struct ice_port_info *pi = ice_vf_get_port_info(vf);

	if (ice_check_vf_init(vf))
		return false;

	if (ice_vf_has_no_qs_ena(vf))
		return false;
	else if (vf->link_forced)
		return vf->link_up;
	else
		return pi->phy.link_info.link_info &
			ICE_AQ_LINK_UP;
}

/**
 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
 * @vf: VF that control VSI is being invalidated on
 */
void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
{
	vf->ctrl_vsi_idx = ICE_NO_VSI;
}

/**
 * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
 * @vf: VF that control VSI is being released on
 */
void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
{
	ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
	ice_vf_ctrl_invalidate_vsi(vf);
}

/**
 * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
 * @vf: VF to setup control VSI for
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
{
	struct ice_vsi_cfg_params params = {};
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	params.type = ICE_VSI_CTRL;
	params.pi = ice_vf_get_port_info(vf);
	params.vf = vf;
	params.flags = ICE_VSI_FLAG_INIT;

	vsi = ice_vsi_setup(pf, &params);
	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
		ice_vf_ctrl_invalidate_vsi(vf);
	}

	return vsi;
}
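
/* Illustrative sketch (not part of the upstream file): the control VSI
 * helpers above form a small lifecycle. ctrl_vsi_idx starts out invalid,
 * the control VSI is created lazily for the first Flow Director rule, and
 * it is released and invalidated again on reset or teardown:
 *
 *	ice_vf_ctrl_invalidate_vsi(vf);			(at VF creation)
 *	...
 *	vsi = ice_vf_ctrl_vsi_setup(vf);		(first FDIR rule)
 *	...
 *	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
 *		ice_vf_ctrl_vsi_release(vf);		(VF reset/removal)
 */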

/**
 * ice_vf_init_host_cfg - Initialize host admin configuration
 * @vf: VF to initialize
 * @vsi: the VSI created at initialization
 *
 * Initialize the VF host configuration. Called during VF creation to set up
 * VLAN 0, add the VF VSI broadcast filter, and set up spoof checking. It
 * should only be called during VF creation.
 */
int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops;
	struct ice_pf *pf = vf->pf;
	u8 broadcast[ETH_ALEN];
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);

	err = ice_vsi_add_vlan_zero(vsi);
	if (err) {
		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
			 vf->vf_id);
		return err;
	}

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	err = vlan_ops->ena_rx_filtering(vsi);
	if (err) {
		dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
			 vf->vf_id);
		return err;
	}

	eth_broadcast_addr(broadcast);
	err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (err) {
		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n",
			vf->vf_id, err);
		return err;
	}

	vf->num_mac = 1;

	err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
	if (err) {
		dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
			 vf->vf_id);
		return err;
	}

	return 0;
}

/**
 * ice_vf_invalidate_vsi - invalidate vsi_idx to remove VSI access
 * @vf: VF to remove access to VSI for
 */
void ice_vf_invalidate_vsi(struct ice_vf *vf)
{
	vf->lan_vsi_idx = ICE_NO_VSI;
}

/**
 * ice_vf_vsi_release - Release the VF VSI and invalidate indexes
 * @vf: pointer to the VF structure
 *
 * Release the VSI associated with this VF and then invalidate the VSI
 * indexes.
 */
void ice_vf_vsi_release(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_vsi_release(vsi);
	ice_vf_invalidate_vsi(vf);
}

/**
 * ice_get_vf_ctrl_vsi - Get first VF control VSI pointer
 * @pf: the PF private structure
 * @vsi: pointer to the VSI
 *
 * Return the first VF control VSI found that is not the VSI passed in by
 * parameter. This function is used to determine whether new resources have
 * to be allocated for a control VSI or whether they can be shared with an
 * existing one.
 *
 * Return a pointer to the found VF control VSI other than @vsi, or NULL
 * otherwise.
 */
struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
{
	struct ice_vsi *ctrl_vsi = NULL;
	struct ice_vf *vf;
	unsigned int bkt;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
			ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
			break;
		}
	}

	rcu_read_unlock();
	return ctrl_vsi;
}
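
/* Illustrative sketch (not part of the upstream file): a caller tearing
 * down one control VSI can use ice_get_vf_ctrl_vsi() to decide whether
 * shared control VSI resources are still in use by another VF:
 *
 *	if (!ice_get_vf_ctrl_vsi(pf, vsi))
 *		...no other VF control VSI exists, so shared resources
 *		   may be freed...
 */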