1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (C) 2022, Intel Corporation. */ 3 4 #include "ice_vf_lib_private.h" 5 #include "ice.h" 6 #include "ice_lib.h" 7 #include "ice_fltr.h" 8 #include "ice_virtchnl_allowlist.h" 9 10 /* Public functions which may be accessed by all driver files */ 11 12 /** 13 * ice_get_vf_by_id - Get pointer to VF by ID 14 * @pf: the PF private structure 15 * @vf_id: the VF ID to locate 16 * 17 * Locate and return a pointer to the VF structure associated with a given ID. 18 * Returns NULL if the ID does not have a valid VF structure associated with 19 * it. 20 * 21 * This function takes a reference to the VF, which must be released by 22 * calling ice_put_vf() once the caller is finished accessing the VF structure 23 * returned. 24 */ 25 struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id) 26 { 27 struct ice_vf *vf; 28 29 rcu_read_lock(); 30 hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) { 31 if (vf->vf_id == vf_id) { 32 struct ice_vf *found; 33 34 if (kref_get_unless_zero(&vf->refcnt)) 35 found = vf; 36 else 37 found = NULL; 38 39 rcu_read_unlock(); 40 return found; 41 } 42 } 43 rcu_read_unlock(); 44 45 return NULL; 46 } 47 48 /** 49 * ice_release_vf - Release VF associated with a refcount 50 * @ref: the kref decremented to zero 51 * 52 * Callback function for kref_put to release a VF once its reference count has 53 * hit zero. 54 */ 55 static void ice_release_vf(struct kref *ref) 56 { 57 struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt); 58 59 pci_dev_put(vf->vfdev); 60 61 vf->vf_ops->free(vf); 62 } 63 64 /** 65 * ice_put_vf - Release a reference to a VF 66 * @vf: the VF structure to decrease reference count on 67 * 68 * Decrease the reference count for a VF, and free the entry if it is no 69 * longer in use. 70 * 71 * This must be called after ice_get_vf_by_id() once the reference to the VF 72 * structure is no longer used. Otherwise, the VF structure will never be 73 * freed. 74 */ 75 void ice_put_vf(struct ice_vf *vf) 76 { 77 kref_put(&vf->refcnt, ice_release_vf); 78 } 79 80 /** 81 * ice_has_vfs - Return true if the PF has any associated VFs 82 * @pf: the PF private structure 83 * 84 * Return whether or not the PF has any allocated VFs. 85 * 86 * Note that this function only guarantees that there are no VFs at the point 87 * of calling it. It does not guarantee that no more VFs will be added. 88 */ 89 bool ice_has_vfs(struct ice_pf *pf) 90 { 91 /* A simple check that the hash table is not empty does not require 92 * the mutex or rcu_read_lock. 93 */ 94 return !hash_empty(pf->vfs.table); 95 } 96 97 /** 98 * ice_get_num_vfs - Get number of allocated VFs 99 * @pf: the PF private structure 100 * 101 * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed 102 * to be contiguous. Do not assume that a VF ID is guaranteed to be less than 103 * the output of this function. 104 */ 105 u16 ice_get_num_vfs(struct ice_pf *pf) 106 { 107 struct ice_vf *vf; 108 unsigned int bkt; 109 u16 num_vfs = 0; 110 111 rcu_read_lock(); 112 ice_for_each_vf_rcu(pf, bkt, vf) 113 num_vfs++; 114 rcu_read_unlock(); 115 116 return num_vfs; 117 } 118 119 /** 120 * ice_get_vf_vsi - get VF's VSI based on the stored index 121 * @vf: VF used to get VSI 122 */ 123 struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf) 124 { 125 if (vf->lan_vsi_idx == ICE_NO_VSI) 126 return NULL; 127 128 return vf->pf->vsi[vf->lan_vsi_idx]; 129 } 130 131 /** 132 * ice_is_vf_disabled 133 * @vf: pointer to the VF info 134 * 135 * If the PF has been disabled, there is no need resetting VF until PF is 136 * active again. Similarly, if the VF has been disabled, this means something 137 * else is resetting the VF, so we shouldn't continue. 138 * 139 * Returns true if the caller should consider the VF as disabled whether 140 * because that single VF is explicitly disabled or because the PF is 141 * currently disabled. 142 */ 143 bool ice_is_vf_disabled(struct ice_vf *vf) 144 { 145 struct ice_pf *pf = vf->pf; 146 147 return (test_bit(ICE_VF_DIS, pf->state) || 148 test_bit(ICE_VF_STATE_DIS, vf->vf_states)); 149 } 150 151 /** 152 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset 153 * @vf: The VF being resseting 154 * 155 * The max poll time is about ~800ms, which is about the maximum time it takes 156 * for a VF to be reset and/or a VF driver to be removed. 157 */ 158 static void ice_wait_on_vf_reset(struct ice_vf *vf) 159 { 160 int i; 161 162 for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) { 163 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) 164 break; 165 msleep(ICE_MAX_VF_RESET_SLEEP_MS); 166 } 167 } 168 169 /** 170 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried 171 * @vf: VF to check if it's ready to be configured/queried 172 * 173 * The purpose of this function is to make sure the VF is not in reset, not 174 * disabled, and initialized so it can be configured and/or queried by a host 175 * administrator. 176 */ 177 int ice_check_vf_ready_for_cfg(struct ice_vf *vf) 178 { 179 ice_wait_on_vf_reset(vf); 180 181 if (ice_is_vf_disabled(vf)) 182 return -EINVAL; 183 184 if (ice_check_vf_init(vf)) 185 return -EBUSY; 186 187 return 0; 188 } 189 190 /** 191 * ice_trigger_vf_reset - Reset a VF on HW 192 * @vf: pointer to the VF structure 193 * @is_vflr: true if VFLR was issued, false if not 194 * @is_pfr: true if the reset was triggered due to a previous PFR 195 * 196 * Trigger hardware to start a reset for a particular VF. Expects the caller 197 * to wait the proper amount of time to allow hardware to reset the VF before 198 * it cleans up and restores VF functionality. 199 */ 200 static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr) 201 { 202 /* Inform VF that it is no longer active, as a warning */ 203 clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); 204 205 /* Disable VF's configuration API during reset. The flag is re-enabled 206 * when it's safe again to access VF's VSI. 207 */ 208 clear_bit(ICE_VF_STATE_INIT, vf->vf_states); 209 210 /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver 211 * needs to clear them in the case of VFR/VFLR. If this is done for 212 * PFR, it can mess up VF resets because the VF driver may already 213 * have started cleanup by the time we get here. 214 */ 215 if (!is_pfr) 216 vf->vf_ops->clear_mbx_register(vf); 217 218 vf->vf_ops->trigger_reset_register(vf, is_vflr); 219 } 220 221 static void ice_vf_clear_counters(struct ice_vf *vf) 222 { 223 struct ice_vsi *vsi = ice_get_vf_vsi(vf); 224 225 if (vsi) 226 vsi->num_vlan = 0; 227 228 vf->num_mac = 0; 229 memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events)); 230 memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events)); 231 } 232 233 /** 234 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild 235 * @vf: VF to perform pre VSI rebuild tasks 236 * 237 * These tasks are items that don't need to be amortized since they are most 238 * likely called in a for loop with all VF(s) in the reset_all_vfs() case. 239 */ 240 static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf) 241 { 242 /* Close any IRQ mapping now */ 243 if (vf->vf_ops->irq_close) 244 vf->vf_ops->irq_close(vf); 245 246 ice_vf_clear_counters(vf); 247 vf->vf_ops->clear_reset_trigger(vf); 248 } 249 250 /** 251 * ice_vf_recreate_vsi - Release and re-create the VF's VSI 252 * @vf: VF to recreate the VSI for 253 * 254 * This is only called when a single VF is being reset (i.e. VVF, VFLR, host 255 * VF configuration change, etc) 256 * 257 * It releases and then re-creates a new VSI. 258 */ 259 static int ice_vf_recreate_vsi(struct ice_vf *vf) 260 { 261 struct ice_pf *pf = vf->pf; 262 int err; 263 264 ice_vf_vsi_release(vf); 265 266 err = vf->vf_ops->create_vsi(vf); 267 if (err) { 268 dev_err(ice_pf_to_dev(pf), 269 "Failed to recreate the VF%u's VSI, error %d\n", 270 vf->vf_id, err); 271 return err; 272 } 273 274 return 0; 275 } 276 277 /** 278 * ice_vf_rebuild_vsi - rebuild the VF's VSI 279 * @vf: VF to rebuild the VSI for 280 * 281 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the 282 * host, PFR, CORER, etc.). 283 * 284 * It reprograms the VSI configuration back into hardware. 285 */ 286 static int ice_vf_rebuild_vsi(struct ice_vf *vf) 287 { 288 struct ice_vsi *vsi = ice_get_vf_vsi(vf); 289 struct ice_pf *pf = vf->pf; 290 291 if (WARN_ON(!vsi)) 292 return -EINVAL; 293 294 if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) { 295 dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n", 296 vf->vf_id); 297 return -EIO; 298 } 299 /* vsi->idx will remain the same in this case so don't update 300 * vf->lan_vsi_idx 301 */ 302 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); 303 vf->lan_vsi_num = vsi->vsi_num; 304 305 return 0; 306 } 307 308 /** 309 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN 310 * @vf: VF to add MAC filters for 311 * @vsi: Pointer to VSI 312 * 313 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver 314 * always re-adds either a VLAN 0 or port VLAN based filter after reset. 315 */ 316 static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi) 317 { 318 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 319 struct device *dev = ice_pf_to_dev(vf->pf); 320 int err; 321 322 if (ice_vf_is_port_vlan_ena(vf)) { 323 err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info); 324 if (err) { 325 dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n", 326 vf->vf_id, err); 327 return err; 328 } 329 330 err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info); 331 } else { 332 err = ice_vsi_add_vlan_zero(vsi); 333 } 334 335 if (err) { 336 dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n", 337 ice_vf_is_port_vlan_ena(vf) ? 338 ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err); 339 return err; 340 } 341 342 err = vlan_ops->ena_rx_filtering(vsi); 343 if (err) 344 dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n", 345 vf->vf_id, vsi->idx, err); 346 347 return 0; 348 } 349 350 /** 351 * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration 352 * @vf: VF to re-apply the configuration for 353 * 354 * Called after a VF VSI has been re-added/rebuild during reset. The PF driver 355 * needs to re-apply the host configured Tx rate limiting configuration. 356 */ 357 static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf) 358 { 359 struct device *dev = ice_pf_to_dev(vf->pf); 360 struct ice_vsi *vsi = ice_get_vf_vsi(vf); 361 int err; 362 363 if (WARN_ON(!vsi)) 364 return -EINVAL; 365 366 if (vf->min_tx_rate) { 367 err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000); 368 if (err) { 369 dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n", 370 vf->min_tx_rate, vf->vf_id, err); 371 return err; 372 } 373 } 374 375 if (vf->max_tx_rate) { 376 err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000); 377 if (err) { 378 dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n", 379 vf->max_tx_rate, vf->vf_id, err); 380 return err; 381 } 382 } 383 384 return 0; 385 } 386 387 /** 388 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value 389 * @vf: VF to configure trust setting for 390 */ 391 static void ice_vf_set_host_trust_cfg(struct ice_vf *vf) 392 { 393 assign_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps, vf->trusted); 394 } 395 396 /** 397 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA 398 * @vf: VF to add MAC filters for 399 * 400 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver 401 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset. 402 */ 403 static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) 404 { 405 struct device *dev = ice_pf_to_dev(vf->pf); 406 struct ice_vsi *vsi = ice_get_vf_vsi(vf); 407 u8 broadcast[ETH_ALEN]; 408 int status; 409 410 if (WARN_ON(!vsi)) 411 return -EINVAL; 412 413 if (ice_is_eswitch_mode_switchdev(vf->pf)) 414 return 0; 415 416 eth_broadcast_addr(broadcast); 417 status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); 418 if (status) { 419 dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n", 420 vf->vf_id, status); 421 return status; 422 } 423 424 vf->num_mac++; 425 426 if (is_valid_ether_addr(vf->hw_lan_addr)) { 427 status = ice_fltr_add_mac(vsi, vf->hw_lan_addr, 428 ICE_FWD_TO_VSI); 429 if (status) { 430 dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n", 431 &vf->hw_lan_addr[0], vf->vf_id, 432 status); 433 return status; 434 } 435 vf->num_mac++; 436 437 ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr); 438 } 439 440 return 0; 441 } 442 443 /** 444 * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config 445 * @vsi: Pointer to VSI 446 * 447 * This function moves VSI into corresponding scheduler aggregator node 448 * based on cached value of "aggregator node info" per VSI 449 */ 450 static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi) 451 { 452 struct ice_pf *pf = vsi->back; 453 struct device *dev; 454 int status; 455 456 if (!vsi->agg_node) 457 return; 458 459 dev = ice_pf_to_dev(pf); 460 if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) { 461 dev_dbg(dev, 462 "agg_id %u already has reached max_num_vsis %u\n", 463 vsi->agg_node->agg_id, vsi->agg_node->num_vsis); 464 return; 465 } 466 467 status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id, 468 vsi->idx, vsi->tc_cfg.ena_tc); 469 if (status) 470 dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node", 471 vsi->idx, vsi->agg_node->agg_id); 472 else 473 vsi->agg_node->num_vsis++; 474 } 475 476 /** 477 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset 478 * @vf: VF to rebuild host configuration on 479 */ 480 static void ice_vf_rebuild_host_cfg(struct ice_vf *vf) 481 { 482 struct device *dev = ice_pf_to_dev(vf->pf); 483 struct ice_vsi *vsi = ice_get_vf_vsi(vf); 484 485 if (WARN_ON(!vsi)) 486 return; 487 488 ice_vf_set_host_trust_cfg(vf); 489 490 if (ice_vf_rebuild_host_mac_cfg(vf)) 491 dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n", 492 vf->vf_id); 493 494 if (ice_vf_rebuild_host_vlan_cfg(vf, vsi)) 495 dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n", 496 vf->vf_id); 497 498 if (ice_vf_rebuild_host_tx_rate_cfg(vf)) 499 dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n", 500 vf->vf_id); 501 502 if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk)) 503 dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n", 504 vf->vf_id); 505 506 /* rebuild aggregator node config for main VF VSI */ 507 ice_vf_rebuild_aggregator_node_cfg(vsi); 508 } 509 510 /** 511 * ice_set_vf_state_qs_dis - Set VF queues state to disabled 512 * @vf: pointer to the VF structure 513 */ 514 static void ice_set_vf_state_qs_dis(struct ice_vf *vf) 515 { 516 /* Clear Rx/Tx enabled queues flag */ 517 bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF); 518 bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF); 519 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); 520 } 521 522 /** 523 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication 524 * @vf: VF to set in initialized state 525 * 526 * After this function the VF will be ready to receive/handle the 527 * VIRTCHNL_OP_GET_VF_RESOURCES message 528 */ 529 static void ice_vf_set_initialized(struct ice_vf *vf) 530 { 531 ice_set_vf_state_qs_dis(vf); 532 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); 533 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); 534 clear_bit(ICE_VF_STATE_DIS, vf->vf_states); 535 set_bit(ICE_VF_STATE_INIT, vf->vf_states); 536 memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps)); 537 } 538 539 /** 540 * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild 541 * @vf: the VF being reset 542 * 543 * Perform reset tasks which must occur after the VSI has been re-created or 544 * rebuilt during a VF reset. 545 */ 546 static void ice_vf_post_vsi_rebuild(struct ice_vf *vf) 547 { 548 ice_vf_rebuild_host_cfg(vf); 549 ice_vf_set_initialized(vf); 550 551 vf->vf_ops->post_vsi_rebuild(vf); 552 } 553 554 /** 555 * ice_is_any_vf_in_unicast_promisc - check if any VF(s) 556 * are in unicast promiscuous mode 557 * @pf: PF structure for accessing VF(s) 558 * 559 * Return false if no VF(s) are in unicast promiscuous mode, 560 * else return true 561 */ 562 bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf) 563 { 564 bool is_vf_promisc = false; 565 struct ice_vf *vf; 566 unsigned int bkt; 567 568 rcu_read_lock(); 569 ice_for_each_vf_rcu(pf, bkt, vf) { 570 /* found a VF that has promiscuous mode configured */ 571 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) { 572 is_vf_promisc = true; 573 break; 574 } 575 } 576 rcu_read_unlock(); 577 578 return is_vf_promisc; 579 } 580 581 /** 582 * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes 583 * @vf: the VF pointer 584 * @vsi: the VSI to configure 585 * @ucast_m: promiscuous mask to apply to unicast 586 * @mcast_m: promiscuous mask to apply to multicast 587 * 588 * Decide which mask should be used for unicast and multicast filter, 589 * based on presence of VLANs 590 */ 591 void 592 ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi, 593 u8 *ucast_m, u8 *mcast_m) 594 { 595 if (ice_vf_is_port_vlan_ena(vf) || 596 ice_vsi_has_non_zero_vlans(vsi)) { 597 *mcast_m = ICE_MCAST_VLAN_PROMISC_BITS; 598 *ucast_m = ICE_UCAST_VLAN_PROMISC_BITS; 599 } else { 600 *mcast_m = ICE_MCAST_PROMISC_BITS; 601 *ucast_m = ICE_UCAST_PROMISC_BITS; 602 } 603 } 604 605 /** 606 * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI 607 * @vf: the VF pointer 608 * @vsi: the VSI to configure 609 * 610 * Clear all promiscuous/allmulticast filters for a VF 611 */ 612 static int 613 ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi) 614 { 615 struct ice_pf *pf = vf->pf; 616 u8 ucast_m, mcast_m; 617 int ret = 0; 618 619 ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m); 620 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) { 621 if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) { 622 if (ice_is_dflt_vsi_in_use(vsi->port_info)) 623 ret = ice_clear_dflt_vsi(vsi); 624 } else { 625 ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m); 626 } 627 628 if (ret) { 629 dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n"); 630 } else { 631 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); 632 dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n"); 633 } 634 } 635 636 if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) { 637 ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m); 638 if (ret) { 639 dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n"); 640 } else { 641 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); 642 dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n"); 643 } 644 } 645 return ret; 646 } 647 648 /** 649 * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI 650 * @vf: the VF to configure 651 * @vsi: the VF's VSI 652 * @promisc_m: the promiscuous mode to enable 653 */ 654 int 655 ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) 656 { 657 struct ice_hw *hw = &vsi->back->hw; 658 int status; 659 660 if (ice_vf_is_port_vlan_ena(vf)) 661 status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 662 ice_vf_get_port_vlan_id(vf)); 663 else if (ice_vsi_has_non_zero_vlans(vsi)) 664 status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m); 665 else 666 status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0); 667 668 if (status && status != -EEXIST) { 669 dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n", 670 vf->vf_id, status); 671 return status; 672 } 673 674 return 0; 675 } 676 677 /** 678 * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI 679 * @vf: the VF to configure 680 * @vsi: the VF's VSI 681 * @promisc_m: the promiscuous mode to disable 682 */ 683 int 684 ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) 685 { 686 struct ice_hw *hw = &vsi->back->hw; 687 int status; 688 689 if (ice_vf_is_port_vlan_ena(vf)) 690 status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 691 ice_vf_get_port_vlan_id(vf)); 692 else if (ice_vsi_has_non_zero_vlans(vsi)) 693 status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m); 694 else 695 status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0); 696 697 if (status && status != -ENOENT) { 698 dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n", 699 vf->vf_id, status); 700 return status; 701 } 702 703 return 0; 704 } 705 706 /** 707 * ice_reset_all_vfs - reset all allocated VFs in one go 708 * @pf: pointer to the PF structure 709 * 710 * Reset all VFs at once, in response to a PF or other device reset. 711 * 712 * First, tell the hardware to reset each VF, then do all the waiting in one 713 * chunk, and finally finish restoring each VF after the wait. This is useful 714 * during PF routines which need to reset all VFs, as otherwise it must perform 715 * these resets in a serialized fashion. 716 */ 717 void ice_reset_all_vfs(struct ice_pf *pf) 718 { 719 struct device *dev = ice_pf_to_dev(pf); 720 struct ice_hw *hw = &pf->hw; 721 struct ice_vf *vf; 722 unsigned int bkt; 723 724 /* If we don't have any VFs, then there is nothing to reset */ 725 if (!ice_has_vfs(pf)) 726 return; 727 728 mutex_lock(&pf->vfs.table_lock); 729 730 /* clear all malicious info if the VFs are getting reset */ 731 ice_for_each_vf(pf, bkt, vf) 732 ice_mbx_clear_malvf(&vf->mbx_info); 733 734 /* If VFs have been disabled, there is no need to reset */ 735 if (test_and_set_bit(ICE_VF_DIS, pf->state)) { 736 mutex_unlock(&pf->vfs.table_lock); 737 return; 738 } 739 740 /* Begin reset on all VFs at once */ 741 ice_for_each_vf(pf, bkt, vf) 742 ice_trigger_vf_reset(vf, true, true); 743 744 /* HW requires some time to make sure it can flush the FIFO for a VF 745 * when it resets it. Now that we've triggered all of the VFs, iterate 746 * the table again and wait for each VF to complete. 747 */ 748 ice_for_each_vf(pf, bkt, vf) { 749 if (!vf->vf_ops->poll_reset_status(vf)) { 750 /* Display a warning if at least one VF didn't manage 751 * to reset in time, but continue on with the 752 * operation. 753 */ 754 dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id); 755 break; 756 } 757 } 758 759 /* free VF resources to begin resetting the VSI state */ 760 ice_for_each_vf(pf, bkt, vf) { 761 mutex_lock(&vf->cfg_lock); 762 763 vf->driver_caps = 0; 764 ice_vc_set_default_allowlist(vf); 765 766 ice_vf_fdir_exit(vf); 767 ice_vf_fdir_init(vf); 768 /* clean VF control VSI when resetting VFs since it should be 769 * setup only when VF creates its first FDIR rule. 770 */ 771 if (vf->ctrl_vsi_idx != ICE_NO_VSI) 772 ice_vf_ctrl_invalidate_vsi(vf); 773 774 ice_vf_pre_vsi_rebuild(vf); 775 ice_vf_rebuild_vsi(vf); 776 ice_vf_post_vsi_rebuild(vf); 777 778 mutex_unlock(&vf->cfg_lock); 779 } 780 781 if (ice_is_eswitch_mode_switchdev(pf)) 782 if (ice_eswitch_rebuild(pf)) 783 dev_warn(dev, "eswitch rebuild failed\n"); 784 785 ice_flush(hw); 786 clear_bit(ICE_VF_DIS, pf->state); 787 788 mutex_unlock(&pf->vfs.table_lock); 789 } 790 791 /** 792 * ice_notify_vf_reset - Notify VF of a reset event 793 * @vf: pointer to the VF structure 794 */ 795 static void ice_notify_vf_reset(struct ice_vf *vf) 796 { 797 struct ice_hw *hw = &vf->pf->hw; 798 struct virtchnl_pf_event pfe; 799 800 /* Bail out if VF is in disabled state, neither initialized, nor active 801 * state - otherwise proceed with notifications 802 */ 803 if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) && 804 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) || 805 test_bit(ICE_VF_STATE_DIS, vf->vf_states)) 806 return; 807 808 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; 809 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; 810 ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 811 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe), 812 NULL); 813 } 814 815 /** 816 * ice_reset_vf - Reset a particular VF 817 * @vf: pointer to the VF structure 818 * @flags: flags controlling behavior of the reset 819 * 820 * Flags: 821 * ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event 822 * ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset 823 * ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting 824 * 825 * Returns 0 if the VF is currently in reset, if resets are disabled, or if 826 * the VF resets successfully. Returns an error code if the VF fails to 827 * rebuild. 828 */ 829 int ice_reset_vf(struct ice_vf *vf, u32 flags) 830 { 831 struct ice_pf *pf = vf->pf; 832 struct ice_vsi *vsi; 833 struct device *dev; 834 int err = 0; 835 bool rsd; 836 837 dev = ice_pf_to_dev(pf); 838 839 if (flags & ICE_VF_RESET_NOTIFY) 840 ice_notify_vf_reset(vf); 841 842 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { 843 dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n", 844 vf->vf_id); 845 return 0; 846 } 847 848 if (flags & ICE_VF_RESET_LOCK) 849 mutex_lock(&vf->cfg_lock); 850 else 851 lockdep_assert_held(&vf->cfg_lock); 852 853 if (ice_is_vf_disabled(vf)) { 854 vsi = ice_get_vf_vsi(vf); 855 if (!vsi) { 856 dev_dbg(dev, "VF is already removed\n"); 857 err = -EINVAL; 858 goto out_unlock; 859 } 860 ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); 861 862 if (ice_vsi_is_rx_queue_active(vsi)) 863 ice_vsi_stop_all_rx_rings(vsi); 864 865 dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n", 866 vf->vf_id); 867 goto out_unlock; 868 } 869 870 /* Set VF disable bit state here, before triggering reset */ 871 set_bit(ICE_VF_STATE_DIS, vf->vf_states); 872 ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false); 873 874 vsi = ice_get_vf_vsi(vf); 875 if (WARN_ON(!vsi)) { 876 err = -EIO; 877 goto out_unlock; 878 } 879 880 ice_dis_vf_qs(vf); 881 882 /* Call Disable LAN Tx queue AQ whether or not queues are 883 * enabled. This is needed for successful completion of VFR. 884 */ 885 ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL, 886 NULL, vf->vf_ops->reset_type, vf->vf_id, NULL); 887 888 /* poll VPGEN_VFRSTAT reg to make sure 889 * that reset is complete 890 */ 891 rsd = vf->vf_ops->poll_reset_status(vf); 892 893 /* Display a warning if VF didn't manage to reset in time, but need to 894 * continue on with the operation. 895 */ 896 if (!rsd) 897 dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id); 898 899 vf->driver_caps = 0; 900 ice_vc_set_default_allowlist(vf); 901 902 /* disable promiscuous modes in case they were enabled 903 * ignore any error if disabling process failed 904 */ 905 ice_vf_clear_all_promisc_modes(vf, vsi); 906 907 ice_vf_fdir_exit(vf); 908 ice_vf_fdir_init(vf); 909 /* clean VF control VSI when resetting VF since it should be setup 910 * only when VF creates its first FDIR rule. 911 */ 912 if (vf->ctrl_vsi_idx != ICE_NO_VSI) 913 ice_vf_ctrl_vsi_release(vf); 914 915 ice_vf_pre_vsi_rebuild(vf); 916 917 if (ice_vf_recreate_vsi(vf)) { 918 dev_err(dev, "Failed to release and setup the VF%u's VSI\n", 919 vf->vf_id); 920 err = -EFAULT; 921 goto out_unlock; 922 } 923 924 ice_vf_post_vsi_rebuild(vf); 925 vsi = ice_get_vf_vsi(vf); 926 if (WARN_ON(!vsi)) { 927 err = -EINVAL; 928 goto out_unlock; 929 } 930 931 ice_eswitch_update_repr(vsi); 932 933 /* if the VF has been reset allow it to come up again */ 934 ice_mbx_clear_malvf(&vf->mbx_info); 935 936 out_unlock: 937 if (flags & ICE_VF_RESET_LOCK) 938 mutex_unlock(&vf->cfg_lock); 939 940 return err; 941 } 942 943 /** 944 * ice_set_vf_state_dis - Set VF state to disabled 945 * @vf: pointer to the VF structure 946 */ 947 void ice_set_vf_state_dis(struct ice_vf *vf) 948 { 949 ice_set_vf_state_qs_dis(vf); 950 vf->vf_ops->clear_reset_state(vf); 951 } 952 953 /* Private functions only accessed from other virtualization files */ 954 955 /** 956 * ice_initialize_vf_entry - Initialize a VF entry 957 * @vf: pointer to the VF structure 958 */ 959 void ice_initialize_vf_entry(struct ice_vf *vf) 960 { 961 struct ice_pf *pf = vf->pf; 962 struct ice_vfs *vfs; 963 964 vfs = &pf->vfs; 965 966 /* assign default capabilities */ 967 vf->spoofchk = true; 968 vf->num_vf_qs = vfs->num_qps_per; 969 ice_vc_set_default_allowlist(vf); 970 ice_virtchnl_set_dflt_ops(vf); 971 972 /* ctrl_vsi_idx will be set to a valid value only when iAVF 973 * creates its first fdir rule. 974 */ 975 ice_vf_ctrl_invalidate_vsi(vf); 976 ice_vf_fdir_init(vf); 977 978 /* Initialize mailbox info for this VF */ 979 ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info); 980 981 mutex_init(&vf->cfg_lock); 982 } 983 984 /** 985 * ice_dis_vf_qs - Disable the VF queues 986 * @vf: pointer to the VF structure 987 */ 988 void ice_dis_vf_qs(struct ice_vf *vf) 989 { 990 struct ice_vsi *vsi = ice_get_vf_vsi(vf); 991 992 if (WARN_ON(!vsi)) 993 return; 994 995 ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); 996 ice_vsi_stop_all_rx_rings(vsi); 997 ice_set_vf_state_qs_dis(vf); 998 } 999 1000 /** 1001 * ice_err_to_virt_err - translate errors for VF return code 1002 * @err: error return code 1003 */ 1004 enum virtchnl_status_code ice_err_to_virt_err(int err) 1005 { 1006 switch (err) { 1007 case 0: 1008 return VIRTCHNL_STATUS_SUCCESS; 1009 case -EINVAL: 1010 case -ENODEV: 1011 return VIRTCHNL_STATUS_ERR_PARAM; 1012 case -ENOMEM: 1013 return VIRTCHNL_STATUS_ERR_NO_MEMORY; 1014 case -EALREADY: 1015 case -EBUSY: 1016 case -EIO: 1017 case -ENOSPC: 1018 return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; 1019 default: 1020 return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; 1021 } 1022 } 1023 1024 /** 1025 * ice_check_vf_init - helper to check if VF init complete 1026 * @vf: the pointer to the VF to check 1027 */ 1028 int ice_check_vf_init(struct ice_vf *vf) 1029 { 1030 struct ice_pf *pf = vf->pf; 1031 1032 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { 1033 dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n", 1034 vf->vf_id); 1035 return -EBUSY; 1036 } 1037 return 0; 1038 } 1039 1040 /** 1041 * ice_vf_get_port_info - Get the VF's port info structure 1042 * @vf: VF used to get the port info structure for 1043 */ 1044 struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf) 1045 { 1046 return vf->pf->hw.port_info; 1047 } 1048 1049 /** 1050 * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior 1051 * @vsi: the VSI to configure 1052 * @enable: whether to enable or disable the spoof checking 1053 * 1054 * Configure a VSI to enable (or disable) spoof checking behavior. 1055 */ 1056 static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable) 1057 { 1058 struct ice_vsi_ctx *ctx; 1059 int err; 1060 1061 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 1062 if (!ctx) 1063 return -ENOMEM; 1064 1065 ctx->info.sec_flags = vsi->info.sec_flags; 1066 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); 1067 1068 if (enable) 1069 ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; 1070 else 1071 ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; 1072 1073 err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL); 1074 if (err) 1075 dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n", 1076 enable ? "ON" : "OFF", vsi->vsi_num, err); 1077 else 1078 vsi->info.sec_flags = ctx->info.sec_flags; 1079 1080 kfree(ctx); 1081 1082 return err; 1083 } 1084 1085 /** 1086 * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI 1087 * @vsi: VSI to enable Tx spoof checking for 1088 */ 1089 static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi) 1090 { 1091 struct ice_vsi_vlan_ops *vlan_ops; 1092 int err = 0; 1093 1094 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 1095 1096 /* Allow VF with VLAN 0 only to send all tagged traffic */ 1097 if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) { 1098 err = vlan_ops->ena_tx_filtering(vsi); 1099 if (err) 1100 return err; 1101 } 1102 1103 return ice_cfg_mac_antispoof(vsi, true); 1104 } 1105 1106 /** 1107 * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI 1108 * @vsi: VSI to disable Tx spoof checking for 1109 */ 1110 static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi) 1111 { 1112 struct ice_vsi_vlan_ops *vlan_ops; 1113 int err; 1114 1115 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 1116 1117 err = vlan_ops->dis_tx_filtering(vsi); 1118 if (err) 1119 return err; 1120 1121 return ice_cfg_mac_antispoof(vsi, false); 1122 } 1123 1124 /** 1125 * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI 1126 * @vsi: VSI associated to the VF 1127 * @enable: whether to enable or disable the spoof checking 1128 */ 1129 int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable) 1130 { 1131 int err; 1132 1133 if (enable) 1134 err = ice_vsi_ena_spoofchk(vsi); 1135 else 1136 err = ice_vsi_dis_spoofchk(vsi); 1137 1138 return err; 1139 } 1140 1141 /** 1142 * ice_is_vf_trusted 1143 * @vf: pointer to the VF info 1144 */ 1145 bool ice_is_vf_trusted(struct ice_vf *vf) 1146 { 1147 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 1148 } 1149 1150 /** 1151 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled 1152 * @vf: the VF to check 1153 * 1154 * Returns true if the VF has no Rx and no Tx queues enabled and returns false 1155 * otherwise 1156 */ 1157 bool ice_vf_has_no_qs_ena(struct ice_vf *vf) 1158 { 1159 return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) && 1160 !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF)); 1161 } 1162 1163 /** 1164 * ice_is_vf_link_up - check if the VF's link is up 1165 * @vf: VF to check if link is up 1166 */ 1167 bool ice_is_vf_link_up(struct ice_vf *vf) 1168 { 1169 struct ice_port_info *pi = ice_vf_get_port_info(vf); 1170 1171 if (ice_check_vf_init(vf)) 1172 return false; 1173 1174 if (ice_vf_has_no_qs_ena(vf)) 1175 return false; 1176 else if (vf->link_forced) 1177 return vf->link_up; 1178 else 1179 return pi->phy.link_info.link_info & 1180 ICE_AQ_LINK_UP; 1181 } 1182 1183 /** 1184 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access 1185 * @vf: VF that control VSI is being invalidated on 1186 */ 1187 void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf) 1188 { 1189 vf->ctrl_vsi_idx = ICE_NO_VSI; 1190 } 1191 1192 /** 1193 * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it 1194 * @vf: VF that control VSI is being released on 1195 */ 1196 void ice_vf_ctrl_vsi_release(struct ice_vf *vf) 1197 { 1198 ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]); 1199 ice_vf_ctrl_invalidate_vsi(vf); 1200 } 1201 1202 /** 1203 * ice_vf_ctrl_vsi_setup - Set up a VF control VSI 1204 * @vf: VF to setup control VSI for 1205 * 1206 * Returns pointer to the successfully allocated VSI struct on success, 1207 * otherwise returns NULL on failure. 1208 */ 1209 struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf) 1210 { 1211 struct ice_vsi_cfg_params params = {}; 1212 struct ice_pf *pf = vf->pf; 1213 struct ice_vsi *vsi; 1214 1215 params.type = ICE_VSI_CTRL; 1216 params.pi = ice_vf_get_port_info(vf); 1217 params.vf = vf; 1218 params.flags = ICE_VSI_FLAG_INIT; 1219 1220 vsi = ice_vsi_setup(pf, ¶ms); 1221 if (!vsi) { 1222 dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n"); 1223 ice_vf_ctrl_invalidate_vsi(vf); 1224 } 1225 1226 return vsi; 1227 } 1228 1229 /** 1230 * ice_vf_init_host_cfg - Initialize host admin configuration 1231 * @vf: VF to initialize 1232 * @vsi: the VSI created at initialization 1233 * 1234 * Initialize the VF host configuration. Called during VF creation to setup 1235 * VLAN 0, add the VF VSI broadcast filter, and setup spoof checking. It 1236 * should only be called during VF creation. 1237 */ 1238 int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi) 1239 { 1240 struct ice_vsi_vlan_ops *vlan_ops; 1241 struct ice_pf *pf = vf->pf; 1242 u8 broadcast[ETH_ALEN]; 1243 struct device *dev; 1244 int err; 1245 1246 dev = ice_pf_to_dev(pf); 1247 1248 err = ice_vsi_add_vlan_zero(vsi); 1249 if (err) { 1250 dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n", 1251 vf->vf_id); 1252 return err; 1253 } 1254 1255 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 1256 err = vlan_ops->ena_rx_filtering(vsi); 1257 if (err) { 1258 dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n", 1259 vf->vf_id); 1260 return err; 1261 } 1262 1263 eth_broadcast_addr(broadcast); 1264 err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); 1265 if (err) { 1266 dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n", 1267 vf->vf_id, err); 1268 return err; 1269 } 1270 1271 vf->num_mac = 1; 1272 1273 err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk); 1274 if (err) { 1275 dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n", 1276 vf->vf_id); 1277 return err; 1278 } 1279 1280 return 0; 1281 } 1282 1283 /** 1284 * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access 1285 * @vf: VF to remove access to VSI for 1286 */ 1287 void ice_vf_invalidate_vsi(struct ice_vf *vf) 1288 { 1289 vf->lan_vsi_idx = ICE_NO_VSI; 1290 vf->lan_vsi_num = ICE_NO_VSI; 1291 } 1292 1293 /** 1294 * ice_vf_vsi_release - Release the VF VSI and invalidate indexes 1295 * @vf: pointer to the VF structure 1296 * 1297 * Release the VF associated with this VSI and then invalidate the VSI 1298 * indexes. 1299 */ 1300 void ice_vf_vsi_release(struct ice_vf *vf) 1301 { 1302 struct ice_vsi *vsi = ice_get_vf_vsi(vf); 1303 1304 if (WARN_ON(!vsi)) 1305 return; 1306 1307 ice_vsi_release(vsi); 1308 ice_vf_invalidate_vsi(vf); 1309 } 1310 1311 /** 1312 * ice_get_vf_ctrl_vsi - Get first VF control VSI pointer 1313 * @pf: the PF private structure 1314 * @vsi: pointer to the VSI 1315 * 1316 * Return first found VF control VSI other than the vsi 1317 * passed by parameter. This function is used to determine 1318 * whether new resources have to be allocated for control VSI 1319 * or they can be shared with existing one. 1320 * 1321 * Return found VF control VSI pointer other itself. Return 1322 * NULL Otherwise. 1323 * 1324 */ 1325 struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi) 1326 { 1327 struct ice_vsi *ctrl_vsi = NULL; 1328 struct ice_vf *vf; 1329 unsigned int bkt; 1330 1331 rcu_read_lock(); 1332 ice_for_each_vf_rcu(pf, bkt, vf) { 1333 if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) { 1334 ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx]; 1335 break; 1336 } 1337 } 1338 1339 rcu_read_unlock(); 1340 return ctrl_vsi; 1341 } 1342