// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022, Intel Corporation. */

#include "ice_vf_lib_private.h"
#include "ice.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_virtchnl_allowlist.h"

/* Public functions which may be accessed by all driver files */

/**
 * ice_get_vf_by_id - Get pointer to VF by ID
 * @pf: the PF private structure
 * @vf_id: the VF ID to locate
 *
 * Locate and return a pointer to the VF structure associated with a given ID.
 * Returns NULL if the ID does not have a valid VF structure associated with
 * it.
 *
 * This function takes a reference to the VF, which must be released by
 * calling ice_put_vf() once the caller is finished accessing the VF structure
 * returned.
 */
struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
{
	struct ice_vf *vf;

	rcu_read_lock();
	hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
		if (vf->vf_id == vf_id) {
			struct ice_vf *found;

			if (kref_get_unless_zero(&vf->refcnt))
				found = vf;
			else
				found = NULL;

			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();

	return NULL;
}

/**
 * ice_release_vf - Release VF associated with a refcount
 * @ref: the kref decremented to zero
 *
 * Callback function for kref_put to release a VF once its reference count has
 * hit zero.
 */
static void ice_release_vf(struct kref *ref)
{
	struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);

	pci_dev_put(vf->vfdev);

	vf->vf_ops->free(vf);
}

/**
 * ice_put_vf - Release a reference to a VF
 * @vf: the VF structure to decrease reference count on
 *
 * Decrease the reference count for a VF, and free the entry if it is no
 * longer in use.
 *
 * This must be called after ice_get_vf_by_id() once the reference to the VF
 * structure is no longer used. Otherwise, the VF structure will never be
 * freed.
 */
void ice_put_vf(struct ice_vf *vf)
{
	kref_put(&vf->refcnt, ice_release_vf);
}
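
/* Illustrative sketch, not part of the driver: the lookup/put reference
 * pattern described in the kernel-doc above. The helper name is
 * hypothetical; ice_get_vf_by_id() and ice_put_vf() are the accessors
 * defined in this file.
 */
static bool ice_vf_example_is_active(struct ice_pf *pf, u16 vf_id)
{
	struct ice_vf *vf;
	bool active;

	vf = ice_get_vf_by_id(pf, vf_id);	/* takes a reference */
	if (!vf)
		return false;

	active = test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	ice_put_vf(vf);				/* drop the reference */
	return active;
}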

/**
 * ice_has_vfs - Return true if the PF has any associated VFs
 * @pf: the PF private structure
 *
 * Return whether or not the PF has any allocated VFs.
 *
 * Note that this function only guarantees that there are no VFs at the point
 * of calling it. It does not guarantee that no more VFs will be added.
 */
bool ice_has_vfs(struct ice_pf *pf)
{
	/* A simple check that the hash table is not empty does not require
	 * the mutex or rcu_read_lock.
	 */
	return !hash_empty(pf->vfs.table);
}

/**
 * ice_get_num_vfs - Get number of allocated VFs
 * @pf: the PF private structure
 *
 * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
 * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
 * the output of this function.
 */
u16 ice_get_num_vfs(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;
	u16 num_vfs = 0;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf)
		num_vfs++;
	rcu_read_unlock();

	return num_vfs;
}

/**
 * ice_get_vf_vsi - get VF's VSI based on the stored index
 * @vf: VF used to get VSI
 */
struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
{
	if (vf->lan_vsi_idx == ICE_NO_VSI)
		return NULL;

	return vf->pf->vsi[vf->lan_vsi_idx];
}

/**
 * ice_is_vf_disabled
 * @vf: pointer to the VF info
 *
 * If the PF has been disabled, there is no need to reset the VF until the PF
 * is active again. Similarly, if the VF has been disabled, this means
 * something else is resetting the VF, so we shouldn't continue.
 *
 * Returns true if the caller should consider the VF as disabled whether
 * because that single VF is explicitly disabled or because the PF is
 * currently disabled.
 */
bool ice_is_vf_disabled(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	return (test_bit(ICE_VF_DIS, pf->state) ||
		test_bit(ICE_VF_STATE_DIS, vf->vf_states));
}

/**
 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
 * @vf: the VF being reset
 *
 * The max poll time is about ~800ms, which is about the maximum time it takes
 * for a VF to be reset and/or a VF driver to be removed.
 */
static void ice_wait_on_vf_reset(struct ice_vf *vf)
{
	int i;

	for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
			break;
		msleep(ICE_MAX_VF_RESET_SLEEP_MS);
	}
}

/**
 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
 * @vf: VF to check if it's ready to be configured/queried
 *
 * The purpose of this function is to make sure the VF is not in reset, not
 * disabled, and initialized so it can be configured and/or queried by a host
 * administrator.
 */
int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
{
	ice_wait_on_vf_reset(vf);

	if (ice_is_vf_disabled(vf))
		return -EINVAL;

	if (ice_check_vf_init(vf))
		return -EBUSY;

	return 0;
}
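
/* Illustrative sketch, not part of the driver: the guard sequence a host
 * admin path would typically use before touching VF configuration. The
 * helper name and the trust toggle are hypothetical; ice_get_vf_by_id(),
 * ice_check_vf_ready_for_cfg(), ice_put_vf() and vf->cfg_lock are defined
 * or used elsewhere in this file.
 */
static int ice_vf_example_set_trust(struct ice_pf *pf, u16 vf_id, bool trust)
{
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	mutex_lock(&vf->cfg_lock);
	vf->trusted = trust;		/* hypothetical config change */
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}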

/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
	/* Inform VF that it is no longer active, as a warning */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * when it's safe again to access VF's VSI.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
	 * needs to clear them in the case of VFR/VFLR. If this is done for
	 * PFR, it can mess up VF resets because the VF driver may already
	 * have started cleanup by the time we get here.
	 */
	if (!is_pfr)
		vf->vf_ops->clear_mbx_register(vf);

	vf->vf_ops->trigger_reset_register(vf, is_vflr);
}

static void ice_vf_clear_counters(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (vsi)
		vsi->num_vlan = 0;

	vf->num_mac = 0;
	vf->num_mac_lldp = 0;
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}

/**
 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
 * @vf: VF to perform pre VSI rebuild tasks
 *
 * These tasks are items that don't need to be amortized since they are most
 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
 */
static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
{
	/* Close any IRQ mapping now */
	if (vf->vf_ops->irq_close)
		vf->vf_ops->irq_close(vf);

	ice_vf_clear_counters(vf);
	vf->vf_ops->clear_reset_trigger(vf);
}

/**
 * ice_vf_reconfig_vsi - Reconfigure a VF VSI with the device
 * @vf: VF to reconfigure the VSI for
 *
 * This is called when a single VF is being reset (i.e. VFR, VFLR, host VF
 * configuration change, etc.).
 *
 * It brings the VSI down and then reconfigures it with the hardware.
 */
static int ice_vf_reconfig_vsi(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_pf *pf = vf->pf;
	int err;

	if (WARN_ON(!vsi))
		return -EINVAL;

	vsi->flags = ICE_VSI_FLAG_NO_INIT;

	ice_vsi_decfg(vsi);
	ice_fltr_remove_all(vsi);

	err = ice_vsi_cfg(vsi);
	if (err) {
		dev_err(ice_pf_to_dev(pf),
			"Failed to reconfigure the VF%u's VSI, error %d\n",
			vf->vf_id, err);
		return err;
	}

	return 0;
}

/**
 * ice_vf_rebuild_vsi - rebuild the VF's VSI
 * @vf: VF to rebuild the VSI for
 *
 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
 * host, PFR, CORER, etc.).
 *
 * It reprograms the VSI configuration back into hardware.
 */
static int ice_vf_rebuild_vsi(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_pf *pf = vf->pf;

	if (WARN_ON(!vsi))
		return -EINVAL;

	if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) {
		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
			vf->vf_id);
		return -EIO;
	}
	/* vsi->idx will remain the same in this case so don't update
	 * vf->lan_vsi_idx
	 */
	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);

	return 0;
}

/**
 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
 * @vf: VF to rebuild the VLAN configuration for
 * @vsi: Pointer to VSI
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
 */
static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vf->pf);
	int err;

	if (ice_vf_is_port_vlan_ena(vf)) {
		err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
		if (err) {
			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
				vf->vf_id, err);
			return err;
		}

		err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
	} else {
		/* clear possible previous port vlan config */
		err = ice_vsi_clear_port_vlan(vsi);
		if (err) {
			dev_err(dev, "failed to clear port VLAN via VSI parameters for VF %u, error %d\n",
				vf->vf_id, err);
			return err;
		}
		err = ice_vsi_add_vlan_zero(vsi);
	}

	if (err) {
		dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
			ice_vf_is_port_vlan_ena(vf) ?
			ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
		return err;
	}

	err = vlan_ops->ena_rx_filtering(vsi);
	if (err)
		dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
			 vf->vf_id, vsi->idx, err);

	return 0;
}

/**
 * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
 * @vf: VF to re-apply the configuration for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * needs to re-apply the host configured Tx rate limiting configuration.
 */
static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	int err;

	if (WARN_ON(!vsi))
		return -EINVAL;

	if (vf->min_tx_rate) {
		err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
		if (err) {
			dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
				vf->min_tx_rate, vf->vf_id, err);
			return err;
		}
	}

	if (vf->max_tx_rate) {
		err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
		if (err) {
			dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
				vf->max_tx_rate, vf->vf_id, err);
			return err;
		}
	}

	return 0;
}

/**
 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
 * @vf: VF to configure trust setting for
 */
static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
{
	assign_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps, vf->trusted);
}

/**
 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
 * @vf: VF to add MAC filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
 */
static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	u8 broadcast[ETH_ALEN];
	int status;

	if (WARN_ON(!vsi))
		return -EINVAL;

	if (ice_is_eswitch_mode_switchdev(vf->pf))
		return 0;

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
			vf->vf_id, status);
		return status;
	}

	vf->num_mac++;

	if (is_valid_ether_addr(vf->hw_lan_addr)) {
		status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
					  ICE_FWD_TO_VSI);
		if (status) {
			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
				&vf->hw_lan_addr[0], vf->vf_id,
				status);
			return status;
		}
		vf->num_mac++;

		ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
	}

	return 0;
}

/**
 * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
 * @vsi: Pointer to VSI
 *
 * This function moves VSI into corresponding scheduler aggregator node
 * based on cached value of "aggregator node info" per VSI
 */
static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;

	if (!vsi->agg_node)
		return;

	dev = ice_pf_to_dev(pf);
	if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
		dev_dbg(dev,
			"agg_id %u already has reached max_num_vsis %u\n",
			vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
		return;
	}

	status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
				     vsi->idx, vsi->tc_cfg.ena_tc);
	if (status)
		dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
			vsi->idx, vsi->agg_node->agg_id);
	else
		vsi->agg_node->num_vsis++;
}

/**
 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
 * @vf: VF to rebuild host configuration on
 */
static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_vf_set_host_trust_cfg(vf);

	if (ice_vf_rebuild_host_mac_cfg(vf))
		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_tx_rate_cfg(vf))
		dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
			vf->vf_id);

	if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
		dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
			vf->vf_id);

	/* rebuild aggregator node config for main VF VSI */
	ice_vf_rebuild_aggregator_node_cfg(vsi);
}

/**
 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 * @vf: pointer to the VF structure
 */
static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
	/* Clear Rx/Tx enabled queues flag */
	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}

/**
 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
 * @vf: VF to set in initialized state
 *
 * After this function the VF will be ready to receive/handle the
 * VIRTCHNL_OP_GET_VF_RESOURCES message
 */
static void ice_vf_set_initialized(struct ice_vf *vf)
{
	ice_set_vf_state_qs_dis(vf);
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
	memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
}

/**
 * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild
 * @vf: the VF being reset
 *
 * Perform reset tasks which must occur after the VSI has been re-created or
 * rebuilt during a VF reset.
 */
static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
{
	ice_vf_rebuild_host_cfg(vf);
	ice_vf_set_initialized(vf);

	vf->vf_ops->post_vsi_rebuild(vf);
}

/**
 * ice_is_any_vf_in_unicast_promisc - check if any VF(s)
 * are in unicast promiscuous mode
 * @pf: PF structure for accessing VF(s)
 *
 * Return false if no VF(s) are in unicast promiscuous mode,
 * else return true
 */
bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
{
	bool is_vf_promisc = false;
	struct ice_vf *vf;
	unsigned int bkt;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		/* found a VF that has promiscuous mode configured */
		if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
			is_vf_promisc = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_vf_promisc;
}

/**
 * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes
 * @vf: the VF pointer
 * @vsi: the VSI to configure
 * @ucast_m: promiscuous mask to apply to unicast
 * @mcast_m: promiscuous mask to apply to multicast
 *
 * Decide which mask should be used for unicast and multicast filter,
 * based on presence of VLANs
 */
void
ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
			 u8 *ucast_m, u8 *mcast_m)
{
	if (ice_vf_is_port_vlan_ena(vf) ||
	    ice_vsi_has_non_zero_vlans(vsi)) {
		*mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
		*ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
	} else {
		*mcast_m = ICE_MCAST_PROMISC_BITS;
		*ucast_m = ICE_UCAST_PROMISC_BITS;
	}
}
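
/* Illustrative sketch, not part of the driver: how a caller such as a
 * virtchnl promiscuous-mode handler might combine the mask helper above
 * with ice_vf_set_vsi_promisc()/ice_vf_clear_vsi_promisc() defined below.
 * The helper name is hypothetical.
 */
static int ice_vf_example_set_ucast_promisc(struct ice_vf *vf,
					    struct ice_vsi *vsi, bool ena)
{
	u8 ucast_m, mcast_m;
	int err;

	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);

	if (ena)
		err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
	else
		err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);

	/* keep the per-VF state bit in sync with hardware */
	if (!err)
		assign_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states, ena);

	return err;
}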

/**
 * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI
 * @vf: the VF pointer
 * @vsi: the VSI to configure
 *
 * Clear all promiscuous/allmulticast filters for a VF
 */
static int
ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_pf *pf = vf->pf;
	u8 ucast_m, mcast_m;
	int ret = 0;

	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
		if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
			if (ice_is_dflt_vsi_in_use(vsi->port_info))
				ret = ice_clear_dflt_vsi(vsi);
		} else {
			ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
		}

		if (ret) {
			dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n");
		} else {
			clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
			dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n");
		}
	}

	if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
		ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
		if (ret) {
			dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n");
		} else {
			clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
			dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n");
		}
	}
	return ret;
}

/**
 * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
 * @vf: the VF to configure
 * @vsi: the VF's VSI
 * @promisc_m: the promiscuous mode to enable
 */
int
ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
{
	struct ice_hw *hw = &vsi->back->hw;
	int status;

	if (ice_vf_is_port_vlan_ena(vf))
		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
						  ice_vf_get_port_vlan_id(vf));
	else if (ice_vsi_has_non_zero_vlans(vsi))
		status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
	else
		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);

	if (status && status != -EEXIST) {
		dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
			vf->vf_id, status);
		return status;
	}

	return 0;
}

/**
 * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI
 * @vf: the VF to configure
 * @vsi: the VF's VSI
 * @promisc_m: the promiscuous mode to disable
 */
int
ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
{
	struct ice_hw *hw = &vsi->back->hw;
	int status;

	if (ice_vf_is_port_vlan_ena(vf))
		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						    ice_vf_get_port_vlan_id(vf));
	else if (ice_vsi_has_non_zero_vlans(vsi))
		status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
	else
		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);

	if (status && status != -ENOENT) {
		dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
			vf->vf_id, status);
		return status;
	}

	return 0;
}

/**
 * ice_reset_vf_mbx_cnt - reset VF mailbox message count
 * @vf: pointer to the VF structure
 *
 * This function clears the VF mailbox message count, and should be called on
 * VF reset.
 */
static void ice_reset_vf_mbx_cnt(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
		ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
	else
		ice_mbx_clear_malvf(&vf->mbx_info);
}

/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 *
 * Reset all VFs at once, in response to a PF or other device reset.
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise these resets
 * would have to be performed serially.
 */
void ice_reset_all_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!ice_has_vfs(pf))
		return;

	mutex_lock(&pf->vfs.table_lock);

	/* clear all malicious info if the VFs are getting reset */
	ice_for_each_vf(pf, bkt, vf)
		ice_reset_vf_mbx_cnt(vf);

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
		mutex_unlock(&pf->vfs.table_lock);
		return;
	}

	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, bkt, vf)
		ice_trigger_vf_reset(vf, true, true);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Now that we've triggered all of the VFs, iterate
	 * the table again and wait for each VF to complete.
	 */
	ice_for_each_vf(pf, bkt, vf) {
		if (!vf->vf_ops->poll_reset_status(vf)) {
			/* Display a warning if at least one VF didn't manage
			 * to reset in time, but continue on with the
			 * operation.
			 */
			dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
			break;
		}
	}

	/* free VF resources to begin resetting the VSI state */
	ice_for_each_vf(pf, bkt, vf) {
		mutex_lock(&vf->cfg_lock);

		ice_eswitch_detach_vf(pf, vf);
		vf->driver_caps = 0;
		ice_vc_set_default_allowlist(vf);

		ice_vf_fdir_exit(vf);
		ice_vf_fdir_init(vf);
		/* clean VF control VSI when resetting VFs since it should be
		 * setup only when VF creates its first FDIR rule.
		 */
		if (vf->ctrl_vsi_idx != ICE_NO_VSI)
			ice_vf_ctrl_invalidate_vsi(vf);

		ice_vf_pre_vsi_rebuild(vf);
		ice_vf_rebuild_vsi(vf);
		ice_vf_post_vsi_rebuild(vf);

		ice_eswitch_attach_vf(pf, vf);

		mutex_unlock(&vf->cfg_lock);
	}

	ice_flush(hw);
	clear_bit(ICE_VF_DIS, pf->state);

	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_notify_vf_reset - Notify VF of a reset event
 * @vf: pointer to the VF structure
 */
static void ice_notify_vf_reset(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;
	struct virtchnl_pf_event pfe;

	/* Bail out if VF is in disabled state, neither initialized, nor active
	 * state - otherwise proceed with notifications
	 */
	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
			      NULL);
}

/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
 * @flags: flags controlling behavior of the reset
 *
 * Flags:
 * ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event
 * ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
 * ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
 *
 * Returns 0 if the VF is currently in reset, if resets are disabled, or if
 * the VF resets successfully. Returns an error code if the VF fails to
 * rebuild.
 */
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int err = 0;
	u8 act_prt;
	bool rsd;

	dev = ice_pf_to_dev(pf);

	if (flags & ICE_VF_RESET_NOTIFY)
		ice_notify_vf_reset(vf);

	if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
			vf->vf_id);
		return 0;
	}

	if (flags & ICE_VF_RESET_LOCK)
		mutex_lock(&vf->cfg_lock);
	else
		lockdep_assert_held(&vf->cfg_lock);

	mutex_lock(&pf->lag_mutex);
	act_prt = ice_lag_prepare_vf_reset(pf->lag);

	if (ice_is_vf_disabled(vf)) {
		vsi = ice_get_vf_vsi(vf);
		if (!vsi) {
			dev_dbg(dev, "VF is already removed\n");
			err = -EINVAL;
			goto out_unlock;
		}
		ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);

		if (ice_vsi_is_rx_queue_active(vsi))
			ice_vsi_stop_all_rx_rings(vsi);

		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
			vf->vf_id);
		goto out_unlock;
	}

	/* Set VF disable bit state here, before triggering reset */
	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
	ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);

	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EIO;
		goto out_unlock;
	}

	ice_dis_vf_qs(vf);

	/* Call Disable LAN Tx queue AQ whether or not queues are
	 * enabled. This is needed for successful completion of VFR.
	 */
	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
			NULL, vf->vf_ops->reset_type, vf->vf_id, NULL);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	rsd = vf->vf_ops->poll_reset_status(vf);

	/* Display a warning if VF didn't manage to reset in time, but need to
	 * continue on with the operation.
	 */
	if (!rsd)
		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);

	vf->driver_caps = 0;
	ice_vc_set_default_allowlist(vf);

	/* disable promiscuous modes in case they were enabled
	 * ignore any error if disabling process failed
	 */
	ice_vf_clear_all_promisc_modes(vf, vsi);

	ice_vf_fdir_exit(vf);
	ice_vf_fdir_init(vf);
	/* clean VF control VSI when resetting VF since it should be setup
	 * only when VF creates its first FDIR rule.
	 */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	ice_vf_pre_vsi_rebuild(vf);

	if (ice_vf_reconfig_vsi(vf)) {
		dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
			vf->vf_id);
		err = -EFAULT;
		goto out_unlock;
	}

	ice_vf_post_vsi_rebuild(vf);
	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EINVAL;
		goto out_unlock;
	}

	ice_eswitch_update_repr(&vf->repr_id, vsi);

	/* if the VF has been reset allow it to come up again */
	ice_reset_vf_mbx_cnt(vf);

out_unlock:
	ice_lag_complete_vf_reset(pf->lag, act_prt);
	mutex_unlock(&pf->lag_mutex);

	if (flags & ICE_VF_RESET_LOCK)
		mutex_unlock(&vf->cfg_lock);

	return err;
}
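
/* Illustrative sketch, not part of the driver: a caller that does not
 * already hold vf->cfg_lock would typically ask ice_reset_vf() to notify
 * the VF and to take the lock itself. The helper name is hypothetical;
 * the flags are the ones documented above.
 */
static int ice_vf_example_request_reset(struct ice_pf *pf, u16 vf_id)
{
	struct ice_vf *vf;
	int err;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	err = ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);

	ice_put_vf(vf);
	return err;
}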

/**
 * ice_set_vf_state_dis - Set VF state to disabled
 * @vf: pointer to the VF structure
 */
void ice_set_vf_state_dis(struct ice_vf *vf)
{
	ice_set_vf_state_qs_dis(vf);
	vf->vf_ops->clear_reset_state(vf);
}

/* Private functions only accessed from other virtualization files */

/**
 * ice_initialize_vf_entry - Initialize a VF entry
 * @vf: pointer to the VF structure
 */
void ice_initialize_vf_entry(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vfs *vfs;

	vfs = &pf->vfs;

	/* assign default capabilities */
	vf->spoofchk = true;
	ice_vc_set_default_allowlist(vf);
	ice_virtchnl_set_dflt_ops(vf);

	/* set default number of MSI-X */
	vf->num_msix = vfs->num_msix_per;
	vf->num_vf_qs = vfs->num_qps_per;

	/* set default RSS hash configuration */
	vf->rss_hashcfg = ICE_DEFAULT_RSS_HASHCFG;

	/* ctrl_vsi_idx will be set to a valid value only when iAVF
	 * creates its first fdir rule.
	 */
	ice_vf_ctrl_invalidate_vsi(vf);
	ice_vf_fdir_init(vf);

	/* Initialize mailbox info for this VF */
	if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
		ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
	else
		ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);

	mutex_init(&vf->cfg_lock);
}

void ice_deinitialize_vf_entry(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
		list_del(&vf->mbx_info.list_entry);
}

/**
 * ice_dis_vf_qs - Disable the VF queues
 * @vf: pointer to the VF structure
 */
void ice_dis_vf_qs(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
	ice_vsi_stop_all_rx_rings(vsi);
	ice_set_vf_state_qs_dis(vf);
}

/**
 * ice_err_to_virt_err - translate errors for VF return code
 * @err: error return code
 */
enum virtchnl_status_code ice_err_to_virt_err(int err)
{
	switch (err) {
	case 0:
		return VIRTCHNL_STATUS_SUCCESS;
	case -EINVAL:
	case -ENODEV:
		return VIRTCHNL_STATUS_ERR_PARAM;
	case -ENOMEM:
		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
	case -EALREADY:
	case -EBUSY:
	case -EIO:
	case -ENOSPC:
		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
	default:
		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
	}
}
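
/* Illustrative sketch, not part of the driver: a virtchnl message handler
 * typically does its work with normal kernel error codes and only converts
 * them to a virtchnl status code at the reply boundary. The handler name is
 * hypothetical; ice_err_to_virt_err() is the translation helper above.
 */
static enum virtchnl_status_code
ice_vf_example_handle_request(struct ice_vf *vf)
{
	int err;

	/* work in normal kernel error codes ... */
	err = ice_check_vf_init(vf);	/* e.g. -EBUSY while the VF is in reset */

	/* ... and convert only when building the reply to the VF */
	return ice_err_to_virt_err(err);
}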
"ON" : "OFF", vsi->vsi_num, err); 1131 else 1132 vsi->info.sec_flags = ctx->info.sec_flags; 1133 1134 kfree(ctx); 1135 1136 return err; 1137 } 1138 1139 /** 1140 * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI 1141 * @vsi: VSI to enable Tx spoof checking for 1142 */ 1143 static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi) 1144 { 1145 struct ice_vsi_vlan_ops *vlan_ops; 1146 int err = 0; 1147 1148 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 1149 1150 /* Allow VF with VLAN 0 only to send all tagged traffic */ 1151 if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) { 1152 err = vlan_ops->ena_tx_filtering(vsi); 1153 if (err) 1154 return err; 1155 } 1156 1157 return ice_cfg_mac_antispoof(vsi, true); 1158 } 1159 1160 /** 1161 * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI 1162 * @vsi: VSI to disable Tx spoof checking for 1163 */ 1164 static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi) 1165 { 1166 struct ice_vsi_vlan_ops *vlan_ops; 1167 int err; 1168 1169 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 1170 1171 err = vlan_ops->dis_tx_filtering(vsi); 1172 if (err) 1173 return err; 1174 1175 return ice_cfg_mac_antispoof(vsi, false); 1176 } 1177 1178 /** 1179 * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI 1180 * @vsi: VSI associated to the VF 1181 * @enable: whether to enable or disable the spoof checking 1182 */ 1183 int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable) 1184 { 1185 int err; 1186 1187 if (enable) 1188 err = ice_vsi_ena_spoofchk(vsi); 1189 else 1190 err = ice_vsi_dis_spoofchk(vsi); 1191 1192 return err; 1193 } 1194 1195 /** 1196 * ice_is_vf_trusted 1197 * @vf: pointer to the VF info 1198 */ 1199 bool ice_is_vf_trusted(struct ice_vf *vf) 1200 { 1201 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 1202 } 1203 1204 /** 1205 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled 1206 * @vf: the VF to check 1207 * 1208 * Returns true if the VF has no Rx and no Tx queues enabled and returns false 1209 * otherwise 1210 */ 1211 bool ice_vf_has_no_qs_ena(struct ice_vf *vf) 1212 { 1213 return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) && 1214 !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF)); 1215 } 1216 1217 /** 1218 * ice_is_vf_link_up - check if the VF's link is up 1219 * @vf: VF to check if link is up 1220 */ 1221 bool ice_is_vf_link_up(struct ice_vf *vf) 1222 { 1223 struct ice_port_info *pi = ice_vf_get_port_info(vf); 1224 1225 if (ice_check_vf_init(vf)) 1226 return false; 1227 1228 if (ice_vf_has_no_qs_ena(vf)) 1229 return false; 1230 else if (vf->link_forced) 1231 return vf->link_up; 1232 else 1233 return pi->phy.link_info.link_info & 1234 ICE_AQ_LINK_UP; 1235 } 1236 1237 /** 1238 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access 1239 * @vf: VF that control VSI is being invalidated on 1240 */ 1241 void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf) 1242 { 1243 vf->ctrl_vsi_idx = ICE_NO_VSI; 1244 } 1245 1246 /** 1247 * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it 1248 * @vf: VF that control VSI is being released on 1249 */ 1250 void ice_vf_ctrl_vsi_release(struct ice_vf *vf) 1251 { 1252 ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]); 1253 ice_vf_ctrl_invalidate_vsi(vf); 1254 } 1255 1256 /** 1257 * ice_vf_ctrl_vsi_setup - Set up a VF control VSI 1258 * @vf: VF to setup control VSI for 1259 * 1260 * Returns pointer to the successfully allocated VSI struct on success, 1261 * otherwise returns NULL on failure. 

/**
 * ice_is_vf_trusted
 * @vf: pointer to the VF info
 */
bool ice_is_vf_trusted(struct ice_vf *vf)
{
	return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
}

/**
 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
 * @vf: the VF to check
 *
 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
 * otherwise
 */
bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
{
	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
}

/**
 * ice_is_vf_link_up - check if the VF's link is up
 * @vf: VF to check if link is up
 */
bool ice_is_vf_link_up(struct ice_vf *vf)
{
	struct ice_port_info *pi = ice_vf_get_port_info(vf);

	if (ice_check_vf_init(vf))
		return false;

	if (ice_vf_has_no_qs_ena(vf))
		return false;
	else if (vf->link_forced)
		return vf->link_up;
	else
		return pi->phy.link_info.link_info &
			ICE_AQ_LINK_UP;
}

/**
 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
 * @vf: VF that control VSI is being invalidated on
 */
void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
{
	vf->ctrl_vsi_idx = ICE_NO_VSI;
}

/**
 * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
 * @vf: VF that control VSI is being released on
 */
void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
{
	ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
	ice_vf_ctrl_invalidate_vsi(vf);
}

/**
 * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
 * @vf: VF to setup control VSI for
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
{
	struct ice_vsi_cfg_params params = {};
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	params.type = ICE_VSI_CTRL;
	params.port_info = ice_vf_get_port_info(vf);
	params.vf = vf;
	params.flags = ICE_VSI_FLAG_INIT;

	vsi = ice_vsi_setup(pf, &params);
	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
		ice_vf_ctrl_invalidate_vsi(vf);
	}

	return vsi;
}

/**
 * ice_vf_init_host_cfg - Initialize host admin configuration
 * @vf: VF to initialize
 * @vsi: the VSI created at initialization
 *
 * Initialize the VF host configuration. Called during VF creation to setup
 * VLAN 0, add the VF VSI broadcast filter, and setup spoof checking. It
 * should only be called during VF creation.
 */
int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops;
	struct ice_pf *pf = vf->pf;
	u8 broadcast[ETH_ALEN];
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);

	err = ice_vsi_add_vlan_zero(vsi);
	if (err) {
		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
			 vf->vf_id);
		return err;
	}

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	err = vlan_ops->ena_rx_filtering(vsi);
	if (err) {
		dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
			 vf->vf_id);
		return err;
	}

	eth_broadcast_addr(broadcast);
	err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (err) {
		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n",
			vf->vf_id, err);
		return err;
	}

	vf->num_mac = 1;

	err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
	if (err) {
		dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
			 vf->vf_id);
		return err;
	}

	return 0;
}

/**
 * ice_vf_invalidate_vsi - invalidate vsi_idx to remove VSI access
 * @vf: VF to remove access to VSI for
 */
void ice_vf_invalidate_vsi(struct ice_vf *vf)
{
	vf->lan_vsi_idx = ICE_NO_VSI;
}

/**
 * ice_vf_vsi_release - Release the VF VSI and invalidate indexes
 * @vf: pointer to the VF structure
 *
 * Release the VSI associated with this VF and then invalidate the VSI
 * indexes.
 */
void ice_vf_vsi_release(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_vsi_release(vsi);
	ice_vf_invalidate_vsi(vf);
}

/**
 * ice_get_vf_ctrl_vsi - Get first VF control VSI pointer
 * @pf: the PF private structure
 * @vsi: pointer to the VSI
 *
 * Return the first VF control VSI found that is not the VSI passed by
 * parameter. This function is used to determine whether new resources have
 * to be allocated for a control VSI or whether they can be shared with an
 * existing one.
 *
 * Return a VF control VSI pointer other than the passed VSI, or NULL
 * otherwise.
 */
struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
{
	struct ice_vsi *ctrl_vsi = NULL;
	struct ice_vf *vf;
	unsigned int bkt;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
			ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
			break;
		}
	}

	rcu_read_unlock();
	return ctrl_vsi;
}
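
/* Illustrative sketch, not part of the driver: a cleanup path can use
 * ice_get_vf_ctrl_vsi() to decide whether control-VSI resources are still
 * shared with another VF before tearing them down. The helper name is
 * hypothetical.
 */
static bool ice_vf_example_ctrl_vsi_shared(struct ice_vsi *ctrl_vsi)
{
	struct ice_pf *pf = ctrl_vsi->back;

	/* another VF still owns a control VSI: keep the shared resources */
	return ice_get_vf_ctrl_vsi(pf, ctrl_vsi) != NULL;
}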

/**
 * ice_vf_update_mac_lldp_num - update the VF's number of LLDP addresses
 * @vf: a VF to add the address to
 * @vsi: the corresponding VSI
 * @incr: is the rule added or removed
 */
void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi,
				bool incr)
{
	bool lldp_by_fw;
	bool was_ena;
	bool is_ena;

	/* check the VSI before dereferencing it */
	if (WARN_ON(!vsi)) {
		vf->num_mac_lldp = 0;
		return;
	}

	lldp_by_fw = test_bit(ICE_FLAG_FW_LLDP_AGENT, vsi->back->flags);
	was_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw;

	vf->num_mac_lldp += incr ? 1 : -1;
	is_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw;

	if (was_ena != is_ena)
		ice_vsi_cfg_sw_lldp(vsi, false, is_ena);
}