// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_vf_lib_private.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_flow.h"
#include "ice_eswitch.h"
#include "ice_virtchnl_allowlist.h"
#include "ice_flex_pipe.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"

/**
 * ice_free_vf_entries - Free all VF entries from the hash table
 * @pf: pointer to the PF structure
 *
 * Iterate over the VF hash table, removing and releasing all VF entries.
 * Called during VF teardown or as cleanup during failed VF initialization.
 */
static void ice_free_vf_entries(struct ice_pf *pf)
{
	struct ice_vfs *vfs = &pf->vfs;
	struct hlist_node *tmp;
	struct ice_vf *vf;
	unsigned int bkt;

	/* Remove all VFs from the hash table and release their main
	 * reference. Once all references to the VF are dropped, ice_put_vf()
	 * will call ice_release_vf which will remove the VF memory.
	 */
	lockdep_assert_held(&vfs->table_lock);

	hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
		hash_del_rcu(&vf->entry);
		ice_put_vf(vf);
	}
}
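
/* Minimal sketch of the expected calling pattern (illustrative; ice_free_vfs()
 * below is a real caller). The VF table lock must be held, as asserted above:
 *
 *	mutex_lock(&pf->vfs.table_lock);
 *	ice_free_vf_entries(pf);
 *	mutex_unlock(&pf->vfs.table_lock);
 */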

/**
 * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
 * @vf: invalidate this VF's VSI after freeing it
 */
static void ice_vf_vsi_release(struct ice_vf *vf)
{
	ice_vsi_release(ice_get_vf_vsi(vf));
	ice_vf_invalidate_vsi(vf);
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, last_vector_idx;

	/* First, disable VF's configuration API to prevent OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
	ice_vf_fdir_exit(vf);
	/* free VF control VSI */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	/* free VSI and disconnect it from the parent uplink */
	if (vf->lan_vsi_idx != ICE_NO_VSI) {
		ice_vf_vsi_release(vf);
		vf->num_mac = 0;
	}

	last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1;

	/* clear VF MDD event information */
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));

	/* Disable interrupts so that VF starts in a known state */
	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
		ice_flush(&pf->hw);
	}
	/* reset some of the state variables keeping track of the resources */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}

/**
 * ice_dis_vf_mappings - disable the VF's MSIX and queue mappings
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);

	dev = ice_pf_to_dev(pf);
	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + pf->vfs.num_msix_per - 1;
	for (v = first; v <= last; v++) {
		u32 reg;

		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
			GLINT_VECT2FUNC_IS_PF_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");

	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}
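
/* Illustrative arithmetic for the loop above: with vf->first_vector_idx of
 * 110 and pf->vfs.num_msix_per of 5 (example values), PF-space vectors
 * 110..114 are handed back to the PF by programming GLINT_VECT2FUNC with
 * IS_PF set and this PF's number.
 */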

/**
 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 * @pf: pointer to the PF structure
 *
 * Since no MSIX entries are taken from pf->irq_tracker, just clear
 * pf->sriov_base_vector.
 *
 * Returns 0 on success, and -EINVAL on error.
 */
static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
	struct ice_res_tracker *res;

	if (!pf)
		return -EINVAL;

	res = pf->irq_tracker;
	if (!res)
		return -EINVAL;

	/* give back irq_tracker resources used */
	WARN_ON(pf->sriov_base_vector < res->num_entries);

	pf->sriov_base_vector = 0;

	return 0;
}

/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 */
void ice_free_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vfs *vfs = &pf->vfs;
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	if (!ice_has_vfs(pf))
		return;

	while (test_and_set_bit(ICE_VF_DIS, pf->state))
		usleep_range(1000, 2000);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");

	mutex_lock(&vfs->table_lock);

	ice_eswitch_release(pf);

	ice_for_each_vf(pf, bkt, vf) {
		mutex_lock(&vf->cfg_lock);

		ice_dis_vf_qs(vf);

		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
			/* disable VF qp mappings and set VF disable state */
			ice_dis_vf_mappings(vf);
			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
			ice_free_vf_res(vf);
		}

		if (!pci_vfs_assigned(pf->pdev)) {
			u32 reg_idx, bit_idx;

			reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}

		/* clear malicious info since the VF is getting released */
		if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
					ICE_MAX_SRIOV_VFS, vf->vf_id))
			dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
				vf->vf_id);

		mutex_unlock(&vf->cfg_lock);
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

	vfs->num_qps_per = 0;
	ice_free_vf_entries(pf);

	mutex_unlock(&vfs->table_lock);

	clear_bit(ICE_VF_DIS, pf->state);
	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}

/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @vf: VF to setup VSI for
 *
 * Returns a pointer to the newly allocated VSI on success, otherwise NULL on
 * failure.
 */
static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
{
	struct ice_port_info *pi = ice_vf_get_port_info(vf);
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf, NULL);

	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
		ice_vf_invalidate_vsi(vf);
		return NULL;
	}

	vf->lan_vsi_idx = vsi->idx;
	vf->lan_vsi_num = vsi->vsi_num;

	return vsi;
}

/**
 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
 * @pf: pointer to PF structure
 * @vf: pointer to VF that the first MSIX vector index is being calculated for
 *
 * This returns the first MSIX vector index in PF space that is used by this
 * VF. This index is used when accessing PF relative registers such as
 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
 * This will always be the OICR index in the AVF driver so any functionality
 * using vf->first_vector_idx for queue configuration will have to increment by
 * 1 to avoid meddling with the OICR index.
 */
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
	return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per;
}
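
/* Worked example with illustrative numbers: if pf->sriov_base_vector is 888
 * and each VF owns 17 vectors (pf->vfs.num_msix_per), then VF 3's first
 * PF-space vector is 888 + 3 * 17 = 939.
 */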

/**
 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
 * @vf: VF to enable MSIX mappings for
 *
 * Some of the registers need to be indexed/configured using hardware global
 * device values and other registers need 0-based values, which represent PF
 * based values.
 */
static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
	int device_based_first_msix, device_based_last_msix;
	int pf_based_first_msix, pf_based_last_msix, v;
	struct ice_pf *pf = vf->pf;
	int device_based_vf_id;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;
	pf_based_first_msix = vf->first_vector_idx;
	pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1;

	device_based_first_msix = pf_based_first_msix +
		pf->hw.func_caps.common_cap.msix_vector_first_id;
	device_based_last_msix =
		(device_based_first_msix + pf->vfs.num_msix_per) - 1;
	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
		VPINT_ALLOC_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
		VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

	reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
		 & VPINT_ALLOC_PCI_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
		VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);

	/* map the interrupts to their functions */
	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
		reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
			GLINT_VECT2FUNC_VF_NUM_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* Map mailbox interrupt to VF MSI-X vector 0 */
	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
}

/**
 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
 * @vf: VF to enable the mappings for
 * @max_txq: max Tx queues allowed on the VF's VSI
 * @max_rxq: max Rx queues allowed on the VF's VSI
 */
static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* VF Tx queues allocation */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Tx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
			VPLAN_TX_QBASE_VFFIRSTQ_M) |
		       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
			VPLAN_TX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
	}

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

	/* VF Rx queues allocation */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Rx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
			VPLAN_RX_QBASE_VFFIRSTQ_M) |
		       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
			VPLAN_RX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
	}
}

/**
 * ice_ena_vf_mappings - enable VF MSIX and queue mapping
 * @vf: pointer to the VF structure
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	ice_ena_vf_msix_mappings(vf);
	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
}

/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated to the VF
 */
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf;

	if (!vf || !q_vector)
		return -EINVAL;

	pf = vf->pf;

	/* always add one to account for the OICR being the first MSIX */
	return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id +
		q_vector->v_idx + 1;
}

/**
 * ice_get_max_valid_res_idx - Get the max valid resource index
 * @res: pointer to the resource to find the max valid index for
 *
 * Start from the end of the ice_res_tracker and return right when we find the
 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
 * valid for SR-IOV because it is the only consumer that manipulates the
 * res->end and this is always called when res->end is set to res->num_entries.
 */
static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
{
	int i;

	if (!res)
		return -EINVAL;

	for (i = res->num_entries - 1; i >= 0; i--)
		if (res->list[i] & ICE_RES_VALID_BIT)
			return i;

	return 0;
}

/**
 * ice_sriov_set_msix_res - Set any used MSIX resources
 * @pf: pointer to PF structure
 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
 *
 * This function allows SR-IOV resources to be taken from the end of the PF's
 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
 * just set the pf->sriov_base_vector and return success.
 *
 * If there are not enough resources available, return an error. This should
 * always be caught by ice_set_per_vf_res().
 *
 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
 * in the PF's space available for SR-IOV.
 */
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	int vectors_used = pf->irq_tracker->num_entries;
	int sriov_base_vector;

	sriov_base_vector = total_vectors - num_msix_needed;

	/* make sure we only grab irq_tracker entries from the list end and
	 * that we have enough available MSIX vectors
	 */
	if (sriov_base_vector < vectors_used)
		return -EINVAL;

	pf->sriov_base_vector = sriov_base_vector;

	return 0;
}

/**
 * ice_set_per_vf_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 * @num_vfs: the number of SR-IOV VFs being configured
 *
 * First, determine HW interrupts from common pool. If we allocate fewer VFs,
 * we get more vectors and can enable more queues per VF. Note that this does
 * not grab any vectors from the SW pool already allocated. Also note that all
 * vector counts include one for each VF's miscellaneous interrupt vector
 * (i.e. OICR).
 *
 * Minimum VFs - 2 vectors, 1 queue pair
 * Small VFs - 5 vectors, 4 queue pairs
 * Medium VFs - 17 vectors, 16 queue pairs
 *
 * Second, determine number of queue pairs per VF by starting with a pre-defined
 * maximum each VF supports. If this is not possible, then we adjust based on
 * queue pairs available on the device.
 *
 * Lastly, set queue and MSI-X VF variables tracked by the PF so they can be
 * used by each VF during VF initialization and reset.
 */
static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
{
	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
	u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
	int msix_avail_per_vf, msix_avail_for_sriov;
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	lockdep_assert_held(&pf->vfs.table_lock);

	if (!num_vfs)
		return -EINVAL;

	if (max_valid_res_idx < 0)
		return -ENOSPC;

	/* determine MSI-X resources per VF */
	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
		pf->irq_tracker->num_entries;
	msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
	} else {
		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
			num_vfs);
		return -ENOSPC;
	}

	num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_txq_count(pf) / num_vfs;
	if (!avail_qs)
		num_txq = 0;
	else if (num_txq > avail_qs)
		num_txq = rounddown_pow_of_two(avail_qs);

	num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_rxq_count(pf) / num_vfs;
	if (!avail_qs)
		num_rxq = 0;
	else if (num_rxq > avail_qs)
		num_rxq = rounddown_pow_of_two(avail_qs);
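
	/* Illustrative sizing: with 17 vectors per VF and one reserved for
	 * non-queue interrupts, up to 16 queue pairs remain; if only 12 Tx
	 * queues per VF are actually available, num_txq rounds down to 8 so
	 * the per-VF queue count stays a power of two.
	 */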

	if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) {
		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
			ICE_MIN_QS_PER_VF, num_vfs);
		return -ENOSPC;
	}

	err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
	if (err) {
		dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n",
			num_vfs, err);
		return err;
	}

	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
	pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
	pf->vfs.num_msix_per = num_msix_per_vf;
	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
		 num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per);

	return 0;
}

/**
 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
 * @vf: VF to initialize/setup the VSI for
 *
 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
 * the VF VSI's broadcast filter. It is only used during initial VF creation.
 */
static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
	struct ice_vsi_vlan_ops *vlan_ops;
	struct ice_pf *pf = vf->pf;
	u8 broadcast[ETH_ALEN];
	struct ice_vsi *vsi;
	struct device *dev;
	int err;

	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);

	dev = ice_pf_to_dev(pf);
	vsi = ice_vf_vsi_setup(vf);
	if (!vsi)
		return -ENOMEM;

	err = ice_vsi_add_vlan_zero(vsi);
	if (err) {
		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
			 vf->vf_id);
		goto release_vsi;
	}

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	err = vlan_ops->ena_rx_filtering(vsi);
	if (err) {
		dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
			 vf->vf_id);
		goto release_vsi;
	}

	eth_broadcast_addr(broadcast);
	err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (err) {
		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n",
			vf->vf_id, err);
		goto release_vsi;
	}

	err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
	if (err) {
		dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
			 vf->vf_id);
		goto release_vsi;
	}

	vf->num_mac = 1;

	return 0;

release_vsi:
	ice_vf_vsi_release(vf);
	return err;
}

/**
 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
 * @pf: PF the VFs are associated with
 */
static int ice_start_vfs(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int bkt, it_cnt;
	struct ice_vf *vf;
	int retval;

	lockdep_assert_held(&pf->vfs.table_lock);

	it_cnt = 0;
	ice_for_each_vf(pf, bkt, vf) {
		vf->vf_ops->clear_reset_trigger(vf);

		retval = ice_init_vf_vsi_res(vf);
		if (retval) {
			dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
				vf->vf_id, retval);
			goto teardown;
		}

		set_bit(ICE_VF_STATE_INIT, vf->vf_states);
		ice_ena_vf_mappings(vf);
		wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
		it_cnt++;
	}

	ice_flush(hw);
	return 0;

teardown:
	ice_for_each_vf(pf, bkt, vf) {
		if (it_cnt == 0)
			break;

		ice_dis_vf_mappings(vf);
		ice_vf_vsi_release(vf);
		it_cnt--;
	}

	return retval;
}

/**
 * ice_sriov_free_vf - Free VF memory after all references are dropped
 * @vf: pointer to VF to free
 *
 * Called by ice_put_vf through ice_release_vf once the last reference to a VF
 * structure has been dropped.
 */
static void ice_sriov_free_vf(struct ice_vf *vf)
{
	mutex_destroy(&vf->cfg_lock);

	kfree_rcu(vf, rcu);
}

/**
 * ice_sriov_clear_mbx_register - clears SRIOV VF's mailbox registers
 * @vf: the vf to configure
 */
static void ice_sriov_clear_mbx_register(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0);
	wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0);
}

/**
 * ice_sriov_trigger_reset_register - trigger VF reset for SRIOV VF
 * @vf: pointer to VF structure
 * @is_vflr: true if reset occurred due to VFLR
 *
 * Trigger and cleanup after a VF reset for a SR-IOV VF.
 */
static void ice_sriov_trigger_reset_register(struct ice_vf *vf, bool is_vflr)
{
	struct ice_pf *pf = vf->pf;
	u32 reg, reg_idx, bit_idx;
	unsigned int vf_abs_id, i;
	struct device *dev;
	struct ice_hw *hw;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* In the case of a VFLR, HW has already reset the VF and we just need
	 * to clean up. Otherwise we must first trigger the reset using the
	 * VFRTRIG register.
	 */
	if (!is_vflr) {
		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
		reg |= VPGEN_VFRTRIG_VFSWR_M;
		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	}

	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (vf_abs_id) / 32;
	bit_idx = (vf_abs_id) % 32;
	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	ice_flush(hw);

	wr32(hw, PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
		reg = rd32(hw, PF_PCI_CIAD);
		/* no transactions pending so stop polling */
		if ((reg & VF_TRANS_PENDING_M) == 0)
			break;

		dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
	}
}

/**
 * ice_sriov_poll_reset_status - poll SRIOV VF reset status
 * @vf: pointer to VF structure
 *
 * Returns true when reset is successful, else returns false
 */
static bool ice_sriov_poll_reset_status(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	unsigned int i;
	u32 reg;

	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully.
		 */
		reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id));
		if (reg & VPGEN_VFRSTAT_VFRD_M)
			return true;

		/* only sleep if the reset is not done */
		usleep_range(10, 20);
	}
	return false;
}

/**
 * ice_sriov_clear_reset_trigger - enable VF to access hardware
 * @vf: VF to enable hardware access for
 */
static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	ice_flush(hw);
}

/**
 * ice_sriov_vsi_rebuild - release and rebuild VF's VSI
 * @vf: VF to release and setup the VSI for
 *
 * This is only called when a single VF is being reset (i.e. VFR, VFLR, host
 * VF configuration change, etc.).
 */
static int ice_sriov_vsi_rebuild(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	ice_vf_vsi_release(vf);
	if (!ice_vf_vsi_setup(vf)) {
		dev_err(ice_pf_to_dev(pf),
			"Failed to release and setup the VF%u's VSI\n",
			vf->vf_id);
		return -ENOMEM;
	}

	return 0;
}

/**
 * ice_sriov_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
 * @vf: VF to perform tasks on
 */
static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)
{
	ice_vf_rebuild_host_cfg(vf);
	ice_vf_set_initialized(vf);
	ice_ena_vf_mappings(vf);
	wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

static const struct ice_vf_ops ice_sriov_vf_ops = {
	.reset_type = ICE_VF_RESET,
	.free = ice_sriov_free_vf,
	.clear_mbx_register = ice_sriov_clear_mbx_register,
	.trigger_reset_register = ice_sriov_trigger_reset_register,
	.poll_reset_status = ice_sriov_poll_reset_status,
	.clear_reset_trigger = ice_sriov_clear_reset_trigger,
	.vsi_rebuild = ice_sriov_vsi_rebuild,
	.post_vsi_rebuild = ice_sriov_post_vsi_rebuild,
};
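
/* A rough sketch of the order in which the generic VF reset flow is expected
 * to invoke these callbacks for a single-VF reset (an assumption drawn from
 * the callback names and their use above, not a contract):
 *
 *	clear_mbx_register -> trigger_reset_register -> poll_reset_status ->
 *	clear_reset_trigger -> vsi_rebuild -> post_vsi_rebuild
 */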

/**
 * ice_create_vf_entries - Allocate and insert VF entries
 * @pf: pointer to the PF structure
 * @num_vfs: the number of VFs to allocate
 *
 * Allocate new VF entries and insert them into the hash table. Set some
 * basic default fields for initializing the new VFs.
 *
 * After this function exits, the hash table will have num_vfs entries
 * inserted.
 *
 * Returns 0 on success or an integer error code on failure.
 */
static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
{
	struct ice_vfs *vfs = &pf->vfs;
	struct ice_vf *vf;
	u16 vf_id;
	int err;

	lockdep_assert_held(&vfs->table_lock);

	for (vf_id = 0; vf_id < num_vfs; vf_id++) {
		vf = kzalloc(sizeof(*vf), GFP_KERNEL);
		if (!vf) {
			err = -ENOMEM;
			goto err_free_entries;
		}
		kref_init(&vf->refcnt);

		vf->pf = pf;
		vf->vf_id = vf_id;

		/* set sriov vf ops for VFs created during SRIOV flow */
		vf->vf_ops = &ice_sriov_vf_ops;

		vf->vf_sw_id = pf->first_sw;
		/* assign default capabilities */
		vf->spoofchk = true;
		vf->num_vf_qs = pf->vfs.num_qps_per;
		ice_vc_set_default_allowlist(vf);

		/* ctrl_vsi_idx will be set to a valid value only when VF
		 * creates its first fdir rule.
		 */
		ice_vf_ctrl_invalidate_vsi(vf);
		ice_vf_fdir_init(vf);

		ice_virtchnl_set_dflt_ops(vf);

		mutex_init(&vf->cfg_lock);

		hash_add_rcu(vfs->table, &vf->entry, vf_id);
	}

	return 0;

err_free_entries:
	ice_free_vf_entries(pf);
	return err;
}

/**
 * ice_ena_vfs - enable VFs so they are ready to be used
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to enable
 */
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int ret;

	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	set_bit(ICE_OICR_INTR_DIS, pf->state);
	ice_flush(hw);

	ret = pci_enable_sriov(pf->pdev, num_vfs);
	if (ret)
		goto err_unroll_intr;

	mutex_lock(&pf->vfs.table_lock);

	ret = ice_set_per_vf_res(pf, num_vfs);
	if (ret) {
		dev_err(dev, "Not enough resources for %d VFs, err %d. Try with fewer VFs\n",
			num_vfs, ret);
		goto err_unroll_sriov;
	}

	ret = ice_create_vf_entries(pf, num_vfs);
	if (ret) {
		dev_err(dev, "Failed to allocate VF entries for %d VFs\n",
			num_vfs);
		goto err_unroll_sriov;
	}

	ret = ice_start_vfs(pf);
	if (ret) {
		dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
		ret = -EAGAIN;
		goto err_unroll_vf_entries;
	}

	clear_bit(ICE_VF_DIS, pf->state);

	ret = ice_eswitch_configure(pf);
	if (ret) {
		dev_err(dev, "Failed to configure eswitch, err %d\n", ret);
		goto err_unroll_sriov;
	}

	/* rearm global interrupts */
	if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
		ice_irq_dynamic_ena(hw, NULL, NULL);

	mutex_unlock(&pf->vfs.table_lock);

	return 0;

err_unroll_vf_entries:
	ice_free_vf_entries(pf);
err_unroll_sriov:
	mutex_unlock(&pf->vfs.table_lock);
	pci_disable_sriov(pf->pdev);
err_unroll_intr:
	/* rearm interrupts here */
	ice_irq_dynamic_ena(hw, NULL, NULL);
	clear_bit(ICE_OICR_INTR_DIS, pf->state);
	return ret;
}

/**
 * ice_pci_sriov_ena - Enable or change number of VFs
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to allocate
 *
 * Returns 0 on success and negative on failure
 */
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
	int pre_existing_vfs = pci_num_vf(pf->pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		ice_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		return 0;

	if (num_vfs > pf->vfs.num_supported) {
		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
			num_vfs, pf->vfs.num_supported);
		return -EOPNOTSUPP;
	}

	dev_info(dev, "Enabling %d VFs\n", num_vfs);
	err = ice_ena_vfs(pf, num_vfs);
	if (err) {
		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
		return err;
	}

	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
	return 0;
}

/**
 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
 * @pf: PF to enable SR-IOV on
 */
static int ice_check_sriov_allowed(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
		dev_err(dev, "This device is not capable of SR-IOV\n");
		return -EOPNOTSUPP;
	}

	if (ice_is_safe_mode(pf)) {
		dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
		return -EOPNOTSUPP;
	}

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
		return -EBUSY;
	}

	return 0;
}
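
/* ice_sriov_configure() below implements the standard PCI SR-IOV sysfs flow;
 * a typical invocation from userspace looks like (illustrative interface
 * name):
 *
 *	echo 4 > /sys/class/net/ens1f0/device/sriov_numvfs
 */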

/**
 * ice_sriov_configure - Enable or change number of VFs via sysfs
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate or 0 to free VFs
 *
 * This function is called when the user updates the number of VFs in sysfs. On
 * success return whatever num_vfs was set to by the caller. Return negative on
 * failure.
 */
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	err = ice_check_sriov_allowed(pf);
	if (err)
		return err;

	if (!num_vfs) {
		if (!pci_vfs_assigned(pdev)) {
			ice_mbx_deinit_snapshot(&pf->hw);
			ice_free_vfs(pf);
			if (pf->lag)
				ice_enable_lag(pf->lag);
			return 0;
		}

		dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
		return -EBUSY;
	}

	err = ice_mbx_init_snapshot(&pf->hw, num_vfs);
	if (err)
		return err;

	err = ice_pci_sriov_ena(pf, num_vfs);
	if (err) {
		ice_mbx_deinit_snapshot(&pf->hw);
		return err;
	}

	if (pf->lag)
		ice_disable_lag(pf->lag);
	return num_vfs;
}

/**
 * ice_process_vflr_event - Free VF resources via IRQ calls
 * @pf: pointer to the PF structure
 *
 * Called from the VFLR IRQ handler to free up VF resources and state
 * variables.
 */
void ice_process_vflr_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;
	u32 reg;

	if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
	    !ice_has_vfs(pf))
		return;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		u32 reg_idx, bit_idx;

		reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
			ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
	}
	mutex_unlock(&pf->vfs.table_lock);
}
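
/* GLGEN_VFLRSTAT is a bit array spread across 32-bit registers, hence the
 * divide/modulo above. Illustrative: absolute VF ID 40 maps to
 * GLGEN_VFLRSTAT(1), bit 8.
 */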

/**
 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
 * @pf: PF used to index all VFs
 * @pfq: queue index relative to the PF's function space
 *
 * If no VF is found who owns the pfq then return NULL, otherwise return a
 * pointer to the VF who owns the pfq
 *
 * If this function returns non-NULL, it acquires a reference count of the VF
 * structure. The caller is responsible for calling ice_put_vf() to drop this
 * reference.
 */
static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
{
	struct ice_vf *vf;
	unsigned int bkt;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		struct ice_vsi *vsi;
		u16 rxq_idx;

		vsi = ice_get_vf_vsi(vf);

		ice_for_each_rxq(vsi, rxq_idx)
			if (vsi->rxq_map[rxq_idx] == pfq) {
				struct ice_vf *found;

				if (kref_get_unless_zero(&vf->refcnt))
					found = vf;
				else
					found = NULL;
				rcu_read_unlock();
				return found;
			}
	}
	rcu_read_unlock();

	return NULL;
}

/**
 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
 * @pf: PF used for conversion
 * @globalq: global queue index used to convert to PF space queue index
 */
static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
{
	return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
}
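
/* Illustrative: if the function's first device-global Rx queue
 * (rxq_first_id) is 64, global queue 70 converts to PF-space queue 6.
 */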
"ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret); 1241 else 1242 vf->spoofchk = ena; 1243 1244 out_put_vf: 1245 ice_put_vf(vf); 1246 return ret; 1247 } 1248 1249 /** 1250 * ice_get_vf_cfg 1251 * @netdev: network interface device structure 1252 * @vf_id: VF identifier 1253 * @ivi: VF configuration structure 1254 * 1255 * return VF configuration 1256 */ 1257 int 1258 ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) 1259 { 1260 struct ice_pf *pf = ice_netdev_to_pf(netdev); 1261 struct ice_vf *vf; 1262 int ret; 1263 1264 vf = ice_get_vf_by_id(pf, vf_id); 1265 if (!vf) 1266 return -EINVAL; 1267 1268 ret = ice_check_vf_ready_for_cfg(vf); 1269 if (ret) 1270 goto out_put_vf; 1271 1272 ivi->vf = vf_id; 1273 ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr); 1274 1275 /* VF configuration for VLAN and applicable QoS */ 1276 ivi->vlan = ice_vf_get_port_vlan_id(vf); 1277 ivi->qos = ice_vf_get_port_vlan_prio(vf); 1278 if (ice_vf_is_port_vlan_ena(vf)) 1279 ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf)); 1280 1281 ivi->trusted = vf->trusted; 1282 ivi->spoofchk = vf->spoofchk; 1283 if (!vf->link_forced) 1284 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; 1285 else if (vf->link_up) 1286 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; 1287 else 1288 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; 1289 ivi->max_tx_rate = vf->max_tx_rate; 1290 ivi->min_tx_rate = vf->min_tx_rate; 1291 1292 out_put_vf: 1293 ice_put_vf(vf); 1294 return ret; 1295 } 1296 1297 /** 1298 * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch 1299 * @pf: PF used to reference the switch's rules 1300 * @umac: unicast MAC to compare against existing switch rules 1301 * 1302 * Return true on the first/any match, else return false 1303 */ 1304 static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac) 1305 { 1306 struct ice_sw_recipe *mac_recipe_list = 1307 &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC]; 1308 struct ice_fltr_mgmt_list_entry *list_itr; 1309 struct list_head *rule_head; 1310 struct mutex *rule_lock; /* protect MAC filter list access */ 1311 1312 rule_head = &mac_recipe_list->filt_rules; 1313 rule_lock = &mac_recipe_list->filt_rule_lock; 1314 1315 mutex_lock(rule_lock); 1316 list_for_each_entry(list_itr, rule_head, list_entry) { 1317 u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0]; 1318 1319 if (ether_addr_equal(existing_mac, umac)) { 1320 mutex_unlock(rule_lock); 1321 return true; 1322 } 1323 } 1324 1325 mutex_unlock(rule_lock); 1326 1327 return false; 1328 } 1329 1330 /** 1331 * ice_set_vf_mac 1332 * @netdev: network interface device structure 1333 * @vf_id: VF identifier 1334 * @mac: MAC address 1335 * 1336 * program VF MAC address 1337 */ 1338 int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) 1339 { 1340 struct ice_pf *pf = ice_netdev_to_pf(netdev); 1341 struct ice_vf *vf; 1342 int ret; 1343 1344 if (is_multicast_ether_addr(mac)) { 1345 netdev_err(netdev, "%pM not a valid unicast address\n", mac); 1346 return -EINVAL; 1347 } 1348 1349 vf = ice_get_vf_by_id(pf, vf_id); 1350 if (!vf) 1351 return -EINVAL; 1352 1353 /* nothing left to do, unicast MAC already set */ 1354 if (ether_addr_equal(vf->dev_lan_addr.addr, mac) && 1355 ether_addr_equal(vf->hw_lan_addr.addr, mac)) { 1356 ret = 0; 1357 goto out_put_vf; 1358 } 1359 1360 ret = ice_check_vf_ready_for_cfg(vf); 1361 if (ret) 1362 goto out_put_vf; 1363 1364 if (ice_unicast_mac_exists(pf, mac)) { 1365 netdev_err(netdev, "Unicast MAC %pM already exists on this PF. 

/**
 * ice_set_vf_mac - program VF MAC address
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: MAC address
 */
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (is_multicast_ether_addr(mac)) {
		netdev_err(netdev, "%pM not a valid unicast address\n", mac);
		return -EINVAL;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	/* nothing left to do, unicast MAC already set */
	if (ether_addr_equal(vf->dev_lan_addr.addr, mac) &&
	    ether_addr_equal(vf->hw_lan_addr.addr, mac)) {
		ret = 0;
		goto out_put_vf;
	}

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	if (ice_unicast_mac_exists(pf, mac)) {
		netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
			   mac, vf_id, mac);
		ret = -EINVAL;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	/* VF is notified of its new MAC via the PF's response to the
	 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
	 */
	ether_addr_copy(vf->dev_lan_addr.addr, mac);
	ether_addr_copy(vf->hw_lan_addr.addr, mac);
	if (is_zero_ether_addr(mac)) {
		/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
		vf->pf_set_mac = false;
		netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
			    vf->vf_id);
	} else {
		/* PF will add MAC rule for the VF */
		vf->pf_set_mac = true;
		netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
			    mac, vf_id);
	}

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_trust - enable or disable a given VF as trusted
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @trusted: Boolean value to enable/disable trusted VF
 */
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (ice_is_eswitch_mode_switchdev(pf)) {
		dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
		return -EOPNOTSUPP;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	/* Check if already trusted */
	if (trusted == vf->trusted) {
		ret = 0;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	vf->trusted = trusted;
	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
		 vf_id, trusted ? "" : "un");

	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}
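
/* A recurring pattern in the configuration handlers in this file: settings
 * the VF must observe are updated under vf->cfg_lock and then followed by
 * ice_reset_vf(vf, ICE_VF_RESET_NOTIFY), so the VF driver reinitializes with
 * the new configuration.
 */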
"" : "un"); 1437 1438 mutex_unlock(&vf->cfg_lock); 1439 1440 out_put_vf: 1441 ice_put_vf(vf); 1442 return ret; 1443 } 1444 1445 /** 1446 * ice_set_vf_link_state 1447 * @netdev: network interface device structure 1448 * @vf_id: VF identifier 1449 * @link_state: required link state 1450 * 1451 * Set VF's link state, irrespective of physical link state status 1452 */ 1453 int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) 1454 { 1455 struct ice_pf *pf = ice_netdev_to_pf(netdev); 1456 struct ice_vf *vf; 1457 int ret; 1458 1459 vf = ice_get_vf_by_id(pf, vf_id); 1460 if (!vf) 1461 return -EINVAL; 1462 1463 ret = ice_check_vf_ready_for_cfg(vf); 1464 if (ret) 1465 goto out_put_vf; 1466 1467 switch (link_state) { 1468 case IFLA_VF_LINK_STATE_AUTO: 1469 vf->link_forced = false; 1470 break; 1471 case IFLA_VF_LINK_STATE_ENABLE: 1472 vf->link_forced = true; 1473 vf->link_up = true; 1474 break; 1475 case IFLA_VF_LINK_STATE_DISABLE: 1476 vf->link_forced = true; 1477 vf->link_up = false; 1478 break; 1479 default: 1480 ret = -EINVAL; 1481 goto out_put_vf; 1482 } 1483 1484 ice_vc_notify_vf_link_state(vf); 1485 1486 out_put_vf: 1487 ice_put_vf(vf); 1488 return ret; 1489 } 1490 1491 /** 1492 * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs 1493 * @pf: PF associated with VFs 1494 */ 1495 static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf) 1496 { 1497 struct ice_vf *vf; 1498 unsigned int bkt; 1499 int rate = 0; 1500 1501 rcu_read_lock(); 1502 ice_for_each_vf_rcu(pf, bkt, vf) 1503 rate += vf->min_tx_rate; 1504 rcu_read_unlock(); 1505 1506 return rate; 1507 } 1508 1509 /** 1510 * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription 1511 * @vf: VF trying to configure min_tx_rate 1512 * @min_tx_rate: min Tx rate in Mbps 1513 * 1514 * Check if the min_tx_rate being passed in will cause oversubscription of total 1515 * min_tx_rate based on the current link speed and all other VFs configured 1516 * min_tx_rate 1517 * 1518 * Return true if the passed min_tx_rate would cause oversubscription, else 1519 * return false 1520 */ 1521 static bool 1522 ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate) 1523 { 1524 int link_speed_mbps = ice_get_link_speed_mbps(ice_get_vf_vsi(vf)); 1525 int all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf); 1526 1527 /* this VF's previous rate is being overwritten */ 1528 all_vfs_min_tx_rate -= vf->min_tx_rate; 1529 1530 if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) { 1531 dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n", 1532 min_tx_rate, vf->vf_id, 1533 all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps, 1534 link_speed_mbps); 1535 return true; 1536 } 1537 1538 return false; 1539 } 1540 1541 /** 1542 * ice_set_vf_bw - set min/max VF bandwidth 1543 * @netdev: network interface device structure 1544 * @vf_id: VF identifier 1545 * @min_tx_rate: Minimum Tx rate in Mbps 1546 * @max_tx_rate: Maximum Tx rate in Mbps 1547 */ 1548 int 1549 ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, 1550 int max_tx_rate) 1551 { 1552 struct ice_pf *pf = ice_netdev_to_pf(netdev); 1553 struct ice_vsi *vsi; 1554 struct device *dev; 1555 struct ice_vf *vf; 1556 int ret; 1557 1558 dev = ice_pf_to_dev(pf); 1559 1560 vf = ice_get_vf_by_id(pf, vf_id); 1561 if (!vf) 1562 return -EINVAL; 1563 1564 ret = ice_check_vf_ready_for_cfg(vf); 1565 if (ret) 1566 goto out_put_vf; 1567 1568 vsi 

/**
 * ice_set_vf_bw - set min/max VF bandwidth
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: Minimum Tx rate in Mbps
 * @max_tx_rate: Maximum Tx rate in Mbps
 */
int
ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
	      int max_tx_rate)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vsi = ice_get_vf_vsi(vf);

	/* when max_tx_rate is zero that means no max Tx rate limiting, so only
	 * check if max_tx_rate is non-zero
	 */
	if (max_tx_rate && min_tx_rate > max_tx_rate) {
		dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n",
			min_tx_rate, max_tx_rate);
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (min_tx_rate && ice_is_dcb_active(pf)) {
		dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
		ret = -EOPNOTSUPP;
		goto out_put_vf;
	}

	if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
		ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
		if (ret) {
			dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
				vf->vf_id);
			goto out_put_vf;
		}

		vf->min_tx_rate = min_tx_rate;
	}

	if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
		ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
		if (ret) {
			dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
				vf->vf_id);
			goto out_put_vf;
		}

		vf->max_tx_rate = max_tx_rate;
	}

out_put_vf:
	ice_put_vf(vf);
	return ret;
}
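
/* Unit note for ice_set_vf_bw() above: the ndo supplies rates in Mbps while
 * ice_set_min_bw_limit()/ice_set_max_bw_limit() appear to take Kbps, hence
 * the "* 1000" conversions (an observation from these call sites, not a
 * statement of the helpers' contract).
 */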

/**
 * ice_get_vf_stats - populate some stats for the VF
 * @netdev: the netdev of the PF
 * @vf_id: the host OS identifier (0-255)
 * @vf_stats: pointer to the OS memory to be initialized
 */
int ice_get_vf_stats(struct net_device *netdev, int vf_id,
		     struct ifla_vf_stats *vf_stats)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_eth_stats *stats;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	ice_update_eth_stats(vsi);
	stats = &vsi->eth_stats;

	memset(vf_stats, 0, sizeof(*vf_stats));

	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
		stats->rx_multicast;
	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
		stats->tx_multicast;
	vf_stats->rx_bytes = stats->rx_bytes;
	vf_stats->tx_bytes = stats->tx_bytes;
	vf_stats->broadcast = stats->rx_broadcast;
	vf_stats->multicast = stats->rx_multicast;
	vf_stats->rx_dropped = stats->rx_discards;
	vf_stats->tx_dropped = stats->tx_discards;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported
 * @hw: hardware structure used to check the VLAN mode
 * @vlan_proto: VLAN TPID being checked
 *
 * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q
 * and ETH_P_8021AD are supported. If the device is configured in Single VLAN
 * Mode (SVM), then only ETH_P_8021Q is supported.
 */
static bool
ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto)
{
	bool is_supported = false;

	switch (vlan_proto) {
	case ETH_P_8021Q:
		is_supported = true;
		break;
	case ETH_P_8021AD:
		if (ice_is_dvm_ena(hw))
			is_supported = true;
		break;
	}

	return is_supported;
}
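
/* ice_set_vf_port_vlan() below is typically exercised via iproute2, e.g.
 * (illustrative interface and values):
 *
 *	ip link set ens1f0 vf 1 vlan 100 qos 3 proto 802.1ad
 *
 * where the 802.1ad TPID only succeeds when the device is in DVM, per the
 * check above.
 */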

/**
 * ice_set_vf_port_vlan - program VF Port VLAN ID and/or QoS
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID being set
 * @qos: priority setting
 * @vlan_proto: VLAN protocol
 */
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	u16 local_vlan_proto = ntohs(vlan_proto);
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	if (vlan_id >= VLAN_N_VID || qos > 7) {
		dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
			vf_id, vlan_id, qos);
		return -EINVAL;
	}

	if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) {
		dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n",
			local_vlan_proto);
		return -EPROTONOSUPPORT;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	if (ice_vf_get_port_vlan_prio(vf) == qos &&
	    ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto &&
	    ice_vf_get_port_vlan_id(vf) == vlan_id) {
		/* duplicate request, so just return success */
		dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n",
			vlan_id, qos, local_vlan_proto);
		ret = 0;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	vf->port_vlan_info = ICE_VLAN(local_vlan_proto, vlan_id, qos);
	if (ice_vf_is_port_vlan_ena(vf))
		dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n",
			 vlan_id, qos, local_vlan_proto, vf_id);
	else
		dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
 * @vf: pointer to the VF structure
 */
void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
		 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
		 vf->dev_lan_addr.addr,
		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
		 ? "on" : "off");
}

/**
 * ice_print_vfs_mdd_events - print VFs malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
 */
void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	/* check that there are pending MDD events to print */
	if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
		return;

	/* VF MDD event logs are rate limited to one second intervals */
	if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1))
		return;

	pf->vfs.last_printed_mdd_jiffies = jiffies;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		/* only print Rx MDD event message if there are new events */
		if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
			vf->mdd_rx_events.last_printed =
				vf->mdd_rx_events.count;
			ice_print_vf_rx_mdd_event(vf);
		}

		/* only print Tx MDD event message if there are new events */
		if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
			vf->mdd_tx_events.last_printed =
				vf->mdd_tx_events.count;

			dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
				 vf->mdd_tx_events.count, hw->pf_id, vf->vf_id,
				 vf->dev_lan_addr.addr);
		}
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
 * @pdev: pointer to a pci_dev structure
 *
 * Called when recovering from a PF FLR to restore interrupt capability to
 * the VFs.
 */
void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
{
	u16 vf_id;
	int pos;

	if (!pci_num_vf(pdev))
		return;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		struct pci_dev *vfdev;

		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
				     &vf_id);
		vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
		while (vfdev) {
			if (vfdev->is_virtfn && vfdev->physfn == pdev)
				pci_restore_msi_state(vfdev);
			vfdev = pci_get_device(pdev->vendor, vf_id,
					       vfdev);
		}
	}
}

/**
 * ice_is_malicious_vf - helper function to detect a malicious VF
 * @pf: ptr to struct ice_pf
 * @event: pointer to the AQ event
 * @num_msg_proc: the number of messages processed so far
 * @num_msg_pending: the number of messages pending in admin queue
 */
bool
ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
		    u16 num_msg_proc, u16 num_msg_pending)
{
	s16 vf_id = le16_to_cpu(event->desc.retval);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_mbx_data mbxdata;
	bool malvf = false;
	struct ice_vf *vf;
	int status;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return false;

	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
		goto out_put_vf;

	mbxdata.num_msg_proc = num_msg_proc;
	mbxdata.num_pending_arq = num_msg_pending;
	mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries;
#define ICE_MBX_OVERFLOW_WATERMARK 64
	mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;

	/* check to see if we have a malicious VF */
	status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf);
	if (status)
		goto out_put_vf;

	if (malvf) {
		bool report_vf = false;

		/* if the VF is malicious and we haven't let the user
		 * know about it, then let them know now
		 */
		status = ice_mbx_report_malvf(&pf->hw, pf->vfs.malvfs,
					      ICE_MAX_SRIOV_VFS, vf_id,
					      &report_vf);
		if (status)
			dev_dbg(dev, "Error reporting malicious VF\n");

		if (report_vf) {
			struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);

			if (pf_vsi)
				dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
					 &vf->dev_lan_addr.addr[0],
					 pf_vsi->netdev->dev_addr);
		}
	}

out_put_vf:
	ice_put_vf(vf);
	return malvf;
}