1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2018, Intel Corporation. */ 3 4 #include "ice.h" 5 #include "ice_vf_lib_private.h" 6 #include "ice_base.h" 7 #include "ice_lib.h" 8 #include "ice_fltr.h" 9 #include "ice_dcb_lib.h" 10 #include "ice_flow.h" 11 #include "ice_eswitch.h" 12 #include "ice_virtchnl_allowlist.h" 13 #include "ice_flex_pipe.h" 14 #include "ice_vf_vsi_vlan_ops.h" 15 #include "ice_vlan.h" 16 17 /** 18 * ice_free_vf_entries - Free all VF entries from the hash table 19 * @pf: pointer to the PF structure 20 * 21 * Iterate over the VF hash table, removing and releasing all VF entries. 22 * Called during VF teardown or as cleanup during failed VF initialization. 23 */ 24 static void ice_free_vf_entries(struct ice_pf *pf) 25 { 26 struct ice_vfs *vfs = &pf->vfs; 27 struct hlist_node *tmp; 28 struct ice_vf *vf; 29 unsigned int bkt; 30 31 /* Remove all VFs from the hash table and release their main 32 * reference. Once all references to the VF are dropped, ice_put_vf() 33 * will call ice_release_vf which will remove the VF memory. 34 */ 35 lockdep_assert_held(&vfs->table_lock); 36 37 hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) { 38 hash_del_rcu(&vf->entry); 39 ice_put_vf(vf); 40 } 41 } 42 43 /** 44 * ice_free_vf_res - Free a VF's resources 45 * @vf: pointer to the VF info 46 */ 47 static void ice_free_vf_res(struct ice_vf *vf) 48 { 49 struct ice_pf *pf = vf->pf; 50 int i, last_vector_idx; 51 52 /* First, disable VF's configuration API to prevent OS from 53 * accessing the VF's VSI after it's freed or invalidated. 54 */ 55 clear_bit(ICE_VF_STATE_INIT, vf->vf_states); 56 ice_vf_fdir_exit(vf); 57 /* free VF control VSI */ 58 if (vf->ctrl_vsi_idx != ICE_NO_VSI) 59 ice_vf_ctrl_vsi_release(vf); 60 61 /* free VSI and disconnect it from the parent uplink */ 62 if (vf->lan_vsi_idx != ICE_NO_VSI) { 63 ice_vf_vsi_release(vf); 64 vf->num_mac = 0; 65 } 66 67 last_vector_idx = vf->first_vector_idx + vf->num_msix - 1; 68 69 /* clear VF MDD event information */ 70 memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events)); 71 memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events)); 72 73 /* Disable interrupts so that VF starts in a known state */ 74 for (i = vf->first_vector_idx; i <= last_vector_idx; i++) { 75 wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M); 76 ice_flush(&pf->hw); 77 } 78 /* reset some of the state variables keeping track of the resources */ 79 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); 80 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); 81 } 82 83 /** 84 * ice_dis_vf_mappings 85 * @vf: pointer to the VF structure 86 */ 87 static void ice_dis_vf_mappings(struct ice_vf *vf) 88 { 89 struct ice_pf *pf = vf->pf; 90 struct ice_vsi *vsi; 91 struct device *dev; 92 int first, last, v; 93 struct ice_hw *hw; 94 95 hw = &pf->hw; 96 vsi = ice_get_vf_vsi(vf); 97 if (WARN_ON(!vsi)) 98 return; 99 100 dev = ice_pf_to_dev(pf); 101 wr32(hw, VPINT_ALLOC(vf->vf_id), 0); 102 wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0); 103 104 first = vf->first_vector_idx; 105 last = first + vf->num_msix - 1; 106 for (v = first; v <= last; v++) { 107 u32 reg; 108 109 reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) & 110 GLINT_VECT2FUNC_IS_PF_M) | 111 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & 112 GLINT_VECT2FUNC_PF_NUM_M)); 113 wr32(hw, GLINT_VECT2FUNC(v), reg); 114 } 115 116 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) 117 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0); 118 else 119 dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n"); 120 121 if (vsi->rx_mapping_mode == 
ICE_VSI_MAP_CONTIG) 122 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0); 123 else 124 dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n"); 125 } 126 127 /** 128 * ice_sriov_free_msix_res - Reset/free any used MSIX resources 129 * @pf: pointer to the PF structure 130 * 131 * Since no MSIX entries are taken from the pf->irq_tracker then just clear 132 * the pf->sriov_base_vector. 133 * 134 * Returns 0 on success, and -EINVAL on error. 135 */ 136 static int ice_sriov_free_msix_res(struct ice_pf *pf) 137 { 138 if (!pf) 139 return -EINVAL; 140 141 bitmap_free(pf->sriov_irq_bm); 142 pf->sriov_irq_size = 0; 143 pf->sriov_base_vector = 0; 144 145 return 0; 146 } 147 148 /** 149 * ice_free_vfs - Free all VFs 150 * @pf: pointer to the PF structure 151 */ 152 void ice_free_vfs(struct ice_pf *pf) 153 { 154 struct device *dev = ice_pf_to_dev(pf); 155 struct ice_vfs *vfs = &pf->vfs; 156 struct ice_hw *hw = &pf->hw; 157 struct ice_vf *vf; 158 unsigned int bkt; 159 160 if (!ice_has_vfs(pf)) 161 return; 162 163 while (test_and_set_bit(ICE_VF_DIS, pf->state)) 164 usleep_range(1000, 2000); 165 166 /* Disable IOV before freeing resources. This lets any VF drivers 167 * running in the host get themselves cleaned up before we yank 168 * the carpet out from underneath their feet. 169 */ 170 if (!pci_vfs_assigned(pf->pdev)) 171 pci_disable_sriov(pf->pdev); 172 else 173 dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); 174 175 ice_eswitch_reserve_cp_queues(pf, -ice_get_num_vfs(pf)); 176 177 mutex_lock(&vfs->table_lock); 178 179 ice_for_each_vf(pf, bkt, vf) { 180 mutex_lock(&vf->cfg_lock); 181 182 ice_eswitch_detach(pf, vf); 183 ice_dis_vf_qs(vf); 184 185 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { 186 /* disable VF qp mappings and set VF disable state */ 187 ice_dis_vf_mappings(vf); 188 set_bit(ICE_VF_STATE_DIS, vf->vf_states); 189 ice_free_vf_res(vf); 190 } 191 192 if (!pci_vfs_assigned(pf->pdev)) { 193 u32 reg_idx, bit_idx; 194 195 reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; 196 bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; 197 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); 198 } 199 200 /* clear malicious info since the VF is getting released */ 201 list_del(&vf->mbx_info.list_entry); 202 203 mutex_unlock(&vf->cfg_lock); 204 } 205 206 if (ice_sriov_free_msix_res(pf)) 207 dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n"); 208 209 vfs->num_qps_per = 0; 210 ice_free_vf_entries(pf); 211 212 mutex_unlock(&vfs->table_lock); 213 214 clear_bit(ICE_VF_DIS, pf->state); 215 clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags); 216 } 217 218 /** 219 * ice_vf_vsi_setup - Set up a VF VSI 220 * @vf: VF to setup VSI for 221 * 222 * Returns pointer to the successfully allocated VSI struct on success, 223 * otherwise returns NULL on failure. 
224 */ 225 static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf) 226 { 227 struct ice_vsi_cfg_params params = {}; 228 struct ice_pf *pf = vf->pf; 229 struct ice_vsi *vsi; 230 231 params.type = ICE_VSI_VF; 232 params.pi = ice_vf_get_port_info(vf); 233 params.vf = vf; 234 params.flags = ICE_VSI_FLAG_INIT; 235 236 vsi = ice_vsi_setup(pf, ¶ms); 237 238 if (!vsi) { 239 dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n"); 240 ice_vf_invalidate_vsi(vf); 241 return NULL; 242 } 243 244 vf->lan_vsi_idx = vsi->idx; 245 vf->lan_vsi_num = vsi->vsi_num; 246 247 return vsi; 248 } 249 250 251 /** 252 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware 253 * @vf: VF to enable MSIX mappings for 254 * 255 * Some of the registers need to be indexed/configured using hardware global 256 * device values and other registers need 0-based values, which represent PF 257 * based values. 258 */ 259 static void ice_ena_vf_msix_mappings(struct ice_vf *vf) 260 { 261 int device_based_first_msix, device_based_last_msix; 262 int pf_based_first_msix, pf_based_last_msix, v; 263 struct ice_pf *pf = vf->pf; 264 int device_based_vf_id; 265 struct ice_hw *hw; 266 u32 reg; 267 268 hw = &pf->hw; 269 pf_based_first_msix = vf->first_vector_idx; 270 pf_based_last_msix = (pf_based_first_msix + vf->num_msix) - 1; 271 272 device_based_first_msix = pf_based_first_msix + 273 pf->hw.func_caps.common_cap.msix_vector_first_id; 274 device_based_last_msix = 275 (device_based_first_msix + vf->num_msix) - 1; 276 device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 277 278 reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) & 279 VPINT_ALLOC_FIRST_M) | 280 ((device_based_last_msix << VPINT_ALLOC_LAST_S) & 281 VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M); 282 wr32(hw, VPINT_ALLOC(vf->vf_id), reg); 283 284 reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S) 285 & VPINT_ALLOC_PCI_FIRST_M) | 286 ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) & 287 VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M); 288 wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg); 289 290 /* map the interrupts to its functions */ 291 for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) { 292 reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) & 293 GLINT_VECT2FUNC_VF_NUM_M) | 294 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & 295 GLINT_VECT2FUNC_PF_NUM_M)); 296 wr32(hw, GLINT_VECT2FUNC(v), reg); 297 } 298 299 /* Map mailbox interrupt to VF MSI-X vector 0 */ 300 wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M); 301 } 302 303 /** 304 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF 305 * @vf: VF to enable the mappings for 306 * @max_txq: max Tx queues allowed on the VF's VSI 307 * @max_rxq: max Rx queues allowed on the VF's VSI 308 */ 309 static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq) 310 { 311 struct device *dev = ice_pf_to_dev(vf->pf); 312 struct ice_vsi *vsi = ice_get_vf_vsi(vf); 313 struct ice_hw *hw = &vf->pf->hw; 314 u32 reg; 315 316 if (WARN_ON(!vsi)) 317 return; 318 319 /* set regardless of mapping mode */ 320 wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M); 321 322 /* VF Tx queues allocation */ 323 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) { 324 /* set the VF PF Tx queue range 325 * VFNUMQ value should be set to (number of queues - 1). 
A value 326 * of 0 means 1 queue and a value of 255 means 256 queues 327 */ 328 reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) & 329 VPLAN_TX_QBASE_VFFIRSTQ_M) | 330 (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) & 331 VPLAN_TX_QBASE_VFNUMQ_M)); 332 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg); 333 } else { 334 dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n"); 335 } 336 337 /* set regardless of mapping mode */ 338 wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M); 339 340 /* VF Rx queues allocation */ 341 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) { 342 /* set the VF PF Rx queue range 343 * VFNUMQ value should be set to (number of queues - 1). A value 344 * of 0 means 1 queue and a value of 255 means 256 queues 345 */ 346 reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) & 347 VPLAN_RX_QBASE_VFFIRSTQ_M) | 348 (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) & 349 VPLAN_RX_QBASE_VFNUMQ_M)); 350 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg); 351 } else { 352 dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n"); 353 } 354 } 355 356 /** 357 * ice_ena_vf_mappings - enable VF MSIX and queue mapping 358 * @vf: pointer to the VF structure 359 */ 360 static void ice_ena_vf_mappings(struct ice_vf *vf) 361 { 362 struct ice_vsi *vsi = ice_get_vf_vsi(vf); 363 364 if (WARN_ON(!vsi)) 365 return; 366 367 ice_ena_vf_msix_mappings(vf); 368 ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq); 369 } 370 371 /** 372 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space 373 * @vf: VF to calculate the register index for 374 * @q_vector: a q_vector associated to the VF 375 */ 376 int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector) 377 { 378 if (!vf || !q_vector) 379 return -EINVAL; 380 381 /* always add one to account for the OICR being the first MSIX */ 382 return vf->first_vector_idx + q_vector->v_idx + 1; 383 } 384 385 /** 386 * ice_sriov_set_msix_res - Set any used MSIX resources 387 * @pf: pointer to PF structure 388 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs 389 * 390 * This function allows SR-IOV resources to be taken from the end of the PF's 391 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We 392 * just set the pf->sriov_base_vector and return success. 393 * 394 * If there are not enough resources available, return an error. This should 395 * always be caught by ice_set_per_vf_res(). 396 * 397 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors 398 * in the PF's space available for SR-IOV. 399 */ 400 static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed) 401 { 402 u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; 403 int vectors_used = ice_get_max_used_msix_vector(pf); 404 int sriov_base_vector; 405 406 sriov_base_vector = total_vectors - num_msix_needed; 407 408 /* make sure we only grab irq_tracker entries from the list end and 409 * that we have enough available MSIX vectors 410 */ 411 if (sriov_base_vector < vectors_used) 412 return -EINVAL; 413 414 pf->sriov_base_vector = sriov_base_vector; 415 416 return 0; 417 } 418 419 /** 420 * ice_set_per_vf_res - check if vectors and queues are available 421 * @pf: pointer to the PF structure 422 * @num_vfs: the number of SR-IOV VFs being configured 423 * 424 * First, determine HW interrupts from common pool. If we allocate fewer VFs, we 425 * get more vectors and can enable more queues per VF. 
Note that this does not 426 * grab any vectors from the SW pool already allocated. Also note that all 427 * vector counts include one for each VF's miscellaneous interrupt vector 428 * (i.e. OICR). 429 * 430 * Minimum VFs - 2 vectors, 1 queue pair 431 * Small VFs - 5 vectors, 4 queue pairs 432 * Medium VFs - 17 vectors, 16 queue pairs 433 * 434 * Second, determine number of queue pairs per VF by starting with a pre-defined 435 * maximum each VF supports. If this is not possible, then we adjust based on 436 * queue pairs available on the device. 437 * 438 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used 439 * by each VF during VF initialization and reset. 440 */ 441 static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) 442 { 443 int vectors_used = ice_get_max_used_msix_vector(pf); 444 u16 num_msix_per_vf, num_txq, num_rxq, avail_qs; 445 int msix_avail_per_vf, msix_avail_for_sriov; 446 struct device *dev = ice_pf_to_dev(pf); 447 int err; 448 449 lockdep_assert_held(&pf->vfs.table_lock); 450 451 if (!num_vfs) 452 return -EINVAL; 453 454 /* determine MSI-X resources per VF */ 455 msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors - 456 vectors_used; 457 msix_avail_per_vf = msix_avail_for_sriov / num_vfs; 458 if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) { 459 num_msix_per_vf = ICE_NUM_VF_MSIX_MED; 460 } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) { 461 num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL; 462 } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) { 463 num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN; 464 } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) { 465 num_msix_per_vf = ICE_MIN_INTR_PER_VF; 466 } else { 467 dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n", 468 msix_avail_for_sriov, ICE_MIN_INTR_PER_VF, 469 num_vfs); 470 return -ENOSPC; 471 } 472 473 num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF, 474 ICE_MAX_RSS_QS_PER_VF); 475 avail_qs = ice_get_avail_txq_count(pf) / num_vfs; 476 if (!avail_qs) 477 num_txq = 0; 478 else if (num_txq > avail_qs) 479 num_txq = rounddown_pow_of_two(avail_qs); 480 481 num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF, 482 ICE_MAX_RSS_QS_PER_VF); 483 avail_qs = ice_get_avail_rxq_count(pf) / num_vfs; 484 if (!avail_qs) 485 num_rxq = 0; 486 else if (num_rxq > avail_qs) 487 num_rxq = rounddown_pow_of_two(avail_qs); 488 489 if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) { 490 dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n", 491 ICE_MIN_QS_PER_VF, num_vfs); 492 return -ENOSPC; 493 } 494 495 err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs); 496 if (err) { 497 dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n", 498 num_vfs, err); 499 return err; 500 } 501 502 /* only allow equal Tx/Rx queue count (i.e. queue pairs) */ 503 pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq); 504 pf->vfs.num_msix_per = num_msix_per_vf; 505 dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n", 506 num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per); 507 508 return 0; 509 } 510 511 /** 512 * ice_sriov_get_irqs - get irqs for SR-IOV use case 513 * @pf: pointer to PF structure 514 * @needed: number of irqs to get 515 * 516 * This returns the first MSI-X vector index in PF space that is used by this 517 * VF. This index is used when accessing PF relative registers such as 518 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
519 * This will always be the OICR index in the AVF driver, so any functionality 520 * using vf->first_vector_idx for queue configuration will use this index as 521 * its base. 522 * 523 * Only SRIOV specific vectors are tracked in sriov_irq_bm. SRIOV vectors are 524 * allocated from the end of the global irq index space, so the first bit in 525 * sriov_irq_bm corresponds to the last irq index, and so on. This simplifies 526 * extending the SRIOV vectors: they always span from sriov_base_vector to the 527 * last irq index, and sriov_base_vector moves as the allocation grows or shrinks. 528 */ 529 static int ice_sriov_get_irqs(struct ice_pf *pf, u16 needed) 530 { 531 int res = bitmap_find_next_zero_area(pf->sriov_irq_bm, 532 pf->sriov_irq_size, 0, needed, 0); 533 /* conversion from number in bitmap to global irq index */ 534 int index = pf->sriov_irq_size - res - needed; 535 536 if (res >= pf->sriov_irq_size || index < pf->sriov_base_vector) 537 return -ENOENT; 538 539 bitmap_set(pf->sriov_irq_bm, res, needed); 540 return index; 541 } 542 543 /** 544 * ice_sriov_free_irqs - free irqs used by the VF 545 * @pf: pointer to PF structure 546 * @vf: pointer to VF structure 547 */ 548 static void ice_sriov_free_irqs(struct ice_pf *pf, struct ice_vf *vf) 549 { 550 /* Move back from first vector index to first index in bitmap */ 551 int bm_i = pf->sriov_irq_size - vf->first_vector_idx - vf->num_msix; 552 553 bitmap_clear(pf->sriov_irq_bm, bm_i, vf->num_msix); 554 vf->first_vector_idx = 0; 555 } 556 557 /** 558 * ice_init_vf_vsi_res - initialize/setup VF VSI resources 559 * @vf: VF to initialize/setup the VSI for 560 * 561 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up 562 * the VF VSI's broadcast filter. It is only used during initial VF creation. 563 */ 564 static int ice_init_vf_vsi_res(struct ice_vf *vf) 565 { 566 struct ice_pf *pf = vf->pf; 567 struct ice_vsi *vsi; 568 int err; 569 570 vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix); 571 if (vf->first_vector_idx < 0) 572 return -ENOMEM; 573 574 vsi = ice_vf_vsi_setup(vf); 575 if (!vsi) 576 return -ENOMEM; 577 578 err = ice_vf_init_host_cfg(vf, vsi); 579 if (err) 580 goto release_vsi; 581 582 return 0; 583 584 release_vsi: 585 ice_vf_vsi_release(vf); 586 return err; 587 } 588 589 /** 590 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV 591 * @pf: PF the VFs are associated with 592 */ 593 static int ice_start_vfs(struct ice_pf *pf) 594 { 595 struct ice_hw *hw = &pf->hw; 596 unsigned int bkt, it_cnt; 597 struct ice_vf *vf; 598 int retval; 599 600 lockdep_assert_held(&pf->vfs.table_lock); 601 602 it_cnt = 0; 603 ice_for_each_vf(pf, bkt, vf) { 604 vf->vf_ops->clear_reset_trigger(vf); 605 606 retval = ice_init_vf_vsi_res(vf); 607 if (retval) { 608 dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n", 609 vf->vf_id, retval); 610 goto teardown; 611 } 612 613 retval = ice_eswitch_attach(pf, vf); 614 if (retval) { 615 dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d\n", 616 vf->vf_id, retval); 617 ice_vf_vsi_release(vf); 618 goto teardown; 619 } 620 621 set_bit(ICE_VF_STATE_INIT, vf->vf_states); 622 ice_ena_vf_mappings(vf); 623 wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); 624 it_cnt++; 625 } 626 627 ice_flush(hw); 628 return 0; 629 630 teardown: 631 ice_for_each_vf(pf, bkt, vf) { 632 if (it_cnt == 0) 633 break; 634 635 ice_dis_vf_mappings(vf); 636 ice_vf_vsi_release(vf); 637 it_cnt--; 638 } 639 640 return retval; 641 } 642 643 /** 644 * ice_sriov_free_vf - Free
VF memory after all references are dropped 645 * @vf: pointer to VF to free 646 * 647 * Called by ice_put_vf through ice_release_vf once the last reference to a VF 648 * structure has been dropped. 649 */ 650 static void ice_sriov_free_vf(struct ice_vf *vf) 651 { 652 mutex_destroy(&vf->cfg_lock); 653 654 kfree_rcu(vf, rcu); 655 } 656 657 /** 658 * ice_sriov_clear_reset_state - clears VF Reset status register 659 * @vf: the vf to configure 660 */ 661 static void ice_sriov_clear_reset_state(struct ice_vf *vf) 662 { 663 struct ice_hw *hw = &vf->pf->hw; 664 665 /* Clear the reset status register so that VF immediately sees that 666 * the device is resetting, even if hardware hasn't yet gotten around 667 * to clearing VFGEN_RSTAT for us. 668 */ 669 wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_INPROGRESS); 670 } 671 672 /** 673 * ice_sriov_clear_mbx_register - clears SRIOV VF's mailbox registers 674 * @vf: the vf to configure 675 */ 676 static void ice_sriov_clear_mbx_register(struct ice_vf *vf) 677 { 678 struct ice_pf *pf = vf->pf; 679 680 wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0); 681 wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0); 682 } 683 684 /** 685 * ice_sriov_trigger_reset_register - trigger VF reset for SRIOV VF 686 * @vf: pointer to VF structure 687 * @is_vflr: true if reset occurred due to VFLR 688 * 689 * Trigger and cleanup after a VF reset for a SR-IOV VF. 690 */ 691 static void ice_sriov_trigger_reset_register(struct ice_vf *vf, bool is_vflr) 692 { 693 struct ice_pf *pf = vf->pf; 694 u32 reg, reg_idx, bit_idx; 695 unsigned int vf_abs_id, i; 696 struct device *dev; 697 struct ice_hw *hw; 698 699 dev = ice_pf_to_dev(pf); 700 hw = &pf->hw; 701 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id; 702 703 /* In the case of a VFLR, HW has already reset the VF and we just need 704 * to clean up. Otherwise we must first trigger the reset using the 705 * VFRTRIG register. 706 */ 707 if (!is_vflr) { 708 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id)); 709 reg |= VPGEN_VFRTRIG_VFSWR_M; 710 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg); 711 } 712 713 /* clear the VFLR bit in GLGEN_VFLRSTAT */ 714 reg_idx = (vf_abs_id) / 32; 715 bit_idx = (vf_abs_id) % 32; 716 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); 717 ice_flush(hw); 718 719 wr32(hw, PF_PCI_CIAA, 720 VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S)); 721 for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) { 722 reg = rd32(hw, PF_PCI_CIAD); 723 /* no transactions pending so stop polling */ 724 if ((reg & VF_TRANS_PENDING_M) == 0) 725 break; 726 727 dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id); 728 udelay(ICE_PCI_CIAD_WAIT_DELAY_US); 729 } 730 } 731 732 /** 733 * ice_sriov_poll_reset_status - poll SRIOV VF reset status 734 * @vf: pointer to VF structure 735 * 736 * Returns true when reset is successful, else returns false 737 */ 738 static bool ice_sriov_poll_reset_status(struct ice_vf *vf) 739 { 740 struct ice_pf *pf = vf->pf; 741 unsigned int i; 742 u32 reg; 743 744 for (i = 0; i < 10; i++) { 745 /* VF reset requires driver to first reset the VF and then 746 * poll the status register to make sure that the reset 747 * completed successfully. 
748 */ 749 reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id)); 750 if (reg & VPGEN_VFRSTAT_VFRD_M) 751 return true; 752 753 /* only sleep if the reset is not done */ 754 usleep_range(10, 20); 755 } 756 return false; 757 } 758 759 /** 760 * ice_sriov_clear_reset_trigger - enable VF to access hardware 761 * @vf: VF to enable hardware access for 762 */ 763 static void ice_sriov_clear_reset_trigger(struct ice_vf *vf) 764 { 765 struct ice_hw *hw = &vf->pf->hw; 766 u32 reg; 767 768 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id)); 769 reg &= ~VPGEN_VFRTRIG_VFSWR_M; 770 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg); 771 ice_flush(hw); 772 } 773 774 /** 775 * ice_sriov_create_vsi - Create a new VSI for a VF 776 * @vf: VF to create the VSI for 777 * 778 * This is called by ice_vf_recreate_vsi to create the new VSI after the old 779 * VSI has been released. 780 */ 781 static int ice_sriov_create_vsi(struct ice_vf *vf) 782 { 783 struct ice_vsi *vsi; 784 785 vsi = ice_vf_vsi_setup(vf); 786 if (!vsi) 787 return -ENOMEM; 788 789 return 0; 790 } 791 792 /** 793 * ice_sriov_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt 794 * @vf: VF to perform tasks on 795 */ 796 static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf) 797 { 798 ice_ena_vf_mappings(vf); 799 wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); 800 } 801 802 static const struct ice_vf_ops ice_sriov_vf_ops = { 803 .reset_type = ICE_VF_RESET, 804 .free = ice_sriov_free_vf, 805 .clear_reset_state = ice_sriov_clear_reset_state, 806 .clear_mbx_register = ice_sriov_clear_mbx_register, 807 .trigger_reset_register = ice_sriov_trigger_reset_register, 808 .poll_reset_status = ice_sriov_poll_reset_status, 809 .clear_reset_trigger = ice_sriov_clear_reset_trigger, 810 .irq_close = NULL, 811 .create_vsi = ice_sriov_create_vsi, 812 .post_vsi_rebuild = ice_sriov_post_vsi_rebuild, 813 }; 814 815 /** 816 * ice_create_vf_entries - Allocate and insert VF entries 817 * @pf: pointer to the PF structure 818 * @num_vfs: the number of VFs to allocate 819 * 820 * Allocate new VF entries and insert them into the hash table. Set some 821 * basic default fields for initializing the new VFs. 822 * 823 * After this function exits, the hash table will have num_vfs entries 824 * inserted. 825 * 826 * Returns 0 on success or an integer error code on failure.
827 */ 828 static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs) 829 { 830 struct pci_dev *pdev = pf->pdev; 831 struct ice_vfs *vfs = &pf->vfs; 832 struct pci_dev *vfdev = NULL; 833 struct ice_vf *vf; 834 u16 vf_pdev_id; 835 int err, pos; 836 837 lockdep_assert_held(&vfs->table_lock); 838 839 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 840 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_pdev_id); 841 842 for (u16 vf_id = 0; vf_id < num_vfs; vf_id++) { 843 vf = kzalloc(sizeof(*vf), GFP_KERNEL); 844 if (!vf) { 845 err = -ENOMEM; 846 goto err_free_entries; 847 } 848 kref_init(&vf->refcnt); 849 850 vf->pf = pf; 851 vf->vf_id = vf_id; 852 853 /* set sriov vf ops for VFs created during SRIOV flow */ 854 vf->vf_ops = &ice_sriov_vf_ops; 855 856 ice_initialize_vf_entry(vf); 857 858 do { 859 vfdev = pci_get_device(pdev->vendor, vf_pdev_id, vfdev); 860 } while (vfdev && vfdev->physfn != pdev); 861 vf->vfdev = vfdev; 862 vf->vf_sw_id = pf->first_sw; 863 864 pci_dev_get(vfdev); 865 866 /* set default number of MSI-X */ 867 vf->num_msix = pf->vfs.num_msix_per; 868 vf->num_vf_qs = pf->vfs.num_qps_per; 869 ice_vc_set_default_allowlist(vf); 870 871 hash_add_rcu(vfs->table, &vf->entry, vf_id); 872 } 873 874 /* Decrement of refcount done by pci_get_device() inside the loop does 875 * not touch the last iteration's vfdev, so it has to be done manually 876 * to balance pci_dev_get() added within the loop. 877 */ 878 pci_dev_put(vfdev); 879 880 return 0; 881 882 err_free_entries: 883 ice_free_vf_entries(pf); 884 return err; 885 } 886 887 /** 888 * ice_ena_vfs - enable VFs so they are ready to be used 889 * @pf: pointer to the PF structure 890 * @num_vfs: number of VFs to enable 891 */ 892 static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) 893 { 894 int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; 895 struct device *dev = ice_pf_to_dev(pf); 896 struct ice_hw *hw = &pf->hw; 897 int ret; 898 899 pf->sriov_irq_bm = bitmap_zalloc(total_vectors, GFP_KERNEL); 900 if (!pf->sriov_irq_bm) 901 return -ENOMEM; 902 pf->sriov_irq_size = total_vectors; 903 904 /* Disable global interrupt 0 so we don't try to handle the VFLR. */ 905 wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index), 906 ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); 907 set_bit(ICE_OICR_INTR_DIS, pf->state); 908 ice_flush(hw); 909 910 ret = pci_enable_sriov(pf->pdev, num_vfs); 911 if (ret) 912 goto err_unroll_intr; 913 914 mutex_lock(&pf->vfs.table_lock); 915 916 ret = ice_set_per_vf_res(pf, num_vfs); 917 if (ret) { 918 dev_err(dev, "Not enough resources for %d VFs, err %d. 
Try with fewer VFs\n", 919 num_vfs, ret); 920 goto err_unroll_sriov; 921 } 922 923 ret = ice_create_vf_entries(pf, num_vfs); 924 if (ret) { 925 dev_err(dev, "Failed to allocate VF entries for %d VFs\n", 926 num_vfs); 927 goto err_unroll_sriov; 928 } 929 930 ice_eswitch_reserve_cp_queues(pf, num_vfs); 931 ret = ice_start_vfs(pf); 932 if (ret) { 933 dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret); 934 ret = -EAGAIN; 935 goto err_unroll_vf_entries; 936 } 937 938 clear_bit(ICE_VF_DIS, pf->state); 939 940 /* rearm global interrupts */ 941 if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state)) 942 ice_irq_dynamic_ena(hw, NULL, NULL); 943 944 mutex_unlock(&pf->vfs.table_lock); 945 946 return 0; 947 948 err_unroll_vf_entries: 949 ice_free_vf_entries(pf); 950 err_unroll_sriov: 951 mutex_unlock(&pf->vfs.table_lock); 952 pci_disable_sriov(pf->pdev); 953 err_unroll_intr: 954 /* rearm interrupts here */ 955 ice_irq_dynamic_ena(hw, NULL, NULL); 956 clear_bit(ICE_OICR_INTR_DIS, pf->state); 957 bitmap_free(pf->sriov_irq_bm); 958 return ret; 959 } 960 961 /** 962 * ice_pci_sriov_ena - Enable or change number of VFs 963 * @pf: pointer to the PF structure 964 * @num_vfs: number of VFs to allocate 965 * 966 * Returns 0 on success and negative on failure 967 */ 968 static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) 969 { 970 struct device *dev = ice_pf_to_dev(pf); 971 int err; 972 973 if (!num_vfs) { 974 ice_free_vfs(pf); 975 return 0; 976 } 977 978 if (num_vfs > pf->vfs.num_supported) { 979 dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n", 980 num_vfs, pf->vfs.num_supported); 981 return -EOPNOTSUPP; 982 } 983 984 dev_info(dev, "Enabling %d VFs\n", num_vfs); 985 err = ice_ena_vfs(pf, num_vfs); 986 if (err) { 987 dev_err(dev, "Failed to enable SR-IOV: %d\n", err); 988 return err; 989 } 990 991 set_bit(ICE_FLAG_SRIOV_ENA, pf->flags); 992 return 0; 993 } 994 995 /** 996 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks 997 * @pf: PF to enable SR-IOV on 998 */ 999 static int ice_check_sriov_allowed(struct ice_pf *pf) 1000 { 1001 struct device *dev = ice_pf_to_dev(pf); 1002 1003 if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) { 1004 dev_err(dev, "This device is not capable of SR-IOV\n"); 1005 return -EOPNOTSUPP; 1006 } 1007 1008 if (ice_is_safe_mode(pf)) { 1009 dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n"); 1010 return -EOPNOTSUPP; 1011 } 1012 1013 if (!ice_pf_state_is_nominal(pf)) { 1014 dev_err(dev, "Cannot enable SR-IOV, device not ready\n"); 1015 return -EBUSY; 1016 } 1017 1018 return 0; 1019 } 1020 1021 /** 1022 * ice_sriov_get_vf_total_msix - return the total number of MSI-X vectors available for VFs 1023 * @pdev: pointer to pci_dev struct 1024 * 1025 * The function is called via sysfs ops 1026 */ 1027 u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev) 1028 { 1029 struct ice_pf *pf = pci_get_drvdata(pdev); 1030 1031 return pf->sriov_irq_size - ice_get_max_used_msix_vector(pf); 1032 } 1033 1034 static int ice_sriov_move_base_vector(struct ice_pf *pf, int move) 1035 { 1036 if (pf->sriov_base_vector - move < ice_get_max_used_msix_vector(pf)) 1037 return -ENOMEM; 1038 1039 pf->sriov_base_vector -= move; 1040 return 0; 1041 } 1042 1043 static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id) 1044 { 1045 u16 vf_ids[ICE_MAX_SRIOV_VFS]; 1046 struct ice_vf *tmp_vf; 1047 int to_remap = 0, bkt; 1048 1049 /* For better irq usage, try to remap the irqs of VFs 1050 * that aren't running yet 1051 */ 1052 ice_for_each_vf(pf, bkt,
tmp_vf) { 1053 /* skip VF which is changing the number of MSI-X */ 1054 if (restricted_id == tmp_vf->vf_id || 1055 test_bit(ICE_VF_STATE_ACTIVE, tmp_vf->vf_states)) 1056 continue; 1057 1058 ice_dis_vf_mappings(tmp_vf); 1059 ice_sriov_free_irqs(pf, tmp_vf); 1060 1061 vf_ids[to_remap] = tmp_vf->vf_id; 1062 to_remap += 1; 1063 } 1064 1065 for (int i = 0; i < to_remap; i++) { 1066 tmp_vf = ice_get_vf_by_id(pf, vf_ids[i]); 1067 if (!tmp_vf) 1068 continue; 1069 1070 tmp_vf->first_vector_idx = 1071 ice_sriov_get_irqs(pf, tmp_vf->num_msix); 1072 /* there is no need to rebuild the VSI as we are only changing the 1073 * vector indexes, not the number of MSI-X vectors or queues 1074 */ 1075 ice_ena_vf_mappings(tmp_vf); 1076 ice_put_vf(tmp_vf); 1077 } 1078 } 1079 1080 /** 1081 * ice_sriov_set_msix_vec_count - set a new MSI-X vector count on a VF 1082 * @vf_dev: pointer to pci_dev struct of VF device 1083 * @msix_vec_count: new value for MSI-X amount on this VF 1084 * 1085 * Set requested MSI-X, queues and registers for @vf_dev. 1086 * 1087 * First do some sanity checks, such as whether any VFs exist and whether the 1088 * new value is valid. Then disable the old mapping (MSI-X and queue registers), 1089 * change the MSI-X and queue counts, rebuild the VSI and enable the new mapping. 1090 * 1091 * If possible (no driver bound to the VF), also try to remap the other VFs to 1092 * linearize irq register usage. 1093 */ 1094 int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count) 1095 { 1096 struct pci_dev *pdev = pci_physfn(vf_dev); 1097 struct ice_pf *pf = pci_get_drvdata(pdev); 1098 u16 prev_msix, prev_queues, queues; 1099 bool needs_rebuild = false; 1100 struct ice_vf *vf; 1101 int id; 1102 1103 if (!ice_get_num_vfs(pf)) 1104 return -ENOENT; 1105 1106 if (!msix_vec_count) 1107 return 0; 1108 1109 queues = msix_vec_count; 1110 /* add 1 MSI-X for OICR */ 1111 msix_vec_count += 1; 1112 1113 if (queues > min(ice_get_avail_txq_count(pf), 1114 ice_get_avail_rxq_count(pf))) 1115 return -EINVAL; 1116 1117 if (msix_vec_count < ICE_MIN_INTR_PER_VF) 1118 return -EINVAL; 1119 1120 /* Translate the PCI VF function number to the VF ID */ 1121 for (id = 0; id < pci_num_vf(pdev); id++) { 1122 if (vf_dev->devfn == pci_iov_virtfn_devfn(pdev, id)) 1123 break; 1124 } 1125 1126 if (id == pci_num_vf(pdev)) 1127 return -ENOENT; 1128 1129 vf = ice_get_vf_by_id(pf, id); 1130 1131 if (!vf) 1132 return -ENOENT; 1133 1134 prev_msix = vf->num_msix; 1135 prev_queues = vf->num_vf_qs; 1136 1137 if (ice_sriov_move_base_vector(pf, msix_vec_count - prev_msix)) { 1138 ice_put_vf(vf); 1139 return -ENOSPC; 1140 } 1141 1142 ice_dis_vf_mappings(vf); 1143 ice_sriov_free_irqs(pf, vf); 1144 1145 /* Remap all VFs besides the one now being configured */ 1146 ice_sriov_remap_vectors(pf, vf->vf_id); 1147 1148 vf->num_msix = msix_vec_count; 1149 vf->num_vf_qs = queues; 1150 vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix); 1151 if (vf->first_vector_idx < 0) 1152 goto unroll; 1153 1154 ice_vf_vsi_release(vf); 1155 if (vf->vf_ops->create_vsi(vf)) { 1156 /* Try to rebuild with previous values */ 1157 needs_rebuild = true; 1158 goto unroll; 1159 } 1160 1161 dev_info(ice_pf_to_dev(pf), 1162 "Changing VF %d resources to %d vectors and %d queues\n", 1163 vf->vf_id, vf->num_msix, vf->num_vf_qs); 1164 1165 ice_ena_vf_mappings(vf); 1166 ice_put_vf(vf); 1167 1168 return 0; 1169 1170 unroll: 1171 dev_info(ice_pf_to_dev(pf), 1172 "Can't set %d vectors on VF %d, falling back to %d\n", 1173 vf->num_msix, vf->vf_id, prev_msix); 1174 1175 vf->num_msix = prev_msix; 1176 vf->num_vf_qs = prev_queues; 1177
vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix); 1178 if (vf->first_vector_idx < 0) 1179 return -EINVAL; 1180 1181 if (needs_rebuild) 1182 vf->vf_ops->create_vsi(vf); 1183 1184 ice_ena_vf_mappings(vf); 1185 ice_put_vf(vf); 1186 1187 return -EINVAL; 1188 } 1189 1190 /** 1191 * ice_sriov_configure - Enable or change number of VFs via sysfs 1192 * @pdev: pointer to a pci_dev structure 1193 * @num_vfs: number of VFs to allocate or 0 to free VFs 1194 * 1195 * This function is called when the user updates the number of VFs in sysfs. On 1196 * success return whatever num_vfs was set to by the caller. Return negative on 1197 * failure. 1198 */ 1199 int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) 1200 { 1201 struct ice_pf *pf = pci_get_drvdata(pdev); 1202 struct device *dev = ice_pf_to_dev(pf); 1203 int err; 1204 1205 err = ice_check_sriov_allowed(pf); 1206 if (err) 1207 return err; 1208 1209 if (!num_vfs) { 1210 if (!pci_vfs_assigned(pdev)) { 1211 ice_free_vfs(pf); 1212 return 0; 1213 } 1214 1215 dev_err(dev, "can't free VFs because some are assigned to VMs.\n"); 1216 return -EBUSY; 1217 } 1218 1219 err = ice_pci_sriov_ena(pf, num_vfs); 1220 if (err) 1221 return err; 1222 1223 return num_vfs; 1224 } 1225 1226 /** 1227 * ice_process_vflr_event - Free VF resources via IRQ calls 1228 * @pf: pointer to the PF structure 1229 * 1230 * called from the VFLR IRQ handler to 1231 * free up VF resources and state variables 1232 */ 1233 void ice_process_vflr_event(struct ice_pf *pf) 1234 { 1235 struct ice_hw *hw = &pf->hw; 1236 struct ice_vf *vf; 1237 unsigned int bkt; 1238 u32 reg; 1239 1240 if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) || 1241 !ice_has_vfs(pf)) 1242 return; 1243 1244 mutex_lock(&pf->vfs.table_lock); 1245 ice_for_each_vf(pf, bkt, vf) { 1246 u32 reg_idx, bit_idx; 1247 1248 reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; 1249 bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; 1250 /* read GLGEN_VFLRSTAT register to find out the flr VFs */ 1251 reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx)); 1252 if (reg & BIT(bit_idx)) 1253 /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */ 1254 ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK); 1255 } 1256 mutex_unlock(&pf->vfs.table_lock); 1257 } 1258 1259 /** 1260 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in 1261 * @pf: PF used to index all VFs 1262 * @pfq: queue index relative to the PF's function space 1263 * 1264 * If no VF is found who owns the pfq then return NULL, otherwise return a 1265 * pointer to the VF who owns the pfq 1266 * 1267 * If this function returns non-NULL, it acquires a reference count of the VF 1268 * structure. The caller is responsible for calling ice_put_vf() to drop this 1269 * reference. 
1270 */ 1271 static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq) 1272 { 1273 struct ice_vf *vf; 1274 unsigned int bkt; 1275 1276 rcu_read_lock(); 1277 ice_for_each_vf_rcu(pf, bkt, vf) { 1278 struct ice_vsi *vsi; 1279 u16 rxq_idx; 1280 1281 vsi = ice_get_vf_vsi(vf); 1282 if (!vsi) 1283 continue; 1284 1285 ice_for_each_rxq(vsi, rxq_idx) 1286 if (vsi->rxq_map[rxq_idx] == pfq) { 1287 struct ice_vf *found; 1288 1289 if (kref_get_unless_zero(&vf->refcnt)) 1290 found = vf; 1291 else 1292 found = NULL; 1293 rcu_read_unlock(); 1294 return found; 1295 } 1296 } 1297 rcu_read_unlock(); 1298 1299 return NULL; 1300 } 1301 1302 /** 1303 * ice_globalq_to_pfq - convert from global queue index to PF space queue index 1304 * @pf: PF used for conversion 1305 * @globalq: global queue index used to convert to PF space queue index 1306 */ 1307 static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq) 1308 { 1309 return globalq - pf->hw.func_caps.common_cap.rxq_first_id; 1310 } 1311 1312 /** 1313 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF 1314 * @pf: PF that the LAN overflow event happened on 1315 * @event: structure holding the event information for the LAN overflow event 1316 * 1317 * Determine if the LAN overflow event was caused by a VF queue. If it was not 1318 * caused by a VF, do nothing. If a VF caused this LAN overflow event, trigger a 1319 * reset on the offending VF. 1320 */ 1321 void 1322 ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) 1323 { 1324 u32 gldcb_rtctq, queue; 1325 struct ice_vf *vf; 1326 1327 gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq); 1328 dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq); 1329 1330 /* event returns device global Rx queue number */ 1331 queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >> 1332 GLDCB_RTCTQ_RXQNUM_S; 1333 1334 vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue)); 1335 if (!vf) 1336 return; 1337 1338 ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK); 1339 ice_put_vf(vf); 1340 } 1341 1342 /** 1343 * ice_set_vf_spoofchk 1344 * @netdev: network interface device structure 1345 * @vf_id: VF identifier 1346 * @ena: flag to enable or disable feature 1347 * 1348 * Enable or disable VF spoof checking 1349 */ 1350 int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) 1351 { 1352 struct ice_netdev_priv *np = netdev_priv(netdev); 1353 struct ice_pf *pf = np->vsi->back; 1354 struct ice_vsi *vf_vsi; 1355 struct device *dev; 1356 struct ice_vf *vf; 1357 int ret; 1358 1359 dev = ice_pf_to_dev(pf); 1360 1361 vf = ice_get_vf_by_id(pf, vf_id); 1362 if (!vf) 1363 return -EINVAL; 1364 1365 ret = ice_check_vf_ready_for_cfg(vf); 1366 if (ret) 1367 goto out_put_vf; 1368 1369 vf_vsi = ice_get_vf_vsi(vf); 1370 if (!vf_vsi) { 1371 netdev_err(netdev, "VSI %d for VF %d is null\n", 1372 vf->lan_vsi_idx, vf->vf_id); 1373 ret = -EINVAL; 1374 goto out_put_vf; 1375 } 1376 1377 if (vf_vsi->type != ICE_VSI_VF) { 1378 netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n", 1379 vf_vsi->type, vf_vsi->vsi_num, vf->vf_id); 1380 ret = -ENODEV; 1381 goto out_put_vf; 1382 } 1383 1384 if (ena == vf->spoofchk) { 1385 dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF"); 1386 ret = 0; 1387 goto out_put_vf; 1388 } 1389 1390 ret = ice_vsi_apply_spoofchk(vf_vsi, ena); 1391 if (ret) 1392 dev_err(dev, "Failed to set spoofchk %s for VF %d VSI %d, error %d\n", 1393 ena ?
"ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret); 1394 else 1395 vf->spoofchk = ena; 1396 1397 out_put_vf: 1398 ice_put_vf(vf); 1399 return ret; 1400 } 1401 1402 /** 1403 * ice_get_vf_cfg 1404 * @netdev: network interface device structure 1405 * @vf_id: VF identifier 1406 * @ivi: VF configuration structure 1407 * 1408 * return VF configuration 1409 */ 1410 int 1411 ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) 1412 { 1413 struct ice_pf *pf = ice_netdev_to_pf(netdev); 1414 struct ice_vf *vf; 1415 int ret; 1416 1417 vf = ice_get_vf_by_id(pf, vf_id); 1418 if (!vf) 1419 return -EINVAL; 1420 1421 ret = ice_check_vf_ready_for_cfg(vf); 1422 if (ret) 1423 goto out_put_vf; 1424 1425 ivi->vf = vf_id; 1426 ether_addr_copy(ivi->mac, vf->hw_lan_addr); 1427 1428 /* VF configuration for VLAN and applicable QoS */ 1429 ivi->vlan = ice_vf_get_port_vlan_id(vf); 1430 ivi->qos = ice_vf_get_port_vlan_prio(vf); 1431 if (ice_vf_is_port_vlan_ena(vf)) 1432 ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf)); 1433 1434 ivi->trusted = vf->trusted; 1435 ivi->spoofchk = vf->spoofchk; 1436 if (!vf->link_forced) 1437 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; 1438 else if (vf->link_up) 1439 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; 1440 else 1441 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; 1442 ivi->max_tx_rate = vf->max_tx_rate; 1443 ivi->min_tx_rate = vf->min_tx_rate; 1444 1445 out_put_vf: 1446 ice_put_vf(vf); 1447 return ret; 1448 } 1449 1450 /** 1451 * ice_set_vf_mac 1452 * @netdev: network interface device structure 1453 * @vf_id: VF identifier 1454 * @mac: MAC address 1455 * 1456 * program VF MAC address 1457 */ 1458 int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) 1459 { 1460 struct ice_pf *pf = ice_netdev_to_pf(netdev); 1461 struct ice_vf *vf; 1462 int ret; 1463 1464 if (is_multicast_ether_addr(mac)) { 1465 netdev_err(netdev, "%pM not a valid unicast address\n", mac); 1466 return -EINVAL; 1467 } 1468 1469 vf = ice_get_vf_by_id(pf, vf_id); 1470 if (!vf) 1471 return -EINVAL; 1472 1473 /* nothing left to do, unicast MAC already set */ 1474 if (ether_addr_equal(vf->dev_lan_addr, mac) && 1475 ether_addr_equal(vf->hw_lan_addr, mac)) { 1476 ret = 0; 1477 goto out_put_vf; 1478 } 1479 1480 ret = ice_check_vf_ready_for_cfg(vf); 1481 if (ret) 1482 goto out_put_vf; 1483 1484 mutex_lock(&vf->cfg_lock); 1485 1486 /* VF is notified of its new MAC via the PF's response to the 1487 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset 1488 */ 1489 ether_addr_copy(vf->dev_lan_addr, mac); 1490 ether_addr_copy(vf->hw_lan_addr, mac); 1491 if (is_zero_ether_addr(mac)) { 1492 /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */ 1493 vf->pf_set_mac = false; 1494 netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n", 1495 vf->vf_id); 1496 } else { 1497 /* PF will add MAC rule for the VF */ 1498 vf->pf_set_mac = true; 1499 netdev_info(netdev, "Setting MAC %pM on VF %d. 
VF driver will be reinitialized\n", 1500 mac, vf_id); 1501 } 1502 1503 ice_reset_vf(vf, ICE_VF_RESET_NOTIFY); 1504 mutex_unlock(&vf->cfg_lock); 1505 1506 out_put_vf: 1507 ice_put_vf(vf); 1508 return ret; 1509 } 1510 1511 /** 1512 * ice_set_vf_trust 1513 * @netdev: network interface device structure 1514 * @vf_id: VF identifier 1515 * @trusted: Boolean value to enable/disable trusted VF 1516 * 1517 * Enable or disable a given VF as trusted 1518 */ 1519 int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) 1520 { 1521 struct ice_pf *pf = ice_netdev_to_pf(netdev); 1522 struct ice_vf *vf; 1523 int ret; 1524 1525 vf = ice_get_vf_by_id(pf, vf_id); 1526 if (!vf) 1527 return -EINVAL; 1528 1529 if (ice_is_eswitch_mode_switchdev(pf)) { 1530 dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n"); 1531 return -EOPNOTSUPP; 1532 } 1533 1534 ret = ice_check_vf_ready_for_cfg(vf); 1535 if (ret) 1536 goto out_put_vf; 1537 1538 /* Check if already trusted */ 1539 if (trusted == vf->trusted) { 1540 ret = 0; 1541 goto out_put_vf; 1542 } 1543 1544 mutex_lock(&vf->cfg_lock); 1545 1546 vf->trusted = trusted; 1547 ice_reset_vf(vf, ICE_VF_RESET_NOTIFY); 1548 dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n", 1549 vf_id, trusted ? "" : "un"); 1550 1551 mutex_unlock(&vf->cfg_lock); 1552 1553 out_put_vf: 1554 ice_put_vf(vf); 1555 return ret; 1556 } 1557 1558 /** 1559 * ice_set_vf_link_state 1560 * @netdev: network interface device structure 1561 * @vf_id: VF identifier 1562 * @link_state: required link state 1563 * 1564 * Set VF's link state, irrespective of physical link state status 1565 */ 1566 int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) 1567 { 1568 struct ice_pf *pf = ice_netdev_to_pf(netdev); 1569 struct ice_vf *vf; 1570 int ret; 1571 1572 vf = ice_get_vf_by_id(pf, vf_id); 1573 if (!vf) 1574 return -EINVAL; 1575 1576 ret = ice_check_vf_ready_for_cfg(vf); 1577 if (ret) 1578 goto out_put_vf; 1579 1580 switch (link_state) { 1581 case IFLA_VF_LINK_STATE_AUTO: 1582 vf->link_forced = false; 1583 break; 1584 case IFLA_VF_LINK_STATE_ENABLE: 1585 vf->link_forced = true; 1586 vf->link_up = true; 1587 break; 1588 case IFLA_VF_LINK_STATE_DISABLE: 1589 vf->link_forced = true; 1590 vf->link_up = false; 1591 break; 1592 default: 1593 ret = -EINVAL; 1594 goto out_put_vf; 1595 } 1596 1597 ice_vc_notify_vf_link_state(vf); 1598 1599 out_put_vf: 1600 ice_put_vf(vf); 1601 return ret; 1602 } 1603 1604 /** 1605 * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs 1606 * @pf: PF associated with VFs 1607 */ 1608 static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf) 1609 { 1610 struct ice_vf *vf; 1611 unsigned int bkt; 1612 int rate = 0; 1613 1614 rcu_read_lock(); 1615 ice_for_each_vf_rcu(pf, bkt, vf) 1616 rate += vf->min_tx_rate; 1617 rcu_read_unlock(); 1618 1619 return rate; 1620 } 1621 1622 /** 1623 * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription 1624 * @vf: VF trying to configure min_tx_rate 1625 * @min_tx_rate: min Tx rate in Mbps 1626 * 1627 * Check if the min_tx_rate being passed in will cause oversubscription of total 1628 * min_tx_rate based on the current link speed and all other VFs configured 1629 * min_tx_rate 1630 * 1631 * Return true if the passed min_tx_rate would cause oversubscription, else 1632 * return false 1633 */ 1634 static bool 1635 ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate) 1636 { 1637 struct ice_vsi *vsi = ice_get_vf_vsi(vf); 1638 int 
all_vfs_min_tx_rate; 1639 int link_speed_mbps; 1640 1641 if (WARN_ON(!vsi)) 1642 return false; 1643 1644 link_speed_mbps = ice_get_link_speed_mbps(vsi); 1645 all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf); 1646 1647 /* this VF's previous rate is being overwritten */ 1648 all_vfs_min_tx_rate -= vf->min_tx_rate; 1649 1650 if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) { 1651 dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n", 1652 min_tx_rate, vf->vf_id, 1653 all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps, 1654 link_speed_mbps); 1655 return true; 1656 } 1657 1658 return false; 1659 } 1660 1661 /** 1662 * ice_set_vf_bw - set min/max VF bandwidth 1663 * @netdev: network interface device structure 1664 * @vf_id: VF identifier 1665 * @min_tx_rate: Minimum Tx rate in Mbps 1666 * @max_tx_rate: Maximum Tx rate in Mbps 1667 */ 1668 int 1669 ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, 1670 int max_tx_rate) 1671 { 1672 struct ice_pf *pf = ice_netdev_to_pf(netdev); 1673 struct ice_vsi *vsi; 1674 struct device *dev; 1675 struct ice_vf *vf; 1676 int ret; 1677 1678 dev = ice_pf_to_dev(pf); 1679 1680 vf = ice_get_vf_by_id(pf, vf_id); 1681 if (!vf) 1682 return -EINVAL; 1683 1684 ret = ice_check_vf_ready_for_cfg(vf); 1685 if (ret) 1686 goto out_put_vf; 1687 1688 vsi = ice_get_vf_vsi(vf); 1689 if (!vsi) { 1690 ret = -EINVAL; 1691 goto out_put_vf; 1692 } 1693 1694 if (min_tx_rate && ice_is_dcb_active(pf)) { 1695 dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n"); 1696 ret = -EOPNOTSUPP; 1697 goto out_put_vf; 1698 } 1699 1700 if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) { 1701 ret = -EINVAL; 1702 goto out_put_vf; 1703 } 1704 1705 if (vf->min_tx_rate != (unsigned int)min_tx_rate) { 1706 ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000); 1707 if (ret) { 1708 dev_err(dev, "Unable to set min-tx-rate for VF %d\n", 1709 vf->vf_id); 1710 goto out_put_vf; 1711 } 1712 1713 vf->min_tx_rate = min_tx_rate; 1714 } 1715 1716 if (vf->max_tx_rate != (unsigned int)max_tx_rate) { 1717 ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000); 1718 if (ret) { 1719 dev_err(dev, "Unable to set max-tx-rate for VF %d\n", 1720 vf->vf_id); 1721 goto out_put_vf; 1722 } 1723 1724 vf->max_tx_rate = max_tx_rate; 1725 } 1726 1727 out_put_vf: 1728 ice_put_vf(vf); 1729 return ret; 1730 } 1731 1732 /** 1733 * ice_get_vf_stats - populate some stats for the VF 1734 * @netdev: the netdev of the PF 1735 * @vf_id: the host OS identifier (0-255) 1736 * @vf_stats: pointer to the OS memory to be initialized 1737 */ 1738 int ice_get_vf_stats(struct net_device *netdev, int vf_id, 1739 struct ifla_vf_stats *vf_stats) 1740 { 1741 struct ice_pf *pf = ice_netdev_to_pf(netdev); 1742 struct ice_eth_stats *stats; 1743 struct ice_vsi *vsi; 1744 struct ice_vf *vf; 1745 int ret; 1746 1747 vf = ice_get_vf_by_id(pf, vf_id); 1748 if (!vf) 1749 return -EINVAL; 1750 1751 ret = ice_check_vf_ready_for_cfg(vf); 1752 if (ret) 1753 goto out_put_vf; 1754 1755 vsi = ice_get_vf_vsi(vf); 1756 if (!vsi) { 1757 ret = -EINVAL; 1758 goto out_put_vf; 1759 } 1760 1761 ice_update_eth_stats(vsi); 1762 stats = &vsi->eth_stats; 1763 1764 memset(vf_stats, 0, sizeof(*vf_stats)); 1765 1766 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast + 1767 stats->rx_multicast; 1768 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast + 1769 stats->tx_multicast; 1770 
vf_stats->rx_bytes = stats->rx_bytes; 1771 vf_stats->tx_bytes = stats->tx_bytes; 1772 vf_stats->broadcast = stats->rx_broadcast; 1773 vf_stats->multicast = stats->rx_multicast; 1774 vf_stats->rx_dropped = stats->rx_discards; 1775 vf_stats->tx_dropped = stats->tx_discards; 1776 1777 out_put_vf: 1778 ice_put_vf(vf); 1779 return ret; 1780 } 1781 1782 /** 1783 * ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported 1784 * @hw: hardware structure used to check the VLAN mode 1785 * @vlan_proto: VLAN TPID being checked 1786 * 1787 * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q 1788 * and ETH_P_8021AD are supported. If the device is configured in Single VLAN 1789 * Mode (SVM), then only ETH_P_8021Q is supported. 1790 */ 1791 static bool 1792 ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto) 1793 { 1794 bool is_supported = false; 1795 1796 switch (vlan_proto) { 1797 case ETH_P_8021Q: 1798 is_supported = true; 1799 break; 1800 case ETH_P_8021AD: 1801 if (ice_is_dvm_ena(hw)) 1802 is_supported = true; 1803 break; 1804 } 1805 1806 return is_supported; 1807 } 1808 1809 /** 1810 * ice_set_vf_port_vlan 1811 * @netdev: network interface device structure 1812 * @vf_id: VF identifier 1813 * @vlan_id: VLAN ID being set 1814 * @qos: priority setting 1815 * @vlan_proto: VLAN protocol 1816 * 1817 * program VF Port VLAN ID and/or QoS 1818 */ 1819 int 1820 ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, 1821 __be16 vlan_proto) 1822 { 1823 struct ice_pf *pf = ice_netdev_to_pf(netdev); 1824 u16 local_vlan_proto = ntohs(vlan_proto); 1825 struct device *dev; 1826 struct ice_vf *vf; 1827 int ret; 1828 1829 dev = ice_pf_to_dev(pf); 1830 1831 if (vlan_id >= VLAN_N_VID || qos > 7) { 1832 dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n", 1833 vf_id, vlan_id, qos); 1834 return -EINVAL; 1835 } 1836 1837 if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) { 1838 dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n", 1839 local_vlan_proto); 1840 return -EPROTONOSUPPORT; 1841 } 1842 1843 vf = ice_get_vf_by_id(pf, vf_id); 1844 if (!vf) 1845 return -EINVAL; 1846 1847 ret = ice_check_vf_ready_for_cfg(vf); 1848 if (ret) 1849 goto out_put_vf; 1850 1851 if (ice_vf_get_port_vlan_prio(vf) == qos && 1852 ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto && 1853 ice_vf_get_port_vlan_id(vf) == vlan_id) { 1854 /* duplicate request, so just return success */ 1855 dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n", 1856 vlan_id, qos, local_vlan_proto); 1857 ret = 0; 1858 goto out_put_vf; 1859 } 1860 1861 mutex_lock(&vf->cfg_lock); 1862 1863 vf->port_vlan_info = ICE_VLAN(local_vlan_proto, vlan_id, qos); 1864 if (ice_vf_is_port_vlan_ena(vf)) 1865 dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n", 1866 vlan_id, qos, local_vlan_proto, vf_id); 1867 else 1868 dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id); 1869 1870 ice_reset_vf(vf, ICE_VF_RESET_NOTIFY); 1871 mutex_unlock(&vf->cfg_lock); 1872 1873 out_put_vf: 1874 ice_put_vf(vf); 1875 return ret; 1876 } 1877 1878 /** 1879 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event 1880 * @vf: pointer to the VF structure 1881 */ 1882 void ice_print_vf_rx_mdd_event(struct ice_vf *vf) 1883 { 1884 struct ice_pf *pf = vf->pf; 1885 struct device *dev; 1886 1887 dev = ice_pf_to_dev(pf); 1888 1889 dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. 
mdd-auto-reset-vfs=%s\n", 1890 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id, 1891 vf->dev_lan_addr, 1892 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags) 1893 ? "on" : "off"); 1894 } 1895 1896 /** 1897 * ice_print_vfs_mdd_events - print VFs malicious driver detect event 1898 * @pf: pointer to the PF structure 1899 * 1900 * Called from ice_handle_mdd_event to rate limit and print VFs MDD events. 1901 */ 1902 void ice_print_vfs_mdd_events(struct ice_pf *pf) 1903 { 1904 struct device *dev = ice_pf_to_dev(pf); 1905 struct ice_hw *hw = &pf->hw; 1906 struct ice_vf *vf; 1907 unsigned int bkt; 1908 1909 /* check that there are pending MDD events to print */ 1910 if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state)) 1911 return; 1912 1913 /* VF MDD event logs are rate limited to one second intervals */ 1914 if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1)) 1915 return; 1916 1917 pf->vfs.last_printed_mdd_jiffies = jiffies; 1918 1919 mutex_lock(&pf->vfs.table_lock); 1920 ice_for_each_vf(pf, bkt, vf) { 1921 /* only print Rx MDD event message if there are new events */ 1922 if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) { 1923 vf->mdd_rx_events.last_printed = 1924 vf->mdd_rx_events.count; 1925 ice_print_vf_rx_mdd_event(vf); 1926 } 1927 1928 /* only print Tx MDD event message if there are new events */ 1929 if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) { 1930 vf->mdd_tx_events.last_printed = 1931 vf->mdd_tx_events.count; 1932 1933 dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n", 1934 vf->mdd_tx_events.count, hw->pf_id, vf->vf_id, 1935 vf->dev_lan_addr); 1936 } 1937 } 1938 mutex_unlock(&pf->vfs.table_lock); 1939 } 1940 1941 /** 1942 * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR 1943 * @pf: pointer to the PF structure 1944 * 1945 * Called when recovering from a PF FLR to restore interrupt capability to 1946 * the VFs. 1947 */ 1948 void ice_restore_all_vfs_msi_state(struct ice_pf *pf) 1949 { 1950 struct ice_vf *vf; 1951 u32 bkt; 1952 1953 ice_for_each_vf(pf, bkt, vf) 1954 pci_restore_msi_state(vf->vfdev); 1955 } 1956