1 /* SPDX-License-Identifier: BSD-3-Clause */ 2 /* Copyright (c) 2025, Intel Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are met: 7 * 8 * 1. Redistributions of source code must retain the above copyright notice, 9 * this list of conditions and the following disclaimer. 10 * 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * 3. Neither the name of the Intel Corporation nor the names of its 16 * contributors may be used to endorse or promote products derived from 17 * this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /** 33 * @file ice_iov.c 34 * @brief Virtualization support functions 35 * 36 * Contains functions for enabling and managing PCIe virtual function devices, 37 * including enabling new VFs, and managing VFs over the virtchnl interface. 
38 */ 39 40 #include "ice_iov.h" 41 42 static struct ice_vf *ice_iov_get_vf(struct ice_softc *sc, int vf_num); 43 static void ice_iov_ready_vf(struct ice_softc *sc, struct ice_vf *vf); 44 static void ice_reset_vf(struct ice_softc *sc, struct ice_vf *vf, 45 bool trigger_vflr); 46 static void ice_iov_setup_intr_mapping(struct ice_softc *sc, struct ice_vf *vf); 47 48 static void ice_vc_version_msg(struct ice_softc *sc, struct ice_vf *vf, 49 u8 *msg_buf); 50 static void ice_vc_get_vf_res_msg(struct ice_softc *sc, struct ice_vf *vf, 51 u8 *msg_buf); 52 static void ice_vc_add_eth_addr_msg(struct ice_softc *sc, struct ice_vf *vf, 53 u8 *msg_buf); 54 static void ice_vc_del_eth_addr_msg(struct ice_softc *sc, struct ice_vf *vf, 55 u8 *msg_buf); 56 static bool ice_vc_isvalid_ring_len(u16 ring_len); 57 static void ice_vc_cfg_vsi_qs_msg(struct ice_softc *sc, struct ice_vf *vf, 58 u8 *msg_buf); 59 static void ice_vc_cfg_rss_key_msg(struct ice_softc *sc, struct ice_vf *vf, 60 u8 *msg_buf); 61 static void ice_vc_set_rss_hena_msg(struct ice_softc *sc, struct ice_vf *vf, 62 u8 *msg_buf); 63 static void ice_vc_enable_queues_msg(struct ice_softc *sc, struct ice_vf *vf, 64 u8 *msg_buf); 65 static void ice_vc_notify_vf_link_state(struct ice_softc *sc, struct ice_vf *vf); 66 static void ice_vc_disable_queues_msg(struct ice_softc *sc, struct ice_vf *vf, 67 u8 *msg_buf); 68 static void ice_vc_cfg_irq_map_msg(struct ice_softc *sc, struct ice_vf *vf, 69 u8 *msg_buf); 70 static void ice_vc_get_stats_msg(struct ice_softc *sc, struct ice_vf *vf, 71 u8 *msg_buf); 72 static void ice_eth_stats_to_virtchnl_eth_stats(struct ice_eth_stats *istats, 73 struct virtchnl_eth_stats *vstats); 74 static void ice_vc_cfg_rss_lut_msg(struct ice_softc *sc, struct ice_vf *vf, 75 u8 *msg_buf); 76 static void ice_vc_cfg_promisc_mode_msg(struct ice_softc *sc, struct ice_vf *vf, 77 u8 *msg_buf); 78 static void ice_vc_add_vlan_msg(struct ice_softc *sc, struct ice_vf *vf, 79 u8 *msg_buf); 80 static void 
ice_vc_del_vlan_msg(struct ice_softc *sc, struct ice_vf *vf,
    u8 *msg_buf);
static enum virtchnl_status_code ice_iov_err_to_virt_err(int ice_err);
static int ice_vf_validate_mac(struct ice_vf *vf, const uint8_t *addr);

/**
 * ice_iov_attach - Initialize SR-IOV PF host support
 * @sc: device softc structure
 *
 * Initialize SR-IOV PF host support at the end of the driver attach process.
 * Builds the PF and VF nvlist configuration schemas and registers them with
 * the PCI IOV framework; on failure the SR-IOV feature bit is cleared so the
 * rest of the driver knows SR-IOV is unavailable.
 *
 * @pre Must be called from sleepable context (calls malloc() w/ M_WAITOK)
 *
 * @returns 0 if successful, or
 * - ENOMEM if there is no memory for the PF/VF schemas or iov device
 * - ENXIO if the device isn't PCI-E or doesn't support the same SR-IOV
 *   version as the kernel
 * - ENOENT if the device doesn't have the SR-IOV capability
 */
int
ice_iov_attach(struct ice_softc *sc)
{
	device_t dev = sc->dev;
	nvlist_t *pf_schema, *vf_schema;
	int error;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();

	/*
	 * Per-VF configuration parameters the administrator may pass in via
	 * pci_iov_add_vf(); unlisted parameters are rejected by the framework.
	 */
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, TRUE);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_uint16(vf_schema, "num-queues",
	    IOV_SCHEMA_HASDEFAULT, ICE_DEFAULT_VF_QUEUES);
	pci_iov_schema_add_uint16(vf_schema, "mirror-src-vsi",
	    IOV_SCHEMA_HASDEFAULT, ICE_INVALID_MIRROR_VSI);
	pci_iov_schema_add_uint16(vf_schema, "max-vlan-allowed",
	    IOV_SCHEMA_HASDEFAULT, ICE_DEFAULT_VF_VLAN_LIMIT);
	pci_iov_schema_add_uint16(vf_schema, "max-mac-filters",
	    IOV_SCHEMA_HASDEFAULT, ICE_DEFAULT_VF_FILTER_LIMIT);

	/* pci_iov_attach() consumes both schemas, even on failure */
	error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (error != 0) {
		device_printf(dev,
		    "pci_iov_attach failed (error=%s)\n",
		    ice_err_str(error));
		ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en);
	} else
		ice_set_bit(ICE_FEATURE_SRIOV, sc->feat_en);

	return (error);
}

/**
 * ice_iov_detach - Teardown SR-IOV PF host support
 * @sc: device softc structure
 *
 * Teardown SR-IOV PF host support at the start of the driver detach process.
 *
 * @returns 0 if successful or IOV support hasn't been setup, or
 * - EBUSY if VFs still exist
 */
int
ice_iov_detach(struct ice_softc *sc)
{
	device_t dev = sc->dev;
	int error;

	error = pci_iov_detach(dev);
	if (error != 0) {
		device_printf(dev,
		    "pci_iov_detach failed (error=%s)\n",
		    ice_err_str(error));
	}

	return (error);
}

/**
 * ice_iov_init - Called by the OS before the first VF is created.
 * @sc: device softc structure
 * @num_vfs: number of VFs to setup resources for
 * @params: configuration parameters for the PF
 *
 * Allocates and zeroes the PF's VF tracking array. Per-VF hardware
 * resources are allocated later, in ice_iov_add_vf().
 *
 * @returns 0 if successful or an error code on failure
 */
int
ice_iov_init(struct ice_softc *sc, uint16_t num_vfs, const nvlist_t *params __unused)
{
	/* Allocate array of VFs, for tracking */
	sc->vfs = (struct ice_vf *)malloc(sizeof(struct ice_vf) * num_vfs, M_ICE, M_NOWAIT |
	    M_ZERO);
	if (sc->vfs == NULL)
		return (ENOMEM);

	/* Initialize each VF with basic information */
	for (int i = 0; i < num_vfs; i++)
		sc->vfs[i].vf_num = i;

	/* Save off number of configured VFs */
	sc->num_vfs = num_vfs;

	return (0);
}

/**
 * ice_iov_get_vf - Get pointer to VF at given index
 * @sc: device softc structure
 * @vf_num: Index of VF to retrieve
 *
 * @remark will throw an assertion if vf_num is not in the
 * range of allocated VFs
 *
 * @returns a pointer to the VF structure at the given index
 */
static struct ice_vf *
ice_iov_get_vf(struct ice_softc *sc, int vf_num)
{
	MPASS(vf_num < sc->num_vfs);

	return
	    &sc->vfs[vf_num];
}

/**
 * ice_iov_add_vf - Called by the OS for each VF to create
 * @sc: device softc structure
 * @vfnum: index of VF to configure
 * @params: configuration parameters for the VF
 *
 * Allocates a VSI, queue and interrupt resources for the new VF, applies
 * the administrator-supplied nvlist configuration, programs the VSI into
 * hardware, and finally marks the VF as active via ice_iov_ready_vf().
 * On any failure, all resources acquired so far are unwound via the
 * goto-cleanup chain at the bottom.
 *
 * @returns 0 if successful or an error code on failure
 */
int
ice_iov_add_vf(struct ice_softc *sc, uint16_t vfnum, const nvlist_t *params)
{
	struct ice_tx_queue *txq;
	struct ice_rx_queue *rxq;
	device_t dev = sc->dev;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	int vf_num_queues;
	const void *mac;
	size_t size;
	int error;
	int i;

	vf = ice_iov_get_vf(sc, vfnum);
	vf->vf_flags = VF_FLAG_ENABLED;

	/* This VF needs at least one VSI */
	vsi = ice_alloc_vsi(sc, ICE_VSI_VF);
	if (vsi == NULL)
		return (ENOMEM);
	vf->vsi = vsi;
	vsi->vf_num = vfnum;

	vf_num_queues = nvlist_get_number(params, "num-queues");
	/* Validate and clamp value if invalid */
	if (vf_num_queues < 1 || vf_num_queues > ICE_MAX_SCATTERED_QUEUES)
		device_printf(dev, "Invalid num-queues (%d) for VF %d\n",
		    vf_num_queues, vf->vf_num);
	if (vf_num_queues < 1) {
		device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
		vf_num_queues = 1;
	} else if (vf_num_queues > ICE_MAX_SCATTERED_QUEUES) {
		device_printf(dev, "Setting VF %d num-queues to %d\n",
		    vf->vf_num, ICE_MAX_SCATTERED_QUEUES);
		vf_num_queues = ICE_MAX_SCATTERED_QUEUES;
	}
	vsi->qmap_type = ICE_RESMGR_ALLOC_SCATTERED;

	/* Reserve VF queue allocation from PF queues */
	ice_alloc_vsi_qmap(vsi, vf_num_queues, vf_num_queues);
	vsi->num_tx_queues = vsi->num_rx_queues = vf_num_queues;

	/* Assign Tx queues from PF space */
	error = ice_resmgr_assign_scattered(&sc->tx_qmgr, vsi->tx_qmap,
	    vsi->num_tx_queues);
	if (error) {
		device_printf(sc->dev, "Unable to assign VF Tx queues: %s\n",
		    ice_err_str(error));
		goto release_vsi;
	}

	/* Assign Rx queues from PF space */
	error = ice_resmgr_assign_scattered(&sc->rx_qmgr, vsi->rx_qmap,
	    vsi->num_rx_queues);
	if (error) {
		device_printf(sc->dev, "Unable to assign VF Rx queues: %s\n",
		    ice_err_str(error));
		goto release_vsi;
	}

	vsi->max_frame_size = ICE_MAX_FRAME_SIZE;

	/* Allocate Tx queue structure memory */
	vsi->tx_queues = (struct ice_tx_queue *)
	    malloc(sizeof(struct ice_tx_queue) * vsi->num_tx_queues, M_ICE,
	    M_NOWAIT | M_ZERO);
	if (!vsi->tx_queues) {
		device_printf(sc->dev, "VF-%d: Unable to allocate Tx queue memory\n",
		    vfnum);
		error = ENOMEM;
		goto release_vsi;
	}
	for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) {
		txq->me = i;
		txq->vsi = vsi;
	}

	/* Allocate Rx queue structure memory */
	vsi->rx_queues = (struct ice_rx_queue *)
	    malloc(sizeof(struct ice_rx_queue) * vsi->num_rx_queues, M_ICE,
	    M_NOWAIT | M_ZERO);
	if (!vsi->rx_queues) {
		device_printf(sc->dev, "VF-%d: Unable to allocate Rx queue memory\n",
		    vfnum);
		error = ENOMEM;
		goto free_txqs;
	}
	for (i = 0, rxq = vsi->rx_queues; i < vsi->num_rx_queues; i++, rxq++) {
		rxq->me = i;
		rxq->vsi = vsi;
	}

	/* Allocate space to store the IRQ vector data; one extra vector
	 * beyond the queue count is reserved for the mailbox/misc interrupt.
	 */
	vf->num_irq_vectors = vf_num_queues + 1;
	vf->tx_irqvs = (struct ice_irq_vector *)
	    malloc(sizeof(struct ice_irq_vector) * (vf->num_irq_vectors),
	    M_ICE, M_NOWAIT);
	if (!vf->tx_irqvs) {
		device_printf(sc->dev,
		    "Unable to allocate TX irqv memory for VF-%d's %d vectors\n",
		    vfnum, vf->num_irq_vectors);
		error = ENOMEM;
		goto free_rxqs;
	}
	vf->rx_irqvs = (struct ice_irq_vector *)
	    malloc(sizeof(struct ice_irq_vector) * (vf->num_irq_vectors),
	    M_ICE, M_NOWAIT);
	if (!vf->rx_irqvs) {
		device_printf(sc->dev,
		    "Unable to allocate RX irqv memory for VF-%d's %d vectors\n",
		    vfnum, vf->num_irq_vectors);
		error = ENOMEM;
		goto free_txirqvs;
	}

	/* Assign VF interrupts from PF space */
	if (!(vf->vf_imap =
	    (u16 *)malloc(sizeof(u16) * vf->num_irq_vectors,
	    M_ICE, M_NOWAIT))) {
		device_printf(dev, "Unable to allocate VF-%d imap memory\n", vfnum);
		error = ENOMEM;
		goto free_rxirqvs;
	}
	error = ice_resmgr_assign_contiguous(&sc->dev_imgr, vf->vf_imap, vf->num_irq_vectors);
	if (error) {
		device_printf(dev, "Unable to assign VF-%d interrupt mapping: %s\n",
		    vfnum, ice_err_str(error));
		goto free_imap;
	}

	if (nvlist_exists_binary(params, "mac-addr")) {
		mac = nvlist_get_binary(params, "mac-addr", &size);
		memcpy(vf->mac, mac, ETHER_ADDR_LEN);

		if (nvlist_get_bool(params, "allow-set-mac"))
			vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->vf_flags |= VF_FLAG_SET_MAC_CAP;

	if (nvlist_get_bool(params, "mac-anti-spoof"))
		vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;

	if (nvlist_get_bool(params, "allow-promisc"))
		vf->vf_flags |= VF_FLAG_PROMISC_CAP;

	vsi->mirror_src_vsi = nvlist_get_number(params, "mirror-src-vsi");

	/* Administrative limits enforced in the virtchnl add-filter paths */
	vf->vlan_limit = nvlist_get_number(params, "max-vlan-allowed");
	vf->mac_filter_limit = nvlist_get_number(params, "max-mac-filters");

	vf->vf_flags |= VF_FLAG_VLAN_CAP;

	/* Create and setup VSI in HW */
	error = ice_initialize_vsi(vsi);
	if (error) {
		device_printf(sc->dev, "Unable to initialize VF %d VSI: %s\n",
		    vfnum, ice_err_str(error));
		goto release_imap;
	}

	/* Add the broadcast address */
	error = ice_add_vsi_mac_filter(vsi, broadcastaddr);
	if (error) {
		device_printf(sc->dev, "Unable to add broadcast filter VF %d VSI: %s\n",
		    vfnum, ice_err_str(error));
		goto release_imap;
	}

	ice_iov_ready_vf(sc, vf);

	return (0);

	/* Unwind in reverse order of acquisition */
release_imap:
	ice_resmgr_release_map(&sc->dev_imgr, vf->vf_imap,
	    vf->num_irq_vectors);
free_imap:
	free(vf->vf_imap, M_ICE);
	vf->vf_imap = NULL;
free_rxirqvs:
	free(vf->rx_irqvs, M_ICE);
	vf->rx_irqvs = NULL;
free_txirqvs:
	free(vf->tx_irqvs, M_ICE);
	vf->tx_irqvs = NULL;
free_rxqs:
	free(vsi->rx_queues, M_ICE);
	vsi->rx_queues = NULL;
free_txqs:
	free(vsi->tx_queues, M_ICE);
	vsi->tx_queues = NULL;
release_vsi:
	ice_release_vsi(vsi);
	vf->vsi = NULL;
	return (error);
}

/**
 * ice_iov_uninit - Called by the OS when VFs are destroyed
 * @sc: device softc structure
 *
 * Frees all per-VF software resources (interrupt maps, queue trackers,
 * VSIs) and then releases the VF tracking array itself. Safe to call
 * when some VFs were only partially configured; each pointer is checked
 * and NULLed before moving on.
 */
void
ice_iov_uninit(struct ice_softc *sc)
{
	struct ice_vf *vf;
	struct ice_vsi *vsi;

	/* Release per-VF resources */
	for (int i = 0; i < sc->num_vfs; i++) {
		vf = &sc->vfs[i];
		vsi = vf->vsi;

		/* Free VF interrupt reservation */
		if (vf->vf_imap) {
			free(vf->vf_imap, M_ICE);
			vf->vf_imap = NULL;
		}

		/* Free queue interrupt mapping trackers */
		if (vf->tx_irqvs) {
			free(vf->tx_irqvs, M_ICE);
			vf->tx_irqvs = NULL;
		}
		if (vf->rx_irqvs) {
			free(vf->rx_irqvs, M_ICE);
			vf->rx_irqvs = NULL;
		}

		/* A VF that failed early in ice_iov_add_vf() has no VSI */
		if (!vsi)
			continue;

		/* Free VSI queues */
		if (vsi->tx_queues) {
			free(vsi->tx_queues, M_ICE);
			vsi->tx_queues = NULL;
		}
		if (vsi->rx_queues) {
			free(vsi->rx_queues, M_ICE);
			vsi->rx_queues = NULL;
		}

		ice_release_vsi(vsi);
		vf->vsi = NULL;
	}

	/* Release memory used for VF tracking */
	if (sc->vfs) {
		free(sc->vfs, M_ICE);
		sc->vfs = NULL;
	}
	sc->num_vfs = 0;
}

/**
 * ice_iov_handle_vflr - Process VFLR event
 * @sc: device softc structure
 *
 * Identifies which VFs have been reset and re-configures
 * them.
 */
void
ice_iov_handle_vflr(struct ice_softc *sc)
{
	struct ice_hw *hw = &sc->hw;
	struct ice_vf *vf;
	u32 reg, reg_idx, bit_idx;

	for (int i = 0; i < sc->num_vfs; i++) {
		vf = &sc->vfs[i];

		/* Each VF owns one bit in the 32-bit-wide GLGEN_VFLRSTAT
		 * register array, indexed by its absolute (PF-base-relative)
		 * VF number.
		 */
		reg_idx = (hw->func_caps.vf_base_id + vf->vf_num) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf->vf_num) % 32;
		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			ice_reset_vf(sc, vf, false);
	}
}

/**
 * ice_iov_ready_vf - Setup VF interrupts and mark it as ready
 * @sc: device softc structure
 * @vf: driver's VF structure for the VF to update
 *
 * Clears VF reset triggering bit, sets up the PF<->VF interrupt
 * mapping and marks the VF as active in the HW so that the VF
 * driver can use it.
 */
static void
ice_iov_ready_vf(struct ice_softc *sc, struct ice_vf *vf)
{
	struct ice_hw *hw = &sc->hw;
	u32 reg;

	/* Clear the triggering bit */
	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_num));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_num), reg);

	/* Setup VF interrupt allocation and mapping */
	ice_iov_setup_intr_mapping(sc, vf);

	/* Indicate to the VF that reset is done */
	wr32(hw, VFGEN_RSTAT(vf->vf_num), VIRTCHNL_VFR_VFACTIVE);

	/* Ensure all the writes above have reached the device */
	ice_flush(hw);
}

/**
 * ice_reset_vf - Perform a hardware reset (VFR) on a VF
 * @sc: device softc structure
 * @vf: driver's VF structure for VF to be reset
 * @trigger_vflr: trigger a reset or only handle already executed reset
 *
 * Performs a VFR for the given VF. This function busy waits until the
 * reset completes in the HW, notifies the VF that the reset is done
 * by setting a bit in a HW register, then returns.
 *
 * @remark This also sets up the PF<->VF interrupt mapping and allocations in
 * the hardware after the hardware reset is finished, via
 * ice_iov_setup_intr_mapping()
 */
static void
ice_reset_vf(struct ice_softc *sc, struct ice_vf *vf, bool trigger_vflr)
{
	u16 global_vf_num, reg_idx, bit_idx;
	struct ice_hw *hw = &sc->hw;
	int status;
	u32 reg;
	int i;

	/* Absolute VF number across the whole device, not just this PF */
	global_vf_num = vf->vf_num + hw->func_caps.vf_base_id;

	if (trigger_vflr) {
		/* Set the software-reset trigger bit; it is cleared again
		 * later by ice_iov_ready_vf().
		 */
		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_num));
		reg |= VPGEN_VFRTRIG_VFSWR_M;
		wr32(hw, VPGEN_VFRTRIG(vf->vf_num), reg);
	}

	/* clear the VFLR bit for the VF in a GLGEN_VFLRSTAT register */
	reg_idx = (global_vf_num) / 32;
	bit_idx = (global_vf_num) % 32;
	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	ice_flush(hw);

	/* Wait until there are no pending PCI transactions */
	wr32(hw, PF_PCI_CIAA,
	    ICE_PCIE_DEV_STATUS | (global_vf_num << PF_PCI_CIAA_VF_NUM_S));

	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
		reg = rd32(hw, PF_PCI_CIAD);
		if (!(reg & PCIEM_STA_TRANSACTION_PND))
			break;

		DELAY(ICE_PCI_CIAD_WAIT_DELAY_US);
	}
	if (i == ICE_PCI_CIAD_WAIT_COUNT)
		device_printf(sc->dev,
		    "VF-%d PCI transactions stuck\n", vf->vf_num);

	/* Disable TX queues, which is required during VF reset */
	status = ice_dis_vsi_txq(hw->port_info, vf->vsi->idx, 0, 0, NULL, NULL,
	    NULL, ICE_VF_RESET, vf->vf_num, NULL);
	if (status)
		device_printf(sc->dev,
		    "%s: Failed to disable LAN Tx queues: err %s aq_err %s\n",
		    __func__, ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));

	/* Then check for the VF reset to finish in HW */
	for (i = 0; i < ICE_VPGEN_VFRSTAT_WAIT_COUNT; i++) {
		reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_num));
		if ((reg & VPGEN_VFRSTAT_VFRD_M))
			break;

		DELAY(ICE_VPGEN_VFRSTAT_WAIT_DELAY_US);
	}
	if (i == ICE_VPGEN_VFRSTAT_WAIT_COUNT)
		device_printf(sc->dev,
		    "VF-%d Reset is stuck\n", vf->vf_num);

	/* Re-arm interrupts and mark the VF active again */
	ice_iov_ready_vf(sc, vf);
}

/**
 * ice_vc_get_vf_res_msg - Handle VIRTCHNL_OP_GET_VF_RESOURCES msg from VF
 * @sc: device private structure
 * @vf: VF tracking structure
 * @msg_buf: raw message buffer from the VF
 *
 * Receives a message from the VF listing its supported capabilities, and
 * replies to the VF with information about what resources the PF has
 * allocated for the VF.
 *
 * @remark This always replies to the VF with a success status; it does not
 * fail. It's up to the VF driver to reject or complain about the PF's response.
 */
static void
ice_vc_get_vf_res_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
{
	struct ice_hw *hw = &sc->hw;
	struct virtchnl_vf_resource *vf_res;
	struct virtchnl_vsi_resource *vsi_res;
	u16 vf_res_len;
	u32 vf_caps;

	/* XXX: Only support one VSI per VF, so this size doesn't need adjusting */
	vf_res_len = sizeof(struct virtchnl_vf_resource);
	vf_res = (struct virtchnl_vf_resource *)malloc(vf_res_len, M_ICE,
	    M_WAITOK | M_ZERO);

	vf_res->num_vsis = 1;
	vf_res->num_queue_pairs = vf->vsi->num_tx_queues;
	/* One extra vector for the mailbox/misc interrupt */
	vf_res->max_vectors = vf_res->num_queue_pairs + 1;

	vf_res->rss_key_size = ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE;
	vf_res->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
	vf_res->max_mtu = 0;

	/* Start from the base-mode offload set, then add any optional
	 * capabilities the VF advertised and the PF supports.
	 */
	vf_res->vf_cap_flags = VF_BASE_MODE_OFFLOADS;
	if (msg_buf != NULL) {
		vf_caps = *((u32 *)(msg_buf));

		if (vf_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
			vf_res->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;

		if (vf_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
			vf_res->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
	}

	vsi_res = &vf_res->vsi_res[0];
	vsi_res->vsi_id = vf->vsi->idx;
	vsi_res->num_queue_pairs = vf->vsi->num_tx_queues;
	vsi_res->vsi_type = VIRTCHNL_VSI_SRIOV;
	vsi_res->qset_handle = 0;
	/* Only report a default MAC if the administrator assigned one */
	if (!ETHER_IS_ZERO(vf->mac))
		memcpy(vsi_res->default_mac_addr, vf->mac, ETHER_ADDR_LEN);

	ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_GET_VF_RESOURCES,
	    VIRTCHNL_STATUS_SUCCESS, (u8 *)vf_res, vf_res_len, NULL);

	free(vf_res, M_ICE);
}

/**
 * ice_vc_version_msg - Handle VIRTCHNL_OP_VERSION msg from VF
 * @sc: device private structure
 * @vf: VF tracking structure
 * @msg_buf: raw message buffer from the VF
 *
 * Receives a version message from the VF, and responds to the VF with
 * the version number that the PF will use.
 *
 * @remark This always replies to the VF with a success status; it does not
 * fail.
 */
static void
ice_vc_version_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
{
	struct virtchnl_version_info *recv_vf_version;
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;

	recv_vf_version = (struct virtchnl_version_info *)msg_buf;

	/* VFs running the 1.0 API expect to get 1.0 back */
	if (VF_IS_V10(recv_vf_version)) {
		vf->version.major = 1;
		vf->version.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	} else {
		vf->version.major = VIRTCHNL_VERSION_MAJOR;
		vf->version.minor = VIRTCHNL_VERSION_MINOR;

		/* A mismatch is not fatal; just log it for diagnostics */
		if ((recv_vf_version->major != VIRTCHNL_VERSION_MAJOR) ||
		    (recv_vf_version->minor != VIRTCHNL_VERSION_MINOR))
			device_printf(dev,
			    "%s: VF-%d requested version (%d.%d) differs from PF version (%d.%d)\n",
			    __func__, vf->vf_num,
			    recv_vf_version->major, recv_vf_version->minor,
			    VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
	}

	ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_VERSION,
	    VIRTCHNL_STATUS_SUCCESS, (u8 *)&vf->version, sizeof(vf->version),
	    NULL);
}

/**
 * ice_vf_validate_mac - Validate MAC address before adding it
 * @vf: VF tracking structure
 * @addr: MAC address to validate
 *
 * Validate a MAC address before adding it to a VF during the handling
 * of a VIRTCHNL_OP_ADD_ETH_ADDR operation. Notably, this also checks if
 * the VF is allowed to set its own arbitrary MAC addresses.
 *
 * Returns 0 if MAC address is valid for the given vf,
 * EINVAL for a zero/broadcast address, or EPERM when the VF may not
 * program this unicast address.
 */
static int
ice_vf_validate_mac(struct ice_vf *vf, const uint8_t *addr)
{

	if (ETHER_IS_ZERO(addr) || ETHER_IS_BROADCAST(addr))
		return (EINVAL);

	/*
	 * If the VF is not allowed to change its MAC address, don't let it
	 * set a MAC filter for an address that is not a multicast address and
	 * is not its assigned MAC.
	 */
	if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
	    !(ETHER_IS_MULTICAST(addr) || !bcmp(addr, vf->mac, ETHER_ADDR_LEN)))
		return (EPERM);

	return (0);
}

/**
 * ice_vc_add_eth_addr_msg - Handle VIRTCHNL_OP_ADD_ETH_ADDR msg from VF
 * @sc: device private structure
 * @vf: VF tracking structure
 * @msg_buf: raw message buffer from the VF
 *
 * Receives a list of MAC addresses from the VF and adds those addresses
 * to the VSI's filter list.
 */
static void
ice_vc_add_eth_addr_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
{
	enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_ether_addr_list *addr_list;
	struct ice_hw *hw = &sc->hw;
	u16 added_addr_cnt = 0;
	int error = 0;

	addr_list = (struct virtchnl_ether_addr_list *)msg_buf;

	/* Reject the whole request up front if it would exceed the
	 * administrator-configured MAC filter limit for this VF.
	 */
	if (addr_list->num_elements >
	    (vf->mac_filter_limit - vf->mac_filter_cnt)) {
		v_status = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		goto done;
	}

	for (int i = 0; i < addr_list->num_elements; i++) {
		u8 *addr = addr_list->list[i].addr;

		/* The type flag is currently ignored; every MAC address is
		 * treated as the LEGACY type
		 */

		error = ice_vf_validate_mac(vf, addr);
		if (error == EPERM) {
			device_printf(sc->dev,
			    "%s: VF-%d: Not permitted to add MAC addr for VSI %d\n",
			    __func__, vf->vf_num, vf->vsi->idx);
			v_status = VIRTCHNL_STATUS_ERR_PARAM;
			continue;
		} else if (error) {
			device_printf(sc->dev,
			    "%s: VF-%d: Did not add invalid MAC addr for VSI %d\n",
			    __func__, vf->vf_num, vf->vsi->idx);
			v_status = VIRTCHNL_STATUS_ERR_PARAM;
			continue;
		}

		error = ice_add_vsi_mac_filter(vf->vsi, addr);
		if (error) {
			device_printf(sc->dev,
			    "%s: VF-%d: Error adding MAC addr for VSI %d\n",
			    __func__, vf->vf_num, vf->vsi->idx);
			v_status = VIRTCHNL_STATUS_ERR_PARAM;
			continue;
		}
		/* Don't count VF's MAC against its MAC filter limit */
		if (memcmp(addr, vf->mac, ETHER_ADDR_LEN))
			added_addr_cnt++;
	}

	vf->mac_filter_cnt += added_addr_cnt;

done:
	/* Always reply; a partial failure is reported as ERR_PARAM */
	ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_ADD_ETH_ADDR,
	    v_status, NULL, 0, NULL);
}

/**
 * ice_vc_del_eth_addr_msg - Handle VIRTCHNL_OP_DEL_ETH_ADDR msg from VF
 * @sc: device private structure
 * @vf: VF tracking structure
 * @msg_buf: raw message buffer from the VF
 *
 * Receives a list of MAC addresses from the VF and removes those addresses
 * from the VSI's filter list.
 */
static void
ice_vc_del_eth_addr_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
{
	enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_ether_addr_list *addr_list;
	struct ice_hw *hw = &sc->hw;
	u16 deleted_addr_cnt = 0;
	int error = 0;

	addr_list = (struct virtchnl_ether_addr_list *)msg_buf;

	for (int i = 0; i < addr_list->num_elements; i++) {
		error = ice_remove_vsi_mac_filter(vf->vsi, addr_list->list[i].addr);
		if (error) {
			device_printf(sc->dev,
			    "%s: VF-%d: Error removing MAC addr for VSI %d\n",
			    __func__, vf->vf_num, vf->vsi->idx);
			v_status = VIRTCHNL_STATUS_ERR_PARAM;
			continue;
		}
		/* Don't count VF's MAC against its MAC filter limit */
		if (memcmp(addr_list->list[i].addr, vf->mac, ETHER_ADDR_LEN))
			deleted_addr_cnt++;
	}

	/* Clamp at zero so the unsigned counter cannot underflow */
	if (deleted_addr_cnt >= vf->mac_filter_cnt)
		vf->mac_filter_cnt = 0;
	else
		vf->mac_filter_cnt -= deleted_addr_cnt;

	ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_DEL_ETH_ADDR,
	    v_status, NULL, 0, NULL);
}

/**
 * ice_vc_add_vlan_msg - Handle VIRTCHNL_OP_ADD_VLAN msg from VF
 * @sc: PF's softc structure
 * @vf: VF tracking structure
 * @msg_buf: message buffer from VF
 *
 * Adds the VLANs in msg_buf to the VF's VLAN filter list.
 */
static void
ice_vc_add_vlan_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
{
	struct ice_hw *hw = &sc->hw;
	struct virtchnl_vlan_filter_list *vlan_list;
	int status = 0;
	enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
	struct ice_vsi *vsi = vf->vsi;

	vlan_list = (struct virtchnl_vlan_filter_list *)msg_buf;

	/* The VF may only operate on its own VSI */
	if (vlan_list->vsi_id != vsi->idx) {
		device_printf(sc->dev,
		    "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n",
		    vf->vf_num, vsi->idx, vlan_list->vsi_id);
		v_status = VIRTCHNL_STATUS_ERR_PARAM;
		goto done;
	}

	/* Enforce the administrator-configured VLAN filter limit */
	if (vlan_list->num_elements > (vf->vlan_limit - vf->vlan_cnt)) {
		v_status = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		goto done;
	}

	status = ice_add_vlan_hw_filters(vsi, vlan_list->vlan_id,
	    vlan_list->num_elements);
	if (status) {
		device_printf(sc->dev,
		    "VF-%d: Failure adding VLANs to VSI %d, err %s aq_err %s\n",
		    vf->vf_num, vsi->idx, ice_status_str(status),
		    ice_aq_str(sc->hw.adminq.sq_last_status));
		v_status = ice_iov_err_to_virt_err(status);
		goto done;
	}

	vf->vlan_cnt += vlan_list->num_elements;

done:
	ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_ADD_VLAN,
	    v_status, NULL, 0, NULL);
}

/**
 * ice_vc_del_vlan_msg - Handle VIRTCHNL_OP_DEL_VLAN msg from VF
 * @sc: PF's softc structure
 * @vf: VF tracking structure
 * @msg_buf: message buffer from VF
 *
 * Removes the VLANs in msg_buf from the VF's VLAN filter list.
 */
static void
ice_vc_del_vlan_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
{
	struct ice_hw *hw = &sc->hw;
	struct virtchnl_vlan_filter_list *vlan_list;
	int status = 0;
	enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
	struct ice_vsi *vsi = vf->vsi;

	vlan_list = (struct virtchnl_vlan_filter_list *)msg_buf;

	/* The VF may only operate on its own VSI */
	if (vlan_list->vsi_id != vsi->idx) {
		device_printf(sc->dev,
		    "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n",
		    vf->vf_num, vsi->idx, vlan_list->vsi_id);
		v_status = VIRTCHNL_STATUS_ERR_PARAM;
		goto done;
	}

	status = ice_remove_vlan_hw_filters(vsi, vlan_list->vlan_id,
	    vlan_list->num_elements);
	if (status) {
		device_printf(sc->dev,
		    "VF-%d: Failure deleting VLANs from VSI %d, err %s aq_err %s\n",
		    vf->vf_num, vsi->idx, ice_status_str(status),
		    ice_aq_str(sc->hw.adminq.sq_last_status));
		v_status = ice_iov_err_to_virt_err(status);
		goto done;
	}

	/* Clamp at zero so the unsigned counter cannot underflow */
	if (vlan_list->num_elements >= vf->vlan_cnt)
		vf->vlan_cnt = 0;
	else
		vf->vlan_cnt -= vlan_list->num_elements;

done:
	ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_DEL_VLAN,
	    v_status, NULL, 0, NULL);
}

/**
 * ice_vc_isvalid_ring_len - Check to see if a descriptor ring length is valid
 * @ring_len: length of ring
 *
 * Check whether a ring size value is valid.
 *
 * @returns true if given ring size is valid: within [ICE_MIN_DESC_COUNT,
 * ICE_MAX_DESC_COUNT] and a multiple of ICE_DESC_COUNT_INCR
 */
static bool
ice_vc_isvalid_ring_len(u16 ring_len)
{
	return (ring_len >= ICE_MIN_DESC_COUNT &&
	    ring_len <= ICE_MAX_DESC_COUNT &&
	    !(ring_len % ICE_DESC_COUNT_INCR));
}

/**
 * ice_vc_cfg_vsi_qs_msg - Handle VIRTCHNL_OP_CONFIG_VSI_QUEUES msg from VF
 * @sc: PF's softc structure
 * @vf: VF tracking structure
 * @msg_buf: message buffer from VF
 */
static void
ice_vc_cfg_vsi_qs_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
{
	device_t dev = sc->dev;
	struct ice_hw *hw = &sc->hw;
	struct virtchnl_vsi_queue_config_info *vqci;
	struct virtchnl_queue_pair_info *vqpi;
	enum virtchnl_status_code status = VIRTCHNL_STATUS_SUCCESS;
	struct ice_vsi *vsi = vf->vsi;
	struct ice_tx_queue *txq;
	struct ice_rx_queue *rxq;
	int i, error = 0;

	vqci = (struct virtchnl_vsi_queue_config_info *)msg_buf;

	/* The VF may not configure more queue pairs than it was allotted */
	if (vqci->num_queue_pairs > vf->vsi->num_tx_queues &&
	    vqci->num_queue_pairs > vf->vsi->num_rx_queues) {
		status = VIRTCHNL_STATUS_ERR_PARAM;
		goto done;
	}

	/* Quiesce the queues before reprogramming them */
	ice_vsi_disable_tx(vf->vsi);
	ice_control_all_rx_queues(vf->vsi, false);

	/*
	 * Clear TX and RX queues config in case VF
	 * requests different number of queues.
993 */ 994 for (i = 0; i < vsi->num_tx_queues; i++) { 995 txq = &vsi->tx_queues[i]; 996 997 txq->desc_count = 0; 998 txq->tx_paddr = 0; 999 txq->tc = 0; 1000 } 1001 1002 for (i = 0; i < vsi->num_rx_queues; i++) { 1003 rxq = &vsi->rx_queues[i]; 1004 1005 rxq->desc_count = 0; 1006 rxq->rx_paddr = 0; 1007 } 1008 1009 vqpi = vqci->qpair; 1010 for (i = 0; i < vqci->num_queue_pairs; i++, vqpi++) { 1011 /* Initial parameter validation */ 1012 if (vqpi->txq.vsi_id != vf->vsi->idx || 1013 vqpi->rxq.vsi_id != vf->vsi->idx || 1014 vqpi->txq.queue_id != vqpi->rxq.queue_id || 1015 vqpi->txq.headwb_enabled || 1016 vqpi->rxq.splithdr_enabled || 1017 vqpi->rxq.crc_disable || 1018 !(ice_vc_isvalid_ring_len(vqpi->txq.ring_len)) || 1019 !(ice_vc_isvalid_ring_len(vqpi->rxq.ring_len))) { 1020 status = VIRTCHNL_STATUS_ERR_PARAM; 1021 goto done; 1022 } 1023 1024 /* Copy parameters into VF's queue/VSI structs */ 1025 txq = &vsi->tx_queues[vqpi->txq.queue_id]; 1026 1027 txq->desc_count = vqpi->txq.ring_len; 1028 txq->tx_paddr = vqpi->txq.dma_ring_addr; 1029 txq->q_handle = vqpi->txq.queue_id; 1030 txq->tc = 0; 1031 1032 rxq = &vsi->rx_queues[vqpi->rxq.queue_id]; 1033 1034 rxq->desc_count = vqpi->rxq.ring_len; 1035 rxq->rx_paddr = vqpi->rxq.dma_ring_addr; 1036 vsi->mbuf_sz = vqpi->rxq.databuffer_size; 1037 } 1038 1039 /* Configure TX queues in HW */ 1040 error = ice_cfg_vsi_for_tx(vsi); 1041 if (error) { 1042 device_printf(dev, 1043 "VF-%d: Unable to configure VSI for Tx: %s\n", 1044 vf->vf_num, ice_err_str(error)); 1045 status = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; 1046 goto done; 1047 } 1048 1049 /* Configure RX queues in HW */ 1050 error = ice_cfg_vsi_for_rx(vsi); 1051 if (error) { 1052 device_printf(dev, 1053 "VF-%d: Unable to configure VSI for Rx: %s\n", 1054 vf->vf_num, ice_err_str(error)); 1055 status = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; 1056 ice_vsi_disable_tx(vsi); 1057 goto done; 1058 } 1059 1060 done: 1061 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_VSI_QUEUES, 
1062 status, NULL, 0, NULL); 1063 } 1064 1065 /** 1066 * ice_vc_cfg_rss_key_msg - Handle VIRTCHNL_OP_CONFIG_RSS_KEY msg from VF 1067 * @sc: PF's softc structure 1068 * @vf: VF tracking structure 1069 * @msg_buf: message buffer from VF 1070 * 1071 * Sets the RSS key for the given VF, using the contents of msg_buf. 1072 */ 1073 static void 1074 ice_vc_cfg_rss_key_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf) 1075 { 1076 struct ice_aqc_get_set_rss_keys keydata = 1077 { .standard_rss_key = {0}, .extended_hash_key = {0} }; 1078 struct ice_hw *hw = &sc->hw; 1079 struct virtchnl_rss_key *vrk; 1080 int status = 0; 1081 enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS; 1082 struct ice_vsi *vsi = vf->vsi; 1083 1084 vrk = (struct virtchnl_rss_key *)msg_buf; 1085 1086 if (vrk->vsi_id != vsi->idx) { 1087 device_printf(sc->dev, 1088 "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n", 1089 vf->vf_num, vsi->idx, vrk->vsi_id); 1090 v_status = VIRTCHNL_STATUS_ERR_PARAM; 1091 goto done; 1092 } 1093 1094 if ((vrk->key_len > 1095 (ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE + 1096 ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE)) || 1097 vrk->key_len == 0) { 1098 v_status = VIRTCHNL_STATUS_ERR_PARAM; 1099 goto done; 1100 } 1101 1102 memcpy(&keydata, vrk->key, vrk->key_len); 1103 1104 status = ice_aq_set_rss_key(hw, vsi->idx, &keydata); 1105 if (status) { 1106 device_printf(sc->dev, 1107 "ice_aq_set_rss_key status %s, error %s\n", 1108 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 1109 v_status = ice_iov_err_to_virt_err(status); 1110 goto done; 1111 } 1112 1113 done: 1114 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_RSS_KEY, 1115 v_status, NULL, 0, NULL); 1116 } 1117 1118 /** 1119 * ice_vc_cfg_rss_lut_msg - Handle VIRTCHNL_OP_CONFIG_RSS_LUT msg from VF 1120 * @sc: PF's softc structure 1121 * @vf: VF tracking structure 1122 * @msg_buf: message buffer from VF 1123 * 1124 * Adds the LUT from the VF in msg_buf to the PF via an admin 
queue call. 1125 */ 1126 static void 1127 ice_vc_cfg_rss_lut_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf) 1128 { 1129 struct ice_hw *hw = &sc->hw; 1130 struct virtchnl_rss_lut *vrl; 1131 int status = 0; 1132 enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS; 1133 struct ice_aq_get_set_rss_lut_params lut_params = {}; 1134 struct ice_vsi *vsi = vf->vsi; 1135 1136 vrl = (struct virtchnl_rss_lut *)msg_buf; 1137 1138 if (vrl->vsi_id != vsi->idx) { 1139 device_printf(sc->dev, 1140 "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n", 1141 vf->vf_num, vsi->idx, vrl->vsi_id); 1142 v_status = VIRTCHNL_STATUS_ERR_PARAM; 1143 goto done; 1144 } 1145 1146 if (vrl->lut_entries > ICE_VSIQF_HLUT_ARRAY_SIZE) { 1147 v_status = VIRTCHNL_STATUS_ERR_PARAM; 1148 goto done; 1149 } 1150 1151 lut_params.vsi_handle = vsi->idx; 1152 lut_params.lut_size = vsi->rss_table_size; 1153 lut_params.lut_type = vsi->rss_lut_type; 1154 lut_params.lut = vrl->lut; 1155 lut_params.global_lut_id = 0; 1156 1157 status = ice_aq_set_rss_lut(hw, &lut_params); 1158 if (status) { 1159 device_printf(sc->dev, 1160 "VF-%d: Cannot set RSS lut, err %s aq_err %s\n", 1161 vf->vf_num, ice_status_str(status), 1162 ice_aq_str(hw->adminq.sq_last_status)); 1163 v_status = ice_iov_err_to_virt_err(status); 1164 } 1165 1166 done: 1167 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_RSS_LUT, 1168 v_status, NULL, 0, NULL); 1169 } 1170 1171 /** 1172 * ice_vc_set_rss_hena_msg - Handle VIRTCHNL_OP_SET_RSS_HENA msg from VF 1173 * @sc: PF's softc structure 1174 * @vf: VF tracking structure 1175 * @msg_buf: message buffer from VF 1176 * 1177 * Adds the VF's hena (hash enable) bits as flow types to the PF's RSS flow 1178 * type list. 
1179 */ 1180 static void 1181 ice_vc_set_rss_hena_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf) 1182 { 1183 struct ice_hw *hw = &sc->hw; 1184 struct virtchnl_rss_hena *vrh; 1185 int status = 0; 1186 enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS; 1187 struct ice_vsi *vsi = vf->vsi; 1188 1189 MPASS(vsi != NULL); 1190 1191 vrh = (struct virtchnl_rss_hena *)msg_buf; 1192 1193 /* 1194 * Remove existing configuration to make sure only requested 1195 * config is applied and allow VFs to disable RSS completly. 1196 */ 1197 status = ice_rem_vsi_rss_cfg(hw, vsi->idx); 1198 if (vrh->hena) { 1199 /* 1200 * Problem with removing config is not fatal, when new one 1201 * is requested. Warn about it but try to apply new config 1202 * anyway. 1203 */ 1204 if (status) 1205 device_printf(sc->dev, 1206 "ice_rem_vsi_rss_cfg status %s, error %s\n", 1207 ice_status_str(status), 1208 ice_aq_str(hw->adminq.sq_last_status)); 1209 status = ice_add_avf_rss_cfg(hw, vsi->idx, vrh->hena); 1210 if (status) 1211 device_printf(sc->dev, 1212 "ice_add_avf_rss_cfg status %s, error %s\n", 1213 ice_status_str(status), 1214 ice_aq_str(hw->adminq.sq_last_status)); 1215 } 1216 v_status = ice_iov_err_to_virt_err(status); 1217 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_SET_RSS_HENA, 1218 v_status, NULL, 0, NULL); 1219 } 1220 1221 /** 1222 * ice_vc_enable_queues_msg - Handle VIRTCHNL_OP_ENABLE_QUEUES msg from VF 1223 * @sc: PF's softc structure 1224 * @vf: VF tracking structure 1225 * @msg_buf: message buffer from VF 1226 * 1227 * Enables VF queues selected in msg_buf for Tx/Rx traffic. 1228 * 1229 * @remark Only actually operates on Rx queues; Tx queues are enabled in 1230 * CONFIG_VSI_QUEUES message handler. 
1231 */ 1232 static void 1233 ice_vc_enable_queues_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf) 1234 { 1235 struct ice_hw *hw = &sc->hw; 1236 struct virtchnl_queue_select *vqs; 1237 enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS; 1238 struct ice_vsi *vsi = vf->vsi; 1239 int bit, error = 0; 1240 1241 vqs = (struct virtchnl_queue_select *)msg_buf; 1242 1243 if (vqs->vsi_id != vsi->idx) { 1244 device_printf(sc->dev, 1245 "%s: VF-%d: Message has invalid VSI ID (expected %d, got %d)\n", 1246 __func__, vf->vf_num, vsi->idx, vqs->vsi_id); 1247 v_status = VIRTCHNL_STATUS_ERR_PARAM; 1248 goto done; 1249 } 1250 1251 if (!vqs->rx_queues && !vqs->tx_queues) { 1252 device_printf(sc->dev, 1253 "%s: VF-%d: message queue masks are empty\n", 1254 __func__, vf->vf_num); 1255 v_status = VIRTCHNL_STATUS_ERR_PARAM; 1256 goto done; 1257 } 1258 1259 /* Validate rx_queue mask */ 1260 bit = fls(vqs->rx_queues); 1261 if (bit > vsi->num_rx_queues) { 1262 device_printf(sc->dev, 1263 "%s: VF-%d: message's rx_queues map (0x%08x) has invalid bit set (%d)\n", 1264 __func__, vf->vf_num, vqs->rx_queues, bit); 1265 v_status = VIRTCHNL_STATUS_ERR_PARAM; 1266 goto done; 1267 } 1268 1269 /* Tx ring enable is handled in an earlier message. */ 1270 for_each_set_bit(bit, &vqs->rx_queues, 32) { 1271 error = ice_control_rx_queue(vsi, bit, true); 1272 if (error) { 1273 device_printf(sc->dev, 1274 "Unable to enable Rx ring %d for receive: %s\n", 1275 bit, ice_err_str(error)); 1276 v_status = VIRTCHNL_STATUS_ERR_PARAM; 1277 goto done; 1278 } 1279 } 1280 1281 done: 1282 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_ENABLE_QUEUES, 1283 v_status, NULL, 0, NULL); 1284 } 1285 1286 /** 1287 * ice_vc_disable_queues_msg - Handle VIRTCHNL_OP_DISABLE_QUEUES msg 1288 * @sc: PF's softc structure 1289 * @vf: VF tracking structure 1290 * @msg_buf: message buffer from VF 1291 * 1292 * Disables all VF queues for the VF's VSI. 
1293 * 1294 * @remark Unlike the ENABLE_QUEUES handler, this operates on both 1295 * Tx and Rx queues 1296 */ 1297 static void 1298 ice_vc_disable_queues_msg(struct ice_softc *sc, struct ice_vf *vf, 1299 u8 *msg_buf __unused) 1300 { 1301 struct ice_hw *hw = &sc->hw; 1302 enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS; 1303 struct ice_vsi *vsi = vf->vsi; 1304 int error = 0; 1305 1306 error = ice_control_all_rx_queues(vsi, false); 1307 if (error) { 1308 device_printf(sc->dev, 1309 "Unable to disable Rx rings for transmit: %s\n", 1310 ice_err_str(error)); 1311 v_status = VIRTCHNL_STATUS_ERR_PARAM; 1312 goto done; 1313 } 1314 1315 error = ice_vsi_disable_tx(vsi); 1316 if (error) { 1317 /* Already prints an error message */ 1318 v_status = VIRTCHNL_STATUS_ERR_PARAM; 1319 } 1320 1321 done: 1322 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_DISABLE_QUEUES, 1323 v_status, NULL, 0, NULL); 1324 } 1325 1326 /** 1327 * ice_vc_cfg_irq_map_msg - Handle VIRTCHNL_OP_CFG_IRQ_MAP msg from VF 1328 * @sc: PF's softc structure 1329 * @vf: VF tracking structure 1330 * @msg_buf: message buffer from VF 1331 * 1332 * Configures the interrupt vectors described in the message in msg_buf. The 1333 * VF needs to send this message during init, so that queues can be allowed 1334 * to generate interrupts. 
 */
static void
ice_vc_cfg_irq_map_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
{
#define ICE_VIRTCHNL_QUEUE_MAP_SIZE	16
	struct ice_hw *hw = &sc->hw;
	struct virtchnl_irq_map_info *vimi;
	struct virtchnl_vector_map *vvm;
	enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
	struct ice_vsi *vsi = vf->vsi;
	u16 vector;

	vimi = (struct virtchnl_irq_map_info *)msg_buf;

	/* The VF cannot map more vectors than were allocated to it. */
	if (vimi->num_vectors > vf->num_irq_vectors) {
		device_printf(sc->dev,
		    "%s: VF-%d: message has more vectors (%d) than configured for VF (%d)\n",
		    __func__, vf->vf_num, vimi->num_vectors, vf->num_irq_vectors);
		v_status = VIRTCHNL_STATUS_ERR_PARAM;
		goto done;
	}

	vvm = vimi->vecmap;
	/* Save off information from message */
	for (int i = 0; i < vimi->num_vectors; i++, vvm++) {
		struct ice_tx_queue *txq;
		struct ice_rx_queue *rxq;
		int bit;

		/* Each vector map must target the VF's own VSI. */
		if (vvm->vsi_id != vf->vsi->idx) {
			device_printf(sc->dev,
			    "%s: VF-%d: message's VSI ID (%d) does not match VF's (%d) for vector %d\n",
			    __func__, vf->vf_num, vvm->vsi_id, vf->vsi->idx, i);
			v_status = VIRTCHNL_STATUS_ERR_PARAM;
			goto done;
		}

		/* vvm->vector_id is relative to VF space */
		vector = vvm->vector_id;

		/* vector is used to index tx_irqvs[]/rx_irqvs[] below, so
		 * bound it against the VF's allocation (VF input is
		 * untrusted).
		 */
		if (vector >= vf->num_irq_vectors) {
			device_printf(sc->dev,
			    "%s: VF-%d: message's vector ID (%d) is greater than VF's max ID (%d)\n",
			    __func__, vf->vf_num, vector, vf->num_irq_vectors - 1);
			v_status = VIRTCHNL_STATUS_ERR_PARAM;
			goto done;
		}

		/* The Misc/Admin Queue vector doesn't need mapping */
		if (vector == 0)
			continue;

		/* Associate each Tx queue named in the map with this vector. */
		/* coverity[address_of] */
		for_each_set_bit(bit, &vvm->txq_map, ICE_VIRTCHNL_QUEUE_MAP_SIZE) {
			if (bit >= vsi->num_tx_queues) {
				device_printf(sc->dev,
				    "%s: VF-%d: txq map has invalid bit set\n",
				    __func__, vf->vf_num);
				v_status = VIRTCHNL_STATUS_ERR_PARAM;
				goto done;
			}

			vf->tx_irqvs[vector].me = vector;

			txq = &vsi->tx_queues[bit];
			txq->irqv = &vf->tx_irqvs[vector];
			txq->itr_idx = vvm->txitr_idx;
		}
		/* Likewise for each Rx queue named in the map. */
		/* coverity[address_of] */
		for_each_set_bit(bit, &vvm->rxq_map, ICE_VIRTCHNL_QUEUE_MAP_SIZE) {
			if (bit >= vsi->num_rx_queues) {
				device_printf(sc->dev,
				    "%s: VF-%d: rxq map has invalid bit set\n",
				    __func__, vf->vf_num);
				v_status = VIRTCHNL_STATUS_ERR_PARAM;
				goto done;
			}
			vf->rx_irqvs[vector].me = vector;

			rxq = &vsi->rx_queues[bit];
			rxq->irqv = &vf->rx_irqvs[vector];
			rxq->itr_idx = vvm->rxitr_idx;
		}
	}

	/* Write to T/RQCTL registers to actually map vectors to queues */
	for (int i = 0; i < vf->vsi->num_rx_queues; i++)
		if (vsi->rx_queues[i].irqv != NULL)
			ice_configure_rxq_interrupt(hw, vsi->rx_qmap[i],
			    vsi->rx_queues[i].irqv->me, vsi->rx_queues[i].itr_idx);

	for (int i = 0; i < vf->vsi->num_tx_queues; i++)
		if (vsi->tx_queues[i].irqv != NULL)
			ice_configure_txq_interrupt(hw, vsi->tx_qmap[i],
			    vsi->tx_queues[i].irqv->me, vsi->tx_queues[i].itr_idx);

	ice_flush(hw);

done:
	ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_IRQ_MAP,
	    v_status, NULL, 0, NULL);
}

/**
 * ice_eth_stats_to_virtchnl_eth_stats - Convert stats for virtchnl
 * @istats: VSI stats from HW to convert
 * @vstats: stats struct to copy to
 *
 * This function copies all known stats in struct virtchnl_eth_stats from the
 * input struct ice_eth_stats to an output struct virtchnl_eth_stats.
 *
 * @remark These two structure types currently have the same definition up to
 * the size of struct virtchnl_eth_stats (on FreeBSD), but that could change
 * in the future.
1449 */ 1450 static void 1451 ice_eth_stats_to_virtchnl_eth_stats(struct ice_eth_stats *istats, 1452 struct virtchnl_eth_stats *vstats) 1453 { 1454 vstats->rx_bytes = istats->rx_bytes; 1455 vstats->rx_unicast = istats->rx_unicast; 1456 vstats->rx_multicast = istats->rx_multicast; 1457 vstats->rx_broadcast = istats->rx_broadcast; 1458 vstats->rx_discards = istats->rx_discards; 1459 vstats->rx_unknown_protocol = istats->rx_unknown_protocol; 1460 vstats->tx_bytes = istats->tx_bytes; 1461 vstats->tx_unicast = istats->tx_unicast; 1462 vstats->tx_multicast = istats->tx_multicast; 1463 vstats->tx_broadcast = istats->tx_broadcast; 1464 vstats->tx_discards = istats->tx_discards; 1465 vstats->tx_errors = istats->tx_errors; 1466 } 1467 1468 /** 1469 * ice_vc_get_stats_msg - Handle VIRTCHNL_OP_GET_STATS msg 1470 * @sc: device private structure 1471 * @vf: VF tracking structure 1472 * @msg_buf: raw message buffer from the VF 1473 * 1474 * Updates the VF's VSI stats and sends those stats back to the VF. 
1475 */ 1476 static void 1477 ice_vc_get_stats_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf) 1478 { 1479 struct virtchnl_queue_select *vqs; 1480 struct virtchnl_eth_stats stats; 1481 struct ice_vsi *vsi = vf->vsi; 1482 struct ice_hw *hw = &sc->hw; 1483 1484 vqs = (struct virtchnl_queue_select *)msg_buf; 1485 1486 if (vqs->vsi_id != vsi->idx) { 1487 device_printf(sc->dev, 1488 "%s: VF-%d: message has invalid VSI ID %d (VF has VSI ID %d)\n", 1489 __func__, vf->vf_num, vqs->vsi_id, vsi->idx); 1490 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_GET_STATS, 1491 VIRTCHNL_STATUS_ERR_PARAM, NULL, 0, NULL); 1492 } 1493 1494 ice_update_vsi_hw_stats(vf->vsi); 1495 ice_eth_stats_to_virtchnl_eth_stats(&vsi->hw_stats.cur, &stats); 1496 1497 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_GET_STATS, 1498 VIRTCHNL_STATUS_SUCCESS, (u8 *)&stats, 1499 sizeof(struct virtchnl_eth_stats), NULL); 1500 } 1501 1502 /** 1503 * ice_vc_cfg_promisc_mode_msg - Handle VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE 1504 * @sc: PF's softc structure 1505 * @vf: VF tracking structure 1506 * @msg_buf: message buffer from VF 1507 * 1508 * Configures the promiscuous modes for the given VSI in msg_buf. 
 */
static void
ice_vc_cfg_promisc_mode_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
{
	struct ice_hw *hw = &sc->hw;
	struct virtchnl_promisc_info *vpi;
	enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
	int status = 0;
	struct ice_vsi *vsi = vf->vsi;
	ice_declare_bitmap(old_promisc_mask, ICE_PROMISC_MAX);
	ice_declare_bitmap(req_promisc_mask, ICE_PROMISC_MAX);
	ice_declare_bitmap(clear_promisc_mask, ICE_PROMISC_MAX);
	ice_declare_bitmap(set_promisc_mask, ICE_PROMISC_MAX);
	ice_declare_bitmap(old_req_xor_mask, ICE_PROMISC_MAX);
	u16 vid;

	vpi = (struct virtchnl_promisc_info *)msg_buf;

	/* Check to see if VF has permission to configure promiscuous mode */
	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
		device_printf(sc->dev,
		    "VF-%d: attempted to configure promiscuous mode\n",
		    vf->vf_num);
		/* Don't reply to VF with an error; reply carries SUCCESS */
		goto done;
	}

	if (vpi->vsi_id != vsi->idx) {
		device_printf(sc->dev,
		    "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n",
		    vf->vf_num, vsi->idx, vpi->vsi_id);
		v_status = VIRTCHNL_STATUS_ERR_PARAM;
		goto done;
	}

	/* Reject any flag bits outside the supported unicast/multicast set */
	if (vpi->flags & ~ICE_VIRTCHNL_VALID_PROMISC_FLAGS) {
		device_printf(sc->dev,
		    "VF-%d: Message has invalid promiscuous flags set (valid 0x%02x, got 0x%02x)\n",
		    vf->vf_num, ICE_VIRTCHNL_VALID_PROMISC_FLAGS,
		    vpi->flags);
		v_status = VIRTCHNL_STATUS_ERR_PARAM;
		goto done;

	}

	ice_zero_bitmap(req_promisc_mask, ICE_PROMISC_MAX);
	/* Convert virtchnl flags to ice AQ promiscuous mode flags */
	if (vpi->flags & FLAG_VF_UNICAST_PROMISC) {
		ice_set_bit(ICE_PROMISC_UCAST_TX, req_promisc_mask);
		ice_set_bit(ICE_PROMISC_UCAST_RX, req_promisc_mask);
	}
	if (vpi->flags & FLAG_VF_MULTICAST_PROMISC) {
		ice_set_bit(ICE_PROMISC_MCAST_TX, req_promisc_mask);
		ice_set_bit(ICE_PROMISC_MCAST_RX, req_promisc_mask);
	}

	/* Fetch the currently-active promiscuous mode for this VSI */
	status = ice_get_vsi_promisc(hw, vsi->idx, old_promisc_mask, &vid);
	if (status) {
		device_printf(sc->dev,
		    "VF-%d: Failed to get promiscuous mode mask for VSI %d, err %s aq_err %s\n",
		    vf->vf_num, vsi->idx,
		    ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		v_status = ice_iov_err_to_virt_err(status);
		goto done;
	}

	/* Figure out what got added and what got removed:
	 * xor yields the bits that changed; masking the changed bits with
	 * the old mask gives bits to clear, with the requested mask gives
	 * bits to set.
	 */
	ice_zero_bitmap(old_req_xor_mask, ICE_PROMISC_MAX);
	ice_xor_bitmap(old_req_xor_mask, old_promisc_mask, req_promisc_mask, ICE_PROMISC_MAX);
	ice_and_bitmap(clear_promisc_mask, old_req_xor_mask, old_promisc_mask, ICE_PROMISC_MAX);
	ice_and_bitmap(set_promisc_mask, old_req_xor_mask, req_promisc_mask, ICE_PROMISC_MAX);

	if (ice_is_any_bit_set(clear_promisc_mask, ICE_PROMISC_MAX)) {
		status = ice_clear_vsi_promisc(hw, vsi->idx,
		    clear_promisc_mask, 0);
		if (status) {
			device_printf(sc->dev,
			    "VF-%d: Failed to clear promiscuous mode for VSI %d, err %s aq_err %s\n",
			    vf->vf_num, vsi->idx,
			    ice_status_str(status),
			    ice_aq_str(hw->adminq.sq_last_status));
			v_status = ice_iov_err_to_virt_err(status);
			goto done;
		}
	}

	if (ice_is_any_bit_set(set_promisc_mask, ICE_PROMISC_MAX)) {
		status = ice_set_vsi_promisc(hw, vsi->idx, set_promisc_mask, 0);
		if (status) {
			device_printf(sc->dev,
			    "VF-%d: Failed to set promiscuous mode for VSI %d, err %s aq_err %s\n",
			    vf->vf_num, vsi->idx,
			    ice_status_str(status),
			    ice_aq_str(hw->adminq.sq_last_status));
			v_status = ice_iov_err_to_virt_err(status);
			goto done;
		}
	}

done:
	ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
	    v_status, NULL, 0, NULL);
}

/**
 * ice_vc_notify_all_vfs_link_state - Notify all VFs of PF link state
 * @sc: device private structure
 *
 * Sends a message to
all VFs about the status of the PF's link 1619 * state. For more details, @see ice_vc_notify_vf_link_state. 1620 */ 1621 void 1622 ice_vc_notify_all_vfs_link_state(struct ice_softc *sc) 1623 { 1624 for (int i = 0; i < sc->num_vfs; i++) 1625 ice_vc_notify_vf_link_state(sc, &sc->vfs[i]); 1626 } 1627 1628 /** 1629 * ice_vc_notify_vf_link_state - Notify VF of PF link state 1630 * @sc: device private structure 1631 * @vf: VF tracking structure 1632 * 1633 * Sends an event message to the specified VF with information about 1634 * the current link state from the PF's port. This includes whether 1635 * link is up or down, and the link speed in 100Mbps units. 1636 */ 1637 static void 1638 ice_vc_notify_vf_link_state(struct ice_softc *sc, struct ice_vf *vf) 1639 { 1640 struct virtchnl_pf_event event = {}; 1641 struct ice_hw *hw = &sc->hw; 1642 1643 event.event = VIRTCHNL_EVENT_LINK_CHANGE; 1644 event.severity = PF_EVENT_SEVERITY_INFO; 1645 event.event_data.link_event_adv.link_status = sc->link_up; 1646 event.event_data.link_event_adv.link_speed = 1647 (u32)ice_conv_link_speed_to_virtchnl(true, 1648 hw->port_info->phy.link_info.link_speed); 1649 1650 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_EVENT, 1651 VIRTCHNL_STATUS_SUCCESS, (u8 *)&event, sizeof(event), NULL); 1652 } 1653 1654 /** 1655 * ice_vc_handle_vf_msg - Handle a message from a VF 1656 * @sc: device private structure 1657 * @event: event received from the HW MBX queue 1658 * 1659 * Called whenever an event is received from a VF on the HW mailbox queue. 1660 * Responsible for handling these messages as well as responding to the 1661 * VF afterwards, depending on the received message type. 
 */
void
ice_vc_handle_vf_msg(struct ice_softc *sc, struct ice_rq_event_info *event)
{
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct ice_vf *vf;
	int err = 0;

	/* Decode the mailbox descriptor: opcode, source VF, and payload */
	u32 v_opcode = event->desc.cookie_high;
	u16 v_id = event->desc.retval;
	u8 *msg = event->msg_buf;
	u16 msglen = event->msg_len;

	/* The VF index comes off the wire; bound it before indexing vfs[] */
	if (v_id >= sc->num_vfs) {
		device_printf(dev, "%s: Received msg from invalid VF-%d: opcode %d, len %d\n",
		    __func__, v_id, v_opcode, msglen);
		return;
	}

	vf = &sc->vfs[v_id];

	/* Perform basic checks on the msg */
	err = virtchnl_vc_validate_vf_msg(&vf->version, v_opcode, msg, msglen);
	if (err) {
		device_printf(dev, "%s: Received invalid msg from VF-%d: opcode %d, len %d, error %d\n",
		    __func__, vf->vf_num, v_opcode, msglen, err);
		ice_aq_send_msg_to_vf(hw, v_id, v_opcode, VIRTCHNL_STATUS_ERR_PARAM, NULL, 0, NULL);
		return;
	}

	/* Dispatch to the per-opcode handler; each handler sends its own
	 * mailbox reply to the VF.
	 */
	switch (v_opcode) {
	case VIRTCHNL_OP_VERSION:
		ice_vc_version_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_RESET_VF:
		ice_reset_vf(sc, vf, true);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		ice_vc_get_vf_res_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		ice_vc_add_eth_addr_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		ice_vc_del_eth_addr_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		ice_vc_add_vlan_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		ice_vc_del_vlan_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ice_vc_cfg_vsi_qs_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		ice_vc_cfg_rss_key_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		ice_vc_cfg_rss_lut_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_SET_RSS_HENA:
		ice_vc_set_rss_hena_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		ice_vc_enable_queues_msg(sc, vf, msg);
		/* Queues are live now; tell the VF the current link state */
		ice_vc_notify_vf_link_state(sc, vf);
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		ice_vc_disable_queues_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ice_vc_cfg_irq_map_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_GET_STATS:
		ice_vc_get_stats_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ice_vc_cfg_promisc_mode_msg(sc, vf, msg);
		break;
	default:
		device_printf(dev, "%s: Received unknown msg from VF-%d: opcode %d, len %d\n",
		    __func__, vf->vf_num, v_opcode, msglen);
		ice_aq_send_msg_to_vf(hw, v_id, v_opcode,
		    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL, 0, NULL);
		break;
	}
}

/**
 * ice_iov_setup_intr_mapping - Setup interrupt config for a VF
 * @sc: device softc structure
 * @vf: driver's VF structure for VF to be configured
 *
 * Before a VF can be used, and after a VF reset, the PF must configure
 * the VF's interrupt allocation registers. This includes allocating
 * interrupts from the PF's interrupt pool to the VF using the
 * VPINT_ALLOC(_PCI) registers, and setting up a mapping from PF vectors
 * to VF vectors in GLINT_VECT2FUNC.
 *
 * As well, this sets up queue allocation registers and maps the mailbox
 * interrupt for the VF.
 */
static void
ice_iov_setup_intr_mapping(struct ice_softc *sc, struct ice_vf *vf)
{
	struct ice_hw *hw = &sc->hw;
	struct ice_vsi *vsi = vf->vsi;
	u16 v;

	/* Calculate indices for register ops below */
	u16 vf_first_irq_idx = vf->vf_imap[0];
	u16 vf_last_irq_idx = (vf_first_irq_idx + vf->num_irq_vectors) - 1;
	/* Absolute (device-global) indices include the PF's MSI-X base */
	u16 abs_vf_first_irq_idx = hw->func_caps.common_cap.msix_vector_first_id +
	    vf_first_irq_idx;
	u16 abs_vf_last_irq_idx = (abs_vf_first_irq_idx + vf->num_irq_vectors) - 1;
	u16 abs_vf_num = vf->vf_num + hw->func_caps.vf_base_id;

	/* Map out VF interrupt allocation in global device space. Both
	 * VPINT_ALLOC and VPINT_ALLOC_PCI use the same values.
	 */
	wr32(hw, VPINT_ALLOC(vf->vf_num),
	    (((abs_vf_first_irq_idx << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
	    ((abs_vf_last_irq_idx << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
	    VPINT_ALLOC_VALID_M));
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_num),
	    (((abs_vf_first_irq_idx << VPINT_ALLOC_PCI_FIRST_S) & VPINT_ALLOC_PCI_FIRST_M) |
	    ((abs_vf_last_irq_idx << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
	    VPINT_ALLOC_PCI_VALID_M));

	/* Create inverse mapping of vectors to PF/VF combinations */
	for (v = vf_first_irq_idx; v <= vf_last_irq_idx; v++)
	{
		wr32(hw, GLINT_VECT2FUNC(v),
		    (((abs_vf_num << GLINT_VECT2FUNC_VF_NUM_S) & GLINT_VECT2FUNC_VF_NUM_M) |
		    ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & GLINT_VECT2FUNC_PF_NUM_M)));
	}

	/* Map mailbox interrupt to MSI-X index 0. Disable ITR for it, too. */
	wr32(hw, VPINT_MBX_CTL(abs_vf_num),
	    ((0 << VPINT_MBX_CTL_MSIX_INDX_S) & VPINT_MBX_CTL_MSIX_INDX_M) |
	    ((0x3 << VPINT_MBX_CTL_ITR_INDX_S) & VPINT_MBX_CTL_ITR_INDX_M) |
	    VPINT_MBX_CTL_CAUSE_ENA_M);

	/* Mark the TX queue mapping registers as valid */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_num), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* Indicate to HW that VF has scattered queue allocation */
	wr32(hw, VPLAN_TX_QBASE(vf->vf_num), VPLAN_TX_QBASE_VFQTABLE_ENA_M);
	for (int i = 0; i < vsi->num_tx_queues; i++) {
		wr32(hw, VPLAN_TX_QTABLE(i, vf->vf_num),
		    (vsi->tx_qmap[i] << VPLAN_TX_QTABLE_QINDEX_S) & VPLAN_TX_QTABLE_QINDEX_M);
	}

	/* Mark the RX queue mapping registers as valid */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_num), VPLAN_RXQ_MAPENA_RX_ENA_M);
	wr32(hw, VPLAN_RX_QBASE(vf->vf_num), VPLAN_RX_QBASE_VFQTABLE_ENA_M);
	for (int i = 0; i < vsi->num_rx_queues; i++) {
		wr32(hw, VPLAN_RX_QTABLE(i, vf->vf_num),
		    (vsi->rx_qmap[i] << VPLAN_RX_QTABLE_QINDEX_S) & VPLAN_RX_QTABLE_QINDEX_M);
	}
}

/**
 * ice_iov_err_to_virt_err - translate ice errors into virtchnl errors
 * @ice_err: status returned from ice function
 *
 * @returns the virtchnl_status_code equivalent of the given ice status
 * code; unrecognized codes map to VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.
 */
static enum virtchnl_status_code
ice_iov_err_to_virt_err(int ice_err)
{
	switch (ice_err) {
	case 0:
		return VIRTCHNL_STATUS_SUCCESS;
	case ICE_ERR_BAD_PTR:
	case ICE_ERR_INVAL_SIZE:
	case ICE_ERR_DEVICE_NOT_SUPPORTED:
	case ICE_ERR_PARAM:
	case ICE_ERR_CFG:
		return VIRTCHNL_STATUS_ERR_PARAM;
	case ICE_ERR_NO_MEMORY:
		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
	case ICE_ERR_NOT_READY:
	case ICE_ERR_RESET_FAILED:
	case ICE_ERR_FW_API_VER:
	case ICE_ERR_AQ_ERROR:
	case ICE_ERR_AQ_TIMEOUT:
	case ICE_ERR_AQ_FULL:
	case ICE_ERR_AQ_NO_WORK:
	case ICE_ERR_AQ_EMPTY:
		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
	default:
		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
	}
}
1857