1 /* SPDX-License-Identifier: BSD-3-Clause */ 2 /* Copyright (c) 2025, Intel Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are met: 7 * 8 * 1. Redistributions of source code must retain the above copyright notice, 9 * this list of conditions and the following disclaimer. 10 * 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * 3. Neither the name of the Intel Corporation nor the names of its 16 * contributors may be used to endorse or promote products derived from 17 * this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /** 33 * @file ice_iov.c 34 * @brief Virtualization support functions 35 * 36 * Contains functions for enabling and managing PCIe virtual function devices, 37 * including enabling new VFs, and managing VFs over the virtchnl interface. 
38 */ 39 40 #include "ice_iov.h" 41 42 static struct ice_vf *ice_iov_get_vf(struct ice_softc *sc, int vf_num); 43 static void ice_iov_ready_vf(struct ice_softc *sc, struct ice_vf *vf); 44 static void ice_reset_vf(struct ice_softc *sc, struct ice_vf *vf, 45 bool trigger_vflr); 46 static void ice_iov_setup_intr_mapping(struct ice_softc *sc, struct ice_vf *vf); 47 48 static void ice_vc_version_msg(struct ice_softc *sc, struct ice_vf *vf, 49 u8 *msg_buf); 50 static void ice_vc_get_vf_res_msg(struct ice_softc *sc, struct ice_vf *vf, 51 u8 *msg_buf); 52 static void ice_vc_add_eth_addr_msg(struct ice_softc *sc, struct ice_vf *vf, 53 u8 *msg_buf); 54 static void ice_vc_del_eth_addr_msg(struct ice_softc *sc, struct ice_vf *vf, 55 u8 *msg_buf); 56 static bool ice_vc_isvalid_ring_len(u16 ring_len); 57 static void ice_vc_cfg_vsi_qs_msg(struct ice_softc *sc, struct ice_vf *vf, 58 u8 *msg_buf); 59 static void ice_vc_cfg_rss_key_msg(struct ice_softc *sc, struct ice_vf *vf, 60 u8 *msg_buf); 61 static void ice_vc_set_rss_hena_msg(struct ice_softc *sc, struct ice_vf *vf, 62 u8 *msg_buf); 63 static void ice_vc_enable_queues_msg(struct ice_softc *sc, struct ice_vf *vf, 64 u8 *msg_buf); 65 static void ice_vc_notify_vf_link_state(struct ice_softc *sc, struct ice_vf *vf); 66 static void ice_vc_disable_queues_msg(struct ice_softc *sc, struct ice_vf *vf, 67 u8 *msg_buf); 68 static void ice_vc_cfg_irq_map_msg(struct ice_softc *sc, struct ice_vf *vf, 69 u8 *msg_buf); 70 static void ice_vc_get_stats_msg(struct ice_softc *sc, struct ice_vf *vf, 71 u8 *msg_buf); 72 static void ice_eth_stats_to_virtchnl_eth_stats(struct ice_eth_stats *istats, 73 struct virtchnl_eth_stats *vstats); 74 static void ice_vc_cfg_rss_lut_msg(struct ice_softc *sc, struct ice_vf *vf, 75 u8 *msg_buf); 76 static void ice_vc_cfg_promisc_mode_msg(struct ice_softc *sc, struct ice_vf *vf, 77 u8 *msg_buf); 78 static void ice_vc_add_vlan_msg(struct ice_softc *sc, struct ice_vf *vf, 79 u8 *msg_buf); 80 static void 
ice_vc_del_vlan_msg(struct ice_softc *sc, struct ice_vf *vf, 81 u8 *msg_buf); 82 static enum virtchnl_status_code ice_iov_err_to_virt_err(int ice_err); 83 static int ice_vf_validate_mac(struct ice_vf *vf, const uint8_t *addr); 84 85 /** 86 * ice_iov_attach - Initialize SR-IOV PF host support 87 * @sc: device softc structure 88 * 89 * Initialize SR-IOV PF host support at the end of the driver attach process. 90 * 91 * @pre Must be called from sleepable context (calls malloc() w/ M_WAITOK) 92 * 93 * @returns 0 if successful, or 94 * - ENOMEM if there is no memory for the PF/VF schemas or iov device 95 * - ENXIO if the device isn't PCI-E or doesn't support the same SR-IOV 96 * version as the kernel 97 * - ENOENT if the device doesn't have the SR-IOV capability 98 */ 99 int 100 ice_iov_attach(struct ice_softc *sc) 101 { 102 device_t dev = sc->dev; 103 nvlist_t *pf_schema, *vf_schema; 104 int error; 105 106 pf_schema = pci_iov_schema_alloc_node(); 107 vf_schema = pci_iov_schema_alloc_node(); 108 109 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL); 110 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof", 111 IOV_SCHEMA_HASDEFAULT, TRUE); 112 pci_iov_schema_add_bool(vf_schema, "allow-set-mac", 113 IOV_SCHEMA_HASDEFAULT, FALSE); 114 pci_iov_schema_add_bool(vf_schema, "allow-promisc", 115 IOV_SCHEMA_HASDEFAULT, FALSE); 116 pci_iov_schema_add_uint16(vf_schema, "num-queues", 117 IOV_SCHEMA_HASDEFAULT, ICE_DEFAULT_VF_QUEUES); 118 pci_iov_schema_add_uint16(vf_schema, "mirror-src-vsi", 119 IOV_SCHEMA_HASDEFAULT, ICE_INVALID_MIRROR_VSI); 120 121 error = pci_iov_attach(dev, pf_schema, vf_schema); 122 if (error != 0) { 123 device_printf(dev, 124 "pci_iov_attach failed (error=%s)\n", 125 ice_err_str(error)); 126 ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en); 127 } else 128 ice_set_bit(ICE_FEATURE_SRIOV, sc->feat_en); 129 130 return (error); 131 } 132 133 /** 134 * ice_iov_detach - Teardown SR-IOV PF host support 135 * @sc: device softc structure 136 * 137 * Teardown 
SR-IOV PF host support at the start of the driver detach process. 138 * 139 * @returns 0 if successful or IOV support hasn't been setup, or 140 * - EBUSY if VFs still exist 141 */ 142 int 143 ice_iov_detach(struct ice_softc *sc) 144 { 145 device_t dev = sc->dev; 146 int error; 147 148 error = pci_iov_detach(dev); 149 if (error != 0) { 150 device_printf(dev, 151 "pci_iov_detach failed (error=%s)\n", 152 ice_err_str(error)); 153 } 154 155 return (error); 156 } 157 158 /** 159 * ice_iov_init - Called by the OS before the first VF is created. 160 * @sc: device softc structure 161 * @num_vfs: number of VFs to setup resources for 162 * @params: configuration parameters for the PF 163 * 164 * @returns 0 if successful or an error code on failure 165 */ 166 int 167 ice_iov_init(struct ice_softc *sc, uint16_t num_vfs, const nvlist_t *params __unused) 168 { 169 /* Allocate array of VFs, for tracking */ 170 sc->vfs = (struct ice_vf *)malloc(sizeof(struct ice_vf) * num_vfs, M_ICE, M_NOWAIT | 171 M_ZERO); 172 if (sc->vfs == NULL) 173 return (ENOMEM); 174 175 /* Initialize each VF with basic information */ 176 for (int i = 0; i < num_vfs; i++) 177 sc->vfs[i].vf_num = i; 178 179 /* Save off number of configured VFs */ 180 sc->num_vfs = num_vfs; 181 182 return (0); 183 } 184 185 /** 186 * ice_iov_get_vf - Get pointer to VF at given index 187 * @sc: device softc structure 188 * @vf_num: Index of VF to retrieve 189 * 190 * @remark will throw an assertion if vf_num is not in the 191 * range of allocated VFs 192 * 193 * @returns a pointer to the VF structure at the given index 194 */ 195 static struct ice_vf * 196 ice_iov_get_vf(struct ice_softc *sc, int vf_num) 197 { 198 MPASS(vf_num < sc->num_vfs); 199 200 return &sc->vfs[vf_num]; 201 } 202 203 /** 204 * ice_iov_add_vf - Called by the OS for each VF to create 205 * @sc: device softc structure 206 * @vfnum: index of VF to configure 207 * @params: configuration parameters for the VF 208 * 209 * @returns 0 if successful or an error code 
on failure 210 */ 211 int 212 ice_iov_add_vf(struct ice_softc *sc, uint16_t vfnum, const nvlist_t *params) 213 { 214 struct ice_tx_queue *txq; 215 struct ice_rx_queue *rxq; 216 device_t dev = sc->dev; 217 struct ice_vsi *vsi; 218 struct ice_vf *vf; 219 int vf_num_queues; 220 const void *mac; 221 size_t size; 222 int error; 223 int i; 224 225 vf = ice_iov_get_vf(sc, vfnum); 226 vf->vf_flags = VF_FLAG_ENABLED; 227 228 /* This VF needs at least one VSI */ 229 vsi = ice_alloc_vsi(sc, ICE_VSI_VF); 230 if (vsi == NULL) 231 return (ENOMEM); 232 vf->vsi = vsi; 233 vsi->vf_num = vfnum; 234 235 vf_num_queues = nvlist_get_number(params, "num-queues"); 236 /* Validate and clamp value if invalid */ 237 if (vf_num_queues < 1 || vf_num_queues > ICE_MAX_SCATTERED_QUEUES) 238 device_printf(dev, "Invalid num-queues (%d) for VF %d\n", 239 vf_num_queues, vf->vf_num); 240 if (vf_num_queues < 1) { 241 device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num); 242 vf_num_queues = 1; 243 } else if (vf_num_queues > ICE_MAX_SCATTERED_QUEUES) { 244 device_printf(dev, "Setting VF %d num-queues to %d\n", 245 vf->vf_num, ICE_MAX_SCATTERED_QUEUES); 246 vf_num_queues = ICE_MAX_SCATTERED_QUEUES; 247 } 248 vsi->qmap_type = ICE_RESMGR_ALLOC_SCATTERED; 249 250 /* Reserve VF queue allocation from PF queues */ 251 ice_alloc_vsi_qmap(vsi, vf_num_queues, vf_num_queues); 252 vsi->num_tx_queues = vsi->num_rx_queues = vf_num_queues; 253 254 /* Assign Tx queues from PF space */ 255 error = ice_resmgr_assign_scattered(&sc->tx_qmgr, vsi->tx_qmap, 256 vsi->num_tx_queues); 257 if (error) { 258 device_printf(sc->dev, "Unable to assign VF Tx queues: %s\n", 259 ice_err_str(error)); 260 goto release_vsi; 261 } 262 263 /* Assign Rx queues from PF space */ 264 error = ice_resmgr_assign_scattered(&sc->rx_qmgr, vsi->rx_qmap, 265 vsi->num_rx_queues); 266 if (error) { 267 device_printf(sc->dev, "Unable to assign VF Rx queues: %s\n", 268 ice_err_str(error)); 269 goto release_vsi; 270 } 271 272 vsi->max_frame_size = 
ICE_MAX_FRAME_SIZE; 273 274 /* Allocate queue structure memory */ 275 vsi->tx_queues = (struct ice_tx_queue *) 276 malloc(sizeof(struct ice_tx_queue) * vsi->num_tx_queues, M_ICE, 277 M_NOWAIT | M_ZERO); 278 if (!vsi->tx_queues) { 279 device_printf(sc->dev, "VF-%d: Unable to allocate Tx queue memory\n", 280 vfnum); 281 error = ENOMEM; 282 goto release_vsi; 283 } 284 for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) { 285 txq->me = i; 286 txq->vsi = vsi; 287 } 288 289 /* Allocate queue structure memory */ 290 vsi->rx_queues = (struct ice_rx_queue *) 291 malloc(sizeof(struct ice_rx_queue) * vsi->num_rx_queues, M_ICE, 292 M_NOWAIT | M_ZERO); 293 if (!vsi->rx_queues) { 294 device_printf(sc->dev, "VF-%d: Unable to allocate Rx queue memory\n", 295 vfnum); 296 error = ENOMEM; 297 goto free_txqs; 298 } 299 for (i = 0, rxq = vsi->rx_queues; i < vsi->num_rx_queues; i++, rxq++) { 300 rxq->me = i; 301 rxq->vsi = vsi; 302 } 303 304 /* Allocate space to store the IRQ vector data */ 305 vf->num_irq_vectors = vf_num_queues + 1; 306 vf->tx_irqvs = (struct ice_irq_vector *) 307 malloc(sizeof(struct ice_irq_vector) * (vf->num_irq_vectors), 308 M_ICE, M_NOWAIT); 309 if (!vf->tx_irqvs) { 310 device_printf(sc->dev, 311 "Unable to allocate TX irqv memory for VF-%d's %d vectors\n", 312 vfnum, vf->num_irq_vectors); 313 error = ENOMEM; 314 goto free_rxqs; 315 } 316 vf->rx_irqvs = (struct ice_irq_vector *) 317 malloc(sizeof(struct ice_irq_vector) * (vf->num_irq_vectors), 318 M_ICE, M_NOWAIT); 319 if (!vf->rx_irqvs) { 320 device_printf(sc->dev, 321 "Unable to allocate RX irqv memory for VF-%d's %d vectors\n", 322 vfnum, vf->num_irq_vectors); 323 error = ENOMEM; 324 goto free_txirqvs; 325 } 326 327 /* Assign VF interrupts from PF space */ 328 if (!(vf->vf_imap = 329 (u16 *)malloc(sizeof(u16) * vf->num_irq_vectors, 330 M_ICE, M_NOWAIT))) { 331 device_printf(dev, "Unable to allocate VF-%d imap memory\n", vfnum); 332 error = ENOMEM; 333 goto free_rxirqvs; 334 } 335 error = 
ice_resmgr_assign_contiguous(&sc->dev_imgr, vf->vf_imap, vf->num_irq_vectors); 336 if (error) { 337 device_printf(dev, "Unable to assign VF-%d interrupt mapping: %s\n", 338 vfnum, ice_err_str(error)); 339 goto free_imap; 340 } 341 342 if (nvlist_exists_binary(params, "mac-addr")) { 343 mac = nvlist_get_binary(params, "mac-addr", &size); 344 bcopy(mac, vf->mac, ETHER_ADDR_LEN); 345 346 if (nvlist_get_bool(params, "allow-set-mac")) 347 vf->vf_flags |= VF_FLAG_SET_MAC_CAP; 348 } else 349 /* 350 * If the administrator has not specified a MAC address then 351 * we must allow the VF to choose one. 352 */ 353 vf->vf_flags |= VF_FLAG_SET_MAC_CAP; 354 355 if (nvlist_get_bool(params, "mac-anti-spoof")) 356 vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF; 357 358 if (nvlist_get_bool(params, "allow-promisc")) 359 vf->vf_flags |= VF_FLAG_PROMISC_CAP; 360 361 vsi->mirror_src_vsi = nvlist_get_number(params, "mirror-src-vsi"); 362 363 vf->vf_flags |= VF_FLAG_VLAN_CAP; 364 365 /* Create and setup VSI in HW */ 366 error = ice_initialize_vsi(vsi); 367 if (error) { 368 device_printf(sc->dev, "Unable to initialize VF %d VSI: %s\n", 369 vfnum, ice_err_str(error)); 370 goto release_imap; 371 } 372 373 /* Add the broadcast address */ 374 error = ice_add_vsi_mac_filter(vsi, broadcastaddr); 375 if (error) { 376 device_printf(sc->dev, "Unable to add broadcast filter VF %d VSI: %s\n", 377 vfnum, ice_err_str(error)); 378 goto release_imap; 379 } 380 381 ice_iov_ready_vf(sc, vf); 382 383 return (0); 384 385 release_imap: 386 ice_resmgr_release_map(&sc->dev_imgr, vf->vf_imap, 387 vf->num_irq_vectors); 388 free_imap: 389 free(vf->vf_imap, M_ICE); 390 vf->vf_imap = NULL; 391 free_rxirqvs: 392 free(vf->rx_irqvs, M_ICE); 393 vf->rx_irqvs = NULL; 394 free_txirqvs: 395 free(vf->tx_irqvs, M_ICE); 396 vf->tx_irqvs = NULL; 397 free_rxqs: 398 free(vsi->rx_queues, M_ICE); 399 vsi->rx_queues = NULL; 400 free_txqs: 401 free(vsi->tx_queues, M_ICE); 402 vsi->tx_queues = NULL; 403 release_vsi: 404 ice_release_vsi(vsi); 
405 vf->vsi = NULL; 406 return (error); 407 } 408 409 /** 410 * ice_iov_uninit - Called by the OS when VFs are destroyed 411 * @sc: device softc structure 412 */ 413 void 414 ice_iov_uninit(struct ice_softc *sc) 415 { 416 struct ice_vf *vf; 417 struct ice_vsi *vsi; 418 419 /* Release per-VF resources */ 420 for (int i = 0; i < sc->num_vfs; i++) { 421 vf = &sc->vfs[i]; 422 vsi = vf->vsi; 423 424 /* Free VF interrupt reservation */ 425 if (vf->vf_imap) { 426 free(vf->vf_imap, M_ICE); 427 vf->vf_imap = NULL; 428 } 429 430 /* Free queue interrupt mapping trackers */ 431 if (vf->tx_irqvs) { 432 free(vf->tx_irqvs, M_ICE); 433 vf->tx_irqvs = NULL; 434 } 435 if (vf->rx_irqvs) { 436 free(vf->rx_irqvs, M_ICE); 437 vf->rx_irqvs = NULL; 438 } 439 440 if (!vsi) 441 continue; 442 443 /* Free VSI queues */ 444 if (vsi->tx_queues) { 445 free(vsi->tx_queues, M_ICE); 446 vsi->tx_queues = NULL; 447 } 448 if (vsi->rx_queues) { 449 free(vsi->rx_queues, M_ICE); 450 vsi->rx_queues = NULL; 451 } 452 453 ice_release_vsi(vsi); 454 vf->vsi = NULL; 455 } 456 457 /* Release memory used for VF tracking */ 458 if (sc->vfs) { 459 free(sc->vfs, M_ICE); 460 sc->vfs = NULL; 461 } 462 sc->num_vfs = 0; 463 } 464 465 /** 466 * ice_iov_handle_vflr - Process VFLR event 467 * @sc: device softc structure 468 * 469 * Identifys which VFs have been reset and re-configure 470 * them. 
471 */ 472 void 473 ice_iov_handle_vflr(struct ice_softc *sc) 474 { 475 struct ice_hw *hw = &sc->hw; 476 struct ice_vf *vf; 477 u32 reg, reg_idx, bit_idx; 478 479 for (int i = 0; i < sc->num_vfs; i++) { 480 vf = &sc->vfs[i]; 481 482 reg_idx = (hw->func_caps.vf_base_id + vf->vf_num) / 32; 483 bit_idx = (hw->func_caps.vf_base_id + vf->vf_num) % 32; 484 reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx)); 485 if (reg & BIT(bit_idx)) 486 ice_reset_vf(sc, vf, false); 487 } 488 } 489 490 /** 491 * ice_iov_ready_vf - Setup VF interrupts and mark it as ready 492 * @sc: device softc structure 493 * @vf: driver's VF structure for the VF to update 494 * 495 * Clears VF reset triggering bit, sets up the PF<->VF interrupt 496 * mapping and marks the VF as active in the HW so that the VF 497 * driver can use it. 498 */ 499 static void 500 ice_iov_ready_vf(struct ice_softc *sc, struct ice_vf *vf) 501 { 502 struct ice_hw *hw = &sc->hw; 503 u32 reg; 504 505 /* Clear the triggering bit */ 506 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_num)); 507 reg &= ~VPGEN_VFRTRIG_VFSWR_M; 508 wr32(hw, VPGEN_VFRTRIG(vf->vf_num), reg); 509 510 /* Setup VF interrupt allocation and mapping */ 511 ice_iov_setup_intr_mapping(sc, vf); 512 513 /* Indicate to the VF that reset is done */ 514 wr32(hw, VFGEN_RSTAT(vf->vf_num), VIRTCHNL_VFR_VFACTIVE); 515 516 ice_flush(hw); 517 } 518 519 /** 520 * ice_reset_vf - Perform a hardware reset (VFR) on a VF 521 * @sc: device softc structure 522 * @vf: driver's VF structure for VF to be reset 523 * @trigger_vflr: trigger a reset or only handle already executed reset 524 * 525 * Performs a VFR for the given VF. This function busy waits until the 526 * reset completes in the HW, notifies the VF that the reset is done 527 * by setting a bit in a HW register, then returns. 
528 * 529 * @remark This also sets up the PF<->VF interrupt mapping and allocations in 530 * the hardware after the hardware reset is finished, via 531 * ice_iov_setup_intr_mapping() 532 */ 533 static void 534 ice_reset_vf(struct ice_softc *sc, struct ice_vf *vf, bool trigger_vflr) 535 { 536 u16 global_vf_num, reg_idx, bit_idx; 537 struct ice_hw *hw = &sc->hw; 538 int status; 539 u32 reg; 540 int i; 541 542 global_vf_num = vf->vf_num + hw->func_caps.vf_base_id; 543 544 if (trigger_vflr) { 545 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_num)); 546 reg |= VPGEN_VFRTRIG_VFSWR_M; 547 wr32(hw, VPGEN_VFRTRIG(vf->vf_num), reg); 548 } 549 550 /* clear the VFLR bit for the VF in a GLGEN_VFLRSTAT register */ 551 reg_idx = (global_vf_num) / 32; 552 bit_idx = (global_vf_num) % 32; 553 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); 554 ice_flush(hw); 555 556 /* Wait until there are no pending PCI transactions */ 557 wr32(hw, PF_PCI_CIAA, 558 ICE_PCIE_DEV_STATUS | (global_vf_num << PF_PCI_CIAA_VF_NUM_S)); 559 560 for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) { 561 reg = rd32(hw, PF_PCI_CIAD); 562 if (!(reg & PCIEM_STA_TRANSACTION_PND)) 563 break; 564 565 DELAY(ICE_PCI_CIAD_WAIT_DELAY_US); 566 } 567 if (i == ICE_PCI_CIAD_WAIT_COUNT) 568 device_printf(sc->dev, 569 "VF-%d PCI transactions stuck\n", vf->vf_num); 570 571 /* Disable TX queues, which is required during VF reset */ 572 status = ice_dis_vsi_txq(hw->port_info, vf->vsi->idx, 0, 0, NULL, NULL, 573 NULL, ICE_VF_RESET, vf->vf_num, NULL); 574 if (status) 575 device_printf(sc->dev, 576 "%s: Failed to disable LAN Tx queues: err %s aq_err %s\n", 577 __func__, ice_status_str(status), 578 ice_aq_str(hw->adminq.sq_last_status)); 579 580 /* Then check for the VF reset to finish in HW */ 581 for (i = 0; i < ICE_VPGEN_VFRSTAT_WAIT_COUNT; i++) { 582 reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_num)); 583 if ((reg & VPGEN_VFRSTAT_VFRD_M)) 584 break; 585 586 DELAY(ICE_VPGEN_VFRSTAT_WAIT_DELAY_US); 587 } 588 if (i == ICE_VPGEN_VFRSTAT_WAIT_COUNT) 589 
device_printf(sc->dev, 590 "VF-%d Reset is stuck\n", vf->vf_num); 591 592 ice_iov_ready_vf(sc, vf); 593 } 594 595 /** 596 * ice_vc_get_vf_res_msg - Handle VIRTCHNL_OP_GET_VF_RESOURCES msg from VF 597 * @sc: device private structure 598 * @vf: VF tracking structure 599 * @msg_buf: raw message buffer from the VF 600 * 601 * Receives a message from the VF listing its supported capabilities, and 602 * replies to the VF with information about what resources the PF has 603 * allocated for the VF. 604 * 605 * @remark This always replies to the VF with a success status; it does not 606 * fail. It's up to the VF driver to reject or complain about the PF's response. 607 */ 608 static void 609 ice_vc_get_vf_res_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf) 610 { 611 struct ice_hw *hw = &sc->hw; 612 struct virtchnl_vf_resource *vf_res; 613 u16 vf_res_len; 614 u32 vf_caps; 615 616 /* XXX: Only support one VSI per VF, so this size doesn't need adjusting */ 617 vf_res_len = sizeof(struct virtchnl_vf_resource); 618 vf_res = (struct virtchnl_vf_resource *)malloc(vf_res_len, M_ICE, M_WAITOK | M_ZERO); 619 620 vf_res->num_vsis = 1; 621 vf_res->num_queue_pairs = vf->vsi->num_tx_queues; 622 vf_res->max_vectors = vf_res->num_queue_pairs + 1; 623 624 vf_res->rss_key_size = ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE; 625 vf_res->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE; 626 vf_res->max_mtu = 0; 627 628 vf_res->vf_cap_flags = VF_BASE_MODE_OFFLOADS; 629 if (msg_buf != NULL) { 630 vf_caps = *((u32 *)(msg_buf)); 631 632 if (vf_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) 633 vf_res->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED; 634 } 635 636 vf_res->vsi_res[0].vsi_id = vf->vsi->idx; 637 vf_res->vsi_res[0].num_queue_pairs = vf->vsi->num_tx_queues; 638 vf_res->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; 639 vf_res->vsi_res[0].qset_handle = 0; 640 641 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_GET_VF_RESOURCES, 642 VIRTCHNL_STATUS_SUCCESS, (u8 *)vf_res, vf_res_len, NULL); 643 644 
free(vf_res, M_ICE); 645 } 646 647 /** 648 * ice_vc_version_msg - Handle VIRTCHNL_OP_VERSION msg from VF 649 * @sc: device private structure 650 * @vf: VF tracking structure 651 * @msg_buf: raw message buffer from the VF 652 * 653 * Receives a version message from the VF, and responds to the VF with 654 * the version number that the PF will use. 655 * 656 * @remark This always replies to the VF with a success status; it does not 657 * fail. 658 */ 659 static void 660 ice_vc_version_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf) 661 { 662 struct virtchnl_version_info *recv_vf_version; 663 struct ice_hw *hw = &sc->hw; 664 device_t dev = sc->dev; 665 666 recv_vf_version = (struct virtchnl_version_info *)msg_buf; 667 668 /* VFs running the 1.0 API expect to get 1.0 back */ 669 if (VF_IS_V10(recv_vf_version)) { 670 vf->version.major = 1; 671 vf->version.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; 672 } else { 673 vf->version.major = VIRTCHNL_VERSION_MAJOR; 674 vf->version.minor = VIRTCHNL_VERSION_MINOR; 675 676 if ((recv_vf_version->major != VIRTCHNL_VERSION_MAJOR) || 677 (recv_vf_version->minor != VIRTCHNL_VERSION_MINOR)) 678 device_printf(dev, 679 "%s: VF-%d requested version (%d.%d) differs from PF version (%d.%d)\n", 680 __func__, vf->vf_num, 681 recv_vf_version->major, recv_vf_version->minor, 682 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR); 683 } 684 685 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_VERSION, 686 VIRTCHNL_STATUS_SUCCESS, (u8 *)&vf->version, sizeof(vf->version), 687 NULL); 688 } 689 690 /** 691 * ice_vf_validate_mac - Validate MAC address before adding it 692 * @vf: VF tracking structure 693 * @addr: MAC address to validate 694 * 695 * Validate a MAC address before adding it to a VF during the handling 696 * of a VIRTCHNL_OP_ADD_ETH_ADDR operation. Notably, this also checks if 697 * the VF is allowed to set its own arbitrary MAC addresses. 
698 * 699 * Returns 0 if MAC address is valid for the given vf 700 */ 701 static int 702 ice_vf_validate_mac(struct ice_vf *vf, const uint8_t *addr) 703 { 704 705 if (ETHER_IS_ZERO(addr) || ETHER_IS_BROADCAST(addr)) 706 return (EINVAL); 707 708 /* 709 * If the VF is not allowed to change its MAC address, don't let it 710 * set a MAC filter for an address that is not a multicast address and 711 * is not its assigned MAC. 712 */ 713 if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) && 714 !(ETHER_IS_MULTICAST(addr) || !bcmp(addr, vf->mac, ETHER_ADDR_LEN))) 715 return (EPERM); 716 717 return (0); 718 } 719 720 /** 721 * ice_vc_add_eth_addr_msg - Handle VIRTCHNL_OP_ADD_ETH_ADDR msg from VF 722 * @sc: device private structure 723 * @vf: VF tracking structure 724 * @msg_buf: raw message buffer from the VF 725 * 726 * Receives a list of MAC addresses from the VF and adds those addresses 727 * to the VSI's filter list. 728 */ 729 static void 730 ice_vc_add_eth_addr_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf) 731 { 732 enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS; 733 struct virtchnl_ether_addr_list *addr_list; 734 struct ice_hw *hw = &sc->hw; 735 int error = 0; 736 737 addr_list = (struct virtchnl_ether_addr_list *)msg_buf; 738 739 for (int i = 0; i < addr_list->num_elements; i++) { 740 u8 *addr = addr_list->list[i].addr; 741 742 /* The type flag is currently ignored; every MAC address is 743 * treated as the LEGACY type 744 */ 745 746 error = ice_vf_validate_mac(vf, addr); 747 if (error == EPERM) { 748 device_printf(sc->dev, 749 "%s: VF-%d: Not permitted to add MAC addr for VSI %d\n", 750 __func__, vf->vf_num, vf->vsi->idx); 751 v_status = VIRTCHNL_STATUS_ERR_PARAM; 752 continue; 753 } else if (error) { 754 device_printf(sc->dev, 755 "%s: VF-%d: Did not add invalid MAC addr for VSI %d\n", 756 __func__, vf->vf_num, vf->vsi->idx); 757 v_status = VIRTCHNL_STATUS_ERR_PARAM; 758 continue; 759 } 760 761 error = ice_add_vsi_mac_filter(vf->vsi, addr); 762 if 
(error) { 763 device_printf(sc->dev, 764 "%s: VF-%d: Error adding MAC addr for VSI %d\n", 765 __func__, vf->vf_num, vf->vsi->idx); 766 v_status = VIRTCHNL_STATUS_ERR_PARAM; 767 goto done; 768 } 769 } 770 771 done: 772 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_ADD_ETH_ADDR, 773 v_status, NULL, 0, NULL); 774 } 775 776 /** 777 * ice_vc_del_eth_addr_msg - Handle VIRTCHNL_OP_DEL_ETH_ADDR msg from VF 778 * @sc: device private structure 779 * @vf: VF tracking structure 780 * @msg_buf: raw message buffer from the VF 781 * 782 * Receives a list of MAC addresses from the VF and removes those addresses 783 * from the VSI's filter list. 784 */ 785 static void 786 ice_vc_del_eth_addr_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf) 787 { 788 enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS; 789 struct virtchnl_ether_addr_list *addr_list; 790 struct ice_hw *hw = &sc->hw; 791 int error = 0; 792 793 addr_list = (struct virtchnl_ether_addr_list *)msg_buf; 794 795 for (int i = 0; i < addr_list->num_elements; i++) { 796 error = ice_remove_vsi_mac_filter(vf->vsi, addr_list->list[i].addr); 797 if (error) { 798 device_printf(sc->dev, 799 "%s: VF-%d: Error removing MAC addr for VSI %d\n", 800 __func__, vf->vf_num, vf->vsi->idx); 801 v_status = VIRTCHNL_STATUS_ERR_PARAM; 802 goto done; 803 } 804 } 805 806 done: 807 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_DEL_ETH_ADDR, 808 v_status, NULL, 0, NULL); 809 } 810 811 /** 812 * ice_vc_add_vlan_msg - Handle VIRTCHNL_OP_ADD_VLAN msg from VF 813 * @sc: PF's softc structure 814 * @vf: VF tracking structure 815 * @msg_buf: message buffer from VF 816 * 817 * Adds the VLANs in msg_buf to the VF's VLAN filter list. 
818 */ 819 static void 820 ice_vc_add_vlan_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf) 821 { 822 struct ice_hw *hw = &sc->hw; 823 struct virtchnl_vlan_filter_list *vlan_list; 824 int status = 0; 825 enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS; 826 struct ice_vsi *vsi = vf->vsi; 827 828 vlan_list = (struct virtchnl_vlan_filter_list *)msg_buf; 829 830 if (vlan_list->vsi_id != vsi->idx) { 831 device_printf(sc->dev, 832 "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n", 833 vf->vf_num, vsi->idx, vlan_list->vsi_id); 834 v_status = VIRTCHNL_STATUS_ERR_PARAM; 835 goto done; 836 } 837 838 status = ice_add_vlan_hw_filters(vsi, vlan_list->vlan_id, 839 vlan_list->num_elements); 840 if (status) { 841 device_printf(sc->dev, 842 "VF-%d: Failure adding VLANs to VSI %d, err %s aq_err %s\n", 843 vf->vf_num, vsi->idx, ice_status_str(status), 844 ice_aq_str(sc->hw.adminq.sq_last_status)); 845 v_status = ice_iov_err_to_virt_err(status); 846 goto done; 847 } 848 849 done: 850 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_ADD_VLAN, 851 v_status, NULL, 0, NULL); 852 } 853 854 /** 855 * ice_vc_del_vlan_msg - Handle VIRTCHNL_OP_DEL_VLAN msg from VF 856 * @sc: PF's softc structure 857 * @vf: VF tracking structure 858 * @msg_buf: message buffer from VF 859 * 860 * Removes the VLANs in msg_buf from the VF's VLAN filter list. 
861 */ 862 static void 863 ice_vc_del_vlan_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf) 864 { 865 struct ice_hw *hw = &sc->hw; 866 struct virtchnl_vlan_filter_list *vlan_list; 867 int status = 0; 868 enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS; 869 struct ice_vsi *vsi = vf->vsi; 870 871 vlan_list = (struct virtchnl_vlan_filter_list *)msg_buf; 872 873 if (vlan_list->vsi_id != vsi->idx) { 874 device_printf(sc->dev, 875 "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n", 876 vf->vf_num, vsi->idx, vlan_list->vsi_id); 877 v_status = VIRTCHNL_STATUS_ERR_PARAM; 878 goto done; 879 } 880 881 status = ice_remove_vlan_hw_filters(vsi, vlan_list->vlan_id, 882 vlan_list->num_elements); 883 if (status) { 884 device_printf(sc->dev, 885 "VF-%d: Failure deleting VLANs from VSI %d, err %s aq_err %s\n", 886 vf->vf_num, vsi->idx, ice_status_str(status), 887 ice_aq_str(sc->hw.adminq.sq_last_status)); 888 v_status = ice_iov_err_to_virt_err(status); 889 goto done; 890 } 891 892 done: 893 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_DEL_VLAN, 894 v_status, NULL, 0, NULL); 895 } 896 897 /** 898 * ice_vc_validate_ring_len - Check to see if a descriptor ring length is valid 899 * @ring_len: length of ring 900 * 901 * Check whether a ring size value is valid. 
902 * 903 * @returns true if given ring size is valid 904 */ 905 static bool 906 ice_vc_isvalid_ring_len(u16 ring_len) 907 { 908 return (ring_len >= ICE_MIN_DESC_COUNT && 909 ring_len <= ICE_MAX_DESC_COUNT && 910 !(ring_len % ICE_DESC_COUNT_INCR)); 911 } 912 913 /** 914 * ice_vc_cfg_vsi_qs_msg - Handle VIRTCHNL_OP_CONFIG_VSI_QUEUES msg from VF 915 * @sc: PF's softc structure 916 * @vf: VF tracking structure 917 * @msg_buf: message buffer from VF 918 */ 919 static void 920 ice_vc_cfg_vsi_qs_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf) 921 { 922 device_t dev = sc->dev; 923 struct ice_hw *hw = &sc->hw; 924 struct virtchnl_vsi_queue_config_info *vqci; 925 struct virtchnl_queue_pair_info *vqpi; 926 enum virtchnl_status_code status = VIRTCHNL_STATUS_SUCCESS; 927 struct ice_vsi *vsi = vf->vsi; 928 struct ice_tx_queue *txq; 929 struct ice_rx_queue *rxq; 930 int i, error = 0; 931 932 vqci = (struct virtchnl_vsi_queue_config_info *)msg_buf; 933 934 if (vqci->num_queue_pairs > vf->vsi->num_tx_queues && 935 vqci->num_queue_pairs > vf->vsi->num_rx_queues) { 936 status = VIRTCHNL_STATUS_ERR_PARAM; 937 goto done; 938 } 939 940 ice_vsi_disable_tx(vf->vsi); 941 ice_control_all_rx_queues(vf->vsi, false); 942 943 /* 944 * Clear TX and RX queues config in case VF 945 * requests different number of queues. 
946 */ 947 for (i = 0; i < vsi->num_tx_queues; i++) { 948 txq = &vsi->tx_queues[i]; 949 950 txq->desc_count = 0; 951 txq->tx_paddr = 0; 952 txq->tc = 0; 953 } 954 955 for (i = 0; i < vsi->num_rx_queues; i++) { 956 rxq = &vsi->rx_queues[i]; 957 958 rxq->desc_count = 0; 959 rxq->rx_paddr = 0; 960 } 961 962 vqpi = vqci->qpair; 963 for (i = 0; i < vqci->num_queue_pairs; i++, vqpi++) { 964 /* Initial parameter validation */ 965 if (vqpi->txq.vsi_id != vf->vsi->idx || 966 vqpi->rxq.vsi_id != vf->vsi->idx || 967 vqpi->txq.queue_id != vqpi->rxq.queue_id || 968 vqpi->txq.headwb_enabled || 969 vqpi->rxq.splithdr_enabled || 970 vqpi->rxq.crc_disable || 971 !(ice_vc_isvalid_ring_len(vqpi->txq.ring_len)) || 972 !(ice_vc_isvalid_ring_len(vqpi->rxq.ring_len))) { 973 status = VIRTCHNL_STATUS_ERR_PARAM; 974 goto done; 975 } 976 977 /* Copy parameters into VF's queue/VSI structs */ 978 txq = &vsi->tx_queues[vqpi->txq.queue_id]; 979 980 txq->desc_count = vqpi->txq.ring_len; 981 txq->tx_paddr = vqpi->txq.dma_ring_addr; 982 txq->q_handle = vqpi->txq.queue_id; 983 txq->tc = 0; 984 985 rxq = &vsi->rx_queues[vqpi->rxq.queue_id]; 986 987 rxq->desc_count = vqpi->rxq.ring_len; 988 rxq->rx_paddr = vqpi->rxq.dma_ring_addr; 989 vsi->mbuf_sz = vqpi->rxq.databuffer_size; 990 } 991 992 /* Configure TX queues in HW */ 993 error = ice_cfg_vsi_for_tx(vsi); 994 if (error) { 995 device_printf(dev, 996 "VF-%d: Unable to configure VSI for Tx: %s\n", 997 vf->vf_num, ice_err_str(error)); 998 status = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; 999 goto done; 1000 } 1001 1002 /* Configure RX queues in HW */ 1003 error = ice_cfg_vsi_for_rx(vsi); 1004 if (error) { 1005 device_printf(dev, 1006 "VF-%d: Unable to configure VSI for Rx: %s\n", 1007 vf->vf_num, ice_err_str(error)); 1008 status = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; 1009 ice_vsi_disable_tx(vsi); 1010 goto done; 1011 } 1012 1013 done: 1014 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_VSI_QUEUES, 1015 status, NULL, 0, NULL); 1016 } 1017 1018 
/**
 * ice_vc_cfg_rss_key_msg - Handle VIRTCHNL_OP_CONFIG_RSS_KEY msg from VF
 * @sc: PF's softc structure
 * @vf: VF tracking structure
 * @msg_buf: message buffer from VF
 *
 * Sets the RSS key for the given VF, using the contents of msg_buf.
 * Always replies to the VF, with either VIRTCHNL_STATUS_SUCCESS or an
 * error status when validation or the admin queue command fails.
 */
static void
ice_vc_cfg_rss_key_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
{
	struct ice_aqc_get_set_rss_keys keydata =
	    { .standard_rss_key = {0}, .extended_hash_key = {0} };
	struct ice_hw *hw = &sc->hw;
	struct virtchnl_rss_key *vrk;
	int status = 0;
	enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
	struct ice_vsi *vsi = vf->vsi;

	vrk = (struct virtchnl_rss_key *)msg_buf;

	/* The VF may only configure the RSS key of its own VSI */
	if (vrk->vsi_id != vsi->idx) {
		device_printf(sc->dev,
		    "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n",
		    vf->vf_num, vsi->idx, vrk->vsi_id);
		v_status = VIRTCHNL_STATUS_ERR_PARAM;
		goto done;
	}

	/*
	 * The key must be non-empty and no larger than the standard +
	 * extended hash key storage in the AQ buffer; this check also
	 * bounds the memcpy() below so it cannot overrun keydata.
	 */
	if ((vrk->key_len >
	    (ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE +
	    ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE)) ||
	    vrk->key_len == 0) {
		v_status = VIRTCHNL_STATUS_ERR_PARAM;
		goto done;
	}

	memcpy(&keydata, vrk->key, vrk->key_len);

	status = ice_aq_set_rss_key(hw, vsi->idx, &keydata);
	if (status) {
		device_printf(sc->dev,
		    "ice_aq_set_rss_key status %s, error %s\n",
		    ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
		v_status = ice_iov_err_to_virt_err(status);
		goto done;
	}

done:
	/* Always reply to the VF, with either success or the error code */
	ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_RSS_KEY,
	    v_status, NULL, 0, NULL);
}

/**
 * ice_vc_cfg_rss_lut_msg - Handle VIRTCHNL_OP_CONFIG_RSS_LUT msg from VF
 * @sc: PF's softc structure
 * @vf: VF tracking structure
 * @msg_buf: message buffer from VF
 *
 * Adds the LUT from the VF in msg_buf to the PF via an admin queue call.
 */
static void
ice_vc_cfg_rss_lut_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
{
	struct ice_hw *hw = &sc->hw;
	struct virtchnl_rss_lut *vrl;
	int status = 0;
	enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
	struct ice_aq_get_set_rss_lut_params lut_params = {};
	struct ice_vsi *vsi = vf->vsi;

	vrl = (struct virtchnl_rss_lut *)msg_buf;

	/* The VF may only configure the LUT of its own VSI */
	if (vrl->vsi_id != vsi->idx) {
		device_printf(sc->dev,
		    "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n",
		    vf->vf_num, vsi->idx, vrl->vsi_id);
		v_status = VIRTCHNL_STATUS_ERR_PARAM;
		goto done;
	}

	/* Reject LUTs larger than the per-VSI hash LUT in HW */
	if (vrl->lut_entries > ICE_VSIQF_HLUT_ARRAY_SIZE) {
		v_status = VIRTCHNL_STATUS_ERR_PARAM;
		goto done;
	}

	/*
	 * NOTE(review): lut_size is taken from the VSI's configured table
	 * size rather than from vrl->lut_entries — this assumes the VF
	 * always supplies a full-sized LUT buffer; confirm against the
	 * virtchnl contract.
	 */
	lut_params.vsi_handle = vsi->idx;
	lut_params.lut_size = vsi->rss_table_size;
	lut_params.lut_type = vsi->rss_lut_type;
	lut_params.lut = vrl->lut;
	lut_params.global_lut_id = 0;

	status = ice_aq_set_rss_lut(hw, &lut_params);
	if (status) {
		device_printf(sc->dev,
		    "VF-%d: Cannot set RSS lut, err %s aq_err %s\n",
		    vf->vf_num, ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		v_status = ice_iov_err_to_virt_err(status);
	}

done:
	/* Always reply to the VF, with either success or the error code */
	ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_RSS_LUT,
	    v_status, NULL, 0, NULL);
}

/**
 * ice_vc_set_rss_hena_msg - Handle VIRTCHNL_OP_SET_RSS_HENA msg from VF
 * @sc: PF's softc structure
 * @vf: VF tracking structure
 * @msg_buf: message buffer from VF
 *
 * Adds the VF's hena (hash enable) bits as flow types to the PF's RSS flow
 * type list.
 */
static void
ice_vc_set_rss_hena_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
{
	struct ice_hw *hw = &sc->hw;
	struct virtchnl_rss_hena *vrh;
	int status = 0;
	enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
	struct ice_vsi *vsi = vf->vsi;

	MPASS(vsi != NULL);

	vrh = (struct virtchnl_rss_hena *)msg_buf;

	/*
	 * Remove existing configuration to make sure only requested
	 * config is applied and allow VFs to disable RSS completely.
	 */
	status = ice_rem_vsi_rss_cfg(hw, vsi->idx);
	if (vrh->hena) {
		/*
		 * Problem with removing config is not fatal, when new one
		 * is requested. Warn about it but try to apply new config
		 * anyway.
		 */
		if (status)
			device_printf(sc->dev,
			    "ice_rem_vsi_rss_cfg status %s, error %s\n",
			    ice_status_str(status),
			    ice_aq_str(hw->adminq.sq_last_status));
		status = ice_add_avf_rss_cfg(hw, vsi->idx, vrh->hena);
		if (status)
			device_printf(sc->dev,
			    "ice_add_avf_rss_cfg status %s, error %s\n",
			    ice_status_str(status),
			    ice_aq_str(hw->adminq.sq_last_status));
	}
	/*
	 * When hena == 0 (RSS disable request) the removal status above is
	 * what gets reported back to the VF.
	 */
	v_status = ice_iov_err_to_virt_err(status);
	ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_SET_RSS_HENA,
	    v_status, NULL, 0, NULL);
}

/**
 * ice_vc_enable_queues_msg - Handle VIRTCHNL_OP_ENABLE_QUEUES msg from VF
 * @sc: PF's softc structure
 * @vf: VF tracking structure
 * @msg_buf: message buffer from VF
 *
 * Enables VF queues selected in msg_buf for Tx/Rx traffic.
 *
 * @remark Only actually operates on Rx queues; Tx queues are enabled in
 * CONFIG_VSI_QUEUES message handler.
 */
static void
ice_vc_enable_queues_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
{
	struct ice_hw *hw = &sc->hw;
	struct virtchnl_queue_select *vqs;
	enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
	struct ice_vsi *vsi = vf->vsi;
	int bit, error = 0;

	vqs = (struct virtchnl_queue_select *)msg_buf;

	/* The VF may only enable queues on its own VSI */
	if (vqs->vsi_id != vsi->idx) {
		device_printf(sc->dev,
		    "%s: VF-%d: Message has invalid VSI ID (expected %d, got %d)\n",
		    __func__, vf->vf_num, vsi->idx, vqs->vsi_id);
		v_status = VIRTCHNL_STATUS_ERR_PARAM;
		goto done;
	}

	if (!vqs->rx_queues && !vqs->tx_queues) {
		device_printf(sc->dev,
		    "%s: VF-%d: message queue masks are empty\n",
		    __func__, vf->vf_num);
		v_status = VIRTCHNL_STATUS_ERR_PARAM;
		goto done;
	}

	/*
	 * Validate rx_queue mask: fls() returns the highest set bit
	 * position (1-based), so it may be at most the queue count.
	 */
	bit = fls(vqs->rx_queues);
	if (bit > vsi->num_rx_queues) {
		device_printf(sc->dev,
		    "%s: VF-%d: message's rx_queues map (0x%08x) has invalid bit set (%d)\n",
		    __func__, vf->vf_num, vqs->rx_queues, bit);
		v_status = VIRTCHNL_STATUS_ERR_PARAM;
		goto done;
	}

	/* Tx ring enable is handled in an earlier message. */
	for_each_set_bit(bit, &vqs->rx_queues, 32) {
		error = ice_control_rx_queue(vsi, bit, true);
		if (error) {
			device_printf(sc->dev,
			    "Unable to enable Rx ring %d for receive: %s\n",
			    bit, ice_err_str(error));
			v_status = VIRTCHNL_STATUS_ERR_PARAM;
			goto done;
		}
	}

done:
	/* Always reply to the VF, with either success or the error code */
	ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_ENABLE_QUEUES,
	    v_status, NULL, 0, NULL);
}

/**
 * ice_vc_disable_queues_msg - Handle VIRTCHNL_OP_DISABLE_QUEUES msg
 * @sc: PF's softc structure
 * @vf: VF tracking structure
 * @msg_buf: message buffer from VF
 *
 * Disables all VF queues for the VF's VSI.
1246 * 1247 * @remark Unlike the ENABLE_QUEUES handler, this operates on both 1248 * Tx and Rx queues 1249 */ 1250 static void 1251 ice_vc_disable_queues_msg(struct ice_softc *sc, struct ice_vf *vf, 1252 u8 *msg_buf __unused) 1253 { 1254 struct ice_hw *hw = &sc->hw; 1255 enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS; 1256 struct ice_vsi *vsi = vf->vsi; 1257 int error = 0; 1258 1259 error = ice_control_all_rx_queues(vsi, false); 1260 if (error) { 1261 device_printf(sc->dev, 1262 "Unable to disable Rx rings for transmit: %s\n", 1263 ice_err_str(error)); 1264 v_status = VIRTCHNL_STATUS_ERR_PARAM; 1265 goto done; 1266 } 1267 1268 error = ice_vsi_disable_tx(vsi); 1269 if (error) { 1270 /* Already prints an error message */ 1271 v_status = VIRTCHNL_STATUS_ERR_PARAM; 1272 } 1273 1274 done: 1275 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_DISABLE_QUEUES, 1276 v_status, NULL, 0, NULL); 1277 } 1278 1279 /** 1280 * ice_vc_cfg_irq_map_msg - Handle VIRTCHNL_OP_CFG_IRQ_MAP msg from VF 1281 * @sc: PF's softc structure 1282 * @vf: VF tracking structure 1283 * @msg_buf: message buffer from VF 1284 * 1285 * Configures the interrupt vectors described in the message in msg_buf. The 1286 * VF needs to send this message during init, so that queues can be allowed 1287 * to generate interrupts. 
 */
static void
ice_vc_cfg_irq_map_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
{
/* Width of the per-vector txq_map/rxq_map bitmaps in the virtchnl message */
#define ICE_VIRTCHNL_QUEUE_MAP_SIZE 16
	struct ice_hw *hw = &sc->hw;
	struct virtchnl_irq_map_info *vimi;
	struct virtchnl_vector_map *vvm;
	enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
	struct ice_vsi *vsi = vf->vsi;
	u16 vector;

	vimi = (struct virtchnl_irq_map_info *)msg_buf;

	/* The VF cannot map more vectors than it was allocated */
	if (vimi->num_vectors > vf->num_irq_vectors) {
		device_printf(sc->dev,
		    "%s: VF-%d: message has more vectors (%d) than configured for VF (%d)\n",
		    __func__, vf->vf_num, vimi->num_vectors, vf->num_irq_vectors);
		v_status = VIRTCHNL_STATUS_ERR_PARAM;
		goto done;
	}

	vvm = vimi->vecmap;
	/* Save off information from message */
	for (int i = 0; i < vimi->num_vectors; i++, vvm++) {
		struct ice_tx_queue *txq;
		struct ice_rx_queue *rxq;
		int bit;

		/* Each vector map must target the VF's own VSI */
		if (vvm->vsi_id != vf->vsi->idx) {
			device_printf(sc->dev,
			    "%s: VF-%d: message's VSI ID (%d) does not match VF's (%d) for vector %d\n",
			    __func__, vf->vf_num, vvm->vsi_id, vf->vsi->idx, i);
			v_status = VIRTCHNL_STATUS_ERR_PARAM;
			goto done;
		}

		/* vvm->vector_id is relative to VF space */
		vector = vvm->vector_id;

		if (vector >= vf->num_irq_vectors) {
			device_printf(sc->dev,
			    "%s: VF-%d: message's vector ID (%d) is greater than VF's max ID (%d)\n",
			    __func__, vf->vf_num, vector, vf->num_irq_vectors - 1);
			v_status = VIRTCHNL_STATUS_ERR_PARAM;
			goto done;
		}

		/* The Misc/Admin Queue vector doesn't need mapping */
		if (vector == 0)
			continue;

		/* Record which Tx queues this vector services */
		/* coverity[address_of] */
		for_each_set_bit(bit, &vvm->txq_map, ICE_VIRTCHNL_QUEUE_MAP_SIZE) {
			if (bit >= vsi->num_tx_queues) {
				device_printf(sc->dev,
				    "%s: VF-%d: txq map has invalid bit set\n",
				    __func__, vf->vf_num);
				v_status = VIRTCHNL_STATUS_ERR_PARAM;
				goto done;
			}

			vf->tx_irqvs[vector].me = vector;

			txq = &vsi->tx_queues[bit];
			txq->irqv = &vf->tx_irqvs[vector];
			txq->itr_idx = vvm->txitr_idx;
		}
		/* Record which Rx queues this vector services */
		/* coverity[address_of] */
		for_each_set_bit(bit, &vvm->rxq_map, ICE_VIRTCHNL_QUEUE_MAP_SIZE) {
			if (bit >= vsi->num_rx_queues) {
				device_printf(sc->dev,
				    "%s: VF-%d: rxq map has invalid bit set\n",
				    __func__, vf->vf_num);
				v_status = VIRTCHNL_STATUS_ERR_PARAM;
				goto done;
			}
			vf->rx_irqvs[vector].me = vector;

			rxq = &vsi->rx_queues[bit];
			rxq->irqv = &vf->rx_irqvs[vector];
			rxq->itr_idx = vvm->rxitr_idx;
		}
	}

	/* Write to T/RQCTL registers to actually map vectors to queues */
	for (int i = 0; i < vf->vsi->num_rx_queues; i++)
		if (vsi->rx_queues[i].irqv != NULL)
			ice_configure_rxq_interrupt(hw, vsi->rx_qmap[i],
			    vsi->rx_queues[i].irqv->me, vsi->rx_queues[i].itr_idx);

	for (int i = 0; i < vf->vsi->num_tx_queues; i++)
		if (vsi->tx_queues[i].irqv != NULL)
			ice_configure_txq_interrupt(hw, vsi->tx_qmap[i],
			    vsi->tx_queues[i].irqv->me, vsi->tx_queues[i].itr_idx);

	ice_flush(hw);

done:
	/* Always reply to the VF, with either success or the error code */
	ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_IRQ_MAP,
	    v_status, NULL, 0, NULL);
}

/**
 * ice_eth_stats_to_virtchnl_eth_stats - Convert stats for virtchnl
 * @istats: VSI stats from HW to convert
 * @vstats: stats struct to copy to
 *
 * This function copies all known stats in struct virtchnl_eth_stats from the
 * input struct ice_eth_stats to an output struct virtchnl_eth_stats.
 *
 * @remark These two structure types currently have the same definition up to
 * the size of struct virtchnl_eth_stats (on FreeBSD), but that could change
 * in the future.
1402 */ 1403 static void 1404 ice_eth_stats_to_virtchnl_eth_stats(struct ice_eth_stats *istats, 1405 struct virtchnl_eth_stats *vstats) 1406 { 1407 vstats->rx_bytes = istats->rx_bytes; 1408 vstats->rx_unicast = istats->rx_unicast; 1409 vstats->rx_multicast = istats->rx_multicast; 1410 vstats->rx_broadcast = istats->rx_broadcast; 1411 vstats->rx_discards = istats->rx_discards; 1412 vstats->rx_unknown_protocol = istats->rx_unknown_protocol; 1413 vstats->tx_bytes = istats->tx_bytes; 1414 vstats->tx_unicast = istats->tx_unicast; 1415 vstats->tx_multicast = istats->tx_multicast; 1416 vstats->tx_broadcast = istats->tx_broadcast; 1417 vstats->tx_discards = istats->tx_discards; 1418 vstats->tx_errors = istats->tx_errors; 1419 } 1420 1421 /** 1422 * ice_vc_get_stats_msg - Handle VIRTCHNL_OP_GET_STATS msg 1423 * @sc: device private structure 1424 * @vf: VF tracking structure 1425 * @msg_buf: raw message buffer from the VF 1426 * 1427 * Updates the VF's VSI stats and sends those stats back to the VF. 
1428 */ 1429 static void 1430 ice_vc_get_stats_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf) 1431 { 1432 struct virtchnl_queue_select *vqs; 1433 struct virtchnl_eth_stats stats; 1434 struct ice_vsi *vsi = vf->vsi; 1435 struct ice_hw *hw = &sc->hw; 1436 1437 vqs = (struct virtchnl_queue_select *)msg_buf; 1438 1439 if (vqs->vsi_id != vsi->idx) { 1440 device_printf(sc->dev, 1441 "%s: VF-%d: message has invalid VSI ID %d (VF has VSI ID %d)\n", 1442 __func__, vf->vf_num, vqs->vsi_id, vsi->idx); 1443 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_GET_STATS, 1444 VIRTCHNL_STATUS_ERR_PARAM, NULL, 0, NULL); 1445 } 1446 1447 ice_update_vsi_hw_stats(vf->vsi); 1448 ice_eth_stats_to_virtchnl_eth_stats(&vsi->hw_stats.cur, &stats); 1449 1450 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_GET_STATS, 1451 VIRTCHNL_STATUS_SUCCESS, (u8 *)&stats, 1452 sizeof(struct virtchnl_eth_stats), NULL); 1453 } 1454 1455 /** 1456 * ice_vc_cfg_promisc_mode_msg - Handle VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE 1457 * @sc: PF's softc structure 1458 * @vf: VF tracking structure 1459 * @msg_buf: message buffer from VF 1460 * 1461 * Configures the promiscuous modes for the given VSI in msg_buf. 
 */
static void
ice_vc_cfg_promisc_mode_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
{
	struct ice_hw *hw = &sc->hw;
	struct virtchnl_promisc_info *vpi;
	enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
	int status = 0;
	struct ice_vsi *vsi = vf->vsi;
	ice_declare_bitmap(old_promisc_mask, ICE_PROMISC_MAX);
	ice_declare_bitmap(req_promisc_mask, ICE_PROMISC_MAX);
	ice_declare_bitmap(clear_promisc_mask, ICE_PROMISC_MAX);
	ice_declare_bitmap(set_promisc_mask, ICE_PROMISC_MAX);
	ice_declare_bitmap(old_req_xor_mask, ICE_PROMISC_MAX);
	/* VLAN ID filled in by ice_get_vsi_promisc(); not used here */
	u16 vid;

	vpi = (struct virtchnl_promisc_info *)msg_buf;

	/* Check to see if VF has permission to configure promiscuous mode */
	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
		device_printf(sc->dev,
		    "VF-%d: attempted to configure promiscuous mode\n",
		    vf->vf_num);
		/* Don't reply to VF with an error */
		goto done;
	}

	/* The VF may only configure promiscuous mode on its own VSI */
	if (vpi->vsi_id != vsi->idx) {
		device_printf(sc->dev,
		    "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n",
		    vf->vf_num, vsi->idx, vpi->vsi_id);
		v_status = VIRTCHNL_STATUS_ERR_PARAM;
		goto done;
	}

	/* Reject any flag bits outside the supported set */
	if (vpi->flags & ~ICE_VIRTCHNL_VALID_PROMISC_FLAGS) {
		device_printf(sc->dev,
		    "VF-%d: Message has invalid promiscuous flags set (valid 0x%02x, got 0x%02x)\n",
		    vf->vf_num, ICE_VIRTCHNL_VALID_PROMISC_FLAGS,
		    vpi->flags);
		v_status = VIRTCHNL_STATUS_ERR_PARAM;
		goto done;
	}

	ice_zero_bitmap(req_promisc_mask, ICE_PROMISC_MAX);
	/* Convert virtchnl flags to ice AQ promiscuous mode flags */
	if (vpi->flags & FLAG_VF_UNICAST_PROMISC) {
		ice_set_bit(ICE_PROMISC_UCAST_TX, req_promisc_mask);
		ice_set_bit(ICE_PROMISC_UCAST_RX, req_promisc_mask);
	}
	if (vpi->flags & FLAG_VF_MULTICAST_PROMISC) {
		ice_set_bit(ICE_PROMISC_MCAST_TX, req_promisc_mask);
		ice_set_bit(ICE_PROMISC_MCAST_RX, req_promisc_mask);
	}

	/* Read the VSI's current promiscuous configuration from HW */
	status = ice_get_vsi_promisc(hw, vsi->idx, old_promisc_mask, &vid);
	if (status) {
		device_printf(sc->dev,
		    "VF-%d: Failed to get promiscuous mode mask for VSI %d, err %s aq_err %s\n",
		    vf->vf_num, vsi->idx,
		    ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		v_status = ice_iov_err_to_virt_err(status);
		goto done;
	}

	/*
	 * Figure out what got added and what got removed:
	 * XOR yields the bits that changed; AND with old gives bits to
	 * clear, AND with requested gives bits to set.
	 */
	ice_zero_bitmap(old_req_xor_mask, ICE_PROMISC_MAX);
	ice_xor_bitmap(old_req_xor_mask, old_promisc_mask, req_promisc_mask, ICE_PROMISC_MAX);
	ice_and_bitmap(clear_promisc_mask, old_req_xor_mask, old_promisc_mask, ICE_PROMISC_MAX);
	ice_and_bitmap(set_promisc_mask, old_req_xor_mask, req_promisc_mask, ICE_PROMISC_MAX);

	if (ice_is_any_bit_set(clear_promisc_mask, ICE_PROMISC_MAX)) {
		status = ice_clear_vsi_promisc(hw, vsi->idx,
		    clear_promisc_mask, 0);
		if (status) {
			device_printf(sc->dev,
			    "VF-%d: Failed to clear promiscuous mode for VSI %d, err %s aq_err %s\n",
			    vf->vf_num, vsi->idx,
			    ice_status_str(status),
			    ice_aq_str(hw->adminq.sq_last_status));
			v_status = ice_iov_err_to_virt_err(status);
			goto done;
		}
	}

	if (ice_is_any_bit_set(set_promisc_mask, ICE_PROMISC_MAX)) {
		status = ice_set_vsi_promisc(hw, vsi->idx, set_promisc_mask, 0);
		if (status) {
			device_printf(sc->dev,
			    "VF-%d: Failed to set promiscuous mode for VSI %d, err %s aq_err %s\n",
			    vf->vf_num, vsi->idx,
			    ice_status_str(status),
			    ice_aq_str(hw->adminq.sq_last_status));
			v_status = ice_iov_err_to_virt_err(status);
			goto done;
		}
	}

done:
	/* Always reply to the VF, with either success or the error code */
	ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
	    v_status, NULL, 0, NULL);
}

/**
 * ice_vc_notify_all_vfs_link_state - Notify all VFs of PF link state
 * @sc: device private structure
 *
 * Sends a message to
all VFs about the status of the PF's link
 * state. For more details, @see ice_vc_notify_vf_link_state.
 */
void
ice_vc_notify_all_vfs_link_state(struct ice_softc *sc)
{
	for (int i = 0; i < sc->num_vfs; i++)
		ice_vc_notify_vf_link_state(sc, &sc->vfs[i]);
}

/**
 * ice_vc_notify_vf_link_state - Notify VF of PF link state
 * @sc: device private structure
 * @vf: VF tracking structure
 *
 * Sends an event message to the specified VF with information about
 * the current link state from the PF's port. This includes whether
 * link is up or down, and the link speed in 100Mbps units.
 */
static void
ice_vc_notify_vf_link_state(struct ice_softc *sc, struct ice_vf *vf)
{
	struct virtchnl_pf_event event = {};
	struct ice_hw *hw = &sc->hw;

	event.event = VIRTCHNL_EVENT_LINK_CHANGE;
	event.severity = PF_EVENT_SEVERITY_INFO;
	event.event_data.link_event_adv.link_status = sc->link_up;
	/* Convert the port's link speed to virtchnl units */
	event.event_data.link_event_adv.link_speed =
	    (u32)ice_conv_link_speed_to_virtchnl(true,
	    hw->port_info->phy.link_info.link_speed);

	ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_EVENT,
	    VIRTCHNL_STATUS_SUCCESS, (u8 *)&event, sizeof(event), NULL);
}

/**
 * ice_vc_handle_vf_msg - Handle a message from a VF
 * @sc: device private structure
 * @event: event received from the HW MBX queue
 *
 * Called whenever an event is received from a VF on the HW mailbox queue.
 * Responsible for handling these messages as well as responding to the
 * VF afterwards, depending on the received message type.
 */
void
ice_vc_handle_vf_msg(struct ice_softc *sc, struct ice_rq_event_info *event)
{
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct ice_vf *vf;
	int err = 0;

	/* The mailbox descriptor carries the opcode in cookie_high and the
	 * source VF number in the retval field.
	 */
	u32 v_opcode = event->desc.cookie_high;
	u16 v_id = event->desc.retval;
	u8 *msg = event->msg_buf;
	u16 msglen = event->msg_len;

	/* Ignore messages claiming to come from a VF we never created */
	if (v_id >= sc->num_vfs) {
		device_printf(dev, "%s: Received msg from invalid VF-%d: opcode %d, len %d\n",
		    __func__, v_id, v_opcode, msglen);
		return;
	}

	vf = &sc->vfs[v_id];

	/* Perform basic checks on the msg */
	err = virtchnl_vc_validate_vf_msg(&vf->version, v_opcode, msg, msglen);
	if (err) {
		device_printf(dev, "%s: Received invalid msg from VF-%d: opcode %d, len %d, error %d\n",
		    __func__, vf->vf_num, v_opcode, msglen, err);
		ice_aq_send_msg_to_vf(hw, v_id, v_opcode, VIRTCHNL_STATUS_ERR_PARAM, NULL, 0, NULL);
		return;
	}

	/* Dispatch to the handler for this opcode; each handler is
	 * responsible for sending its own reply to the VF.
	 */
	switch (v_opcode) {
	case VIRTCHNL_OP_VERSION:
		ice_vc_version_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_RESET_VF:
		ice_reset_vf(sc, vf, true);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		ice_vc_get_vf_res_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		ice_vc_add_eth_addr_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		ice_vc_del_eth_addr_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		ice_vc_add_vlan_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		ice_vc_del_vlan_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ice_vc_cfg_vsi_qs_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		ice_vc_cfg_rss_key_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		ice_vc_cfg_rss_lut_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_SET_RSS_HENA:
		ice_vc_set_rss_hena_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		ice_vc_enable_queues_msg(sc, vf, msg);
		/* Queues are live now; tell the VF the current link state */
		ice_vc_notify_vf_link_state(sc, vf);
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		ice_vc_disable_queues_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ice_vc_cfg_irq_map_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_GET_STATS:
		ice_vc_get_stats_msg(sc, vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ice_vc_cfg_promisc_mode_msg(sc, vf, msg);
		break;
	default:
		device_printf(dev, "%s: Received unknown msg from VF-%d: opcode %d, len %d\n",
		    __func__, vf->vf_num, v_opcode, msglen);
		ice_aq_send_msg_to_vf(hw, v_id, v_opcode,
		    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL, 0, NULL);
		break;
	}
}

/**
 * ice_iov_setup_intr_mapping - Setup interrupt config for a VF
 * @sc: device softc structure
 * @vf: driver's VF structure for VF to be configured
 *
 * Before a VF can be used, and after a VF reset, the PF must configure
 * the VF's interrupt allocation registers. This includes allocating
 * interrupts from the PF's interrupt pool to the VF using the
 * VPINT_ALLOC(_PCI) registers, and setting up a mapping from PF vectors
 * to VF vectors in GLINT_VECT2FUNC.
 *
 * As well, this sets up queue allocation registers and maps the mailbox
 * interrupt for the VF.
 */
static void
ice_iov_setup_intr_mapping(struct ice_softc *sc, struct ice_vf *vf)
{
	struct ice_hw *hw = &sc->hw;
	struct ice_vsi *vsi = vf->vsi;
	u16 v;

	/* Calculate indices for register ops below */
	u16 vf_first_irq_idx = vf->vf_imap[0];
	u16 vf_last_irq_idx = (vf_first_irq_idx + vf->num_irq_vectors) - 1;
	u16 abs_vf_first_irq_idx = hw->func_caps.common_cap.msix_vector_first_id +
	    vf_first_irq_idx;
	u16 abs_vf_last_irq_idx = (abs_vf_first_irq_idx + vf->num_irq_vectors) - 1;
	u16 abs_vf_num = vf->vf_num + hw->func_caps.vf_base_id;

	/* Map out VF interrupt allocation in global device space. Both
	 * VPINT_ALLOC and VPINT_ALLOC_PCI use the same values.
	 */
	wr32(hw, VPINT_ALLOC(vf->vf_num),
	    (((abs_vf_first_irq_idx << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
	    ((abs_vf_last_irq_idx << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
	    VPINT_ALLOC_VALID_M));
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_num),
	    (((abs_vf_first_irq_idx << VPINT_ALLOC_PCI_FIRST_S) & VPINT_ALLOC_PCI_FIRST_M) |
	    ((abs_vf_last_irq_idx << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
	    VPINT_ALLOC_PCI_VALID_M));

	/* Create inverse mapping of vectors to PF/VF combinations */
	for (v = vf_first_irq_idx; v <= vf_last_irq_idx; v++) {
		wr32(hw, GLINT_VECT2FUNC(v),
		    (((abs_vf_num << GLINT_VECT2FUNC_VF_NUM_S) & GLINT_VECT2FUNC_VF_NUM_M) |
		    ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & GLINT_VECT2FUNC_PF_NUM_M)));
	}

	/* Map mailbox interrupt to MSI-X index 0. Disable ITR for it, too. */
	wr32(hw, VPINT_MBX_CTL(abs_vf_num),
	    ((0 << VPINT_MBX_CTL_MSIX_INDX_S) & VPINT_MBX_CTL_MSIX_INDX_M) |
	    ((0x3 << VPINT_MBX_CTL_ITR_INDX_S) & VPINT_MBX_CTL_ITR_INDX_M) |
	    VPINT_MBX_CTL_CAUSE_ENA_M);

	/* Mark the TX queue mapping registers as valid */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_num), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* Indicate to HW that VF has scattered queue allocation */
	wr32(hw, VPLAN_TX_QBASE(vf->vf_num), VPLAN_TX_QBASE_VFQTABLE_ENA_M);
	for (int i = 0; i < vsi->num_tx_queues; i++) {
		wr32(hw, VPLAN_TX_QTABLE(i, vf->vf_num),
		    (vsi->tx_qmap[i] << VPLAN_TX_QTABLE_QINDEX_S) & VPLAN_TX_QTABLE_QINDEX_M);
	}

	/* Mark the RX queue mapping registers as valid */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_num), VPLAN_RXQ_MAPENA_RX_ENA_M);
	wr32(hw, VPLAN_RX_QBASE(vf->vf_num), VPLAN_RX_QBASE_VFQTABLE_ENA_M);
	for (int i = 0; i < vsi->num_rx_queues; i++) {
		wr32(hw, VPLAN_RX_QTABLE(i, vf->vf_num),
		    (vsi->rx_qmap[i] << VPLAN_RX_QTABLE_QINDEX_S) & VPLAN_RX_QTABLE_QINDEX_M);
	}
}

/**
 * ice_iov_err_to_virt_err - translate ice errors into virtchnl errors
 * @ice_err: status returned from ice function
 *
 * @returns the virtchnl_status_code that most closely corresponds to the
 * given ice driver status code.
 */
static enum virtchnl_status_code
ice_iov_err_to_virt_err(int ice_err)
{
	switch (ice_err) {
	case 0:
		return VIRTCHNL_STATUS_SUCCESS;
	case ICE_ERR_BAD_PTR:
	case ICE_ERR_INVAL_SIZE:
	case ICE_ERR_DEVICE_NOT_SUPPORTED:
	case ICE_ERR_PARAM:
	case ICE_ERR_CFG:
		return VIRTCHNL_STATUS_ERR_PARAM;
	case ICE_ERR_NO_MEMORY:
		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
	case ICE_ERR_NOT_READY:
	case ICE_ERR_RESET_FAILED:
	case ICE_ERR_FW_API_VER:
	case ICE_ERR_AQ_ERROR:
	case ICE_ERR_AQ_TIMEOUT:
	case ICE_ERR_AQ_FULL:
	case ICE_ERR_AQ_NO_WORK:
	case ICE_ERR_AQ_EMPTY:
		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
	default:
		/* Unrecognized codes fall back to "not supported" */
		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
	}
}
1810