Lines Matching +full:tx +full:- +full:mailbox +full:- +full:count
1 /* SPDX-License-Identifier: BSD-3-Clause */
86 * ice_iov_attach - Initialize SR-IOV PF host support
89 * Initialize SR-IOV PF host support at the end of the driver attach process.
94 * - ENOMEM if there is no memory for the PF/VF schemas or iov device
95 * - ENXIO if the device isn't PCI-E or doesn't support the same SR-IOV
97 * - ENOENT if the device doesn't have the SR-IOV capability
102 device_t dev = sc->dev; in ice_iov_attach()
109 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL); in ice_iov_attach()
110 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof", in ice_iov_attach()
112 pci_iov_schema_add_bool(vf_schema, "allow-set-mac", in ice_iov_attach()
114 pci_iov_schema_add_bool(vf_schema, "allow-promisc", in ice_iov_attach()
116 pci_iov_schema_add_uint16(vf_schema, "num-queues", in ice_iov_attach()
118 pci_iov_schema_add_uint16(vf_schema, "mirror-src-vsi", in ice_iov_attach()
120 pci_iov_schema_add_uint16(vf_schema, "max-vlan-allowed", in ice_iov_attach()
122 pci_iov_schema_add_uint16(vf_schema, "max-mac-filters", in ice_iov_attach()
130 ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en); in ice_iov_attach()
132 ice_set_bit(ICE_FEATURE_SRIOV, sc->feat_en); in ice_iov_attach()
138 * ice_iov_detach - Teardown SR-IOV PF host support
141 * Teardown SR-IOV PF host support at the start of the driver detach process.
144 * - EBUSY if VFs still exist
149 device_t dev = sc->dev; in ice_iov_detach()
163 * ice_iov_init - Called by the OS before the first VF is created.
174 sc->vfs = (struct ice_vf *)malloc(sizeof(struct ice_vf) * num_vfs, M_ICE, M_NOWAIT | in ice_iov_init()
176 if (sc->vfs == NULL) in ice_iov_init()
181 sc->vfs[i].vf_num = i; in ice_iov_init()
184 sc->num_vfs = num_vfs; in ice_iov_init()
190 * ice_iov_get_vf - Get pointer to VF at given index
202 MPASS(vf_num < sc->num_vfs); in ice_iov_get_vf()
204 return &sc->vfs[vf_num]; in ice_iov_get_vf()
208 * ice_iov_add_vf - Called by the OS for each VF to create
220 device_t dev = sc->dev; in ice_iov_add_vf()
230 vf->vf_flags = VF_FLAG_ENABLED; in ice_iov_add_vf()
236 vf->vsi = vsi; in ice_iov_add_vf()
237 vsi->vf_num = vfnum; in ice_iov_add_vf()
239 vf_num_queues = nvlist_get_number(params, "num-queues"); in ice_iov_add_vf()
242 device_printf(dev, "Invalid num-queues (%d) for VF %d\n", in ice_iov_add_vf()
243 vf_num_queues, vf->vf_num); in ice_iov_add_vf()
245 device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num); in ice_iov_add_vf()
248 device_printf(dev, "Setting VF %d num-queues to %d\n", in ice_iov_add_vf()
249 vf->vf_num, ICE_MAX_SCATTERED_QUEUES); in ice_iov_add_vf()
252 vsi->qmap_type = ICE_RESMGR_ALLOC_SCATTERED; in ice_iov_add_vf()
256 vsi->num_tx_queues = vsi->num_rx_queues = vf_num_queues; in ice_iov_add_vf()
258 /* Assign Tx queues from PF space */ in ice_iov_add_vf()
259 error = ice_resmgr_assign_scattered(&sc->tx_qmgr, vsi->tx_qmap, in ice_iov_add_vf()
260 vsi->num_tx_queues); in ice_iov_add_vf()
262 device_printf(sc->dev, "Unable to assign VF Tx queues: %s\n", in ice_iov_add_vf()
268 error = ice_resmgr_assign_scattered(&sc->rx_qmgr, vsi->rx_qmap, in ice_iov_add_vf()
269 vsi->num_rx_queues); in ice_iov_add_vf()
271 device_printf(sc->dev, "Unable to assign VF Rx queues: %s\n", in ice_iov_add_vf()
276 vsi->max_frame_size = ICE_MAX_FRAME_SIZE; in ice_iov_add_vf()
279 vsi->tx_queues = (struct ice_tx_queue *) in ice_iov_add_vf()
280 malloc(sizeof(struct ice_tx_queue) * vsi->num_tx_queues, M_ICE, in ice_iov_add_vf()
282 if (!vsi->tx_queues) { in ice_iov_add_vf()
283 device_printf(sc->dev, "VF-%d: Unable to allocate Tx queue memory\n", in ice_iov_add_vf()
288 for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) { in ice_iov_add_vf()
289 txq->me = i; in ice_iov_add_vf()
290 txq->vsi = vsi; in ice_iov_add_vf()
294 vsi->rx_queues = (struct ice_rx_queue *) in ice_iov_add_vf()
295 malloc(sizeof(struct ice_rx_queue) * vsi->num_rx_queues, M_ICE, in ice_iov_add_vf()
297 if (!vsi->rx_queues) { in ice_iov_add_vf()
298 device_printf(sc->dev, "VF-%d: Unable to allocate Rx queue memory\n", in ice_iov_add_vf()
303 for (i = 0, rxq = vsi->rx_queues; i < vsi->num_rx_queues; i++, rxq++) { in ice_iov_add_vf()
304 rxq->me = i; in ice_iov_add_vf()
305 rxq->vsi = vsi; in ice_iov_add_vf()
309 vf->num_irq_vectors = vf_num_queues + 1; in ice_iov_add_vf()
310 vf->tx_irqvs = (struct ice_irq_vector *) in ice_iov_add_vf()
311 malloc(sizeof(struct ice_irq_vector) * (vf->num_irq_vectors), in ice_iov_add_vf()
313 if (!vf->tx_irqvs) { in ice_iov_add_vf()
314 device_printf(sc->dev, in ice_iov_add_vf()
315 "Unable to allocate TX irqv memory for VF-%d's %d vectors\n", in ice_iov_add_vf()
316 vfnum, vf->num_irq_vectors); in ice_iov_add_vf()
320 vf->rx_irqvs = (struct ice_irq_vector *) in ice_iov_add_vf()
321 malloc(sizeof(struct ice_irq_vector) * (vf->num_irq_vectors), in ice_iov_add_vf()
323 if (!vf->rx_irqvs) { in ice_iov_add_vf()
324 device_printf(sc->dev, in ice_iov_add_vf()
325 "Unable to allocate RX irqv memory for VF-%d's %d vectors\n", in ice_iov_add_vf()
326 vfnum, vf->num_irq_vectors); in ice_iov_add_vf()
332 if (!(vf->vf_imap = in ice_iov_add_vf()
333 (u16 *)malloc(sizeof(u16) * vf->num_irq_vectors, in ice_iov_add_vf()
335 device_printf(dev, "Unable to allocate VF-%d imap memory\n", vfnum); in ice_iov_add_vf()
339 error = ice_resmgr_assign_contiguous(&sc->dev_imgr, vf->vf_imap, vf->num_irq_vectors); in ice_iov_add_vf()
341 device_printf(dev, "Unable to assign VF-%d interrupt mapping: %s\n", in ice_iov_add_vf()
346 if (nvlist_exists_binary(params, "mac-addr")) { in ice_iov_add_vf()
347 mac = nvlist_get_binary(params, "mac-addr", &size); in ice_iov_add_vf()
348 memcpy(vf->mac, mac, ETHER_ADDR_LEN); in ice_iov_add_vf()
350 if (nvlist_get_bool(params, "allow-set-mac")) in ice_iov_add_vf()
351 vf->vf_flags |= VF_FLAG_SET_MAC_CAP; in ice_iov_add_vf()
357 vf->vf_flags |= VF_FLAG_SET_MAC_CAP; in ice_iov_add_vf()
359 if (nvlist_get_bool(params, "mac-anti-spoof")) in ice_iov_add_vf()
360 vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF; in ice_iov_add_vf()
362 if (nvlist_get_bool(params, "allow-promisc")) in ice_iov_add_vf()
363 vf->vf_flags |= VF_FLAG_PROMISC_CAP; in ice_iov_add_vf()
365 vsi->mirror_src_vsi = nvlist_get_number(params, "mirror-src-vsi"); in ice_iov_add_vf()
367 vf->vlan_limit = nvlist_get_number(params, "max-vlan-allowed"); in ice_iov_add_vf()
368 vf->mac_filter_limit = nvlist_get_number(params, "max-mac-filters"); in ice_iov_add_vf()
370 vf->vf_flags |= VF_FLAG_VLAN_CAP; in ice_iov_add_vf()
375 device_printf(sc->dev, "Unable to initialize VF %d VSI: %s\n", in ice_iov_add_vf()
383 device_printf(sc->dev, "Unable to add broadcast filter VF %d VSI: %s\n", in ice_iov_add_vf()
393 ice_resmgr_release_map(&sc->dev_imgr, vf->vf_imap, in ice_iov_add_vf()
394 vf->num_irq_vectors); in ice_iov_add_vf()
396 free(vf->vf_imap, M_ICE); in ice_iov_add_vf()
397 vf->vf_imap = NULL; in ice_iov_add_vf()
399 free(vf->rx_irqvs, M_ICE); in ice_iov_add_vf()
400 vf->rx_irqvs = NULL; in ice_iov_add_vf()
402 free(vf->tx_irqvs, M_ICE); in ice_iov_add_vf()
403 vf->tx_irqvs = NULL; in ice_iov_add_vf()
405 free(vsi->rx_queues, M_ICE); in ice_iov_add_vf()
406 vsi->rx_queues = NULL; in ice_iov_add_vf()
408 free(vsi->tx_queues, M_ICE); in ice_iov_add_vf()
409 vsi->tx_queues = NULL; in ice_iov_add_vf()
412 vf->vsi = NULL; in ice_iov_add_vf()
417 * ice_iov_uninit - Called by the OS when VFs are destroyed
426 /* Release per-VF resources */ in ice_iov_uninit()
427 for (int i = 0; i < sc->num_vfs; i++) { in ice_iov_uninit()
428 vf = &sc->vfs[i]; in ice_iov_uninit()
429 vsi = vf->vsi; in ice_iov_uninit()
432 if (vf->vf_imap) { in ice_iov_uninit()
433 free(vf->vf_imap, M_ICE); in ice_iov_uninit()
434 vf->vf_imap = NULL; in ice_iov_uninit()
438 if (vf->tx_irqvs) { in ice_iov_uninit()
439 free(vf->tx_irqvs, M_ICE); in ice_iov_uninit()
440 vf->tx_irqvs = NULL; in ice_iov_uninit()
442 if (vf->rx_irqvs) { in ice_iov_uninit()
443 free(vf->rx_irqvs, M_ICE); in ice_iov_uninit()
444 vf->rx_irqvs = NULL; in ice_iov_uninit()
451 if (vsi->tx_queues) { in ice_iov_uninit()
452 free(vsi->tx_queues, M_ICE); in ice_iov_uninit()
453 vsi->tx_queues = NULL; in ice_iov_uninit()
455 if (vsi->rx_queues) { in ice_iov_uninit()
456 free(vsi->rx_queues, M_ICE); in ice_iov_uninit()
457 vsi->rx_queues = NULL; in ice_iov_uninit()
461 vf->vsi = NULL; in ice_iov_uninit()
465 if (sc->vfs) { in ice_iov_uninit()
466 free(sc->vfs, M_ICE); in ice_iov_uninit()
467 sc->vfs = NULL; in ice_iov_uninit()
469 sc->num_vfs = 0; in ice_iov_uninit()
473 * ice_iov_handle_vflr - Process VFLR event
476 * Identifies which VFs have been reset and re-configure
482 struct ice_hw *hw = &sc->hw; in ice_iov_handle_vflr()
486 for (int i = 0; i < sc->num_vfs; i++) { in ice_iov_handle_vflr()
487 vf = &sc->vfs[i]; in ice_iov_handle_vflr()
489 reg_idx = (hw->func_caps.vf_base_id + vf->vf_num) / 32; in ice_iov_handle_vflr()
490 bit_idx = (hw->func_caps.vf_base_id + vf->vf_num) % 32; in ice_iov_handle_vflr()
498 * ice_iov_ready_vf - Setup VF interrupts and mark it as ready
502 * Clears VF reset triggering bit, sets up the PF<->VF interrupt
509 struct ice_hw *hw = &sc->hw; in ice_iov_ready_vf()
513 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_num)); in ice_iov_ready_vf()
515 wr32(hw, VPGEN_VFRTRIG(vf->vf_num), reg); in ice_iov_ready_vf()
521 wr32(hw, VFGEN_RSTAT(vf->vf_num), VIRTCHNL_VFR_VFACTIVE); in ice_iov_ready_vf()
527 * ice_reset_vf - Perform a hardware reset (VFR) on a VF
536 * @remark This also sets up the PF<->VF interrupt mapping and allocations in
544 struct ice_hw *hw = &sc->hw; in ice_reset_vf()
549 global_vf_num = vf->vf_num + hw->func_caps.vf_base_id; in ice_reset_vf()
552 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_num)); in ice_reset_vf()
554 wr32(hw, VPGEN_VFRTRIG(vf->vf_num), reg); in ice_reset_vf()
575 device_printf(sc->dev, in ice_reset_vf()
576 "VF-%d PCI transactions stuck\n", vf->vf_num); in ice_reset_vf()
578 /* Disable TX queues, which is required during VF reset */ in ice_reset_vf()
579 status = ice_dis_vsi_txq(hw->port_info, vf->vsi->idx, 0, 0, NULL, NULL, in ice_reset_vf()
580 NULL, ICE_VF_RESET, vf->vf_num, NULL); in ice_reset_vf()
582 device_printf(sc->dev, in ice_reset_vf()
583 "%s: Failed to disable LAN Tx queues: err %s aq_err %s\n", in ice_reset_vf()
585 ice_aq_str(hw->adminq.sq_last_status)); in ice_reset_vf()
589 reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_num)); in ice_reset_vf()
596 device_printf(sc->dev, in ice_reset_vf()
597 "VF-%d Reset is stuck\n", vf->vf_num); in ice_reset_vf()
603 * ice_vc_get_vf_res_msg - Handle VIRTCHNL_OP_GET_VF_RESOURCES msg from VF
618 struct ice_hw *hw = &sc->hw; in ice_vc_get_vf_res_msg()
629 vf_res->num_vsis = 1; in ice_vc_get_vf_res_msg()
630 vf_res->num_queue_pairs = vf->vsi->num_tx_queues; in ice_vc_get_vf_res_msg()
631 vf_res->max_vectors = vf_res->num_queue_pairs + 1; in ice_vc_get_vf_res_msg()
633 vf_res->rss_key_size = ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE; in ice_vc_get_vf_res_msg()
634 vf_res->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE; in ice_vc_get_vf_res_msg()
635 vf_res->max_mtu = 0; in ice_vc_get_vf_res_msg()
637 vf_res->vf_cap_flags = VF_BASE_MODE_OFFLOADS; in ice_vc_get_vf_res_msg()
642 vf_res->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED; in ice_vc_get_vf_res_msg()
645 vf_res->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; in ice_vc_get_vf_res_msg()
648 vsi_res = &vf_res->vsi_res[0]; in ice_vc_get_vf_res_msg()
649 vsi_res->vsi_id = vf->vsi->idx; in ice_vc_get_vf_res_msg()
650 vsi_res->num_queue_pairs = vf->vsi->num_tx_queues; in ice_vc_get_vf_res_msg()
651 vsi_res->vsi_type = VIRTCHNL_VSI_SRIOV; in ice_vc_get_vf_res_msg()
652 vsi_res->qset_handle = 0; in ice_vc_get_vf_res_msg()
653 if (!ETHER_IS_ZERO(vf->mac)) in ice_vc_get_vf_res_msg()
654 memcpy(vsi_res->default_mac_addr, vf->mac, ETHER_ADDR_LEN); in ice_vc_get_vf_res_msg()
656 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_GET_VF_RESOURCES, in ice_vc_get_vf_res_msg()
663 * ice_vc_version_msg - Handle VIRTCHNL_OP_VERSION msg from VF
678 struct ice_hw *hw = &sc->hw; in ice_vc_version_msg()
679 device_t dev = sc->dev; in ice_vc_version_msg()
685 vf->version.major = 1; in ice_vc_version_msg()
686 vf->version.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; in ice_vc_version_msg()
688 vf->version.major = VIRTCHNL_VERSION_MAJOR; in ice_vc_version_msg()
689 vf->version.minor = VIRTCHNL_VERSION_MINOR; in ice_vc_version_msg()
691 if ((recv_vf_version->major != VIRTCHNL_VERSION_MAJOR) || in ice_vc_version_msg()
692 (recv_vf_version->minor != VIRTCHNL_VERSION_MINOR)) in ice_vc_version_msg()
694 "%s: VF-%d requested version (%d.%d) differs from PF version (%d.%d)\n", in ice_vc_version_msg()
695 __func__, vf->vf_num, in ice_vc_version_msg()
696 recv_vf_version->major, recv_vf_version->minor, in ice_vc_version_msg()
700 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_VERSION, in ice_vc_version_msg()
701 VIRTCHNL_STATUS_SUCCESS, (u8 *)&vf->version, sizeof(vf->version), in ice_vc_version_msg()
706 * ice_vf_validate_mac - Validate MAC address before adding it
728 if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) && in ice_vf_validate_mac()
729 !(ETHER_IS_MULTICAST(addr) || !bcmp(addr, vf->mac, ETHER_ADDR_LEN))) in ice_vf_validate_mac()
736 * ice_vc_add_eth_addr_msg - Handle VIRTCHNL_OP_ADD_ETH_ADDR msg from VF
749 struct ice_hw *hw = &sc->hw; in ice_vc_add_eth_addr_msg()
755 if (addr_list->num_elements > in ice_vc_add_eth_addr_msg()
756 (vf->mac_filter_limit - vf->mac_filter_cnt)) { in ice_vc_add_eth_addr_msg()
761 for (int i = 0; i < addr_list->num_elements; i++) { in ice_vc_add_eth_addr_msg()
762 u8 *addr = addr_list->list[i].addr; in ice_vc_add_eth_addr_msg()
770 device_printf(sc->dev, in ice_vc_add_eth_addr_msg()
771 "%s: VF-%d: Not permitted to add MAC addr for VSI %d\n", in ice_vc_add_eth_addr_msg()
772 __func__, vf->vf_num, vf->vsi->idx); in ice_vc_add_eth_addr_msg()
776 device_printf(sc->dev, in ice_vc_add_eth_addr_msg()
777 "%s: VF-%d: Did not add invalid MAC addr for VSI %d\n", in ice_vc_add_eth_addr_msg()
778 __func__, vf->vf_num, vf->vsi->idx); in ice_vc_add_eth_addr_msg()
783 error = ice_add_vsi_mac_filter(vf->vsi, addr); in ice_vc_add_eth_addr_msg()
785 device_printf(sc->dev, in ice_vc_add_eth_addr_msg()
786 "%s: VF-%d: Error adding MAC addr for VSI %d\n", in ice_vc_add_eth_addr_msg()
787 __func__, vf->vf_num, vf->vsi->idx); in ice_vc_add_eth_addr_msg()
791 /* Don't count VF's MAC against its MAC filter limit */ in ice_vc_add_eth_addr_msg()
792 if (memcmp(addr, vf->mac, ETHER_ADDR_LEN)) in ice_vc_add_eth_addr_msg()
796 vf->mac_filter_cnt += added_addr_cnt; in ice_vc_add_eth_addr_msg()
799 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_ADD_ETH_ADDR, in ice_vc_add_eth_addr_msg()
804 * ice_vc_del_eth_addr_msg - Handle VIRTCHNL_OP_DEL_ETH_ADDR msg from VF
817 struct ice_hw *hw = &sc->hw; in ice_vc_del_eth_addr_msg()
823 for (int i = 0; i < addr_list->num_elements; i++) { in ice_vc_del_eth_addr_msg()
824 error = ice_remove_vsi_mac_filter(vf->vsi, addr_list->list[i].addr); in ice_vc_del_eth_addr_msg()
826 device_printf(sc->dev, in ice_vc_del_eth_addr_msg()
827 "%s: VF-%d: Error removing MAC addr for VSI %d\n", in ice_vc_del_eth_addr_msg()
828 __func__, vf->vf_num, vf->vsi->idx); in ice_vc_del_eth_addr_msg()
832 /* Don't count VF's MAC against its MAC filter limit */ in ice_vc_del_eth_addr_msg()
833 if (memcmp(addr_list->list[i].addr, vf->mac, ETHER_ADDR_LEN)) in ice_vc_del_eth_addr_msg()
837 if (deleted_addr_cnt >= vf->mac_filter_cnt) in ice_vc_del_eth_addr_msg()
838 vf->mac_filter_cnt = 0; in ice_vc_del_eth_addr_msg()
840 vf->mac_filter_cnt -= deleted_addr_cnt; in ice_vc_del_eth_addr_msg()
842 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_DEL_ETH_ADDR, in ice_vc_del_eth_addr_msg()
847 * ice_vc_add_vlan_msg - Handle VIRTCHNL_OP_ADD_VLAN msg from VF
857 struct ice_hw *hw = &sc->hw; in ice_vc_add_vlan_msg()
861 struct ice_vsi *vsi = vf->vsi; in ice_vc_add_vlan_msg()
865 if (vlan_list->vsi_id != vsi->idx) { in ice_vc_add_vlan_msg()
866 device_printf(sc->dev, in ice_vc_add_vlan_msg()
867 "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n", in ice_vc_add_vlan_msg()
868 vf->vf_num, vsi->idx, vlan_list->vsi_id); in ice_vc_add_vlan_msg()
873 if (vlan_list->num_elements > (vf->vlan_limit - vf->vlan_cnt)) { in ice_vc_add_vlan_msg()
878 status = ice_add_vlan_hw_filters(vsi, vlan_list->vlan_id, in ice_vc_add_vlan_msg()
879 vlan_list->num_elements); in ice_vc_add_vlan_msg()
881 device_printf(sc->dev, in ice_vc_add_vlan_msg()
882 "VF-%d: Failure adding VLANs to VSI %d, err %s aq_err %s\n", in ice_vc_add_vlan_msg()
883 vf->vf_num, vsi->idx, ice_status_str(status), in ice_vc_add_vlan_msg()
884 ice_aq_str(sc->hw.adminq.sq_last_status)); in ice_vc_add_vlan_msg()
889 vf->vlan_cnt += vlan_list->num_elements; in ice_vc_add_vlan_msg()
892 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_ADD_VLAN, in ice_vc_add_vlan_msg()
897 * ice_vc_del_vlan_msg - Handle VIRTCHNL_OP_DEL_VLAN msg from VF
907 struct ice_hw *hw = &sc->hw; in ice_vc_del_vlan_msg()
911 struct ice_vsi *vsi = vf->vsi; in ice_vc_del_vlan_msg()
915 if (vlan_list->vsi_id != vsi->idx) { in ice_vc_del_vlan_msg()
916 device_printf(sc->dev, in ice_vc_del_vlan_msg()
917 "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n", in ice_vc_del_vlan_msg()
918 vf->vf_num, vsi->idx, vlan_list->vsi_id); in ice_vc_del_vlan_msg()
923 status = ice_remove_vlan_hw_filters(vsi, vlan_list->vlan_id, in ice_vc_del_vlan_msg()
924 vlan_list->num_elements); in ice_vc_del_vlan_msg()
926 device_printf(sc->dev, in ice_vc_del_vlan_msg()
927 "VF-%d: Failure deleting VLANs from VSI %d, err %s aq_err %s\n", in ice_vc_del_vlan_msg()
928 vf->vf_num, vsi->idx, ice_status_str(status), in ice_vc_del_vlan_msg()
929 ice_aq_str(sc->hw.adminq.sq_last_status)); in ice_vc_del_vlan_msg()
934 if (vlan_list->num_elements >= vf->vlan_cnt) in ice_vc_del_vlan_msg()
935 vf->vlan_cnt = 0; in ice_vc_del_vlan_msg()
937 vf->vlan_cnt -= vlan_list->num_elements; in ice_vc_del_vlan_msg()
940 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_DEL_VLAN, in ice_vc_del_vlan_msg()
945 * ice_vc_validate_ring_len - Check to see if a descriptor ring length is valid
961 * ice_vc_cfg_vsi_qs_msg - Handle VIRTCHNL_OP_CONFIG_VSI_QUEUES msg from VF
969 device_t dev = sc->dev; in ice_vc_cfg_vsi_qs_msg()
970 struct ice_hw *hw = &sc->hw; in ice_vc_cfg_vsi_qs_msg()
974 struct ice_vsi *vsi = vf->vsi; in ice_vc_cfg_vsi_qs_msg()
981 if (vqci->num_queue_pairs > vf->vsi->num_tx_queues && in ice_vc_cfg_vsi_qs_msg()
982 vqci->num_queue_pairs > vf->vsi->num_rx_queues) { in ice_vc_cfg_vsi_qs_msg()
987 ice_vsi_disable_tx(vf->vsi); in ice_vc_cfg_vsi_qs_msg()
988 ice_control_all_rx_queues(vf->vsi, false); in ice_vc_cfg_vsi_qs_msg()
991 * Clear TX and RX queues config in case VF in ice_vc_cfg_vsi_qs_msg()
994 for (i = 0; i < vsi->num_tx_queues; i++) { in ice_vc_cfg_vsi_qs_msg()
995 txq = &vsi->tx_queues[i]; in ice_vc_cfg_vsi_qs_msg()
997 txq->desc_count = 0; in ice_vc_cfg_vsi_qs_msg()
998 txq->tx_paddr = 0; in ice_vc_cfg_vsi_qs_msg()
999 txq->tc = 0; in ice_vc_cfg_vsi_qs_msg()
1002 for (i = 0; i < vsi->num_rx_queues; i++) { in ice_vc_cfg_vsi_qs_msg()
1003 rxq = &vsi->rx_queues[i]; in ice_vc_cfg_vsi_qs_msg()
1005 rxq->desc_count = 0; in ice_vc_cfg_vsi_qs_msg()
1006 rxq->rx_paddr = 0; in ice_vc_cfg_vsi_qs_msg()
1009 vqpi = vqci->qpair; in ice_vc_cfg_vsi_qs_msg()
1010 for (i = 0; i < vqci->num_queue_pairs; i++, vqpi++) { in ice_vc_cfg_vsi_qs_msg()
1012 if (vqpi->txq.vsi_id != vf->vsi->idx || in ice_vc_cfg_vsi_qs_msg()
1013 vqpi->rxq.vsi_id != vf->vsi->idx || in ice_vc_cfg_vsi_qs_msg()
1014 vqpi->txq.queue_id != vqpi->rxq.queue_id || in ice_vc_cfg_vsi_qs_msg()
1015 vqpi->txq.headwb_enabled || in ice_vc_cfg_vsi_qs_msg()
1016 vqpi->rxq.splithdr_enabled || in ice_vc_cfg_vsi_qs_msg()
1017 vqpi->rxq.crc_disable || in ice_vc_cfg_vsi_qs_msg()
1018 !(ice_vc_isvalid_ring_len(vqpi->txq.ring_len)) || in ice_vc_cfg_vsi_qs_msg()
1019 !(ice_vc_isvalid_ring_len(vqpi->rxq.ring_len))) { in ice_vc_cfg_vsi_qs_msg()
1025 txq = &vsi->tx_queues[vqpi->txq.queue_id]; in ice_vc_cfg_vsi_qs_msg()
1027 txq->desc_count = vqpi->txq.ring_len; in ice_vc_cfg_vsi_qs_msg()
1028 txq->tx_paddr = vqpi->txq.dma_ring_addr; in ice_vc_cfg_vsi_qs_msg()
1029 txq->q_handle = vqpi->txq.queue_id; in ice_vc_cfg_vsi_qs_msg()
1030 txq->tc = 0; in ice_vc_cfg_vsi_qs_msg()
1032 rxq = &vsi->rx_queues[vqpi->rxq.queue_id]; in ice_vc_cfg_vsi_qs_msg()
1034 rxq->desc_count = vqpi->rxq.ring_len; in ice_vc_cfg_vsi_qs_msg()
1035 rxq->rx_paddr = vqpi->rxq.dma_ring_addr; in ice_vc_cfg_vsi_qs_msg()
1036 vsi->mbuf_sz = vqpi->rxq.databuffer_size; in ice_vc_cfg_vsi_qs_msg()
1039 /* Configure TX queues in HW */ in ice_vc_cfg_vsi_qs_msg()
1043 "VF-%d: Unable to configure VSI for Tx: %s\n", in ice_vc_cfg_vsi_qs_msg()
1044 vf->vf_num, ice_err_str(error)); in ice_vc_cfg_vsi_qs_msg()
1053 "VF-%d: Unable to configure VSI for Rx: %s\n", in ice_vc_cfg_vsi_qs_msg()
1054 vf->vf_num, ice_err_str(error)); in ice_vc_cfg_vsi_qs_msg()
1061 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_VSI_QUEUES, in ice_vc_cfg_vsi_qs_msg()
1066 * ice_vc_cfg_rss_key_msg - Handle VIRTCHNL_OP_CONFIG_RSS_KEY msg from VF
1078 struct ice_hw *hw = &sc->hw; in ice_vc_cfg_rss_key_msg()
1082 struct ice_vsi *vsi = vf->vsi; in ice_vc_cfg_rss_key_msg()
1086 if (vrk->vsi_id != vsi->idx) { in ice_vc_cfg_rss_key_msg()
1087 device_printf(sc->dev, in ice_vc_cfg_rss_key_msg()
1088 "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n", in ice_vc_cfg_rss_key_msg()
1089 vf->vf_num, vsi->idx, vrk->vsi_id); in ice_vc_cfg_rss_key_msg()
1094 if ((vrk->key_len > in ice_vc_cfg_rss_key_msg()
1097 vrk->key_len == 0) { in ice_vc_cfg_rss_key_msg()
1102 memcpy(&keydata, vrk->key, vrk->key_len); in ice_vc_cfg_rss_key_msg()
1104 status = ice_aq_set_rss_key(hw, vsi->idx, &keydata); in ice_vc_cfg_rss_key_msg()
1106 device_printf(sc->dev, in ice_vc_cfg_rss_key_msg()
1108 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); in ice_vc_cfg_rss_key_msg()
1114 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_RSS_KEY, in ice_vc_cfg_rss_key_msg()
1119 * ice_vc_cfg_rss_lut_msg - Handle VIRTCHNL_OP_CONFIG_RSS_LUT msg from VF
1129 struct ice_hw *hw = &sc->hw; in ice_vc_cfg_rss_lut_msg()
1134 struct ice_vsi *vsi = vf->vsi; in ice_vc_cfg_rss_lut_msg()
1138 if (vrl->vsi_id != vsi->idx) { in ice_vc_cfg_rss_lut_msg()
1139 device_printf(sc->dev, in ice_vc_cfg_rss_lut_msg()
1140 "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n", in ice_vc_cfg_rss_lut_msg()
1141 vf->vf_num, vsi->idx, vrl->vsi_id); in ice_vc_cfg_rss_lut_msg()
1146 if (vrl->lut_entries > ICE_VSIQF_HLUT_ARRAY_SIZE) { in ice_vc_cfg_rss_lut_msg()
1151 lut_params.vsi_handle = vsi->idx; in ice_vc_cfg_rss_lut_msg()
1152 lut_params.lut_size = vsi->rss_table_size; in ice_vc_cfg_rss_lut_msg()
1153 lut_params.lut_type = vsi->rss_lut_type; in ice_vc_cfg_rss_lut_msg()
1154 lut_params.lut = vrl->lut; in ice_vc_cfg_rss_lut_msg()
1159 device_printf(sc->dev, in ice_vc_cfg_rss_lut_msg()
1160 "VF-%d: Cannot set RSS lut, err %s aq_err %s\n", in ice_vc_cfg_rss_lut_msg()
1161 vf->vf_num, ice_status_str(status), in ice_vc_cfg_rss_lut_msg()
1162 ice_aq_str(hw->adminq.sq_last_status)); in ice_vc_cfg_rss_lut_msg()
1167 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_RSS_LUT, in ice_vc_cfg_rss_lut_msg()
1172 * ice_vc_set_rss_hena_msg - Handle VIRTCHNL_OP_SET_RSS_HENA msg from VF
1183 struct ice_hw *hw = &sc->hw; in ice_vc_set_rss_hena_msg()
1187 struct ice_vsi *vsi = vf->vsi; in ice_vc_set_rss_hena_msg()
1197 status = ice_rem_vsi_rss_cfg(hw, vsi->idx); in ice_vc_set_rss_hena_msg()
1198 if (vrh->hena) { in ice_vc_set_rss_hena_msg()
1205 device_printf(sc->dev, in ice_vc_set_rss_hena_msg()
1208 ice_aq_str(hw->adminq.sq_last_status)); in ice_vc_set_rss_hena_msg()
1209 status = ice_add_avf_rss_cfg(hw, vsi->idx, vrh->hena); in ice_vc_set_rss_hena_msg()
1211 device_printf(sc->dev, in ice_vc_set_rss_hena_msg()
1214 ice_aq_str(hw->adminq.sq_last_status)); in ice_vc_set_rss_hena_msg()
1217 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_SET_RSS_HENA, in ice_vc_set_rss_hena_msg()
1222 * ice_vc_enable_queues_msg - Handle VIRTCHNL_OP_ENABLE_QUEUES msg from VF
1227 * Enables VF queues selected in msg_buf for Tx/Rx traffic.
1229 * @remark Only actually operates on Rx queues; Tx queues are enabled in
1235 struct ice_hw *hw = &sc->hw; in ice_vc_enable_queues_msg()
1238 struct ice_vsi *vsi = vf->vsi; in ice_vc_enable_queues_msg()
1243 if (vqs->vsi_id != vsi->idx) { in ice_vc_enable_queues_msg()
1244 device_printf(sc->dev, in ice_vc_enable_queues_msg()
1245 "%s: VF-%d: Message has invalid VSI ID (expected %d, got %d)\n", in ice_vc_enable_queues_msg()
1246 __func__, vf->vf_num, vsi->idx, vqs->vsi_id); in ice_vc_enable_queues_msg()
1251 if (!vqs->rx_queues && !vqs->tx_queues) { in ice_vc_enable_queues_msg()
1252 device_printf(sc->dev, in ice_vc_enable_queues_msg()
1253 "%s: VF-%d: message queue masks are empty\n", in ice_vc_enable_queues_msg()
1254 __func__, vf->vf_num); in ice_vc_enable_queues_msg()
1260 bit = fls(vqs->rx_queues); in ice_vc_enable_queues_msg()
1261 if (bit > vsi->num_rx_queues) { in ice_vc_enable_queues_msg()
1262 device_printf(sc->dev, in ice_vc_enable_queues_msg()
1263 "%s: VF-%d: message's rx_queues map (0x%08x) has invalid bit set (%d)\n", in ice_vc_enable_queues_msg()
1264 __func__, vf->vf_num, vqs->rx_queues, bit); in ice_vc_enable_queues_msg()
1269 /* Tx ring enable is handled in an earlier message. */ in ice_vc_enable_queues_msg()
1270 for_each_set_bit(bit, &vqs->rx_queues, 32) { in ice_vc_enable_queues_msg()
1273 device_printf(sc->dev, in ice_vc_enable_queues_msg()
1282 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_ENABLE_QUEUES, in ice_vc_enable_queues_msg()
1287 * ice_vc_disable_queues_msg - Handle VIRTCHNL_OP_DISABLE_QUEUES msg
1295 * Tx and Rx queues
1301 struct ice_hw *hw = &sc->hw; in ice_vc_disable_queues_msg()
1303 struct ice_vsi *vsi = vf->vsi; in ice_vc_disable_queues_msg()
1308 device_printf(sc->dev, in ice_vc_disable_queues_msg()
1322 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_DISABLE_QUEUES, in ice_vc_disable_queues_msg()
1327 * ice_vc_cfg_irq_map_msg - Handle VIRTCHNL_OP_CFG_IRQ_MAP msg from VF
1340 struct ice_hw *hw = &sc->hw; in ice_vc_cfg_irq_map_msg()
1344 struct ice_vsi *vsi = vf->vsi; in ice_vc_cfg_irq_map_msg()
1349 if (vimi->num_vectors > vf->num_irq_vectors) { in ice_vc_cfg_irq_map_msg()
1350 device_printf(sc->dev, in ice_vc_cfg_irq_map_msg()
1351 "%s: VF-%d: message has more vectors (%d) than configured for VF (%d)\n", in ice_vc_cfg_irq_map_msg()
1352 __func__, vf->vf_num, vimi->num_vectors, vf->num_irq_vectors); in ice_vc_cfg_irq_map_msg()
1357 vvm = vimi->vecmap; in ice_vc_cfg_irq_map_msg()
1359 for (int i = 0; i < vimi->num_vectors; i++, vvm++) { in ice_vc_cfg_irq_map_msg()
1364 if (vvm->vsi_id != vf->vsi->idx) { in ice_vc_cfg_irq_map_msg()
1365 device_printf(sc->dev, in ice_vc_cfg_irq_map_msg()
1366 "%s: VF-%d: message's VSI ID (%d) does not match VF's (%d) for vector %d\n", in ice_vc_cfg_irq_map_msg()
1367 __func__, vf->vf_num, vvm->vsi_id, vf->vsi->idx, i); in ice_vc_cfg_irq_map_msg()
1372 /* vvm->vector_id is relative to VF space */ in ice_vc_cfg_irq_map_msg()
1373 vector = vvm->vector_id; in ice_vc_cfg_irq_map_msg()
1375 if (vector >= vf->num_irq_vectors) { in ice_vc_cfg_irq_map_msg()
1376 device_printf(sc->dev, in ice_vc_cfg_irq_map_msg()
1377 "%s: VF-%d: message's vector ID (%d) is greater than VF's max ID (%d)\n", in ice_vc_cfg_irq_map_msg()
1378 __func__, vf->vf_num, vector, vf->num_irq_vectors - 1); in ice_vc_cfg_irq_map_msg()
1388 for_each_set_bit(bit, &vvm->txq_map, ICE_VIRTCHNL_QUEUE_MAP_SIZE) { in ice_vc_cfg_irq_map_msg()
1389 if (bit >= vsi->num_tx_queues) { in ice_vc_cfg_irq_map_msg()
1390 device_printf(sc->dev, in ice_vc_cfg_irq_map_msg()
1391 "%s: VF-%d: txq map has invalid bit set\n", in ice_vc_cfg_irq_map_msg()
1392 __func__, vf->vf_num); in ice_vc_cfg_irq_map_msg()
1397 vf->tx_irqvs[vector].me = vector; in ice_vc_cfg_irq_map_msg()
1399 txq = &vsi->tx_queues[bit]; in ice_vc_cfg_irq_map_msg()
1400 txq->irqv = &vf->tx_irqvs[vector]; in ice_vc_cfg_irq_map_msg()
1401 txq->itr_idx = vvm->txitr_idx; in ice_vc_cfg_irq_map_msg()
1404 for_each_set_bit(bit, &vvm->rxq_map, ICE_VIRTCHNL_QUEUE_MAP_SIZE) { in ice_vc_cfg_irq_map_msg()
1405 if (bit >= vsi->num_rx_queues) { in ice_vc_cfg_irq_map_msg()
1406 device_printf(sc->dev, in ice_vc_cfg_irq_map_msg()
1407 "%s: VF-%d: rxq map has invalid bit set\n", in ice_vc_cfg_irq_map_msg()
1408 __func__, vf->vf_num); in ice_vc_cfg_irq_map_msg()
1412 vf->rx_irqvs[vector].me = vector; in ice_vc_cfg_irq_map_msg()
1414 rxq = &vsi->rx_queues[bit]; in ice_vc_cfg_irq_map_msg()
1415 rxq->irqv = &vf->rx_irqvs[vector]; in ice_vc_cfg_irq_map_msg()
1416 rxq->itr_idx = vvm->rxitr_idx; in ice_vc_cfg_irq_map_msg()
1421 for (int i = 0; i < vf->vsi->num_rx_queues; i++) in ice_vc_cfg_irq_map_msg()
1422 if (vsi->rx_queues[i].irqv != NULL) in ice_vc_cfg_irq_map_msg()
1423 ice_configure_rxq_interrupt(hw, vsi->rx_qmap[i], in ice_vc_cfg_irq_map_msg()
1424 vsi->rx_queues[i].irqv->me, vsi->rx_queues[i].itr_idx); in ice_vc_cfg_irq_map_msg()
1426 for (int i = 0; i < vf->vsi->num_tx_queues; i++) in ice_vc_cfg_irq_map_msg()
1427 if (vsi->tx_queues[i].irqv != NULL) in ice_vc_cfg_irq_map_msg()
1428 ice_configure_txq_interrupt(hw, vsi->tx_qmap[i], in ice_vc_cfg_irq_map_msg()
1429 vsi->tx_queues[i].irqv->me, vsi->tx_queues[i].itr_idx); in ice_vc_cfg_irq_map_msg()
1434 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_IRQ_MAP, in ice_vc_cfg_irq_map_msg()
1439 * ice_eth_stats_to_virtchnl_eth_stats - Convert stats for virtchnl
1454 vstats->rx_bytes = istats->rx_bytes; in ice_eth_stats_to_virtchnl_eth_stats()
1455 vstats->rx_unicast = istats->rx_unicast; in ice_eth_stats_to_virtchnl_eth_stats()
1456 vstats->rx_multicast = istats->rx_multicast; in ice_eth_stats_to_virtchnl_eth_stats()
1457 vstats->rx_broadcast = istats->rx_broadcast; in ice_eth_stats_to_virtchnl_eth_stats()
1458 vstats->rx_discards = istats->rx_discards; in ice_eth_stats_to_virtchnl_eth_stats()
1459 vstats->rx_unknown_protocol = istats->rx_unknown_protocol; in ice_eth_stats_to_virtchnl_eth_stats()
1460 vstats->tx_bytes = istats->tx_bytes; in ice_eth_stats_to_virtchnl_eth_stats()
1461 vstats->tx_unicast = istats->tx_unicast; in ice_eth_stats_to_virtchnl_eth_stats()
1462 vstats->tx_multicast = istats->tx_multicast; in ice_eth_stats_to_virtchnl_eth_stats()
1463 vstats->tx_broadcast = istats->tx_broadcast; in ice_eth_stats_to_virtchnl_eth_stats()
1464 vstats->tx_discards = istats->tx_discards; in ice_eth_stats_to_virtchnl_eth_stats()
1465 vstats->tx_errors = istats->tx_errors; in ice_eth_stats_to_virtchnl_eth_stats()
1469 * ice_vc_get_stats_msg - Handle VIRTCHNL_OP_GET_STATS msg
1481 struct ice_vsi *vsi = vf->vsi; in ice_vc_get_stats_msg()
1482 struct ice_hw *hw = &sc->hw; in ice_vc_get_stats_msg()
1486 if (vqs->vsi_id != vsi->idx) { in ice_vc_get_stats_msg()
1487 device_printf(sc->dev, in ice_vc_get_stats_msg()
1488 "%s: VF-%d: message has invalid VSI ID %d (VF has VSI ID %d)\n", in ice_vc_get_stats_msg()
1489 __func__, vf->vf_num, vqs->vsi_id, vsi->idx); in ice_vc_get_stats_msg()
1490 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_GET_STATS, in ice_vc_get_stats_msg()
1494 ice_update_vsi_hw_stats(vf->vsi); in ice_vc_get_stats_msg()
1495 ice_eth_stats_to_virtchnl_eth_stats(&vsi->hw_stats.cur, &stats); in ice_vc_get_stats_msg()
1497 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_GET_STATS, in ice_vc_get_stats_msg()
1503 * ice_vc_cfg_promisc_mode_msg - Handle VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
1513 struct ice_hw *hw = &sc->hw; in ice_vc_cfg_promisc_mode_msg()
1517 struct ice_vsi *vsi = vf->vsi; in ice_vc_cfg_promisc_mode_msg()
1528 if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) { in ice_vc_cfg_promisc_mode_msg()
1529 device_printf(sc->dev, in ice_vc_cfg_promisc_mode_msg()
1530 "VF-%d: attempted to configure promiscuous mode\n", in ice_vc_cfg_promisc_mode_msg()
1531 vf->vf_num); in ice_vc_cfg_promisc_mode_msg()
1536 if (vpi->vsi_id != vsi->idx) { in ice_vc_cfg_promisc_mode_msg()
1537 device_printf(sc->dev, in ice_vc_cfg_promisc_mode_msg()
1538 "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n", in ice_vc_cfg_promisc_mode_msg()
1539 vf->vf_num, vsi->idx, vpi->vsi_id); in ice_vc_cfg_promisc_mode_msg()
1544 if (vpi->flags & ~ICE_VIRTCHNL_VALID_PROMISC_FLAGS) { in ice_vc_cfg_promisc_mode_msg()
1545 device_printf(sc->dev, in ice_vc_cfg_promisc_mode_msg()
1546 "VF-%d: Message has invalid promiscuous flags set (valid 0x%02x, got 0x%02x)\n", in ice_vc_cfg_promisc_mode_msg()
1547 vf->vf_num, ICE_VIRTCHNL_VALID_PROMISC_FLAGS, in ice_vc_cfg_promisc_mode_msg()
1548 vpi->flags); in ice_vc_cfg_promisc_mode_msg()
1556 if (vpi->flags & FLAG_VF_UNICAST_PROMISC) { in ice_vc_cfg_promisc_mode_msg()
1560 if (vpi->flags & FLAG_VF_MULTICAST_PROMISC) { in ice_vc_cfg_promisc_mode_msg()
1565 status = ice_get_vsi_promisc(hw, vsi->idx, old_promisc_mask, &vid); in ice_vc_cfg_promisc_mode_msg()
1567 device_printf(sc->dev, in ice_vc_cfg_promisc_mode_msg()
1568 "VF-%d: Failed to get promiscuous mode mask for VSI %d, err %s aq_err %s\n", in ice_vc_cfg_promisc_mode_msg()
1569 vf->vf_num, vsi->idx, in ice_vc_cfg_promisc_mode_msg()
1571 ice_aq_str(hw->adminq.sq_last_status)); in ice_vc_cfg_promisc_mode_msg()
1583 status = ice_clear_vsi_promisc(hw, vsi->idx, in ice_vc_cfg_promisc_mode_msg()
1586 device_printf(sc->dev, in ice_vc_cfg_promisc_mode_msg()
1587 "VF-%d: Failed to clear promiscuous mode for VSI %d, err %s aq_err %s\n", in ice_vc_cfg_promisc_mode_msg()
1588 vf->vf_num, vsi->idx, in ice_vc_cfg_promisc_mode_msg()
1590 ice_aq_str(hw->adminq.sq_last_status)); in ice_vc_cfg_promisc_mode_msg()
1597 status = ice_set_vsi_promisc(hw, vsi->idx, set_promisc_mask, 0); in ice_vc_cfg_promisc_mode_msg()
1599 device_printf(sc->dev, in ice_vc_cfg_promisc_mode_msg()
1600 "VF-%d: Failed to set promiscuous mode for VSI %d, err %s aq_err %s\n", in ice_vc_cfg_promisc_mode_msg()
1601 vf->vf_num, vsi->idx, in ice_vc_cfg_promisc_mode_msg()
1603 ice_aq_str(hw->adminq.sq_last_status)); in ice_vc_cfg_promisc_mode_msg()
1610 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, in ice_vc_cfg_promisc_mode_msg()
1615 * ice_vc_notify_all_vfs_link_state - Notify all VFs of PF link state
1624 for (int i = 0; i < sc->num_vfs; i++) in ice_vc_notify_all_vfs_link_state()
1625 ice_vc_notify_vf_link_state(sc, &sc->vfs[i]); in ice_vc_notify_all_vfs_link_state()
1629 * ice_vc_notify_vf_link_state - Notify VF of PF link state
1641 struct ice_hw *hw = &sc->hw; in ice_vc_notify_vf_link_state()
1645 event.event_data.link_event_adv.link_status = sc->link_up; in ice_vc_notify_vf_link_state()
1648 hw->port_info->phy.link_info.link_speed); in ice_vc_notify_vf_link_state()
1650 ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_EVENT, in ice_vc_notify_vf_link_state()
1655 * ice_vc_handle_vf_msg - Handle a message from a VF
1659 * Called whenever an event is received from a VF on the HW mailbox queue.
1666 struct ice_hw *hw = &sc->hw; in ice_vc_handle_vf_msg()
1667 device_t dev = sc->dev; in ice_vc_handle_vf_msg()
1671 u32 v_opcode = event->desc.cookie_high; in ice_vc_handle_vf_msg()
1672 u16 v_id = event->desc.retval; in ice_vc_handle_vf_msg()
1673 u8 *msg = event->msg_buf; in ice_vc_handle_vf_msg()
1674 u16 msglen = event->msg_len; in ice_vc_handle_vf_msg()
1676 if (v_id >= sc->num_vfs) { in ice_vc_handle_vf_msg()
1677 device_printf(dev, "%s: Received msg from invalid VF-%d: opcode %d, len %d\n", in ice_vc_handle_vf_msg()
1682 vf = &sc->vfs[v_id]; in ice_vc_handle_vf_msg()
1685 err = virtchnl_vc_validate_vf_msg(&vf->version, v_opcode, msg, msglen); in ice_vc_handle_vf_msg()
1687 device_printf(dev, "%s: Received invalid msg from VF-%d: opcode %d, len %d, error %d\n", in ice_vc_handle_vf_msg()
1688 __func__, vf->vf_num, v_opcode, msglen, err); in ice_vc_handle_vf_msg()
1744 device_printf(dev, "%s: Received unknown msg from VF-%d: opcode %d, len %d\n", in ice_vc_handle_vf_msg()
1745 __func__, vf->vf_num, v_opcode, msglen); in ice_vc_handle_vf_msg()
1753 * ice_iov_setup_intr_mapping - Setup interrupt config for a VF
1763 * As well, this sets up queue allocation registers and maps the mailbox
1769 struct ice_hw *hw = &sc->hw; in ice_iov_setup_intr_mapping()
1770 struct ice_vsi *vsi = vf->vsi; in ice_iov_setup_intr_mapping()
1774 u16 vf_first_irq_idx = vf->vf_imap[0]; in ice_iov_setup_intr_mapping()
1775 u16 vf_last_irq_idx = (vf_first_irq_idx + vf->num_irq_vectors) - 1; in ice_iov_setup_intr_mapping()
1776 u16 abs_vf_first_irq_idx = hw->func_caps.common_cap.msix_vector_first_id + in ice_iov_setup_intr_mapping()
1778 u16 abs_vf_last_irq_idx = (abs_vf_first_irq_idx + vf->num_irq_vectors) - 1; in ice_iov_setup_intr_mapping()
1779 u16 abs_vf_num = vf->vf_num + hw->func_caps.vf_base_id; in ice_iov_setup_intr_mapping()
1784 wr32(hw, VPINT_ALLOC(vf->vf_num), in ice_iov_setup_intr_mapping()
1788 wr32(hw, VPINT_ALLOC_PCI(vf->vf_num), in ice_iov_setup_intr_mapping()
1798 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & GLINT_VECT2FUNC_PF_NUM_M))); in ice_iov_setup_intr_mapping()
1801 /* Map mailbox interrupt to MSI-X index 0. Disable ITR for it, too. */ in ice_iov_setup_intr_mapping()
1807 /* Mark the TX queue mapping registers as valid */ in ice_iov_setup_intr_mapping()
1808 wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_num), VPLAN_TXQ_MAPENA_TX_ENA_M); in ice_iov_setup_intr_mapping()
1811 wr32(hw, VPLAN_TX_QBASE(vf->vf_num), VPLAN_TX_QBASE_VFQTABLE_ENA_M); in ice_iov_setup_intr_mapping()
1812 for (int i = 0; i < vsi->num_tx_queues; i++) { in ice_iov_setup_intr_mapping()
1813 wr32(hw, VPLAN_TX_QTABLE(i, vf->vf_num), in ice_iov_setup_intr_mapping()
1814 (vsi->tx_qmap[i] << VPLAN_TX_QTABLE_QINDEX_S) & VPLAN_TX_QTABLE_QINDEX_M); in ice_iov_setup_intr_mapping()
1818 wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_num), VPLAN_RXQ_MAPENA_RX_ENA_M); in ice_iov_setup_intr_mapping()
1819 wr32(hw, VPLAN_RX_QBASE(vf->vf_num), VPLAN_RX_QBASE_VFQTABLE_ENA_M); in ice_iov_setup_intr_mapping()
1820 for (int i = 0; i < vsi->num_rx_queues; i++) { in ice_iov_setup_intr_mapping()
1821 wr32(hw, VPLAN_RX_QTABLE(i, vf->vf_num), in ice_iov_setup_intr_mapping()
1822 (vsi->rx_qmap[i] << VPLAN_RX_QTABLE_QINDEX_S) & VPLAN_RX_QTABLE_QINDEX_M); in ice_iov_setup_intr_mapping()
1827 * ice_err_to_virt err - translate ice errors into virtchnl errors