
/* SPDX-License-Identifier: BSD-3-Clause */
 * scctx->isc_tx_tso_size_max + the VLAN header is a valid size.
 * DMA tag. However, scctx->isc_tx_tso_segsize_max is used to set the
 * IFLIB_SKIP_MSIX allows the driver to handle allocating MSI-X
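
/*
 * Illustrative sketch (not from this file): a driver opts out of iflib's
 * own MSI-X setup by setting IFLIB_SKIP_MSIX in the isc_flags of its
 * if_shared_ctx and then allocating vectors itself (see ice_allocate_msix()
 * below). All fields other than the flag are placeholders here.
 */
static const struct if_shared_ctx example_sctx = {
    .isc_magic = IFLIB_MAGIC,
    .isc_flags = IFLIB_SKIP_MSIX,   /* driver manages MSI-X itself */
    /* ... remaining required fields elided ... */
};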
/* Static driver-wide sysctls */

 * ice_pci_mapping - Map PCI BAR memory
    rc = ice_map_bar(sc->dev, &sc->bar0, 0);

 * ice_free_pci_mapping - Release PCI BAR memory
    ice_free_bar(sc->dev, &sc->bar0);

 * ice_register - register device method callback

 * ice_setup_scctx - Setup the iflib softc context structure
    if_softc_ctx_t scctx = sc->scctx;
    struct ice_hw *hw = &sc->hw;
    device_t dev = sc->dev;

    safe_mode = ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE);
    recovery_mode = ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE);
        scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1;
        scctx->isc_ntxqsets_max = 1;
        scctx->isc_nrxqsets_max = 1;
     * sysctl value is when setting up MSI-X vectors.
    sc->ifc_sysctl_ntxqs = scctx->isc_ntxqsets;
    sc->ifc_sysctl_nrxqs = scctx->isc_nrxqsets;
        if (scctx->isc_ntxqsets == 0)
            scctx->isc_ntxqsets = hw->func_caps.common_cap.rss_table_size;
        if (scctx->isc_nrxqsets == 0)
            scctx->isc_nrxqsets = hw->func_caps.common_cap.rss_table_size;
        scctx->isc_ntxqsets_max = hw->func_caps.common_cap.num_txq;
        scctx->isc_nrxqsets_max = hw->func_caps.common_cap.num_rxq;
    if (sc->ifc_sysctl_ntxqs > scctx->isc_ntxqsets_max)
        sc->ifc_sysctl_ntxqs = scctx->isc_ntxqsets_max;
    if (sc->ifc_sysctl_nrxqs > scctx->isc_nrxqsets_max)
        sc->ifc_sysctl_nrxqs = scctx->isc_nrxqsets_max;
    scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
        * sizeof(struct ice_tx_desc), DBA_ALIGN);
    scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
        * sizeof(union ice_32b_rx_flex_desc), DBA_ALIGN);
    scctx->isc_tx_nsegments = ICE_MAX_TX_SEGS;
    scctx->isc_tx_tso_segments_max = ICE_MAX_TSO_SEGS;
    scctx->isc_tx_tso_size_max = ICE_TSO_SIZE;
    scctx->isc_tx_tso_segsize_max = ICE_MAX_DMA_SEG_SIZE;
    scctx->isc_msix_bar = pci_msix_table_bar(dev);
    scctx->isc_rss_table_size = hw->func_caps.common_cap.rss_table_size;
     * If the driver loads in recovery mode, disable Tx/Rx functionality
        scctx->isc_txrx = &ice_recovery_txrx;
        scctx->isc_txrx = &ice_txrx;
        scctx->isc_capenable = ICE_SAFE_CAPS;
        scctx->isc_tx_csum_flags = 0;
        scctx->isc_capenable = ICE_FULL_CAPS;
        scctx->isc_tx_csum_flags = ICE_CSUM_OFFLOAD;
    scctx->isc_capabilities = scctx->isc_capenable;
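
/*
 * Aside (illustrative, not from this file): roundup2() from sys/param.h
 * rounds up to a power-of-two boundary:
 *
 *    roundup2(x, y) == ((x) + ((y) - 1)) & ~((y) - 1)    (y a power of 2)
 *
 * e.g. a ring of 1024 descriptors of 16 bytes each needs 16384 bytes;
 * with a 128-byte alignment that is already a multiple, so it rounds to
 * itself, while 16385 bytes would round up to 16512.
 */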
 * ice_if_attach_pre - Early device attach logic
 * before the Tx and Rx queues are allocated.
    ice_set_state(&sc->state, ICE_STATE_ATTACHING);
    sc->ctx = ctx;
    sc->media = iflib_get_media(ctx);
    sc->sctx = iflib_get_sctx(ctx);
    sc->iflib_ctx_lock = iflib_ctx_lock_get(ctx);
    sc->ifp = iflib_get_ifp(ctx);
    dev = sc->dev = iflib_get_dev(ctx);
    scctx = sc->scctx = iflib_get_softc_ctx(ctx);
    hw = &sc->hw;
    hw->back = sc;
    snprintf(sc->admin_mtx_name, sizeof(sc->admin_mtx_name),
    mtx_init(&sc->admin_mtx, sc->admin_mtx_name, NULL, MTX_DEF);
    callout_init_mtx(&sc->admin_timer, &sc->admin_mtx, 0);
            ice_aq_str(hw->adminq.sq_last_status));
        ice_set_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN);
            ice_aq_str(hw->adminq.sq_last_status));
    iflib_set_mac(ctx, hw->port_info->mac.lan_addr);

    /* Initialize the Tx queue manager */
    err = ice_resmgr_init(&sc->tx_qmgr, hw->func_caps.common_cap.num_txq);
        device_printf(dev, "Unable to initialize Tx queue manager: %s\n",
    err = ice_resmgr_init(&sc->rx_qmgr, hw->func_caps.common_cap.num_rxq);
    sc->num_available_vsi = min(ICE_MAX_VSI_AVAILABLE,
        hw->func_caps.guar_num_vsi);
    if (!sc->num_available_vsi) {
    sc->all_vsi = (struct ice_vsi **)
        malloc(sizeof(struct ice_vsi *) * sc->num_available_vsi,
    if (!sc->all_vsi) {
    ice_alloc_vsi_qmap(&sc->pf_vsi, scctx->isc_ntxqsets_max,
        scctx->isc_nrxqsets_max);

    /* Allocate MSI-X vectors (due to isc_flags IFLIB_SKIP_MSIX) */
    ice_release_vsi(&sc->pf_vsi);
    free(sc->all_vsi, M_ICE);
    sc->all_vsi = NULL;
    ice_resmgr_destroy(&sc->rx_qmgr);
    ice_resmgr_destroy(&sc->tx_qmgr);
    mtx_lock(&sc->admin_mtx);
    callout_stop(&sc->admin_timer);
    mtx_unlock(&sc->admin_mtx);
    mtx_destroy(&sc->admin_mtx);

 * ice_attach_pre_recovery_mode - Limited driver attach_pre for FW recovery
 * detected to be in an invalid state and must be re-programmed, or (b) the
 * driver detects that the loaded firmware has a non-compatible API version
    ice_set_state(&sc->state, ICE_STATE_RECOVERY_MODE);
    sc->pf_vsi.sc = sc;
     * We still need to allocate MSI-X vectors since we need one vector to

 * ice_update_link_status - notify OS of link state change
    struct ice_hw *hw = &sc->hw;
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    if (!ice_testandset_state(&sc->state, ICE_STATE_LINK_STATUS_REPORTED)) {
        if (sc->link_up) { /* link is up */
            uint64_t baudrate = ice_aq_speed_to_rate(sc->hw.port_info);
            if (!(hw->port_info->phy.link_info_old.link_info & ICE_AQ_LINK_UP))
            iflib_link_state_change(sc->ctx, LINK_STATE_UP, baudrate);
            iflib_link_state_change(sc->ctx, LINK_STATE_DOWN, 0);
    if (update_media && !ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) {
        status = ice_add_media_types(sc, sc->media);
            device_printf(sc->dev, "Error adding device media types: %s aq_err %s\n",
                ice_aq_str(hw->adminq.sq_last_status));

 * ice_if_attach_post - Late device attach logic
 * logic which must wait until after the Tx and Rx queues have been
    /* We don't yet support loading if MSI-X is not supported */
    if (sc->scctx->isc_intr != IFLIB_INTR_MSIX) {
        device_printf(sc->dev, "The ice driver does not support loading without MSI-X\n");
    sc->scctx->isc_max_frame_size = if_getmtu(ifp) +
        ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) {
    sc->pf_vsi.max_frame_size = sc->scctx->isc_max_frame_size;
    err = ice_initialize_vsi(&sc->pf_vsi);
        device_printf(sc->dev, "Unable to initialize Main VSI: %s\n",
    err = ice_config_rss(&sc->pf_vsi);
        device_printf(sc->dev,
    status = ice_aq_set_pfc_mode(&sc->hw, ICE_AQC_PFC_VLAN_BASED_PFC, NULL);
        device_printf(sc->dev, "Setting pfc mode failed, status %s\n", ice_status_str(status));
    ice_enable_intr(&sc->hw, sc->irqvs[0].me);
    mtx_lock(&sc->admin_mtx);
    callout_reset(&sc->admin_timer, hz/2, ice_admin_timer, sc);
    mtx_unlock(&sc->admin_mtx);
    if (ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) &&
        !ice_test_state(&sc->state, ICE_STATE_NO_MEDIA))
        ice_set_state(&sc->state, ICE_STATE_FIRST_INIT_LINK);
    ice_clear_state(&sc->state, ICE_STATE_ATTACHING);
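
/*
 * Worked example (illustrative): with the default 1500-byte MTU, the
 * maximum frame size computed above is 1500 + 14 (Ethernet header) +
 * 4 (CRC) + 4 (802.1Q tag) = 1522 bytes.
 */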
 * ice_attach_post_recovery_mode - Limited driver attach_post for FW recovery
    ice_enable_intr(&sc->hw, sc->irqvs[0].me);
    mtx_lock(&sc->admin_mtx);
    callout_reset(&sc->admin_timer, hz/2, ice_admin_timer, sc);
    mtx_unlock(&sc->admin_mtx);
    ice_clear_state(&sc->state, ICE_STATE_ATTACHING);

 * ice_free_irqvs - Free IRQ vector memory
    struct ice_vsi *vsi = &sc->pf_vsi;
    if_ctx_t ctx = sc->ctx;
    if (sc->irqvs == NULL)
    for (i = 0; i < sc->num_irq_vectors; i++)
        iflib_irq_free(ctx, &sc->irqvs[i].irq);
    for (i = 0; i < vsi->num_rx_queues; i++)
        vsi->rx_queues[i].irqv = NULL;
    for (i = 0; i < vsi->num_tx_queues; i++)
        vsi->tx_queues[i].irqv = NULL;
    free(sc->irqvs, M_ICE);
    sc->irqvs = NULL;
    sc->num_irq_vectors = 0;

 * ice_if_detach - Device driver detach logic
    struct ice_vsi *vsi = &sc->pf_vsi;
    ice_set_state(&sc->state, ICE_STATE_DETACHING);
    mtx_lock(&sc->admin_mtx);
    callout_stop(&sc->admin_timer);
    mtx_unlock(&sc->admin_mtx);
    mtx_destroy(&sc->admin_mtx);
    if (sc->mirr_if)
    ifmedia_removeall(sc->media);
    /* Free the Tx and Rx sysctl contexts, and assign NULL to the node
    /* Release MSI-X resources */
    for (i = 0; i < sc->num_available_vsi; i++) {
        if (sc->all_vsi[i])
            ice_release_vsi(sc->all_vsi[i]);
    if (sc->all_vsi) {
        free(sc->all_vsi, M_ICE);
        sc->all_vsi = NULL;
    /* Release MSI-X memory */
    pci_release_msi(sc->dev);
    if (sc->msix_table != NULL) {
        bus_release_resource(sc->dev, SYS_RES_MEMORY,
            rman_get_rid(sc->msix_table),
            sc->msix_table);
        sc->msix_table = NULL;
    ice_resmgr_destroy(&sc->tx_qmgr);
    ice_resmgr_destroy(&sc->rx_qmgr);
    if (!ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
        ice_deinit_hw(&sc->hw);
    status = ice_reset(&sc->hw, ICE_RESET_PFR);
        device_printf(sc->dev, "device PF reset failed, err %s\n",

 * ice_if_tx_queues_alloc - Allocate Tx queue memory
 * @ntxqs: the number of Tx queues per set (should always be 1)
 * @ntxqsets: the number of Tx queue sets to allocate
 * Called by iflib to allocate Tx queues for the device. Allocates driver
 * status reporting, and Tx queue sysctls.
    struct ice_vsi *vsi = &sc->pf_vsi;
    MPASS(sc->scctx->isc_ntxd[0] <= ICE_MAX_DESC_COUNT);
    /* Do not bother allocating queues if we're in recovery mode */
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    if (!(vsi->tx_queues =
        (struct ice_tx_queue *) malloc(sizeof(struct ice_tx_queue) * ntxqsets,
        M_ICE, M_NOWAIT | M_ZERO))) {
        device_printf(sc->dev, "Unable to allocate Tx queue memory\n");
    for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) {
        if (!(txq->tx_rsq =
            (uint16_t *) malloc(sizeof(uint16_t) * sc->scctx->isc_ntxd[0], M_ICE, M_NOWAIT))) {
            device_printf(sc->dev, "Unable to allocate tx_rsq memory\n");
        for (j = 0; j < sc->scctx->isc_ntxd[0]; j++)
            txq->tx_rsq[j] = QIDX_INVALID;

    /* Assign queues from PF space to the main VSI */
    err = ice_resmgr_assign_contiguous(&sc->tx_qmgr, vsi->tx_qmap, ntxqsets);
        device_printf(sc->dev, "Unable to assign PF queues: %s\n",
    vsi->qmap_type = ICE_RESMGR_ALLOC_CONTIGUOUS;

    /* Add Tx queue sysctls context */
    for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) {
        txq->me = txq->q_handle = i;
        txq->vsi = vsi;
        txq->desc_count = sc->scctx->isc_ntxd[0];
        /* get the virtual and physical address of the hardware queues */
        txq->tail = QTX_COMM_DBELL(vsi->tx_qmap[i]);
        txq->tx_base = (struct ice_tx_desc *)vaddrs[i];
        txq->tx_paddr = paddrs[i];
    vsi->num_tx_queues = ntxqsets;
    for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) {
        if (txq->tx_rsq != NULL) {
            free(txq->tx_rsq, M_ICE);
            txq->tx_rsq = NULL;
    free(vsi->tx_queues, M_ICE);
    vsi->tx_queues = NULL;
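
/*
 * Sketch of the iflib contract implemented above, for orientation (the
 * signature follows the iflib ifdi_tx_queues_alloc method; this comment
 * is an addition, not driver code):
 *
 *    static int
 *    ice_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
 *        uint64_t *paddrs, int ntxqs, int ntxqsets);
 *
 * iflib owns the descriptor DMA allocations and passes one virtual and
 * one physical base address per queue set; the driver only wraps them
 * in its own ice_tx_queue software state.
 */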
 * ice_if_rx_queues_alloc - Allocate Rx queue memory
 * @nrxqs: number of Rx queues per set (should always be 1)
 * Called by iflib to allocate Rx queues for the device. Allocates driver
    struct ice_vsi *vsi = &sc->pf_vsi;
    MPASS(sc->scctx->isc_nrxd[0] <= ICE_MAX_DESC_COUNT);
    /* Do not bother allocating queues if we're in recovery mode */
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    if (!(vsi->rx_queues =
        (struct ice_rx_queue *) malloc(sizeof(struct ice_rx_queue) * nrxqsets,
        M_ICE, M_NOWAIT | M_ZERO))) {
        device_printf(sc->dev, "Unable to allocate Rx queue memory\n");

    /* Assign queues from PF space to the main VSI */
    err = ice_resmgr_assign_contiguous(&sc->rx_qmgr, vsi->rx_qmap, nrxqsets);
        device_printf(sc->dev, "Unable to assign PF queues: %s\n",
    vsi->qmap_type = ICE_RESMGR_ALLOC_CONTIGUOUS;
    for (i = 0, rxq = vsi->rx_queues; i < nrxqsets; i++, rxq++) {
        rxq->me = i;
        rxq->vsi = vsi;
        rxq->desc_count = sc->scctx->isc_nrxd[0];
        /* get the virtual and physical address of the hardware queues */
        rxq->tail = QRX_TAIL(vsi->rx_qmap[i]);
        rxq->rx_base = (union ice_32b_rx_flex_desc *)vaddrs[i];
        rxq->rx_paddr = paddrs[i];
    vsi->num_rx_queues = nrxqsets;
    free(vsi->rx_queues, M_ICE);
    vsi->rx_queues = NULL;

 * ice_if_queues_free - Free queue memory
    struct ice_vsi *vsi = &sc->pf_vsi;
    /* Free the Tx and Rx sysctl contexts, and assign NULL to the node
    /* Release MSI-X IRQ vectors, if not yet released in ice_if_detach */
    if (vsi->tx_queues != NULL) {
        for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) {
            if (txq->tx_rsq != NULL) {
                free(txq->tx_rsq, M_ICE);
                txq->tx_rsq = NULL;
        free(vsi->tx_queues, M_ICE);
        vsi->tx_queues = NULL;
        vsi->num_tx_queues = 0;
    if (vsi->rx_queues != NULL) {
        free(vsi->rx_queues, M_ICE);
        vsi->rx_queues = NULL;
        vsi->num_rx_queues = 0;

 * ice_msix_que - Fast interrupt handler for MSI-X receive queues
 * Interrupt filter function for iflib MSI-X interrupts. Called by iflib when
 * an MSI-X interrupt for a given queue is triggered. Currently this just asks
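
/*
 * Minimal sketch of such an iflib interrupt filter (illustrative; the
 * real ice_msix_que body is not part of this listing). The filter runs
 * in interrupt context and defers all work to the iflib taskqueue:
 */
static int
example_msix_que(void *arg __unused)
{
    /* Ask iflib to schedule the queue's task for servicing. */
    return (FILTER_SCHEDULE_THREAD);
}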
 * ice_msix_admin - Fast interrupt handler for MSI-X admin interrupt
    struct ice_hw *hw = &sc->hw;
    device_t dev = sc->dev;
     * vector will not be re-enabled until after we exit this function,
        ice_set_state(&sc->state, ICE_STATE_CONTROLQ_EVENT_PENDING);
        ice_set_state(&sc->state, ICE_STATE_VFLR_PENDING);
        ice_set_state(&sc->state, ICE_STATE_MDD_PENDING);
            sc->soft_stats.corer_count++;
            sc->soft_stats.globr_count++;
            sc->soft_stats.empr_count++;
         * happen. Second, we set hw->reset_ongoing to indicate that
        if (!ice_testandset_state(&sc->state, ICE_STATE_RESET_OICR_RECV)) {
            hw->reset_ongoing = true;
             * goes down and then up. The below if-statement prevents a second
            if (if_getflags(sc->ifp) & IFF_UP)
                ice_set_state(&sc->state, ICE_STATE_FIRST_INIT_LINK);
        ice_set_state(&sc->state, ICE_STATE_RESET_PFR_REQ);
        ice_set_state(&sc->state, ICE_STATE_RESET_PFR_REQ);

 * ice_allocate_msix - Allocate MSI-X vectors for the interface
 * Map the MSI-X bar, and then request MSI-X vectors in a two-stage process.
 * of requested queues or reducing the demand from other features such as
 * IFLIB_SKIP_MSIX flag indicating that the driver will manage MSI-X vectors
 * @remark This driver will only use MSI-X vectors. If this is not possible,
    if_softc_ctx_t scctx = sc->scctx;
    device_t dev = sc->dev;
    int bar, queues, vectors, requested;

    /* Allocate the MSI-X bar */
    bar = scctx->isc_msix_bar;
    sc->msix_table = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar, RF_ACTIVE);
    if (!sc->msix_table) {
        device_printf(dev, "Unable to map MSI-X table\n");
    if (sc->ifc_sysctl_ntxqs || sc->ifc_sysctl_nrxqs)
     * If the override sysctls have been set, limit the queues to
    queues = mp_ncpus;
    queues = CPU_COUNT(&cpus);
    queues = imin(queues, rss_getnumbuckets());
     * Clamp the number of queue pairs to the minimum of the requested Tx
     * and Rx queues.
    queues = imin(queues, sc->ifc_sysctl_ntxqs ?: scctx->isc_ntxqsets);
    queues = imin(queues, sc->ifc_sysctl_nrxqs ?: scctx->isc_nrxqsets);
    if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_RDMA)) {
    requested = rdma + queues + 1;
    if_ctx_t ctx = sc->ctx;
        device_printf(dev, "Failed to allocate %d MSI-X vectors, err %s\n",
        int diff = requested - vectors;
        device_printf(dev, "Requested %d MSI-X vectors, but got only %d\n",
            rdma -= diff;
            ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap);
            diff -= rdma;
        if (queues <= diff) {
            device_printf(dev, "Unable to allocate sufficient MSI-X vectors\n");
        queues -= diff;
    device_printf(dev, "Using %d Tx and Rx queues\n", queues);
        device_printf(dev, "Reserving %d MSI-X interrupts for iRDMA\n",
    device_printf(dev, "Using MSI-X interrupts with %d vectors\n",
    scctx->isc_vectors = vectors;
    scctx->isc_nrxqsets = queues;
    scctx->isc_ntxqsets = queues;
    scctx->isc_intr = IFLIB_INTR_MSIX;
    sc->irdma_vectors = rdma;
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    sc->lan_vectors = vectors - rdma;
    sc->lan_vectors -= extra_vectors;
    err = ice_resmgr_assign_contiguous(&sc->dev_imgr, sc->pf_imap, sc->lan_vectors);
    err = ice_resmgr_assign_contiguous(&sc->dev_imgr, sc->rdma_imap, rdma);
    sc->extra_vectors = extra_vectors;
    err = ice_resmgr_init(&sc->os_imgr, sc->extra_vectors);
        ice_resmgr_release_map(&sc->dev_imgr, sc->rdma_imap,
    ice_resmgr_release_map(&sc->dev_imgr, sc->pf_imap,
        sc->lan_vectors);
    if (sc->msix_table != NULL) {
        bus_release_resource(sc->dev, SYS_RES_MEMORY,
            rman_get_rid(sc->msix_table),
            sc->msix_table);
        sc->msix_table = NULL;
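
/*
 * Sketch of the two-stage request described above, using only the
 * standard pci(9) API (illustrative, not this driver's exact code):
 */
static int
example_alloc_msix(device_t dev, int requested)
{
    int vectors = requested;
    int err;

    /* Stage 1: ask for everything wanted (queues + RDMA + admin). */
    err = pci_alloc_msix(dev, &vectors);
    if (err != 0)
        return (err);

    /* Stage 2: if fewer vectors were granted, the caller shrinks its
     * RDMA reservation first and then its queue count to fit, as
     * ice_allocate_msix() does above. */
    if (vectors < requested)
        device_printf(dev, "got %d of %d vectors\n", vectors, requested);
    return (0);
}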
 * ice_if_msix_intr_assign - Assign MSI-X interrupt vectors to queues
 * Called by iflib to assign MSI-X vectors to queues. Currently requires that
 * we get at least the same number of vectors as we have queues, and that we
 * always have the same number of Tx and Rx queues.
 * Tx queues use a softirq instead of using their own hardware interrupt.
    struct ice_vsi *vsi = &sc->pf_vsi;
    if (vsi->num_rx_queues != vsi->num_tx_queues) {
        device_printf(sc->dev,
            "iflib requested %d Tx queues, and %d Rx queues, but the driver isn't able to support a differing …
            vsi->num_tx_queues, vsi->num_rx_queues);
    if (msix < (vsi->num_rx_queues + 1)) {
        device_printf(sc->dev,
            "Not enough MSI-X vectors to assign one vector to each queue pair\n");
    sc->num_irq_vectors = vsi->num_rx_queues + 1;
    if (!(sc->irqvs =
        (struct ice_irq_vector *) malloc(sizeof(struct ice_irq_vector) * (sc->num_irq_vectors),
        device_printf(sc->dev,
    err = iflib_irq_alloc_generic(ctx, &sc->irqvs[0].irq, 1, IFLIB_INTR_ADMIN,
        ice_msix_admin, sc, 0, "admin");
        device_printf(sc->dev,
    sc->irqvs[0].me = 0;
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    for (i = 0, vector = 1; i < vsi->num_rx_queues; i++, vector++) {
        struct ice_rx_queue *rxq = &vsi->rx_queues[i];
        struct ice_tx_queue *txq = &vsi->tx_queues[i];
        err = iflib_irq_alloc_generic(ctx, &sc->irqvs[vector].irq, rid,
            IFLIB_INTR_RXTX, ice_msix_que,
            rxq, rxq->me, irq_name);
            device_printf(sc->dev,
            vector--;
            i--;
        sc->irqvs[vector].me = vector;
        rxq->irqv = &sc->irqvs[vector];
        iflib_softirq_alloc_generic(ctx, &sc->irqvs[vector].irq,
            IFLIB_INTR_TX, txq,
            txq->me, irq_name);
        txq->irqv = &sc->irqvs[vector];
    sc->last_rid = rid + sc->irdma_vectors;
    for (; i >= 0; i--, vector--)
        iflib_irq_free(ctx, &sc->irqvs[vector].irq);
    iflib_irq_free(ctx, &sc->irqvs[0].irq);
    free(sc->irqvs, M_ICE);
    sc->irqvs = NULL;
 * ice_if_mtu_set - Set the device MTU
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    sc->scctx->isc_max_frame_size = mtu +
        ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
    sc->pf_vsi.max_frame_size = sc->scctx->isc_max_frame_size;

 * ice_if_intr_enable - Enable device interrupts
    struct ice_vsi *vsi = &sc->pf_vsi;
    struct ice_hw *hw = &sc->hw;
    ice_enable_intr(hw, sc->irqvs[0].me);
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    for (int i = 0; i < vsi->num_rx_queues; i++)
        ice_enable_intr(hw, vsi->rx_queues[i].irqv->me);

 * ice_if_intr_disable - Disable device interrupts
    struct ice_hw *hw = &sc->hw;
     * assigned to queues. Instead of assuming that the interrupt
    for (i = 1; i < hw->func_caps.common_cap.num_msix_vectors; i++)

 * ice_if_rx_queue_intr_enable - Enable a specific Rx queue interrupt
    struct ice_vsi *vsi = &sc->pf_vsi;
    struct ice_hw *hw = &sc->hw;
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    ice_enable_intr(hw, vsi->rx_queues[rxqid].irqv->me);

 * ice_if_tx_queue_intr_enable - Enable a specific Tx queue interrupt
 * @txqid: the Tx queue to enable
 * Enable a specific Tx queue interrupt.
    struct ice_vsi *vsi = &sc->pf_vsi;
    struct ice_hw *hw = &sc->hw;
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    ice_enable_intr(hw, vsi->tx_queues[txqid].irqv->me);

 * ice_set_default_promisc_mask - Set default config for promisc settings
 * non-VLAN-tagged/VLAN 0 traffic.
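
/*
 * Sketch, under assumption: the default mask ORs together the unicast
 * and multicast promiscuous bits for both directions. The ICE_PROMISC_*
 * names below come from the shared ice code; this illustration may not
 * match the function body exactly:
 *
 *    ice_set_bit(ICE_PROMISC_UCAST_TX, promisc_mask);
 *    ice_set_bit(ICE_PROMISC_UCAST_RX, promisc_mask);
 *    ice_set_bit(ICE_PROMISC_MCAST_TX, promisc_mask);
 *    ice_set_bit(ICE_PROMISC_MCAST_RX, promisc_mask);
 */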
 * ice_if_promisc_set - Set device promiscuous mode
    struct ice_hw *hw = &sc->hw;
    device_t dev = sc->dev;
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
        status = ice_set_vsi_promisc(hw, sc->pf_vsi.idx,
            ice_aq_str(hw->adminq.sq_last_status));
        status = ice_clear_vsi_promisc(hw, sc->pf_vsi.idx,
            ice_aq_str(hw->adminq.sq_last_status));

 * ice_if_media_change - Change device media
    device_printf(sc->dev, "Media change is not supported.\n");

 * ice_if_media_status - Report current device media
    struct ice_link_status *li = &sc->hw.port_info->phy.link_info;
    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    if (!sc->link_up)
    ifmr->ifm_status |= IFM_ACTIVE;
    ifmr->ifm_active |= IFM_FDX;
    if (li->phy_type_low)
        ifmr->ifm_active |= ice_get_phy_type_low(li->phy_type_low);
    else if (li->phy_type_high)
        ifmr->ifm_active |= ice_get_phy_type_high(li->phy_type_high);
        ifmr->ifm_active |= IFM_UNKNOWN;
    if (li->an_info & ICE_AQ_LINK_PAUSE_TX)
        ifmr->ifm_active |= IFM_ETH_TXPAUSE;
    if (li->an_info & ICE_AQ_LINK_PAUSE_RX)
        ifmr->ifm_active |= IFM_ETH_RXPAUSE;

 * ice_init_tx_tracking - Initialize Tx queue software tracking values
 * Initialize Tx queue software tracking values, including the Report Status
    for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) {
        txq->tx_rs_cidx = txq->tx_rs_pidx = 0;
         * off-by-one error in ice_ift_txd_credits_update for the
        txq->tx_cidx_processed = txq->desc_count - 1;
        for (j = 0; j < txq->desc_count; j++)
            txq->tx_rsq[j] = QIDX_INVALID;

 * ice_update_rx_mbuf_sz - Update the Rx buffer size for all queues
    uint32_t mbuf_sz = iflib_get_rx_mbuf_sz(sc->ctx);
    struct ice_vsi *vsi = &sc->pf_vsi;
    vsi->mbuf_sz = mbuf_sz;
 * ice_if_init - Initialize the device
 * device filters and prepares the Tx and Rx engines.
    device_t dev = sc->dev;
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED)) {
        device_printf(sc->dev, "request to start interface cannot be completed as the device failed to res…
    if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) {
        device_printf(sc->dev, "request to start interface while device is prepared for impending reset\n"…

    /* Initialize software Tx tracking values */
    ice_init_tx_tracking(&sc->pf_vsi);
    err = ice_cfg_vsi_for_tx(&sc->pf_vsi);
            "Unable to configure the main VSI for Tx: %s\n",
    err = ice_cfg_vsi_for_rx(&sc->pf_vsi);
    err = ice_control_all_rx_queues(&sc->pf_vsi, true);
    /* We use software interrupts for Tx, so we only program the hardware
    ice_configure_all_rxq_interrupts(&sc->pf_vsi);
    ice_configure_rx_itr(&sc->pf_vsi);
    ice_if_promisc_set(ctx, if_getflags(sc->ifp));
    if (!ice_testandclear_state(&sc->state, ICE_STATE_FIRST_INIT_LINK))
        if (!sc->link_up && ((if_getflags(sc->ifp) & IFF_UP) ||
            ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN)))
    ice_set_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED);
    if (sc->mirr_if && ice_testandclear_state(&mif->state, ICE_STATE_SUBIF_NEEDS_REINIT)) {
        ice_clear_state(&mif->state, ICE_STATE_DRIVER_INITIALIZED);
        iflib_request_reset(sc->mirr_if->subctx);
        iflib_admin_intr_deferred(sc->mirr_if->subctx);
    ice_control_all_rx_queues(&sc->pf_vsi, false);
    ice_vsi_disable_tx(&sc->pf_vsi);

 * ice_poll_for_media_avail - Re-enable link if media is detected
 * sends the Get Link Status AQ command and re-enables HW link if the
    struct ice_hw *hw = &sc->hw;
    struct ice_port_info *pi = hw->port_info;
        ice_test_state(&sc->state, ICE_STATE_PHY_FW_INIT_PENDING)) {
        ice_clear_state(&sc->state, ICE_STATE_PHY_FW_INIT_PENDING);
    if (ice_test_state(&sc->state, ICE_STATE_NO_MEDIA)) {
        pi->phy.get_link_info = true;
        ice_get_link_status(pi, &sc->link_up);
        if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
            /* Re-enable link and re-apply user link settings */
            if (ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) ||
                (if_getflags(sc->ifp) & IFF_UP)) {
                status = ice_add_media_types(sc, sc->media);
                    device_printf(sc->dev,
                        ice_aq_str(hw->adminq.sq_last_status));
            ice_clear_state(&sc->state, ICE_STATE_NO_MEDIA);

 * ice_if_timer - called by iflib periodically
    uint64_t prev_link_xoff_rx = sc->stats.cur.link_xoff_rx;
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    if (sc->stats.cur.link_xoff_rx != prev_link_xoff_rx)
        sc->scctx->isc_pause_frames = 1;
    ice_update_vsi_hw_stats(&sc->pf_vsi);
    if (sc->mirr_if && sc->mirr_if->if_attached)
        ice_update_vsi_hw_stats(sc->mirr_if->vsi);

 * ice_admin_timer - called periodically to trigger the admin task
    iflib_admin_intr_deferred(sc->ctx);
    callout_schedule(&sc->admin_timer, hz/2);
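
/*
 * Aside (illustrative): because the callout was created with
 * callout_init_mtx(&sc->admin_timer, &sc->admin_mtx, 0) during attach,
 * this handler runs with admin_mtx held; re-arming every half second
 * (hz/2 ticks) via callout_schedule() therefore cannot race with the
 * mutex-protected callout_stop() calls in the detach path above.
 */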
 * ice_transition_recovery_mode - Transition to recovery mode
    struct ice_vsi *vsi = &sc->pf_vsi;
    device_printf(sc->dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R)…
    iflib_link_state_change(sc->ctx, LINK_STATE_DOWN, 0);
    /* Request that the device be re-initialized */
    ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap);
    ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en);
    ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_cap);
    for (i = 0; i < sc->num_available_vsi; i++) {
        if (sc->all_vsi[i])
            ice_release_vsi(sc->all_vsi[i]);
    sc->num_available_vsi = 0;
    if (sc->all_vsi) {
        free(sc->all_vsi, M_ICE);
        sc->all_vsi = NULL;
    ice_resmgr_destroy(&sc->dev_imgr);
    ice_resmgr_destroy(&sc->tx_qmgr);
    ice_resmgr_destroy(&sc->rx_qmgr);
    ice_deinit_hw(&sc->hw);

 * ice_transition_safe_mode - Transition to safe mode
 * change the number of queues dynamically when using iflib. Due to this, we
 * do not attempt to reduce the number of queues.
    ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en);
    ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap);
    ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en);
    ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_cap);
    ice_clear_bit(ICE_FEATURE_RSS, sc->feat_cap);
    ice_clear_bit(ICE_FEATURE_RSS, sc->feat_en);

 * ice_if_update_admin_status - update admin status
    fw_mode = ice_get_fw_mode(&sc->hw);
        if (!ice_testandset_state(&sc->state, ICE_STATE_RECOVERY_MODE)) {
        if (!ice_testandset_state(&sc->state, ICE_STATE_ROLLBACK_MODE)) {
            ice_print_rollback_msg(&sc->hw);
    if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED) ||
        ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET) ||
        ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) {
         * If we know the control queues are disabled, skip processing
         * the control queues entirely.
    } else if (ice_testandclear_state(&sc->state, ICE_STATE_CONTROLQ_EVENT_PENDING)) {
        if (ice_is_generic_mac(&sc->hw)) {
     * ourselves. Otherwise, we can just re-enable the interrupt. We'll be
        ice_set_state(&sc->state, ICE_STATE_CONTROLQ_EVENT_PENDING);
        ice_enable_intr(&sc->hw, sc->irqvs[0].me);

 * ice_prepare_for_reset - Prepare device for an impending reset
    struct ice_hw *hw = &sc->hw;
    if (ice_testandset_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET))
    log(LOG_INFO, "%s: preparing to reset device logic\n", if_name(sc->ifp));
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    ice_resmgr_release_map(&sc->tx_qmgr, sc->pf_vsi.tx_qmap,
        sc->pf_vsi.num_tx_queues);
    ice_resmgr_release_map(&sc->rx_qmgr, sc->pf_vsi.rx_qmap,
        sc->pf_vsi.num_rx_queues);
    if (sc->mirr_if) {
        ice_resmgr_release_map(&sc->tx_qmgr, sc->mirr_if->vsi->tx_qmap,
            sc->mirr_if->num_irq_vectors);
        ice_resmgr_release_map(&sc->rx_qmgr, sc->mirr_if->vsi->rx_qmap,
            sc->mirr_if->num_irq_vectors);
    if (hw->port_info)

 * ice_rebuild_pf_vsi_qmap - Rebuild the main PF VSI queue mapping
 * Loops over the Tx and Rx queues for the main PF VSI and reassigns the queue
    struct ice_vsi *vsi = &sc->pf_vsi;

    /* Re-assign Tx queues from PF space to the main VSI */
    err = ice_resmgr_assign_contiguous(&sc->tx_qmgr, vsi->tx_qmap,
        vsi->num_tx_queues);
        device_printf(sc->dev, "Unable to re-assign PF Tx queues: %s\n",

    /* Re-assign Rx queues from PF space to this VSI */
    err = ice_resmgr_assign_contiguous(&sc->rx_qmgr, vsi->rx_qmap,
        vsi->num_rx_queues);
        device_printf(sc->dev, "Unable to re-assign PF Rx queues: %s\n",
    vsi->qmap_type = ICE_RESMGR_ALLOC_CONTIGUOUS;

    /* Re-assign Tx queue tail pointers */
    for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++)
        txq->tail = QTX_COMM_DBELL(vsi->tx_qmap[i]);

    /* Re-assign Rx queue tail pointers */
    for (i = 0, rxq = vsi->rx_queues; i < vsi->num_rx_queues; i++, rxq++)
        rxq->tail = QRX_TAIL(vsi->rx_qmap[i]);
    ice_resmgr_release_map(&sc->tx_qmgr, sc->pf_vsi.tx_qmap,
        sc->pf_vsi.num_tx_queues);

 * ice_rebuild_recovery_mode - Rebuild driver state while in recovery mode
    device_t dev = sc->dev;
    ice_enable_intr(&sc->hw, sc->irqvs[0].me);
    ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET);
    log(LOG_INFO, "%s: device rebuild successful\n", if_name(sc->ifp));
     * the iflib core, we also want to re-run the admin task so that iflib
 * ice_rebuild - Rebuild driver state post reset
 * the hardware port, and re-enable the VSIs.
    struct ice_hw *hw = &sc->hw;
    device_t dev = sc->dev;
    sc->rebuild_ticks = ticks;
    ice_clear_state(&sc->state, ICE_STATE_RESET_FAILED);
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) {
        device_printf(dev, "failed to re-init controlqs, err %s\n",

    /* Query the allocated resources for Tx scheduler */
            ice_aq_str(hw->adminq.sq_last_status));

    /* Re-enable FW logging. Keep going even if this fails */
    if (hw->pf_id == 0)
        status = ice_fwlog_set(hw, &hw->fwlog_cfg);
     * enabled pre-rebuild.
    if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) {
            device_printf(dev, "failed to re-register fw logging, err %s aq_err %s\n",
                ice_aq_str(hw->adminq.sq_last_status));
            ice_aq_str(hw->adminq.sq_last_status));
    status = ice_sched_init_port(hw->port_info);
    if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE)) {
        pkg_state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
        device_printf(sc->dev, "Unable to re-assign main VSI queues, err %s\n",
    err = ice_initialize_vsi(&sc->pf_vsi);
        device_printf(sc->dev, "Unable to re-initialize Main VSI, err %s\n",

    /* Re-enable FW health event reporting */
    err = ice_config_rss(&sc->pf_vsi);
        device_printf(sc->dev,
    if (hw->port_info->qos_cfg.is_sw_lldp)
    ice_clear_state(&sc->state, ICE_STATE_LINK_STATUS_REPORTED);

    /* RDMA interface will be restarted by the stack re-init */
    ice_enable_intr(&sc->hw, sc->irqvs[0].me);
    ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET);
    if (sc->mirr_if) {
    log(LOG_INFO, "%s: device rebuild successful\n", if_name(sc->ifp));
     * the iflib core, we also want to re-run the admin task so that iflib
     * queues, not only 0. It contains ice_request_stack_reinit as well.
    if (hw->port_info->qos_cfg.is_sw_lldp)
    ice_deinit_vsi(&sc->pf_vsi);
    ice_resmgr_release_map(&sc->tx_qmgr, sc->pf_vsi.tx_qmap,
        sc->pf_vsi.num_tx_queues);
    ice_resmgr_release_map(&sc->rx_qmgr, sc->pf_vsi.rx_qmap,
        sc->pf_vsi.num_rx_queues);
    ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET);
    ice_set_state(&sc->state, ICE_STATE_RESET_FAILED);

 * ice_handle_reset_event - Handle reset events triggered by OICR
    struct ice_hw *hw = &sc->hw;
    device_t dev = sc->dev;
    if (!ice_testandclear_state(&sc->state, ICE_STATE_RESET_OICR_RECV))
    ice_set_state(&sc->state, ICE_STATE_RESET_FAILED);
    ice_clear_state(&sc->state, ICE_STATE_RESET_PFR_REQ);
    ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET);
    sc->hw.reset_ongoing = false;
    if (ice_testandclear_state(&sc->state, ICE_STATE_RESET_PFR_REQ))

 * ice_handle_pf_reset_request - Initiate PF reset requested by software
    struct ice_hw *hw = &sc->hw;
    if (!ice_testandclear_state(&sc->state, ICE_STATE_RESET_PFR_REQ))
        device_printf(sc->dev, "device PF reset failed, err %s\n",
        ice_set_state(&sc->state, ICE_STATE_RESET_FAILED);
    sc->soft_stats.pfr_count++;

 * ice_init_device_features - Init device driver features
    struct ice_hw *hw = &sc->hw;
    ice_set_bit(ICE_FEATURE_SRIOV, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_RSS, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_RDMA, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_LENIENT_LINK_MODE, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_1, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_2, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_FW_LOGGING, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_HAS_PBA, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_DCB, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_TX_BALANCE, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_PHY_STATISTICS, sc->feat_cap);
        ice_set_bit(ICE_FEATURE_PHY_STATISTICS, sc->feat_en);
        ice_set_bit(ICE_FEATURE_DUAL_NAC, sc->feat_cap);
    if (!hw->func_caps.common_cap.rss_table_size)
        ice_clear_bit(ICE_FEATURE_RSS, sc->feat_cap);
    if (!hw->func_caps.common_cap.iwarp || !ice_enable_irdma)
        ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap);
    if (!hw->func_caps.common_cap.dcb)
        ice_clear_bit(ICE_FEATURE_DCB, sc->feat_cap);
        ice_clear_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap);
        ice_clear_bit(ICE_FEATURE_FW_LOGGING, sc->feat_cap);
    if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) {
        if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_FW_LOGGING))
            ice_set_bit(ICE_FEATURE_FW_LOGGING, sc->feat_en);
    ice_disable_unsupported_features(sc->feat_cap);
    if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_RSS))
        ice_set_bit(ICE_FEATURE_RSS, sc->feat_en);
        ice_clear_bit(ICE_FEATURE_TX_BALANCE, sc->feat_cap);
    if (hw->dev_caps.supported_sensors & ICE_SENSOR_SUPPORT_E810_INT_TEMP) {
        ice_set_bit(ICE_FEATURE_TEMP_SENSOR, sc->feat_cap);
        ice_set_bit(ICE_FEATURE_TEMP_SENSOR, sc->feat_en);
    if (hw->func_caps.common_cap.next_cluster_id_support ||
        hw->dev_caps.common_cap.next_cluster_id_support) {
        ice_set_bit(ICE_FEATURE_NEXT_CLUSTER_ID, sc->feat_cap);
        ice_set_bit(ICE_FEATURE_NEXT_CLUSTER_ID, sc->feat_en);

 * ice_if_multi_set - Callback to update Multicast filters in HW
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
        device_printf(sc->dev,

 * ice_if_vlan_register - Register a VLAN with the hardware
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    status = ice_add_vlan_hw_filter(&sc->pf_vsi, vtag);
        device_printf(sc->dev,
            ice_aq_str(sc->hw.adminq.sq_last_status));

 * ice_if_vlan_unregister - Remove a VLAN filter from the hardware
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    status = ice_remove_vlan_hw_filter(&sc->pf_vsi, vtag);
        device_printf(sc->dev,
            ice_aq_str(sc->hw.adminq.sq_last_status));

 * ice_if_stop - Stop the device
     * don't have, and disable Tx queues which aren't yet configured.
    if (!ice_testandclear_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED))
    if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED)) {
        device_printf(sc->dev, "request to stop interface cannot be completed as the device failed to rese…
    if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) {
        device_printf(sc->dev, "request to stop interface while device is prepared for impending reset\n");
    /* Remove the MAC filters, stop Tx, and stop Rx. We don't check the

    /* Dissociate the Tx and Rx queues from the interrupts */
    ice_flush_txq_interrupts(&sc->pf_vsi);
    ice_flush_rxq_interrupts(&sc->pf_vsi);

    /* Disable the Tx and Rx queues */
    ice_vsi_disable_tx(&sc->pf_vsi);
    ice_control_all_rx_queues(&sc->pf_vsi, false);
    if (!ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) &&
        !(if_getflags(sc->ifp) & IFF_UP) && sc->link_up)
    if (sc->mirr_if && ice_test_state(&mif->state, ICE_STATE_SUBIF_NEEDS_REINIT)) {
        ice_subif_if_stop(sc->mirr_if->subctx);
        device_printf(sc->dev, "The subinterface also comes down and up after reset\n");

 * ice_if_get_counter - Get current value of an ifnet statistic
    return ice_get_ifnet_counter(&sc->pf_vsi, counter);

 * ice_request_stack_reinit - Request that iflib re-initialize
 * Request that the device be brought down and up, to re-initialize. For
 * example, this may be called when a device reset occurs, or when Tx and Rx
 * queues need to be re-initialized.
 * re-initialized if we need to restart Tx and Rx queues.
    if (CTX_ACTIVE(sc->ctx)) {
        iflib_request_reset(sc->ctx);
        iflib_admin_intr_deferred(sc->ctx);

 * ice_driver_is_detaching - Check if the driver is detaching/unloading
 * detach-based race conditions as it is possible for a thread to race with
    return (ice_test_state(&sc->state, ICE_STATE_DETACHING) ||
        iflib_in_detach(sc->ctx));
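
/*
 * Typical use (illustrative, not from this listing): long-running
 * handlers bail out early once a detach begins, e.g.
 *
 *    if (ice_driver_is_detaching(sc))
 *        return (ESHUTDOWN);
 */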
3179 * ice_if_priv_ioctl - Device private ioctl handler
3193 device_t dev = sc->dev; in ice_if_priv_ioctl()
3226 switch (ifd->ifd_cmd) { in ice_if_priv_ioctl()
3237 * ice_if_i2c_req - I2C request handler for iflib
3243 * @remark The iflib-only part is pretty simple.
3254 * ice_if_suspend - PCI device suspend handler for iflib
3268 * either via FLR or during the D3->D0 transition. in ice_if_suspend()
3270 ice_clear_state(&sc->state, ICE_STATE_RESET_PFR_REQ); in ice_if_suspend()
3278 * ice_if_resume - PCI device resume handler for iflib
3297 * ice_if_needs_restart - Tell iflib when the driver needs to be reinitialized
3312 if (!ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) && in ice_if_needs_restart()
3313 !(if_getflags(sc->ifp) & IFF_UP)) in ice_if_needs_restart()
3321 * ice_init_link - Do link configuration and link status reporting
3324 * Contains an extra check that skips link config when an E830 device
3330 struct ice_hw *hw = &sc->hw; in ice_init_link()
3331 device_t dev = sc->dev; in ice_init_link()
3338 ice_set_state(&sc->state, ICE_STATE_PHY_FW_INIT_PENDING); in ice_init_link()
3343 /* Do not access PHY config while PHY FW is busy initializing */ in ice_init_link()
3345 ice_clear_state(&sc->state, ICE_STATE_PHY_FW_INIT_PENDING); in ice_init_link()
3407 * - isc_admin_intrcnt is set to 0
3408 * - Uses subif iflib driver methods
3409 * - Flagged as a VF for iflib
3454 if_softc_ctx_t scctx = mif->subscctx; in ice_subif_setup_scctx()
3456 scctx->isc_txrx = &ice_subif_txrx; in ice_subif_setup_scctx()
3458 scctx->isc_capenable = ICE_FULL_CAPS; in ice_subif_setup_scctx()
3459 scctx->isc_tx_csum_flags = ICE_CSUM_OFFLOAD; in ice_subif_setup_scctx()
3461 scctx->isc_ntxqsets = 4; in ice_subif_setup_scctx()
3462 scctx->isc_nrxqsets = 4; in ice_subif_setup_scctx()
3463 scctx->isc_vectors = scctx->isc_nrxqsets; in ice_subif_setup_scctx()
3465 scctx->isc_ntxqsets_max = 256; in ice_subif_setup_scctx()
3466 scctx->isc_nrxqsets_max = 256; in ice_subif_setup_scctx()
3468 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] in ice_subif_setup_scctx()
3470 scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] in ice_subif_setup_scctx()
3473 scctx->isc_tx_nsegments = ICE_MAX_TX_SEGS; in ice_subif_setup_scctx()
3474 scctx->isc_tx_tso_segments_max = ICE_MAX_TSO_SEGS; in ice_subif_setup_scctx()
3475 scctx->isc_tx_tso_size_max = ICE_TSO_SIZE; in ice_subif_setup_scctx()
3476 scctx->isc_tx_tso_segsize_max = ICE_MAX_DMA_SEG_SIZE; in ice_subif_setup_scctx()
3485 mif->subctx = ctx; in ice_subif_if_attach_pre()
3486 mif->subdev = dev; in ice_subif_if_attach_pre()
3487 mif->subscctx = iflib_get_softc_ctx(ctx); in ice_subif_if_attach_pre()
3502 * ice_destroy_mirror_interface - destroy mirror interface
3513 struct ice_mirr_if *mif = sc->mirr_if; in ice_destroy_mirror_interface()
3514 struct ice_vsi *vsi = mif->vsi; in ice_destroy_mirror_interface()
3518 is_locked = sx_xlocked(sc->iflib_ctx_lock); in ice_destroy_mirror_interface()
3522 if (mif->ifp) { in ice_destroy_mirror_interface()
3523 ret = iflib_device_deregister(mif->subctx); in ice_destroy_mirror_interface()
3525 device_printf(sc->dev, in ice_destroy_mirror_interface()
3532 ret = device_delete_child(sc->dev, mif->subdev); in ice_destroy_mirror_interface()
3535 device_printf(sc->dev, in ice_destroy_mirror_interface()
3543 if (mif->if_imap) { in ice_destroy_mirror_interface()
3544 free(mif->if_imap, M_ICE); in ice_destroy_mirror_interface()
3545 mif->if_imap = NULL; in ice_destroy_mirror_interface()
3547 if (mif->os_imap) { in ice_destroy_mirror_interface()
3548 free(mif->os_imap, M_ICE); in ice_destroy_mirror_interface()
3549 mif->os_imap = NULL; in ice_destroy_mirror_interface()
3554 * - rx_irqvs in ice_destroy_mirror_interface()
3555 * - tx_queues in ice_destroy_mirror_interface()
3556 * - rx_queues in ice_destroy_mirror_interface()
3561 sc->mirr_if = NULL; in ice_destroy_mirror_interface()
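Ordering is the subtle part of this teardown: the iflib context lock is dropped (when held) around the deregister and child-device deletion, and the interrupt maps are freed before sc->mirr_if is cleared. A condensed sketch; IFLIB_CTX_LOCK/IFLIB_CTX_UNLOCK are assumed to be this driver's wrappers over the sx lock tested above:

	bool is_locked = sx_xlocked(sc->iflib_ctx_lock);

	if (is_locked)
		IFLIB_CTX_UNLOCK(sc);		/* assumed wrapper */
	if (mif->ifp)
		iflib_device_deregister(mif->subctx);
	device_delete_child(sc->dev, mif->subdev);
	if (is_locked)
		IFLIB_CTX_LOCK(sc);

	free(mif->if_imap, M_ICE);		/* interrupt index maps */
	free(mif->os_imap, M_ICE);
	free(mif, M_ICE);			/* queues/irqvs freed earlier */
	sc->mirr_if = NULL;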
3566 * ice_setup_mirror_vsi - Initialize mirror VSI
3577 struct ice_softc *sc = mif->back; in ice_setup_mirror_vsi()
3578 device_t dev = sc->dev; in ice_setup_mirror_vsi()
3588 mif->vsi = vsi; in ice_setup_mirror_vsi()
3590 /* Reserve VSI queue allocation from PF queues */ in ice_setup_mirror_vsi()
3592 vsi->num_tx_queues = vsi->num_rx_queues = ICE_DEFAULT_VF_QUEUES; in ice_setup_mirror_vsi()
3594 /* Assign Tx queues from PF space */ in ice_setup_mirror_vsi()
3595 ret = ice_resmgr_assign_scattered(&sc->tx_qmgr, vsi->tx_qmap, in ice_setup_mirror_vsi()
3596 vsi->num_tx_queues); in ice_setup_mirror_vsi()
3598 device_printf(dev, "Unable to assign mirror VSI Tx queues: %s\n", in ice_setup_mirror_vsi()
3602 /* Assign Rx queues from PF space */ in ice_setup_mirror_vsi()
3603 ret = ice_resmgr_assign_scattered(&sc->rx_qmgr, vsi->rx_qmap, in ice_setup_mirror_vsi()
3604 vsi->num_rx_queues); in ice_setup_mirror_vsi()
3606 device_printf(dev, "Unable to assign mirror VSI Rx queues: %s\n", in ice_setup_mirror_vsi()
3610 vsi->qmap_type = ICE_RESMGR_ALLOC_SCATTERED; in ice_setup_mirror_vsi()
3611 vsi->max_frame_size = ICE_MAX_FRAME_SIZE; in ice_setup_mirror_vsi()
3630 vsi->mirror_src_vsi = sc->pf_vsi.idx; in ice_setup_mirror_vsi()
3632 ice_debug(&sc->hw, ICE_DBG_INIT, in ice_setup_mirror_vsi()
3634 vsi->mirror_src_vsi, vsi->idx); in ice_setup_mirror_vsi()
3635 ice_debug(&sc->hw, ICE_DBG_INIT, "(HW num: VSI %d to %d)\n", in ice_setup_mirror_vsi()
3636 ice_get_hw_vsi_num(&sc->hw, vsi->mirror_src_vsi), in ice_setup_mirror_vsi()
3637 ice_get_hw_vsi_num(&sc->hw, vsi->idx)); in ice_setup_mirror_vsi()
3651 mif->vsi = NULL; in ice_setup_mirror_vsi()
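Each ice_resmgr_assign_scattered() call above reserves queue indices out of a PF-wide manager, so a failure after the first assignment has to return the Tx map with ice_resmgr_release_map() (the same call the rebuild path further below uses). A sketch of the pairing, with the error labels illustrative:

	ret = ice_resmgr_assign_scattered(&sc->tx_qmgr, vsi->tx_qmap,
	    vsi->num_tx_queues);
	if (ret)
		goto fail;
	ret = ice_resmgr_assign_scattered(&sc->rx_qmgr, vsi->rx_qmap,
	    vsi->num_rx_queues);
	if (ret)
		goto release_tx;		/* hand the Tx indices back */
	vsi->qmap_type = ICE_RESMGR_ALLOC_SCATTERED;
	return (0);

release_tx:
	ice_resmgr_release_map(&sc->tx_qmgr, vsi->tx_qmap,
	    vsi->num_tx_queues);
fail:
	return (ret);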
3656 * ice_create_mirror_interface - Initialize mirror interface
3671 device_t dev = sc->dev; in ice_create_mirror_interface()
3684 sc->mirr_if = mif; in ice_create_mirror_interface()
3685 mif->back = sc; in ice_create_mirror_interface()
3688 * - ice_subif_if_tx_queues_alloc in ice_create_mirror_interface()
3689 * - ice_subif_if_rx_queues_alloc in ice_create_mirror_interface()
3706 mif->subdev = device_add_child(dev, sbuf_data(sb), 0); in ice_create_mirror_interface()
3709 if (!mif->subdev) { in ice_create_mirror_interface()
3713 sc->mirr_if = NULL; in ice_create_mirror_interface()
3718 device_set_driver(mif->subdev, &ice_subif_driver); in ice_create_mirror_interface()
3723 ret = iflib_device_register(mif->subdev, mif, &ice_subif_sctx, &mif->subctx); in ice_create_mirror_interface()
3728 mif->ifp = iflib_get_ifp(mif->subctx); in ice_create_mirror_interface()
3729 if_setflagbits(mif->ifp, IFF_MONITOR, 0); in ice_create_mirror_interface()
3732 media = iflib_get_media(mif->subctx); in ice_create_mirror_interface()
3737 device_get_nameunit(mif->subdev), if_name(mif->ifp)); in ice_create_mirror_interface()
3739 ice_add_vsi_sysctls(mif->vsi); in ice_create_mirror_interface()
3745 mif->if_attached = true; in ice_create_mirror_interface()
3761 * number of vectors as we have queues, and that we always have the same number
3762 * of Tx and Rx queues. Unlike that function, this calls a special
3764 * driver needs to get MSI-X resources from the parent device.
3766 * Tx queues use a softirq instead of using their own hardware interrupt so that
3775 struct ice_softc *sc = mif->back; in ice_wire_mirror_intrs()
3776 struct ice_hw *hw = &sc->hw; in ice_wire_mirror_intrs()
3777 struct ice_vsi *vsi = mif->vsi; in ice_wire_mirror_intrs()
3778 device_t dev = mif->subdev; in ice_wire_mirror_intrs()
3781 if_ctx_t ctx = mif->subctx; in ice_wire_mirror_intrs()
3783 ice_debug(hw, ICE_DBG_INIT, "%s: Last rid: %d\n", __func__, sc->last_rid); in ice_wire_mirror_intrs()
3785 rid = sc->last_rid + 1; in ice_wire_mirror_intrs()
3786 for (i = 0; i < vsi->num_rx_queues; i++, rid++) { in ice_wire_mirror_intrs()
3787 struct ice_rx_queue *rxq = &vsi->rx_queues[i]; in ice_wire_mirror_intrs()
3788 struct ice_tx_queue *txq = &vsi->tx_queues[i]; in ice_wire_mirror_intrs()
3794 err = iflib_irq_alloc_generic_subctx(sc->ctx, ctx, in ice_wire_mirror_intrs()
3795 &mif->rx_irqvs[i].irq, rid, IFLIB_INTR_RXTX, ice_msix_que, in ice_wire_mirror_intrs()
3796 rxq, rxq->me, irq_name); in ice_wire_mirror_intrs()
3801 i--; in ice_wire_mirror_intrs()
3804 MPASS(rid - 1 > 0); in ice_wire_mirror_intrs()
3806 mif->rx_irqvs[i].me = rid - 1; in ice_wire_mirror_intrs()
3807 rxq->irqv = &mif->rx_irqvs[i]; in ice_wire_mirror_intrs()
3811 iflib_softirq_alloc_generic(ctx, &mif->rx_irqvs[i].irq, in ice_wire_mirror_intrs()
3812 IFLIB_INTR_TX, txq, txq->me, irq_name); in ice_wire_mirror_intrs()
3813 txq->irqv = &mif->rx_irqvs[i]; in ice_wire_mirror_intrs()
3816 sc->last_rid = rid - 1; in ice_wire_mirror_intrs()
3819 sc->last_rid); in ice_wire_mirror_intrs()
3824 for (; i >= 0; i--) in ice_wire_mirror_intrs()
3825 iflib_irq_free(ctx, &mif->rx_irqvs[i].irq); in ice_wire_mirror_intrs()
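Condensing the loop above: the Rx side of each queue pair gets a real MSI-X vector allocated through the parent context, and the Tx side attaches to that same vector as a softirq, which is why only rx_irqvs exists:

	for (i = 0; i < vsi->num_rx_queues; i++, rid++) {
		struct ice_rx_queue *rxq = &vsi->rx_queues[i];
		struct ice_tx_queue *txq = &vsi->tx_queues[i];
		/* irq_name is built per-queue; elided here */

		/* Hardware vector for Rx, taken from the parent device */
		err = iflib_irq_alloc_generic_subctx(sc->ctx, ctx,
		    &mif->rx_irqvs[i].irq, rid, IFLIB_INTR_RXTX,
		    ice_msix_que, rxq, rxq->me, irq_name);
		if (err)
			goto fail;
		rxq->irqv = &mif->rx_irqvs[i];

		/* Tx shares the Rx vector via a software interrupt */
		iflib_softirq_alloc_generic(ctx, &mif->rx_irqvs[i].irq,
		    IFLIB_INTR_TX, txq, txq->me, irq_name);
		txq->irqv = &mif->rx_irqvs[i];
	}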
3830 * ice_subif_rebuild - Rebuild subinterface post reset
3839 struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(sc->ctx); in ice_subif_rebuild()
3840 struct ice_vsi *vsi = sc->mirr_if->vsi; in ice_subif_rebuild()
3845 device_printf(sc->dev, "Unable to re-assign mirror VSI queues, err %s\n", in ice_subif_rebuild()
3852 device_printf(sc->dev, "Unable to re-initialize mirror VSI, err %s\n", in ice_subif_rebuild()
3859 device_printf(sc->dev, in ice_subif_rebuild()
3865 vsi->mirror_src_vsi = sc->pf_vsi.idx; in ice_subif_rebuild()
3869 device_printf(sc->dev, in ice_subif_rebuild()
3875 ice_set_state(&mif->state, ICE_STATE_SUBIF_NEEDS_REINIT); in ice_subif_rebuild()
3882 ice_resmgr_release_map(&sc->tx_qmgr, vsi->tx_qmap, in ice_subif_rebuild()
3883 sc->mirr_if->num_irq_vectors); in ice_subif_rebuild()
3884 ice_resmgr_release_map(&sc->rx_qmgr, vsi->rx_qmap, in ice_subif_rebuild()
3885 sc->mirr_if->num_irq_vectors); in ice_subif_rebuild()
3891 * ice_subif_rebuild_vsi_qmap - Rebuild the mirror VSI queue mapping
3894 * Loops over the Tx and Rx queues for the mirror VSI and reassigns the queue
3900 struct ice_vsi *vsi = sc->mirr_if->vsi; in ice_subif_rebuild_vsi_qmap()
3905 err = ice_resmgr_assign_scattered(&sc->tx_qmgr, vsi->tx_qmap, sc->mirr_if->num_irq_vectors); in ice_subif_rebuild_vsi_qmap()
3907 device_printf(sc->dev, "Unable to assign mirror VSI Tx queues: %s\n", in ice_subif_rebuild_vsi_qmap()
3912 err = ice_resmgr_assign_scattered(&sc->rx_qmgr, vsi->rx_qmap, sc->mirr_if->num_irq_vectors); in ice_subif_rebuild_vsi_qmap()
3914 device_printf(sc->dev, "Unable to assign mirror VSI Rx queues: %s\n", in ice_subif_rebuild_vsi_qmap()
3919 vsi->qmap_type = ICE_RESMGR_ALLOC_SCATTERED; in ice_subif_rebuild_vsi_qmap()
3921 /* Re-assign Tx queue tail pointers */ in ice_subif_rebuild_vsi_qmap()
3922 for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) in ice_subif_rebuild_vsi_qmap()
3923 txq->tail = QTX_COMM_DBELL(vsi->tx_qmap[i]); in ice_subif_rebuild_vsi_qmap()
3925 /* Re-assign Rx queue tail pointers */ in ice_subif_rebuild_vsi_qmap()
3926 for (i = 0, rxq = vsi->rx_queues; i < vsi->num_rx_queues; i++, rxq++) in ice_subif_rebuild_vsi_qmap()
3927 rxq->tail = QRX_TAIL(vsi->rx_qmap[i]); in ice_subif_rebuild_vsi_qmap()
3932 ice_resmgr_release_map(&sc->tx_qmgr, vsi->tx_qmap, vsi->num_tx_queues); in ice_subif_rebuild_vsi_qmap()
3938 * ice_subif_if_tx_queues_alloc - Allocate Tx queue memory for subinterfaces
3942 * @ntxqs: the number of Tx queues per set (should always be 1)
3943 * @ntxqsets: the number of Tx queue sets to allocate
3954 device_t dev = mif->subdev; in ice_subif_if_tx_queues_alloc()
3960 MPASS(mif->subscctx->isc_ntxd[0] <= ICE_MAX_DESC_COUNT); in ice_subif_if_tx_queues_alloc()
3962 vsi = mif->vsi; in ice_subif_if_tx_queues_alloc()
3964 MPASS(vsi->num_tx_queues == ntxqsets); in ice_subif_if_tx_queues_alloc()
3967 if (!(vsi->tx_queues = in ice_subif_if_tx_queues_alloc()
3969 device_printf(dev, "%s: Unable to allocate Tx queue memory for subinterface\n", in ice_subif_if_tx_queues_alloc()
3975 for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) { in ice_subif_if_tx_queues_alloc()
3976 if (!(txq->tx_rsq = in ice_subif_if_tx_queues_alloc()
3977 (uint16_t *)malloc(sizeof(uint16_t) * mif->subscctx->isc_ntxd[0], M_ICE, M_NOWAIT))) { in ice_subif_if_tx_queues_alloc()
3984 for (j = 0; j < mif->subscctx->isc_ntxd[0]; j++) in ice_subif_if_tx_queues_alloc()
3985 txq->tx_rsq[j] = QIDX_INVALID; in ice_subif_if_tx_queues_alloc()
3988 /* Add Tx queue sysctls context */ in ice_subif_if_tx_queues_alloc()
3991 for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) { in ice_subif_if_tx_queues_alloc()
3993 txq->me = txq->q_handle = i; in ice_subif_if_tx_queues_alloc()
3994 txq->vsi = vsi; in ice_subif_if_tx_queues_alloc()
3997 txq->desc_count = mif->subscctx->isc_ntxd[0]; in ice_subif_if_tx_queues_alloc()
3999 /* get the virtual and physical addresses of the hardware queues */ in ice_subif_if_tx_queues_alloc()
4000 txq->tail = QTX_COMM_DBELL(vsi->tx_qmap[i]); in ice_subif_if_tx_queues_alloc()
4001 txq->tx_base = (struct ice_tx_desc *)vaddrs[i]; in ice_subif_if_tx_queues_alloc()
4002 txq->tx_paddr = paddrs[i]; in ice_subif_if_tx_queues_alloc()
4010 for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) { in ice_subif_if_tx_queues_alloc()
4011 if (txq->tx_rsq != NULL) { in ice_subif_if_tx_queues_alloc()
4012 free(txq->tx_rsq, M_ICE); in ice_subif_if_tx_queues_alloc()
4013 txq->tx_rsq = NULL; in ice_subif_if_tx_queues_alloc()
4016 free(vsi->tx_queues, M_ICE); in ice_subif_if_tx_queues_alloc()
4017 vsi->tx_queues = NULL; in ice_subif_if_tx_queues_alloc()
4022 * ice_subif_if_rx_queues_alloc - Allocate Rx queue memory for subinterfaces
4026 * @nrxqs: number of Rx queues per set (should always be 1)
4038 device_t dev = mif->subdev; in ice_subif_if_rx_queues_alloc()
4044 MPASS(mif->subscctx->isc_nrxd[0] <= ICE_MAX_DESC_COUNT); in ice_subif_if_rx_queues_alloc()
4046 vsi = mif->vsi; in ice_subif_if_rx_queues_alloc()
4048 MPASS(vsi->num_rx_queues == nrxqsets); in ice_subif_if_rx_queues_alloc()
4051 if (!(vsi->rx_queues = in ice_subif_if_rx_queues_alloc()
4061 for (i = 0, rxq = vsi->rx_queues; i < nrxqsets; i++, rxq++) { in ice_subif_if_rx_queues_alloc()
4062 rxq->me = i; in ice_subif_if_rx_queues_alloc()
4063 rxq->vsi = vsi; in ice_subif_if_rx_queues_alloc()
4066 rxq->desc_count = mif->subscctx->isc_nrxd[0]; in ice_subif_if_rx_queues_alloc()
4068 /* get the virtual and physical addresses of the hardware queues */ in ice_subif_if_rx_queues_alloc()
4069 rxq->tail = QRX_TAIL(vsi->rx_qmap[i]); in ice_subif_if_rx_queues_alloc()
4070 rxq->rx_base = (union ice_32b_rx_flex_desc *)vaddrs[i]; in ice_subif_if_rx_queues_alloc()
4071 rxq->rx_paddr = paddrs[i]; in ice_subif_if_rx_queues_alloc()
4080 * ice_subif_if_msix_intr_assign - Assign MSI-X interrupts to new sub interface
4084 * Allocates and assigns driver private resources for MSI-X interrupt tracking.
4086 * @pre OS MSI-X resources have been pre-allocated by the parent interface.
4092 struct ice_softc *sc = mif->back; in ice_subif_if_msix_intr_assign()
4093 struct ice_vsi *vsi = mif->vsi; in ice_subif_if_msix_intr_assign()
4095 device_t dev = mif->subdev; in ice_subif_if_msix_intr_assign()
4098 if (vsi->num_rx_queues != vsi->num_tx_queues) { in ice_subif_if_msix_intr_assign()
4100 …"iflib requested %d Tx queues, and %d Rx queues, but the driver isn't able to support a differing … in ice_subif_if_msix_intr_assign()
4101 vsi->num_tx_queues, vsi->num_rx_queues); in ice_subif_if_msix_intr_assign()
4105 if (msix > sc->extra_vectors) { in ice_subif_if_msix_intr_assign()
4107 "%s: Not enough spare (%d) msix vectors for new sub-interface requested (%d)\n", in ice_subif_if_msix_intr_assign()
4108 __func__, sc->extra_vectors, msix); in ice_subif_if_msix_intr_assign()
4111 device_printf(dev, "%s: Using %d vectors for sub-interface\n", __func__, in ice_subif_if_msix_intr_assign()
4115 mif->num_irq_vectors = vsi->num_rx_queues; in ice_subif_if_msix_intr_assign()
4116 mif->rx_irqvs = (struct ice_irq_vector *) in ice_subif_if_msix_intr_assign()
4117 malloc(sizeof(struct ice_irq_vector) * (mif->num_irq_vectors), in ice_subif_if_msix_intr_assign()
4119 if (!mif->rx_irqvs) { in ice_subif_if_msix_intr_assign()
4122 mif->num_irq_vectors); in ice_subif_if_msix_intr_assign()
4127 if (!(mif->if_imap = in ice_subif_if_msix_intr_assign()
4128 (u16 *)malloc(sizeof(u16) * mif->num_irq_vectors, in ice_subif_if_msix_intr_assign()
4134 ret = ice_resmgr_assign_contiguous(&sc->dev_imgr, mif->if_imap, mif->num_irq_vectors); in ice_subif_if_msix_intr_assign()
4141 if (!(mif->os_imap = in ice_subif_if_msix_intr_assign()
4142 (u16 *)malloc(sizeof(u16) * mif->num_irq_vectors, in ice_subif_if_msix_intr_assign()
4148 ret = ice_resmgr_assign_contiguous(&sc->os_imgr, mif->os_imap, mif->num_irq_vectors); in ice_subif_if_msix_intr_assign()
4158 free(mif->if_imap, M_ICE); in ice_subif_if_msix_intr_assign()
4159 mif->if_imap = NULL; in ice_subif_if_msix_intr_assign()
4161 free(mif->rx_irqvs, M_ICE); in ice_subif_if_msix_intr_assign()
4162 mif->rx_irqvs = NULL; in ice_subif_if_msix_intr_assign()
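The allocation order in this function is rx_irqvs, then the device-space map if_imap, then the OS-space map os_imap, each paired with a contiguous resmgr assignment; errors unwind in reverse, releasing an assignment before freeing its map. A sketch of the unwind taken when the OS-space assignment fails, labels illustrative:

free_if_imap:
	/* The device-space assignment succeeded earlier; release it
	 * before freeing its map.
	 */
	ice_resmgr_release_map(&sc->dev_imgr, mif->if_imap,
	    mif->num_irq_vectors);
	free(mif->if_imap, M_ICE);
	mif->if_imap = NULL;
free_irqvs:
	free(mif->rx_irqvs, M_ICE);
	mif->rx_irqvs = NULL;
	return (ret);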
4167 * ice_subif_if_intr_enable - Enable device interrupts for a subinterface
4177 struct ice_softc *sc = mif->back; in ice_subif_if_intr_enable()
4178 struct ice_vsi *vsi = mif->vsi; in ice_subif_if_intr_enable()
4179 struct ice_hw *hw = &sc->hw; in ice_subif_if_intr_enable()
4182 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) in ice_subif_if_intr_enable()
4186 for (int i = 0; i < vsi->num_rx_queues; i++) in ice_subif_if_intr_enable()
4187 ice_enable_intr(hw, vsi->rx_queues[i].irqv->me); in ice_subif_if_intr_enable()
4191 * ice_subif_if_rx_queue_intr_enable - Enable a specific Rx queue interrupt
4203 struct ice_softc *sc = mif->back; in ice_subif_if_rx_queue_intr_enable()
4204 struct ice_vsi *vsi = mif->vsi; in ice_subif_if_rx_queue_intr_enable()
4205 struct ice_hw *hw = &sc->hw; in ice_subif_if_rx_queue_intr_enable()
4208 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) in ice_subif_if_rx_queue_intr_enable()
4211 ice_enable_intr(hw, vsi->rx_queues[rxqid].irqv->me); in ice_subif_if_rx_queue_intr_enable()
4216 * ice_subif_if_tx_queue_intr_enable - Enable a specific Tx queue interrupt
4218 * @txqid: the Tx queue to enable
4220 * Enable a specific Tx queue interrupt.
4228 struct ice_softc *sc = mif->back; in ice_subif_if_tx_queue_intr_enable()
4229 struct ice_vsi *vsi = mif->vsi; in ice_subif_if_tx_queue_intr_enable()
4230 struct ice_hw *hw = &sc->hw; in ice_subif_if_tx_queue_intr_enable()
4233 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) in ice_subif_if_tx_queue_intr_enable()
4236 ice_enable_intr(hw, vsi->tx_queues[txqid].irqv->me); in ice_subif_if_tx_queue_intr_enable()
4241 * ice_subif_if_init - Initialize the subinterface
4245 * Prepares the Tx and Rx engines and enables interrupts.
4253 struct ice_softc *sc = mif->back; in ice_subif_if_init()
4254 struct ice_vsi *vsi = mif->vsi; in ice_subif_if_init()
4255 device_t dev = mif->subdev; in ice_subif_if_init()
4261 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) in ice_subif_if_init()
4264 if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED)) { in ice_subif_if_init()
4267 device_get_nameunit(sc->dev)); in ice_subif_if_init()
4271 if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) { in ice_subif_if_init()
4274 device_get_nameunit(sc->dev)); in ice_subif_if_init()
4279 vsi->mbuf_sz = iflib_get_rx_mbuf_sz(ctx); in ice_subif_if_init()
4281 /* Initialize software Tx tracking values */ in ice_subif_if_init()
4287 "Unable to configure subif VSI for Tx: %s\n", in ice_subif_if_init()
4311 ice_set_state(&mif->state, ICE_STATE_DRIVER_INITIALIZED); in ice_subif_if_init()
4319 * ice_subif_if_stop - Stop the subinterface
4332 struct ice_softc *sc = mif->back; in ice_subif_if_stop()
4333 struct ice_vsi *vsi = mif->vsi; in ice_subif_if_stop()
4334 device_t dev = mif->subdev; in ice_subif_if_stop()
4336 if (!ice_testandclear_state(&mif->state, ICE_STATE_DRIVER_INITIALIZED)) in ice_subif_if_stop()
4339 if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED)) { in ice_subif_if_stop()
4342 device_get_nameunit(sc->dev)); in ice_subif_if_stop()
4346 if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) { in ice_subif_if_stop()
4349 device_get_nameunit(sc->dev)); in ice_subif_if_stop()
4353 /* Dissociate the Tx and Rx queues from the interrupts */ in ice_subif_if_stop()
4357 /* Disable the Tx and Rx queues */ in ice_subif_if_stop()
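The two comments above outline the stop sequence: detach the queues from their vectors first, then halt the rings. A sketch using the VSI helpers this driver applies in its PF stop path (assumed here, since the calls themselves did not match the search):

	/* Dissociate the Tx and Rx queues from the interrupts */
	ice_flush_txq_interrupts(vsi);
	ice_flush_rxq_interrupts(vsi);

	/* Disable the Tx and Rx queues */
	ice_vsi_disable_tx(vsi);
	ice_control_all_rx_queues(vsi, false);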
4363 * ice_free_irqvs_subif - Free IRQ vector memory for subinterfaces
4371 struct ice_softc *sc = mif->back; in ice_free_irqvs_subif()
4372 struct ice_vsi *vsi = mif->vsi; in ice_free_irqvs_subif()
4373 if_ctx_t ctx = sc->ctx; in ice_free_irqvs_subif()
4377 if (mif->rx_irqvs == NULL) in ice_free_irqvs_subif()
4380 /* Free the IRQ vectors -- currently each subinterface has as in ice_free_irqvs_subif()
4381 * many vectors as it has Rx queues in ice_free_irqvs_subif()
4385 for (i = 0; i < vsi->num_rx_queues; i++) in ice_free_irqvs_subif()
4386 iflib_irq_free(ctx, &mif->rx_irqvs[i].irq); in ice_free_irqvs_subif()
4388 ice_resmgr_release_map(&sc->os_imgr, mif->os_imap, in ice_free_irqvs_subif()
4389 mif->num_irq_vectors); in ice_free_irqvs_subif()
4390 ice_resmgr_release_map(&sc->dev_imgr, mif->if_imap, in ice_free_irqvs_subif()
4391 mif->num_irq_vectors); in ice_free_irqvs_subif()
4393 sc->last_rid -= vsi->num_rx_queues; in ice_free_irqvs_subif()
4396 for (i = 0; i < vsi->num_rx_queues; i++) in ice_free_irqvs_subif()
4397 vsi->rx_queues[i].irqv = NULL; in ice_free_irqvs_subif()
4399 for (i = 0; i < vsi->num_tx_queues; i++) in ice_free_irqvs_subif()
4400 vsi->tx_queues[i].irqv = NULL; in ice_free_irqvs_subif()
4403 free(mif->rx_irqvs, M_ICE); in ice_free_irqvs_subif()
4404 mif->rx_irqvs = NULL; in ice_free_irqvs_subif()
4408 * ice_subif_if_queues_free - Free queue memory for subinterfaces
4418 struct ice_vsi *vsi = mif->vsi; in ice_subif_if_queues_free()
4422 /* Free the Tx and Rx sysctl contexts, and assign NULL to the node in ice_subif_if_queues_free()
4428 /* Release MSI-X IRQ vectors */ in ice_subif_if_queues_free()
4431 if (vsi->tx_queues != NULL) { in ice_subif_if_queues_free()
4433 for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) { in ice_subif_if_queues_free()
4434 if (txq->tx_rsq != NULL) { in ice_subif_if_queues_free()
4435 free(txq->tx_rsq, M_ICE); in ice_subif_if_queues_free()
4436 txq->tx_rsq = NULL; in ice_subif_if_queues_free()
4439 free(vsi->tx_queues, M_ICE); in ice_subif_if_queues_free()
4440 vsi->tx_queues = NULL; in ice_subif_if_queues_free()
4442 if (vsi->rx_queues != NULL) { in ice_subif_if_queues_free()
4443 free(vsi->rx_queues, M_ICE); in ice_subif_if_queues_free()
4444 vsi->rx_queues = NULL; in ice_subif_if_queues_free()
4449 * ice_subif_if_media_status - Report subinterface media status
4461 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; in ice_subif_if_media_status()
4462 ifmr->ifm_active = IFM_ETHER | IFM_AUTO; in ice_subif_if_media_status()
4466 * ice_subif_if_promisc_set - Set subinterface promiscuous mode