/* SPDX-License-Identifier: BSD-3-Clause */

 * scctx->isc_tx_tso_size_max + the VLAN header is a valid size.
 * DMA tag. However, scctx->isc_tx_tso_segsize_max is used to set the
 * IFLIB_SKIP_MSIX allows the driver to handle allocating MSI-X

/* Static driver-wide sysctls */

 * ice_pci_mapping - Map PCI BAR memory
    rc = ice_map_bar(sc->dev, &sc->bar0, 0);

 * ice_free_pci_mapping - Release PCI BAR memory
    ice_free_bar(sc->dev, &sc->bar0);

 * ice_register - register device method callback

 * ice_setup_scctx - Setup the iflib softc context structure
    if_softc_ctx_t scctx = sc->scctx;
    struct ice_hw *hw = &sc->hw;
    device_t dev = sc->dev;
    safe_mode = ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE);
    recovery_mode = ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE);
    scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1;
    scctx->isc_ntxqsets_max = 1;
    scctx->isc_nrxqsets_max = 1;
     * sysctl value is when setting up MSI-X vectors.
    sc->ifc_sysctl_ntxqs = scctx->isc_ntxqsets;
    sc->ifc_sysctl_nrxqs = scctx->isc_nrxqsets;
    if (scctx->isc_ntxqsets == 0)
        scctx->isc_ntxqsets = hw->func_caps.common_cap.rss_table_size;
    if (scctx->isc_nrxqsets == 0)
        scctx->isc_nrxqsets = hw->func_caps.common_cap.rss_table_size;
    scctx->isc_ntxqsets_max = hw->func_caps.common_cap.num_txq;
    scctx->isc_nrxqsets_max = hw->func_caps.common_cap.num_rxq;
    if (sc->ifc_sysctl_ntxqs > scctx->isc_ntxqsets_max)
        sc->ifc_sysctl_ntxqs = scctx->isc_ntxqsets_max;
    if (sc->ifc_sysctl_nrxqs > scctx->isc_nrxqsets_max)
        sc->ifc_sysctl_nrxqs = scctx->isc_nrxqsets_max;
    scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
    scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
    scctx->isc_tx_nsegments = ICE_MAX_TX_SEGS;
    scctx->isc_tx_tso_segments_max = ICE_MAX_TSO_SEGS;
    scctx->isc_tx_tso_size_max = ICE_TSO_SIZE;
    scctx->isc_tx_tso_segsize_max = ICE_MAX_DMA_SEG_SIZE;
    scctx->isc_msix_bar = pci_msix_table_bar(dev);
    scctx->isc_rss_table_size = hw->func_caps.common_cap.rss_table_size;
    scctx->isc_txrx = &ice_recovery_txrx;
    scctx->isc_txrx = &ice_txrx;
    scctx->isc_capenable = ICE_SAFE_CAPS;
    scctx->isc_tx_csum_flags = 0;
    scctx->isc_capenable = ICE_FULL_CAPS;
    scctx->isc_tx_csum_flags = ICE_CSUM_OFFLOAD;
    scctx->isc_capabilities = scctx->isc_capenable;
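
/*
 * Editor's sketch, not driver code: the two roundup2() expressions in
 * ice_setup_scctx() above are cut off by the line-matched extraction.
 * This standalone model shows the shape of that computation -- ring byte
 * size = descriptor count * descriptor size, rounded up to a power-of-two
 * alignment. The descriptor size and alignment below are assumptions for
 * illustration, not values confirmed by this excerpt.
 */
#include <stdint.h>
#include <stdio.h>

/* Round sz up to a multiple of align; align must be a power of two. */
static uint32_t
sketch_roundup2(uint32_t sz, uint32_t align)
{
    return ((sz + align - 1) & ~(align - 1));
}

int
main(void)
{
    uint32_t ntxd = 1024;      /* example descriptor count per Tx ring */
    uint32_t desc_size = 16;   /* assumed Tx descriptor size in bytes */
    uint32_t dba_align = 128;  /* assumed descriptor base alignment */

    printf("Tx ring bytes: %u\n",
        sketch_roundup2(ntxd * desc_size, dba_align));
    return (0);
}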

 * ice_if_attach_pre - Early device attach logic
    ice_set_state(&sc->state, ICE_STATE_ATTACHING);
    sc->ctx = ctx;
    sc->media = iflib_get_media(ctx);
    sc->sctx = iflib_get_sctx(ctx);
    sc->iflib_ctx_lock = iflib_ctx_lock_get(ctx);
    sc->ifp = iflib_get_ifp(ctx);
    dev = sc->dev = iflib_get_dev(ctx);
    scctx = sc->scctx = iflib_get_softc_ctx(ctx);
    hw = &sc->hw;
    hw->back = sc;
    snprintf(sc->admin_mtx_name, sizeof(sc->admin_mtx_name),
    mtx_init(&sc->admin_mtx, sc->admin_mtx_name, NULL, MTX_DEF);
    callout_init_mtx(&sc->admin_timer, &sc->admin_mtx, 0);
        ice_aq_str(hw->adminq.sq_last_status));
    ice_set_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN);
    /* Initialize VLAN mode in FW; if dual VLAN mode is supported by the package
        ice_aq_str(hw->adminq.sq_last_status));
    iflib_set_mac(ctx, hw->port_info->mac.lan_addr);
    err = ice_resmgr_init(&sc->tx_qmgr, hw->func_caps.common_cap.num_txq);
    err = ice_resmgr_init(&sc->rx_qmgr, hw->func_caps.common_cap.num_rxq);
    sc->num_available_vsi = min(ICE_MAX_VSI_AVAILABLE,
        hw->func_caps.guar_num_vsi);
    if (!sc->num_available_vsi) {
    sc->all_vsi = (struct ice_vsi **)
        malloc(sizeof(struct ice_vsi *) * sc->num_available_vsi,
    if (!sc->all_vsi) {
    ice_alloc_vsi_qmap(&sc->pf_vsi, scctx->isc_ntxqsets_max,
        scctx->isc_nrxqsets_max);
    /* Allocate MSI-X vectors (due to isc_flags IFLIB_SKIP_MSIX) */
    ice_release_vsi(&sc->pf_vsi);
    free(sc->all_vsi, M_ICE);
    sc->all_vsi = NULL;
    ice_resmgr_destroy(&sc->rx_qmgr);
    ice_resmgr_destroy(&sc->tx_qmgr);
    mtx_lock(&sc->admin_mtx);
    callout_stop(&sc->admin_timer);
    mtx_unlock(&sc->admin_mtx);
    mtx_destroy(&sc->admin_mtx);

 * ice_attach_pre_recovery_mode - Limited driver attach_pre for FW recovery
 * detected to be in an invalid state and must be re-programmed, or (b) the
 * driver detects that the loaded firmware has a non-compatible API version
    ice_set_state(&sc->state, ICE_STATE_RECOVERY_MODE);
    sc->pf_vsi.sc = sc;
     * We still need to allocate MSI-X vectors since we need one vector to

 * ice_update_link_status - notify OS of link state change
    struct ice_hw *hw = &sc->hw;
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    if (!ice_testandset_state(&sc->state, ICE_STATE_LINK_STATUS_REPORTED)) {
        if (sc->link_up) { /* link is up */
            uint64_t baudrate = ice_aq_speed_to_rate(sc->hw.port_info);
            if (!(hw->port_info->phy.link_info_old.link_info & ICE_AQ_LINK_UP))
            iflib_link_state_change(sc->ctx, LINK_STATE_UP, baudrate);
            ice_rdma_link_change(sc, LINK_STATE_UP, baudrate);
            iflib_link_state_change(sc->ctx, LINK_STATE_DOWN, 0);
    if (update_media && !ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) {
        status = ice_add_media_types(sc, sc->media);
        device_printf(sc->dev, "Error adding device media types: %s aq_err %s\n",
            ice_aq_str(hw->adminq.sq_last_status));
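
/*
 * Editor's sketch, not driver code: ice_testandset_state() above gates
 * link reporting so only the first caller past the test does the report.
 * A standalone C11 model of that test-and-set gate:
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag sketch_reported = ATOMIC_FLAG_INIT;

/* Returns the previous value: false only for the first caller. */
static bool
sketch_testandset(void)
{
    return (atomic_flag_test_and_set(&sketch_reported));
}

int
main(void)
{
    if (!sketch_testandset())
        printf("first caller: report link state once\n");
    if (!sketch_testandset())
        printf("never printed: state was already reported\n");
    return (0);
}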

 * ice_if_attach_post - Late device attach logic
    /* We don't yet support loading if MSI-X is not supported */
    if (sc->scctx->isc_intr != IFLIB_INTR_MSIX) {
        device_printf(sc->dev, "The ice driver does not support loading without MSI-X\n");
    sc->scctx->isc_max_frame_size = if_getmtu(ifp) +
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) {
    sc->pf_vsi.max_frame_size = sc->scctx->isc_max_frame_size;
    err = ice_initialize_vsi(&sc->pf_vsi);
        device_printf(sc->dev, "Unable to initialize Main VSI: %s\n",
    /* Enable FW health event reporting */
    err = ice_config_rss(&sc->pf_vsi);
        device_printf(sc->dev,
    /* Set a default value for PFC mode on attach since the FW state is unknown
     * issue when loading the driver with the FW LLDP agent enabled but the FW
    status = ice_aq_set_pfc_mode(&sc->hw, ICE_AQC_PFC_VLAN_BASED_PFC, NULL);
        device_printf(sc->dev, "Setting pfc mode failed, status %s\n", ice_status_str(status));
    if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_SRIOV)) {
    /* Setup link, if PHY FW is ready */
    ice_enable_intr(&sc->hw, sc->irqvs[0].me);
    mtx_lock(&sc->admin_mtx);
    callout_reset(&sc->admin_timer, hz/2, ice_admin_timer, sc);
    mtx_unlock(&sc->admin_mtx);
    if (ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) &&
        !ice_test_state(&sc->state, ICE_STATE_NO_MEDIA))
        ice_set_state(&sc->state, ICE_STATE_FIRST_INIT_LINK);
    ice_clear_state(&sc->state, ICE_STATE_ATTACHING);

 * ice_attach_post_recovery_mode - Limited driver attach_post for FW recovery
    ice_enable_intr(&sc->hw, sc->irqvs[0].me);
    mtx_lock(&sc->admin_mtx);
    callout_reset(&sc->admin_timer, hz/2, ice_admin_timer, sc);
    mtx_unlock(&sc->admin_mtx);
    ice_clear_state(&sc->state, ICE_STATE_ATTACHING);

 * ice_free_irqvs - Free IRQ vector memory
    struct ice_vsi *vsi = &sc->pf_vsi;
    if_ctx_t ctx = sc->ctx;
    if (sc->irqvs == NULL)
    for (i = 0; i < sc->num_irq_vectors; i++)
        iflib_irq_free(ctx, &sc->irqvs[i].irq);
    for (i = 0; i < vsi->num_rx_queues; i++)
        vsi->rx_queues[i].irqv = NULL;
    for (i = 0; i < vsi->num_tx_queues; i++)
        vsi->tx_queues[i].irqv = NULL;
    free(sc->irqvs, M_ICE);
    sc->irqvs = NULL;
    sc->num_irq_vectors = 0;
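
/*
 * Editor's sketch, not driver code: ice_free_irqvs() above is written to
 * be safe to call twice -- it returns early when sc->irqvs is already
 * NULL and re-NULLs every pointer it frees. A minimal standalone model
 * of that idempotent-teardown pattern:
 */
#include <stdlib.h>

struct sketch_softc {
    int *irqvs;              /* stand-in for the vector array */
    int  num_irq_vectors;
};

static void
sketch_free_irqvs(struct sketch_softc *sc)
{
    if (sc->irqvs == NULL)   /* second call becomes a no-op */
        return;
    free(sc->irqvs);
    sc->irqvs = NULL;
    sc->num_irq_vectors = 0;
}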

 * ice_if_detach - Device driver detach logic
    struct ice_vsi *vsi = &sc->pf_vsi;
    ice_set_state(&sc->state, ICE_STATE_DETACHING);
    mtx_lock(&sc->admin_mtx);
    callout_stop(&sc->admin_timer);
    mtx_unlock(&sc->admin_mtx);
    mtx_destroy(&sc->admin_mtx);
    if (sc->mirr_if)
    if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_SRIOV))
    ifmedia_removeall(sc->media);
    /* Release MSI-X resources */
    for (i = 0; i < sc->num_available_vsi; i++) {
        if (sc->all_vsi[i])
            ice_release_vsi(sc->all_vsi[i]);
    if (sc->all_vsi) {
        free(sc->all_vsi, M_ICE);
        sc->all_vsi = NULL;
    /* Release MSI-X memory */
    pci_release_msi(sc->dev);
    if (sc->msix_table != NULL) {
        bus_release_resource(sc->dev, SYS_RES_MEMORY,
            rman_get_rid(sc->msix_table),
            sc->msix_table);
        sc->msix_table = NULL;
    ice_resmgr_destroy(&sc->tx_qmgr);
    ice_resmgr_destroy(&sc->rx_qmgr);
    if (!ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
        ice_deinit_hw(&sc->hw);
    status = ice_reset(&sc->hw, ICE_RESET_PFR);
        device_printf(sc->dev, "device PF reset failed, err %s\n",

 * ice_if_tx_queues_alloc - Allocate Tx queue memory
    struct ice_vsi *vsi = &sc->pf_vsi;
    MPASS(sc->scctx->isc_ntxd[0] <= ICE_MAX_DESC_COUNT);
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    if (!(vsi->tx_queues =
        device_printf(sc->dev, "Unable to allocate Tx queue memory\n");
    for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) {
        if (!(txq->tx_rsq =
            (uint16_t *) malloc(sizeof(uint16_t) * sc->scctx->isc_ntxd[0], M_ICE, M_NOWAIT))) {
            device_printf(sc->dev, "Unable to allocate tx_rsq memory\n");
        for (j = 0; j < sc->scctx->isc_ntxd[0]; j++)
            txq->tx_rsq[j] = QIDX_INVALID;
    err = ice_resmgr_assign_contiguous(&sc->tx_qmgr, vsi->tx_qmap, ntxqsets);
        device_printf(sc->dev, "Unable to assign PF queues: %s\n",
    vsi->qmap_type = ICE_RESMGR_ALLOC_CONTIGUOUS;
    for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) {
        txq->me = txq->q_handle = i;
        txq->vsi = vsi;
        txq->desc_count = sc->scctx->isc_ntxd[0];
        txq->tail = QTX_COMM_DBELL(vsi->tx_qmap[i]);
        txq->tx_base = (struct ice_tx_desc *)vaddrs[i];
        txq->tx_paddr = paddrs[i];
    vsi->num_tx_queues = ntxqsets;
    for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) {
        if (txq->tx_rsq != NULL) {
            free(txq->tx_rsq, M_ICE);
            txq->tx_rsq = NULL;
    free(vsi->tx_queues, M_ICE);
    vsi->tx_queues = NULL;
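
/*
 * Editor's sketch, not driver code: the error path above walks every
 * queue and frees any tx_rsq array that was already allocated before
 * failing out. A standalone model of that allocate-then-unwind pattern,
 * with simplified types:
 */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

struct sketch_txq { uint16_t *tx_rsq; };

static int
sketch_alloc_rsq(struct sketch_txq *txq, int nqueues, int ndesc)
{
    for (int i = 0; i < nqueues; i++) {
        txq[i].tx_rsq = malloc(sizeof(uint16_t) * ndesc);
        if (txq[i].tx_rsq == NULL)
            goto unwind;
    }
    return (0);
unwind:
    /* Free everything allocated so far, leaving no dangling pointers. */
    for (int i = 0; i < nqueues; i++) {
        free(txq[i].tx_rsq);
        txq[i].tx_rsq = NULL;
    }
    return (ENOMEM);
}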

 * ice_if_rx_queues_alloc - Allocate Rx queue memory
    struct ice_vsi *vsi = &sc->pf_vsi;
    MPASS(sc->scctx->isc_nrxd[0] <= ICE_MAX_DESC_COUNT);
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    if (!(vsi->rx_queues =
        device_printf(sc->dev, "Unable to allocate Rx queue memory\n");
    err = ice_resmgr_assign_contiguous(&sc->rx_qmgr, vsi->rx_qmap, nrxqsets);
        device_printf(sc->dev, "Unable to assign PF queues: %s\n",
    vsi->qmap_type = ICE_RESMGR_ALLOC_CONTIGUOUS;
    for (i = 0, rxq = vsi->rx_queues; i < nrxqsets; i++, rxq++) {
        rxq->me = i;
        rxq->vsi = vsi;
        rxq->desc_count = sc->scctx->isc_nrxd[0];
        rxq->tail = QRX_TAIL(vsi->rx_qmap[i]);
        rxq->rx_base = (union ice_32b_rx_flex_desc *)vaddrs[i];
        rxq->rx_paddr = paddrs[i];
    vsi->num_rx_queues = nrxqsets;
    free(vsi->rx_queues, M_ICE);
    vsi->rx_queues = NULL;

 * ice_if_queues_free - Free queue memory
    struct ice_vsi *vsi = &sc->pf_vsi;
    /* Release MSI-X IRQ vectors, if not yet released in ice_if_detach */
    if (vsi->tx_queues != NULL) {
        for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) {
            if (txq->tx_rsq != NULL) {
                free(txq->tx_rsq, M_ICE);
                txq->tx_rsq = NULL;
        free(vsi->tx_queues, M_ICE);
        vsi->tx_queues = NULL;
        vsi->num_tx_queues = 0;
    if (vsi->rx_queues != NULL) {
        free(vsi->rx_queues, M_ICE);
        vsi->rx_queues = NULL;
        vsi->num_rx_queues = 0;

 * ice_msix_que - Fast interrupt handler for MSI-X receive queues
 * Interrupt filter function for iflib MSI-X interrupts. Called by iflib when
 * an MSI-X interrupt for a given queue is triggered. Currently this just asks

 * ice_msix_admin - Fast interrupt handler for MSI-X admin interrupt
    struct ice_hw *hw = &sc->hw;
    device_t dev = sc->dev;
     * vector will not be re-enabled until after we exit this function,
    ice_set_state(&sc->state, ICE_STATE_CONTROLQ_EVENT_PENDING);
    ice_set_state(&sc->state, ICE_STATE_VFLR_PENDING);
    ice_set_state(&sc->state, ICE_STATE_MDD_PENDING);
    sc->soft_stats.corer_count++;
    sc->soft_stats.globr_count++;
    sc->soft_stats.empr_count++;
     * happen. Second, we set hw->reset_ongoing to indicate that
    if (!ice_testandset_state(&sc->state, ICE_STATE_RESET_OICR_RECV)) {
        hw->reset_ongoing = true;
         * goes down and then up. The below if-statement prevents a second
        if (if_getflags(sc->ifp) & IFF_UP)
            ice_set_state(&sc->state, ICE_STATE_FIRST_INIT_LINK);
    ice_set_state(&sc->state, ICE_STATE_RESET_PFR_REQ);
    ice_set_state(&sc->state, ICE_STATE_RESET_PFR_REQ);

 * ice_allocate_msix - Allocate MSI-X vectors for the interface
 * Map the MSI-X bar, and then request MSI-X vectors in a two-stage process.
 * IFLIB_SKIP_MSIX flag indicating that the driver will manage MSI-X vectors
 * @remark This driver will only use MSI-X vectors. If this is not possible,
    if_softc_ctx_t scctx = sc->scctx;
    device_t dev = sc->dev;
    /* Allocate the MSI-X bar */
    bar = scctx->isc_msix_bar;
    sc->msix_table = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar, RF_ACTIVE);
    if (!sc->msix_table) {
        device_printf(dev, "Unable to map MSI-X table\n");
    if (sc->ifc_sysctl_ntxqs || sc->ifc_sysctl_nrxqs)
    queues = imin(queues, sc->ifc_sysctl_ntxqs ?: scctx->isc_ntxqsets);
    queues = imin(queues, sc->ifc_sysctl_nrxqs ?: scctx->isc_nrxqsets);
    if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_RDMA)) {
        if_ctx_t ctx = sc->ctx;
    device_printf(dev, "Failed to allocate %d MSI-X vectors, err %s\n",
        int diff = requested - vectors;
        device_printf(dev, "Requested %d MSI-X vectors, but got only %d\n",
            rdma -= diff;
            ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap);
            diff -= rdma;
            device_printf(dev, "Unable to allocate sufficient MSI-X vectors\n");
        queues -= diff;
    device_printf(dev, "Reserving %d MSI-X interrupts for iRDMA\n",
    device_printf(dev, "Using MSI-X interrupts with %d vectors\n",
    scctx->isc_vectors = vectors;
    scctx->isc_nrxqsets = queues;
    scctx->isc_ntxqsets = queues;
    scctx->isc_intr = IFLIB_INTR_MSIX;
    sc->irdma_vectors = rdma;
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    sc->lan_vectors = vectors - rdma;
    sc->lan_vectors -= extra_vectors;
    err = ice_resmgr_assign_contiguous(&sc->dev_imgr, sc->pf_imap, sc->lan_vectors);
    err = ice_resmgr_assign_contiguous(&sc->dev_imgr, sc->rdma_imap, rdma);
    sc->extra_vectors = extra_vectors;
    err = ice_resmgr_init(&sc->os_imgr, sc->extra_vectors);
        ice_resmgr_release_map(&sc->dev_imgr, sc->rdma_imap,
        ice_resmgr_release_map(&sc->dev_imgr, sc->pf_imap,
            sc->lan_vectors);
    if (sc->msix_table != NULL) {
        bus_release_resource(sc->dev, SYS_RES_MEMORY,
            rman_get_rid(sc->msix_table),
            sc->msix_table);
        sc->msix_table = NULL;
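
/*
 * Editor's sketch, not driver code: the shortfall handling above trims
 * iRDMA vectors first and only then queue vectors when pci_alloc_msix()
 * grants fewer vectors than requested. A standalone model of that
 * policy (the minimum-queue floor is an assumption for illustration):
 */
static int
sketch_trim_vectors(int requested, int granted, int *queues, int *rdma)
{
    int diff = requested - granted;

    if (diff <= 0)
        return (0);
    /* Give back iRDMA vectors before touching LAN queues. */
    if (*rdma >= diff) {
        *rdma -= diff;
        return (0);
    }
    diff -= *rdma;
    *rdma = 0;
    /* Whatever is still missing comes out of the queue vectors. */
    if (*queues <= diff)
        return (-1);        /* cannot run with zero queue vectors */
    *queues -= diff;
    return (0);
}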

 * ice_if_msix_intr_assign - Assign MSI-X interrupt vectors to queues
 * Called by iflib to assign MSI-X vectors to queues. Currently requires that
    struct ice_vsi *vsi = &sc->pf_vsi;
    if (vsi->num_rx_queues != vsi->num_tx_queues) {
        device_printf(sc->dev,
            vsi->num_tx_queues, vsi->num_rx_queues);
    if (msix < (vsi->num_rx_queues + 1)) {
        device_printf(sc->dev,
            "Not enough MSI-X vectors to assign one vector to each queue pair\n");
    sc->num_irq_vectors = vsi->num_rx_queues + 1;
    if (!(sc->irqvs =
        (struct ice_irq_vector *) malloc(sizeof(struct ice_irq_vector) * (sc->num_irq_vectors),
        device_printf(sc->dev,
    err = iflib_irq_alloc_generic(ctx, &sc->irqvs[0].irq, 1, IFLIB_INTR_ADMIN,
        device_printf(sc->dev,
    sc->irqvs[0].me = 0;
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    for (i = 0, vector = 1; i < vsi->num_rx_queues; i++, vector++) {
        struct ice_rx_queue *rxq = &vsi->rx_queues[i];
        struct ice_tx_queue *txq = &vsi->tx_queues[i];
        err = iflib_irq_alloc_generic(ctx, &sc->irqvs[vector].irq, rid,
            rxq, rxq->me, irq_name);
            device_printf(sc->dev,
            vector--;
            i--;
        sc->irqvs[vector].me = vector;
        rxq->irqv = &sc->irqvs[vector];
        iflib_softirq_alloc_generic(ctx, &sc->irqvs[vector].irq,
            txq->me, irq_name);
        txq->irqv = &sc->irqvs[vector];
    sc->last_rid = rid + sc->irdma_vectors;
    for (; i >= 0; i--, vector--)
        iflib_irq_free(ctx, &sc->irqvs[vector].irq);
    iflib_irq_free(ctx, &sc->irqvs[0].irq);
    free(sc->irqvs, M_ICE);
    sc->irqvs = NULL;

 * ice_if_mtu_set - Set the device MTU
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    sc->scctx->isc_max_frame_size = mtu +
    sc->pf_vsi.max_frame_size = sc->scctx->isc_max_frame_size;
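
/*
 * Editor's sketch, not driver code: the isc_max_frame_size assignment
 * above is truncated by the extraction. The usual computation adds the
 * L2 overhead to the MTU; the exact constants the driver adds are not
 * visible here, so the Ethernet header + CRC + VLAN tag set below is an
 * assumption for illustration.
 */
#include <stdio.h>

#define SKETCH_ETHER_HDR_LEN        14  /* dst MAC + src MAC + ethertype */
#define SKETCH_ETHER_CRC_LEN        4
#define SKETCH_ETHER_VLAN_ENCAP_LEN 4   /* one 802.1Q tag */

static unsigned int
sketch_max_frame_size(unsigned int mtu)
{
    return (mtu + SKETCH_ETHER_HDR_LEN + SKETCH_ETHER_CRC_LEN +
        SKETCH_ETHER_VLAN_ENCAP_LEN);
}

int
main(void)
{
    /* a standard 1500-byte MTU yields a 1522-byte max frame */
    printf("%u\n", sketch_max_frame_size(1500));
    return (0);
}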

 * ice_if_intr_enable - Enable device interrupts
    struct ice_vsi *vsi = &sc->pf_vsi;
    struct ice_hw *hw = &sc->hw;
    ice_enable_intr(hw, sc->irqvs[0].me);
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    for (int i = 0; i < vsi->num_rx_queues; i++)
        ice_enable_intr(hw, vsi->rx_queues[i].irqv->me);

 * ice_if_intr_disable - Disable device interrupts
    struct ice_hw *hw = &sc->hw;
    for (i = 1; i < hw->func_caps.common_cap.num_msix_vectors; i++)

 * ice_if_rx_queue_intr_enable - Enable a specific Rx queue interrupt
    struct ice_vsi *vsi = &sc->pf_vsi;
    struct ice_hw *hw = &sc->hw;
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    ice_enable_intr(hw, vsi->rx_queues[rxqid].irqv->me);

 * ice_if_tx_queue_intr_enable - Enable a specific Tx queue interrupt
    struct ice_vsi *vsi = &sc->pf_vsi;
    struct ice_hw *hw = &sc->hw;
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    ice_enable_intr(hw, vsi->tx_queues[txqid].irqv->me);

 * ice_set_default_promisc_mask - Set default config for promisc settings
 * non-VLAN-tagged/VLAN 0 traffic.
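
/*
 * Editor's sketch, not driver code: a default promiscuous mask like the
 * one described above is typically built by OR-ing per-direction flag
 * bits. The flag names and bit positions below are hypothetical
 * stand-ins, not the driver's actual ICE promiscuous flag values.
 */
#include <stdint.h>

enum sketch_promisc_bits {
    SKETCH_PROMISC_UCAST_RX = 1u << 0,
    SKETCH_PROMISC_UCAST_TX = 1u << 1,
    SKETCH_PROMISC_MCAST_RX = 1u << 2,
    SKETCH_PROMISC_MCAST_TX = 1u << 3,
};

/* Accept all unicast and multicast on untagged/VLAN 0 traffic. */
static uint32_t
sketch_default_promisc_mask(void)
{
    return (SKETCH_PROMISC_UCAST_RX | SKETCH_PROMISC_UCAST_TX |
        SKETCH_PROMISC_MCAST_RX | SKETCH_PROMISC_MCAST_TX);
}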

 * ice_if_promisc_set - Set device promiscuous mode
    struct ice_hw *hw = &sc->hw;
    device_t dev = sc->dev;
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    status = ice_set_vsi_promisc(hw, sc->pf_vsi.idx,
            ice_aq_str(hw->adminq.sq_last_status));
    status = ice_clear_vsi_promisc(hw, sc->pf_vsi.idx,
            ice_aq_str(hw->adminq.sq_last_status));

 * ice_if_media_change - Change device media
    device_printf(sc->dev, "Media change is not supported.\n");

 * ice_if_media_status - Report current device media
    struct ice_link_status *li = &sc->hw.port_info->phy.link_info;
    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    if (!sc->link_up)
    ifmr->ifm_status |= IFM_ACTIVE;
    ifmr->ifm_active |= IFM_FDX;
    if (li->phy_type_low)
        ifmr->ifm_active |= ice_get_phy_type_low(li->phy_type_low);
    else if (li->phy_type_high)
        ifmr->ifm_active |= ice_get_phy_type_high(li->phy_type_high);
        ifmr->ifm_active |= IFM_UNKNOWN;
    if (li->an_info & ICE_AQ_LINK_PAUSE_TX)
        ifmr->ifm_active |= IFM_ETH_TXPAUSE;
    if (li->an_info & ICE_AQ_LINK_PAUSE_RX)
        ifmr->ifm_active |= IFM_ETH_RXPAUSE;

 * ice_init_tx_tracking - Initialize Tx queue software tracking values
    for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) {
        txq->tx_rs_cidx = txq->tx_rs_pidx = 0;
         * off-by-one error in ice_ift_txd_credits_update for the
        txq->tx_cidx_processed = txq->desc_count - 1;
        for (j = 0; j < txq->desc_count; j++)
            txq->tx_rsq[j] = QIDX_INVALID;
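
/*
 * Editor's sketch, not driver code: starting tx_cidx_processed at
 * desc_count - 1 places the "already processed" index one slot behind
 * ring index 0, so the very first completion at index 0 counts as one
 * finished descriptor instead of zero. A standalone model of the
 * modular delta that motivates it:
 */
#include <stdio.h>

/* Descriptors completed between prev and cur on a ring of size n. */
static int
sketch_ring_delta(int prev, int cur, int n)
{
    return ((cur - prev + n) % n);
}

int
main(void)
{
    int n = 1024;

    /* prev = n - 1: the first completion at index 0 yields a delta of 1 */
    printf("%d\n", sketch_ring_delta(n - 1, 0, n));
    /* prev = 0 would have made the same completion count as 0 */
    printf("%d\n", sketch_ring_delta(0, 0, n));
    return (0);
}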

 * ice_update_rx_mbuf_sz - Update the Rx buffer size for all queues
    uint32_t mbuf_sz = iflib_get_rx_mbuf_sz(sc->ctx);
    struct ice_vsi *vsi = &sc->pf_vsi;
    vsi->mbuf_sz = mbuf_sz;

 * ice_if_init - Initialize the device
    device_t dev = sc->dev;
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED)) {
        device_printf(sc->dev, "request to start interface cannot be completed as the device failed to reset\n");
    if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) {
        device_printf(sc->dev, "request to start interface while device is prepared for impending reset\n");
    ice_init_tx_tracking(&sc->pf_vsi);
    err = ice_cfg_vsi_for_tx(&sc->pf_vsi);
    err = ice_cfg_vsi_for_rx(&sc->pf_vsi);
    err = ice_control_all_rx_queues(&sc->pf_vsi, true);
    ice_configure_all_rxq_interrupts(&sc->pf_vsi);
    ice_configure_rx_itr(&sc->pf_vsi);
    ice_if_promisc_set(ctx, if_getflags(sc->ifp));
    if (!ice_testandclear_state(&sc->state, ICE_STATE_FIRST_INIT_LINK))
        if (!sc->link_up && ((if_getflags(sc->ifp) & IFF_UP) ||
            ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN)))
    ice_set_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED);
    if (sc->mirr_if && ice_testandclear_state(&mif->state, ICE_STATE_SUBIF_NEEDS_REINIT)) {
        ice_clear_state(&mif->state, ICE_STATE_DRIVER_INITIALIZED);
        iflib_request_reset(sc->mirr_if->subctx);
        iflib_admin_intr_deferred(sc->mirr_if->subctx);
    ice_control_all_rx_queues(&sc->pf_vsi, false);
    ice_vsi_disable_tx(&sc->pf_vsi);

 * ice_poll_for_media_avail - Re-enable link if media is detected
 * sends the Get Link Status AQ command and re-enables HW link if the
    struct ice_hw *hw = &sc->hw;
    struct ice_port_info *pi = hw->port_info;
    /* E830 only: There's no interrupt for when the PHY FW has finished loading,
        ice_test_state(&sc->state, ICE_STATE_PHY_FW_INIT_PENDING)) {
        ice_clear_state(&sc->state, ICE_STATE_PHY_FW_INIT_PENDING);
    if (ice_test_state(&sc->state, ICE_STATE_NO_MEDIA)) {
        pi->phy.get_link_info = true;
        ice_get_link_status(pi, &sc->link_up);
        if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
            /* Re-enable link and re-apply user link settings */
            if (ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) ||
                (if_getflags(sc->ifp) & IFF_UP)) {
                status = ice_add_media_types(sc, sc->media);
                device_printf(sc->dev,
                    ice_aq_str(hw->adminq.sq_last_status));
            ice_clear_state(&sc->state, ICE_STATE_NO_MEDIA);

 * ice_if_timer - called by iflib periodically
    uint64_t prev_link_xoff_rx = sc->stats.cur.link_xoff_rx;
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    if (sc->stats.cur.link_xoff_rx != prev_link_xoff_rx)
        sc->scctx->isc_pause_frames = 1;
    ice_update_vsi_hw_stats(&sc->pf_vsi);
    if (sc->mirr_if && sc->mirr_if->if_attached)
        ice_update_vsi_hw_stats(sc->mirr_if->vsi);
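
/*
 * Editor's sketch, not driver code: ice_if_timer() snapshots the Rx
 * pause-frame counter before refreshing stats, then flags
 * isc_pause_frames when the counter moved. A standalone model:
 */
#include <stdbool.h>
#include <stdint.h>

struct sketch_pf_stats { uint64_t link_xoff_rx; };

/* Returns true when new pause frames arrived since the last sample. */
static bool
sketch_saw_pause_frames(const struct sketch_pf_stats *cur, uint64_t prev)
{
    return (cur->link_xoff_rx != prev);
}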

 * ice_admin_timer - called periodically to trigger the admin task
    iflib_admin_intr_deferred(sc->ctx);
    callout_schedule(&sc->admin_timer, hz/2);
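
/*
 * Editor's sketch, not driver code: the admin timer is a self-rearming
 * callout(9) that fires every half second (hz/2 ticks) under its own
 * mutex, queueing the deferred admin work each time. A minimal FreeBSD
 * kernel-style skeleton of that pattern (names are hypothetical):
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>

static struct mtx sketch_admin_mtx;
static struct callout sketch_admin_timer;

static void
sketch_admin_tick(void *arg)
{
    /* Runs with sketch_admin_mtx held (callout_init_mtx). */
    /* ... kick the deferred admin work here ... */
    callout_schedule(&sketch_admin_timer, hz / 2);      /* re-arm */
}

static void
sketch_admin_start(void *arg)
{
    mtx_init(&sketch_admin_mtx, "sketch_admin", NULL, MTX_DEF);
    callout_init_mtx(&sketch_admin_timer, &sketch_admin_mtx, 0);
    mtx_lock(&sketch_admin_mtx);
    callout_reset(&sketch_admin_timer, hz / 2, sketch_admin_tick, arg);
    mtx_unlock(&sketch_admin_mtx);
}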

 * ice_transition_recovery_mode - Transition to recovery mode
    struct ice_vsi *vsi = &sc->pf_vsi;
    device_printf(sc->dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R)…
    iflib_link_state_change(sc->ctx, LINK_STATE_DOWN, 0);
    /* Request that the device be re-initialized */
    ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap);
    if (ice_test_and_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en))
    ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en);
    ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_cap);
    for (i = 0; i < sc->num_available_vsi; i++) {
        if (sc->all_vsi[i])
            ice_release_vsi(sc->all_vsi[i]);
    sc->num_available_vsi = 0;
    if (sc->all_vsi) {
        free(sc->all_vsi, M_ICE);
        sc->all_vsi = NULL;
    ice_resmgr_destroy(&sc->dev_imgr);
    ice_resmgr_destroy(&sc->tx_qmgr);
    ice_resmgr_destroy(&sc->rx_qmgr);
    ice_deinit_hw(&sc->hw);

 * ice_transition_safe_mode - Transition to safe mode
    ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en);
    ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap);
    if (ice_test_and_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en))
    ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en);
    ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_cap);
    ice_clear_bit(ICE_FEATURE_RSS, sc->feat_cap);
    ice_clear_bit(ICE_FEATURE_RSS, sc->feat_en);

 * ice_if_update_admin_status - update admin status
    fw_mode = ice_get_fw_mode(&sc->hw);
    if (!ice_testandset_state(&sc->state, ICE_STATE_RECOVERY_MODE)) {
    if (!ice_testandset_state(&sc->state, ICE_STATE_ROLLBACK_MODE)) {
        ice_print_rollback_msg(&sc->hw);
    if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED) ||
        ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET) ||
        ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) {
    } else if (ice_testandclear_state(&sc->state, ICE_STATE_CONTROLQ_EVENT_PENDING)) {
    if (ice_is_generic_mac(&sc->hw)) {
    if (ice_testandclear_state(&sc->state, ICE_STATE_VFLR_PENDING))
     * ourselves. Otherwise, we can just re-enable the interrupt. We'll be
    ice_set_state(&sc->state, ICE_STATE_CONTROLQ_EVENT_PENDING);
    ice_enable_intr(&sc->hw, sc->irqvs[0].me);

 * ice_prepare_for_reset - Prepare device for an impending reset
    struct ice_hw *hw = &sc->hw;
    if (ice_testandset_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET))
    log(LOG_INFO, "%s: preparing to reset device logic\n", if_name(sc->ifp));
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    ice_resmgr_release_map(&sc->tx_qmgr, sc->pf_vsi.tx_qmap,
        sc->pf_vsi.num_tx_queues);
    ice_resmgr_release_map(&sc->rx_qmgr, sc->pf_vsi.rx_qmap,
        sc->pf_vsi.num_rx_queues);
    if (sc->mirr_if) {
        ice_resmgr_release_map(&sc->tx_qmgr, sc->mirr_if->vsi->tx_qmap,
            sc->mirr_if->num_irq_vectors);
        ice_resmgr_release_map(&sc->rx_qmgr, sc->mirr_if->vsi->rx_qmap,
            sc->mirr_if->num_irq_vectors);
    if (hw->port_info)

 * ice_rebuild_pf_vsi_qmap - Rebuild the main PF VSI queue mapping
    struct ice_vsi *vsi = &sc->pf_vsi;
    /* Re-assign Tx queues from PF space to the main VSI */
    err = ice_resmgr_assign_contiguous(&sc->tx_qmgr, vsi->tx_qmap,
        vsi->num_tx_queues);
        device_printf(sc->dev, "Unable to re-assign PF Tx queues: %s\n",
    /* Re-assign Rx queues from PF space to this VSI */
    err = ice_resmgr_assign_contiguous(&sc->rx_qmgr, vsi->rx_qmap,
        vsi->num_rx_queues);
        device_printf(sc->dev, "Unable to re-assign PF Rx queues: %s\n",
    vsi->qmap_type = ICE_RESMGR_ALLOC_CONTIGUOUS;
    /* Re-assign Tx queue tail pointers */
    for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++)
        txq->tail = QTX_COMM_DBELL(vsi->tx_qmap[i]);
    /* Re-assign Rx queue tail pointers */
    for (i = 0, rxq = vsi->rx_queues; i < vsi->num_rx_queues; i++, rxq++)
        rxq->tail = QRX_TAIL(vsi->rx_qmap[i]);
    ice_resmgr_release_map(&sc->tx_qmgr, sc->pf_vsi.tx_qmap,
        sc->pf_vsi.num_tx_queues);
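
/*
 * Editor's sketch, not driver code: the ice_resmgr calls above hand out
 * blocks of hardware queue indices and record them in a qmap array. The
 * real manager is opaque in this excerpt, so this is a guess at the
 * contiguous-assignment behavior: first-fit over a free bitmap.
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_NUM_QUEUES 64

struct sketch_resmgr {
    bool used[SKETCH_NUM_QUEUES];
};

static int
sketch_assign_contiguous(struct sketch_resmgr *mgr, uint16_t *qmap, int n)
{
    for (int base = 0; base + n <= SKETCH_NUM_QUEUES; base++) {
        int i;

        for (i = 0; i < n && !mgr->used[base + i]; i++)
            ;
        if (i < n)
            continue;           /* window not free, slide forward */
        for (i = 0; i < n; i++) {
            mgr->used[base + i] = true;
            qmap[i] = (uint16_t)(base + i);
        }
        return (0);
    }
    return (-1);                /* no contiguous block available */
}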

 * ice_rebuild_recovery_mode - Rebuild driver state while in recovery mode
    device_t dev = sc->dev;
    ice_enable_intr(&sc->hw, sc->irqvs[0].me);
    ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET);
    log(LOG_INFO, "%s: device rebuild successful\n", if_name(sc->ifp));
     * the iflib core, we also want to re-run the admin task so that iflib

 * ice_rebuild - Rebuild driver state post reset
 * the hardware port, and re-enable the VSIs.
    struct ice_hw *hw = &sc->hw;
    device_t dev = sc->dev;
    sc->rebuild_ticks = ticks;
    ice_clear_state(&sc->state, ICE_STATE_RESET_FAILED);
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) {
        device_printf(dev, "failed to re-init controlqs, err %s\n",
            ice_aq_str(hw->adminq.sq_last_status));
    /* Re-enable FW logging. Keep going even if this fails */
    if (hw->pf_id == 0)
        status = ice_fwlog_set(hw, &hw->fwlog_cfg);
     * enabled pre-rebuild.
    if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) {
        device_printf(dev, "failed to re-register fw logging, err %s aq_err %s\n",
            ice_aq_str(hw->adminq.sq_last_status));
        device_printf(dev, "failed to rebuild fw logging configuration, err %s aq_err %s\n",
            ice_aq_str(hw->adminq.sq_last_status));
    status = ice_sched_init_port(hw->port_info);
    if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE)) {
        pkg_state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
        device_printf(sc->dev, "Unable to re-assign main VSI queues, err %s\n",
    err = ice_initialize_vsi(&sc->pf_vsi);
        device_printf(sc->dev, "Unable to re-initialize Main VSI, err %s\n",
    /* Re-enable FW health event reporting */
    err = ice_config_rss(&sc->pf_vsi);
        device_printf(sc->dev,
    if (hw->port_info->qos_cfg.is_sw_lldp)
     * FW is ready.
    ice_clear_state(&sc->state, ICE_STATE_LINK_STATUS_REPORTED);
    /* RDMA interface will be restarted by the stack re-init */
    ice_enable_intr(&sc->hw, sc->irqvs[0].me);
    ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET);
    if (sc->mirr_if) {
    log(LOG_INFO, "%s: device rebuild successful\n", if_name(sc->ifp));
     * the iflib core, we also want to re-run the admin task so that iflib
    if (hw->port_info->qos_cfg.is_sw_lldp)
    ice_deinit_vsi(&sc->pf_vsi);
    ice_resmgr_release_map(&sc->tx_qmgr, sc->pf_vsi.tx_qmap,
        sc->pf_vsi.num_tx_queues);
    ice_resmgr_release_map(&sc->rx_qmgr, sc->pf_vsi.rx_qmap,
        sc->pf_vsi.num_rx_queues);
    ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET);
    ice_set_state(&sc->state, ICE_STATE_RESET_FAILED);

 * ice_handle_reset_event - Handle reset events triggered by OICR
    struct ice_hw *hw = &sc->hw;
    device_t dev = sc->dev;
    if (!ice_testandclear_state(&sc->state, ICE_STATE_RESET_OICR_RECV))
    ice_set_state(&sc->state, ICE_STATE_RESET_FAILED);
    ice_clear_state(&sc->state, ICE_STATE_RESET_PFR_REQ);
    ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET);
    sc->hw.reset_ongoing = false;
    if (ice_testandclear_state(&sc->state, ICE_STATE_RESET_PFR_REQ))

 * ice_handle_pf_reset_request - Initiate PF reset requested by software
    struct ice_hw *hw = &sc->hw;
    if (!ice_testandclear_state(&sc->state, ICE_STATE_RESET_PFR_REQ))
        device_printf(sc->dev, "device PF reset failed, err %s\n",
    ice_set_state(&sc->state, ICE_STATE_RESET_FAILED);
    sc->soft_stats.pfr_count++;

 * ice_init_device_features - Init device driver features
    struct ice_hw *hw = &sc->hw;
    ice_set_bit(ICE_FEATURE_SRIOV, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_RSS, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_RDMA, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_LENIENT_LINK_MODE, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_1, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_2, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_FW_LOGGING, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_HAS_PBA, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_DCB, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_TX_BALANCE, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_PHY_STATISTICS, sc->feat_cap);
    ice_set_bit(ICE_FEATURE_PHY_STATISTICS, sc->feat_en);
    ice_set_bit(ICE_FEATURE_DUAL_NAC, sc->feat_cap);
    if (!hw->func_caps.common_cap.rss_table_size)
        ice_clear_bit(ICE_FEATURE_RSS, sc->feat_cap);
    if (!hw->func_caps.common_cap.iwarp || !ice_enable_irdma)
        ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap);
    if (!hw->func_caps.common_cap.dcb)
        ice_clear_bit(ICE_FEATURE_DCB, sc->feat_cap);
    ice_clear_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap);
    ice_clear_bit(ICE_FEATURE_FW_LOGGING, sc->feat_cap);
    if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) {
        if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_FW_LOGGING))
            ice_set_bit(ICE_FEATURE_FW_LOGGING, sc->feat_en);
    ice_disable_unsupported_features(sc->feat_cap);
    if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_RSS))
        ice_set_bit(ICE_FEATURE_RSS, sc->feat_en);
    ice_clear_bit(ICE_FEATURE_TX_BALANCE, sc->feat_cap);
    if (hw->dev_caps.supported_sensors & ICE_SENSOR_SUPPORT_E810_INT_TEMP) {
        ice_set_bit(ICE_FEATURE_TEMP_SENSOR, sc->feat_cap);
        ice_set_bit(ICE_FEATURE_TEMP_SENSOR, sc->feat_en);
    if (hw->func_caps.common_cap.next_cluster_id_support ||
        hw->dev_caps.common_cap.next_cluster_id_support) {
        ice_set_bit(ICE_FEATURE_NEXT_CLUSTER_ID, sc->feat_cap);
        ice_set_bit(ICE_FEATURE_NEXT_CLUSTER_ID, sc->feat_en);

 * ice_if_multi_set - Callback to update Multicast filters in HW
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
        device_printf(sc->dev,

 * ice_if_vlan_register - Register a VLAN with the hardware
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    status = ice_add_vlan_hw_filter(&sc->pf_vsi, vtag);
        device_printf(sc->dev,
            ice_aq_str(sc->hw.adminq.sq_last_status));

 * ice_if_vlan_unregister - Remove a VLAN filter from the hardware
    if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
    status = ice_remove_vlan_hw_filter(&sc->pf_vsi, vtag);
        device_printf(sc->dev,
            ice_aq_str(sc->hw.adminq.sq_last_status));

 * ice_if_stop - Stop the device
    if (!ice_testandclear_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED))
    if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED)) {
        device_printf(sc->dev, "request to stop interface cannot be completed as the device failed to reset\n");
    if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) {
        device_printf(sc->dev, "request to stop interface while device is prepared for impending reset\n");
    ice_flush_txq_interrupts(&sc->pf_vsi);
    ice_flush_rxq_interrupts(&sc->pf_vsi);
    ice_vsi_disable_tx(&sc->pf_vsi);
    ice_control_all_rx_queues(&sc->pf_vsi, false);
    if (!ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) &&
        !(if_getflags(sc->ifp) & IFF_UP) && sc->link_up)
    if (sc->mirr_if && ice_test_state(&mif->state, ICE_STATE_SUBIF_NEEDS_REINIT)) {
        ice_subif_if_stop(sc->mirr_if->subctx);
        device_printf(sc->dev, "The subinterface also comes down and up after reset\n");

 * ice_if_get_counter - Get current value of an ifnet statistic
    return ice_get_ifnet_counter(&sc->pf_vsi, counter);

 * ice_request_stack_reinit - Request that iflib re-initialize
 * Request that the device be brought down and up, to re-initialize. For
 * queues need to be re-initialized.
 * re-initialized if we need to restart Tx and Rx queues.
    if (CTX_ACTIVE(sc->ctx)) {
        iflib_request_reset(sc->ctx);
        iflib_admin_intr_deferred(sc->ctx);

 * ice_driver_is_detaching - Check if the driver is detaching/unloading
 * detach-based race conditions as it is possible for a thread to race with
    return (ice_test_state(&sc->state, ICE_STATE_DETACHING) ||
        iflib_in_detach(sc->ctx));
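
/*
 * Editor's sketch, not driver code: a hedged usage example of the
 * detach check above. Long-running handlers poll it between bounded
 * units of work so they bail out instead of racing a detach; the
 * surrounding loop and names here are hypothetical.
 */
#include <errno.h>
#include <stdbool.h>

struct sketch_ctx { volatile bool detaching; };

static bool
sketch_driver_is_detaching(const struct sketch_ctx *sc)
{
    return (sc->detaching);
}

static int
sketch_long_running_handler(struct sketch_ctx *sc)
{
    for (int pass = 0; pass < 100; pass++) {
        if (sketch_driver_is_detaching(sc))
            return (ENXIO);     /* device going away, stop early */
        /* ... do one bounded unit of work, then re-check ... */
    }
    return (0);
}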
3239 * ice_if_priv_ioctl - Device private ioctl handler
3253 device_t dev = sc->dev; in ice_if_priv_ioctl()
3286 switch (ifd->ifd_cmd) { in ice_if_priv_ioctl()
3297 * ice_if_i2c_req - I2C request handler for iflib
3303 * @remark The iflib-only part is pretty simple.
3314 * ice_if_suspend - PCI device suspend handler for iflib
3328 * either via FLR or during the D3->D0 transition. in ice_if_suspend()
3330 ice_clear_state(&sc->state, ICE_STATE_RESET_PFR_REQ); in ice_if_suspend()
3338 * ice_if_resume - PCI device resume handler for iflib
3342 * an FLR. An init is performed by iflib after this function is finished.
3357 * ice_if_needs_restart - Tell iflib when the driver needs to be reinitialized
3372 if (!ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) && in ice_if_needs_restart()
3373 !(if_getflags(sc->ifp) & IFF_UP)) in ice_if_needs_restart()
3381 * ice_init_link - Do link configuration and link status reporting
3390 struct ice_hw *hw = &sc->hw; in ice_init_link()
3391 device_t dev = sc->dev; in ice_init_link()
3393 /* Check if FW is ready before setting up link; defer setup to the in ice_init_link()
3398 ice_set_state(&sc->state, ICE_STATE_PHY_FW_INIT_PENDING); in ice_init_link()
3400 "Link initialization is blocked by PHY FW initialization.\n"); in ice_init_link()
3402 "Link initialization will continue after PHY FW initialization completes.\n"); in ice_init_link()
3403 /* Do not access PHY config while PHY FW is busy initializing */ in ice_init_link()
3405 ice_clear_state(&sc->state, ICE_STATE_PHY_FW_INIT_PENDING); in ice_init_link()
3414 * ice_if_iov_init - iov init handler for iflib
3419 * Configure the driver for SR-IOV mode. Used to setup things like memory
3433 * ice_if_iov_uninit - iov uninit handler for iflib
3449 * ice_if_iov_vf_add - iov add vf handler for iflib
3468 * ice_if_vflr_handle - iov VFLR handler
3539 * - isc_admin_intrcnt is set to 0
3540 * - Uses subif iflib driver methods
3541 * - Flagged as a VF for iflib
3586 if_softc_ctx_t scctx = mif->subscctx; in ice_subif_setup_scctx()
3588 scctx->isc_txrx = &ice_subif_txrx; in ice_subif_setup_scctx()
3590 scctx->isc_capenable = ICE_FULL_CAPS; in ice_subif_setup_scctx()
3591 scctx->isc_tx_csum_flags = ICE_CSUM_OFFLOAD; in ice_subif_setup_scctx()
3593 scctx->isc_ntxqsets = 4; in ice_subif_setup_scctx()
3594 scctx->isc_nrxqsets = 4; in ice_subif_setup_scctx()
3595 scctx->isc_vectors = scctx->isc_nrxqsets; in ice_subif_setup_scctx()
3597 scctx->isc_ntxqsets_max = 256; in ice_subif_setup_scctx()
3598 scctx->isc_nrxqsets_max = 256; in ice_subif_setup_scctx()
3600 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] in ice_subif_setup_scctx()
3602 scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] in ice_subif_setup_scctx()
3605 scctx->isc_tx_nsegments = ICE_MAX_TX_SEGS; in ice_subif_setup_scctx()
3606 scctx->isc_tx_tso_segments_max = ICE_MAX_TSO_SEGS; in ice_subif_setup_scctx()
3607 scctx->isc_tx_tso_size_max = ICE_TSO_SIZE; in ice_subif_setup_scctx()
3608 scctx->isc_tx_tso_segsize_max = ICE_MAX_DMA_SEG_SIZE; in ice_subif_setup_scctx()
3617 mif->subctx = ctx; in ice_subif_if_attach_pre()
3618 mif->subdev = dev; in ice_subif_if_attach_pre()
3619 mif->subscctx = iflib_get_softc_ctx(ctx); in ice_subif_if_attach_pre()
3634 * ice_destroy_mirror_interface - destroy mirror interface
3645 struct ice_mirr_if *mif = sc->mirr_if; in ice_destroy_mirror_interface()
3646 struct ice_vsi *vsi = mif->vsi; in ice_destroy_mirror_interface()
3650 is_locked = sx_xlocked(sc->iflib_ctx_lock); in ice_destroy_mirror_interface()
3654 if (mif->ifp) { in ice_destroy_mirror_interface()
3655 ret = iflib_device_deregister(mif->subctx); in ice_destroy_mirror_interface()
3657 device_printf(sc->dev, in ice_destroy_mirror_interface()
3664 ret = device_delete_child(sc->dev, mif->subdev); in ice_destroy_mirror_interface()
3667 device_printf(sc->dev, in ice_destroy_mirror_interface()
3675 if (mif->if_imap) { in ice_destroy_mirror_interface()
3676 free(mif->if_imap, M_ICE); in ice_destroy_mirror_interface()
3677 mif->if_imap = NULL; in ice_destroy_mirror_interface()
3679 if (mif->os_imap) { in ice_destroy_mirror_interface()
3680 free(mif->os_imap, M_ICE); in ice_destroy_mirror_interface()
3681 mif->os_imap = NULL; in ice_destroy_mirror_interface()
3686 * - rx_irqvs in ice_destroy_mirror_interface()
3687 * - tx_queues in ice_destroy_mirror_interface()
3688 * - rx_queues in ice_destroy_mirror_interface()
3693 sc->mirr_if = NULL; in ice_destroy_mirror_interface()
3698 * ice_setup_mirror_vsi - Initialize mirror VSI
3709 struct ice_softc *sc = mif->back; in ice_setup_mirror_vsi()
3710 device_t dev = sc->dev; in ice_setup_mirror_vsi()
3720 mif->vsi = vsi; in ice_setup_mirror_vsi()
3724 vsi->num_tx_queues = vsi->num_rx_queues = ICE_DEFAULT_VF_QUEUES; in ice_setup_mirror_vsi()
3727 ret = ice_resmgr_assign_scattered(&sc->tx_qmgr, vsi->tx_qmap, in ice_setup_mirror_vsi()
3728 vsi->num_tx_queues); in ice_setup_mirror_vsi()
3735 ret = ice_resmgr_assign_scattered(&sc->rx_qmgr, vsi->rx_qmap, in ice_setup_mirror_vsi()
3736 vsi->num_rx_queues); in ice_setup_mirror_vsi()
3742 vsi->qmap_type = ICE_RESMGR_ALLOC_SCATTERED; in ice_setup_mirror_vsi()
3743 vsi->max_frame_size = ICE_MAX_FRAME_SIZE; in ice_setup_mirror_vsi()
3762 vsi->mirror_src_vsi = sc->pf_vsi.idx; in ice_setup_mirror_vsi()
3764 ice_debug(&sc->hw, ICE_DBG_INIT, in ice_setup_mirror_vsi()
3766 vsi->mirror_src_vsi, vsi->idx); in ice_setup_mirror_vsi()
3767 ice_debug(&sc->hw, ICE_DBG_INIT, "(HW num: VSI %d to %d)\n", in ice_setup_mirror_vsi()
3768 ice_get_hw_vsi_num(&sc->hw, vsi->mirror_src_vsi), in ice_setup_mirror_vsi()
3769 ice_get_hw_vsi_num(&sc->hw, vsi->idx)); in ice_setup_mirror_vsi()
3783 mif->vsi = NULL; in ice_setup_mirror_vsi()
3788 * ice_create_mirror_interface - Initialize mirror interface
3803 device_t dev = sc->dev; in ice_create_mirror_interface()
3816 sc->mirr_if = mif; in ice_create_mirror_interface()
3817 mif->back = sc; in ice_create_mirror_interface()
3820 * - ice_subif_if_tx_queues_alloc in ice_create_mirror_interface()
3821 * - ice_subif_if_rx_queues_alloc in ice_create_mirror_interface()
3838 mif->subdev = device_add_child(dev, sbuf_data(sb), 0); in ice_create_mirror_interface()
3841 if (!mif->subdev) { in ice_create_mirror_interface()
3845 sc->mirr_if = NULL; in ice_create_mirror_interface()
3850 device_set_driver(mif->subdev, &ice_subif_driver); in ice_create_mirror_interface()
3855 ret = iflib_device_register(mif->subdev, mif, &ice_subif_sctx, &mif->subctx); in ice_create_mirror_interface()
3860 mif->ifp = iflib_get_ifp(mif->subctx); in ice_create_mirror_interface()
3861 if_setflagbits(mif->ifp, IFF_MONITOR, 0); in ice_create_mirror_interface()
3864 media = iflib_get_media(mif->subctx); in ice_create_mirror_interface()
3869 device_get_nameunit(mif->subdev), if_name(mif->ifp)); in ice_create_mirror_interface()
3871 ice_add_vsi_sysctls(mif->vsi); in ice_create_mirror_interface()
3877 mif->if_attached = true; in ice_create_mirror_interface()
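Creation mirrors the destroy path step for step: add the child device, set its driver, register it with iflib, and only then mark if_attached; the subdev failure path also clears sc->mirr_if so the parent never holds a dangling pointer. A compilable skeleton of that rollback shape, with stub steps standing in for the bus and iflib calls (all hypothetical):

    /* Stubs standing in for device_add_child()/device_set_driver()
     * and iflib_device_register() (hypothetical). */
    static int  add_child(void)       { return (0); }
    static void delete_child(void)    { }
    static int  register_subctx(void) { return (0); }

    /* Each step that can fail rolls back the steps before it,
     * matching the create/destroy pairing of the mirror interface. */
    static int
    create_subif(void)
    {
        int err;

        if ((err = add_child()) != 0)
            return (err);
        if ((err = register_subctx()) != 0) {
            delete_child();
            return (err);
        }
        return (0);
    }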
3896 * driver needs to get MSI-X resources from the parent device.
3907 struct ice_softc *sc = mif->back; in ice_wire_mirror_intrs()
3908 struct ice_hw *hw = &sc->hw; in ice_wire_mirror_intrs()
3909 struct ice_vsi *vsi = mif->vsi; in ice_wire_mirror_intrs()
3910 device_t dev = mif->subdev; in ice_wire_mirror_intrs()
3913 if_ctx_t ctx = mif->subctx; in ice_wire_mirror_intrs()
3915 ice_debug(hw, ICE_DBG_INIT, "%s: Last rid: %d\n", __func__, sc->last_rid); in ice_wire_mirror_intrs()
3917 rid = sc->last_rid + 1; in ice_wire_mirror_intrs()
3918 for (i = 0; i < vsi->num_rx_queues; i++, rid++) { in ice_wire_mirror_intrs()
3919 struct ice_rx_queue *rxq = &vsi->rx_queues[i]; in ice_wire_mirror_intrs()
3920 struct ice_tx_queue *txq = &vsi->tx_queues[i]; in ice_wire_mirror_intrs()
3926 err = iflib_irq_alloc_generic_subctx(sc->ctx, ctx, in ice_wire_mirror_intrs()
3927 &mif->rx_irqvs[i].irq, rid, IFLIB_INTR_RXTX, ice_msix_que, in ice_wire_mirror_intrs()
3928 rxq, rxq->me, irq_name); in ice_wire_mirror_intrs()
3933 i--; in ice_wire_mirror_intrs()
3936 MPASS(rid - 1 > 0); in ice_wire_mirror_intrs()
3938 mif->rx_irqvs[i].me = rid - 1; in ice_wire_mirror_intrs()
3939 rxq->irqv = &mif->rx_irqvs[i]; in ice_wire_mirror_intrs()
3943 iflib_softirq_alloc_generic(ctx, &mif->rx_irqvs[i].irq, in ice_wire_mirror_intrs()
3944 IFLIB_INTR_TX, txq, txq->me, irq_name); in ice_wire_mirror_intrs()
3945 txq->irqv = &mif->rx_irqvs[i]; in ice_wire_mirror_intrs()
3948 sc->last_rid = rid - 1; in ice_wire_mirror_intrs()
3951 sc->last_rid); in ice_wire_mirror_intrs()
3956 for (; i >= 0; i--) in ice_wire_mirror_intrs()
3957 iflib_irq_free(ctx, &mif->rx_irqvs[i].irq); in ice_wire_mirror_intrs()
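The wiring loop draws resource IDs starting at sc->last_rid + 1 so the sub-interface's vectors land after the parent's, pairs each RXTX interrupt with a Tx softirq on the same vector, and on failure frees everything allocated so far in reverse. A self-contained model of that allocate-or-unwind loop (malloc() stands in for the hypothetical per-vector setup):

    #include <errno.h>
    #include <stdlib.h>

    /* Stand-in for iflib_irq_alloc_generic_subctx() (hypothetical). */
    static int
    alloc_vector(void **res)
    {
        *res = malloc(sizeof(int));
        return (*res == NULL ? ENOMEM : 0);
    }

    /* Allocate one vector per queue; on failure, free vectors
     * [0, i) in reverse -- the same unwind shape as the loop above. */
    static int
    wire_vectors(void **vecs, int nvec)
    {
        int err = 0, i;

        for (i = 0; i < nvec; i++) {
            if ((err = alloc_vector(&vecs[i])) != 0)
                break;
        }
        if (err != 0) {
            for (i--; i >= 0; i--)
                free(vecs[i]);
        }
        return (err);
    }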
3962 * ice_subif_rebuild - Rebuild subinterface post reset
3971 struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(sc->ctx); in ice_subif_rebuild()
3972 struct ice_vsi *vsi = sc->mirr_if->vsi; in ice_subif_rebuild()
3977 device_printf(sc->dev, "Unable to re-assign mirror VSI queues, err %s\n", in ice_subif_rebuild()
3984 device_printf(sc->dev, "Unable to re-initialize mirror VSI, err %s\n", in ice_subif_rebuild()
3991 device_printf(sc->dev, in ice_subif_rebuild()
3997 vsi->mirror_src_vsi = sc->pf_vsi.idx; in ice_subif_rebuild()
4001 device_printf(sc->dev, in ice_subif_rebuild()
4007 ice_set_state(&mif->state, ICE_STATE_SUBIF_NEEDS_REINIT); in ice_subif_rebuild()
4014 ice_resmgr_release_map(&sc->tx_qmgr, vsi->tx_qmap, in ice_subif_rebuild()
4015 sc->mirr_if->num_irq_vectors); in ice_subif_rebuild()
4016 ice_resmgr_release_map(&sc->rx_qmgr, vsi->rx_qmap, in ice_subif_rebuild()
4017 sc->mirr_if->num_irq_vectors); in ice_subif_rebuild()
4023 * ice_subif_rebuild_vsi_qmap - Rebuild the mirror VSI queue mapping
4032 struct ice_vsi *vsi = sc->mirr_if->vsi; in ice_subif_rebuild_vsi_qmap()
4037 err = ice_resmgr_assign_scattered(&sc->tx_qmgr, vsi->tx_qmap, sc->mirr_if->num_irq_vectors); in ice_subif_rebuild_vsi_qmap()
4039 device_printf(sc->dev, "Unable to assign mirror VSI Tx queues: %s\n", in ice_subif_rebuild_vsi_qmap()
4044 err = ice_resmgr_assign_scattered(&sc->rx_qmgr, vsi->rx_qmap, sc->mirr_if->num_irq_vectors); in ice_subif_rebuild_vsi_qmap()
4046 device_printf(sc->dev, "Unable to assign mirror VSI Rx queues: %s\n", in ice_subif_rebuild_vsi_qmap()
4051 vsi->qmap_type = ICE_RESMGR_ALLOC_SCATTERED; in ice_subif_rebuild_vsi_qmap()
4053 /* Re-assign Tx queue tail pointers */ in ice_subif_rebuild_vsi_qmap()
4054 for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) in ice_subif_rebuild_vsi_qmap()
4055 txq->tail = QTX_COMM_DBELL(vsi->tx_qmap[i]); in ice_subif_rebuild_vsi_qmap()
4057 /* Re-assign Rx queue tail pointers */ in ice_subif_rebuild_vsi_qmap()
4058 for (i = 0, rxq = vsi->rx_queues; i < vsi->num_rx_queues; i++, rxq++) in ice_subif_rebuild_vsi_qmap()
4059 rxq->tail = QRX_TAIL(vsi->rx_qmap[i]); in ice_subif_rebuild_vsi_qmap()
4064 ice_resmgr_release_map(&sc->tx_qmgr, vsi->tx_qmap, vsi->num_tx_queues); in ice_subif_rebuild_vsi_qmap()
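After a rebuild the queues keep their software indices but may land on different hardware queues, so each Tx/Rx tail register must be recomputed from the new qmap entry. The QTX_COMM_DBELL() and QRX_TAIL() macros are base-plus-stride register offsets; a sketch with illustrative (not authoritative) offset values:

    #include <stdint.h>

    /* Toy register map: each hardware queue has a tail register at a
     * fixed stride from a base; the base/stride values here are
     * illustrative, not taken from the hardware spec. */
    #define TX_DBELL_BASE   0x002C0000u
    #define RX_TAIL_BASE    0x00290000u
    #define QREG_STRIDE     4u

    static inline uint32_t
    tx_tail_reg(uint16_t hw_queue)
    {
        return (TX_DBELL_BASE + (uint32_t)hw_queue * QREG_STRIDE);
    }

    static inline uint32_t
    rx_tail_reg(uint16_t hw_queue)
    {
        return (RX_TAIL_BASE + (uint32_t)hw_queue * QREG_STRIDE);
    }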
4070 * ice_subif_if_tx_queues_alloc - Allocate Tx queue memory for subinterfaces
4086 device_t dev = mif->subdev; in ice_subif_if_tx_queues_alloc()
4092 MPASS(mif->subscctx->isc_ntxd[0] <= ICE_MAX_DESC_COUNT); in ice_subif_if_tx_queues_alloc()
4094 vsi = mif->vsi; in ice_subif_if_tx_queues_alloc()
4096 MPASS(vsi->num_tx_queues == ntxqsets); in ice_subif_if_tx_queues_alloc()
4099 if (!(vsi->tx_queues = in ice_subif_if_tx_queues_alloc()
4107 for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) { in ice_subif_if_tx_queues_alloc()
4108 if (!(txq->tx_rsq = in ice_subif_if_tx_queues_alloc()
4109 (uint16_t *)malloc(sizeof(uint16_t) * mif->subscctx->isc_ntxd[0], M_ICE, M_NOWAIT))) { in ice_subif_if_tx_queues_alloc()
4116 for (j = 0; j < mif->subscctx->isc_ntxd[0]; j++) in ice_subif_if_tx_queues_alloc()
4117 txq->tx_rsq[j] = QIDX_INVALID; in ice_subif_if_tx_queues_alloc()
4123 for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) { in ice_subif_if_tx_queues_alloc()
4125 txq->me = txq->q_handle = i; in ice_subif_if_tx_queues_alloc()
4126 txq->vsi = vsi; in ice_subif_if_tx_queues_alloc()
4129 txq->desc_count = mif->subscctx->isc_ntxd[0]; in ice_subif_if_tx_queues_alloc()
4132 txq->tail = QTX_COMM_DBELL(vsi->tx_qmap[i]); in ice_subif_if_tx_queues_alloc()
4133 txq->tx_base = (struct ice_tx_desc *)vaddrs[i]; in ice_subif_if_tx_queues_alloc()
4134 txq->tx_paddr = paddrs[i]; in ice_subif_if_tx_queues_alloc()
4142 for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) { in ice_subif_if_tx_queues_alloc()
4143 if (txq->tx_rsq != NULL) { in ice_subif_if_tx_queues_alloc()
4144 free(txq->tx_rsq, M_ICE); in ice_subif_if_tx_queues_alloc()
4145 txq->tx_rsq = NULL; in ice_subif_if_tx_queues_alloc()
4148 free(vsi->tx_queues, M_ICE); in ice_subif_if_tx_queues_alloc()
4149 vsi->tx_queues = NULL; in ice_subif_if_tx_queues_alloc()
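The Tx path needs one extra allocation per queue: tx_rsq, a report-status ring whose entries start at the QIDX_INVALID sentinel so no descriptor appears completed before the hardware writes one back. A compilable model of the allocate/initialize/unwind sequence (struct and names trimmed to the essentials):

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define QIDX_INVALID 0xffff       /* "no completion yet" sentinel */

    struct txq_model {                /* hypothetical trimmed Tx queue */
        uint16_t *tx_rsq;             /* report-status ring */
    };

    static int
    alloc_txqs(struct txq_model **out, int nq, int ndesc)
    {
        struct txq_model *txqs;
        int i, j;

        txqs = calloc(nq, sizeof(*txqs));
        if (txqs == NULL)
            return (ENOMEM);
        for (i = 0; i < nq; i++) {
            txqs[i].tx_rsq = malloc(sizeof(uint16_t) * ndesc);
            if (txqs[i].tx_rsq == NULL)
                goto fail;
            /* Mark every slot "not yet completed by hardware". */
            for (j = 0; j < ndesc; j++)
                txqs[i].tx_rsq[j] = QIDX_INVALID;
        }
        *out = txqs;
        return (0);
    fail:
        while (i-- > 0)
            free(txqs[i].tx_rsq);
        free(txqs);
        return (ENOMEM);
    }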
4154 * ice_subif_if_rx_queues_alloc - Allocate Rx queue memory for subinterfaces
4170 device_t dev = mif->subdev; in ice_subif_if_rx_queues_alloc()
4176 MPASS(mif->subscctx->isc_nrxd[0] <= ICE_MAX_DESC_COUNT); in ice_subif_if_rx_queues_alloc()
4178 vsi = mif->vsi; in ice_subif_if_rx_queues_alloc()
4180 MPASS(vsi->num_rx_queues == nrxqsets); in ice_subif_if_rx_queues_alloc()
4183 if (!(vsi->rx_queues = in ice_subif_if_rx_queues_alloc()
4193 for (i = 0, rxq = vsi->rx_queues; i < nrxqsets; i++, rxq++) { in ice_subif_if_rx_queues_alloc()
4194 rxq->me = i; in ice_subif_if_rx_queues_alloc()
4195 rxq->vsi = vsi; in ice_subif_if_rx_queues_alloc()
4198 rxq->desc_count = mif->subscctx->isc_nrxd[0]; in ice_subif_if_rx_queues_alloc()
4201 rxq->tail = QRX_TAIL(vsi->rx_qmap[i]); in ice_subif_if_rx_queues_alloc()
4202 rxq->rx_base = (union ice_32b_rx_flex_desc *)vaddrs[i]; in ice_subif_if_rx_queues_alloc()
4203 rxq->rx_paddr = paddrs[i]; in ice_subif_if_rx_queues_alloc()
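Rx setup is simpler: there is no report-status ring, so the loop only records the per-queue descriptor ring addresses that iflib allocated (vaddrs/paddrs) plus the tail register. A minimal sketch of that mapping step (hypothetical trimmed struct):

    #include <stdint.h>

    struct rxq_model {
        void     *rx_base;   /* descriptor ring virtual address */
        uint64_t  rx_paddr;  /* descriptor ring physical address */
        int       me;        /* queue index */
    };

    /* Record the per-queue ring addresses handed in by the framework. */
    static void
    map_rx_rings(struct rxq_model *rxqs, int nq,
        void **vaddrs, uint64_t *paddrs)
    {
        for (int i = 0; i < nq; i++) {
            rxqs[i].me = i;
            rxqs[i].rx_base = vaddrs[i];
            rxqs[i].rx_paddr = paddrs[i];
        }
    }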
4212 * ice_subif_if_msix_intr_assign - Assign MSI-X interrupts to new sub interface
4216 * Allocates and assigns driver private resources for MSI-X interrupt tracking.
4218 * @pre OS MSI-X resources have been pre-allocated by parent interface.
4224 struct ice_softc *sc = mif->back; in ice_subif_if_msix_intr_assign()
4225 struct ice_vsi *vsi = mif->vsi; in ice_subif_if_msix_intr_assign()
4227 device_t dev = mif->subdev; in ice_subif_if_msix_intr_assign()
4230 if (vsi->num_rx_queues != vsi->num_tx_queues) { in ice_subif_if_msix_intr_assign()
4233 vsi->num_tx_queues, vsi->num_rx_queues); in ice_subif_if_msix_intr_assign()
4237 if (msix > sc->extra_vectors) { in ice_subif_if_msix_intr_assign()
4239 "%s: Not enough spare (%d) msix vectors for new sub-interface requested (%d)\n", in ice_subif_if_msix_intr_assign()
4240 __func__, sc->extra_vectors, msix); in ice_subif_if_msix_intr_assign()
4243 device_printf(dev, "%s: Using %d vectors for sub-interface\n", __func__, in ice_subif_if_msix_intr_assign()
4247 mif->num_irq_vectors = vsi->num_rx_queues; in ice_subif_if_msix_intr_assign()
4248 mif->rx_irqvs = (struct ice_irq_vector *) in ice_subif_if_msix_intr_assign()
4249 malloc(sizeof(struct ice_irq_vector) * (mif->num_irq_vectors), in ice_subif_if_msix_intr_assign()
4251 if (!mif->rx_irqvs) { in ice_subif_if_msix_intr_assign()
4254 mif->num_irq_vectors); in ice_subif_if_msix_intr_assign()
4259 if (!(mif->if_imap = in ice_subif_if_msix_intr_assign()
4260 (u16 *)malloc(sizeof(u16) * mif->num_irq_vectors, in ice_subif_if_msix_intr_assign()
4266 ret = ice_resmgr_assign_contiguous(&sc->dev_imgr, mif->if_imap, mif->num_irq_vectors); in ice_subif_if_msix_intr_assign()
4273 if (!(mif->os_imap = in ice_subif_if_msix_intr_assign()
4274 (u16 *)malloc(sizeof(u16) * mif->num_irq_vectors, in ice_subif_if_msix_intr_assign()
4280 ret = ice_resmgr_assign_contiguous(&sc->os_imgr, mif->os_imap, mif->num_irq_vectors); in ice_subif_if_msix_intr_assign()
4290 free(mif->if_imap, M_ICE); in ice_subif_if_msix_intr_assign()
4291 mif->if_imap = NULL; in ice_subif_if_msix_intr_assign()
4293 free(mif->rx_irqvs, M_ICE); in ice_subif_if_msix_intr_assign()
4294 mif->rx_irqvs = NULL; in ice_subif_if_msix_intr_assign()
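Unlike the queue maps, the interrupt maps are assigned with ice_resmgr_assign_contiguous(), and the request is first checked against the parent's spare vector count. A toy contiguous allocator over a fixed pool shows the difference from the scattered case above (sizes and names invented):

    #include <errno.h>
    #include <stdbool.h>

    #define NVEC 64

    static bool vec_used[NVEC];

    /* Find a run of n free vectors and record their indices in
     * imap[]; fail if no contiguous run exists. */
    static int
    assign_contiguous(unsigned short *imap, int n)
    {
        for (int start = 0; start + n <= NVEC; start++) {
            int len = 0;

            while (len < n && !vec_used[start + len])
                len++;
            if (len == n) {
                for (int i = 0; i < n; i++) {
                    vec_used[start + i] = true;
                    imap[i] = (unsigned short)(start + i);
                }
                return (0);
            }
            start += len;   /* skip past the used entry we hit */
        }
        return (ENOSPC);
    }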
4299 * ice_subif_if_intr_enable - Enable device interrupts for a subinterface
4309 struct ice_softc *sc = mif->back; in ice_subif_if_intr_enable()
4310 struct ice_vsi *vsi = mif->vsi; in ice_subif_if_intr_enable()
4311 struct ice_hw *hw = &sc->hw; in ice_subif_if_intr_enable()
4314 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) in ice_subif_if_intr_enable()
4318 for (int i = 0; i < vsi->num_rx_queues; i++) in ice_subif_if_intr_enable()
4319 ice_enable_intr(hw, vsi->rx_queues[i].irqv->me); in ice_subif_if_intr_enable()
4323 * ice_subif_if_rx_queue_intr_enable - Enable a specific Rx queue interrupt
4335 struct ice_softc *sc = mif->back; in ice_subif_if_rx_queue_intr_enable()
4336 struct ice_vsi *vsi = mif->vsi; in ice_subif_if_rx_queue_intr_enable()
4337 struct ice_hw *hw = &sc->hw; in ice_subif_if_rx_queue_intr_enable()
4340 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) in ice_subif_if_rx_queue_intr_enable()
4343 ice_enable_intr(hw, vsi->rx_queues[rxqid].irqv->me); in ice_subif_if_rx_queue_intr_enable()
4348 * ice_subif_if_tx_queue_intr_enable - Enable a specific Tx queue interrupt
4360 struct ice_softc *sc = mif->back; in ice_subif_if_tx_queue_intr_enable()
4361 struct ice_vsi *vsi = mif->vsi; in ice_subif_if_tx_queue_intr_enable()
4362 struct ice_hw *hw = &sc->hw; in ice_subif_if_tx_queue_intr_enable()
4365 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) in ice_subif_if_tx_queue_intr_enable()
4368 ice_enable_intr(hw, vsi->tx_queues[txqid].irqv->me); in ice_subif_if_tx_queue_intr_enable()
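All three enable paths reduce to the same guard-then-unmask step; the only difference is which queue array supplies the vector index. A sketch (enable_vector() is a hypothetical stand-in for the ice_enable_intr() register write):

    #include <stdbool.h>
    #include <stdint.h>

    static void enable_vector(uint16_t vector) { (void)vector; }

    /* Never unmask queue interrupts while the parent device is in
     * recovery mode; otherwise enable the queue's assigned vector. */
    static void
    queue_intr_enable(bool recovery_mode, const uint16_t *qvec, int qid)
    {
        if (recovery_mode)
            return;
        enable_vector(qvec[qid]);
    }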
4373 * ice_subif_if_init - Initialize the subinterface
4385 struct ice_softc *sc = mif->back; in ice_subif_if_init()
4386 struct ice_vsi *vsi = mif->vsi; in ice_subif_if_init()
4387 device_t dev = mif->subdev; in ice_subif_if_init()
4393 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) in ice_subif_if_init()
4396 if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED)) { in ice_subif_if_init()
4399 device_get_nameunit(sc->dev)); in ice_subif_if_init()
4403 if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) { in ice_subif_if_init()
4406 device_get_nameunit(sc->dev)); in ice_subif_if_init()
4411 vsi->mbuf_sz = iflib_get_rx_mbuf_sz(ctx); in ice_subif_if_init()
4443 ice_set_state(&mif->state, ICE_STATE_DRIVER_INITIALIZED); in ice_subif_if_init()
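Init is gated on the parent softc's state bits: recovery mode silently skips bring-up, while a failed or pending reset logs and aborts; success ends by setting ICE_STATE_DRIVER_INITIALIZED on the sub-interface so that stop knows there is something to undo. A bit-flag sketch of that gating (flag names hypothetical):

    #include <errno.h>
    #include <stdint.h>

    enum {                          /* stand-ins for the driver's state bits */
        S_RECOVERY   = 1u << 0,
        S_RESET_FAIL = 1u << 1,
        S_PRE_RESET  = 1u << 2,
        S_INITED     = 1u << 3,
    };

    /* Refuse to bring the sub-interface up while the parent is in
     * any state where its queues cannot carry traffic. */
    static int
    subif_init(uint32_t parent_state, uint32_t *subif_state)
    {
        if (parent_state & (S_RECOVERY | S_RESET_FAIL | S_PRE_RESET))
            return (EBUSY);
        *subif_state |= S_INITED;
        return (0);
    }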
4451 * ice_subif_if_stop - Stop the subinterface
4464 struct ice_softc *sc = mif->back; in ice_subif_if_stop()
4465 struct ice_vsi *vsi = mif->vsi; in ice_subif_if_stop()
4466 device_t dev = mif->subdev; in ice_subif_if_stop()
4468 if (!ice_testandclear_state(&mif->state, ICE_STATE_DRIVER_INITIALIZED)) in ice_subif_if_stop()
4471 if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED)) { in ice_subif_if_stop()
4474 device_get_nameunit(sc->dev)); in ice_subif_if_stop()
4478 if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) { in ice_subif_if_stop()
4481 device_get_nameunit(sc->dev)); in ice_subif_if_stop()
4495 * ice_free_irqvs_subif - Free IRQ vector memory for subinterfaces
4503 struct ice_softc *sc = mif->back; in ice_free_irqvs_subif()
4504 struct ice_vsi *vsi = mif->vsi; in ice_free_irqvs_subif()
4505 if_ctx_t ctx = sc->ctx; in ice_free_irqvs_subif()
4509 if (mif->rx_irqvs == NULL) in ice_free_irqvs_subif()
4512 /* Free the IRQ vectors -- subinterfaces use one IRQ vector per Rx queue. */ in ice_free_irqvs_subif()
4517 for (i = 0; i < vsi->num_rx_queues; i++) in ice_free_irqvs_subif()
4518 iflib_irq_free(ctx, &mif->rx_irqvs[i].irq); in ice_free_irqvs_subif()
4520 ice_resmgr_release_map(&sc->os_imgr, mif->os_imap, in ice_free_irqvs_subif()
4521 mif->num_irq_vectors); in ice_free_irqvs_subif()
4522 ice_resmgr_release_map(&sc->dev_imgr, mif->if_imap, in ice_free_irqvs_subif()
4523 mif->num_irq_vectors); in ice_free_irqvs_subif()
4525 sc->last_rid -= vsi->num_rx_queues; in ice_free_irqvs_subif()
4528 for (i = 0; i < vsi->num_rx_queues; i++) in ice_free_irqvs_subif()
4529 vsi->rx_queues[i].irqv = NULL; in ice_free_irqvs_subif()
4531 for (i = 0; i < vsi->num_tx_queues; i++) in ice_free_irqvs_subif()
4532 vsi->tx_queues[i].irqv = NULL; in ice_free_irqvs_subif()
4535 free(mif->rx_irqvs, M_ICE); in ice_free_irqvs_subif()
4536 mif->rx_irqvs = NULL; in ice_free_irqvs_subif()
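Because sub-interface rids are carved off the top of the parent's range, freeing them is a simple subtraction from sc->last_rid; this only works if sub-interfaces are torn down in LIFO order relative to their creation (an assumed simplification in this sketch):

    static int last_rid;

    /* Reserve n rids past the current high-water mark and return the
     * first one, mirroring the rid = sc->last_rid + 1 pattern above. */
    static int
    take_rids(int n)
    {
        int first = last_rid + 1;

        last_rid += n;
        return (first);
    }

    /* Return the most recently taken n rids (LIFO only). */
    static void
    return_rids(int n)
    {
        last_rid -= n;
    }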
4540 * ice_subif_if_queues_free - Free queue memory for subinterfaces
4550 struct ice_vsi *vsi = mif->vsi; in ice_subif_if_queues_free()
4560 /* Release MSI-X IRQ vectors */ in ice_subif_if_queues_free()
4563 if (vsi->tx_queues != NULL) { in ice_subif_if_queues_free()
4565 for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) { in ice_subif_if_queues_free()
4566 if (txq->tx_rsq != NULL) { in ice_subif_if_queues_free()
4567 free(txq->tx_rsq, M_ICE); in ice_subif_if_queues_free()
4568 txq->tx_rsq = NULL; in ice_subif_if_queues_free()
4571 free(vsi->tx_queues, M_ICE); in ice_subif_if_queues_free()
4572 vsi->tx_queues = NULL; in ice_subif_if_queues_free()
4574 if (vsi->rx_queues != NULL) { in ice_subif_if_queues_free()
4575 free(vsi->rx_queues, M_ICE); in ice_subif_if_queues_free()
4576 vsi->rx_queues = NULL; in ice_subif_if_queues_free()
4581 * ice_subif_if_media_status - Report subinterface media
4593 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; in ice_subif_if_media_status()
4594 ifmr->ifm_active = IFM_ETHER | IFM_AUTO; in ice_subif_if_media_status()
4598 * ice_subif_if_promisc_set - Set subinterface promiscuous mode