Lines Matching +full:report +full:- +full:speed +full:- +full:hz

3   Copyright (c) 2001-2017, Intel Corporation
48 static const char ixgbe_driver_version[] = "4.0.1-k";
93 "Intel(R) X520-T 82599 LOM"),
105 "Intel(R) X520-1 82599EN (SFP+)"),
107 "Intel(R) X520-4 82599 (Quad SFP+)"),
109 "Intel(R) X520-Q1 82599 (QSFP+)"),
111 "Intel(R) X540-AT2"),
112 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
113 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"),
114 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
120 "Intel(R) X552/X557-AT (10GBASE-T)"),
122 "Intel(R) X552 (1000BASE-T)"),
138 "Intel(R) X553/X557-AT (10GBASE-T)"),
144 "Intel(R) X540-T2 (Bypass)"),
345 /* Advertise Speed, default to 0 (auto) */
348 &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
351 * Smart speed setting, default to on
360 * MSI-X should be the default for best performance,
366 "Enable MSI-X interrupts");
387 /* Receive-Side Scaling */
391 "Enable Receive-Side Scaling (RSS)");
457 if_softc_ctx_t scctx = sc->shared; in ixgbe_if_tx_queues_alloc()
461 MPASS(sc->num_tx_queues > 0); in ixgbe_if_tx_queues_alloc()
462 MPASS(sc->num_tx_queues == ntxqsets); in ixgbe_if_tx_queues_alloc()
466 sc->tx_queues = in ixgbe_if_tx_queues_alloc()
469 if (!sc->tx_queues) { in ixgbe_if_tx_queues_alloc()
475 for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) { in ixgbe_if_tx_queues_alloc()
476 struct tx_ring *txr = &que->txr; in ixgbe_if_tx_queues_alloc()
478 /* In case SR-IOV is enabled, align the index properly */ in ixgbe_if_tx_queues_alloc()
479 txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i); in ixgbe_if_tx_queues_alloc()
481 txr->sc = que->sc = sc; in ixgbe_if_tx_queues_alloc()
483 /* Allocate report status array */ in ixgbe_if_tx_queues_alloc()
484 txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * in ixgbe_if_tx_queues_alloc()
485 scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO); in ixgbe_if_tx_queues_alloc()
486 if (txr->tx_rsq == NULL) { in ixgbe_if_tx_queues_alloc()
490 for (j = 0; j < scctx->isc_ntxd[0]; j++) in ixgbe_if_tx_queues_alloc()
491 txr->tx_rsq[j] = QIDX_INVALID; in ixgbe_if_tx_queues_alloc()
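The tx_rsq array being filled with QIDX_INVALID above is the per-ring "report status" queue: it records the descriptor indices at which an RS (report status) bit was requested, so the completion path only has to poll those descriptors. Below is a minimal sketch of the usual iflib-style producer/consumer use of such an array, assuming a power-of-two ring size ntxd, a hypothetical producer index pidx, and the tx_rs_pidx/tx_rs_cidx fields that appear later in this listing; the real logic lives in ix_txrx.c and may differ in detail.

	/* Sketch only. Producer (encap path): remember where an RS bit was set. */
	txr->tx_rsq[txr->tx_rs_pidx] = pidx;
	txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & (ntxd - 1);

	/* Consumer (credits update): walk recorded indices until one is not done. */
	while (txr->tx_rs_cidx != txr->tx_rs_pidx) {
		qidx_t rs = txr->tx_rsq[txr->tx_rs_cidx];
		if ((txr->tx_base[rs].wb.status & IXGBE_TXD_STAT_DD) == 0)
			break;
		txr->tx_rsq[txr->tx_rs_cidx] = QIDX_INVALID;
		txr->tx_rs_cidx = (txr->tx_rs_cidx + 1) & (ntxd - 1);
	}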
493 txr->tail = IXGBE_TDT(txr->me); in ixgbe_if_tx_queues_alloc()
494 txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i]; in ixgbe_if_tx_queues_alloc()
495 txr->tx_paddr = paddrs[i]; in ixgbe_if_tx_queues_alloc()
497 txr->bytes = 0; in ixgbe_if_tx_queues_alloc()
498 txr->total_packets = 0; in ixgbe_if_tx_queues_alloc()
501 if (sc->feat_en & IXGBE_FEATURE_FDIR) in ixgbe_if_tx_queues_alloc()
502 txr->atr_sample = atr_sample_rate; in ixgbe_if_tx_queues_alloc()
507 sc->num_tx_queues); in ixgbe_if_tx_queues_alloc()
528 MPASS(sc->num_rx_queues > 0); in ixgbe_if_rx_queues_alloc()
529 MPASS(sc->num_rx_queues == nrxqsets); in ixgbe_if_rx_queues_alloc()
533 sc->rx_queues = in ixgbe_if_rx_queues_alloc()
536 if (!sc->rx_queues) { in ixgbe_if_rx_queues_alloc()
542 for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) { in ixgbe_if_rx_queues_alloc()
543 struct rx_ring *rxr = &que->rxr; in ixgbe_if_rx_queues_alloc()
545 /* In case SR-IOV is enabled, align the index properly */ in ixgbe_if_rx_queues_alloc()
546 rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i); in ixgbe_if_rx_queues_alloc()
548 rxr->sc = que->sc = sc; in ixgbe_if_rx_queues_alloc()
551 rxr->tail = IXGBE_RDT(rxr->me); in ixgbe_if_rx_queues_alloc()
552 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i]; in ixgbe_if_rx_queues_alloc()
553 rxr->rx_paddr = paddrs[i]; in ixgbe_if_rx_queues_alloc()
554 rxr->bytes = 0; in ixgbe_if_rx_queues_alloc()
555 rxr->que = que; in ixgbe_if_rx_queues_alloc()
559 sc->num_rx_queues); in ixgbe_if_rx_queues_alloc()
571 struct ix_tx_queue *tx_que = sc->tx_queues; in ixgbe_if_queues_free()
572 struct ix_rx_queue *rx_que = sc->rx_queues; in ixgbe_if_queues_free()
576 for (i = 0; i < sc->num_tx_queues; i++, tx_que++) { in ixgbe_if_queues_free()
577 struct tx_ring *txr = &tx_que->txr; in ixgbe_if_queues_free()
578 if (txr->tx_rsq == NULL) in ixgbe_if_queues_free()
581 free(txr->tx_rsq, M_IXGBE); in ixgbe_if_queues_free()
582 txr->tx_rsq = NULL; in ixgbe_if_queues_free()
585 free(sc->tx_queues, M_IXGBE); in ixgbe_if_queues_free()
586 sc->tx_queues = NULL; in ixgbe_if_queues_free()
589 free(sc->rx_queues, M_IXGBE); in ixgbe_if_queues_free()
590 sc->rx_queues = NULL; in ixgbe_if_queues_free()
600 struct ixgbe_hw *hw = &sc->hw; in ixgbe_initialize_rss_mapping()
606 if (sc->feat_en & IXGBE_FEATURE_RSS) { in ixgbe_initialize_rss_mapping()
617 switch (sc->hw.mac.type) { in ixgbe_initialize_rss_mapping()
632 if (j == sc->num_rx_queues) in ixgbe_initialize_rss_mapping()
635 if (sc->feat_en & IXGBE_FEATURE_RSS) { in ixgbe_initialize_rss_mapping()
642 queue_id = queue_id % sc->num_rx_queues; in ixgbe_initialize_rss_mapping()
657 IXGBE_ERETA((i >> 2) - 32), reta); in ixgbe_initialize_rss_mapping()
667 if (sc->feat_en & IXGBE_FEATURE_RSS) in ixgbe_initialize_rss_mapping()
671 * Disable UDP - IP fragments aren't currently being handled in ixgbe_initialize_rss_mapping()
672 * and so we end up with a mix of 2-tuple and 4-tuple in ixgbe_initialize_rss_mapping()
702 mrqc |= ixgbe_get_mrqc(sc->iov_mode); in ixgbe_initialize_rss_mapping()
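The "Disable UDP" comment above is the reason the UDP field-enable bits are left out of the RSS configuration: an IP fragment of a UDP datagram carries no UDP header, so 4-tuple hashing would steer fragments and unfragmented packets of the same flow to different queues. As a hedged illustration of what the elided lines typically program (macro names from ixgbe_type.h; the IXGBE_FEATURE_RSS path may add further bits based on the stack's hash configuration):

	/* Illustration only: 4-tuple hashing for TCP, 2-tuple for plain IPv4/IPv6. */
	mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
	       IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
	       IXGBE_MRQC_RSS_FIELD_IPV6 |
	       IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	/* IXGBE_MRQC_RSS_FIELD_IPV4_UDP / _IPV6_UDP deliberately left clear. */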
707 * ixgbe_initialize_receive_units - Setup receive registers and features.
709 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
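Assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10 (the SRRCTL packet-buffer size field counts 1 KB units), BSIZEPKT_ROUNDUP is 1023 and the bufsz computation below rounds the receive mbuf size up to the next 1 KB multiple:

	/* Worked example, assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT == 10:   */
	/* BSIZEPKT_ROUNDUP = (1 << 10) - 1 = 1023                       */
	/* 2048-byte mbufs: (2048 + 1023) >> 10 = 2  -> 2 KB buffer size */
	/* 4096-byte mbufs: (4096 + 1023) >> 10 = 4  -> 4 KB buffer size */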
715 if_softc_ctx_t scctx = sc->shared; in ixgbe_initialize_receive_units()
716 struct ixgbe_hw *hw = &sc->hw; in ixgbe_initialize_receive_units()
732 if (sc->hw.mac.type == ixgbe_mac_82598EB) { in ixgbe_initialize_receive_units()
746 bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >> in ixgbe_initialize_receive_units()
750 for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) { in ixgbe_initialize_receive_units()
751 struct rx_ring *rxr = &que->rxr; in ixgbe_initialize_receive_units()
752 u64 rdba = rxr->rx_paddr; in ixgbe_initialize_receive_units()
754 j = rxr->me; in ixgbe_initialize_receive_units()
761 scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc)); in ixgbe_initialize_receive_units()
776 if (sc->num_rx_queues > 1 && in ixgbe_initialize_receive_units()
777 sc->hw.fc.requested_mode == ixgbe_fc_none) { in ixgbe_initialize_receive_units()
790 rxr->tail = IXGBE_RDT(rxr->me); in ixgbe_initialize_receive_units()
793 if (sc->hw.mac.type != ixgbe_mac_82598EB) { in ixgbe_initialize_receive_units()
805 if (sc->feat_en & IXGBE_FEATURE_RSS) { in ixgbe_initialize_receive_units()
822 * ixgbe_initialize_transmit_units - Enable transmit units.
828 struct ixgbe_hw *hw = &sc->hw; in ixgbe_initialize_transmit_units()
829 if_softc_ctx_t scctx = sc->shared; in ixgbe_initialize_transmit_units()
834 for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues; in ixgbe_initialize_transmit_units()
836 struct tx_ring *txr = &que->txr; in ixgbe_initialize_transmit_units()
837 u64 tdba = txr->tx_paddr; in ixgbe_initialize_transmit_units()
839 int j = txr->me; in ixgbe_initialize_transmit_units()
845 scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc)); in ixgbe_initialize_transmit_units()
852 txr->tail = IXGBE_TDT(txr->me); in ixgbe_initialize_transmit_units()
854 txr->tx_rs_cidx = txr->tx_rs_pidx; in ixgbe_initialize_transmit_units()
855 txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1; in ixgbe_initialize_transmit_units()
856 for (int k = 0; k < scctx->isc_ntxd[0]; k++) in ixgbe_initialize_transmit_units()
857 txr->tx_rsq[k] = QIDX_INVALID; in ixgbe_initialize_transmit_units()
865 switch (hw->mac.type) { in ixgbe_initialize_transmit_units()
875 switch (hw->mac.type) { in ixgbe_initialize_transmit_units()
887 if (hw->mac.type != ixgbe_mac_82598EB) { in ixgbe_initialize_transmit_units()
898 ixgbe_get_mtqc(sc->iov_mode)); in ixgbe_initialize_transmit_units()
915 * ixgbe_if_attach_pre - Device initialization routine, part 1
939 sc->hw.back = sc; in ixgbe_if_attach_pre()
940 sc->ctx = ctx; in ixgbe_if_attach_pre()
941 sc->dev = dev; in ixgbe_if_attach_pre()
942 scctx = sc->shared = iflib_get_softc_ctx(ctx); in ixgbe_if_attach_pre()
943 sc->media = iflib_get_media(ctx); in ixgbe_if_attach_pre()
944 hw = &sc->hw; in ixgbe_if_attach_pre()
947 hw->vendor_id = pci_get_vendor(dev); in ixgbe_if_attach_pre()
948 hw->device_id = pci_get_device(dev); in ixgbe_if_attach_pre()
949 hw->revision_id = pci_get_revid(dev); in ixgbe_if_attach_pre()
950 hw->subsystem_vendor_id = pci_get_subvendor(dev); in ixgbe_if_attach_pre()
951 hw->subsystem_device_id = pci_get_subdevice(dev); in ixgbe_if_attach_pre()
953 /* Do base PCI setup - map BAR0 */ in ixgbe_if_attach_pre()
973 if (hw->mac.ops.fw_recovery_mode && in ixgbe_if_attach_pre()
974 hw->mac.ops.fw_recovery_mode(hw)) { in ixgbe_if_attach_pre()
984 /* 82598 Does not support SR-IOV, initialize everything else */ in ixgbe_if_attach_pre()
985 if (hw->mac.type >= ixgbe_mac_82599_vf) { in ixgbe_if_attach_pre()
986 for (i = 0; i < sc->num_vfs; i++) in ixgbe_if_attach_pre()
987 hw->mbx.ops[i].init_params(hw); in ixgbe_if_attach_pre()
990 hw->allow_unsupported_sfp = allow_unsupported_sfp; in ixgbe_if_attach_pre()
992 if (hw->mac.type != ixgbe_mac_82598EB) in ixgbe_if_attach_pre()
993 hw->phy.smart_speed = ixgbe_smart_speed; in ixgbe_if_attach_pre()
1001 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) { in ixgbe_if_attach_pre()
1010 hw->fc.requested_mode = ixgbe_flow_control; in ixgbe_if_attach_pre()
1012 hw->phy.reset_if_overtemp = true; in ixgbe_if_attach_pre()
1014 hw->phy.reset_if_overtemp = false; in ixgbe_if_attach_pre()
1021 sc->sfp_probe = true; in ixgbe_if_attach_pre()
1034 if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) { in ixgbe_if_attach_pre()
1044 "This device is a pre-production adapter/LOM. Please be" in ixgbe_if_attach_pre()
1063 iflib_set_mac(ctx, hw->mac.addr); in ixgbe_if_attach_pre()
1064 switch (sc->hw.mac.type) { in ixgbe_if_attach_pre()
1068 scctx->isc_rss_table_size = 512; in ixgbe_if_attach_pre()
1069 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64; in ixgbe_if_attach_pre()
1072 scctx->isc_rss_table_size = 128; in ixgbe_if_attach_pre()
1073 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16; in ixgbe_if_attach_pre()
1079 scctx->isc_txqsizes[0] = in ixgbe_if_attach_pre()
1080 roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) + in ixgbe_if_attach_pre()
1082 scctx->isc_rxqsizes[0] = in ixgbe_if_attach_pre()
1083 roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc), in ixgbe_if_attach_pre()
1087 scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | in ixgbe_if_attach_pre()
1089 if (sc->hw.mac.type == ixgbe_mac_82598EB) { in ixgbe_if_attach_pre()
1090 scctx->isc_tx_nsegments = IXGBE_82598_SCATTER; in ixgbe_if_attach_pre()
1092 scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP; in ixgbe_if_attach_pre()
1093 scctx->isc_tx_nsegments = IXGBE_82599_SCATTER; in ixgbe_if_attach_pre()
1096 scctx->isc_msix_bar = pci_msix_table_bar(dev); in ixgbe_if_attach_pre()
1098 scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments; in ixgbe_if_attach_pre()
1099 scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE; in ixgbe_if_attach_pre()
1100 scctx->isc_tx_tso_segsize_max = PAGE_SIZE; in ixgbe_if_attach_pre()
1102 scctx->isc_txrx = &ixgbe_txrx; in ixgbe_if_attach_pre()
1104 scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS; in ixgbe_if_attach_pre()
1109 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT); in ixgbe_if_attach_pre()
1111 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext); in ixgbe_if_attach_pre()
1118 * ixgbe_if_attach_post - Device initialization routine, part 2
1136 hw = &sc->hw; in ixgbe_if_attach_post()
1138 if (sc->intr_type == IFLIB_INTR_LEGACY && in ixgbe_if_attach_post()
1139 (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) { in ixgbe_if_attach_post()
1146 sc->mta = malloc(sizeof(*sc->mta) * MAX_NUM_MULTICAST_ADDRESSES, in ixgbe_if_attach_post()
1148 if (sc->mta == NULL) { in ixgbe_if_attach_post()
1178 /* Check PCIE slot type/speed/width */ in ixgbe_if_attach_post()
1191 sc->dmac = 0; in ixgbe_if_attach_post()
1193 sc->advertise = ixgbe_get_default_advertise(sc); in ixgbe_if_attach_post()
1195 if (sc->feat_cap & IXGBE_FEATURE_SRIOV) in ixgbe_if_attach_post()
1202 if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE) { in ixgbe_if_attach_post()
1203 sc->recovery_mode = 0; in ixgbe_if_attach_post()
1206 callout_init(&sc->fw_mode_timer, true); in ixgbe_if_attach_post()
1209 callout_reset(&sc->fw_mode_timer, hz, ixgbe_fw_mode_timer, sc); in ixgbe_if_attach_post()
1223 * Sets each port's hw->wol_enabled value depending
1229 struct ixgbe_hw *hw = &sc->hw; in ixgbe_check_wol_support()
1233 sc->wol_support = hw->wol_enabled = 0; in ixgbe_check_wol_support()
1237 hw->bus.func == 0)) in ixgbe_check_wol_support()
1238 sc->wol_support = hw->wol_enabled = 1; in ixgbe_check_wol_support()
1241 sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC); in ixgbe_check_wol_support()
1261 sc->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN; in ixgbe_setup_interface()
1263 sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw); in ixgbe_setup_interface()
1268 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO); in ixgbe_setup_interface()
1284 return (sc->ipackets); in ixgbe_if_get_counter()
1286 return (sc->opackets); in ixgbe_if_get_counter()
1288 return (sc->ibytes); in ixgbe_if_get_counter()
1290 return (sc->obytes); in ixgbe_if_get_counter()
1292 return (sc->imcasts); in ixgbe_if_get_counter()
1294 return (sc->omcasts); in ixgbe_if_get_counter()
1298 return (sc->iqdrops); in ixgbe_if_get_counter()
1302 return (sc->ierrors); in ixgbe_if_get_counter()
1315 struct ixgbe_hw *hw = &sc->hw; in ixgbe_if_i2c_req()
1318 if (hw->phy.ops.read_i2c_byte == NULL) in ixgbe_if_i2c_req()
1320 for (i = 0; i < req->len; i++) in ixgbe_if_i2c_req()
1321 hw->phy.ops.read_i2c_byte(hw, req->offset + i, in ixgbe_if_i2c_req()
1322 req->dev_addr, &req->data[i]); in ixgbe_if_i2c_req()
1326 /* ixgbe_if_needs_restart - Tell iflib when the driver needs to be
1352 struct ixgbe_hw *hw = &sc->hw; in ixgbe_add_media_types()
1356 layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw); in ixgbe_add_media_types()
1360 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL); in ixgbe_add_media_types()
1362 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL); in ixgbe_add_media_types()
1364 ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); in ixgbe_add_media_types()
1366 ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL); in ixgbe_add_media_types()
1368 if (hw->mac.type == ixgbe_mac_X550) { in ixgbe_add_media_types()
1369 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL); in ixgbe_add_media_types()
1370 ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL); in ixgbe_add_media_types()
1375 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0, in ixgbe_add_media_types()
1377 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL); in ixgbe_add_media_types()
1381 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL); in ixgbe_add_media_types()
1382 if (hw->phy.multispeed_fiber) in ixgbe_add_media_types()
1383 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0, in ixgbe_add_media_types()
1387 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL); in ixgbe_add_media_types()
1388 if (hw->phy.multispeed_fiber) in ixgbe_add_media_types()
1389 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, in ixgbe_add_media_types()
1392 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL); in ixgbe_add_media_types()
1394 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL); in ixgbe_add_media_types()
1398 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL); in ixgbe_add_media_types()
1400 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL); in ixgbe_add_media_types()
1402 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL); in ixgbe_add_media_types()
1404 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL); in ixgbe_add_media_types()
1409 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL); in ixgbe_add_media_types()
1414 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL); in ixgbe_add_media_types()
1419 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL); in ixgbe_add_media_types()
1424 ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL); in ixgbe_add_media_types()
1430 if (hw->device_id == IXGBE_DEV_ID_82598AT) { in ixgbe_add_media_types()
1431 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, in ixgbe_add_media_types()
1433 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL); in ixgbe_add_media_types()
1436 ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); in ixgbe_add_media_types()
1445 switch (hw->mac.type) { in ixgbe_is_sfp()
1447 if (hw->phy.type == ixgbe_phy_nl) in ixgbe_is_sfp()
1451 switch (hw->mac.ops.get_media_type(hw)) { in ixgbe_is_sfp()
1460 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) in ixgbe_is_sfp()
1475 struct ixgbe_hw *hw = &sc->hw; in ixgbe_config_link()
1482 sc->task_requests |= IXGBE_REQUEST_TASK_MOD; in ixgbe_config_link()
1485 if (hw->mac.ops.check_link) in ixgbe_config_link()
1486 err = ixgbe_check_link(hw, &sc->link_speed, in ixgbe_config_link()
1487 &sc->link_up, false); in ixgbe_config_link()
1490 autoneg = hw->phy.autoneg_advertised; in ixgbe_config_link()
1491 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) in ixgbe_config_link()
1492 err = hw->mac.ops.get_link_capabilities(hw, &autoneg, in ixgbe_config_link()
1497 if (hw->mac.type == ixgbe_mac_X550 && in ixgbe_config_link()
1498 hw->phy.autoneg_advertised == 0) { in ixgbe_config_link()
1509 * If hw->phy.autoneg_advertised does not in ixgbe_config_link()
1517 * Otherwise (i.e. if hw->phy.autoneg_advertised in ixgbe_config_link()
1526 if (hw->mac.ops.setup_link) in ixgbe_config_link()
1527 err = hw->mac.ops.setup_link(hw, autoneg, in ixgbe_config_link()
1528 sc->link_up); in ixgbe_config_link()
1533 * ixgbe_update_stats_counters - Update board statistics counters.
1538 struct ixgbe_hw *hw = &sc->hw; in ixgbe_update_stats_counters()
1539 struct ixgbe_hw_stats *stats = &sc->stats.pf; in ixgbe_update_stats_counters()
1544 stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); in ixgbe_update_stats_counters()
1545 stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); in ixgbe_update_stats_counters()
1546 stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); in ixgbe_update_stats_counters()
1547 stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); in ixgbe_update_stats_counters()
1548 stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0)); in ixgbe_update_stats_counters()
1551 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); in ixgbe_update_stats_counters()
1552 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); in ixgbe_update_stats_counters()
1553 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); in ixgbe_update_stats_counters()
1555 stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); in ixgbe_update_stats_counters()
1556 stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); in ixgbe_update_stats_counters()
1557 stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); in ixgbe_update_stats_counters()
1560 stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); in ixgbe_update_stats_counters()
1561 stats->gprc -= missed_rx; in ixgbe_update_stats_counters()
1563 if (hw->mac.type != ixgbe_mac_82598EB) { in ixgbe_update_stats_counters()
1564 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) + in ixgbe_update_stats_counters()
1566 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) + in ixgbe_update_stats_counters()
1568 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) + in ixgbe_update_stats_counters()
1570 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); in ixgbe_update_stats_counters()
1572 stats->lxoffrxc += lxoffrxc; in ixgbe_update_stats_counters()
1574 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); in ixgbe_update_stats_counters()
1576 stats->lxoffrxc += lxoffrxc; in ixgbe_update_stats_counters()
1578 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); in ixgbe_update_stats_counters()
1579 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); in ixgbe_update_stats_counters()
1580 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); in ixgbe_update_stats_counters()
1588 sc->shared->isc_pause_frames = 1; in ixgbe_update_stats_counters()
1595 stats->bprc += bprc; in ixgbe_update_stats_counters()
1596 stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); in ixgbe_update_stats_counters()
1597 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_update_stats_counters()
1598 stats->mprc -= bprc; in ixgbe_update_stats_counters()
1600 stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); in ixgbe_update_stats_counters()
1601 stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); in ixgbe_update_stats_counters()
1602 stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); in ixgbe_update_stats_counters()
1603 stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); in ixgbe_update_stats_counters()
1604 stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); in ixgbe_update_stats_counters()
1605 stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); in ixgbe_update_stats_counters()
1608 stats->lxontxc += lxon; in ixgbe_update_stats_counters()
1610 stats->lxofftxc += lxoff; in ixgbe_update_stats_counters()
1613 stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); in ixgbe_update_stats_counters()
1614 stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); in ixgbe_update_stats_counters()
1615 stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); in ixgbe_update_stats_counters()
1616 stats->gptc -= total; in ixgbe_update_stats_counters()
1617 stats->mptc -= total; in ixgbe_update_stats_counters()
1618 stats->ptc64 -= total; in ixgbe_update_stats_counters()
1619 stats->gotc -= total * ETHER_MIN_LEN; in ixgbe_update_stats_counters()
1621 stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); in ixgbe_update_stats_counters()
1622 stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); in ixgbe_update_stats_counters()
1623 stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); in ixgbe_update_stats_counters()
1624 stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); in ixgbe_update_stats_counters()
1625 stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); in ixgbe_update_stats_counters()
1626 stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); in ixgbe_update_stats_counters()
1627 stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); in ixgbe_update_stats_counters()
1628 stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); in ixgbe_update_stats_counters()
1629 stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); in ixgbe_update_stats_counters()
1630 stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); in ixgbe_update_stats_counters()
1631 stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); in ixgbe_update_stats_counters()
1632 stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); in ixgbe_update_stats_counters()
1633 stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); in ixgbe_update_stats_counters()
1634 stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); in ixgbe_update_stats_counters()
1635 stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); in ixgbe_update_stats_counters()
1636 stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC); in ixgbe_update_stats_counters()
1637 stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); in ixgbe_update_stats_counters()
1638 stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); in ixgbe_update_stats_counters()
1640 if (hw->mac.type != ixgbe_mac_82598EB) { in ixgbe_update_stats_counters()
1641 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); in ixgbe_update_stats_counters()
1642 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); in ixgbe_update_stats_counters()
1643 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); in ixgbe_update_stats_counters()
1644 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); in ixgbe_update_stats_counters()
1645 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); in ixgbe_update_stats_counters()
1649 IXGBE_SET_IPACKETS(sc, stats->gprc); in ixgbe_update_stats_counters()
1650 IXGBE_SET_OPACKETS(sc, stats->gptc); in ixgbe_update_stats_counters()
1651 IXGBE_SET_IBYTES(sc, stats->gorc); in ixgbe_update_stats_counters()
1652 IXGBE_SET_OBYTES(sc, stats->gotc); in ixgbe_update_stats_counters()
1653 IXGBE_SET_IMCASTS(sc, stats->mprc); in ixgbe_update_stats_counters()
1654 IXGBE_SET_OMCASTS(sc, stats->mptc); in ixgbe_update_stats_counters()
1660 * - CRC error count, in ixgbe_update_stats_counters()
1661 * - illegal byte error count, in ixgbe_update_stats_counters()
1662 * - missed packets count, in ixgbe_update_stats_counters()
1663 * - length error count, in ixgbe_update_stats_counters()
1664 * - undersized packets count, in ixgbe_update_stats_counters()
1665 * - fragmented packets count, in ixgbe_update_stats_counters()
1666 * - oversized packets count, in ixgbe_update_stats_counters()
1667 * - jabber count. in ixgbe_update_stats_counters()
1669 IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc + in ixgbe_update_stats_counters()
1670 stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + in ixgbe_update_stats_counters()
1671 stats->roc + stats->rjc); in ixgbe_update_stats_counters()
1682 device_t dev = iflib_get_dev(sc->ctx); in ixgbe_add_hw_stats()
1688 struct ixgbe_hw_stats *stats = &sc->stats.pf; in ixgbe_add_hw_stats()
1698 CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets"); in ixgbe_add_hw_stats()
1700 CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts"); in ixgbe_add_hw_stats()
1702 CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled"); in ixgbe_add_hw_stats()
1704 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; in ixgbe_add_hw_stats()
1706 struct tx_ring *txr = &tx_que->txr; in ixgbe_add_hw_stats()
1721 CTLFLAG_RD, &txr->tso_tx, "TSO"); in ixgbe_add_hw_stats()
1723 CTLFLAG_RD, &txr->total_packets, in ixgbe_add_hw_stats()
1727 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; in ixgbe_add_hw_stats()
1729 struct rx_ring *rxr = &rx_que->rxr; in ixgbe_add_hw_stats()
1737 &sc->rx_queues[i], 0, in ixgbe_add_hw_stats()
1741 CTLFLAG_RD, &(sc->rx_queues[i].irqs), in ixgbe_add_hw_stats()
1752 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received"); in ixgbe_add_hw_stats()
1754 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received"); in ixgbe_add_hw_stats()
1756 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames"); in ixgbe_add_hw_stats()
1758 CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets"); in ixgbe_add_hw_stats()
1767 CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS); in ixgbe_add_hw_stats()
1769 CTLFLAG_RD, &stats->crcerrs, "CRC Errors"); in ixgbe_add_hw_stats()
1771 CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors"); in ixgbe_add_hw_stats()
1773 CTLFLAG_RD, &stats->errbc, "Byte Errors"); in ixgbe_add_hw_stats()
1775 CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded"); in ixgbe_add_hw_stats()
1777 CTLFLAG_RD, &stats->mlfc, "MAC Local Faults"); in ixgbe_add_hw_stats()
1779 CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults"); in ixgbe_add_hw_stats()
1781 CTLFLAG_RD, &stats->rlec, "Receive Length Errors"); in ixgbe_add_hw_stats()
1783 CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count"); in ixgbe_add_hw_stats()
1787 CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted"); in ixgbe_add_hw_stats()
1789 CTLFLAG_RD, &stats->lxonrxc, "Link XON Received"); in ixgbe_add_hw_stats()
1791 CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted"); in ixgbe_add_hw_stats()
1793 CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received"); in ixgbe_add_hw_stats()
1797 CTLFLAG_RD, &stats->tor, "Total Octets Received"); in ixgbe_add_hw_stats()
1799 CTLFLAG_RD, &stats->gorc, "Good Octets Received"); in ixgbe_add_hw_stats()
1801 CTLFLAG_RD, &stats->tpr, "Total Packets Received"); in ixgbe_add_hw_stats()
1803 CTLFLAG_RD, &stats->gprc, "Good Packets Received"); in ixgbe_add_hw_stats()
1805 CTLFLAG_RD, &stats->mprc, "Multicast Packets Received"); in ixgbe_add_hw_stats()
1807 CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received"); in ixgbe_add_hw_stats()
1809 CTLFLAG_RD, &stats->prc64, "64 byte frames received "); in ixgbe_add_hw_stats()
1811 CTLFLAG_RD, &stats->prc127, "65-127 byte frames received"); in ixgbe_add_hw_stats()
1813 CTLFLAG_RD, &stats->prc255, "128-255 byte frames received"); in ixgbe_add_hw_stats()
1815 CTLFLAG_RD, &stats->prc511, "256-511 byte frames received"); in ixgbe_add_hw_stats()
1817 CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received"); in ixgbe_add_hw_stats()
1819 CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received"); in ixgbe_add_hw_stats()
1821 CTLFLAG_RD, &stats->ruc, "Receive Undersized"); in ixgbe_add_hw_stats()
1823 CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received "); in ixgbe_add_hw_stats()
1825 CTLFLAG_RD, &stats->roc, "Oversized Packets Received"); in ixgbe_add_hw_stats()
1827 CTLFLAG_RD, &stats->rjc, "Received Jabber"); in ixgbe_add_hw_stats()
1829 CTLFLAG_RD, &stats->mngprc, "Management Packets Received"); in ixgbe_add_hw_stats()
1831 CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped"); in ixgbe_add_hw_stats()
1833 CTLFLAG_RD, &stats->xec, "Checksum Errors"); in ixgbe_add_hw_stats()
1837 CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted"); in ixgbe_add_hw_stats()
1839 CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted"); in ixgbe_add_hw_stats()
1841 CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted"); in ixgbe_add_hw_stats()
1843 CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted"); in ixgbe_add_hw_stats()
1845 CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted"); in ixgbe_add_hw_stats()
1847 CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted"); in ixgbe_add_hw_stats()
1849 CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted "); in ixgbe_add_hw_stats()
1851 CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted"); in ixgbe_add_hw_stats()
1853 CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted"); in ixgbe_add_hw_stats()
1855 CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted"); in ixgbe_add_hw_stats()
1857 CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted"); in ixgbe_add_hw_stats()
1859 CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted"); in ixgbe_add_hw_stats()
1863 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1870 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); in ixgbe_sysctl_tdh_handler()
1878 if (atomic_load_acq_int(&txr->sc->recovery_mode)) in ixgbe_sysctl_tdh_handler()
1881 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me)); in ixgbe_sysctl_tdh_handler()
1883 if (error || !req->newptr) in ixgbe_sysctl_tdh_handler()
1890 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1897 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); in ixgbe_sysctl_tdt_handler()
1904 if (atomic_load_acq_int(&txr->sc->recovery_mode)) in ixgbe_sysctl_tdt_handler()
1907 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me)); in ixgbe_sysctl_tdt_handler()
1909 if (error || !req->newptr) in ixgbe_sysctl_tdt_handler()
1916 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1923 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); in ixgbe_sysctl_rdh_handler()
1930 if (atomic_load_acq_int(&rxr->sc->recovery_mode)) in ixgbe_sysctl_rdh_handler()
1933 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me)); in ixgbe_sysctl_rdh_handler()
1935 if (error || !req->newptr) in ixgbe_sysctl_rdh_handler()
1942 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1949 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); in ixgbe_sysctl_rdt_handler()
1956 if (atomic_load_acq_int(&rxr->sc->recovery_mode)) in ixgbe_sysctl_rdt_handler()
1959 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me)); in ixgbe_sysctl_rdt_handler()
1961 if (error || !req->newptr) in ixgbe_sysctl_rdt_handler()
1983 sc->shadow_vfta[index] |= (1 << bit); in ixgbe_if_vlan_register()
1984 ++sc->num_vlans; in ixgbe_if_vlan_register()
2001 sc->shadow_vfta[index] &= ~(1 << bit); in ixgbe_if_vlan_unregister()
2002 --sc->num_vlans; in ixgbe_if_vlan_unregister()
2003 /* Re-init to load the changes */ in ixgbe_if_vlan_unregister()
2015 struct ixgbe_hw *hw = &sc->hw; in ixgbe_setup_vlan_hw_support()
2027 if (sc->num_vlans == 0 || in ixgbe_setup_vlan_hw_support()
2030 for (i = 0; i < sc->num_rx_queues; i++) { in ixgbe_setup_vlan_hw_support()
2031 rxr = &sc->rx_queues[i].rxr; in ixgbe_setup_vlan_hw_support()
2033 if (hw->mac.type != ixgbe_mac_82598EB) { in ixgbe_setup_vlan_hw_support()
2035 IXGBE_RXDCTL(rxr->me)); in ixgbe_setup_vlan_hw_support()
2037 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), in ixgbe_setup_vlan_hw_support()
2040 rxr->vtag_strip = false; in ixgbe_setup_vlan_hw_support()
2046 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_setup_vlan_hw_support()
2054 for (i = 0; i < sc->num_rx_queues; i++) { in ixgbe_setup_vlan_hw_support()
2055 rxr = &sc->rx_queues[i].rxr; in ixgbe_setup_vlan_hw_support()
2057 if (hw->mac.type != ixgbe_mac_82598EB) { in ixgbe_setup_vlan_hw_support()
2059 IXGBE_RXDCTL(rxr->me)); in ixgbe_setup_vlan_hw_support()
2061 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), in ixgbe_setup_vlan_hw_support()
2064 rxr->vtag_strip = true; in ixgbe_setup_vlan_hw_support()
2075 if (sc->shadow_vfta[i] != 0) in ixgbe_setup_vlan_hw_support()
2077 sc->shadow_vfta[i]); in ixgbe_setup_vlan_hw_support()
2085 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_setup_vlan_hw_support()
2093 * Get the width and transaction speed of
2099 device_t dev = iflib_get_dev(sc->ctx); in ixgbe_get_slot_info()
2100 struct ixgbe_hw *hw = &sc->hw; in ixgbe_get_slot_info()
2106 switch (hw->device_id) { in ixgbe_get_slot_info()
2117 * Some devices don't use PCI-E, but there is no need in ixgbe_get_slot_info()
2118 * to display "Unknown" for bus speed and width. in ixgbe_get_slot_info()
2120 switch (hw->mac.type) { in ixgbe_get_slot_info()
2131 * up the PCI tree to find the speed of the expansion in ixgbe_get_slot_info()
2147 * Hmm...can't get PCI-Express capabilities. in ixgbe_get_slot_info()
2159 device_printf(dev, "PCI Express Bus: Speed %s %s\n", in ixgbe_get_slot_info()
2160 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" : in ixgbe_get_slot_info()
2161 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" : in ixgbe_get_slot_info()
2162 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" : in ixgbe_get_slot_info()
2164 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : in ixgbe_get_slot_info()
2165 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : in ixgbe_get_slot_info()
2166 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" : in ixgbe_get_slot_info()
2170 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) && in ixgbe_get_slot_info()
2171 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && in ixgbe_get_slot_info()
2172 (hw->bus.speed == ixgbe_bus_speed_2500))) { in ixgbe_get_slot_info()
2174 "PCI-Express bandwidth available for this card" in ixgbe_get_slot_info()
2180 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) && in ixgbe_get_slot_info()
2181 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) && in ixgbe_get_slot_info()
2182 (hw->bus.speed < ixgbe_bus_speed_8000))) { in ixgbe_get_slot_info()
2184 "PCI-Express bandwidth available for this card" in ixgbe_get_slot_info()
2192 "Unable to determine slot speed/width. The speed/width" in ixgbe_get_slot_info()
2201 * Setup MSI-X Interrupt resources and handlers
2207 struct ix_rx_queue *rx_que = sc->rx_queues; in ixgbe_if_msix_intr_assign()
2214 for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) { in ixgbe_if_msix_intr_assign()
2218 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, in ixgbe_if_msix_intr_assign()
2219 IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, in ixgbe_if_msix_intr_assign()
2226 sc->num_rx_queues = i + 1; in ixgbe_if_msix_intr_assign()
2230 rx_que->msix = vector; in ixgbe_if_msix_intr_assign()
2232 for (int i = 0; i < sc->num_tx_queues; i++) { in ixgbe_if_msix_intr_assign()
2234 tx_que = &sc->tx_queues[i]; in ixgbe_if_msix_intr_assign()
2235 tx_que->msix = i % sc->num_rx_queues; in ixgbe_if_msix_intr_assign()
2237 &sc->rx_queues[tx_que->msix].que_irq, in ixgbe_if_msix_intr_assign()
2238 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); in ixgbe_if_msix_intr_assign()
2241 error = iflib_irq_alloc_generic(ctx, &sc->irq, rid, in ixgbe_if_msix_intr_assign()
2249 sc->vector = vector; in ixgbe_if_msix_intr_assign()
2253 iflib_irq_free(ctx, &sc->irq); in ixgbe_if_msix_intr_assign()
2254 rx_que = sc->rx_queues; in ixgbe_if_msix_intr_assign()
2255 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) in ixgbe_if_msix_intr_assign()
2256 iflib_irq_free(ctx, &rx_que->que_irq); in ixgbe_if_msix_intr_assign()
2265 struct rx_ring *rxr = &que->rxr; in ixgbe_perform_aim()
2266 /* FIXME struct tx_ring *txr = ... ->txr; */ in ixgbe_perform_aim()
2270 * - Write out last calculated setting in ixgbe_perform_aim()
2271 * - Calculate based on average size over in ixgbe_perform_aim()
2274 if (que->eitr_setting) { in ixgbe_perform_aim()
2275 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(que->msix), in ixgbe_perform_aim()
2276 que->eitr_setting); in ixgbe_perform_aim()
2279 que->eitr_setting = 0; in ixgbe_perform_aim()
2281 if (rxr->bytes == 0) { in ixgbe_perform_aim()
2282 /* FIXME && txr->bytes == 0 */ in ixgbe_perform_aim()
2286 if ((rxr->bytes) && (rxr->packets)) in ixgbe_perform_aim()
2287 newitr = rxr->bytes / rxr->packets; in ixgbe_perform_aim()
2289 * if ((txr->bytes) && (txr->packets)) in ixgbe_perform_aim()
2290 * newitr = txr->bytes/txr->packets; in ixgbe_perform_aim()
2291 * if ((rxr->bytes) && (rxr->packets)) in ixgbe_perform_aim()
2292 * newitr = max(newitr, (rxr->bytes / rxr->packets)); in ixgbe_perform_aim()
2306 if (sc->hw.mac.type == ixgbe_mac_82598EB) { in ixgbe_perform_aim()
2313 que->eitr_setting = newitr; in ixgbe_perform_aim()
2316 /* FIXME txr->bytes = 0; */ in ixgbe_perform_aim()
2317 /* FIXME txr->packets = 0; */ in ixgbe_perform_aim()
2318 rxr->bytes = 0; in ixgbe_perform_aim()
2319 rxr->packets = 0; in ixgbe_perform_aim()
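The comments above describe the adaptive interrupt moderation (AIM) scheme: the previously computed EITR value is written out first, then a new one is derived from the average frame size seen since the last interrupt. A condensed sketch of that derivation follows, with the overhead/clamping constants and register encoding stated as assumptions rather than a verbatim copy of the elided lines:

	/* Sketch only: turn average bytes-per-packet into an EITR setting. */
	if (rxr->bytes != 0 && rxr->packets != 0)
		newitr = rxr->bytes / rxr->packets;	/* average frame size */
	newitr = min(newitr + 24, 3000);	/* add frame overhead, clamp (assumed) */
	newitr /= 2;				/* scale into an interrupt interval */
	if (sc->hw.mac.type == ixgbe_mac_82598EB)
		newitr |= newitr << 16;		/* 82598 encodes the interval twice */
	else
		newitr |= IXGBE_EITR_CNT_WDIS;	/* newer MACs: don't reset counter on write */
	que->eitr_setting = newitr;		/* written to IXGBE_EITR() on the next pass */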
2325 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2331 struct ixgbe_softc *sc = que->sc; in ixgbe_msix_que()
2332 if_t ifp = iflib_get_ifp(que->sc->ctx); in ixgbe_msix_que()
2338 ixgbe_disable_queue(sc, que->msix); in ixgbe_msix_que()
2339 ++que->irqs; in ixgbe_msix_que()
2342 if (sc->enable_aim) { in ixgbe_msix_que()
2350 * ixgbe_media_status - Media Ioctl callback
2359 struct ixgbe_hw *hw = &sc->hw; in ixgbe_if_media_status()
2364 ifmr->ifm_status = IFM_AVALID; in ixgbe_if_media_status()
2365 ifmr->ifm_active = IFM_ETHER; in ixgbe_if_media_status()
2367 if (!sc->link_active) in ixgbe_if_media_status()
2370 ifmr->ifm_status |= IFM_ACTIVE; in ixgbe_if_media_status()
2371 layer = sc->phy_layer; in ixgbe_if_media_status()
2377 switch (sc->link_speed) { in ixgbe_if_media_status()
2379 ifmr->ifm_active |= IFM_10G_T | IFM_FDX; in ixgbe_if_media_status()
2382 ifmr->ifm_active |= IFM_1000_T | IFM_FDX; in ixgbe_if_media_status()
2385 ifmr->ifm_active |= IFM_100_TX | IFM_FDX; in ixgbe_if_media_status()
2388 ifmr->ifm_active |= IFM_10_T | IFM_FDX; in ixgbe_if_media_status()
2391 if (hw->mac.type == ixgbe_mac_X550) in ixgbe_if_media_status()
2392 switch (sc->link_speed) { in ixgbe_if_media_status()
2394 ifmr->ifm_active |= IFM_5000_T | IFM_FDX; in ixgbe_if_media_status()
2397 ifmr->ifm_active |= IFM_2500_T | IFM_FDX; in ixgbe_if_media_status()
2402 switch (sc->link_speed) { in ixgbe_if_media_status()
2404 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX; in ixgbe_if_media_status()
2407 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; in ixgbe_if_media_status()
2411 switch (sc->link_speed) { in ixgbe_if_media_status()
2413 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX; in ixgbe_if_media_status()
2416 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; in ixgbe_if_media_status()
2420 switch (sc->link_speed) { in ixgbe_if_media_status()
2422 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX; in ixgbe_if_media_status()
2425 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; in ixgbe_if_media_status()
2430 switch (sc->link_speed) { in ixgbe_if_media_status()
2432 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; in ixgbe_if_media_status()
2435 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; in ixgbe_if_media_status()
2439 switch (sc->link_speed) { in ixgbe_if_media_status()
2441 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; in ixgbe_if_media_status()
2450 switch (sc->link_speed) { in ixgbe_if_media_status()
2452 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; in ixgbe_if_media_status()
2455 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; in ixgbe_if_media_status()
2458 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; in ixgbe_if_media_status()
2464 switch (sc->link_speed) { in ixgbe_if_media_status()
2466 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; in ixgbe_if_media_status()
2469 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; in ixgbe_if_media_status()
2472 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; in ixgbe_if_media_status()
2477 switch (sc->link_speed) { in ixgbe_if_media_status()
2479 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX; in ixgbe_if_media_status()
2482 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; in ixgbe_if_media_status()
2485 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; in ixgbe_if_media_status()
2491 switch (sc->link_speed) { in ixgbe_if_media_status()
2493 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX; in ixgbe_if_media_status()
2496 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; in ixgbe_if_media_status()
2499 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; in ixgbe_if_media_status()
2505 if (IFM_SUBTYPE(ifmr->ifm_active) == 0) in ixgbe_if_media_status()
2506 ifmr->ifm_active |= IFM_UNKNOWN; in ixgbe_if_media_status()
2509 if (hw->fc.current_mode == ixgbe_fc_rx_pause || in ixgbe_if_media_status()
2510 hw->fc.current_mode == ixgbe_fc_full) in ixgbe_if_media_status()
2511 ifmr->ifm_active |= IFM_ETH_RXPAUSE; in ixgbe_if_media_status()
2512 if (hw->fc.current_mode == ixgbe_fc_tx_pause || in ixgbe_if_media_status()
2513 hw->fc.current_mode == ixgbe_fc_full) in ixgbe_if_media_status()
2514 ifmr->ifm_active |= IFM_ETH_TXPAUSE; in ixgbe_if_media_status()
2518 * ixgbe_media_change - Media Ioctl callback
2520 * Called when the user changes speed/duplex using
2528 struct ixgbe_hw *hw = &sc->hw; in ixgbe_if_media_change()
2529 ixgbe_link_speed speed = 0; in ixgbe_if_media_change() local
2533 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) in ixgbe_if_media_change()
2536 if (hw->phy.media_type == ixgbe_media_type_backplane) in ixgbe_if_media_change()
2544 switch (IFM_SUBTYPE(ifm->ifm_media)) { in ixgbe_if_media_change()
2547 speed |= IXGBE_LINK_SPEED_100_FULL; in ixgbe_if_media_change()
2548 speed |= IXGBE_LINK_SPEED_1GB_FULL; in ixgbe_if_media_change()
2549 speed |= IXGBE_LINK_SPEED_10GB_FULL; in ixgbe_if_media_change()
2560 speed |= IXGBE_LINK_SPEED_1GB_FULL; in ixgbe_if_media_change()
2561 speed |= IXGBE_LINK_SPEED_10GB_FULL; in ixgbe_if_media_change()
2570 speed |= IXGBE_LINK_SPEED_1GB_FULL; in ixgbe_if_media_change()
2573 speed |= IXGBE_LINK_SPEED_100_FULL; in ixgbe_if_media_change()
2574 speed |= IXGBE_LINK_SPEED_1GB_FULL; in ixgbe_if_media_change()
2577 speed |= IXGBE_LINK_SPEED_10GB_FULL; in ixgbe_if_media_change()
2580 speed |= IXGBE_LINK_SPEED_5GB_FULL; in ixgbe_if_media_change()
2583 speed |= IXGBE_LINK_SPEED_2_5GB_FULL; in ixgbe_if_media_change()
2586 speed |= IXGBE_LINK_SPEED_100_FULL; in ixgbe_if_media_change()
2589 speed |= IXGBE_LINK_SPEED_10_FULL; in ixgbe_if_media_change()
2595 hw->mac.autotry_restart = true; in ixgbe_if_media_change()
2596 hw->mac.ops.setup_link(hw, speed, true); in ixgbe_if_media_change()
2597 sc->advertise = in ixgbe_if_media_change()
2598 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) | in ixgbe_if_media_change()
2599 ((speed & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) | in ixgbe_if_media_change()
2600 ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) | in ixgbe_if_media_change()
2601 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) | in ixgbe_if_media_change()
2602 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) | in ixgbe_if_media_change()
2603 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0); in ixgbe_if_media_change()
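The bitmask accumulated into sc->advertise above uses the same encoding as the advertise_speed tunable declared near the top of this listing. Summarizing the mapping visible in the assignments, with the tunable/sysctl names as the only assumption (the hw.ix prefix is shown earlier; the per-device dev.ix.<N> form follows the usual convention):

	/* advertise bitmask, as assembled above:
	 *   0x1 = 100 Mb/s   0x2 = 1 Gb/s     0x4 = 10 Gb/s
	 *   0x8 = 10 Mb/s    0x10 = 2.5 Gb/s  0x20 = 5 Gb/s
	 * e.g. advertising only 1 Gb/s and 10 Gb/s is 0x2 | 0x4 = 0x6, settable via
	 * the hw.ix.advertise_speed tunable or dev.ix.<N>.advertise_speed sysctl. */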
2624 rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL); in ixgbe_if_promisc_set()
2634 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl); in ixgbe_if_promisc_set()
2638 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl); in ixgbe_if_promisc_set()
2642 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl); in ixgbe_if_promisc_set()
2648 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2654 struct ixgbe_hw *hw = &sc->hw; in ixgbe_msix_link()
2658 ++sc->link_irq; in ixgbe_msix_link()
2673 sc->task_requests |= IXGBE_REQUEST_TASK_LSC; in ixgbe_msix_link()
2676 if (sc->hw.mac.type != ixgbe_mac_82598EB) { in ixgbe_msix_link()
2677 if ((sc->feat_en & IXGBE_FEATURE_FDIR) && in ixgbe_msix_link()
2680 if (!atomic_cmpset_int(&sc->fdir_reinit, 0, 1)) in ixgbe_msix_link()
2684 sc->task_requests |= IXGBE_REQUEST_TASK_FDIR; in ixgbe_msix_link()
2687 device_printf(iflib_get_dev(sc->ctx), in ixgbe_msix_link()
2689 hw->mac.flags |= in ixgbe_msix_link()
2697 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) { in ixgbe_msix_link()
2698 switch (sc->hw.mac.type) { in ixgbe_msix_link()
2706 retval = hw->phy.ops.check_overtemp(hw); in ixgbe_msix_link()
2709 device_printf(iflib_get_dev(sc->ctx), in ixgbe_msix_link()
2712 device_printf(iflib_get_dev(sc->ctx), in ixgbe_msix_link()
2718 retval = hw->phy.ops.check_overtemp(hw); in ixgbe_msix_link()
2721 device_printf(iflib_get_dev(sc->ctx), in ixgbe_msix_link()
2724 device_printf(iflib_get_dev(sc->ctx), in ixgbe_msix_link()
2733 if ((sc->feat_en & IXGBE_FEATURE_SRIOV) && in ixgbe_msix_link()
2735 sc->task_requests |= IXGBE_REQUEST_TASK_MBX; in ixgbe_msix_link()
2739 /* Pluggable optics-related interrupt */ in ixgbe_msix_link()
2740 if (hw->mac.type >= ixgbe_mac_X540) in ixgbe_msix_link()
2747 sc->task_requests |= IXGBE_REQUEST_TASK_MOD; in ixgbe_msix_link()
2750 if ((hw->mac.type == ixgbe_mac_82599EB) && in ixgbe_msix_link()
2754 sc->task_requests |= IXGBE_REQUEST_TASK_MSF; in ixgbe_msix_link()
2759 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) { in ixgbe_msix_link()
2766 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && in ixgbe_msix_link()
2769 sc->task_requests |= IXGBE_REQUEST_TASK_PHY; in ixgbe_msix_link()
2772 return (sc->task_requests != 0) ? in ixgbe_msix_link()
2782 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1); in ixgbe_sysctl_interrupt_rate_handler()
2786 if (atomic_load_acq_int(&que->sc->recovery_mode)) in ixgbe_sysctl_interrupt_rate_handler()
2789 reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix)); in ixgbe_sysctl_interrupt_rate_handler()
2796 if (error || !req->newptr) in ixgbe_sysctl_interrupt_rate_handler()
2806 IXGBE_WRITE_REG(&que->sc->hw, IXGBE_EITR(que->msix), reg); in ixgbe_sysctl_interrupt_rate_handler()
2819 struct ixgbe_hw *hw = &sc->hw; in ixgbe_add_device_sysctls()
2837 sc->enable_aim = ixgbe_enable_aim; in ixgbe_add_device_sysctls()
2839 &sc->enable_aim, 0, "Interrupt Moderation"); in ixgbe_add_device_sysctls()
2875 if (hw->mac.type >= ixgbe_mac_X550) in ixgbe_add_device_sysctls()
2881 /* for WoL-capable devices */ in ixgbe_add_device_sysctls()
2882 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { in ixgbe_add_device_sysctls()
2894 /* for X552/X557-AT devices */ in ixgbe_add_device_sysctls()
2895 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { in ixgbe_add_device_sysctls()
2916 if (sc->feat_cap & IXGBE_FEATURE_EEE) { in ixgbe_add_device_sysctls()
2934 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, in ixgbe_allocate_pci_resources()
2937 if (!(sc->pci_mem)) { in ixgbe_allocate_pci_resources()
2944 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem); in ixgbe_allocate_pci_resources()
2945 sc->osdep.mem_bus_space_handle = in ixgbe_allocate_pci_resources()
2946 rman_get_bushandle(sc->pci_mem); in ixgbe_allocate_pci_resources()
2948 sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle; in ixgbe_allocate_pci_resources()
2954 * ixgbe_detach - Device removal routine
2972 device_printf(dev, "SR-IOV in use; detach first.\n"); in ixgbe_if_detach()
2979 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT); in ixgbe_if_detach()
2981 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext); in ixgbe_if_detach()
2983 callout_drain(&sc->fw_mode_timer); in ixgbe_if_detach()
2986 free(sc->mta, M_IXGBE); in ixgbe_if_detach()
2992 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3000 struct ixgbe_hw *hw = &sc->hw; in ixgbe_setup_low_power_mode()
3004 if (!hw->wol_enabled) in ixgbe_setup_low_power_mode()
3008 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && in ixgbe_setup_low_power_mode()
3009 hw->phy.ops.enter_lplu) { in ixgbe_setup_low_power_mode()
3025 IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc); in ixgbe_setup_low_power_mode()
3032 hw->phy.reset_disable = true; in ixgbe_setup_low_power_mode()
3034 error = hw->phy.ops.enter_lplu(hw); in ixgbe_setup_low_power_mode()
3038 hw->phy.reset_disable = false; in ixgbe_setup_low_power_mode()
3048 * ixgbe_shutdown - Shutdown entry point
3090 struct ixgbe_hw *hw = &sc->hw; in ixgbe_if_resume()
3101 /* And clear WUFC until next low-power transition */ in ixgbe_if_resume()
3105 * Required after D3->D0 transition; in ixgbe_if_resume()
3106 * will re-advertise all previous advertised speeds in ixgbe_if_resume()
3115 * ixgbe_if_mtu_set - Ioctl mtu entry point
3130 sc->max_frame_size = mtu + IXGBE_MTU_HDR; in ixgbe_if_mtu_set()
3143 struct ixgbe_hw *hw = &sc->hw; in ixgbe_if_crcstrip_set()
3183 * ixgbe_if_init - Init entry point
3198 struct ixgbe_hw *hw = &sc->hw; in ixgbe_if_init()
3213 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV); in ixgbe_if_init()
3216 bcopy(if_getlladdr(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS); in ixgbe_if_init()
3217 ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1); in ixgbe_if_init()
3218 hw->addr_ctrl.rar_used_count = 1; in ixgbe_if_init()
3230 sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx); in ixgbe_if_init()
3237 * from MSI-X interrupts in ixgbe_if_init()
3239 sc->task_requests = 0; in ixgbe_if_init()
3241 /* Enable SDP & MSI-X interrupts based on adapter */ in ixgbe_if_init()
3249 mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT; in ixgbe_if_init()
3254 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; in ixgbe_if_init()
3256 struct tx_ring *txr = &tx_que->txr; in ixgbe_if_init()
3258 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me)); in ixgbe_if_init()
3270 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl); in ixgbe_if_init()
3273 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; in ixgbe_if_init()
3275 struct rx_ring *rxr = &rx_que->rxr; in ixgbe_if_init()
3277 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); in ixgbe_if_init()
3278 if (hw->mac.type == ixgbe_mac_82598EB) { in ixgbe_if_init()
3288 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl); in ixgbe_if_init()
3290 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) & in ixgbe_if_init()
3301 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_if_init()
3306 /* Set up MSI/MSI-X routing */ in ixgbe_if_init()
3309 /* Set up auto-mask */ in ixgbe_if_init()
3310 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_if_init()
3326 * need to be kick-started in ixgbe_if_init()
3328 if (hw->phy.type == ixgbe_phy_none) { in ixgbe_if_init()
3329 err = hw->phy.ops.identify(hw); in ixgbe_if_init()
3338 IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->vector), IXGBE_LINK_ITR); in ixgbe_if_init()
3362 if (sc->feat_en & IXGBE_FEATURE_SRIOV) { in ixgbe_if_init()
3373 * Setup the correct IVAR register for a particular MSI-X interrupt
3375 * - entry is the register array entry
3376 * - vector is the MSI-X vector for this queue
3377 * - type is RX/TX/MISC
3382 struct ixgbe_hw *hw = &sc->hw; in ixgbe_set_ivar()
3387 switch (hw->mac.type) { in ixgbe_set_ivar()
3389 if (type == -1) in ixgbe_set_ivar()
3397 IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar); in ixgbe_set_ivar()
3404 if (type == -1) { /* MISC IVAR */ in ixgbe_set_ivar()
3428 struct ix_rx_queue *rx_que = sc->rx_queues; in ixgbe_configure_ivars()
3429 struct ix_tx_queue *tx_que = sc->tx_queues; in ixgbe_configure_ivars()
3439 sc->dmac = 0; in ixgbe_configure_ivars()
3443 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) { in ixgbe_configure_ivars()
3444 struct rx_ring *rxr = &rx_que->rxr; in ixgbe_configure_ivars()
3447 ixgbe_set_ivar(sc, rxr->me, rx_que->msix, 0); in ixgbe_configure_ivars()
3450 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(rx_que->msix), newitr); in ixgbe_configure_ivars()
3452 for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) { in ixgbe_configure_ivars()
3453 struct tx_ring *txr = &tx_que->txr; in ixgbe_configure_ivars()
3456 ixgbe_set_ivar(sc, txr->me, tx_que->msix, 1); in ixgbe_configure_ivars()
3459 ixgbe_set_ivar(sc, 1, sc->vector, -1); in ixgbe_configure_ivars()
3468 struct ixgbe_hw *hw = &sc->hw; in ixgbe_config_gpie()
3473 if (sc->intr_type == IFLIB_INTR_MSIX) { in ixgbe_config_gpie()
3474 /* Enable Enhanced MSI-X mode */ in ixgbe_config_gpie()
3482 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) in ixgbe_config_gpie()
3486 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) in ixgbe_config_gpie()
3490 switch (hw->mac.type) { in ixgbe_config_gpie()
3509 * Requires sc->max_frame_size to be set.
3514 struct ixgbe_hw *hw = &sc->hw; in ixgbe_config_delay_values()
3517 frame = sc->max_frame_size; in ixgbe_config_delay_values()
3520 switch (hw->mac.type) { in ixgbe_config_delay_values()
3533 hw->fc.high_water[0] = rxpb - size; in ixgbe_config_delay_values()
3536 switch (hw->mac.type) { in ixgbe_config_delay_values()
3547 hw->fc.low_water[0] = IXGBE_BT2KB(tmp); in ixgbe_config_delay_values()
3549 hw->fc.pause_time = IXGBE_FC_PAUSE; in ixgbe_config_delay_values()
3550 hw->fc.send_xon = true; in ixgbe_config_delay_values()
3554 * ixgbe_set_multi - Multicast Update
3562 struct ixgbe_mc_addr *mta = sc->mta; in ixgbe_mc_filter_apply()
3567 mta[idx].vmdq = sc->pool; in ixgbe_mc_filter_apply()
3584 mta = sc->mta; in ixgbe_if_multi_set()
3592 ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt, in ixgbe_if_multi_set()
3596 fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL); in ixgbe_if_multi_set()
3607 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl); in ixgbe_if_multi_set()
3623 *vmdq = mta->vmdq; in ixgbe_mc_array_itr()
3627 return (mta->addr); in ixgbe_mc_array_itr()
3631 * ixgbe_local_timer - Timer routine
3645 if (sc->sfp_probe) in ixgbe_if_timer()
3649 ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0); in ixgbe_if_timer()
3657 * ixgbe_fw_mode_timer - FW mode timer routine
3663 struct ixgbe_hw *hw = &sc->hw; in ixgbe_fw_mode_timer()
3666 if (atomic_cmpset_acq_int(&sc->recovery_mode, 0, 1)) { in ixgbe_fw_mode_timer()
3668 device_printf(sc->dev, in ixgbe_fw_mode_timer()
3674 if (hw->adapter_stopped == FALSE) in ixgbe_fw_mode_timer()
3675 ixgbe_if_stop(sc->ctx); in ixgbe_fw_mode_timer()
3678 atomic_cmpset_acq_int(&sc->recovery_mode, 1, 0); in ixgbe_fw_mode_timer()
3681 callout_reset(&sc->fw_mode_timer, hz, in ixgbe_fw_mode_timer()
3694 struct ixgbe_hw *hw = &sc->hw; in ixgbe_sfp_probe()
3698 if ((hw->phy.type == ixgbe_phy_nl) && in ixgbe_sfp_probe()
3699 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) { in ixgbe_sfp_probe()
3700 s32 ret = hw->phy.ops.identify_sfp(hw); in ixgbe_sfp_probe()
3703 ret = hw->phy.ops.reset(hw); in ixgbe_sfp_probe()
3704 sc->sfp_probe = false; in ixgbe_sfp_probe()
3722 * ixgbe_handle_mod - Tasklet for SFP module interrupts
3729 struct ixgbe_hw *hw = &sc->hw; in ixgbe_handle_mod()
3733 if (sc->hw.need_crosstalk_fix) { in ixgbe_handle_mod()
3734 switch (hw->mac.type) { in ixgbe_handle_mod()
3752 err = hw->phy.ops.identify_sfp(hw); in ixgbe_handle_mod()
3759 if (hw->mac.type == ixgbe_mac_82598EB) in ixgbe_handle_mod()
3760 err = hw->phy.ops.reset(hw); in ixgbe_handle_mod()
3762 err = hw->mac.ops.setup_sfp(hw); in ixgbe_handle_mod()
3766 "Setup failure - unsupported SFP+ module type.\n"); in ixgbe_handle_mod()
3769 sc->task_requests |= IXGBE_REQUEST_TASK_MSF; in ixgbe_handle_mod()
3773 sc->task_requests &= ~(IXGBE_REQUEST_TASK_MSF); in ixgbe_handle_mod()
3778 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3785 struct ixgbe_hw *hw = &sc->hw; in ixgbe_handle_msf()
3789 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */ in ixgbe_handle_msf()
3790 sc->phy_layer = ixgbe_get_supported_physical_layer(hw); in ixgbe_handle_msf()
3792 autoneg = hw->phy.autoneg_advertised; in ixgbe_handle_msf()
3793 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) in ixgbe_handle_msf()
3794 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate); in ixgbe_handle_msf()
3795 if (hw->mac.ops.setup_link) in ixgbe_handle_msf()
3796 hw->mac.ops.setup_link(hw, autoneg, true); in ixgbe_handle_msf()
3799 ifmedia_removeall(sc->media); in ixgbe_handle_msf()
3800 ixgbe_add_media_types(sc->ctx); in ixgbe_handle_msf()
3801 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO); in ixgbe_handle_msf()
3805 * ixgbe_handle_phy - Tasklet for external PHY interrupts
3812 struct ixgbe_hw *hw = &sc->hw; in ixgbe_handle_phy()
3815 error = hw->phy.ops.handle_lasi(hw); in ixgbe_handle_phy()
3817 device_printf(sc->dev, in ixgbe_handle_phy()
3821 device_printf(sc->dev, in ixgbe_handle_phy()
3826 * ixgbe_if_stop - Stop the hardware
3835 struct ixgbe_hw *hw = &sc->hw; in ixgbe_if_stop()
3840 hw->adapter_stopped = false; in ixgbe_if_stop()
3842 if (hw->mac.type == ixgbe_mac_82599EB) in ixgbe_if_stop()
3844 /* Turn off the laser - noop with no optics */ in ixgbe_if_stop()
3848 sc->link_up = false; in ixgbe_if_stop()
3852 ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV); in ixgbe_if_stop()
3858 * ixgbe_update_link_status - Update OS on link state
3870 if (sc->link_up) { in ixgbe_if_update_admin_status()
3871 if (sc->link_active == false) { in ixgbe_if_update_admin_status()
3874 ((sc->link_speed == 128) ? 10 : 1), in ixgbe_if_update_admin_status()
3876 sc->link_active = true; in ixgbe_if_update_admin_status()
3878 ixgbe_fc_enable(&sc->hw); in ixgbe_if_update_admin_status()
3882 ixgbe_link_speed_to_baudrate(sc->link_speed)); in ixgbe_if_update_admin_status()
3884 if (sc->feat_en & IXGBE_FEATURE_SRIOV) in ixgbe_if_update_admin_status()
3888 if (sc->link_active == true) { in ixgbe_if_update_admin_status()
3892 sc->link_active = false; in ixgbe_if_update_admin_status()
3893 if (sc->feat_en & IXGBE_FEATURE_SRIOV) in ixgbe_if_update_admin_status()
3899 if (sc->task_requests & IXGBE_REQUEST_TASK_MOD) in ixgbe_if_update_admin_status()
3901 if (sc->task_requests & IXGBE_REQUEST_TASK_MSF) in ixgbe_if_update_admin_status()
3903 if (sc->task_requests & IXGBE_REQUEST_TASK_MBX) in ixgbe_if_update_admin_status()
3905 if (sc->task_requests & IXGBE_REQUEST_TASK_FDIR) in ixgbe_if_update_admin_status()
3907 if (sc->task_requests & IXGBE_REQUEST_TASK_PHY) in ixgbe_if_update_admin_status()
3909 sc->task_requests = 0; in ixgbe_if_update_admin_status()
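/*
 * The bare 128 in the link-up message above is IXGBE_LINK_SPEED_10GB_FULL
 * (0x80) from the shared code, so the log reports 10 Gbps for a 10G link
 * and 1 Gbps otherwise.  A more self-documenting equivalent would be:
 *
 *	((sc->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 10 : 1)
 */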
3915 * ixgbe_config_dmac - Configure DMA Coalescing
3920 struct ixgbe_hw *hw = &sc->hw; in ixgbe_config_dmac()
3921 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config; in ixgbe_config_dmac()
3923 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config) in ixgbe_config_dmac()
3926 if (dcfg->watchdog_timer ^ sc->dmac || in ixgbe_config_dmac()
3927 dcfg->link_speed ^ sc->link_speed) { in ixgbe_config_dmac()
3928 dcfg->watchdog_timer = sc->dmac; in ixgbe_config_dmac()
3929 dcfg->fcoe_en = false; in ixgbe_config_dmac()
3930 dcfg->link_speed = sc->link_speed; in ixgbe_config_dmac()
3931 dcfg->num_tcs = 1; in ixgbe_config_dmac()
3933 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n", in ixgbe_config_dmac()
3934 dcfg->watchdog_timer, dcfg->link_speed); in ixgbe_config_dmac()
3936 hw->mac.ops.dmac_config(hw); in ixgbe_config_dmac()
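/*
 * The XOR tests guarding the block above are simply "value differs"
 * checks; the reconfiguration condition is equivalent to:
 *
 *	if (dcfg->watchdog_timer != sc->dmac ||
 *	    dcfg->link_speed != sc->link_speed)
 *		... re-program DMA coalescing ...
 */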
3947 struct ixgbe_hw *hw = &sc->hw; in ixgbe_if_enable_intr()
3948 struct ix_rx_queue *que = sc->rx_queues; in ixgbe_if_enable_intr()
3953 switch (sc->hw.mac.type) { in ixgbe_if_enable_intr()
3977 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP || in ixgbe_if_enable_intr()
3978 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP || in ixgbe_if_enable_intr()
3979 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N || in ixgbe_if_enable_intr()
3980 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) in ixgbe_if_enable_intr()
3982 if (hw->phy.type == ixgbe_phy_x550em_ext_t) in ixgbe_if_enable_intr()
3991 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) in ixgbe_if_enable_intr()
3993 /* Enable SR-IOV */ in ixgbe_if_enable_intr()
3994 if (sc->feat_en & IXGBE_FEATURE_SRIOV) in ixgbe_if_enable_intr()
3997 if (sc->feat_en & IXGBE_FEATURE_FDIR) in ixgbe_if_enable_intr()
4002 /* With MSI-X we use auto clear */ in ixgbe_if_enable_intr()
4003 if (sc->intr_type == IFLIB_INTR_MSIX) { in ixgbe_if_enable_intr()
4008 if (sc->feat_cap & IXGBE_FEATURE_SRIOV) in ixgbe_if_enable_intr()
4015 * allow for handling the extended (beyond 32) MSI-X in ixgbe_if_enable_intr()
4018 for (int i = 0; i < sc->num_rx_queues; i++, que++) in ixgbe_if_enable_intr()
4019 ixgbe_enable_queue(sc, que->msix); in ixgbe_if_enable_intr()
4033 if (sc->intr_type == IFLIB_INTR_MSIX) in ixgbe_if_disable_intr()
4034 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0); in ixgbe_if_disable_intr()
4035 if (sc->hw.mac.type == ixgbe_mac_82598EB) { in ixgbe_if_disable_intr()
4036 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0); in ixgbe_if_disable_intr()
4038 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000); in ixgbe_if_disable_intr()
4039 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0); in ixgbe_if_disable_intr()
4040 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0); in ixgbe_if_disable_intr()
4042 IXGBE_WRITE_FLUSH(&sc->hw); in ixgbe_if_disable_intr()
4053 &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw; in ixgbe_link_intr_enable()
4055 /* Re-enable other interrupts */ in ixgbe_link_intr_enable()
4066 struct ix_rx_queue *que = &sc->rx_queues[rxqid]; in ixgbe_if_rx_queue_intr_enable()
4068 ixgbe_enable_queue(sc, que->msix); in ixgbe_if_rx_queue_intr_enable()
4079 struct ixgbe_hw *hw = &sc->hw; in ixgbe_enable_queue()
4083 if (hw->mac.type == ixgbe_mac_82598EB) { in ixgbe_enable_queue()
4102 struct ixgbe_hw *hw = &sc->hw; in ixgbe_disable_queue()
4106 if (hw->mac.type == ixgbe_mac_82598EB) { in ixgbe_disable_queue()
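/*
 * Minimal sketch of the masking done by the two queue helpers above,
 * using the register macros seen elsewhere in this file; the exact body
 * in the driver may differ.  The per-queue bit lives in a 64-bit mask so
 * that MSI-X vectors beyond 31 land in the second extended register,
 * which is what the "extended (beyond 32) MSI-X vectors" comment in
 * ixgbe_if_enable_intr() refers to.
 */
	u64 queue = 1ULL << vector;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMS,
		    (u32)(IXGBE_EIMS_RTX_QUEUE & queue));
	} else {
		if (queue & 0xFFFFFFFF)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0),
			    (u32)(queue & 0xFFFFFFFF));
		if (queue >> 32)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1),
			    (u32)(queue >> 32));
	}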
4120 * ixgbe_intr - Legacy Interrupt Service Routine
4126 struct ix_rx_queue *que = sc->rx_queues; in ixgbe_intr()
4127 struct ixgbe_hw *hw = &sc->hw; in ixgbe_intr()
4128 if_ctx_t ctx = sc->ctx; in ixgbe_intr()
4133 ++que->irqs; in ixgbe_intr()
4140 if ((sc->feat_en & IXGBE_FEATURE_FAN_FAIL) && in ixgbe_intr()
4142 device_printf(sc->dev, in ixgbe_intr()
4155 /* Pluggable optics-related interrupt */ in ixgbe_intr()
4156 if (hw->mac.type >= ixgbe_mac_X540) in ixgbe_intr()
4163 sc->task_requests |= IXGBE_REQUEST_TASK_MOD; in ixgbe_intr()
4166 if ((hw->mac.type == ixgbe_mac_82599EB) && in ixgbe_intr()
4170 sc->task_requests |= IXGBE_REQUEST_TASK_MSF; in ixgbe_intr()
4175 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && in ixgbe_intr()
4177 sc->task_requests |= IXGBE_REQUEST_TASK_PHY; in ixgbe_intr()
4189 struct ix_rx_queue *que = sc->rx_queues; in ixgbe_free_pci_resources()
4192 /* Release all MSI-X queue resources */ in ixgbe_free_pci_resources()
4193 if (sc->intr_type == IFLIB_INTR_MSIX) in ixgbe_free_pci_resources()
4194 iflib_irq_free(ctx, &sc->irq); in ixgbe_free_pci_resources()
4197 for (int i = 0; i < sc->num_rx_queues; i++, que++) { in ixgbe_free_pci_resources()
4198 iflib_irq_free(ctx, &que->que_irq); in ixgbe_free_pci_resources()
4202 if (sc->pci_mem != NULL) in ixgbe_free_pci_resources()
4204 rman_get_rid(sc->pci_mem), sc->pci_mem); in ixgbe_free_pci_resources()
4219 fc = sc->hw.fc.current_mode; in ixgbe_sysctl_flowcntl()
4222 if ((error) || (req->newptr == NULL)) in ixgbe_sysctl_flowcntl()
4226 if (fc == sc->hw.fc.current_mode) in ixgbe_sysctl_flowcntl()
4233 * ixgbe_set_flowcntl - Set flow control
4236 * 0 - off
4237 * 1 - rx pause
4238 * 2 - tx pause
4239 * 3 - full
4248 sc->hw.fc.requested_mode = fc; in ixgbe_set_flowcntl()
4249 if (sc->num_rx_queues > 1) in ixgbe_set_flowcntl()
4253 sc->hw.fc.requested_mode = ixgbe_fc_none; in ixgbe_set_flowcntl()
4254 if (sc->num_rx_queues > 1) in ixgbe_set_flowcntl()
4262 sc->hw.fc.disable_fc_autoneg = true; in ixgbe_set_flowcntl()
4263 ixgbe_fc_enable(&sc->hw); in ixgbe_set_flowcntl()
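/*
 * Sketch of the 0-3 mapping documented above onto the shared-code flow
 * control modes (enum values from ixgbe_type.h; helper name illustrative).
 * Out-of-range input is rejected by the sysctl handler itself.
 */
static enum ixgbe_fc_mode
example_fc_mode(int fc)
{
	switch (fc) {
	case 0:
		return (ixgbe_fc_none);     /* flow control off */
	case 1:
		return (ixgbe_fc_rx_pause); /* honor received PAUSE frames */
	case 2:
		return (ixgbe_fc_tx_pause); /* send PAUSE when congested */
	case 3:
	default:
		return (ixgbe_fc_full);     /* PAUSE in both directions */
	}
}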
4280 struct ixgbe_hw *hw = &sc->hw; in ixgbe_enable_rx_drop()
4284 for (int i = 0; i < sc->num_rx_queues; i++) { in ixgbe_enable_rx_drop()
4285 rxr = &sc->rx_queues[i].rxr; in ixgbe_enable_rx_drop()
4286 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me)); in ixgbe_enable_rx_drop()
4288 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl); in ixgbe_enable_rx_drop()
4292 for (int i = 0; i < sc->num_vfs; i++) { in ixgbe_enable_rx_drop()
4306 struct ixgbe_hw *hw = &sc->hw; in ixgbe_disable_rx_drop()
4310 for (int i = 0; i < sc->num_rx_queues; i++) { in ixgbe_disable_rx_drop()
4311 rxr = &sc->rx_queues[i].rxr; in ixgbe_disable_rx_drop()
4312 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me)); in ixgbe_disable_rx_drop()
4314 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl); in ixgbe_disable_rx_drop()
4318 for (int i = 0; i < sc->num_vfs; i++) { in ixgbe_disable_rx_drop()
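/*
 * The bit toggled by the two loops above is each queue's SRRCTL
 * drop-enable flag (IXGBE_SRRCTL_DROP_EN in ixgbe_type.h).  It is set
 * when flow control is off so a stalled ring drops packets instead of
 * back-pressuring the other queues, and cleared again when PAUSE-based
 * flow control is requested:
 *
 *	srrctl |= IXGBE_SRRCTL_DROP_EN;     (ixgbe_enable_rx_drop)
 *	srrctl &= ~IXGBE_SRRCTL_DROP_EN;    (ixgbe_disable_rx_drop)
 */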
4327 * SYSCTL wrapper around setting advertised speed
4336 if (atomic_load_acq_int(&sc->recovery_mode)) in ixgbe_sysctl_advertise()
4339 advertise = sc->advertise; in ixgbe_sysctl_advertise()
4342 if ((error) || (req->newptr == NULL)) in ixgbe_sysctl_advertise()
4349 * ixgbe_set_advertise - Control advertised link speed
4352 * 0x1 - advertise 100 Mb
4353 * 0x2 - advertise 1G
4354 * 0x4 - advertise 10G
4355 * 0x8 - advertise 10 Mb (yes, Mb)
4356 * 0x10 - advertise 2.5G (disabled by default)
4357 * 0x20 - advertise 5G (disabled by default)
4363 device_t dev = iflib_get_dev(sc->ctx); in ixgbe_set_advertise()
4365 ixgbe_link_speed speed = 0; in ixgbe_set_advertise() local
4371 if (sc->advertise == advertise) /* no change */ in ixgbe_set_advertise()
4374 hw = &sc->hw; in ixgbe_set_advertise()
4376 /* No speed changes for backplane media */ in ixgbe_set_advertise()
4377 if (hw->phy.media_type == ixgbe_media_type_backplane) in ixgbe_set_advertise()
4380 if (!((hw->phy.media_type == ixgbe_media_type_copper) || in ixgbe_set_advertise()
4381 (hw->phy.multispeed_fiber))) { in ixgbe_set_advertise()
4383 "Advertised speed can only be set on copper or multispeed" in ixgbe_set_advertise()
4390 "Invalid advertised speed; valid modes are 0x1 through" in ixgbe_set_advertise()
4395 if (hw->mac.ops.get_link_capabilities) { in ixgbe_set_advertise()
4396 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, in ixgbe_set_advertise()
4406 /* Set new value and report new advertised mode */ in ixgbe_set_advertise()
4411 " speed\n"); in ixgbe_set_advertise()
4414 speed |= IXGBE_LINK_SPEED_100_FULL; in ixgbe_set_advertise()
4419 "Interface does not support 1Gb advertised speed" in ixgbe_set_advertise()
4423 speed |= IXGBE_LINK_SPEED_1GB_FULL; in ixgbe_set_advertise()
4428 "Interface does not support 10Gb advertised speed" in ixgbe_set_advertise()
4432 speed |= IXGBE_LINK_SPEED_10GB_FULL; in ixgbe_set_advertise()
4437 "Interface does not support 10Mb advertised speed" in ixgbe_set_advertise()
4441 speed |= IXGBE_LINK_SPEED_10_FULL; in ixgbe_set_advertise()
4446 "Interface does not support 2.5G advertised speed" in ixgbe_set_advertise()
4450 speed |= IXGBE_LINK_SPEED_2_5GB_FULL; in ixgbe_set_advertise()
4455 "Interface does not support 5G advertised speed" in ixgbe_set_advertise()
4459 speed |= IXGBE_LINK_SPEED_5GB_FULL; in ixgbe_set_advertise()
4462 hw->mac.autotry_restart = true; in ixgbe_set_advertise()
4463 hw->mac.ops.setup_link(hw, speed, true); in ixgbe_set_advertise()
4464 sc->advertise = advertise; in ixgbe_set_advertise()
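/*
 * Condensed sketch of the bit-to-speed translation performed above
 * (constants as used in this function; helper name illustrative).  The
 * real handler additionally rejects bits the PHY cannot advertise, as
 * the error messages above show.
 */
static ixgbe_link_speed
example_advertise_to_speed(int advertise)
{
	ixgbe_link_speed speed = 0;

	if (advertise & 0x1)
		speed |= IXGBE_LINK_SPEED_100_FULL;
	if (advertise & 0x2)
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
	if (advertise & 0x4)
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
	if (advertise & 0x8)
		speed |= IXGBE_LINK_SPEED_10_FULL;
	if (advertise & 0x10)
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
	if (advertise & 0x20)
		speed |= IXGBE_LINK_SPEED_5GB_FULL;

	return (speed);
}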
4470 * ixgbe_get_default_advertise - Get default advertised speed settings
4474 * 0x1 - advertise 100 Mb
4475 * 0x2 - advertise 1G
4476 * 0x4 - advertise 10G
4477 * 0x8 - advertise 10 Mb (yes, Mb)
4478 * 0x10 - advertise 2.5G (disabled by default)
4479 * 0x20 - advertise 5G (disabled by default)
4484 struct ixgbe_hw *hw = &sc->hw; in ixgbe_get_default_advertise()
4485 int speed; in ixgbe_get_default_advertise() local
4491 * Advertised speed means nothing unless it's copper or in ixgbe_get_default_advertise()
4492 * multi-speed fiber in ixgbe_get_default_advertise()
4494 if (!(hw->phy.media_type == ixgbe_media_type_copper) && in ixgbe_get_default_advertise()
4495 !(hw->phy.multispeed_fiber)) in ixgbe_get_default_advertise()
4498 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate); in ixgbe_get_default_advertise()
4502 if (hw->mac.type == ixgbe_mac_X550) { in ixgbe_get_default_advertise()
4512 speed = in ixgbe_get_default_advertise()
4520 return speed; in ixgbe_get_default_advertise()
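/*
 * The default is derived in the opposite direction: the PHY's capability
 * word is folded back into the same bit layout, sketched here for the
 * most common speeds only.
 *
 *	if (link_caps & IXGBE_LINK_SPEED_10GB_FULL) advertise |= 0x4;
 *	if (link_caps & IXGBE_LINK_SPEED_1GB_FULL)  advertise |= 0x2;
 *	if (link_caps & IXGBE_LINK_SPEED_100_FULL)  advertise |= 0x1;
 */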
4524 * ixgbe_sysctl_dmac - Manage DMA Coalescing
4527 * 0/1 - off / on (use default value of 1000)
4538 if_t ifp = iflib_get_ifp(sc->ctx); in ixgbe_sysctl_dmac()
4542 newval = sc->dmac; in ixgbe_sysctl_dmac()
4544 if ((error) || (req->newptr == NULL)) in ixgbe_sysctl_dmac()
4550 sc->dmac = 0; in ixgbe_sysctl_dmac()
4554 sc->dmac = 1000; in ixgbe_sysctl_dmac()
4564 /* Legal values - allow */ in ixgbe_sysctl_dmac()
4565 sc->dmac = newval; in ixgbe_sysctl_dmac()
4572 /* Re-initialize hardware if it's already running */ in ixgbe_sysctl_dmac()
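/*
 * Value handling implied by the fragments above: 0 turns DMA coalescing
 * off, 1 turns it on with the 1000 usec default watchdog, and other
 * accepted values are stored as the watchdog interval directly (anything
 * outside the handler's legal set is rejected).  The new setting reaches
 * hardware through ixgbe_config_dmac() when the interface is
 * (re)initialized.
 */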
4585 * 0 - set device to D0
4586 * 3 - set device to D3
4587 * (none) - get current device power state
4593 device_t dev = sc->dev; in ixgbe_sysctl_power_state()
4599 if ((error) || (req->newptr == NULL)) in ixgbe_sysctl_power_state()
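/*
 * The sysctl value is the raw PCI power-state number, so 0 selects D0
 * (fully powered) and 3 selects D3; reading the node without supplying a
 * new value simply reports the current state, which the handler can
 * obtain with the standard pci_get_powerstate(9) accessor.  Values other
 * than 0 or 3 are not meaningful here.
 */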
4625 * 0 - disabled
4626 * 1 - enabled
4632 struct ixgbe_hw *hw = &sc->hw; in ixgbe_sysctl_wol_enable()
4636 new_wol_enabled = hw->wol_enabled; in ixgbe_sysctl_wol_enable()
4638 if ((error) || (req->newptr == NULL)) in ixgbe_sysctl_wol_enable()
4641 if (new_wol_enabled == hw->wol_enabled) in ixgbe_sysctl_wol_enable()
4644 if (new_wol_enabled > 0 && !sc->wol_support) in ixgbe_sysctl_wol_enable()
4647 hw->wol_enabled = new_wol_enabled; in ixgbe_sysctl_wol_enable()
4653 * ixgbe_sysctl_wufc - Wake Up Filter Control
4658 * 0x1 - Link Status Change
4659 * 0x2 - Magic Packet
4660 * 0x4 - Direct Exact
4661 * 0x8 - Directed Multicast
4662 * 0x10 - Broadcast
4663 * 0x20 - ARP/IPv4 Request Packet
4664 * 0x40 - Direct IPv4 Packet
4665 * 0x80 - Direct IPv6 Packet
4676 new_wufc = sc->wufc; in ixgbe_sysctl_wufc()
4679 if ((error) || (req->newptr == NULL)) in ixgbe_sysctl_wufc()
4681 if (new_wufc == sc->wufc) in ixgbe_sysctl_wufc()
4688 new_wufc |= (0xffffff & sc->wufc); in ixgbe_sysctl_wufc()
4689 sc->wufc = new_wufc; in ixgbe_sysctl_wufc()
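/*
 * Example composition using the bit table above: wake only on a link
 * status change or a magic packet.  The shared code also provides named
 * macros for these bits (IXGBE_WUFC_LNKC, IXGBE_WUFC_MAG); the literal
 * form below follows the sysctl documentation.
 *
 *	new_wufc = 0x1 | 0x2;	(link status change + magic packet)
 */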
4702 struct ixgbe_hw *hw = &sc->hw; in ixgbe_sysctl_print_rss_config()
4703 device_t dev = sc->dev; in ixgbe_sysctl_print_rss_config()
4708 if (atomic_load_acq_int(&sc->recovery_mode)) in ixgbe_sysctl_print_rss_config()
4719 switch (sc->hw.mac.type) { in ixgbe_sysctl_print_rss_config()
4737 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32)); in ixgbe_sysctl_print_rss_config()
4738 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg); in ixgbe_sysctl_print_rss_config()
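/*
 * Indexing used by the dump above: the first 32 redirection-table
 * entries are read from RETA(i), and on the X550 family the remaining
 * entries come from ERETA(i - 32):
 *
 *	reg = (i < 32) ? IXGBE_READ_REG(hw, IXGBE_RETA(i))
 *	               : IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
 */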
4755 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4757 * For X552/X557-AT devices using an external PHY
4763 struct ixgbe_hw *hw = &sc->hw; in ixgbe_sysctl_phy_temp()
4766 if (atomic_load_acq_int(&sc->recovery_mode)) in ixgbe_sysctl_phy_temp()
4769 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { in ixgbe_sysctl_phy_temp()
4770 device_printf(iflib_get_dev(sc->ctx), in ixgbe_sysctl_phy_temp()
4775 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP, in ixgbe_sysctl_phy_temp()
4777 device_printf(iflib_get_dev(sc->ctx), in ixgbe_sysctl_phy_temp()
4799 struct ixgbe_hw *hw = &sc->hw; in ixgbe_sysctl_phy_overtemp_occurred()
4802 if (atomic_load_acq_int(&sc->recovery_mode)) in ixgbe_sysctl_phy_overtemp_occurred()
4805 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { in ixgbe_sysctl_phy_overtemp_occurred()
4806 device_printf(iflib_get_dev(sc->ctx), in ixgbe_sysctl_phy_overtemp_occurred()
4811 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS, in ixgbe_sysctl_phy_overtemp_occurred()
4813 device_printf(iflib_get_dev(sc->ctx), in ixgbe_sysctl_phy_overtemp_occurred()
4829 * 0 - disable EEE
4830 * 1 - enable EEE
4831 * (none) - get current device EEE state
4837 device_t dev = sc->dev; in ixgbe_sysctl_eee_state()
4838 if_t ifp = iflib_get_ifp(sc->ctx); in ixgbe_sysctl_eee_state()
4842 if (atomic_load_acq_int(&sc->recovery_mode)) in ixgbe_sysctl_eee_state()
4845 curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE); in ixgbe_sysctl_eee_state()
4848 if ((error) || (req->newptr == NULL)) in ixgbe_sysctl_eee_state()
4856 if (!(sc->feat_cap & IXGBE_FEATURE_EEE)) in ixgbe_sysctl_eee_state()
4863 retval = ixgbe_setup_eee(&sc->hw, new_eee); in ixgbe_sysctl_eee_state()
4869 /* Restart auto-neg */ in ixgbe_sysctl_eee_state()
4876 sc->feat_en |= IXGBE_FEATURE_EEE; in ixgbe_sysctl_eee_state()
4878 sc->feat_en &= ~IXGBE_FEATURE_EEE; in ixgbe_sysctl_eee_state()
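/*
 * Usage note for the handler above: enabling EEE is only attempted when
 * the feature is present in feat_cap, and a successful ixgbe_setup_eee()
 * call is mirrored into feat_en so the rest of the driver sees the
 * current state, roughly:
 *
 *	if (ixgbe_setup_eee(&sc->hw, true) == IXGBE_SUCCESS)
 *		sc->feat_en |= IXGBE_FEATURE_EEE;
 */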
4890 sc = oidp->oid_arg1; in ixgbe_sysctl_tso_tcp_flags_mask()
4891 switch (oidp->oid_arg2) { in ixgbe_sysctl_tso_tcp_flags_mask()
4908 val = IXGBE_READ_REG(&sc->hw, reg); in ixgbe_sysctl_tso_tcp_flags_mask()
4911 if (error != 0 || req->newptr == NULL) in ixgbe_sysctl_tso_tcp_flags_mask()
4916 IXGBE_WRITE_REG(&sc->hw, reg, val); in ixgbe_sysctl_tso_tcp_flags_mask()
4926 sc->feat_cap = IXGBE_FEATURE_NETMAP | in ixgbe_init_device_features()
4933 switch (sc->hw.mac.type) { in ixgbe_init_device_features()
4935 if (sc->hw.device_id == IXGBE_DEV_ID_82598AT) in ixgbe_init_device_features()
4936 sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL; in ixgbe_init_device_features()
4939 sc->feat_cap |= IXGBE_FEATURE_SRIOV; in ixgbe_init_device_features()
4940 sc->feat_cap |= IXGBE_FEATURE_FDIR; in ixgbe_init_device_features()
4941 if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) && in ixgbe_init_device_features()
4942 (sc->hw.bus.func == 0)) in ixgbe_init_device_features()
4943 sc->feat_cap |= IXGBE_FEATURE_BYPASS; in ixgbe_init_device_features()
4946 sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE; in ixgbe_init_device_features()
4947 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR; in ixgbe_init_device_features()
4948 sc->feat_cap |= IXGBE_FEATURE_SRIOV; in ixgbe_init_device_features()
4949 sc->feat_cap |= IXGBE_FEATURE_FDIR; in ixgbe_init_device_features()
4952 sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE; in ixgbe_init_device_features()
4953 sc->feat_cap |= IXGBE_FEATURE_SRIOV; in ixgbe_init_device_features()
4954 sc->feat_cap |= IXGBE_FEATURE_FDIR; in ixgbe_init_device_features()
4955 if (sc->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR) in ixgbe_init_device_features()
4956 sc->feat_cap |= IXGBE_FEATURE_EEE; in ixgbe_init_device_features()
4959 sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE; in ixgbe_init_device_features()
4960 sc->feat_cap |= IXGBE_FEATURE_SRIOV; in ixgbe_init_device_features()
4961 sc->feat_cap |= IXGBE_FEATURE_FDIR; in ixgbe_init_device_features()
4962 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ; in ixgbe_init_device_features()
4963 if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) || in ixgbe_init_device_features()
4964 (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) { in ixgbe_init_device_features()
4965 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR; in ixgbe_init_device_features()
4966 sc->feat_cap |= IXGBE_FEATURE_EEE; in ixgbe_init_device_features()
4970 sc->feat_cap |= IXGBE_FEATURE_SRIOV; in ixgbe_init_device_features()
4971 sc->feat_cap |= IXGBE_FEATURE_FDIR; in ixgbe_init_device_features()
4972 if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) && in ixgbe_init_device_features()
4973 (sc->hw.bus.func == 0)) in ixgbe_init_device_features()
4974 sc->feat_cap |= IXGBE_FEATURE_BYPASS; in ixgbe_init_device_features()
4975 if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) in ixgbe_init_device_features()
4976 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ; in ixgbe_init_device_features()
4984 if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL) in ixgbe_init_device_features()
4985 sc->feat_en |= IXGBE_FEATURE_FAN_FAIL; in ixgbe_init_device_features()
4987 if (sc->feat_cap & IXGBE_FEATURE_NETMAP) in ixgbe_init_device_features()
4988 sc->feat_en |= IXGBE_FEATURE_NETMAP; in ixgbe_init_device_features()
4990 if (sc->feat_cap & IXGBE_FEATURE_EEE) in ixgbe_init_device_features()
4991 sc->feat_en |= IXGBE_FEATURE_EEE; in ixgbe_init_device_features()
4993 if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR) in ixgbe_init_device_features()
4994 sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR; in ixgbe_init_device_features()
4996 if (sc->feat_cap & IXGBE_FEATURE_RECOVERY_MODE) in ixgbe_init_device_features()
4997 sc->feat_en |= IXGBE_FEATURE_RECOVERY_MODE; in ixgbe_init_device_features()
5002 if (sc->feat_cap & IXGBE_FEATURE_FDIR) in ixgbe_init_device_features()
5003 sc->feat_en |= IXGBE_FEATURE_FDIR; in ixgbe_init_device_features()
5005 device_printf(sc->dev, in ixgbe_init_device_features()
5010 * Message Signal Interrupts - Extended (MSI-X) in ixgbe_init_device_features()
5011 * Normal MSI is only enabled if MSI-X calls fail. in ixgbe_init_device_features()
5014 sc->feat_cap &= ~IXGBE_FEATURE_MSIX; in ixgbe_init_device_features()
5015 /* Receive-Side Scaling (RSS) */ in ixgbe_init_device_features()
5016 if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss) in ixgbe_init_device_features()
5017 sc->feat_en |= IXGBE_FEATURE_RSS; in ixgbe_init_device_features()
5020 /* No MSI-X */ in ixgbe_init_device_features()
5021 if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) { in ixgbe_init_device_features()
5022 sc->feat_cap &= ~IXGBE_FEATURE_RSS; in ixgbe_init_device_features()
5023 sc->feat_cap &= ~IXGBE_FEATURE_SRIOV; in ixgbe_init_device_features()
5024 sc->feat_en &= ~IXGBE_FEATURE_RSS; in ixgbe_init_device_features()
5025 sc->feat_en &= ~IXGBE_FEATURE_SRIOV; in ixgbe_init_device_features()
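/*
 * The pattern throughout ixgbe_init_device_features(): feat_cap records
 * what the MAC is capable of, feat_en records what is actually switched
 * on (capability gated by the loader tunables), so the enabled set
 * should always be a subset of the capable set.  As an assertion:
 *
 *	MPASS((sc->feat_en & ~sc->feat_cap) == 0);
 */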
5037 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) : in ixgbe_check_fan_failure()
5041 device_printf(sc->dev, in ixgbe_check_fan_failure()
5097 sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d", in ixgbe_sbuf_fw_version()
5116 struct ixgbe_hw *hw = &sc->hw; in ixgbe_print_fw_version()
5117 device_t dev = sc->dev; in ixgbe_print_fw_version()
5145 struct ixgbe_hw *hw = &sc->hw; in ixgbe_sysctl_print_fw_version()
5146 device_t dev = sc->dev; in ixgbe_sysctl_print_fw_version()