Lines Matching +full:pme +full:- +full:active +full:- +full:high
1 /*-
2 * SPDX-License-Identifier: BSD-4-Clause
4 * Copyright (c) 1997, 1998-2003
18 * 4. Neither the name of the author nor the names of any co-contributors
59 * o 64-bit DMA
63 * o High and normal priority transmit DMA rings
69 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
74 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
93 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
96 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
97 * (the 'S' stands for 'single-chip'). These devices have the same
98 * programming API as the older 8169, but also have some vendor-specific
99 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
100 * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
178 "D-Link DGE-528(T) Gigabit Ethernet Adapter" },
180 "D-Link DGE-530(T) Gigabit Ethernet Adapter" },
192 "TP-Link TG-3468 v2 (RTL8168) Gigabit Ethernet" },
196 "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },
198 "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" },
208 { RL_HWREV_8139AG, RL_8139, "A-G", RL_MTU },
243 { RL_HWREV_8168E_VL, RL_8169, "8168E/8111E-VL", RL_JUMBO_MTU_6K},
373 d = addr | (RL_9346_READ << sc->rl_eewidth); in re_eeprom_putbyte()
379 for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) { in re_eeprom_putbyte()
472 device_printf(sc->rl_dev, "PHY read failed\n"); in re_gmii_readreg()
504 device_printf(sc->rl_dev, "PHY write failed\n"); in re_gmii_writereg()
525 if (sc->rl_type == RL_8169) { in re_miibus_readreg()
559 device_printf(sc->rl_dev, "bad phy register\n"); in re_miibus_readreg()
563 if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) { in re_miibus_readreg()
579 if (sc->rl_type == RL_8169) { in re_miibus_writereg()
587 if (sc->rl_type == RL_8139CPLUS) { in re_miibus_writereg()
609 device_printf(sc->rl_dev, "bad phy register\n"); in re_miibus_writereg()
624 mii = device_get_softc(sc->rl_miibus); in re_miibus_statchg()
625 ifp = sc->rl_ifp; in re_miibus_statchg()
630 sc->rl_flags &= ~RL_FLAG_LINK; in re_miibus_statchg()
631 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == in re_miibus_statchg()
633 switch (IFM_SUBTYPE(mii->mii_media_active)) { in re_miibus_statchg()
636 sc->rl_flags |= RL_FLAG_LINK; in re_miibus_statchg()
639 if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0) in re_miibus_statchg()
641 sc->rl_flags |= RL_FLAG_LINK; in re_miibus_statchg()
649 * MACs for resolved speed, duplex and flow-control parameters. in re_miibus_statchg()
662 hashes[1] |= (1 << (h - 32)); in re_hash_maddr()
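The fragment above is the tail of the per-address hash callback. A minimal sketch of how such a filter is usually computed in this driver family, assuming the if_foreach_llmaddr() callback shape and the standard ether_crc32_be() approach (the top six bits of the big-endian CRC select one bit of a 64-bit table split across two 32-bit words, which re_set_rxmode() then programs into the chip's multicast filter registers):

static u_int
re_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t h, *hashes = arg;

	/* Top 6 bits of the big-endian CRC pick one of 64 filter bits. */
	h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
	if (h < 32)
		hashes[0] |= (1 << h);
	else
		hashes[1] |= (1 << (h - 32));
	return (1);
}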
668 * Set the RX configuration and 64-bit multicast hash filter.
679 ifp = sc->rl_ifp; in re_set_rxmode()
682 if ((sc->rl_flags & RL_FLAG_EARLYOFF) != 0) in re_set_rxmode()
684 else if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) in re_set_rxmode()
710 if ((sc->rl_flags & RL_FLAG_PCIE) != 0) { in re_set_rxmode()
718 if (sc->rl_hwrev->rl_rev == RL_HWREV_8168F) { in re_set_rxmode()
745 device_printf(sc->rl_dev, "reset never completed!\n"); in re_reset()
747 if ((sc->rl_flags & RL_FLAG_MACRESET) != 0) in re_reset()
749 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169S) in re_reset()
750 re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0); in re_reset()
757 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
758 * lines connected to the bus, however for a 32-bit only card, they
759 * should be pulled high. The result of this defect is that the
760 * NIC will not work right if you plug it into a 64-bit slot: DMA
761 * operations will be done with 64-bit transfers, which will fail
762 * because the 64-bit data lines aren't connected.
770 * a defective NIC which has been mistakenly plugged into a 64-bit PCI
778 if_t ifp = sc->rl_ifp; in re_diag()
799 * - Puts receiver in promiscuous mode in re_diag()
800 * - Enables digital loopback mode in re_diag()
801 * - Leaves interrupts turned off in re_diag()
805 sc->rl_testmode = 1; in re_diag()
808 sc->rl_flags |= RL_FLAG_LINK; in re_diag()
809 if (sc->rl_type == RL_8169) in re_diag()
814 re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET); in re_diag()
816 status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR); in re_diag()
821 re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP); in re_diag()
829 bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN); in re_diag()
830 bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN); in re_diag()
831 eh->ether_type = htons(ETHERTYPE_IP); in re_diag()
832 m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN; in re_diag()
859 device_printf(sc->rl_dev, in re_diag()
871 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, in re_diag()
872 sc->rl_ldata.rl_rx_list_map, in re_diag()
874 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, in re_diag()
875 sc->rl_ldata.rl_rx_desc[0].rx_dmamap, in re_diag()
877 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, in re_diag()
878 sc->rl_ldata.rl_rx_desc[0].rx_dmamap); in re_diag()
880 m0 = sc->rl_ldata.rl_rx_desc[0].rx_m; in re_diag()
881 sc->rl_ldata.rl_rx_desc[0].rx_m = NULL; in re_diag()
884 cur_rx = &sc->rl_ldata.rl_rx_list[0]; in re_diag()
886 rxstat = le32toh(cur_rx->rl_cmdstat); in re_diag()
889 device_printf(sc->rl_dev, in re_diag()
897 if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) || in re_diag()
898 bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) || in re_diag()
899 ntohs(eh->ether_type) != ETHERTYPE_IP) { in re_diag()
900 device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n"); in re_diag()
901 device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n", in re_diag()
903 device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n", in re_diag()
904 eh->ether_dhost, ":", eh->ether_shost, ":", in re_diag()
905 ntohs(eh->ether_type)); in re_diag()
906 device_printf(sc->rl_dev, "You may have a defective 32-bit " in re_diag()
907 "NIC plugged into a 64-bit PCI slot.\n"); in re_diag()
908 device_printf(sc->rl_dev, "Please re-install the NIC in a " in re_diag()
909 "32-bit slot for proper operation.\n"); in re_diag()
910 device_printf(sc->rl_dev, "Read the re(4) man page for more " in re_diag()
918 sc->rl_testmode = 0; in re_diag()
919 sc->rl_flags &= ~RL_FLAG_LINK; in re_diag()
968 if (vendor == t->rl_vid && devid == t->rl_did) { in re_probe()
969 device_set_desc(dev, t->rl_name); in re_probe()
991 *addr = segs->ds_addr; in re_dma_map_addr()
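The single assignment above is the whole point of the busdma load callback: the ring tags are created with nsegments = 1, so the callback only has to record the one bus address the load produced. Spelled out, the conventional form looks roughly like this (a sketch; the error and segment-count checks are the usual ones, not necessarily verbatim):

static void
re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *addr;

	if (error)
		return;
	/* The caller asked for a single segment, so expect exactly one. */
	KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
	addr = arg;
	*addr = segs->ds_addr;
}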
1002 rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc); in re_allocmem()
1003 tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc); in re_allocmem()
1014 if ((sc->rl_flags & RL_FLAG_PCIE) == 0) in re_allocmem()
1019 NULL, NULL, &sc->rl_parent_tag); in re_allocmem()
1028 error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0, in re_allocmem()
1031 NULL, NULL, &sc->rl_ldata.rl_tx_mtag); in re_allocmem()
1041 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { in re_allocmem()
1042 error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), in re_allocmem()
1045 &sc->rl_ldata.rl_jrx_mtag); in re_allocmem()
1052 error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0, in re_allocmem()
1054 MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag); in re_allocmem()
1063 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, in re_allocmem()
1066 NULL, NULL, &sc->rl_ldata.rl_tx_list_tag); in re_allocmem()
1074 error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag, in re_allocmem()
1075 (void **)&sc->rl_ldata.rl_tx_list, in re_allocmem()
1077 &sc->rl_ldata.rl_tx_list_map); in re_allocmem()
1085 sc->rl_ldata.rl_tx_list_addr = 0; in re_allocmem()
1086 error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag, in re_allocmem()
1087 sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list, in re_allocmem()
1089 &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT); in re_allocmem()
1090 if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) { in re_allocmem()
1097 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { in re_allocmem()
1098 error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0, in re_allocmem()
1099 &sc->rl_ldata.rl_tx_desc[i].tx_dmamap); in re_allocmem()
1109 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, in re_allocmem()
1112 NULL, NULL, &sc->rl_ldata.rl_rx_list_tag); in re_allocmem()
1120 error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag, in re_allocmem()
1121 (void **)&sc->rl_ldata.rl_rx_list, in re_allocmem()
1123 &sc->rl_ldata.rl_rx_list_map); in re_allocmem()
1131 sc->rl_ldata.rl_rx_list_addr = 0; in re_allocmem()
1132 error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag, in re_allocmem()
1133 sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list, in re_allocmem()
1135 &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT); in re_allocmem()
1136 if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) { in re_allocmem()
1143 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { in re_allocmem()
1144 error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0, in re_allocmem()
1145 &sc->rl_ldata.rl_jrx_sparemap); in re_allocmem()
1151 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { in re_allocmem()
1152 error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0, in re_allocmem()
1153 &sc->rl_ldata.rl_jrx_desc[i].rx_dmamap); in re_allocmem()
1161 error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0, in re_allocmem()
1162 &sc->rl_ldata.rl_rx_sparemap); in re_allocmem()
1167 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { in re_allocmem()
1168 error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0, in re_allocmem()
1169 &sc->rl_ldata.rl_rx_desc[i].rx_dmamap); in re_allocmem()
1177 error = bus_dma_tag_create(sc->rl_parent_tag, RL_DUMP_ALIGN, 0, in re_allocmem()
1180 &sc->rl_ldata.rl_stag); in re_allocmem()
1186 error = bus_dmamem_alloc(sc->rl_ldata.rl_stag, in re_allocmem()
1187 (void **)&sc->rl_ldata.rl_stats, in re_allocmem()
1189 &sc->rl_ldata.rl_smap); in re_allocmem()
1196 sc->rl_ldata.rl_stats_addr = 0; in re_allocmem()
1197 error = bus_dmamap_load(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap, in re_allocmem()
1198 sc->rl_ldata.rl_stats, sizeof(struct rl_stats), re_dma_map_addr, in re_allocmem()
1199 &sc->rl_ldata.rl_stats_addr, BUS_DMA_NOWAIT); in re_allocmem()
1200 if (error != 0 || sc->rl_ldata.rl_stats_addr == 0) { in re_allocmem()
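The re_allocmem() fragments above all follow the same three-step busdma pattern: create a tag describing alignment and size limits, allocate DMA-safe memory plus a map, then load the map so re_dma_map_addr() can capture the bus address. A condensed sketch for the TX descriptor ring (addressing limits and flags shown here are the conventional choices and should be treated as illustrative, not as the driver's exact values):

tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc);

/* 1. Tag: ring-aligned, one contiguous segment of tx_list_size bytes. */
error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 0,
    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
    tx_list_size, 1, tx_list_size, 0, NULL, NULL,
    &sc->rl_ldata.rl_tx_list_tag);
if (error)
	return (error);

/* 2. DMA-safe, zeroed memory for the ring plus its map. */
error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag,
    (void **)&sc->rl_ldata.rl_tx_list,
    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
    &sc->rl_ldata.rl_tx_list_map);
if (error)
	return (error);

/* 3. Load the map; the callback stores the ring's bus address. */
error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag,
    sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
    tx_list_size, re_dma_map_addr,
    &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT);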
1227 sc->rl_dev = dev; in re_attach()
1229 mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, in re_attach()
1231 callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0); in re_attach()
1247 sc->rl_res_id = PCIR_BAR(1); in re_attach()
1248 sc->rl_res_type = SYS_RES_MEMORY; in re_attach()
1251 sc->rl_res_id = PCIR_BAR(2); in re_attach()
1253 sc->rl_res_id = PCIR_BAR(0); in re_attach()
1254 sc->rl_res_type = SYS_RES_IOPORT; in re_attach()
1256 sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type, in re_attach()
1257 &sc->rl_res_id, RF_ACTIVE); in re_attach()
1258 if (sc->rl_res == NULL && prefer_iomap == 0) { in re_attach()
1259 sc->rl_res_id = PCIR_BAR(0); in re_attach()
1260 sc->rl_res_type = SYS_RES_IOPORT; in re_attach()
1261 sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type, in re_attach()
1262 &sc->rl_res_id, RF_ACTIVE); in re_attach()
1264 if (sc->rl_res == NULL) { in re_attach()
1270 sc->rl_btag = rman_get_bustag(sc->rl_res); in re_attach()
1271 sc->rl_bhandle = rman_get_bushandle(sc->rl_res); in re_attach()
1276 sc->rl_flags |= RL_FLAG_PCIE; in re_attach()
1277 sc->rl_expcap = reg; in re_attach()
1281 device_printf(dev, "MSI-X count : %d\n", msixc); in re_attach()
1287 /* Prefer MSI-X to MSI. */ in re_attach()
1291 sc->rl_res_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY, in re_attach()
1293 if (sc->rl_res_pba == NULL) { in re_attach()
1294 device_printf(sc->rl_dev, in re_attach()
1295 "could not allocate MSI-X PBA resource\n"); in re_attach()
1297 if (sc->rl_res_pba != NULL && in re_attach()
1300 device_printf(dev, "Using %d MSI-X message\n", in re_attach()
1302 sc->rl_flags |= RL_FLAG_MSIX; in re_attach()
1306 if ((sc->rl_flags & RL_FLAG_MSIX) == 0) { in re_attach()
1307 if (sc->rl_res_pba != NULL) in re_attach()
1309 sc->rl_res_pba); in re_attach()
1310 sc->rl_res_pba = NULL; in re_attach()
1321 sc->rl_flags |= RL_FLAG_MSI; in re_attach()
1331 if ((sc->rl_flags & RL_FLAG_MSI) == 0) in re_attach()
1336 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) { in re_attach()
1338 sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, in re_attach()
1340 if (sc->rl_irq[0] == NULL) { in re_attach()
1347 sc->rl_irq[i] = bus_alloc_resource_any(dev, in re_attach()
1349 if (sc->rl_irq[i] == NULL) { in re_attach()
1359 if ((sc->rl_flags & RL_FLAG_MSI) == 0) { in re_attach()
1371 if (sc->rl_expcap != 0) { in re_attach()
1372 cap = pci_read_config(dev, sc->rl_expcap + in re_attach()
1375 ctl = pci_read_config(dev, sc->rl_expcap + in re_attach()
1381 pci_write_config(dev, sc->rl_expcap + in re_attach()
1399 sc->rl_macrev = hwrev & 0x00700000; in re_attach()
1403 device_printf(dev, "MAC rev. 0x%08x\n", sc->rl_macrev); in re_attach()
1404 while (hw_rev->rl_desc != NULL) { in re_attach()
1405 if (hw_rev->rl_rev == hwrev) { in re_attach()
1406 sc->rl_type = hw_rev->rl_type; in re_attach()
1407 sc->rl_hwrev = hw_rev; in re_attach()
1412 if (hw_rev->rl_desc == NULL) { in re_attach()
1418 switch (hw_rev->rl_rev) { in re_attach()
1420 sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD; in re_attach()
1424 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER; in re_attach()
1429 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | in re_attach()
1434 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | in re_attach()
1442 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | in re_attach()
1447 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | in re_attach()
1454 sc->rl_flags |= RL_FLAG_WOLRXENB; in re_attach()
1457 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT; in re_attach()
1460 sc->rl_flags |= RL_FLAG_MACSLEEP; in re_attach()
1463 if (sc->rl_macrev == 0x00200000) in re_attach()
1464 sc->rl_flags |= RL_FLAG_MACSLEEP; in re_attach()
1467 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | in re_attach()
1472 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | in re_attach()
1478 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | in re_attach()
1483 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | in re_attach()
1490 sc->rl_flags |= RL_FLAG_EARLYOFF; in re_attach()
1493 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | in re_attach()
1502 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | in re_attach()
1512 sc->rl_flags |= RL_FLAG_FASTETHER; in re_attach()
1514 sc->rl_flags |= RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK; in re_attach()
1516 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | in re_attach()
1525 sc->rl_flags |= RL_FLAG_PHYWAKE; in re_attach()
1530 sc->rl_flags |= RL_FLAG_MACRESET; in re_attach()
1536 if (sc->rl_hwrev->rl_rev == RL_HWREV_8139CPLUS) { in re_attach()
1537 sc->rl_cfg0 = RL_8139_CFG0; in re_attach()
1538 sc->rl_cfg1 = RL_8139_CFG1; in re_attach()
1539 sc->rl_cfg2 = 0; in re_attach()
1540 sc->rl_cfg3 = RL_8139_CFG3; in re_attach()
1541 sc->rl_cfg4 = RL_8139_CFG4; in re_attach()
1542 sc->rl_cfg5 = RL_8139_CFG5; in re_attach()
1544 sc->rl_cfg0 = RL_CFG0; in re_attach()
1545 sc->rl_cfg1 = RL_CFG1; in re_attach()
1546 sc->rl_cfg2 = RL_CFG2; in re_attach()
1547 sc->rl_cfg3 = RL_CFG3; in re_attach()
1548 sc->rl_cfg4 = RL_CFG4; in re_attach()
1549 sc->rl_cfg5 = RL_CFG5; in re_attach()
1557 /* Enable PME. */ in re_attach()
1559 cfg = CSR_READ_1(sc, sc->rl_cfg1); in re_attach()
1561 CSR_WRITE_1(sc, sc->rl_cfg1, cfg); in re_attach()
1562 cfg = CSR_READ_1(sc, sc->rl_cfg5); in re_attach()
1564 CSR_WRITE_1(sc, sc->rl_cfg5, cfg); in re_attach()
1567 if ((sc->rl_flags & RL_FLAG_PAR) != 0) { in re_attach()
1575 sc->rl_eewidth = RL_9356_ADDR_LEN; in re_attach()
1578 sc->rl_eewidth = RL_9346_ADDR_LEN; in re_attach()
1589 if (sc->rl_type == RL_8169) { in re_attach()
1591 sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN; in re_attach()
1592 sc->rl_txstart = RL_GTXSTART; in re_attach()
1593 sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT; in re_attach()
1594 sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT; in re_attach()
1597 sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN; in re_attach()
1598 sc->rl_txstart = RL_TXSTART; in re_attach()
1599 sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT; in re_attach()
1600 sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT; in re_attach()
1608 ifp = sc->rl_ifp = if_alloc(IFT_ETHER); in re_attach()
1611 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { in re_attach()
1621 if ((sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) { in re_attach()
1623 if (hw_rev->rl_rev == RL_HWREV_8401E) in re_attach()
1626 if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) { in re_attach()
1640 if (sc->rl_hwrev->rl_rev == RL_HWREV_8168C || in re_attach()
1641 sc->rl_hwrev->rl_rev == RL_HWREV_8168C_SPIN2 || in re_attach()
1642 sc->rl_hwrev->rl_rev == RL_HWREV_8168CP) { in re_attach()
1655 NET_TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc); in re_attach()
1661 if (sc->rl_type == RL_8169) in re_attach()
1664 if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0) in re_attach()
1666 error = mii_attach(dev, &sc->rl_miibus, ifp, re_ifmedia_upd, in re_attach()
1688 if (pci_find_cap(sc->rl_dev, PCIY_PMG, &reg) == 0) in re_attach()
1716 * Some 32-bit cards were incorrectly wired and would in re_attach()
1717 * malfunction if plugged into a 64-bit slot. in re_attach()
1734 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 && in re_attach()
1736 error = bus_setup_intr(dev, sc->rl_irq[0], in re_attach()
1738 &sc->rl_intrhand[0]); in re_attach()
1740 error = bus_setup_intr(dev, sc->rl_irq[0], in re_attach()
1742 &sc->rl_intrhand[0]); in re_attach()
1774 ifp = sc->rl_ifp; in re_detach()
1775 KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized")); in re_detach()
1777 /* These should only be active if attach succeeded */ in re_detach()
1785 sc->suspended = 1; in re_detach()
1789 callout_drain(&sc->rl_stat_callout); in re_detach()
1790 taskqueue_drain(taskqueue_fast, &sc->rl_inttask); in re_detach()
1813 if (sc->rl_intrhand[0] != NULL) { in re_detach()
1814 bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]); in re_detach()
1815 sc->rl_intrhand[0] = NULL; in re_detach()
1823 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) in re_detach()
1827 if (sc->rl_irq[0] != NULL) { in re_detach()
1828 bus_release_resource(dev, SYS_RES_IRQ, rid, sc->rl_irq[0]); in re_detach()
1829 sc->rl_irq[0] = NULL; in re_detach()
1831 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0) in re_detach()
1833 if (sc->rl_res_pba) { in re_detach()
1835 bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->rl_res_pba); in re_detach()
1837 if (sc->rl_res) in re_detach()
1838 bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id, in re_detach()
1839 sc->rl_res); in re_detach()
1843 if (sc->rl_ldata.rl_rx_list_tag) { in re_detach()
1844 if (sc->rl_ldata.rl_rx_list_addr) in re_detach()
1845 bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag, in re_detach()
1846 sc->rl_ldata.rl_rx_list_map); in re_detach()
1847 if (sc->rl_ldata.rl_rx_list) in re_detach()
1848 bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag, in re_detach()
1849 sc->rl_ldata.rl_rx_list, in re_detach()
1850 sc->rl_ldata.rl_rx_list_map); in re_detach()
1851 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag); in re_detach()
1856 if (sc->rl_ldata.rl_tx_list_tag) { in re_detach()
1857 if (sc->rl_ldata.rl_tx_list_addr) in re_detach()
1858 bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag, in re_detach()
1859 sc->rl_ldata.rl_tx_list_map); in re_detach()
1860 if (sc->rl_ldata.rl_tx_list) in re_detach()
1861 bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag, in re_detach()
1862 sc->rl_ldata.rl_tx_list, in re_detach()
1863 sc->rl_ldata.rl_tx_list_map); in re_detach()
1864 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag); in re_detach()
1869 if (sc->rl_ldata.rl_tx_mtag) { in re_detach()
1870 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { in re_detach()
1871 if (sc->rl_ldata.rl_tx_desc[i].tx_dmamap) in re_detach()
1872 bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag, in re_detach()
1873 sc->rl_ldata.rl_tx_desc[i].tx_dmamap); in re_detach()
1875 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag); in re_detach()
1877 if (sc->rl_ldata.rl_rx_mtag) { in re_detach()
1878 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { in re_detach()
1879 if (sc->rl_ldata.rl_rx_desc[i].rx_dmamap) in re_detach()
1880 bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag, in re_detach()
1881 sc->rl_ldata.rl_rx_desc[i].rx_dmamap); in re_detach()
1883 if (sc->rl_ldata.rl_rx_sparemap) in re_detach()
1884 bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag, in re_detach()
1885 sc->rl_ldata.rl_rx_sparemap); in re_detach()
1886 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag); in re_detach()
1888 if (sc->rl_ldata.rl_jrx_mtag) { in re_detach()
1889 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { in re_detach()
1890 if (sc->rl_ldata.rl_jrx_desc[i].rx_dmamap) in re_detach()
1891 bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag, in re_detach()
1892 sc->rl_ldata.rl_jrx_desc[i].rx_dmamap); in re_detach()
1894 if (sc->rl_ldata.rl_jrx_sparemap) in re_detach()
1895 bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag, in re_detach()
1896 sc->rl_ldata.rl_jrx_sparemap); in re_detach()
1897 bus_dma_tag_destroy(sc->rl_ldata.rl_jrx_mtag); in re_detach()
1901 if (sc->rl_ldata.rl_stag) { in re_detach()
1902 if (sc->rl_ldata.rl_stats_addr) in re_detach()
1903 bus_dmamap_unload(sc->rl_ldata.rl_stag, in re_detach()
1904 sc->rl_ldata.rl_smap); in re_detach()
1905 if (sc->rl_ldata.rl_stats) in re_detach()
1906 bus_dmamem_free(sc->rl_ldata.rl_stag, in re_detach()
1907 sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap); in re_detach()
1908 bus_dma_tag_destroy(sc->rl_ldata.rl_stag); in re_detach()
1911 if (sc->rl_parent_tag) in re_detach()
1912 bus_dma_tag_destroy(sc->rl_parent_tag); in re_detach()
1914 mtx_destroy(&sc->rl_mtx); in re_detach()
1926 if (if_getmtu(sc->rl_ifp) > RL_MTU && in re_discard_rxbuf()
1927 (sc->rl_flags & RL_FLAG_JUMBOV2) != 0) in re_discard_rxbuf()
1928 rxd = &sc->rl_ldata.rl_jrx_desc[idx]; in re_discard_rxbuf()
1930 rxd = &sc->rl_ldata.rl_rx_desc[idx]; in re_discard_rxbuf()
1931 desc = &sc->rl_ldata.rl_rx_list[idx]; in re_discard_rxbuf()
1932 desc->rl_vlanctl = 0; in re_discard_rxbuf()
1933 cmdstat = rxd->rx_size; in re_discard_rxbuf()
1934 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) in re_discard_rxbuf()
1936 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); in re_discard_rxbuf()
1954 m->m_len = m->m_pkthdr.len = MCLBYTES; in re_newbuf()
1957 * This is part of an evil trick to deal with non-x86 platforms. in re_newbuf()
1958 * The RealTek chip requires RX buffers to be aligned on 64-bit in re_newbuf()
1959 * boundaries, but that will hose non-x86 machines. To get around in re_newbuf()
1961 * and for non-x86 hosts, we copy the buffer back six bytes in re_newbuf()
1968 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag, in re_newbuf()
1969 sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT); in re_newbuf()
1976 rxd = &sc->rl_ldata.rl_rx_desc[idx]; in re_newbuf()
1977 if (rxd->rx_m != NULL) { in re_newbuf()
1978 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, in re_newbuf()
1980 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap); in re_newbuf()
1983 rxd->rx_m = m; in re_newbuf()
1984 map = rxd->rx_dmamap; in re_newbuf()
1985 rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap; in re_newbuf()
1986 rxd->rx_size = segs[0].ds_len; in re_newbuf()
1987 sc->rl_ldata.rl_rx_sparemap = map; in re_newbuf()
1988 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, in re_newbuf()
1991 desc = &sc->rl_ldata.rl_rx_list[idx]; in re_newbuf()
1992 desc->rl_vlanctl = 0; in re_newbuf()
1993 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr)); in re_newbuf()
1994 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr)); in re_newbuf()
1996 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) in re_newbuf()
1998 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); in re_newbuf()
2017 m->m_len = m->m_pkthdr.len = MJUM9BYTES; in re_jumbo_newbuf()
2021 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_jrx_mtag, in re_jumbo_newbuf()
2022 sc->rl_ldata.rl_jrx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT); in re_jumbo_newbuf()
2029 rxd = &sc->rl_ldata.rl_jrx_desc[idx]; in re_jumbo_newbuf()
2030 if (rxd->rx_m != NULL) { in re_jumbo_newbuf()
2031 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap, in re_jumbo_newbuf()
2033 bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap); in re_jumbo_newbuf()
2036 rxd->rx_m = m; in re_jumbo_newbuf()
2037 map = rxd->rx_dmamap; in re_jumbo_newbuf()
2038 rxd->rx_dmamap = sc->rl_ldata.rl_jrx_sparemap; in re_jumbo_newbuf()
2039 rxd->rx_size = segs[0].ds_len; in re_jumbo_newbuf()
2040 sc->rl_ldata.rl_jrx_sparemap = map; in re_jumbo_newbuf()
2041 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap, in re_jumbo_newbuf()
2044 desc = &sc->rl_ldata.rl_rx_list[idx]; in re_jumbo_newbuf()
2045 desc->rl_vlanctl = 0; in re_jumbo_newbuf()
2046 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr)); in re_jumbo_newbuf()
2047 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr)); in re_jumbo_newbuf()
2049 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) in re_jumbo_newbuf()
2051 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); in re_jumbo_newbuf()
2064 dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src; in re_fixup_rx()
2066 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) in re_fixup_rx()
2069 m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN; in re_fixup_rx()
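Those three lines are the whole of the copy described in the "evil trick" comment earlier: the chip insists on 64-bit-aligned RX buffers, so on strict-alignment (non-x86) machines the driver slides each received frame back by RE_ETHER_ALIGN - ETHER_ALIGN bytes so the IP header lands on a natural boundary. The complete helper, reconstructed from the fragments above (a sketch; it assumes RE_ETHER_ALIGN is the chip's 8-byte requirement and ETHER_ALIGN the usual 2-byte pad, giving the six-byte shift the comment mentions):

static __inline void
re_fixup_rx(struct mbuf *m)
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src;

	/* Copy the frame back six bytes, one 16-bit word at a time. */
	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN;
}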
2081 bzero(sc->rl_ldata.rl_tx_list, in re_tx_list_init()
2082 sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc)); in re_tx_list_init()
2083 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) in re_tx_list_init()
2084 sc->rl_ldata.rl_tx_desc[i].tx_m = NULL; in re_tx_list_init()
2089 desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1]; in re_tx_list_init()
2090 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR); in re_tx_list_init()
2092 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, in re_tx_list_init()
2093 sc->rl_ldata.rl_tx_list_map, in re_tx_list_init()
2096 sc->rl_ldata.rl_tx_prodidx = 0; in re_tx_list_init()
2097 sc->rl_ldata.rl_tx_considx = 0; in re_tx_list_init()
2098 sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt; in re_tx_list_init()
2108 bzero(sc->rl_ldata.rl_rx_list, in re_rx_list_init()
2109 sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc)); in re_rx_list_init()
2110 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { in re_rx_list_init()
2111 sc->rl_ldata.rl_rx_desc[i].rx_m = NULL; in re_rx_list_init()
2121 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, in re_rx_list_init()
2122 sc->rl_ldata.rl_rx_list_map, in re_rx_list_init()
2125 sc->rl_ldata.rl_rx_prodidx = 0; in re_rx_list_init()
2126 sc->rl_head = sc->rl_tail = NULL; in re_rx_list_init()
2127 sc->rl_int_rx_act = 0; in re_rx_list_init()
2137 bzero(sc->rl_ldata.rl_rx_list, in re_jrx_list_init()
2138 sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc)); in re_jrx_list_init()
2139 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { in re_jrx_list_init()
2140 sc->rl_ldata.rl_jrx_desc[i].rx_m = NULL; in re_jrx_list_init()
2145 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, in re_jrx_list_init()
2146 sc->rl_ldata.rl_rx_list_map, in re_jrx_list_init()
2149 sc->rl_ldata.rl_rx_prodidx = 0; in re_jrx_list_init()
2150 sc->rl_head = sc->rl_tail = NULL; in re_jrx_list_init()
2151 sc->rl_int_rx_act = 0; in re_jrx_list_init()
2173 ifp = sc->rl_ifp; in re_rxeof()
2178 if (if_getmtu(ifp) > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0) in re_rxeof()
2185 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, in re_rxeof()
2186 sc->rl_ldata.rl_rx_list_map, in re_rxeof()
2189 for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0; in re_rxeof()
2193 cur_rx = &sc->rl_ldata.rl_rx_list[i]; in re_rxeof()
2194 rxstat = le32toh(cur_rx->rl_cmdstat); in re_rxeof()
2197 total_len = rxstat & sc->rl_rxlenmask; in re_rxeof()
2198 rxvlan = le32toh(cur_rx->rl_vlanctl); in re_rxeof()
2200 m = sc->rl_ldata.rl_jrx_desc[i].rx_m; in re_rxeof()
2202 m = sc->rl_ldata.rl_rx_desc[i].rx_m; in re_rxeof()
2204 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && in re_rxeof()
2209 * support multi-fragment packet. in re_rxeof()
2216 * If this is part of a multi-fragment packet, in re_rxeof()
2219 if (sc->rl_head != NULL) { in re_rxeof()
2220 m_freem(sc->rl_head); in re_rxeof()
2221 sc->rl_head = sc->rl_tail = NULL; in re_rxeof()
2226 m->m_len = RE_RX_DESC_BUFLEN; in re_rxeof()
2227 if (sc->rl_head == NULL) in re_rxeof()
2228 sc->rl_head = sc->rl_tail = m; in re_rxeof()
2230 m->m_flags &= ~M_PKTHDR; in re_rxeof()
2231 sc->rl_tail->m_next = m; in re_rxeof()
2232 sc->rl_tail = m; in re_rxeof()
2253 if (sc->rl_type == RL_8169) in re_rxeof()
2257 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be in re_rxeof()
2262 if ((sc->rl_flags & RL_FLAG_JUMBOV2) == 0 && in re_rxeof()
2269 * If this is part of a multi-fragment packet, in re_rxeof()
2272 if (sc->rl_head != NULL) { in re_rxeof()
2273 m_freem(sc->rl_head); in re_rxeof()
2274 sc->rl_head = sc->rl_tail = NULL; in re_rxeof()
2291 if (sc->rl_head != NULL) { in re_rxeof()
2292 m_freem(sc->rl_head); in re_rxeof()
2293 sc->rl_head = sc->rl_tail = NULL; in re_rxeof()
2299 if (sc->rl_head != NULL) { in re_rxeof()
2301 m->m_len = total_len; in re_rxeof()
2303 m->m_len = total_len % RE_RX_DESC_BUFLEN; in re_rxeof()
2304 if (m->m_len == 0) in re_rxeof()
2305 m->m_len = RE_RX_DESC_BUFLEN; in re_rxeof()
2313 if (m->m_len <= ETHER_CRC_LEN) { in re_rxeof()
2314 sc->rl_tail->m_len -= in re_rxeof()
2315 (ETHER_CRC_LEN - m->m_len); in re_rxeof()
2318 m->m_len -= ETHER_CRC_LEN; in re_rxeof()
2319 m->m_flags &= ~M_PKTHDR; in re_rxeof()
2320 sc->rl_tail->m_next = m; in re_rxeof()
2322 m = sc->rl_head; in re_rxeof()
2323 sc->rl_head = sc->rl_tail = NULL; in re_rxeof()
2324 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; in re_rxeof()
2326 m->m_pkthdr.len = m->m_len = in re_rxeof()
2327 (total_len - ETHER_CRC_LEN); in re_rxeof()
2333 m->m_pkthdr.rcvif = ifp; in re_rxeof()
2338 if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) { in re_rxeof()
2341 m->m_pkthdr.csum_flags |= in re_rxeof()
2344 m->m_pkthdr.csum_flags |= in re_rxeof()
2352 m->m_pkthdr.csum_flags |= in re_rxeof()
2354 m->m_pkthdr.csum_data = 0xffff; in re_rxeof()
2362 m->m_pkthdr.csum_flags |= in re_rxeof()
2366 m->m_pkthdr.csum_flags |= in re_rxeof()
2372 m->m_pkthdr.csum_flags |= in re_rxeof()
2374 m->m_pkthdr.csum_data = 0xffff; in re_rxeof()
2378 maxpkt--; in re_rxeof()
2380 m->m_pkthdr.ether_vtag = in re_rxeof()
2382 m->m_flags |= M_VLANTAG; in re_rxeof()
2392 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, in re_rxeof()
2393 sc->rl_ldata.rl_rx_list_map, in re_rxeof()
2396 sc->rl_ldata.rl_rx_prodidx = i; in re_rxeof()
2414 cons = sc->rl_ldata.rl_tx_considx; in re_txeof()
2415 if (cons == sc->rl_ldata.rl_tx_prodidx) in re_txeof()
2418 ifp = sc->rl_ifp; in re_txeof()
2424 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, in re_txeof()
2425 sc->rl_ldata.rl_tx_list_map, in re_txeof()
2428 for (; cons != sc->rl_ldata.rl_tx_prodidx; in re_txeof()
2430 txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat); in re_txeof()
2440 txd = &sc->rl_ldata.rl_tx_desc[cons]; in re_txeof()
2441 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, in re_txeof()
2442 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); in re_txeof()
2443 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, in re_txeof()
2444 txd->tx_dmamap); in re_txeof()
2445 KASSERT(txd->tx_m != NULL, in re_txeof()
2447 m_freem(txd->tx_m); in re_txeof()
2448 txd->tx_m = NULL; in re_txeof()
2457 sc->rl_ldata.rl_tx_free++; in re_txeof()
2460 sc->rl_ldata.rl_tx_considx = cons; in re_txeof()
2464 if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) { in re_txeof()
2469 * interrupt that will cause us to re-enter this routine. in re_txeof()
2475 sc->rl_watchdog_timer = 0; in re_txeof()
2488 mii = device_get_softc(sc->rl_miibus); in re_tick()
2490 if ((sc->rl_flags & RL_FLAG_LINK) == 0) in re_tick()
2491 re_miibus_statchg(sc->rl_dev); in re_tick()
2500 callout_reset(&sc->rl_stat_callout, hz, re_tick, sc); in re_tick()
2525 sc->rxcycles = count; in re_poll_locked()
2541 (sc->rl_flags & RL_FLAG_PCIE)) in re_poll_locked()
2542 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); in re_poll_locked()
2570 taskqueue_enqueue(taskqueue_fast, &sc->rl_inttask); in re_intr()
2584 ifp = sc->rl_ifp; in re_int_task()
2591 if (sc->suspended || in re_int_task()
2616 (sc->rl_flags & RL_FLAG_PCIE)) in re_int_task()
2617 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); in re_int_task()
2638 taskqueue_enqueue(taskqueue_fast, &sc->rl_inttask); in re_int_task()
2655 ifp = sc->rl_ifp; in re_intr_msi()
2672 if (sc->rl_int_rx_act > 0) { in re_intr_msi()
2683 if (sc->rl_int_rx_mod != 0 && in re_intr_msi()
2686 /* Rearm one-shot timer. */ in re_intr_msi()
2690 sc->rl_int_rx_act = 1; in re_intr_msi()
2694 sc->rl_int_rx_act = 0; in re_intr_msi()
2708 (sc->rl_flags & RL_FLAG_PCIE)) in re_intr_msi()
2709 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); in re_intr_msi()
2753 if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 && in re_encap()
2754 (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN && in re_encap()
2755 ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) { in re_encap()
2756 padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len; in re_encap()
2767 if ((*m_head)->m_next != NULL || in re_encap()
2782 bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen); in re_encap()
2783 m_new->m_pkthdr.len += padlen; in re_encap()
2784 m_new->m_len = m_new->m_pkthdr.len; in re_encap()
2788 prod = sc->rl_ldata.rl_tx_prodidx; in re_encap()
2789 txd = &sc->rl_ldata.rl_tx_desc[prod]; in re_encap()
2790 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, in re_encap()
2800 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, in re_encap()
2801 txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT); in re_encap()
2816 if (sc->rl_ldata.rl_tx_free - nsegs <= 1) { in re_encap()
2817 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap); in re_encap()
2821 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, in re_encap()
2826 * appear in all descriptors of a multi-descriptor transmit in re_encap()
2832 if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) { in re_encap()
2833 if ((sc->rl_flags & RL_FLAG_DESCV2) != 0) { in re_encap()
2835 vlanctl |= ((uint32_t)(*m_head)->m_pkthdr.tso_segsz << in re_encap()
2839 ((uint32_t)(*m_head)->m_pkthdr.tso_segsz << in re_encap()
2848 if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) { in re_encap()
2849 if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) { in re_encap()
2851 if (((*m_head)->m_pkthdr.csum_flags & in re_encap()
2854 if (((*m_head)->m_pkthdr.csum_flags & in re_encap()
2859 if (((*m_head)->m_pkthdr.csum_flags & in re_encap()
2862 if (((*m_head)->m_pkthdr.csum_flags & in re_encap()
2871 * appear in all descriptors of a multi-descriptor in re_encap()
2874 if ((*m_head)->m_flags & M_VLANTAG) in re_encap()
2875 vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) | in re_encap()
2880 desc = &sc->rl_ldata.rl_tx_list[prod]; in re_encap()
2881 desc->rl_vlanctl = htole32(vlanctl); in re_encap()
2882 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr)); in re_encap()
2883 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr)); in re_encap()
2887 if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1) in re_encap()
2889 desc->rl_cmdstat = htole32(cmdstat | csum_flags); in re_encap()
2890 sc->rl_ldata.rl_tx_free--; in re_encap()
2893 sc->rl_ldata.rl_tx_prodidx = prod; in re_encap()
2897 desc = &sc->rl_ldata.rl_tx_list[ei]; in re_encap()
2898 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF); in re_encap()
2900 desc = &sc->rl_ldata.rl_tx_list[si]; in re_encap()
2902 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF); in re_encap()
2909 txd_last = &sc->rl_ldata.rl_tx_desc[ei]; in re_encap()
2910 map = txd->tx_dmamap; in re_encap()
2911 txd->tx_dmamap = txd_last->tx_dmamap; in re_encap()
2912 txd_last->tx_dmamap = map; in re_encap()
2913 txd_last->tx_m = *m_head; in re_encap()
2944 struct netmap_kring *kring = NA(ifp)->tx_rings[0]; in re_start_locked()
2945 if (sc->rl_ldata.rl_tx_prodidx != kring->nr_hwcur) { in re_start_locked()
2947 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); in re_start_locked()
2951 sc->rl_watchdog_timer = 5; in re_start_locked()
2958 IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0) in re_start_locked()
2962 sc->rl_ldata.rl_tx_free > 1;) { in re_start_locked()
2986 if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) in re_start_locked()
3000 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, in re_start_tx()
3001 sc->rl_ldata.rl_tx_list_map, in re_start_tx()
3004 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); in re_start_tx()
3021 sc->rl_watchdog_timer = 5; in re_start_tx()
3028 if (sc->rl_hwrev->rl_rev == RL_HWREV_8168E_VL) { in re_set_jumbo()
3029 pci_set_max_read_req(sc->rl_dev, 4096); in re_set_jumbo()
3035 CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) | in re_set_jumbo()
3037 switch (sc->rl_hwrev->rl_rev) { in re_set_jumbo()
3041 CSR_WRITE_1(sc, sc->rl_cfg4, in re_set_jumbo()
3042 CSR_READ_1(sc, sc->rl_cfg4) | 0x01); in re_set_jumbo()
3045 CSR_WRITE_1(sc, sc->rl_cfg4, in re_set_jumbo()
3046 CSR_READ_1(sc, sc->rl_cfg4) | RL_CFG4_JUMBO_EN1); in re_set_jumbo()
3049 CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) & in re_set_jumbo()
3051 switch (sc->rl_hwrev->rl_rev) { in re_set_jumbo()
3055 CSR_WRITE_1(sc, sc->rl_cfg4, in re_set_jumbo()
3056 CSR_READ_1(sc, sc->rl_cfg4) & ~0x01); in re_set_jumbo()
3059 CSR_WRITE_1(sc, sc->rl_cfg4, in re_set_jumbo()
3060 CSR_READ_1(sc, sc->rl_cfg4) & ~RL_CFG4_JUMBO_EN1); in re_set_jumbo()
3065 switch (sc->rl_hwrev->rl_rev) { in re_set_jumbo()
3067 pci_set_max_read_req(sc->rl_dev, 4096); in re_set_jumbo()
3071 pci_set_max_read_req(sc->rl_dev, 512); in re_set_jumbo()
3073 pci_set_max_read_req(sc->rl_dev, 4096); in re_set_jumbo()
3090 if_t ifp = sc->rl_ifp; in re_init_locked()
3098 mii = device_get_softc(sc->rl_miibus); in re_init_locked()
3114 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { in re_init_locked()
3117 device_printf(sc->rl_dev, in re_init_locked()
3127 device_printf(sc->rl_dev, in re_init_locked()
3136 device_printf(sc->rl_dev, "no memory for RX buffers\n"); in re_init_locked()
3140 if ((sc->rl_flags & RL_FLAG_PCIE) != 0 && in re_init_locked()
3141 pci_get_device(sc->rl_dev) != RT_DEVICEID_8101E) { in re_init_locked()
3143 pci_set_max_read_req(sc->rl_dev, 512); in re_init_locked()
3145 pci_set_max_read_req(sc->rl_dev, 4096); in re_init_locked()
3160 if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) { in re_init_locked()
3167 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SC || in re_init_locked()
3168 sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) { in re_init_locked()
3170 if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_PCI66MHZ) != 0) in re_init_locked()
3172 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) in re_init_locked()
3205 RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr)); in re_init_locked()
3207 RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr)); in re_init_locked()
3210 RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr)); in re_init_locked()
3212 RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr)); in re_init_locked()
3214 if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) { in re_init_locked()
3221 * Enable transmit and receive for pre-RTL8168G controllers. in re_init_locked()
3224 if ((sc->rl_flags & RL_FLAG_8168G_PLUS) == 0) in re_init_locked()
3230 if (sc->rl_testmode) { in re_init_locked()
3231 if (sc->rl_type == RL_8169) in re_init_locked()
3248 if (sc->rl_type == RL_8169) { in re_init_locked()
3257 if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) in re_init_locked()
3272 if (sc->rl_testmode) in re_init_locked()
3279 sc->rl_txthresh = RL_TX_THRESH_INIT; in re_init_locked()
3295 if (sc->rl_type == RL_8169) in re_init_locked()
3304 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 && in re_init_locked()
3306 if (sc->rl_type == RL_8169) in re_init_locked()
3308 RL_USECS(sc->rl_int_rx_mod)); in re_init_locked()
3310 if (sc->rl_type == RL_8169) in re_init_locked()
3319 if (sc->rl_type == RL_8169) { in re_init_locked()
3320 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { in re_init_locked()
3328 sc->rl_hwrev->rl_max_mtu + in re_init_locked()
3334 } else if ((sc->rl_flags & RL_FLAG_PCIE) != 0 && in re_init_locked()
3335 sc->rl_hwrev->rl_max_mtu == RL_MTU) { in re_init_locked()
3342 if (sc->rl_testmode) in re_init_locked()
3345 CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) | in re_init_locked()
3351 sc->rl_flags &= ~RL_FLAG_LINK; in re_init_locked()
3354 sc->rl_watchdog_timer = 0; in re_init_locked()
3355 callout_reset(&sc->rl_stat_callout, hz, re_tick, sc); in re_init_locked()
3373 mii = device_get_softc(sc->rl_miibus); in re_ifmedia_upd()
3391 mii = device_get_softc(sc->rl_miibus); in re_ifmedia_sts()
3395 ifmr->ifm_active = mii->mii_media_active; in re_ifmedia_sts()
3396 ifmr->ifm_status = mii->mii_media_status; in re_ifmedia_sts()
3410 if (ifr->ifr_mtu < ETHERMIN || in re_ioctl()
3411 ifr->ifr_mtu > sc->rl_hwrev->rl_max_mtu || in re_ioctl()
3412 ((sc->rl_flags & RL_FLAG_FASTETHER) != 0 && in re_ioctl()
3413 ifr->ifr_mtu > RL_MTU)) { in re_ioctl()
3418 if (if_getmtu(ifp) != ifr->ifr_mtu) { in re_ioctl()
3419 if_setmtu(ifp, ifr->ifr_mtu); in re_ioctl()
3420 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && in re_ioctl()
3439 if (((if_getflags(ifp) ^ sc->rl_if_flags) in re_ioctl()
3448 sc->rl_if_flags = if_getflags(ifp); in re_ioctl()
3460 mii = device_get_softc(sc->rl_miibus); in re_ioctl()
3461 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); in re_ioctl()
3467 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); in re_ioctl()
3471 if (ifr->ifr_reqcap & IFCAP_POLLING) { in re_ioctl()
3529 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && in re_ioctl()
3565 if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0) in re_watchdog()
3568 ifp = sc->rl_ifp; in re_watchdog()
3570 if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) { in re_watchdog()
3572 "-- recovering\n"); in re_watchdog()
3602 ifp = sc->rl_ifp; in re_stop()
3604 sc->rl_watchdog_timer = 0; in re_stop()
3605 callout_stop(&sc->rl_stat_callout); in re_stop()
3622 if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) { in re_stop()
3628 if ((sc->rl_flags & RL_FLAG_WAIT_TXPOLL) != 0) { in re_stop()
3629 for (i = RL_TIMEOUT; i > 0; i--) { in re_stop()
3630 if ((CSR_READ_1(sc, sc->rl_txstart) & in re_stop()
3636 device_printf(sc->rl_dev, in re_stop()
3639 } else if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0) { in re_stop()
3642 if ((sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) != 0) { in re_stop()
3643 for (i = RL_TIMEOUT; i > 0; i--) { in re_stop()
3650 device_printf(sc->rl_dev, in re_stop()
3659 if (sc->rl_head != NULL) { in re_stop()
3660 m_freem(sc->rl_head); in re_stop()
3661 sc->rl_head = sc->rl_tail = NULL; in re_stop()
3665 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { in re_stop()
3666 txd = &sc->rl_ldata.rl_tx_desc[i]; in re_stop()
3667 if (txd->tx_m != NULL) { in re_stop()
3668 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, in re_stop()
3669 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); in re_stop()
3670 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, in re_stop()
3671 txd->tx_dmamap); in re_stop()
3672 m_freem(txd->tx_m); in re_stop()
3673 txd->tx_m = NULL; in re_stop()
3678 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { in re_stop()
3679 rxd = &sc->rl_ldata.rl_rx_desc[i]; in re_stop()
3680 if (rxd->rx_m != NULL) { in re_stop()
3681 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, in re_stop()
3682 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); in re_stop()
3683 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, in re_stop()
3684 rxd->rx_dmamap); in re_stop()
3685 m_freem(rxd->rx_m); in re_stop()
3686 rxd->rx_m = NULL; in re_stop()
3690 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { in re_stop()
3691 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { in re_stop()
3692 rxd = &sc->rl_ldata.rl_jrx_desc[i]; in re_stop()
3693 if (rxd->rx_m != NULL) { in re_stop()
3694 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, in re_stop()
3695 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); in re_stop()
3696 bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, in re_stop()
3697 rxd->rx_dmamap); in re_stop()
3698 m_freem(rxd->rx_m); in re_stop()
3699 rxd->rx_m = NULL; in re_stop()
3720 sc->suspended = 1; in re_suspend()
3728 * doesn't, re-enable busmastering, and restart the interface if
3741 ifp = sc->rl_ifp; in re_resume()
3743 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { in re_resume()
3759 sc->suspended = 0; in re_resume()
3783 if_setflagbits(sc->rl_ifp, 0, IFF_UP); in re_shutdown()
3799 mii = device_get_softc(sc->rl_miibus); in re_set_linkspeed()
3802 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == in re_set_linkspeed()
3804 switch IFM_SUBTYPE(mii->mii_media_active) { in re_set_linkspeed()
3815 miisc = LIST_FIRST(&mii->mii_phys); in re_set_linkspeed()
3816 phyno = miisc->mii_phy; in re_set_linkspeed()
3817 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) in re_set_linkspeed()
3819 re_miibus_writereg(sc->rl_dev, phyno, MII_100T2CR, 0); in re_set_linkspeed()
3820 re_miibus_writereg(sc->rl_dev, phyno, in re_set_linkspeed()
3822 re_miibus_writereg(sc->rl_dev, phyno, in re_set_linkspeed()
3831 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) in re_set_linkspeed()
3833 switch (IFM_SUBTYPE(mii->mii_media_active)) { in re_set_linkspeed()
3846 device_printf(sc->rl_dev, in re_set_linkspeed()
3850 * No link, force MAC to have 100Mbps, full-duplex link. in re_set_linkspeed()
3854 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; in re_set_linkspeed()
3855 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; in re_set_linkspeed()
3868 if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0) in re_setwol()
3871 ifp = sc->rl_ifp; in re_setwol()
3873 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { in re_setwol()
3879 if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) { in re_setwol()
3885 if ((sc->rl_flags & RL_FLAG_WOL_MANLINK) != 0) in re_setwol()
3887 if ((sc->rl_flags & RL_FLAG_WOLRXENB) != 0) in re_setwol()
3893 /* Enable PME. */ in re_setwol()
3894 v = CSR_READ_1(sc, sc->rl_cfg1); in re_setwol()
3898 CSR_WRITE_1(sc, sc->rl_cfg1, v); in re_setwol()
3900 v = CSR_READ_1(sc, sc->rl_cfg3); in re_setwol()
3904 CSR_WRITE_1(sc, sc->rl_cfg3, v); in re_setwol()
3906 v = CSR_READ_1(sc, sc->rl_cfg5); in re_setwol()
3915 CSR_WRITE_1(sc, sc->rl_cfg5, v); in re_setwol()
3921 (sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) in re_setwol()
3929 /* Request PME if WOL is requested. */ in re_setwol()
3930 pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2); in re_setwol()
3934 pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); in re_setwol()
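The last two fragments are the generic PCI power-management step: after the chip-level WOL bits are set, the driver arms PME# through the PM capability located earlier with pci_find_cap(PCIY_PMG, &pmc). A sketch of that step using the standard pcireg.h names, where PCIM_PSTAT_PME is the (write-one-to-clear) status bit and PCIM_PSTAT_PMEENABLE actually enables the signal:

/* Request PME from the PM capability only if some WOL option is on. */
pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
	pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);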
3945 if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0) in re_clrwol()
3951 v = CSR_READ_1(sc, sc->rl_cfg3); in re_clrwol()
3953 CSR_WRITE_1(sc, sc->rl_cfg3, v); in re_clrwol()
3958 v = CSR_READ_1(sc, sc->rl_cfg5); in re_clrwol()
3961 CSR_WRITE_1(sc, sc->rl_cfg5, v); in re_clrwol()
3971 ctx = device_get_sysctl_ctx(sc->rl_dev); in re_add_sysctls()
3972 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev)); in re_add_sysctls()
3977 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) in re_add_sysctls()
3982 &sc->rl_int_rx_mod, 0, sysctl_hw_re_int_mod, "I", in re_add_sysctls()
3985 sc->rl_int_rx_mod = RL_TIMER_DEFAULT; in re_add_sysctls()
3986 error = resource_int_value(device_get_name(sc->rl_dev), in re_add_sysctls()
3987 device_get_unit(sc->rl_dev), "int_rx_mod", &sc->rl_int_rx_mod); in re_add_sysctls()
3989 if (sc->rl_int_rx_mod < RL_TIMER_MIN || in re_add_sysctls()
3990 sc->rl_int_rx_mod > RL_TIMER_MAX) { in re_add_sysctls()
3991 device_printf(sc->rl_dev, "int_rx_mod value out of " in re_add_sysctls()
3994 sc->rl_int_rx_mod = RL_TIMER_DEFAULT; in re_add_sysctls()
4006 result = -1; in re_sysctl_stats()
4008 if (error || req->newptr == NULL) in re_sysctl_stats()
4014 if ((if_getdrvflags(sc->rl_ifp) & IFF_DRV_RUNNING) == 0) { in re_sysctl_stats()
4018 bus_dmamap_sync(sc->rl_ldata.rl_stag, in re_sysctl_stats()
4019 sc->rl_ldata.rl_smap, BUS_DMASYNC_PREREAD); in re_sysctl_stats()
4021 RL_ADDR_HI(sc->rl_ldata.rl_stats_addr)); in re_sysctl_stats()
4023 RL_ADDR_LO(sc->rl_ldata.rl_stats_addr)); in re_sysctl_stats()
4025 RL_ADDR_LO(sc->rl_ldata.rl_stats_addr | in re_sysctl_stats()
4027 for (i = RL_TIMEOUT; i > 0; i--) { in re_sysctl_stats()
4033 bus_dmamap_sync(sc->rl_ldata.rl_stag, in re_sysctl_stats()
4034 sc->rl_ldata.rl_smap, BUS_DMASYNC_POSTREAD); in re_sysctl_stats()
4037 device_printf(sc->rl_dev, in re_sysctl_stats()
4042 stats = sc->rl_ldata.rl_stats; in re_sysctl_stats()
4043 printf("%s statistics:\n", device_get_nameunit(sc->rl_dev)); in re_sysctl_stats()
4045 (uintmax_t)le64toh(stats->rl_tx_pkts)); in re_sysctl_stats()
4047 (uintmax_t)le64toh(stats->rl_rx_pkts)); in re_sysctl_stats()
4049 (uintmax_t)le64toh(stats->rl_tx_errs)); in re_sysctl_stats()
4051 le32toh(stats->rl_rx_errs)); in re_sysctl_stats()
4053 (uint32_t)le16toh(stats->rl_missed_pkts)); in re_sysctl_stats()
4055 (uint32_t)le16toh(stats->rl_rx_framealign_errs)); in re_sysctl_stats()
4057 le32toh(stats->rl_tx_onecoll)); in re_sysctl_stats()
4059 le32toh(stats->rl_tx_multicolls)); in re_sysctl_stats()
4061 (uintmax_t)le64toh(stats->rl_rx_ucasts)); in re_sysctl_stats()
4063 (uintmax_t)le64toh(stats->rl_rx_bcasts)); in re_sysctl_stats()
4065 le32toh(stats->rl_rx_mcasts)); in re_sysctl_stats()
4067 (uint32_t)le16toh(stats->rl_tx_aborts)); in re_sysctl_stats()
4069 (uint32_t)le16toh(stats->rl_rx_underruns)); in re_sysctl_stats()
4076 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) in sysctl_int_range() argument
4084 if (error || req->newptr == NULL) in sysctl_int_range()
4086 if (value < low || value > high) in sysctl_int_range()
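sysctl_int_range() is the small range-checked integer handler that the int_rx_mod sysctl above is wired through. The complete helper, consistent with the two fragments shown, is a common FreeBSD driver idiom (sketched here rather than quoted):

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	/* Reject values outside [low, high]; otherwise commit the update. */
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;
	return (0);
}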
4109 *nrxr = sc->rl_ldata.rl_rx_desc_cnt; in re_debugnet_init()
4112 (sc->rl_flags & RL_FLAG_JUMBOV2) != 0) ? MJUM9BYTES : MCLBYTES; in re_debugnet_init()
4129 IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0) in re_debugnet_transmit()
4146 (sc->rl_flags & RL_FLAG_LINK) == 0) in re_debugnet_poll()