Lines Matching +full:rx +full:- +full:m

1 /*-
2 * SPDX-License-Identifier: BSD-4-Clause
19 * 4. Neither the name of the author nor the names of any co-contributors
45 * sold by D-Link, Addtron, SMC and Asante. Both parts are
46 * virtually the same, except the 83820 is a 64-bit/32-bit part,
47 * while the 83821 is 32-bit only.
64 * ports. Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP
66 * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern
68 * moderation. The 83820 supports both 64-bit and 32-bit addressing
69 * and data transfers: the 64-bit support can be toggled on or off
76 * - Receive buffers must be aligned on 64-bit boundaries, which means
80 * - In order to transmit jumbo frames larger than 8170 bytes, you have
88 * if the user selects an MTU larger than 8152 (8170 - 18).
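The two constraints above (64-bit aligned receive buffers, and no hardware checksumming once the MTU allows frames longer than 8170 bytes) show up again later in the driver. A minimal sketch of both, assuming the standard mbuf-cluster allocation that nge_newbuf() below uses; the 18 in "8170 - 18" is the Ethernet header (14 bytes) plus CRC (4 bytes):

	#include <sys/param.h>
	#include <sys/mbuf.h>
	#include <net/ethernet.h>

	/* Largest frame the chip can checksum in hardware (8170 bytes on the wire). */
	#define NGE_MAX_CSUM_FRAMELEN	8170
	/* Corresponding MTU limit: 8170 - ETHER_HDR_LEN - ETHER_CRC_LEN = 8152. */
	#define NGE_MAX_CSUM_MTU	(NGE_MAX_CSUM_FRAMELEN - ETHER_HDR_LEN - ETHER_CRC_LEN)

	static struct mbuf *
	nge_rxbuf_sketch(void)
	{
		struct mbuf *m;

		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return (NULL);
		m->m_len = m->m_pkthdr.len = MCLBYTES;
		/*
		 * Trim 8 bytes off the front so the buffer address handed to
		 * the chip satisfies the 64-bit alignment requirement.
		 */
		m_adj(m, sizeof(uint64_t));
		return (m);
	}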
201 * MII bit-bang glue
263 for (idx = (300 / 33) + 1; idx > 0; idx--) in nge_delay()
378 * Read the MII serial port for the MII bit-bang module.
396 * Write the MII serial port for the MII bit-bang module.
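Bit-banging MII means the driver toggles the management clock and data pins in software rather than using a hardware MDIO engine. Below is a generic sketch of the read side; set_mdc(), set_mdio_out() and get_mdio_in() are hypothetical pin helpers standing in for the chip-specific register pokes, and the framing is the standard IEEE 802.3 clause-22 sequence, not the driver's exact code:

	#include <stdint.h>

	static void set_mdc(int level)      { /* toggle the MDC pin; omitted */ (void)level; }
	static void set_mdio_out(int level) { /* drive the MDIO pin; omitted */ (void)level; }
	static int  get_mdio_in(void)       { /* sample the MDIO pin; omitted */ return (0); }

	static uint16_t
	mii_bitbang_read_sketch(int phy, int reg)
	{
		uint32_t frame;
		uint16_t val;
		int i;

		/* 32-bit preamble of ones. */
		for (i = 0; i < 32; i++) {
			set_mdio_out(1);
			set_mdc(0); set_mdc(1);
		}
		/* Start (01), read opcode (10), 5-bit PHY address, 5-bit register. */
		frame = (0x1 << 12) | (0x2 << 10) | ((phy & 0x1f) << 5) | (reg & 0x1f);
		for (i = 13; i >= 0; i--) {
			set_mdio_out((frame >> i) & 1);
			set_mdc(0); set_mdc(1);
		}
		/* Turnaround: two clocks with the data line released. */
		set_mdc(0); set_mdc(1);
		set_mdc(0); set_mdc(1);
		/* Clock in the 16-bit result, MSB first. */
		for (val = 0, i = 0; i < 16; i++) {
			set_mdc(0);
			set_mdc(1);
			val = (val << 1) | (get_mdio_in() & 1);
		}
		return (val);
	}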
417 if ((sc->nge_flags & NGE_FLAG_TBI) != 0) { in nge_miibus_readreg()
450 device_printf(sc->nge_dev, in nge_miibus_readreg()
466 if ((sc->nge_flags & NGE_FLAG_TBI) != 0) { in nge_miibus_writereg()
492 device_printf(sc->nge_dev, in nge_miibus_writereg()
521 mii = device_get_softc(sc->nge_miibus); in nge_miibus_statchg()
522 ifp = sc->nge_ifp; in nge_miibus_statchg()
527 sc->nge_flags &= ~NGE_FLAG_LINK; in nge_miibus_statchg()
528 if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) == in nge_miibus_statchg()
530 switch (IFM_SUBTYPE(mii->mii_media_active)) { in nge_miibus_statchg()
537 sc->nge_flags |= NGE_FLAG_LINK; in nge_miibus_statchg()
544 /* Stop Tx/Rx MACs. */ in nge_miibus_statchg()
546 device_printf(sc->nge_dev, in nge_miibus_statchg()
547 "%s: unable to stop Tx/Rx MAC\n", __func__); in nge_miibus_statchg()
550 if (sc->nge_head != NULL) { in nge_miibus_statchg()
551 m_freem(sc->nge_head); in nge_miibus_statchg()
552 sc->nge_head = sc->nge_tail = NULL; in nge_miibus_statchg()
557 txd = &sc->nge_cdata.nge_txdesc[i]; in nge_miibus_statchg()
558 if (txd->tx_m != NULL) { in nge_miibus_statchg()
559 bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, in nge_miibus_statchg()
560 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); in nge_miibus_statchg()
561 bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, in nge_miibus_statchg()
562 txd->tx_dmamap); in nge_miibus_statchg()
563 m_freem(txd->tx_m); in nge_miibus_statchg()
564 txd->tx_m = NULL; in nge_miibus_statchg()
569 if ((sc->nge_flags & NGE_FLAG_LINK) != 0) { in nge_miibus_statchg()
570 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { in nge_miibus_statchg()
575 /* Enable flow-control. */ in nge_miibus_statchg()
576 if ((IFM_OPTIONS(mii->mii_media_active) & in nge_miibus_statchg()
589 switch (IFM_SUBTYPE(mii->mii_media_active)) { in nge_miibus_statchg()
602 /* Reset Tx/Rx MAC. */ in nge_miibus_statchg()
620 device_printf(sc->nge_dev, in nge_miibus_statchg()
621 "%s: unable to reset Tx/Rx MAC\n", __func__); in nge_miibus_statchg()
622 /* Reuse Rx buffer and reset consumer pointer. */ in nge_miibus_statchg()
623 sc->nge_cdata.nge_rx_cons = 0; in nge_miibus_statchg()
625 * It seems that resetting Rx/Tx MAC results in in nge_miibus_statchg()
626 * resetting Tx/Rx descriptor pointer registers such in nge_miibus_statchg()
627 * that the Tx/Rx list addresses need to be reloaded. in nge_miibus_statchg()
630 NGE_ADDR_HI(sc->nge_rdata.nge_rx_ring_paddr)); in nge_miibus_statchg()
632 NGE_ADDR_LO(sc->nge_rdata.nge_rx_ring_paddr)); in nge_miibus_statchg()
634 NGE_ADDR_HI(sc->nge_rdata.nge_tx_ring_paddr)); in nge_miibus_statchg()
636 NGE_ADDR_LO(sc->nge_rdata.nge_tx_ring_paddr)); in nge_miibus_statchg()
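Resetting the MAC loses the descriptor pointer registers, so the ring base addresses are written back as high/low halves. The NGE_ADDR_HI()/NGE_ADDR_LO() macros are presumably just a split of the 64-bit bus address along these lines (an assumption for illustration; the real definitions live in the register header):

	/* Assumed definitions, for illustration only. */
	#define NGE_ADDR_LO(paddr)	((uint32_t)((paddr) & 0xffffffffUL))
	#define NGE_ADDR_HI(paddr)	((uint32_t)(((uint64_t)(paddr) >> 32) & 0xffffffffUL))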
640 /* Restart Rx MAC. */ in nge_miibus_statchg()
650 device_printf(sc->nge_dev, in nge_miibus_statchg()
651 "%s: unable to restart Rx MAC\n", __func__); in nge_miibus_statchg()
655 if ((sc->nge_flags & NGE_FLAG_TBI) != 0) in nge_miibus_statchg()
669 * bits represent the 16-bit word in the mcast hash table in nge_write_maddr()
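The 2048-bit multicast hash filter is programmed 16 bits at a time, so a hash value is split into a word index and a bit position. A sketch of the usual indexing arithmetic (how the hash itself is derived from the multicast address is chip-specific and not shown here):

	#include <stdint.h>

	/* h is an 11-bit hash value (0..2047) derived from the multicast address. */
	static void
	mcast_hash_set(uint16_t table[128], uint32_t h)
	{
		table[h >> 4] |= 1 << (h & 0xf);	/* 128 words x 16 bits = 2048 bits */
	}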
689 ifp = sc->nge_ifp; in nge_rxfilter()
691 /* Make sure to stop Rx filtering. */ in nge_rxfilter()
763 device_printf(sc->nge_dev, "reset never completed\n"); in nge_reset()
775 /* Clear WOL events which may interfere with normal Rx filter operation. */ in nge_reset()
799 while (t->nge_name != NULL) { in nge_probe()
800 if ((pci_get_vendor(dev) == t->nge_vid) && in nge_probe()
801 (pci_get_device(dev) == t->nge_did)) { in nge_probe()
802 device_set_desc(dev, t->nge_name); in nge_probe()
826 sc->nge_dev = dev; in nge_attach()
829 callout_init_mtx(&sc->nge_stat_ch, &sc->nge_mtx, 0); in nge_attach()
837 sc->nge_res_type = SYS_RES_IOPORT; in nge_attach()
838 sc->nge_res_id = PCIR_BAR(0); in nge_attach()
840 sc->nge_res_type = SYS_RES_MEMORY; in nge_attach()
841 sc->nge_res_id = PCIR_BAR(1); in nge_attach()
843 sc->nge_res = bus_alloc_resource_any(dev, sc->nge_res_type, in nge_attach()
844 &sc->nge_res_id, RF_ACTIVE); in nge_attach()
846 if (sc->nge_res == NULL) { in nge_attach()
847 if (sc->nge_res_type == SYS_RES_MEMORY) { in nge_attach()
848 sc->nge_res_type = SYS_RES_IOPORT; in nge_attach()
849 sc->nge_res_id = PCIR_BAR(0); in nge_attach()
851 sc->nge_res_type = SYS_RES_MEMORY; in nge_attach()
852 sc->nge_res_id = PCIR_BAR(1); in nge_attach()
854 sc->nge_res = bus_alloc_resource_any(dev, sc->nge_res_type, in nge_attach()
855 &sc->nge_res_id, RF_ACTIVE); in nge_attach()
856 if (sc->nge_res == NULL) { in nge_attach()
858 sc->nge_res_type == SYS_RES_MEMORY ? "memory" : in nge_attach()
867 sc->nge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, in nge_attach()
870 if (sc->nge_irq == NULL) { in nge_attach()
902 ifp = sc->nge_ifp = if_alloc(IFT_ETHER); in nge_attach()
909 if_setsendqlen(ifp, NGE_TX_RING_CNT - 1); in nge_attach()
918 if (pci_has_pm(sc->nge_dev)) in nge_attach()
923 sc->nge_flags |= NGE_FLAG_TBI; in nge_attach()
936 error = mii_attach(dev, &sc->nge_miibus, ifp, nge_mediachange, in nge_attach()
965 error = bus_setup_intr(dev, sc->nge_irq, INTR_TYPE_NET | INTR_MPSAFE, in nge_attach()
966 NULL, nge_intr, sc, &sc->nge_intrhand); in nge_attach()
985 ifp = sc->nge_ifp; in nge_detach()
994 sc->nge_flags |= NGE_FLAG_DETACH; in nge_detach()
997 callout_drain(&sc->nge_stat_ch); in nge_detach()
1003 if (sc->nge_intrhand != NULL) in nge_detach()
1004 bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand); in nge_detach()
1005 if (sc->nge_irq != NULL) in nge_detach()
1006 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq); in nge_detach()
1007 if (sc->nge_res != NULL) in nge_detach()
1008 bus_release_resource(dev, sc->nge_res_type, sc->nge_res_id, in nge_detach()
1009 sc->nge_res); in nge_detach()
1032 ctx->nge_busaddr = segs[0].ds_addr; in nge_dmamap_cb()
1045 bus_get_dma_tag(sc->nge_dev), /* parent */ in nge_dma_alloc()
1055 &sc->nge_cdata.nge_parent_tag); in nge_dma_alloc()
1057 device_printf(sc->nge_dev, "failed to create parent DMA tag\n"); in nge_dma_alloc()
1061 error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */ in nge_dma_alloc()
1071 &sc->nge_cdata.nge_tx_ring_tag); in nge_dma_alloc()
1073 device_printf(sc->nge_dev, "failed to create Tx ring DMA tag\n"); in nge_dma_alloc()
1077 /* Create tag for Rx ring. */ in nge_dma_alloc()
1078 error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */ in nge_dma_alloc()
1088 &sc->nge_cdata.nge_rx_ring_tag); in nge_dma_alloc()
1090 device_printf(sc->nge_dev, in nge_dma_alloc()
1091 "failed to create Rx ring DMA tag\n"); in nge_dma_alloc()
1096 error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */ in nge_dma_alloc()
1106 &sc->nge_cdata.nge_tx_tag); in nge_dma_alloc()
1108 device_printf(sc->nge_dev, "failed to create Tx DMA tag\n"); in nge_dma_alloc()
1112 /* Create tag for Rx buffers. */ in nge_dma_alloc()
1113 error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */ in nge_dma_alloc()
1123 &sc->nge_cdata.nge_rx_tag); in nge_dma_alloc()
1125 device_printf(sc->nge_dev, "failed to create Rx DMA tag\n"); in nge_dma_alloc()
1130 error = bus_dmamem_alloc(sc->nge_cdata.nge_tx_ring_tag, in nge_dma_alloc()
1131 (void **)&sc->nge_rdata.nge_tx_ring, BUS_DMA_WAITOK | in nge_dma_alloc()
1132 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->nge_cdata.nge_tx_ring_map); in nge_dma_alloc()
1134 device_printf(sc->nge_dev, in nge_dma_alloc()
1140 error = bus_dmamap_load(sc->nge_cdata.nge_tx_ring_tag, in nge_dma_alloc()
1141 sc->nge_cdata.nge_tx_ring_map, sc->nge_rdata.nge_tx_ring, in nge_dma_alloc()
1144 device_printf(sc->nge_dev, in nge_dma_alloc()
1148 sc->nge_rdata.nge_tx_ring_paddr = ctx.nge_busaddr; in nge_dma_alloc()
1150 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ in nge_dma_alloc()
1151 error = bus_dmamem_alloc(sc->nge_cdata.nge_rx_ring_tag, in nge_dma_alloc()
1152 (void **)&sc->nge_rdata.nge_rx_ring, BUS_DMA_WAITOK | in nge_dma_alloc()
1153 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->nge_cdata.nge_rx_ring_map); in nge_dma_alloc()
1155 device_printf(sc->nge_dev, in nge_dma_alloc()
1156 "failed to allocate DMA'able memory for Rx ring\n"); in nge_dma_alloc()
1161 error = bus_dmamap_load(sc->nge_cdata.nge_rx_ring_tag, in nge_dma_alloc()
1162 sc->nge_cdata.nge_rx_ring_map, sc->nge_rdata.nge_rx_ring, in nge_dma_alloc()
1165 device_printf(sc->nge_dev, in nge_dma_alloc()
1166 "failed to load DMA'able memory for Rx ring\n"); in nge_dma_alloc()
1169 sc->nge_rdata.nge_rx_ring_paddr = ctx.nge_busaddr; in nge_dma_alloc()
1173 txd = &sc->nge_cdata.nge_txdesc[i]; in nge_dma_alloc()
1174 txd->tx_m = NULL; in nge_dma_alloc()
1175 txd->tx_dmamap = NULL; in nge_dma_alloc()
1176 error = bus_dmamap_create(sc->nge_cdata.nge_tx_tag, 0, in nge_dma_alloc()
1177 &txd->tx_dmamap); in nge_dma_alloc()
1179 device_printf(sc->nge_dev, in nge_dma_alloc()
1184 /* Create DMA maps for Rx buffers. */ in nge_dma_alloc()
1185 if ((error = bus_dmamap_create(sc->nge_cdata.nge_rx_tag, 0, in nge_dma_alloc()
1186 &sc->nge_cdata.nge_rx_sparemap)) != 0) { in nge_dma_alloc()
1187 device_printf(sc->nge_dev, in nge_dma_alloc()
1188 "failed to create spare Rx dmamap\n"); in nge_dma_alloc()
1192 rxd = &sc->nge_cdata.nge_rxdesc[i]; in nge_dma_alloc()
1193 rxd->rx_m = NULL; in nge_dma_alloc()
1194 rxd->rx_dmamap = NULL; in nge_dma_alloc()
1195 error = bus_dmamap_create(sc->nge_cdata.nge_rx_tag, 0, in nge_dma_alloc()
1196 &rxd->rx_dmamap); in nge_dma_alloc()
1198 device_printf(sc->nge_dev, in nge_dma_alloc()
1199 "failed to create Rx dmamap\n"); in nge_dma_alloc()
1216 if (sc->nge_cdata.nge_tx_ring_tag) { in nge_dma_free()
1217 if (sc->nge_rdata.nge_tx_ring_paddr) in nge_dma_free()
1218 bus_dmamap_unload(sc->nge_cdata.nge_tx_ring_tag, in nge_dma_free()
1219 sc->nge_cdata.nge_tx_ring_map); in nge_dma_free()
1220 if (sc->nge_rdata.nge_tx_ring) in nge_dma_free()
1221 bus_dmamem_free(sc->nge_cdata.nge_tx_ring_tag, in nge_dma_free()
1222 sc->nge_rdata.nge_tx_ring, in nge_dma_free()
1223 sc->nge_cdata.nge_tx_ring_map); in nge_dma_free()
1224 sc->nge_rdata.nge_tx_ring = NULL; in nge_dma_free()
1225 sc->nge_rdata.nge_tx_ring_paddr = 0; in nge_dma_free()
1226 bus_dma_tag_destroy(sc->nge_cdata.nge_tx_ring_tag); in nge_dma_free()
1227 sc->nge_cdata.nge_tx_ring_tag = NULL; in nge_dma_free()
1229 /* Rx ring. */ in nge_dma_free()
1230 if (sc->nge_cdata.nge_rx_ring_tag) { in nge_dma_free()
1231 if (sc->nge_rdata.nge_rx_ring_paddr) in nge_dma_free()
1232 bus_dmamap_unload(sc->nge_cdata.nge_rx_ring_tag, in nge_dma_free()
1233 sc->nge_cdata.nge_rx_ring_map); in nge_dma_free()
1234 if (sc->nge_rdata.nge_rx_ring) in nge_dma_free()
1235 bus_dmamem_free(sc->nge_cdata.nge_rx_ring_tag, in nge_dma_free()
1236 sc->nge_rdata.nge_rx_ring, in nge_dma_free()
1237 sc->nge_cdata.nge_rx_ring_map); in nge_dma_free()
1238 sc->nge_rdata.nge_rx_ring = NULL; in nge_dma_free()
1239 sc->nge_rdata.nge_rx_ring_paddr = 0; in nge_dma_free()
1240 bus_dma_tag_destroy(sc->nge_cdata.nge_rx_ring_tag); in nge_dma_free()
1241 sc->nge_cdata.nge_rx_ring_tag = NULL; in nge_dma_free()
1244 if (sc->nge_cdata.nge_tx_tag) { in nge_dma_free()
1246 txd = &sc->nge_cdata.nge_txdesc[i]; in nge_dma_free()
1247 if (txd->tx_dmamap) { in nge_dma_free()
1248 bus_dmamap_destroy(sc->nge_cdata.nge_tx_tag, in nge_dma_free()
1249 txd->tx_dmamap); in nge_dma_free()
1250 txd->tx_dmamap = NULL; in nge_dma_free()
1253 bus_dma_tag_destroy(sc->nge_cdata.nge_tx_tag); in nge_dma_free()
1254 sc->nge_cdata.nge_tx_tag = NULL; in nge_dma_free()
1256 /* Rx buffers. */ in nge_dma_free()
1257 if (sc->nge_cdata.nge_rx_tag) { in nge_dma_free()
1259 rxd = &sc->nge_cdata.nge_rxdesc[i]; in nge_dma_free()
1260 if (rxd->rx_dmamap) { in nge_dma_free()
1261 bus_dmamap_destroy(sc->nge_cdata.nge_rx_tag, in nge_dma_free()
1262 rxd->rx_dmamap); in nge_dma_free()
1263 rxd->rx_dmamap = NULL; in nge_dma_free()
1266 if (sc->nge_cdata.nge_rx_sparemap) { in nge_dma_free()
1267 bus_dmamap_destroy(sc->nge_cdata.nge_rx_tag, in nge_dma_free()
1268 sc->nge_cdata.nge_rx_sparemap); in nge_dma_free()
1269 sc->nge_cdata.nge_rx_sparemap = 0; in nge_dma_free()
1271 bus_dma_tag_destroy(sc->nge_cdata.nge_rx_tag); in nge_dma_free()
1272 sc->nge_cdata.nge_rx_tag = NULL; in nge_dma_free()
1275 if (sc->nge_cdata.nge_parent_tag) { in nge_dma_free()
1276 bus_dma_tag_destroy(sc->nge_cdata.nge_parent_tag); in nge_dma_free()
1277 sc->nge_cdata.nge_parent_tag = NULL; in nge_dma_free()
1292 sc->nge_cdata.nge_tx_prod = 0; in nge_list_tx_init()
1293 sc->nge_cdata.nge_tx_cons = 0; in nge_list_tx_init()
1294 sc->nge_cdata.nge_tx_cnt = 0; in nge_list_tx_init()
1296 rd = &sc->nge_rdata; in nge_list_tx_init()
1297 bzero(rd->nge_tx_ring, sizeof(struct nge_desc) * NGE_TX_RING_CNT); in nge_list_tx_init()
1299 if (i == NGE_TX_RING_CNT - 1) in nge_list_tx_init()
1303 rd->nge_tx_ring[i].nge_next = htole32(NGE_ADDR_LO(addr)); in nge_list_tx_init()
1304 txd = &sc->nge_cdata.nge_txdesc[i]; in nge_list_tx_init()
1305 txd->tx_m = NULL; in nge_list_tx_init()
1308 bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag, in nge_list_tx_init()
1309 sc->nge_cdata.nge_tx_ring_map, in nge_list_tx_init()
1316 * Initialize the RX descriptors and allocate mbufs for them. Note that
1327 sc->nge_cdata.nge_rx_cons = 0; in nge_list_rx_init()
1328 sc->nge_head = sc->nge_tail = NULL; in nge_list_rx_init()
1330 rd = &sc->nge_rdata; in nge_list_rx_init()
1331 bzero(rd->nge_rx_ring, sizeof(struct nge_desc) * NGE_RX_RING_CNT); in nge_list_rx_init()
1335 if (i == NGE_RX_RING_CNT - 1) in nge_list_rx_init()
1339 rd->nge_rx_ring[i].nge_next = htole32(NGE_ADDR_LO(addr)); in nge_list_rx_init()
1342 bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag, in nge_list_rx_init()
1343 sc->nge_cdata.nge_rx_ring_map, in nge_list_rx_init()
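Both rings are built as circular singly linked lists in host memory: each descriptor's "next" field holds the bus address of the following descriptor, and the last entry points back to the first. A self-contained sketch of that linkage, with an illustrative descriptor layout (the real struct nge_desc fields and widths may differ):

	#include <stdint.h>
	#include <string.h>

	#define RING_CNT 4

	struct desc {
		uint32_t next;		/* bus address of the next descriptor */
		uint32_t ptr;		/* bus address of the data buffer */
		uint32_t cmdsts;
		uint32_t extsts;
	};

	/* Link RING_CNT descriptors into a circular list, given the ring's bus address. */
	static void
	ring_link(struct desc *ring, uint32_t ring_paddr)
	{
		int i, next;

		memset(ring, 0, sizeof(struct desc) * RING_CNT);
		for (i = 0; i < RING_CNT; i++) {
			next = (i == RING_CNT - 1) ? 0 : i + 1;	/* wrap at the end */
			ring[i].next = ring_paddr + next * sizeof(struct desc);
		}
	}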
1354 desc = &sc->nge_rdata.nge_rx_ring[idx]; in nge_discard_rxbuf()
1355 desc->nge_cmdsts = htole32(MCLBYTES - sizeof(uint64_t)); in nge_discard_rxbuf()
1356 desc->nge_extsts = 0; in nge_discard_rxbuf()
1360 * Initialize an RX descriptor and attach an MBUF cluster.
1367 struct mbuf *m; in nge_newbuf() local
1372 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); in nge_newbuf()
1373 if (m == NULL) in nge_newbuf()
1375 m->m_len = m->m_pkthdr.len = MCLBYTES; in nge_newbuf()
1376 m_adj(m, sizeof(uint64_t)); in nge_newbuf()
1378 if (bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_rx_tag, in nge_newbuf()
1379 sc->nge_cdata.nge_rx_sparemap, m, segs, &nsegs, 0) != 0) { in nge_newbuf()
1380 m_freem(m); in nge_newbuf()
1385 rxd = &sc->nge_cdata.nge_rxdesc[idx]; in nge_newbuf()
1386 if (rxd->rx_m != NULL) { in nge_newbuf()
1387 bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap, in nge_newbuf()
1389 bus_dmamap_unload(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap); in nge_newbuf()
1391 map = rxd->rx_dmamap; in nge_newbuf()
1392 rxd->rx_dmamap = sc->nge_cdata.nge_rx_sparemap; in nge_newbuf()
1393 sc->nge_cdata.nge_rx_sparemap = map; in nge_newbuf()
1394 bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap, in nge_newbuf()
1396 rxd->rx_m = m; in nge_newbuf()
1397 desc = &sc->nge_rdata.nge_rx_ring[idx]; in nge_newbuf()
1398 desc->nge_ptr = htole32(NGE_ADDR_LO(segs[0].ds_addr)); in nge_newbuf()
1399 desc->nge_cmdsts = htole32(segs[0].ds_len); in nge_newbuf()
1400 desc->nge_extsts = 0; in nge_newbuf()
1407 nge_fixup_rx(struct mbuf *m) in nge_fixup_rx() argument
1412 src = mtod(m, uint16_t *); in nge_fixup_rx()
1413 dst = src - 1; in nge_fixup_rx()
1415 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) in nge_fixup_rx()
1418 m->m_data -= ETHER_ALIGN; in nge_fixup_rx()
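Pieced together from the fragments above, nge_fixup_rx() copies the whole frame two bytes toward the start of the buffer so the IP header ends up naturally aligned on strict-alignment machines; roughly:

	static void
	nge_fixup_rx(struct mbuf *m)
	{
		int i;
		uint16_t *src, *dst;

		src = mtod(m, uint16_t *);
		dst = src - 1;

		/* Copy the frame down by one 16-bit word (ETHER_ALIGN bytes). */
		for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
			*dst++ = *src++;

		/* Point m_data at the new start of the frame. */
		m->m_data -= ETHER_ALIGN;
	}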
1429 struct mbuf *m; in nge_rxeof() local
1438 ifp = sc->nge_ifp; in nge_rxeof()
1439 cons = sc->nge_cdata.nge_rx_cons; in nge_rxeof()
1442 bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag, in nge_rxeof()
1443 sc->nge_cdata.nge_rx_ring_map, in nge_rxeof()
1451 if (sc->rxcycles <= 0) in nge_rxeof()
1453 sc->rxcycles--; in nge_rxeof()
1456 cur_rx = &sc->nge_rdata.nge_rx_ring[cons]; in nge_rxeof()
1457 cmdsts = le32toh(cur_rx->nge_cmdsts); in nge_rxeof()
1458 extsts = le32toh(cur_rx->nge_extsts); in nge_rxeof()
1462 rxd = &sc->nge_cdata.nge_rxdesc[cons]; in nge_rxeof()
1463 m = rxd->rx_m; in nge_rxeof()
1469 if (sc->nge_head != NULL) { in nge_rxeof()
1470 m_freem(sc->nge_head); in nge_rxeof()
1471 sc->nge_head = sc->nge_tail = NULL; in nge_rxeof()
1476 m->m_len = total_len; in nge_rxeof()
1477 if (sc->nge_head == NULL) { in nge_rxeof()
1478 m->m_pkthdr.len = total_len; in nge_rxeof()
1479 sc->nge_head = sc->nge_tail = m; in nge_rxeof()
1481 m->m_flags &= ~M_PKTHDR; in nge_rxeof()
1482 sc->nge_head->m_pkthdr.len += total_len; in nge_rxeof()
1483 sc->nge_tail->m_next = m; in nge_rxeof()
1484 sc->nge_tail = m; in nge_rxeof()
1492 * it should simply get re-used next time this descriptor in nge_rxeof()
1497 total_len >= (ETHER_MIN_LEN - ETHER_CRC_LEN - 4)) { in nge_rxeof()
1499 * Work around a hardware bug: accept runt frames in nge_rxeof()
1506 if (sc->nge_head != NULL) { in nge_rxeof()
1507 m_freem(sc->nge_head); in nge_rxeof()
1508 sc->nge_head = sc->nge_tail = NULL; in nge_rxeof()
1519 if (sc->nge_head != NULL) { in nge_rxeof()
1520 m_freem(sc->nge_head); in nge_rxeof()
1521 sc->nge_head = sc->nge_tail = NULL; in nge_rxeof()
1528 if (sc->nge_head != NULL) { in nge_rxeof()
1529 m->m_len = total_len; in nge_rxeof()
1530 m->m_flags &= ~M_PKTHDR; in nge_rxeof()
1531 sc->nge_tail->m_next = m; in nge_rxeof()
1532 m = sc->nge_head; in nge_rxeof()
1533 m->m_pkthdr.len += total_len; in nge_rxeof()
1534 sc->nge_head = sc->nge_tail = NULL; in nge_rxeof()
1536 m->m_pkthdr.len = m->m_len = total_len; in nge_rxeof()
1541 * on receive buffers. RX buffers must be 64-bit aligned. in nge_rxeof()
1545 * on the non-strict alignment platform. The performance hit in nge_rxeof()
1552 nge_fixup_rx(m); in nge_rxeof()
1554 m->m_pkthdr.rcvif = ifp; in nge_rxeof()
1560 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; in nge_rxeof()
1562 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; in nge_rxeof()
1567 m->m_pkthdr.csum_flags |= in nge_rxeof()
1569 m->m_pkthdr.csum_data = 0xffff; in nge_rxeof()
1579 m->m_pkthdr.ether_vtag = in nge_rxeof()
1581 m->m_flags |= M_VLANTAG; in nge_rxeof()
1584 if_input(ifp, m); in nge_rxeof()
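The fragments above show the Rx path translating hardware status into stack metadata before handing the frame to if_input(). A hedged sketch of the complete mapping; the NGE_RXEXTSTS_* bit names are assumptions, while the CSUM_* flags and M_VLANTAG are the stock FreeBSD mbuf constants:

	static void
	rx_meta_sketch(struct mbuf *m, uint32_t extsts)
	{
		/* IP header checksum was checked; mark it valid if no error bit is set. */
		m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
		if ((extsts & NGE_RXEXTSTS_IPCSUMERR) == 0)	/* bit name assumed */
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

		/* A good TCP/UDP checksum is reported as a full pseudo-header checksum. */
		if ((extsts & (NGE_RXEXTSTS_TCPCSUMERR | NGE_RXEXTSTS_UDPCSUMERR)) == 0) {
			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}

		/* Hardware-stripped VLAN tag, if the frame carried one. */
		if ((extsts & NGE_RXEXTSTS_VLANPKT) != 0) {	/* bit name assumed */
			m->m_pkthdr.ether_vtag = bswap16(extsts & NGE_RXEXTSTS_VTCI);
			m->m_flags |= M_VLANTAG;
		}
	}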
1590 sc->nge_cdata.nge_rx_cons = cons; in nge_rxeof()
1591 bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag, in nge_rxeof()
1592 sc->nge_cdata.nge_rx_ring_map, in nge_rxeof()
1612 ifp = sc->nge_ifp; in nge_txeof()
1614 cons = sc->nge_cdata.nge_tx_cons; in nge_txeof()
1615 prod = sc->nge_cdata.nge_tx_prod; in nge_txeof()
1619 bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag, in nge_txeof()
1620 sc->nge_cdata.nge_tx_ring_map, in nge_txeof()
1628 cur_tx = &sc->nge_rdata.nge_tx_ring[cons]; in nge_txeof()
1629 cmdsts = le32toh(cur_tx->nge_cmdsts); in nge_txeof()
1632 sc->nge_cdata.nge_tx_cnt--; in nge_txeof()
1637 txd = &sc->nge_cdata.nge_txdesc[cons]; in nge_txeof()
1638 bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, txd->tx_dmamap, in nge_txeof()
1640 bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, txd->tx_dmamap); in nge_txeof()
1651 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n", in nge_txeof()
1653 m_freem(txd->tx_m); in nge_txeof()
1654 txd->tx_m = NULL; in nge_txeof()
1657 sc->nge_cdata.nge_tx_cons = cons; in nge_txeof()
1658 if (sc->nge_cdata.nge_tx_cnt == 0) in nge_txeof()
1659 sc->nge_watchdog_timer = 0; in nge_txeof()
1670 mii = device_get_softc(sc->nge_miibus); in nge_tick()
1679 if ((sc->nge_flags & NGE_FLAG_LINK) == 0) in nge_tick()
1680 nge_miibus_statchg(sc->nge_dev); in nge_tick()
1683 callout_reset(&sc->nge_stat_ch, hz, nge_tick, sc); in nge_tick()
1694 ifp = sc->nge_ifp; in nge_stats_update()
1696 stats->rx_pkts_errs = in nge_stats_update()
1698 stats->rx_crc_errs = in nge_stats_update()
1700 stats->rx_fifo_oflows = in nge_stats_update()
1702 stats->rx_align_errs = in nge_stats_update()
1704 stats->rx_sym_errs = in nge_stats_update()
1706 stats->rx_pkts_jumbos = in nge_stats_update()
1708 stats->rx_len_errs = in nge_stats_update()
1710 stats->rx_unctl_frames = in nge_stats_update()
1712 stats->rx_pause = in nge_stats_update()
1714 stats->tx_pause = in nge_stats_update()
1716 stats->tx_seq_errs = in nge_stats_update()
1720 * Since we accept errored frames, exclude Rx length errors. in nge_stats_update()
1723 stats->rx_pkts_errs + stats->rx_crc_errs + in nge_stats_update()
1724 stats->rx_fifo_oflows + stats->rx_sym_errs); in nge_stats_update()
1726 nstats = &sc->nge_stats; in nge_stats_update()
1727 nstats->rx_pkts_errs += stats->rx_pkts_errs; in nge_stats_update()
1728 nstats->rx_crc_errs += stats->rx_crc_errs; in nge_stats_update()
1729 nstats->rx_fifo_oflows += stats->rx_fifo_oflows; in nge_stats_update()
1730 nstats->rx_align_errs += stats->rx_align_errs; in nge_stats_update()
1731 nstats->rx_sym_errs += stats->rx_sym_errs; in nge_stats_update()
1732 nstats->rx_pkts_jumbos += stats->rx_pkts_jumbos; in nge_stats_update()
1733 nstats->rx_len_errs += stats->rx_len_errs; in nge_stats_update()
1734 nstats->rx_unctl_frames += stats->rx_unctl_frames; in nge_stats_update()
1735 nstats->rx_pause += stats->rx_pause; in nge_stats_update()
1736 nstats->tx_pause += stats->tx_pause; in nge_stats_update()
1737 nstats->tx_seq_errs += stats->tx_seq_errs; in nge_stats_update()
1764 sc->rxcycles = count; in nge_poll()
1770 if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) { in nge_poll()
1800 ifp = sc->nge_ifp; in nge_intr()
1804 if ((sc->nge_flags & NGE_FLAG_SUSPENDED) != 0) in nge_intr()
1822 if ((sc->nge_flags & NGE_FLAG_TBI) != 0) in nge_intr()
1847 /* Re-enable interrupts. */ in nge_intr()
1854 if ((sc->nge_flags & NGE_FLAG_TBI) != 0) in nge_intr()
1871 struct mbuf *m; in nge_encap() local
1878 m = *m_head; in nge_encap()
1879 prod = sc->nge_cdata.nge_tx_prod; in nge_encap()
1880 txd = &sc->nge_cdata.nge_txdesc[prod]; in nge_encap()
1882 map = txd->tx_dmamap; in nge_encap()
1883 error = bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_tx_tag, map, in nge_encap()
1886 m = m_collapse(*m_head, M_NOWAIT, NGE_MAXTXSEGS); in nge_encap()
1887 if (m == NULL) { in nge_encap()
1892 *m_head = m; in nge_encap()
1893 error = bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_tx_tag, in nge_encap()
1909 if (sc->nge_cdata.nge_tx_cnt + nsegs >= (NGE_TX_RING_CNT - 1)) { in nge_encap()
1910 bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, map); in nge_encap()
1914 bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, map, BUS_DMASYNC_PREWRITE); in nge_encap()
1918 desc = &sc->nge_rdata.nge_tx_ring[prod]; in nge_encap()
1919 desc->nge_ptr = htole32(NGE_ADDR_LO(txsegs[i].ds_addr)); in nge_encap()
1921 desc->nge_cmdsts = htole32(txsegs[i].ds_len | in nge_encap()
1924 desc->nge_cmdsts = htole32(txsegs[i].ds_len | in nge_encap()
1926 desc->nge_extsts = 0; in nge_encap()
1927 sc->nge_cdata.nge_tx_cnt++; in nge_encap()
1931 sc->nge_cdata.nge_tx_prod = prod; in nge_encap()
1933 prod = (prod + NGE_TX_RING_CNT - 1) % NGE_TX_RING_CNT; in nge_encap()
1934 desc = &sc->nge_rdata.nge_tx_ring[prod]; in nge_encap()
1936 if ((m->m_flags & M_VLANTAG) != 0) in nge_encap()
1937 desc->nge_extsts |= htole32(NGE_TXEXTSTS_VLANPKT | in nge_encap()
1938 bswap16(m->m_pkthdr.ether_vtag)); in nge_encap()
1940 desc->nge_cmdsts &= htole32(~NGE_CMDSTS_MORE); in nge_encap()
1943 desc = &sc->nge_rdata.nge_tx_ring[si]; in nge_encap()
1944 if ((m->m_pkthdr.csum_flags & NGE_CSUM_FEATURES) != 0) { in nge_encap()
1945 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) in nge_encap()
1946 desc->nge_extsts |= htole32(NGE_TXEXTSTS_IPCSUM); in nge_encap()
1947 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) in nge_encap()
1948 desc->nge_extsts |= htole32(NGE_TXEXTSTS_TCPCSUM); in nge_encap()
1949 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) in nge_encap()
1950 desc->nge_extsts |= htole32(NGE_TXEXTSTS_UDPCSUM); in nge_encap()
1953 desc->nge_cmdsts |= htole32(NGE_CMDSTS_OWN); in nge_encap()
1955 txd = &sc->nge_cdata.nge_txdesc[prod]; in nge_encap()
1956 map = txd_last->tx_dmamap; in nge_encap()
1957 txd_last->tx_dmamap = txd->tx_dmamap; in nge_encap()
1958 txd->tx_dmamap = map; in nge_encap()
1959 txd->tx_m = m; in nge_encap()
1994 IFF_DRV_RUNNING || (sc->nge_flags & NGE_FLAG_LINK) == 0) in nge_start_locked()
1998 sc->nge_cdata.nge_tx_cnt < NGE_TX_RING_CNT - 2; ) { in nge_start_locked()
2024 bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag, in nge_start_locked()
2025 sc->nge_cdata.nge_tx_ring_map, in nge_start_locked()
2031 sc->nge_watchdog_timer = 5; in nge_start_locked()
2048 if_t ifp = sc->nge_ifp; in nge_init_locked()
2059 * Cancel pending I/O and free all RX/TX buffers. in nge_init_locked()
2066 /* Disable Rx filter prior to programming Rx filter. */ in nge_init_locked()
2070 mii = device_get_softc(sc->nge_miibus); in nge_init_locked()
2073 eaddr = if_getlladdr(sc->nge_ifp); in nge_init_locked()
2081 /* Init circular RX list. */ in nge_init_locked()
2083 device_printf(sc->nge_dev, "initialization failed: no " in nge_init_locked()
2084 "memory for rx buffers\n"); in nge_init_locked()
2094 /* Set Rx filter. */ in nge_init_locked()
2102 * Rx stat FIFO hi-threshold : 2 or more packets in nge_init_locked()
2103 * Rx stat FIFO lo-threshold : less than 2 packets in nge_init_locked()
2104 * Rx data FIFO hi-threshold : 2K or more bytes in nge_init_locked()
2105 * Rx data FIFO lo-threshold : less than 2K bytes in nge_init_locked()
2106 * pause time : (512ns * 0xffff) -> 33.55ms in nge_init_locked()
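(The 33.55 ms pause time follows directly from the counter width: 0xffff slots of 512 ns each, i.e. 65,535 × 512 ns ≈ 33.55 ms.)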
2118 * Load the address of the RX and TX lists. in nge_init_locked()
2121 NGE_ADDR_HI(sc->nge_rdata.nge_rx_ring_paddr)); in nge_init_locked()
2123 NGE_ADDR_LO(sc->nge_rdata.nge_rx_ring_paddr)); in nge_init_locked()
2125 NGE_ADDR_HI(sc->nge_rdata.nge_tx_ring_paddr)); in nge_init_locked()
2127 NGE_ADDR_LO(sc->nge_rdata.nge_tx_ring_paddr)); in nge_init_locked()
2129 /* Set RX configuration. */ in nge_init_locked()
2143 * field in the RX descriptors. in nge_init_locked()
2153 * Enable TX IPv4 checksumming on a per-packet basis. in nge_init_locked()
2158 * Tell the chip to insert VLAN tags on a per-packet basis as in nge_init_locked()
2178 CSR_WRITE_4(sc, NGE_IHR, sc->nge_int_holdoff); in nge_init_locked()
2203 sc->nge_flags &= ~NGE_FLAG_LINK; in nge_init_locked()
2206 sc->nge_watchdog_timer = 0; in nge_init_locked()
2207 callout_reset(&sc->nge_stat_ch, hz, nge_tick, sc); in nge_init_locked()
2226 mii = device_get_softc(sc->nge_miibus); in nge_mediachange()
2227 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) in nge_mediachange()
2246 mii = device_get_softc(sc->nge_miibus); in nge_mediastatus()
2248 ifmr->ifm_active = mii->mii_media_active; in nge_mediastatus()
2249 ifmr->ifm_status = mii->mii_media_status; in nge_mediastatus()
2263 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NGE_JUMBO_MTU) in nge_ioctl()
2267 if_setmtu(ifp, ifr->ifr_mtu); in nge_ioctl()
2273 if (ifr->ifr_mtu >= 8152) { in nge_ioctl()
2288 if ((if_getflags(ifp) ^ sc->nge_if_flags) & in nge_ioctl()
2292 if ((sc->nge_flags & NGE_FLAG_DETACH) == 0) in nge_ioctl()
2299 sc->nge_if_flags = if_getflags(ifp); in nge_ioctl()
2312 mii = device_get_softc(sc->nge_miibus); in nge_ioctl()
2313 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); in nge_ioctl()
2317 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); in nge_ioctl()
2403 if (sc->nge_watchdog_timer == 0 || --sc->nge_watchdog_timer) in nge_watchdog()
2406 ifp = sc->nge_ifp; in nge_watchdog()
2445 * RX and TX lists.
2456 ifp = sc->nge_ifp; in nge_stop()
2459 sc->nge_flags &= ~NGE_FLAG_LINK; in nge_stop()
2460 callout_stop(&sc->nge_stat_ch); in nge_stop()
2461 sc->nge_watchdog_timer = 0; in nge_stop()
2466 device_printf(sc->nge_dev, in nge_stop()
2467 "%s: unable to stop Tx/Rx MAC\n", __func__); in nge_stop()
2473 if (sc->nge_head != NULL) { in nge_stop()
2474 m_freem(sc->nge_head); in nge_stop()
2475 sc->nge_head = sc->nge_tail = NULL; in nge_stop()
2479 * Free RX and TX mbufs still in the queues. in nge_stop()
2482 rxd = &sc->nge_cdata.nge_rxdesc[i]; in nge_stop()
2483 if (rxd->rx_m != NULL) { in nge_stop()
2484 bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, in nge_stop()
2485 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); in nge_stop()
2486 bus_dmamap_unload(sc->nge_cdata.nge_rx_tag, in nge_stop()
2487 rxd->rx_dmamap); in nge_stop()
2488 m_freem(rxd->rx_m); in nge_stop()
2489 rxd->rx_m = NULL; in nge_stop()
2493 txd = &sc->nge_cdata.nge_txdesc[i]; in nge_stop()
2494 if (txd->tx_m != NULL) { in nge_stop()
2495 bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, in nge_stop()
2496 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); in nge_stop()
2497 bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, in nge_stop()
2498 txd->tx_dmamap); in nge_stop()
2499 m_freem(txd->tx_m); in nge_stop()
2500 txd->tx_m = NULL; in nge_stop()
2516 if (!pci_has_pm(sc->nge_dev)) in nge_wol()
2519 ifp = sc->nge_ifp; in nge_wol()
2526 device_printf(sc->nge_dev, in nge_wol()
2527 "%s: unable to stop Tx/Rx MAC\n", __func__); in nge_wol()
2529 * Make sure wake frames will be buffered in the Rx FIFO. in nge_wol()
2530 * (i.e. Silent Rx mode.) in nge_wol()
2536 /* Enable Rx again. */ in nge_wol()
2558 pci_enable_pme(sc->nge_dev); in nge_wol()
2582 sc->nge_flags |= NGE_FLAG_SUSPENDED; in nge_suspend()
2597 ifp = sc->nge_ifp; in nge_resume()
2603 sc->nge_flags &= ~NGE_FLAG_SUSPENDED; in nge_resume()
2621 ctx = device_get_sysctl_ctx(sc->nge_dev); in nge_sysctl_node()
2622 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nge_dev)); in nge_sysctl_node()
2624 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->nge_int_holdoff, in nge_sysctl_node()
2627 sc->nge_int_holdoff = NGE_INT_HOLDOFF_DEFAULT; in nge_sysctl_node()
2628 error = resource_int_value(device_get_name(sc->nge_dev), in nge_sysctl_node()
2629 device_get_unit(sc->nge_dev), "int_holdoff", &sc->nge_int_holdoff); in nge_sysctl_node()
2631 if (sc->nge_int_holdoff < NGE_INT_HOLDOFF_MIN || in nge_sysctl_node()
2632 sc->nge_int_holdoff > NGE_INT_HOLDOFF_MAX) { in nge_sysctl_node()
2633 device_printf(sc->nge_dev, in nge_sysctl_node()
2638 sc->nge_int_holdoff = NGE_INT_HOLDOFF_DEFAULT; in nge_sysctl_node()
2642 stats = &sc->nge_stats; in nge_sysctl_node()
2647 /* Rx statistics. */ in nge_sysctl_node()
2648 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", in nge_sysctl_node()
2649 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics"); in nge_sysctl_node()
2652 &stats->rx_pkts_errs, in nge_sysctl_node()
2655 &stats->rx_crc_errs, "CRC errors"); in nge_sysctl_node()
2657 &stats->rx_fifo_oflows, "FIFO overflows"); in nge_sysctl_node()
2659 &stats->rx_align_errs, "Frame alignment errors"); in nge_sysctl_node()
2661 &stats->rx_sym_errs, "One or more symbol errors"); in nge_sysctl_node()
2663 &stats->rx_pkts_jumbos, in nge_sysctl_node()
2666 &stats->rx_len_errs, "In Range Length errors"); in nge_sysctl_node()
2668 &stats->rx_unctl_frames, "Control frames with unsupported opcode"); in nge_sysctl_node()
2670 &stats->rx_pause, "Pause frames"); in nge_sysctl_node()
2677 &stats->tx_pause, "Pause frames"); in nge_sysctl_node()
2679 &stats->tx_seq_errs, in nge_sysctl_node()
2694 if (error != 0 || req->newptr == NULL) in sysctl_int_range()
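Only one line of sysctl_int_range() made it into the match list; it is the usual bounded-integer sysctl handler found in many FreeBSD NIC drivers, used here to clamp int_holdoff to NGE_INT_HOLDOFF_MIN..MAX. A sketch of that pattern, with the argument plumbing assumed from the one visible line:

	static int
	sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
	{
		int error, value;

		if (arg1 == NULL)
			return (EINVAL);
		value = *(int *)arg1;
		error = sysctl_handle_int(oidp, &value, 0, req);
		/* Read-only access or an error: nothing to validate. */
		if (error != 0 || req->newptr == NULL)
			return (error);
		if (value < low || value > high)
			return (EINVAL);
		*(int *)arg1 = value;
		return (0);
	}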