Lines Matching +full:rx +full:- +full:input +full:- +full:m
1 /*-
2 * SPDX-License-Identifier: BSD-4-Clause
19 * 4. Neither the name of the author nor the names of any co-contributors
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QoS applications).
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
60 * brand name, which is functionally similar but lacks PCI-X support.
63 * and the use of the mini RX ring is disabled. This seems to imply
65 * result, this driver does not implement any support for the mini RX
119 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
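ETHER_MIN_NOPAD above is the shortest legal Ethernet frame excluding the CRC. A minimal sketch of the padding idiom it supports, assuming an mbuf chain m and an illustrative static zero buffer (bge pads this way for chips with the short-DMA erratum; the name zeropad is illustrative):

    static const uint8_t zeropad[ETHER_MIN_NOPAD];  /* illustrative name */

    /* Pad runt frames up to the minimum length before DMA. */
    if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
        !m_append(m, ETHER_MIN_NOPAD - m->m_pkthdr.len, zeropad)) {
            m_freem(m);             /* could not extend the chain; drop */
            m = NULL;
    }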
363 #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
364 #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
365 #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
366 #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
367 #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
368 #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
369 #define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5717_PLUS)
370 #define BGE_IS_57765_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_57765_PLUS)
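The BGE_IS_* predicates above gate chip-family-specific paths throughout the driver, so quirks are keyed to a family flag instead of long lists of individual ASIC revisions. A minimal usage sketch, following patterns visible later in this listing:

    if (BGE_IS_5755_PLUS(sc))                       /* family-wide quirk */
            sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
    if (BGE_IS_JUMBO_CAPABLE(sc) && if_getmtu(ifp) > ETHERMTU)
            error = bge_init_rx_ring_jumbo(sc);     /* jumbo ring needed */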
491 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
535 nitems(bge_devs) - 1);
557 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && in bge_readmem_ind()
561 dev = sc->bge_dev; in bge_readmem_ind()
574 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && in bge_writemem_ind()
578 dev = sc->bge_dev; in bge_writemem_ind()
591 dev = sc->bge_dev; in bge_readreg_ind()
603 dev = sc->bge_dev; in bge_writereg_ind()
618 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) in bge_writembx()
619 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI; in bge_writembx()
622 if ((sc->bge_flags & BGE_FLAG_MBOX_REORDER) != 0) in bge_writembx()
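The two fragments above belong to one small helper; a sketch of the whole routine as the listing implies it, with CSR_WRITE_4/CSR_READ_4 being the driver's register accessors. The read-back defeats PCI-X bridges that reorder posted mailbox writes:

    static void
    bge_writembx(struct bge_softc *sc, int off, int val)
    {
            /* The BCM5906 exposes its mailboxes at a different offset. */
            if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
                    off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

            CSR_WRITE_4(sc, off, val);
            if ((sc->bge_flags & BGE_FLAG_MBOX_REORDER) != 0)
                    CSR_READ_4(sc, off);    /* read back to flush the write */
    }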
635 if (sc->bge_asicrev == BGE_ASICREV_BCM5761) in bge_ape_lock_init()
650 if (sc->bge_func_addr == 0) in bge_ape_lock_init()
653 bit = (1 << sc->bge_func_addr); in bge_ape_lock_init()
659 switch (sc->bge_func_addr) { in bge_ape_lock_init()
661 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0; in bge_ape_lock_init()
664 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1; in bge_ape_lock_init()
667 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2; in bge_ape_lock_init()
670 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3; in bge_ape_lock_init()
673 device_printf(sc->bge_dev, in bge_ape_lock_init()
690 sc->bge_mfw_flags &= ~BGE_MFW_ON_APE; in bge_ape_read_fw_ver()
697 device_printf(sc->bge_dev, "APE signature found " in bge_ape_read_fw_ver()
702 sc->bge_mfw_flags |= BGE_MFW_ON_APE; in bge_ape_read_fw_ver()
708 sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI; in bge_ape_read_fw_ver()
711 sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH; in bge_ape_read_fw_ver()
717 device_printf(sc->bge_dev, "APE FW version: %s v%d.%d.%d.%d\n", in bge_ape_read_fw_ver()
731 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) in bge_ape_lock()
735 if (sc->bge_asicrev == BGE_ASICREV_BCM5761) { in bge_ape_lock()
748 if (sc->bge_asicrev == BGE_ASICREV_BCM5761) in bge_ape_lock()
750 if (sc->bge_func_addr == 0) in bge_ape_lock()
753 bit = (1 << sc->bge_func_addr); in bge_ape_lock()
757 if (sc->bge_func_addr == 0) in bge_ape_lock()
760 bit = (1 << sc->bge_func_addr); in bge_ape_lock()
764 if (sc->bge_func_addr == 0) in bge_ape_lock()
767 bit = (1 << sc->bge_func_addr); in bge_ape_lock()
793 device_printf(sc->bge_dev, "APE lock %d request failed! " in bge_ape_lock()
811 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) in bge_ape_unlock()
814 if (sc->bge_asicrev == BGE_ASICREV_BCM5761) in bge_ape_unlock()
823 if (sc->bge_asicrev == BGE_ASICREV_BCM5761) in bge_ape_unlock()
825 if (sc->bge_func_addr == 0) in bge_ape_unlock()
828 bit = (1 << sc->bge_func_addr); in bge_ape_unlock()
831 if (sc->bge_func_addr == 0) in bge_ape_unlock()
834 bit = (1 << sc->bge_func_addr); in bge_ape_unlock()
837 if (sc->bge_func_addr == 0) in bge_ape_unlock()
840 bit = (1 << sc->bge_func_addr); in bge_ape_unlock()
865 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) in bge_ape_send_event()
869 for (i = 10; i > 0; i--) { in bge_ape_send_event()
884 device_printf(sc->bge_dev, "APE event 0x%08x send timed out\n", in bge_ape_send_event()
893 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) in bge_ape_driver_state_change()
953 ctx->bge_busaddr = segs->ds_addr; in bge_dma_map_addr()
987 if_printf(sc->bge_ifp, "nvram read timed out\n"); in bge_nvram_getbyte()
1015 if (sc->bge_asicrev != BGE_ASICREV_BCM5906) in bge_read_nvram()
1062 device_printf(sc->bge_dev, "EEPROM read timed out\n"); in bge_eeprom_getbyte()
1102 if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0) in bge_miibus_readreg()
1106 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { in bge_miibus_readreg()
1108 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL); in bge_miibus_readreg()
1127 device_printf(sc->bge_dev, in bge_miibus_readreg()
1134 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { in bge_miibus_readreg()
1135 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode); in bge_miibus_readreg()
1139 bge_ape_unlock(sc, sc->bge_phy_ape_lock); in bge_miibus_readreg()
1155 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && in bge_miibus_writereg()
1159 if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0) in bge_miibus_writereg()
1163 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { in bge_miibus_writereg()
1165 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL); in bge_miibus_writereg()
1182 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { in bge_miibus_writereg()
1183 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode); in bge_miibus_writereg()
1187 bge_ape_unlock(sc, sc->bge_phy_ape_lock); in bge_miibus_writereg()
1190 device_printf(sc->bge_dev, in bge_miibus_writereg()
1205 if ((if_getdrvflags(sc->bge_ifp) & IFF_DRV_RUNNING) == 0) in bge_miibus_statchg()
1207 mii = device_get_softc(sc->bge_miibus); in bge_miibus_statchg()
1209 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == in bge_miibus_statchg()
1211 switch (IFM_SUBTYPE(mii->mii_media_active)) { in bge_miibus_statchg()
1214 sc->bge_link = 1; in bge_miibus_statchg()
1219 if (sc->bge_asicrev != BGE_ASICREV_BCM5906) in bge_miibus_statchg()
1220 sc->bge_link = 1; in bge_miibus_statchg()
1222 sc->bge_link = 0; in bge_miibus_statchg()
1225 sc->bge_link = 0; in bge_miibus_statchg()
1229 sc->bge_link = 0; in bge_miibus_statchg()
1230 if (sc->bge_link == 0) in bge_miibus_statchg()
1245 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || in bge_miibus_statchg()
1246 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) in bge_miibus_statchg()
1254 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { in bge_miibus_statchg()
1255 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) in bge_miibus_statchg()
1257 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) in bge_miibus_statchg()
1274 struct mbuf *m; in bge_newbuf_std() local
1280 if (sc->bge_flags & BGE_FLAG_JUMBO_STD && in bge_newbuf_std()
1281 (if_getmtu(sc->bge_ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN + in bge_newbuf_std()
1282 ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) { in bge_newbuf_std()
1283 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); in bge_newbuf_std()
1284 if (m == NULL) in bge_newbuf_std()
1286 m->m_len = m->m_pkthdr.len = MJUM9BYTES; in bge_newbuf_std()
1288 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); in bge_newbuf_std()
1289 if (m == NULL) in bge_newbuf_std()
1291 m->m_len = m->m_pkthdr.len = MCLBYTES; in bge_newbuf_std()
1293 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) in bge_newbuf_std()
1294 m_adj(m, ETHER_ALIGN); in bge_newbuf_std()
1296 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag, in bge_newbuf_std()
1297 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0); in bge_newbuf_std()
1299 m_freem(m); in bge_newbuf_std()
1302 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { in bge_newbuf_std()
1303 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, in bge_newbuf_std()
1304 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD); in bge_newbuf_std()
1305 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag, in bge_newbuf_std()
1306 sc->bge_cdata.bge_rx_std_dmamap[i]); in bge_newbuf_std()
1308 map = sc->bge_cdata.bge_rx_std_dmamap[i]; in bge_newbuf_std()
1309 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap; in bge_newbuf_std()
1310 sc->bge_cdata.bge_rx_std_sparemap = map; in bge_newbuf_std()
1311 sc->bge_cdata.bge_rx_std_chain[i] = m; in bge_newbuf_std()
1312 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len; in bge_newbuf_std()
1313 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std]; in bge_newbuf_std()
1314 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr); in bge_newbuf_std()
1315 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr); in bge_newbuf_std()
1316 r->bge_flags = BGE_RXBDFLAG_END; in bge_newbuf_std()
1317 r->bge_len = segs[0].ds_len; in bge_newbuf_std()
1318 r->bge_idx = i; in bge_newbuf_std()
1320 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, in bge_newbuf_std()
1321 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD); in bge_newbuf_std()
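The sequence above is the spare-map idiom: the fresh mbuf is loaded into a dedicated spare DMA map first, so a load failure leaves the ring slot's existing mapping and buffer intact; only on success is the old mapping retired and the two maps swapped. A condensed sketch with illustrative names (tag, slot_map, spare_map):

    /* Load into the spare map; on failure the old buffer stays mapped. */
    if (bus_dmamap_load_mbuf_sg(tag, spare_map, m, segs, &nsegs, 0) != 0) {
            m_freem(m);
            return (ENOBUFS);
    }
    /* Success: retire the old mapping, then swap slot and spare maps. */
    bus_dmamap_unload(tag, slot_map[i]);
    map = slot_map[i];
    slot_map[i] = spare_map;
    spare_map = map;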
1336 struct mbuf *m; in bge_newbuf_jumbo() local
1339 MGETHDR(m, M_NOWAIT, MT_DATA); in bge_newbuf_jumbo()
1340 if (m == NULL) in bge_newbuf_jumbo()
1343 if (m_cljget(m, M_NOWAIT, MJUM9BYTES) == NULL) { in bge_newbuf_jumbo()
1344 m_freem(m); in bge_newbuf_jumbo()
1347 m->m_len = m->m_pkthdr.len = MJUM9BYTES; in bge_newbuf_jumbo()
1348 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) in bge_newbuf_jumbo()
1349 m_adj(m, ETHER_ALIGN); in bge_newbuf_jumbo()
1351 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo, in bge_newbuf_jumbo()
1352 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0); in bge_newbuf_jumbo()
1354 m_freem(m); in bge_newbuf_jumbo()
1358 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { in bge_newbuf_jumbo()
1359 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, in bge_newbuf_jumbo()
1360 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD); in bge_newbuf_jumbo()
1361 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, in bge_newbuf_jumbo()
1362 sc->bge_cdata.bge_rx_jumbo_dmamap[i]); in bge_newbuf_jumbo()
1364 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i]; in bge_newbuf_jumbo()
1365 sc->bge_cdata.bge_rx_jumbo_dmamap[i] = in bge_newbuf_jumbo()
1366 sc->bge_cdata.bge_rx_jumbo_sparemap; in bge_newbuf_jumbo()
1367 sc->bge_cdata.bge_rx_jumbo_sparemap = map; in bge_newbuf_jumbo()
1368 sc->bge_cdata.bge_rx_jumbo_chain[i] = m; in bge_newbuf_jumbo()
1369 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0; in bge_newbuf_jumbo()
1370 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0; in bge_newbuf_jumbo()
1371 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0; in bge_newbuf_jumbo()
1372 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0; in bge_newbuf_jumbo()
1375 * Fill in the extended RX buffer descriptor. in bge_newbuf_jumbo()
1377 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo]; in bge_newbuf_jumbo()
1378 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END; in bge_newbuf_jumbo()
1379 r->bge_idx = i; in bge_newbuf_jumbo()
1380 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0; in bge_newbuf_jumbo()
1383 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr); in bge_newbuf_jumbo()
1384 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr); in bge_newbuf_jumbo()
1385 r->bge_len3 = segs[3].ds_len; in bge_newbuf_jumbo()
1386 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len; in bge_newbuf_jumbo()
1388 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr); in bge_newbuf_jumbo()
1389 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr); in bge_newbuf_jumbo()
1390 r->bge_len2 = segs[2].ds_len; in bge_newbuf_jumbo()
1391 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len; in bge_newbuf_jumbo()
1393 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr); in bge_newbuf_jumbo()
1394 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr); in bge_newbuf_jumbo()
1395 r->bge_len1 = segs[1].ds_len; in bge_newbuf_jumbo()
1396 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len; in bge_newbuf_jumbo()
1398 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr); in bge_newbuf_jumbo()
1399 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr); in bge_newbuf_jumbo()
1400 r->bge_len0 = segs[0].ds_len; in bge_newbuf_jumbo()
1401 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len; in bge_newbuf_jumbo()
1407 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, in bge_newbuf_jumbo()
1408 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD); in bge_newbuf_jumbo()
1418 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ); in bge_init_rx_ring_std()
1419 sc->bge_std = 0; in bge_init_rx_ring_std()
1423 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); in bge_init_rx_ring_std()
1426 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, in bge_init_rx_ring_std()
1427 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE); in bge_init_rx_ring_std()
1429 sc->bge_std = 0; in bge_init_rx_ring_std()
1430 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1); in bge_init_rx_ring_std()
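BGE_INC above is the driver's ring-index advance; a sketch of the modular-increment form it is assumed to have in bgereg.h, together with the refill loop it supports in bge_init_rx_ring_std():

    #define BGE_INC(x, y)   (x) = ((x) + 1) % (y)  /* wrap at ring size y */

    for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
            if ((error = bge_newbuf_std(sc, i)) != 0)
                    return (error);
            BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
    }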
1441 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { in bge_free_rx_ring_std()
1442 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, in bge_free_rx_ring_std()
1443 sc->bge_cdata.bge_rx_std_dmamap[i], in bge_free_rx_ring_std()
1445 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag, in bge_free_rx_ring_std()
1446 sc->bge_cdata.bge_rx_std_dmamap[i]); in bge_free_rx_ring_std()
1447 m_freem(sc->bge_cdata.bge_rx_std_chain[i]); in bge_free_rx_ring_std()
1448 sc->bge_cdata.bge_rx_std_chain[i] = NULL; in bge_free_rx_ring_std()
1450 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i], in bge_free_rx_ring_std()
1461 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ); in bge_init_rx_ring_jumbo()
1462 sc->bge_jumbo = 0; in bge_init_rx_ring_jumbo()
1466 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); in bge_init_rx_ring_jumbo()
1469 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, in bge_init_rx_ring_jumbo()
1470 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE); in bge_init_rx_ring_jumbo()
1472 sc->bge_jumbo = 0; in bge_init_rx_ring_jumbo()
1475 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; in bge_init_rx_ring_jumbo()
1476 rcb->bge_maxlen_flags = in bge_init_rx_ring_jumbo()
1478 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); in bge_init_rx_ring_jumbo()
1480 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1); in bge_init_rx_ring_jumbo()
1491 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { in bge_free_rx_ring_jumbo()
1492 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, in bge_free_rx_ring_jumbo()
1493 sc->bge_cdata.bge_rx_jumbo_dmamap[i], in bge_free_rx_ring_jumbo()
1495 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, in bge_free_rx_ring_jumbo()
1496 sc->bge_cdata.bge_rx_jumbo_dmamap[i]); in bge_free_rx_ring_jumbo()
1497 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); in bge_free_rx_ring_jumbo()
1498 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; in bge_free_rx_ring_jumbo()
1500 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i], in bge_free_rx_ring_jumbo()
1510 if (sc->bge_ldata.bge_tx_ring == NULL) in bge_free_tx_ring()
1514 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { in bge_free_tx_ring()
1515 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, in bge_free_tx_ring()
1516 sc->bge_cdata.bge_tx_dmamap[i], in bge_free_tx_ring()
1518 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, in bge_free_tx_ring()
1519 sc->bge_cdata.bge_tx_dmamap[i]); in bge_free_tx_ring()
1520 m_freem(sc->bge_cdata.bge_tx_chain[i]); in bge_free_tx_ring()
1521 sc->bge_cdata.bge_tx_chain[i] = NULL; in bge_free_tx_ring()
1523 bzero((char *)&sc->bge_ldata.bge_tx_ring[i], in bge_free_tx_ring()
1531 sc->bge_txcnt = 0; in bge_init_tx_ring()
1532 sc->bge_tx_saved_considx = 0; in bge_init_tx_ring()
1534 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ); in bge_init_tx_ring()
1535 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, in bge_init_tx_ring()
1536 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE); in bge_init_tx_ring()
1538 /* Initialize transmit producer index for host-memory send ring. */ in bge_init_tx_ring()
1539 sc->bge_tx_prodidx = 0; in bge_init_tx_ring()
1540 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); in bge_init_tx_ring()
1543 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) in bge_init_tx_ring()
1544 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); in bge_init_tx_ring()
1546 /* NIC-memory send ring not used; initialize to zero. */ in bge_init_tx_ring()
1549 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) in bge_init_tx_ring()
1562 ifp = sc->bge_ifp; in bge_setpromisc()
1592 ifp = sc->bge_ifp; in bge_setmulti()
1617 ifp = sc->bge_ifp; in bge_setvlan()
1633 if (sc->bge_asf_mode) in bge_sig_pre_reset()
1636 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { in bge_sig_pre_reset()
1661 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { in bge_sig_post_reset()
1682 if (sc->bge_asf_mode) { in bge_sig_legacy()
1701 if (sc->bge_asf_mode) { in bge_stop_fw()
1738 /* Set endianness before we access any non-PCI registers. */ in bge_chipinit()
1740 if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS) in bge_chipinit()
1742 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4); in bge_chipinit()
1756 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) { in bge_chipinit()
1758 * Fix data corruption caused by non-qword write with WB. in bge_chipinit()
1762 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2); in bge_chipinit()
1764 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2); in bge_chipinit()
1767 if (sc->bge_asicrev == BGE_ASICREV_BCM57765 || in bge_chipinit()
1768 sc->bge_asicrev == BGE_ASICREV_BCM57766) { in bge_chipinit()
1774 if (sc->bge_chiprev != BGE_CHIPREV_57765_AX) { in bge_chipinit()
1786 if (sc->bge_flags & BGE_FLAG_PCIE) { in bge_chipinit()
1787 if (sc->bge_mps >= 256) in bge_chipinit()
1791 } else if (sc->bge_flags & BGE_FLAG_PCIX) { in bge_chipinit()
1796 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ? in bge_chipinit()
1799 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) { in bge_chipinit()
1803 * memory read byte count of the PCI-X command in bge_chipinit()
1808 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { in bge_chipinit()
1818 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || in bge_chipinit()
1819 sc->bge_asicrev == BGE_ASICREV_BCM5704) { in bge_chipinit()
1828 /* Set PCI-X DMA write workaround. */ in bge_chipinit()
1836 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && in bge_chipinit()
1837 sc->bge_asicrev != BGE_ASICREV_BCM5750) in bge_chipinit()
1840 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || in bge_chipinit()
1841 sc->bge_asicrev == BGE_ASICREV_BCM5701) in bge_chipinit()
1844 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || in bge_chipinit()
1845 sc->bge_asicrev == BGE_ASICREV_BCM5704) in bge_chipinit()
1849 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) in bge_chipinit()
1857 sc->bge_asicrev != BGE_ASICREV_BCM5717 && in bge_chipinit()
1858 sc->bge_asicrev != BGE_ASICREV_BCM5762) in bge_chipinit()
1861 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4); in bge_chipinit()
1867 if (sc->bge_asicrev == BGE_ASICREV_BCM5720 || in bge_chipinit()
1868 sc->bge_asicrev == BGE_ASICREV_BCM5762) { in bge_chipinit()
1869 /* Retain Host-2-BMC settings written by APE firmware. */ in bge_chipinit()
1880 * 64-bit DMA reads, which can be terminated early and then in bge_chipinit()
1881 * completed later as 32-bit accesses, in combination with in bge_chipinit()
1884 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && in bge_chipinit()
1885 sc->bge_chipid == BGE_CHIPID_BCM5701_B5) in bge_chipinit()
1891 if (sc->bge_asf_mode & ASF_STACKUP) in bge_chipinit()
1900 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4); in bge_chipinit()
1906 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { in bge_chipinit()
1931 * allow us to set up the TX send ring RCBs and the RX return in bge_blockinit()
1941 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) in bge_blockinit()
1955 if (if_getmtu(sc->bge_ifp) > ETHERMTU) { in bge_blockinit()
1966 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { in bge_blockinit()
1984 * round-robin instead of priority-based for BCM5719. When in bge_blockinit()
1988 if (sc->bge_asicrev == BGE_ASICREV_BCM5719) in bge_blockinit()
2000 device_printf(sc->bge_dev, "buffer manager failed to start\n"); in bge_blockinit()
2004 /* Enable flow-through queues */ in bge_blockinit()
2016 device_printf(sc->bge_dev, "flow-through queue init failed\n"); in bge_blockinit()
2024 * - This ring is used to feed receive buffers for "standard" in bge_blockinit()
2028 * - This ring is used to feed receive buffers for jumbo sized in bge_blockinit()
2033 * - This ring is used to feed receive buffers for "mini" in bge_blockinit()
2035 * - This feature required external memory for the controller in bge_blockinit()
2040 * - After the controller has placed an incoming frame into a in bge_blockinit()
2047 * - This ring is used for outgoing frames. Many versions of in bge_blockinit()
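Taken together, the rings above form a simple producer/consumer protocol: the host posts empty buffers on a producer ring and advances the matching mailbox; the chip hands completed frames back on the return ring, which bge_rxeof() drains. A simplified sketch (rx_prod would come from the status block):

    /* Refill path: post an empty mbuf, then bump the producer mailbox. */
    bge_newbuf_std(sc, sc->bge_std);
    bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

    /* Completion path (bge_rxeof): walk the return ring. */
    while (sc->bge_rx_saved_considx != rx_prod) {
            cur_rx = &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
            /* ...pass cur_rx's mbuf to the stack and refill the slot... */
            BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
    }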
2052 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb; in bge_blockinit()
2053 rcb->bge_hostaddr.bge_addr_lo = in bge_blockinit()
2054 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr); in bge_blockinit()
2055 rcb->bge_hostaddr.bge_addr_hi = in bge_blockinit()
2056 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr); in bge_blockinit()
2057 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, in bge_blockinit()
2058 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD); in bge_blockinit()
2061 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32) in bge_blockinit()
2062 * Bits 15-2 : Maximum RX frame size in bge_blockinit()
2066 rcb->bge_maxlen_flags = in bge_blockinit()
2070 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32) in bge_blockinit()
2071 * Bits 15-2 : Reserved (should be 0) in bge_blockinit()
2075 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); in bge_blockinit()
2079 * Bits 31-16: Maximum RX frame size in bge_blockinit()
2080 * Bits 15-2 : Reserved (should be 0) in bge_blockinit()
2084 rcb->bge_maxlen_flags = in bge_blockinit()
2087 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || in bge_blockinit()
2088 sc->bge_asicrev == BGE_ASICREV_BCM5719 || in bge_blockinit()
2089 sc->bge_asicrev == BGE_ASICREV_BCM5720) in bge_blockinit()
2090 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717; in bge_blockinit()
2092 rcb->bge_nicaddr = BGE_STD_RX_RINGS; in bge_blockinit()
2094 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); in bge_blockinit()
2095 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); in bge_blockinit()
2096 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); in bge_blockinit()
2097 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); in bge_blockinit()
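BGE_RCB_MAXLEN_FLAGS packs the two 16-bit halves described in the layout comments above; a sketch assuming the bgereg.h form:

    #define BGE_RCB_MAXLEN_FLAGS(maxlen, flags)     ((maxlen) << 16 | (flags))

    /* 5717-plus example: 512-entry ring, max frame length in bits 15-2. */
    rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);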
2103 * Initialize the jumbo RX producer ring control in bge_blockinit()
2110 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; in bge_blockinit()
2112 rcb->bge_hostaddr.bge_addr_lo = in bge_blockinit()
2113 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr); in bge_blockinit()
2114 rcb->bge_hostaddr.bge_addr_hi = in bge_blockinit()
2115 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr); in bge_blockinit()
2116 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, in bge_blockinit()
2117 sc->bge_cdata.bge_rx_jumbo_ring_map, in bge_blockinit()
2119 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, in bge_blockinit()
2121 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || in bge_blockinit()
2122 sc->bge_asicrev == BGE_ASICREV_BCM5719 || in bge_blockinit()
2123 sc->bge_asicrev == BGE_ASICREV_BCM5720) in bge_blockinit()
2124 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717; in bge_blockinit()
2126 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; in bge_blockinit()
2128 rcb->bge_hostaddr.bge_addr_hi); in bge_blockinit()
2130 rcb->bge_hostaddr.bge_addr_lo); in bge_blockinit()
2133 rcb->bge_maxlen_flags); in bge_blockinit()
2134 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); in bge_blockinit()
2141 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb; in bge_blockinit()
2142 rcb->bge_maxlen_flags = in bge_blockinit()
2145 rcb->bge_maxlen_flags); in bge_blockinit()
2150 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */ in bge_blockinit()
2151 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { in bge_blockinit()
2152 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 || in bge_blockinit()
2153 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 || in bge_blockinit()
2154 sc->bge_chipid == BGE_CHIPID_BCM5906_A2) in bge_blockinit()
2197 sc->bge_asicrev == BGE_ASICREV_BCM5762) in bge_blockinit()
2213 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr); in bge_blockinit()
2216 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || in bge_blockinit()
2217 sc->bge_asicrev == BGE_ASICREV_BCM5719 || in bge_blockinit()
2218 sc->bge_asicrev == BGE_ASICREV_BCM5720) in bge_blockinit()
2231 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || in bge_blockinit()
2232 sc->bge_asicrev == BGE_ASICREV_BCM5719 || in bge_blockinit()
2233 sc->bge_asicrev == BGE_ASICREV_BCM5720) { in bge_blockinit()
2238 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755 || in bge_blockinit()
2239 sc->bge_asicrev == BGE_ASICREV_BCM5762 || in bge_blockinit()
2259 * for RX return rings is 0x0. The return rings live entirely in bge_blockinit()
2263 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr); in bge_blockinit()
2268 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); in bge_blockinit()
2270 lladdr = if_getlladdr(sc->bge_ifp); in bge_blockinit()
2278 /* Set inter-packet gap */ in bge_blockinit()
2280 if (sc->bge_asicrev == BGE_ASICREV_BCM5720 || in bge_blockinit()
2281 sc->bge_asicrev == BGE_ASICREV_BCM5762) in bge_blockinit()
2288 * any RX rules. in bge_blockinit()
2293 * Configure number of RX lists. One interrupt distribution in bge_blockinit()
2298 /* Initialize RX list placement stats mask. */ in bge_blockinit()
2313 device_printf(sc->bge_dev, in bge_blockinit()
2319 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); in bge_blockinit()
2320 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); in bge_blockinit()
2321 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); in bge_blockinit()
2322 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); in bge_blockinit()
2333 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr)); in bge_blockinit()
2335 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr)); in bge_blockinit()
2338 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); in bge_blockinit()
2343 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr)); in bge_blockinit()
2345 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr)); in bge_blockinit()
2348 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && in bge_blockinit()
2349 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) { in bge_blockinit()
2351 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ); in bge_blockinit()
2354 bzero(sc->bge_ldata.bge_status_block, 32); in bge_blockinit()
2356 bus_dmamap_sync(sc->bge_cdata.bge_status_tag, in bge_blockinit()
2357 sc->bge_cdata.bge_status_map, in bge_blockinit()
2363 /* Turn on RX BD completion state machine and enable attentions */ in bge_blockinit()
2367 /* Turn on RX list placement state machine */ in bge_blockinit()
2370 /* Turn on RX list selector state machine. */ in bge_blockinit()
2380 if (sc->bge_flags & BGE_FLAG_TBI) in bge_blockinit()
2382 else if (sc->bge_flags & BGE_FLAG_MII_SERDES) in bge_blockinit()
2388 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) in bge_blockinit()
2416 if (sc->bge_asicrev == BGE_ASICREV_BCM5785) in bge_blockinit()
2426 if (sc->bge_asicrev == BGE_ASICREV_BCM5717) in bge_blockinit()
2429 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 || in bge_blockinit()
2430 sc->bge_asicrev == BGE_ASICREV_BCM5785 || in bge_blockinit()
2431 sc->bge_asicrev == BGE_ASICREV_BCM57780) in bge_blockinit()
2435 if (sc->bge_flags & BGE_FLAG_PCIE) in bge_blockinit()
2437 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) { in bge_blockinit()
2439 if (sc->bge_flags & BGE_FLAG_TSO3 || in bge_blockinit()
2440 sc->bge_asicrev == BGE_ASICREV_BCM5785 || in bge_blockinit()
2441 sc->bge_asicrev == BGE_ASICREV_BCM57780) in bge_blockinit()
2445 if (sc->bge_asicrev == BGE_ASICREV_BCM5720 || in bge_blockinit()
2446 sc->bge_asicrev == BGE_ASICREV_BCM5762) { in bge_blockinit()
2451 * non-LSO read DMA engine. in bge_blockinit()
2456 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 || in bge_blockinit()
2457 sc->bge_asicrev == BGE_ASICREV_BCM5784 || in bge_blockinit()
2458 sc->bge_asicrev == BGE_ASICREV_BCM5785 || in bge_blockinit()
2459 sc->bge_asicrev == BGE_ASICREV_BCM57780 || in bge_blockinit()
2461 if (sc->bge_asicrev == BGE_ASICREV_BCM5762) in bge_blockinit()
2470 if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 || in bge_blockinit()
2471 sc->bge_asicrev == BGE_ASICREV_BCM5762) { in bge_blockinit()
2481 * The fix is to limit the number of RX BDs in bge_blockinit()
2488 if (sc->bge_asicrev == BGE_ASICREV_BCM5719) { in bge_blockinit()
2493 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5720) { in bge_blockinit()
2495 * Allow 4KB burst length reads for non-LSO frames. in bge_blockinit()
2502 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5762) { in bge_blockinit()
2512 if (sc->bge_flags & BGE_FLAG_RDMA_BUG) { in bge_blockinit()
2522 if (sc->bge_asicrev == BGE_ASICREV_BCM5719) in bge_blockinit()
2530 /* Turn on RX data completion state machine */ in bge_blockinit()
2533 /* Turn on RX BD initiator state machine */ in bge_blockinit()
2536 /* Turn on RX data and RX BD initiator state machine */ in bge_blockinit()
2548 if (sc->bge_asicrev == BGE_ASICREV_BCM5761) in bge_blockinit()
2553 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) in bge_blockinit()
2579 if (sc->bge_flags & BGE_FLAG_TBI) { in bge_blockinit()
2582 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) { in bge_blockinit()
2583 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode); in bge_blockinit()
2586 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && in bge_blockinit()
2587 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) in bge_blockinit()
2595 * is cleared by the bge_intr() -> bge_link_upd() sequence. in bge_blockinit()
2596 * It's not necessary on newer BCM chips - perhaps enabling link in bge_blockinit()
2614 for (br = bge_revisions; br->br_name != NULL; br++) { in bge_lookup_rev()
2615 if (br->br_chipid == chipid) in bge_lookup_rev()
2619 for (br = bge_majorrevs; br->br_name != NULL; br++) { in bge_lookup_rev()
2620 if (br->br_chipid == BGE_ASICREV(chipid)) in bge_lookup_rev()
2632 for (v = bge_vendors; v->v_name != NULL; v++) in bge_lookup_vendor()
2633 if (v->v_id == vid) in bge_lookup_vendor()
2695 * of the compiled-in string. It guarantees we'll always announce the
2696 * right product name. We fall back to the compiled-in string when
2712 sc->bge_dev = dev; in bge_probe()
2715 while (t->bge_vid != 0) { in bge_probe()
2716 if ((vid == t->bge_vid) && (did == t->bge_did)) { in bge_probe()
2725 v != NULL ? v->v_name : "Unknown", in bge_probe()
2726 br != NULL ? br->br_name : in bge_probe()
2744 /* Destroy DMA maps for RX buffers. */ in bge_dma_free()
2746 if (sc->bge_cdata.bge_rx_std_dmamap[i]) in bge_dma_free()
2747 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, in bge_dma_free()
2748 sc->bge_cdata.bge_rx_std_dmamap[i]); in bge_dma_free()
2750 if (sc->bge_cdata.bge_rx_std_sparemap) in bge_dma_free()
2751 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, in bge_dma_free()
2752 sc->bge_cdata.bge_rx_std_sparemap); in bge_dma_free()
2754 /* Destroy DMA maps for jumbo RX buffers. */ in bge_dma_free()
2756 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i]) in bge_dma_free()
2757 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, in bge_dma_free()
2758 sc->bge_cdata.bge_rx_jumbo_dmamap[i]); in bge_dma_free()
2760 if (sc->bge_cdata.bge_rx_jumbo_sparemap) in bge_dma_free()
2761 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, in bge_dma_free()
2762 sc->bge_cdata.bge_rx_jumbo_sparemap); in bge_dma_free()
2766 if (sc->bge_cdata.bge_tx_dmamap[i]) in bge_dma_free()
2767 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag, in bge_dma_free()
2768 sc->bge_cdata.bge_tx_dmamap[i]); in bge_dma_free()
2771 if (sc->bge_cdata.bge_rx_mtag) in bge_dma_free()
2772 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag); in bge_dma_free()
2773 if (sc->bge_cdata.bge_mtag_jumbo) in bge_dma_free()
2774 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag_jumbo); in bge_dma_free()
2775 if (sc->bge_cdata.bge_tx_mtag) in bge_dma_free()
2776 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag); in bge_dma_free()
2778 /* Destroy standard RX ring. */ in bge_dma_free()
2779 if (sc->bge_ldata.bge_rx_std_ring_paddr) in bge_dma_free()
2780 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag, in bge_dma_free()
2781 sc->bge_cdata.bge_rx_std_ring_map); in bge_dma_free()
2782 if (sc->bge_ldata.bge_rx_std_ring) in bge_dma_free()
2783 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag, in bge_dma_free()
2784 sc->bge_ldata.bge_rx_std_ring, in bge_dma_free()
2785 sc->bge_cdata.bge_rx_std_ring_map); in bge_dma_free()
2787 if (sc->bge_cdata.bge_rx_std_ring_tag) in bge_dma_free()
2788 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag); in bge_dma_free()
2790 /* Destroy jumbo RX ring. */ in bge_dma_free()
2791 if (sc->bge_ldata.bge_rx_jumbo_ring_paddr) in bge_dma_free()
2792 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag, in bge_dma_free()
2793 sc->bge_cdata.bge_rx_jumbo_ring_map); in bge_dma_free()
2795 if (sc->bge_ldata.bge_rx_jumbo_ring) in bge_dma_free()
2796 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag, in bge_dma_free()
2797 sc->bge_ldata.bge_rx_jumbo_ring, in bge_dma_free()
2798 sc->bge_cdata.bge_rx_jumbo_ring_map); in bge_dma_free()
2800 if (sc->bge_cdata.bge_rx_jumbo_ring_tag) in bge_dma_free()
2801 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag); in bge_dma_free()
2803 /* Destroy RX return ring. */ in bge_dma_free()
2804 if (sc->bge_ldata.bge_rx_return_ring_paddr) in bge_dma_free()
2805 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag, in bge_dma_free()
2806 sc->bge_cdata.bge_rx_return_ring_map); in bge_dma_free()
2808 if (sc->bge_ldata.bge_rx_return_ring) in bge_dma_free()
2809 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag, in bge_dma_free()
2810 sc->bge_ldata.bge_rx_return_ring, in bge_dma_free()
2811 sc->bge_cdata.bge_rx_return_ring_map); in bge_dma_free()
2813 if (sc->bge_cdata.bge_rx_return_ring_tag) in bge_dma_free()
2814 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag); in bge_dma_free()
2817 if (sc->bge_ldata.bge_tx_ring_paddr) in bge_dma_free()
2818 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag, in bge_dma_free()
2819 sc->bge_cdata.bge_tx_ring_map); in bge_dma_free()
2821 if (sc->bge_ldata.bge_tx_ring) in bge_dma_free()
2822 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag, in bge_dma_free()
2823 sc->bge_ldata.bge_tx_ring, in bge_dma_free()
2824 sc->bge_cdata.bge_tx_ring_map); in bge_dma_free()
2826 if (sc->bge_cdata.bge_tx_ring_tag) in bge_dma_free()
2827 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag); in bge_dma_free()
2830 if (sc->bge_ldata.bge_status_block_paddr) in bge_dma_free()
2831 bus_dmamap_unload(sc->bge_cdata.bge_status_tag, in bge_dma_free()
2832 sc->bge_cdata.bge_status_map); in bge_dma_free()
2834 if (sc->bge_ldata.bge_status_block) in bge_dma_free()
2835 bus_dmamem_free(sc->bge_cdata.bge_status_tag, in bge_dma_free()
2836 sc->bge_ldata.bge_status_block, in bge_dma_free()
2837 sc->bge_cdata.bge_status_map); in bge_dma_free()
2839 if (sc->bge_cdata.bge_status_tag) in bge_dma_free()
2840 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag); in bge_dma_free()
2843 if (sc->bge_ldata.bge_stats_paddr) in bge_dma_free()
2844 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag, in bge_dma_free()
2845 sc->bge_cdata.bge_stats_map); in bge_dma_free()
2847 if (sc->bge_ldata.bge_stats) in bge_dma_free()
2848 bus_dmamem_free(sc->bge_cdata.bge_stats_tag, in bge_dma_free()
2849 sc->bge_ldata.bge_stats, in bge_dma_free()
2850 sc->bge_cdata.bge_stats_map); in bge_dma_free()
2852 if (sc->bge_cdata.bge_stats_tag) in bge_dma_free()
2853 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag); in bge_dma_free()
2855 if (sc->bge_cdata.bge_buffer_tag) in bge_dma_free()
2856 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag); in bge_dma_free()
2859 if (sc->bge_cdata.bge_parent_tag) in bge_dma_free()
2860 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag); in bge_dma_free()
2875 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, in bge_dma_ring_alloc()
2879 device_printf(sc->bge_dev, in bge_dma_ring_alloc()
2887 device_printf(sc->bge_dev, in bge_dma_ring_alloc()
2896 device_printf(sc->bge_dev, in bge_dma_ring_alloc()
2902 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 && in bge_dma_ring_alloc()
2912 device_printf(sc->bge_dev, "4GB boundary crossed, " in bge_dma_ring_alloc()
2931 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0) in bge_dma_alloc()
2936 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), in bge_dma_alloc()
2939 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag); in bge_dma_alloc()
2941 device_printf(sc->bge_dev, in bge_dma_alloc()
2946 /* Create tag for standard RX ring. */ in bge_dma_alloc()
2948 &sc->bge_cdata.bge_rx_std_ring_tag, in bge_dma_alloc()
2949 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring, in bge_dma_alloc()
2950 &sc->bge_cdata.bge_rx_std_ring_map, in bge_dma_alloc()
2951 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring"); in bge_dma_alloc()
2955 /* Create tag for RX return ring. */ in bge_dma_alloc()
2957 &sc->bge_cdata.bge_rx_return_ring_tag, in bge_dma_alloc()
2958 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring, in bge_dma_alloc()
2959 &sc->bge_cdata.bge_rx_return_ring_map, in bge_dma_alloc()
2960 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring"); in bge_dma_alloc()
2966 &sc->bge_cdata.bge_tx_ring_tag, in bge_dma_alloc()
2967 (uint8_t **)&sc->bge_ldata.bge_tx_ring, in bge_dma_alloc()
2968 &sc->bge_cdata.bge_tx_ring_map, in bge_dma_alloc()
2969 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring"); in bge_dma_alloc()
2975 * Because we only use a single Tx/Rx/Rx return ring, use in bge_dma_alloc()
2980 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && in bge_dma_alloc()
2981 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) in bge_dma_alloc()
2986 &sc->bge_cdata.bge_status_tag, in bge_dma_alloc()
2987 (uint8_t **)&sc->bge_ldata.bge_status_block, in bge_dma_alloc()
2988 &sc->bge_cdata.bge_status_map, in bge_dma_alloc()
2989 &sc->bge_ldata.bge_status_block_paddr, "status block"); in bge_dma_alloc()
2995 &sc->bge_cdata.bge_stats_tag, in bge_dma_alloc()
2996 (uint8_t **)&sc->bge_ldata.bge_stats, in bge_dma_alloc()
2997 &sc->bge_cdata.bge_stats_map, in bge_dma_alloc()
2998 &sc->bge_ldata.bge_stats_paddr, "statistics block"); in bge_dma_alloc()
3002 /* Create tag for jumbo RX ring. */ in bge_dma_alloc()
3005 &sc->bge_cdata.bge_rx_jumbo_ring_tag, in bge_dma_alloc()
3006 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring, in bge_dma_alloc()
3007 &sc->bge_cdata.bge_rx_jumbo_ring_map, in bge_dma_alloc()
3008 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring"); in bge_dma_alloc()
3015 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) { in bge_dma_alloc()
3020 * lives behind a PCI-X bridge (e.g., the AMD 8131 PCI-X bridge). in bge_dma_alloc()
3024 if (sc->bge_pcixcap != 0) in bge_dma_alloc()
3027 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), in bge_dma_alloc()
3030 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag); in bge_dma_alloc()
3032 device_printf(sc->bge_dev, in bge_dma_alloc()
3037 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) { in bge_dma_alloc()
3044 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, in bge_dma_alloc()
3047 &sc->bge_cdata.bge_tx_mtag); in bge_dma_alloc()
3050 device_printf(sc->bge_dev, "could not allocate TX dma tag\n"); in bge_dma_alloc()
3054 /* Create tag for Rx mbufs. */ in bge_dma_alloc()
3055 if (sc->bge_flags & BGE_FLAG_JUMBO_STD) in bge_dma_alloc()
3059 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0, in bge_dma_alloc()
3061 rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag); in bge_dma_alloc()
3064 device_printf(sc->bge_dev, "could not allocate RX dma tag\n"); in bge_dma_alloc()
3068 /* Create DMA maps for RX buffers. */ in bge_dma_alloc()
3069 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0, in bge_dma_alloc()
3070 &sc->bge_cdata.bge_rx_std_sparemap); in bge_dma_alloc()
3072 device_printf(sc->bge_dev, in bge_dma_alloc()
3073 "can't create spare DMA map for RX\n"); in bge_dma_alloc()
3077 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0, in bge_dma_alloc()
3078 &sc->bge_cdata.bge_rx_std_dmamap[i]); in bge_dma_alloc()
3080 device_printf(sc->bge_dev, in bge_dma_alloc()
3081 "can't create DMA map for RX\n"); in bge_dma_alloc()
3088 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0, in bge_dma_alloc()
3089 &sc->bge_cdata.bge_tx_dmamap[i]); in bge_dma_alloc()
3091 device_printf(sc->bge_dev, in bge_dma_alloc()
3097 /* Create tags for jumbo RX buffers. */ in bge_dma_alloc()
3099 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, in bge_dma_alloc()
3102 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo); in bge_dma_alloc()
3104 device_printf(sc->bge_dev, in bge_dma_alloc()
3108 /* Create DMA maps for jumbo RX buffers. */ in bge_dma_alloc()
3109 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, in bge_dma_alloc()
3110 0, &sc->bge_cdata.bge_rx_jumbo_sparemap); in bge_dma_alloc()
3112 device_printf(sc->bge_dev, in bge_dma_alloc()
3113 "can't create spare DMA map for jumbo RX\n"); in bge_dma_alloc()
3117 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, in bge_dma_alloc()
3118 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]); in bge_dma_alloc()
3120 device_printf(sc->bge_dev, in bge_dma_alloc()
3121 "can't create DMA map for jumbo RX\n"); in bge_dma_alloc()
3136 device_t dev = sc->bge_dev; in bge_has_multiple_ports()
3157 if (sc->bge_msi == 0) in bge_can_use_msi()
3164 switch (sc->bge_asicrev) { in bge_can_use_msi()
3169 * configured in single-port mode. in bge_can_use_msi()
3175 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX && in bge_can_use_msi()
3176 sc->bge_chiprev != BGE_CHIPREV_5750_BX) in bge_can_use_msi()
3182 * in some MacBook Pro models and make it work out-of-the-box. in bge_can_use_msi()
3184 if (sc->bge_chiprev == BGE_CHIPREV_5784_AX) in bge_can_use_msi()
3203 { 0x1022, 0x7450, "AMD-8131 PCI-X Bridge" }, in bge_mbox_reorder()
3211 dev = sc->bge_dev; in bge_mbox_reorder()
3225 device_printf(sc->bge_dev, in bge_mbox_reorder()
3240 device_printf(sc->bge_dev, in bge_devinfo()
3242 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev); in bge_devinfo()
3243 if (sc->bge_flags & BGE_FLAG_PCIE) in bge_devinfo()
3244 printf("PCI-E\n"); in bge_devinfo()
3245 else if (sc->bge_flags & BGE_FLAG_PCIX) { in bge_devinfo()
3246 printf("PCI-X "); in bge_devinfo()
3272 if (sc->bge_pcixcap != 0) in bge_devinfo()
3273 printf("PCI on PCI-X "); in bge_devinfo()
3276 cfg = pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4); in bge_devinfo()
3298 sc->bge_dev = dev; in bge_attach()
3301 NET_TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc); in bge_attach()
3302 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0); in bge_attach()
3310 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, in bge_attach()
3313 if (sc->bge_res == NULL) { in bge_attach()
3314 device_printf (sc->bge_dev, "couldn't map BAR0 memory\n"); in bge_attach()
3320 sc->bge_func_addr = pci_get_function(dev); in bge_attach()
3321 sc->bge_chipid = bge_chipid(dev); in bge_attach()
3322 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid); in bge_attach()
3323 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid); in bge_attach()
3326 sc->bge_phy_addr = 1; in bge_attach()
3331 * ---------+-------+-------+-------+-------+ in bge_attach()
3339 * ---------+-------+-------+-------+-------+ in bge_attach()
3349 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || in bge_attach()
3350 sc->bge_asicrev == BGE_ASICREV_BCM5719 || in bge_attach()
3351 sc->bge_asicrev == BGE_ASICREV_BCM5720) { in bge_attach()
3352 if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) { in bge_attach()
3355 sc->bge_phy_addr = sc->bge_func_addr + 8; in bge_attach()
3357 sc->bge_phy_addr = sc->bge_func_addr + 1; in bge_attach()
3361 sc->bge_phy_addr = sc->bge_func_addr + 8; in bge_attach()
3363 sc->bge_phy_addr = sc->bge_func_addr + 1; in bge_attach()
3368 sc->bge_flags |= BGE_FLAG_EADDR; in bge_attach()
3371 switch (sc->bge_asicrev) { in bge_attach()
3375 sc->bge_flags |= BGE_FLAG_57765_PLUS; in bge_attach()
3380 sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS | in bge_attach()
3383 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 || in bge_attach()
3384 sc->bge_asicrev == BGE_ASICREV_BCM5720) { in bge_attach()
3389 sc->bge_flags |= BGE_FLAG_RDMA_BUG; in bge_attach()
3390 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 && in bge_attach()
3391 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) { in bge_attach()
3393 sc->bge_flags &= ~BGE_FLAG_JUMBO; in bge_attach()
3403 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS | in bge_attach()
3410 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO; in bge_attach()
3415 sc->bge_flags |= BGE_FLAG_5714_FAMILY | BGE_FLAG_JUMBO_STD; in bge_attach()
3420 sc->bge_flags |= BGE_FLAG_575X_PLUS; in bge_attach()
3423 sc->bge_flags |= BGE_FLAG_5705_PLUS; in bge_attach()
3428 switch (sc->bge_asicrev) { in bge_attach()
3434 sc->bge_flags |= BGE_FLAG_APE; in bge_attach()
3439 if ((sc->bge_flags & BGE_FLAG_APE) != 0) { in bge_attach()
3441 sc->bge_res2 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, in bge_attach()
3443 if (sc->bge_res2 == NULL) { in bge_attach()
3444 device_printf (sc->bge_dev, in bge_attach()
3466 sc->bge_asicrev == BGE_ASICREV_BCM5784 || in bge_attach()
3467 sc->bge_asicrev == BGE_ASICREV_BCM5761 || in bge_attach()
3468 sc->bge_asicrev == BGE_ASICREV_BCM5785 || in bge_attach()
3469 sc->bge_asicrev == BGE_ASICREV_BCM57780) in bge_attach()
3470 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT; in bge_attach()
3471 if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0) in bge_attach()
3472 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST; in bge_attach()
3474 sc->bge_mi_mode = BGE_MIMODE_BASE; in bge_attach()
3475 /* Enable auto polling for BCM570[0-5]. */ in bge_attach()
3476 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705) in bge_attach()
3477 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL; in bge_attach()
3486 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG; in bge_attach()
3489 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906) in bge_attach()
3490 sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG; in bge_attach()
3498 if (sc->bge_asicrev == BGE_ASICREV_BCM5719) in bge_attach()
3499 sc->bge_flags |= BGE_FLAG_4K_RDMA_BUG; in bge_attach()
3502 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) { in bge_attach()
3505 sc->bge_flags |= BGE_FLAG_5788; in bge_attach()
3509 if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 && in bge_attach()
3511 (sc->bge_asicrev == BGE_ASICREV_BCM5705 && in bge_attach()
3523 sc->bge_asicrev == BGE_ASICREV_BCM5906) { in bge_attach()
3526 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED; in bge_attach()
3543 sc->bge_flags |= BGE_FLAG_TSO3; in bge_attach()
3544 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 && in bge_attach()
3545 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) { in bge_attach()
3547 sc->bge_flags &= ~BGE_FLAG_TSO3; in bge_attach()
3558 sc->bge_flags |= BGE_FLAG_TSO; in bge_attach()
3562 * Check if this is a PCI-X or PCI Express device. in bge_attach()
3569 sc->bge_flags |= BGE_FLAG_PCIE; in bge_attach()
3570 sc->bge_expcap = reg; in bge_attach()
3572 sc->bge_mps = pci_read_config(dev, sc->bge_expcap + in bge_attach()
3574 sc->bge_mps = 128 << (sc->bge_mps & PCIEM_CAP_MAX_PAYLOAD); in bge_attach()
3575 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 || in bge_attach()
3576 sc->bge_asicrev == BGE_ASICREV_BCM5720) in bge_attach()
3577 sc->bge_expmrq = 2048; in bge_attach()
3579 sc->bge_expmrq = 4096; in bge_attach()
3580 pci_set_max_read_req(dev, sc->bge_expmrq); in bge_attach()
3583 * Check if the device is in PCI-X Mode. in bge_attach()
3587 sc->bge_pcixcap = reg; in bge_attach()
3590 sc->bge_flags |= BGE_FLAG_PCIX; in bge_attach()
3596 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround. in bge_attach()
3598 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX)) in bge_attach()
3599 sc->bge_flags |= BGE_FLAG_40BIT_BUG; in bge_attach()
3601 * Some PCI-X bridges are known to trigger write reordering to in bge_attach()
3603 * caused by out-of-order TX completions. Enable workaround for in bge_attach()
3604 * PCI-X devices that live behind these bridges. in bge_attach()
3605 * Note, PCI-X controllers can run in PCI mode so we can't use in bge_attach()
3606 * BGE_FLAG_PCIX flag to detect PCI-X controllers. in bge_attach()
3608 if (sc->bge_pcixcap != 0 && bge_mbox_reorder(sc) != 0) in bge_attach()
3609 sc->bge_flags |= BGE_FLAG_MBOX_REORDER; in bge_attach()
3616 if (pci_find_cap(sc->bge_dev, PCIY_MSI, ®) == 0) { in bge_attach()
3617 sc->bge_msicap = reg; in bge_attach()
3621 sc->bge_flags |= BGE_FLAG_MSI; in bge_attach()
3631 if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc)) in bge_attach()
3632 sc->bge_flags |= BGE_FLAG_TAGGED_STATUS; in bge_attach()
3635 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, in bge_attach()
3638 if (sc->bge_irq == NULL) { in bge_attach()
3639 device_printf(sc->bge_dev, "couldn't map interrupt\n"); in bge_attach()
3646 sc->bge_asf_mode = 0; in bge_attach()
3648 if ((sc->bge_flags & BGE_FLAG_APE) == 0) { in bge_attach()
3653 sc->bge_asf_mode |= ASF_ENABLE; in bge_attach()
3654 sc->bge_asf_mode |= ASF_STACKUP; in bge_attach()
3656 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE; in bge_attach()
3664 device_printf(sc->bge_dev, "chip reset failed\n"); in bge_attach()
3673 device_printf(sc->bge_dev, "chip initialization failed\n"); in bge_attach()
3680 device_printf(sc->bge_dev, in bge_attach()
3686 /* 5705 limits RX return ring to 512 entries. */ in bge_attach()
3688 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; in bge_attach()
3690 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; in bge_attach()
3692 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; in bge_attach()
3695 device_printf(sc->bge_dev, in bge_attach()
3702 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; in bge_attach()
3703 sc->bge_rx_coal_ticks = 150; in bge_attach()
3704 sc->bge_tx_coal_ticks = 150; in bge_attach()
3705 sc->bge_rx_max_coal_bds = 10; in bge_attach()
3706 sc->bge_tx_max_coal_bds = 10; in bge_attach()
3709 sc->bge_csum_features = BGE_CSUM_FEATURES; in bge_attach()
3710 if (sc->bge_forced_udpcsum != 0) in bge_attach()
3711 sc->bge_csum_features |= CSUM_UDP; in bge_attach()
3714 ifp = sc->bge_ifp = if_alloc(IFT_ETHER); in bge_attach()
3722 if_setsendqlen(ifp, BGE_TX_RING_CNT - 1); in bge_attach()
3724 if_sethwassist(ifp, sc->bge_csum_features); in bge_attach()
3727 if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) { in bge_attach()
3743 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) { in bge_attach()
3756 * SK-9D41. in bge_attach()
3760 else if ((sc->bge_flags & BGE_FLAG_EADDR) && in bge_attach()
3761 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) { in bge_attach()
3764 device_printf(sc->bge_dev, "failed to read EEPROM\n"); in bge_attach()
3771 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ in bge_attach()
3775 sc->bge_flags |= BGE_FLAG_MII_SERDES; in bge_attach()
3776 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED; in bge_attach()
3778 sc->bge_flags |= BGE_FLAG_TBI; in bge_attach()
3782 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 || in bge_attach()
3783 sc->bge_chipid == BGE_CHIPID_BCM5701_B0) in bge_attach()
3784 sc->bge_phy_flags |= BGE_PHY_CRC_BUG; in bge_attach()
3785 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX || in bge_attach()
3786 sc->bge_chiprev == BGE_CHIPREV_5704_AX) in bge_attach()
3787 sc->bge_phy_flags |= BGE_PHY_ADC_BUG; in bge_attach()
3788 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0) in bge_attach()
3789 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG; in bge_attach()
3791 sc->bge_phy_flags |= BGE_PHY_NO_3LED; in bge_attach()
3793 sc->bge_asicrev != BGE_ASICREV_BCM5906 && in bge_attach()
3794 sc->bge_asicrev != BGE_ASICREV_BCM5785 && in bge_attach()
3795 sc->bge_asicrev != BGE_ASICREV_BCM57780 && in bge_attach()
3797 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 || in bge_attach()
3798 sc->bge_asicrev == BGE_ASICREV_BCM5761 || in bge_attach()
3799 sc->bge_asicrev == BGE_ASICREV_BCM5784 || in bge_attach()
3800 sc->bge_asicrev == BGE_ASICREV_BCM5787) { in bge_attach()
3803 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG; in bge_attach()
3805 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM; in bge_attach()
3807 sc->bge_phy_flags |= BGE_PHY_BER_BUG; in bge_attach()
3814 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || in bge_attach()
3815 (sc->bge_asicrev == BGE_ASICREV_BCM5705 && in bge_attach()
3816 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && in bge_attach()
3817 sc->bge_chipid != BGE_CHIPID_BCM5705_A1))) in bge_attach()
3818 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED; in bge_attach()
3820 if (sc->bge_flags & BGE_FLAG_TBI) { in bge_attach()
3821 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, in bge_attach()
3823 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL); in bge_attach()
3824 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX, in bge_attach()
3826 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); in bge_attach()
3827 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO); in bge_attach()
3828 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; in bge_attach()
3842 error = mii_attach(dev, &sc->bge_miibus, ifp, in bge_attach()
3844 (ifm_stat_cb_t)bge_ifmedia_sts, capmask, sc->bge_phy_addr, in bge_attach()
3848 device_printf(sc->bge_dev, "Try again\n"); in bge_attach()
3849 bge_miibus_writereg(sc->bge_dev, in bge_attach()
3850 sc->bge_phy_addr, MII_BMCR, BMCR_RESET); in bge_attach()
3853 device_printf(sc->bge_dev, "attaching PHYs failed\n"); in bge_attach()
3860 if (sc->bge_asf_mode & ASF_STACKUP) in bge_attach()
3865 * When using the BCM5701 in PCI-X mode, data corruption has in bge_attach()
3872 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && in bge_attach()
3873 sc->bge_flags & BGE_FLAG_PCIX) in bge_attach()
3874 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG; in bge_attach()
3887 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) { in bge_attach()
3888 /* Take advantage of single-shot MSI. */ in bge_attach()
3891 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK, in bge_attach()
3892 taskqueue_thread_enqueue, &sc->bge_tq); in bge_attach()
3893 error = taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, in bge_attach()
3894 "%s taskq", device_get_nameunit(sc->bge_dev)); in bge_attach()
3900 error = bus_setup_intr(dev, sc->bge_irq, in bge_attach()
3902 &sc->bge_intrhand); in bge_attach()
3904 error = bus_setup_intr(dev, sc->bge_irq, in bge_attach()
3906 &sc->bge_intrhand); in bge_attach()
3910 device_printf(sc->bge_dev, "couldn't set up irq\n"); in bge_attach()
3930 ifp = sc->bge_ifp; in bge_detach()
3942 callout_drain(&sc->bge_stat_ch); in bge_detach()
3945 if (sc->bge_tq) in bge_detach()
3946 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task); in bge_detach()
3948 if (sc->bge_flags & BGE_FLAG_TBI) in bge_detach()
3949 ifmedia_removeall(&sc->bge_ifmedia); in bge_detach()
3950 else if (sc->bge_miibus != NULL) { in bge_detach()
3952 device_delete_child(dev, sc->bge_miibus); in bge_detach()
3965 dev = sc->bge_dev; in bge_release_resources()
3967 if (sc->bge_tq != NULL) in bge_release_resources()
3968 taskqueue_free(sc->bge_tq); in bge_release_resources()
3970 if (sc->bge_intrhand != NULL) in bge_release_resources()
3971 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); in bge_release_resources()
3973 if (sc->bge_irq != NULL) { in bge_release_resources()
3975 rman_get_rid(sc->bge_irq), sc->bge_irq); in bge_release_resources()
3979 if (sc->bge_res != NULL) in bge_release_resources()
3981 rman_get_rid(sc->bge_res), sc->bge_res); in bge_release_resources()
3983 if (sc->bge_res2 != NULL) in bge_release_resources()
3985 rman_get_rid(sc->bge_res2), sc->bge_res2); in bge_release_resources()
3987 if (sc->bge_ifp != NULL) in bge_release_resources()
3988 if_free(sc->bge_ifp); in bge_release_resources()
3992 if (mtx_initialized(&sc->bge_mtx)) /* XXX */ in bge_release_resources()
/* bge_reset() */
	dev = sc->bge_dev;
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
	    (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
		if (sc->bge_flags & BGE_FLAG_PCIE)
	if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5701) {
	if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		if (sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
		    (sc->bge_flags & BGE_FLAG_5717_PLUS) == 0) {
			if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
	    (sc->bge_flags & BGE_FLAG_CPMU_PRESENT) == 0)
	if (sc->bge_flags & BGE_FLAG_PCIE)
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
		    sc->bge_expcap + PCIER_DEVICE_CTL, 2);
		pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_CTL,
		pci_set_max_read_req(dev, sc->bge_expmrq);
		pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_STA,
	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 &&
	    (sc->bge_flags & BGE_FLAG_PCIX) != 0)
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
	/*
	 * Disable PCI-X relaxed ordering to ensure status block update
	 * happens before the packet buffers it describes; otherwise the
	 * driver could act on a stale status block.
	 */
	if (sc->bge_flags & BGE_FLAG_PCIX) {
		    sc->bge_pcixcap + PCIXR_COMMAND, 2);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
		pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
	/* Re-enable MSI, if necessary, and enable the memory arbiter. */
	if (sc->bge_flags & BGE_FLAG_MSI) {
		    sc->bge_msicap + PCIR_MSI_CTRL, 2);
		    sc->bge_msicap + PCIR_MSI_CTRL,
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
	if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
	if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
	    sc->bge_flags & BGE_FLAG_TBI) {
	if (sc->bge_flags & BGE_FLAG_PCIE &&
	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5785) {
	if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
/* bge_rxreuse_std() */
	r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
	r->bge_idx = i;
	BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);

/* bge_rxreuse_jumbo() */
	r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
	r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
	r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
	r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
	r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
	r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
	r->bge_idx = i;
	BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
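/*
 * (Editorial note.)  Both reuse paths rebuild the buffer descriptor in
 * place and advance the producer index with BGE_INC, which is assumed
 * here (per if_bgereg.h) to be a modular increment, roughly:
 *
 *	#define BGE_INC(x, y)	(x) = ((x) + 1) % (y)
 *
 * so the standard and jumbo producer indices simply wrap around their
 * ring sizes.
 */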
/* bge_rxeof() */
	rx_cons = sc->bge_rx_saved_considx;
	ifp = sc->bge_ifp;
	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
	    ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
		struct mbuf *m = NULL;
			if (sc->rxcycles <= 0)
			sc->rxcycles--;
		cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
		rxidx = cur_rx->bge_idx;
		BGE_INC(rx_cons, sc->bge_return_ring_cnt);
		    cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
			vlan_tag = cur_rx->bge_vlan_tag;
		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
			    cur_rx->bge_len);
			m->m_data += ETHER_ALIGN;
		}
		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;
			bge_rxcsum(sc, cur_rx, m);
			m->m_pkthdr.ether_vtag = vlan_tag;
			m->m_flags |= M_VLANTAG;
			if_input(ifp, m);
		if_input(ifp, m);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
	    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
	sc->bge_rx_saved_considx = rx_cons;
	bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
	    BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
	    BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
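/*
 * (Editorial note on the mailbox writes above, stated as an assumption
 * from the code.)  The driver owns a consumer index into the RX return
 * ring and producer indices into the buffer rings.  The value written
 * back for each producer is (index + ring_size - 1) % ring_size, the
 * last descriptor actually filled, so the NIC never advances onto a
 * buffer the host has not yet replenished.
 */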
/* bge_rxcsum() */
bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
	if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
		if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((cur_rx->bge_error_flag &
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
			m->m_pkthdr.csum_data =
			    cur_rx->bge_tcp_udp_csum;
			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
		if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
		    m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
			m->m_pkthdr.csum_data =
			    cur_rx->bge_tcp_udp_csum;
			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
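/*
 * (Editorial note.)  bge_tcp_udp_csum carries the NIC-computed TCP/UDP
 * checksum; exporting it through csum_data with CSUM_DATA_VALID lets
 * the stack finish validation.  The m_pkthdr.len >= ETHER_MIN_NOPAD
 * guard on the second path is assumed to avoid trusting hardware
 * checksums on runt frames, whose pad bytes some MACs fold into the
 * sum.
 */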
/* bge_txeof() */
	if (sc->bge_tx_saved_considx == tx_cons)
	ifp = sc->bge_ifp;
	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
	    sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
	while (sc->bge_tx_saved_considx != tx_cons) {
		idx = sc->bge_tx_saved_considx;
		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
			    sc->bge_cdata.bge_tx_dmamap[idx],
			bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
			    sc->bge_cdata.bge_tx_dmamap[idx]);
			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
			sc->bge_cdata.bge_tx_chain[idx] = NULL;
		sc->bge_txcnt--;
		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
	if (sc->bge_txcnt == 0)
		sc->bge_timer = 0;
/* bge_poll() */
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
	tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
	statusword = sc->bge_ldata.bge_status_block->bge_status;
	sc->bge_ldata.bge_status_block->bge_status = 0;
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
		sc->bge_link_evt++;
	if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
	    sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
	sc->rxcycles = count;
/* bge_msi_intr() */
	taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);

/* bge_intr_task() */
	ifp = sc->bge_ifp;
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
	tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
	status = sc->bge_ldata.bge_status_block->bge_status;
	status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
	sc->bge_ldata.bge_status_block->bge_status = 0;
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
	    sc->bge_rx_saved_considx != rx_prod) {
		/* Check RX return ring producer/consumer. */
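/*
 * (Editorial gloss of the tagged-status handshake above.)  With
 * BGE_FLAG_TAGGED_STATUS the hardware tags each status block update;
 * writing (bge_status_tag << 24) back to the IRQ mailbox acknowledges
 * exactly the updates the driver has consumed, so an update racing
 * with this handler still raises a fresh interrupt.
 */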
/* bge_intr() */
	ifp = sc->bge_ifp;
	/*
	 * ... pessimizations for re-enabling interrupts.  We used to have
	 * races ...
	 * ... running (by switching to the interrupt-mode coalescence ...
	 */
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
	tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
	sc->bge_ldata.bge_status_block->bge_status = 0;
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
	    statusword || sc->bge_link_evt)
	/* Check RX return ring producer/consumer. */
/* bge_asf_driver_up() */
	if (sc->bge_asf_mode & ASF_STACKUP) {
		if (sc->bge_asf_count)
			sc->bge_asf_count--;
		sc->bge_asf_count = 2;

/* bge_tick() */
	if (callout_pending(&sc->bge_stat_ch) ||
	    !callout_active(&sc->bge_stat_ch))
	if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
		mii = device_get_softc(sc->bge_miibus);
		/*
		 * ... IPMI/ASF mode or produce extra input errors ...
		 */
		if (!sc->bge_link)
	/*
	 * Since in TBI mode auto-polling can't be used we should poll
	 * ...
	 */
		if (!(if_getcapenable(sc->bge_ifp) & IFCAP_POLLING))
			sc->bge_link_evt++;
			if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
			    sc->bge_flags & BGE_FLAG_5788)
	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
/* bge_stats_update_regs() */
	stats = &sc->bge_mac_stats;
	stats->ifHCOutOctets +=
	stats->etherStatsCollisions +=
	stats->outXonSent +=
	stats->outXoffSent +=
	stats->dot3StatsInternalMacTransmitErrors +=
	stats->dot3StatsSingleCollisionFrames +=
	stats->dot3StatsMultipleCollisionFrames +=
	stats->dot3StatsDeferredTransmissions +=
	stats->dot3StatsExcessiveCollisions +=
	stats->dot3StatsLateCollisions +=
	stats->ifHCOutUcastPkts +=
	stats->ifHCOutMulticastPkts +=
	stats->ifHCOutBroadcastPkts +=
	stats->ifHCInOctets +=
	stats->etherStatsFragments +=
	stats->ifHCInUcastPkts +=
	stats->ifHCInMulticastPkts +=
	stats->ifHCInBroadcastPkts +=
	stats->dot3StatsFCSErrors +=
	stats->dot3StatsAlignmentErrors +=
	stats->xonPauseFramesReceived +=
	stats->xoffPauseFramesReceived +=
	stats->macControlFramesReceived +=
	stats->xoffStateEntered +=
	stats->dot3StatsFramesTooLong +=
	stats->etherStatsJabbers +=
	stats->etherStatsUndersizePkts +=
	stats->FramesDroppedDueToFilters +=
	stats->DmaWriteQueueFull +=
	stats->DmaWriteHighPriQueueFull +=
	stats->NoMoreRxBDs +=
	if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5720_A0)
		stats->InputDiscards +=
	stats->InputErrors +=
	stats->RecvThresholdHit +=
	if (sc->bge_flags & BGE_FLAG_RDMA_BUG) {
		if (stats->ifHCOutUcastPkts + stats->ifHCOutMulticastPkts +
		    stats->ifHCOutBroadcastPkts > BGE_NUM_RDMA_CHANNELS) {
			if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
			sc->bge_flags &= ~BGE_FLAG_RDMA_BUG;
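/*
 * (Editorial note.)  Each counter above is accumulated from a
 * memory-mapped statistics register on its elided continuation line,
 * following this pattern (illustrative only; the register name is an
 * assumption):
 *
 *	stats->ifHCOutOctets +=
 *	    CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
 */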
/* bge_stats_update() */
	ifp = sc->bge_ifp;
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS, cnt - sc->bge_tx_collisions);
	sc->bge_tx_collisions = cnt;
	if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_nobds);
	sc->bge_rx_nobds = cnt;
	if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_inerrs);
	sc->bge_rx_inerrs = cnt;
	if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_discards);
	sc->bge_rx_discards = cnt;
	if_inc_counter(ifp, IFCOUNTER_OERRORS, cnt - sc->bge_tx_discards);
	sc->bge_tx_discards = cnt;
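/*
 * (Editorial note.)  The hardware statistics are free-running, so each
 * pass charges only the delta since the previously cached reading to
 * the interface counters, then saves the new value.
 */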
/* bge_cksum_pad() */
bge_cksum_pad(struct mbuf *m)
	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;

	/* If there's only the packet-header and we can pad there, use it. */
	if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
	    M_TRAILINGSPACE(m) >= padlen) {
		last = m;
		for (last = m; last->m_next != NULL; last = last->m_next);
		n->m_len = 0;
		last->m_next = n;
	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
	last->m_len += padlen;
	m->m_pkthdr.len += padlen;
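/*
 * (Editorial note.)  ETHER_MIN_NOPAD is 60: the 64-byte Ethernet
 * minimum less the 4-byte CRC.  A 42-byte ARP frame, for example,
 * needs padlen = 60 - 42 = 18 bytes of zeros.  The pad must be zeroed
 * because the checksum-assist hardware would otherwise fold stale
 * trailing bytes into the TCP/UDP checksum (the "cksum-assist bug"
 * named above).
 */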
/* bge_check_short_dma() */
bge_check_short_dma(struct mbuf *m)
	/*
	 * If the device receives two back-to-back send BDs with less than
	 * or equal to 8 bytes total, it may hang.  The two back-to-back
	 * send BDs must be in the same frame for this failure to occur.
	 * Scan the mbuf chain and see whether two such back-to-back send
	 * BDs are present; if so, defragment the frame.
	 */
	for (n = m, found = 0; n != NULL; n = n->m_next) {
		if (n->m_len < 8) {
	n = m_defrag(m, M_NOWAIT);
		m_freem(m);
	n = m;
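/*
 * (Editorial sketch, not the verbatim source.)  A plausible shape for
 * the elided scan: count consecutive sub-8-byte mbufs and fall back to
 * m_defrag() once two in a row are seen.
 *
 *	for (n = m, found = 0; n != NULL; n = n->m_next) {
 *		if (n->m_len < 8) {
 *			if (++found > 1)
 *				break;
 *		} else
 *			found = 0;
 *	}
 */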
/* bge_setup_tso() */
bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
	if (M_WRITABLE(m) == 0) {
		n = m_dup(m, M_NOWAIT);
		m_freem(m);
		m = n;
	m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
	if (m == NULL)
	ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
	poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
	m = m_pullup(m, poff + sizeof(struct tcphdr));
	if (m == NULL)
	tcp = (struct tcphdr *)(mtod(m, char *) + poff);
	m = m_pullup(m, poff + (tcp->th_off << 2));
	if (m == NULL)
	*mss = m->m_pkthdr.tso_segsz;
	ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
	ip->ip_sum = 0;
	ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
	tcp = (struct tcphdr *)(mtod(m, char *) + poff);
	tcp->th_sum = 0;
	/*
	 * ... TSO depending on ASIC revision.  Due to TSO-capable firmware
	 * ...
	 */
	hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
	if (sc->bge_flags & BGE_FLAG_TSO3) {
	return (m);
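/*
 * (Editorial gloss.)  hlen above is the combined IP + TCP header
 * length expressed in 32-bit words.  On TSO3-capable chips that value
 * is assumed to be split across the mss and flags words of the send
 * BD; on the older TSO engines it is assumed to be folded into the
 * upper bits of mss (on the order of *mss |= hlen << 11).  The exact
 * bit layout is hardware-defined and elided from this excerpt.
 */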
/* bge_encap() */
	struct mbuf *m = *m_head;

	if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
	    m->m_next != NULL) {
		*m_head = bge_check_short_dma(m);
		m = *m_head;
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		*m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
	} else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
		if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
			if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
			    (error = bge_cksum_pad(m)) != 0) {
				m_freem(m);
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
		if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
		    m->m_pkthdr.len > ETHER_MAX_LEN)
		if (sc->bge_forced_collapse > 0 &&
		    (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
			if (sc->bge_forced_collapse == 1)
				m = m_defrag(m, M_NOWAIT);
				m = m_collapse(m, M_NOWAIT,
				    sc->bge_forced_collapse);
			if (m == NULL)
				m = *m_head;
			*m_head = m;
	map = sc->bge_cdata.bge_tx_dmamap[idx];
	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
		m = m_collapse(m, M_NOWAIT, BGE_NSEG_NEW);
		if (m == NULL) {
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
		    m, segs, &nsegs, BUS_DMA_NOWAIT);
			m_freem(m);
	if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
		bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
	bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
	if (m->m_flags & M_VLANTAG) {
		vlan_tag = m->m_pkthdr.ether_vtag;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5762 &&
	    (m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		d = &sc->bge_ldata.bge_tx_ring[idx];
		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
		d->bge_len = segs[i].ds_len;
		if (d->bge_addr.bge_addr_lo + segs[i].ds_len + mss <
		    d->bge_addr.bge_addr_lo)
		d->bge_flags = csum_flags;
		d->bge_vlan_tag = vlan_tag;
		d->bge_mss = mss;
		if (i == nsegs - 1)
		if (i != nsegs - 1) {
			bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map,
			bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
		d = &sc->bge_ldata.bge_tx_ring[idx];
		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
		d->bge_len = segs[i].ds_len;
		d->bge_flags = csum_flags;
		d->bge_vlan_tag = vlan_tag;
		d->bge_mss = mss;
		if (i == nsegs - 1)
	d->bge_flags |= BGE_TXBDFLAG_END;
	sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
	sc->bge_cdata.bge_tx_dmamap[idx] = map;
	sc->bge_cdata.bge_tx_chain[idx] = m;
	sc->bge_txcnt += nsegs;
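/*
 * (Editorial note on the dmamap swap just above.)  The mbuf chain was
 * loaded with the spare map taken from slot *txidx, so the two slots
 * exchange maps; bge_txeof() can then unload bge_tx_dmamap[idx] for
 * exactly the chain stored in bge_tx_chain[idx].
 */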
/* bge_start_locked() */
	if (!sc->bge_link ||
	prodidx = sc->bge_tx_prodidx;
		if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {

/* bge_start_tx() */
	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
	    sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
	sc->bge_tx_prodidx = prodidx;
	sc->bge_timer = BGE_TX_TIMEOUT;
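/*
 * (Editorial note.)  The BGE_CHIPREV_5700_BX test above guards a
 * doorbell quirk: on those revisions the producer-index mailbox write
 * is assumed to need repeating for the update to take reliably, so the
 * elided branch issues the bge_writembx() a second time.
 */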
/* bge_init_locked() */
	uint16_t *m;

	ifp = sc->bge_ifp;
		device_printf(sc->bge_dev, "initialization failure\n");
	ifp = sc->bge_ifp;
	m = (uint16_t *)if_getlladdr(sc->bge_ifp);
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
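	/*
	 * (Editorial layout sketch.)  The link-level address is
	 * programmed as three big-endian 16-bit words: ADDR1_LO takes
	 * the first two octets and ADDR1_HI the remaining four,
	 * matching the htons() conversions above.
	 */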
	if (sc->bge_forced_udpcsum == 0)
		sc->bge_csum_features &= ~CSUM_UDP;
		sc->bge_csum_features |= CSUM_UDP;
	if_sethwassistbits(ifp, sc->bge_csum_features, 0);
	/* Init RX ring. */
		device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
		if (v == (MCLBYTES - ETHER_ALIGN))
			device_printf(sc->bge_dev,
			    "5705 A0 chip failed to load RX ring\n");
	/* Init jumbo RX ring. */
	    ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN)) {
			device_printf(sc->bge_dev,
			    "no memory for jumbo Rx buffers.\n");
	/* Init our RX return ring index. */
	sc->bge_rx_saved_considx = 0;
	/* Init our RX/TX stat counters. */
	sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
	if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
	if (sc->bge_asicrev == BGE_ASICREV_BCM5720 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5762) {
	if (sc->bge_asicrev == BGE_ASICREV_BCM5762)
	/*
	 * Set the number of good frames to receive after RX MBUF
	 * Low Watermark has been reached.  After the RX MAC receives
	 * this number of frames, it will drop subsequent incoming
	 * frames until the MBUF High Watermark is reached.
	 */
	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
/* bge_ifmedia_upd_locked() */
	ifm = &sc->bge_ifmedia;
	if (sc->bge_flags & BGE_FLAG_TBI) {
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		switch (IFM_SUBTYPE(ifm->ifm_media)) {
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
	sc->bge_link_evt++;
	mii = device_get_softc(sc->bge_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
	/*
	 * ... get an RX intr.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
	    sc->bge_flags & BGE_FLAG_5788)

/* bge_ifmedia_sts() */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
			ifmr->ifm_status |= IFM_ACTIVE;
			ifmr->ifm_active |= IFM_NONE;
			ifmr->ifm_active |= IFM_1000_SX;
			ifmr->ifm_active |= IFM_HDX;
			ifmr->ifm_active |= IFM_FDX;
	mii = device_get_softc(sc->bge_miibus);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
/* bge_ioctl() */
		    (sc->bge_flags & BGE_FLAG_JUMBO_STD)) {
			if (ifr->ifr_mtu < ETHERMIN ||
			    ifr->ifr_mtu > BGE_JUMBO_MTU) {
		} else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) {
		if (if_getmtu(ifp) != ifr->ifr_mtu) {
			if_setmtu(ifp, ifr->ifr_mtu);
		/*
		 * ... a full re-init means reloading the firmware and ...
		 */
		flags = if_getflags(ifp) ^ sc->bge_if_flags;
		sc->bge_if_flags = if_getflags(ifp);
		if (sc->bge_flags & BGE_FLAG_TBI) {
			    &sc->bge_ifmedia, command);
			mii = device_get_softc(sc->bge_miibus);
			    &mii->mii_media, command);
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
		if (ifr->ifr_reqcap & IFCAP_POLLING) {
			    sc->bge_csum_features, 0);
			    sc->bge_csum_features);
/* bge_watchdog() */
	if (sc->bge_timer == 0 || --sc->bge_timer)
		sc->bge_timer = BGE_TX_TIMEOUT;
		sc->bge_timer = BGE_TX_TIMEOUT;
	ifp = sc->bge_ifp;
	if_printf(ifp, "watchdog timeout -- resetting\n");

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
/* bge_stop() */
	ifp = sc->bge_ifp;
	callout_stop(&sc->bge_stat_ch);
	if (sc->bge_asf_mode & ASF_STACKUP)
	/* Free the RX lists. */
	/* Free jumbo RX list. */
	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
	if (bootverbose && sc->bge_link)
		if_printf(sc->bge_ifp, "link DOWN\n");
	sc->bge_link = 0;
/* bge_resume() */
	ifp = sc->bge_ifp;

/* bge_link_upd() */
	sc->bge_link_evt = 0;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
		mii = device_get_softc(sc->bge_miibus);
		if (!sc->bge_link &&
		    mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->bge_link++;
				if_printf(sc->bge_ifp, "link UP\n");
		} else if (sc->bge_link &&
		    (!(mii->mii_media_status & IFM_ACTIVE) ||
		    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
			sc->bge_link = 0;
				if_printf(sc->bge_ifp, "link DOWN\n");
		bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr,
		bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr,
	if (sc->bge_flags & BGE_FLAG_TBI) {
		if (!sc->bge_link) {
			sc->bge_link++;
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
				if_printf(sc->bge_ifp, "link UP\n");
			if_link_state_change(sc->bge_ifp,
		} else if (sc->bge_link) {
			sc->bge_link = 0;
				if_printf(sc->bge_ifp, "link DOWN\n");
			if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
	} else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
		if (link != sc->bge_link ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5700) {
			mii = device_get_softc(sc->bge_miibus);
			if (!sc->bge_link &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
				sc->bge_link++;
					if_printf(sc->bge_ifp, "link UP\n");
			} else if (sc->bge_link &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
				sc->bge_link = 0;
					if_printf(sc->bge_ifp, "link DOWN\n");
	mii = device_get_softc(sc->bge_miibus);
	bge_miibus_statchg(sc->bge_dev);
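/*
 * (Editorial summary.)  bge_link_upd() handles three link-change
 * styles visible above: the BCM5700 (non-B2) path that re-reads the
 * MII PHY, the TBI (fiber) path driven by MAC status bits, and the
 * autopoll path gated on BGE_MIMODE_AUTOPOLL.  All three converge on
 * the same bge_link bookkeeping and if_printf() reporting.
 */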
/* bge_add_sysctls() */
	ctx = device_get_sysctl_ctx(sc->bge_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
	sc->bge_forced_collapse = 0;
	    CTLFLAG_RWTUN, &sc->bge_forced_collapse, 0,
	sc->bge_msi = 1;
	    CTLFLAG_RDTUN, &sc->bge_msi, 0, "Enable MSI");
	sc->bge_forced_udpcsum = 0;
	    CTLFLAG_RWTUN, &sc->bge_forced_udpcsum, 0,

/* bge_add_sysctl_stats() */
	BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
	BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
	BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
	tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE RX Statistics");
/* bge_add_sysctl_stats_regs() */
	stats = &sc->bge_mac_stats;
	    &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
	    &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
	    &stats->DmaWriteHighPriQueueFull,
	    &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
	    &stats->InputDiscards, "Discarded Input Frames");
	    &stats->InputErrors, "Input Errors");
	    &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE RX Statistics");
	    &stats->ifHCInOctets, "Inbound Octets");
	    &stats->etherStatsFragments, "Fragments");
	    &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
	    &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
	    &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
	    &stats->dot3StatsFCSErrors, "FCS Errors");
	    &stats->dot3StatsAlignmentErrors, "Alignment Errors");
	    &stats->xonPauseFramesReceived, "XON Pause Frames Received");
	    &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
	    &stats->macControlFramesReceived, "MAC Control Frames Received");
	    &stats->xoffStateEntered, "XOFF State Entered");
	    &stats->dot3StatsFramesTooLong, "Frames Too Long");
	    &stats->etherStatsJabbers, "Jabbers");
	    &stats->etherStatsUndersizePkts, "Undersized Packets");
	    &stats->ifHCOutOctets, "Outbound Octets");
	    &stats->etherStatsCollisions, "TX Collisions");
	    &stats->outXonSent, "XON Sent");
	    &stats->outXoffSent, "XOFF Sent");
	    &stats->dot3StatsInternalMacTransmitErrors,
	    &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
	    &stats->dot3StatsMultipleCollisionFrames,
	    &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
	    &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
	    &stats->dot3StatsLateCollisions, "Late Collisions");
	    &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
	    &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
	    &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
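/*
 * (Editorial note.)  Each "&stats->..." pair above is the tail of a
 * sysctl-registration call whose head sits on an elided line; the
 * assumed full shape, via a driver macro wrapping SYSCTL_ADD_UQUAD, is
 * roughly:
 *
 *	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "InOctets", CTLFLAG_RD,
 *	    &stats->ifHCInOctets, "Inbound Octets");
 */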
/* bge_sysctl_debug_info() */
	result = -1;
	if (error || (req->newptr == NULL))
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
	sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
		printf(" - 5717 Plus\n");
		printf(" - 5755 Plus\n");
		printf(" - 575X Plus\n");
		printf(" - 5705 Plus\n");
		printf(" - 5714 Family\n");
		printf(" - 5700 Family\n");
	if (sc->bge_flags & BGE_FLAG_JUMBO)
		printf(" - Supports Jumbo Frames\n");
	if (sc->bge_flags & BGE_FLAG_PCIX)
		printf(" - PCI-X Bus\n");
	if (sc->bge_flags & BGE_FLAG_PCIE)
		printf(" - PCI Express Bus\n");
	if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
		printf(" - No 3 LEDs\n");
	if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
		printf(" - RX Alignment Bug\n");
/* bge_sysctl_reg_read() */
	result = -1;
	if (error || (req->newptr == NULL))

/* bge_sysctl_ape_read() */
	result = -1;
	if (error || (req->newptr == NULL))

/* bge_sysctl_mem_read() */
	result = -1;
	if (error || (req->newptr == NULL))

/* bge_get_eaddr_nvram() */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)

/* bge_get_eaddr_eeprom() */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)

/* bge_get_counter() */
	stats = &sc->bge_mac_stats;
		return (stats->NoMoreRxBDs + stats->InputDiscards +
		    stats->InputErrors);
		return (stats->etherStatsCollisions);

/* bge_debugnet_init() */
	/*
	 * ... by two actual rings, for cluster- and jumbo-sized mbufs.
	 */
	if ((sc->bge_flags & BGE_FLAG_JUMBO_STD) != 0 &&
	    (if_getmtu(sc->bge_ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN +
	    ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN)))
/* bge_debugnet_transmit() */
bge_debugnet_transmit(if_t ifp, struct mbuf *m)
	prodidx = sc->bge_tx_prodidx;
	error = bge_encap(sc, &m, &prodidx);

/* bge_debugnet_poll() */
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
	rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
	tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map,
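/*
 * (Editorial note.)  The debugnet poll path repeats the status-block
 * sync/read/sync sequence used by bge_intr() and bge_poll() above,
 * since debugnet must harvest RX and TX completions with interrupts
 * unavailable.
 */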