Lines matching "smi", "-", "mdio"
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
5 * Copyright (C) 2009-2015 Semihalf
76 #include <dev/mdio/mdio.h>
85 #define MGE_DELAY(x) pause("SMI access sleep", (x) / tick_sbt)
166 /* MDIO interface */
178 DRIVER_MODULE(mdio, mge, mdio_driver, 0, 0);
181 MODULE_DEPEND(mge, mdio, 1, 1, 1);
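The "MDIO interface" comment and the DRIVER_MODULE()/MODULE_DEPEND() matches above are the usual FreeBSD mdio(4) glue. A minimal sketch of the shape this takes, assuming the mdio_readreg/mdio_writereg method names from dev/mdio/mdio_if.m and handler names that are not part of the matched lines:

	/*
	 * Sketch only: the real method entries live in the driver's
	 * device_method_t table, which the search did not match.
	 * mge_mdio_readreg()/mge_mdio_writereg() are assumed names.
	 */
	static device_method_t mdio_glue_sketch[] = {
		/* MDIO interface */
		DEVMETHOD(mdio_readreg,		mge_mdio_readreg),
		DEVMETHOD(mdio_writereg,	mge_mdio_writereg),
		DEVMETHOD_END
	};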
188 { -1, 0 }
203 /* SMI access interlock */
218 while (--timeout &&
223 device_printf(dev, "SMI write timeout.\n");
233 while (--timeout &&
238 device_printf(dev, "SMI write validation timeout.\n");
243 /* Wait for the data to update in the SMI register */
264 while (--timeout &&
269 device_printf(dev, "SMI read timeout.\n");
291 MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
295 while (--retries &&
296 !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
302 ret = MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;
317 MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
322 while (--retries && MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY)
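Taken together, the SMI matches above follow one busy-wait pattern: wait for MGE_SMI_BUSY to clear, issue the command through MGE_REG_SMI, then (for reads) poll MGE_SMI_READVALID and mask the result with MGE_SMI_DATA_MASK. A condensed sketch of the read side, where MGE_SMI_READ_CMD(), SMI_RETRIES and SMI_DELAY are hypothetical stand-ins for pieces the listing does not show:

	/* Sketch of the SMI read path implied by the matched lines. */
	static int
	smi_read_sketch(struct mge_softc *sc, int phy, int reg)
	{
		uint32_t retries = SMI_RETRIES;		/* assumed retry budget */

		/* Wait for any previous SMI command to finish. */
		while (--retries &&
		    (MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY))
			MGE_DELAY(SMI_DELAY);		/* assumed interval */
		if (retries == 0)
			return (-1);			/* SMI busy timeout */

		/* Issue the read command; MGE_SMI_READ_CMD() is hypothetical. */
		MGE_WRITE(sc->phy_sc, MGE_REG_SMI,
		    MGE_SMI_MASK & MGE_SMI_READ_CMD(phy, reg));

		/* Wait for the data to update in the SMI register. */
		retries = SMI_RETRIES;
		while (--retries &&
		    !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
			MGE_DELAY(SMI_DELAY);
		if (retries == 0)
			return (-1);			/* SMI read timeout */

		return (MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK);
	}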
340 i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
356 * Fall back -- use the currently programmed address.
406 sc->mge_ver = 2;
407 sc->mge_mtu = 0x4e8;
408 sc->mge_tfut_ipg_max = 0xFFFF;
409 sc->mge_rx_ipg_max = 0xFFFF;
410 sc->mge_tx_arb_cfg = 0xFC0000FF;
411 sc->mge_tx_tok_cfg = 0xFFFF7FFF;
412 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
414 sc->mge_ver = 1;
415 sc->mge_mtu = 0x458;
416 sc->mge_tfut_ipg_max = 0x3FFF;
417 sc->mge_rx_ipg_max = 0x3FFF;
418 sc->mge_tx_arb_cfg = 0x000000FF;
419 sc->mge_tx_tok_cfg = 0x3FFFFFFF;
420 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
423 sc->mge_intr_cnt = 1;
425 sc->mge_intr_cnt = 2;
428 sc->mge_hw_csum = 0;
430 sc->mge_hw_csum = 1;
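For quick reference, the version-dependent values set in the two branches above:

	parameter          ver 2        ver 1
	mge_mtu            0x4e8        0x458
	mge_tfut_ipg_max   0xFFFF       0x3FFF
	mge_rx_ipg_max     0xFFFF       0x3FFF
	mge_tx_arb_cfg     0xFC0000FF   0x000000FF
	mge_tx_tok_cfg     0xFFFF7FFF   0x3FFFFFFF
	mge_tx_tok_cnt     0x3FFFFFFF   0x3FFFFFFF

(The mge_intr_cnt and mge_hw_csum assignments depend on conditions that are not part of the matched lines, so they are left out.)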
441 if_mac = (char *)if_getlladdr(sc->ifp);
478 if (if_getflags(sc->ifp) & IFF_PROMISC) {
516 *paddr = segs->ds_addr;
533 new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
549 (*paddr) = seg->ds_addr;
562 for (i = size - 1; i >= 0; i--) {
564 error = bus_dmamem_alloc(sc->mge_desc_dtag,
565 (void**)&(dw->mge_desc),
567 &(dw->desc_dmap));
570 if_printf(sc->ifp, "failed to allocate DMA memory\n");
571 dw->mge_desc = NULL;
575 error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
576 dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
577 &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
580 if_printf(sc->ifp, "can't load descriptor\n");
581 bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
582 dw->desc_dmap);
583 dw->mge_desc = NULL;
588 dw->mge_desc->next_desc = desc_paddr;
589 desc_paddr = dw->mge_desc_paddr;
591 tab[size - 1].mge_desc->next_desc = desc_paddr;
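The allocation loop above walks the descriptor table backwards, so each descriptor's next_desc ends up holding the bus address of its successor, and the final assignment closes the ring. A stripped-down sketch of just that linking logic, with error handling omitted and a hypothetical ring[]/size pair standing in for the driver's table:

	bus_addr_t next_paddr = 0;

	for (i = size - 1; i >= 0; i--) {
		/* Descriptor i points at descriptor i + 1, which was
		 * allocated in the previous (later-indexed) iteration. */
		ring[i].mge_desc->next_desc = next_paddr;
		next_paddr = ring[i].mge_desc_paddr;
	}
	/* Close the ring: the last descriptor wraps back to the first. */
	ring[size - 1].mge_desc->next_desc = next_paddr;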
594 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent */
604 if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
611 error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
613 if_printf(sc->ifp, "failed to create map for mbuf\n");
617 dw->buffer = (struct mbuf*)NULL;
618 dw->mge_desc->buffer = (bus_addr_t)NULL;
631 bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent */
639 &sc->mge_desc_dtag); /* dmat */
642 mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
643 &sc->mge_tx_dtag);
644 mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
645 &sc->mge_rx_dtag);
648 dw = &(sc->mge_rx_desc[i]);
649 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
650 &dw->mge_desc->buffer);
653 sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
654 sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
670 if (dw->buffer_dmap) {
672 bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
674 bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
676 bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
678 m_freem(dw->buffer);
681 if (dw->desc_dmap) {
682 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
684 bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
685 bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
686 dw->desc_dmap);
696 mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
697 mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
700 bus_dma_tag_destroy(sc->mge_tx_dtag);
701 bus_dma_tag_destroy(sc->mge_rx_dtag);
703 bus_dma_tag_destroy(sc->mge_desc_dtag);
714 mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
716 mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
717 &sc->mge_rx_dtag);
720 dw = &(sc->mge_rx_desc[i]);
721 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
722 &dw->mge_desc->buffer);
725 sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
726 sc->rx_desc_curr = 0;
729 sc->rx_desc_start);
787 sc->dev = dev;
788 sc->node = ofw_bus_get_node(dev);
791 if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) == 0) {
793 device_get_nameunit(sc->phy_sc->dev));
794 sc->phy_attached = 1;
797 sc->phy_attached = 0;
798 sc->phy_sc = sc;
801 if (fdt_find_compatible(sc->node, "mrvl,sw", 1) != 0) {
803 sc->switch_attached = 1;
807 sc->switch_attached = 0;
811 sx_init(&sx_smi, "mge_tick() SMI access threads interlock");
814 /* Set chip version-dependent parameters */
818 mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock",
820 mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock",
824 error = bus_alloc_resources(dev, res_spec, sc->res);
838 sc->tx_desc_curr = 0;
839 sc->rx_desc_curr = 0;
840 sc->tx_desc_used_idx = 0;
841 sc->tx_desc_used_count = 0;
844 sc->rx_ic_time = 768;
845 sc->tx_ic_time = 768;
849 ifp = sc->ifp = if_alloc(IFT_ETHER);
854 if (sc->mge_hw_csum) {
869 if_setsendqlen(ifp, MGE_TX_DESC_NUM - 1);
874 callout_init(&sc->wd_callout, 1);
877 if (sc->phy_attached) {
878 error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
883 sc->ifp = NULL;
887 sc->mii = device_get_softc(sc->miibus);
890 miisc = LIST_FIRST(&sc->mii->mii_phys);
891 MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);
893 /* no PHY, so use hard-coded values */
894 ifmedia_init(&sc->mge_ifmedia, 0,
897 ifmedia_add(&sc->mge_ifmedia,
900 ifmedia_set(&sc->mge_ifmedia,
906 for (i = 1; i <= sc->mge_intr_cnt; ++i) {
907 error = bus_setup_intr(dev, sc->res[i],
909 NULL, *mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].handler,
910 sc, &sc->ih_cookie[i - 1]);
913 mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].description);
919 if (sc->switch_attached) {
921 device_add_child(dev, "mdio", DEVICE_UNIT_ANY);
937 if (sc->ifp)
941 callout_drain(&sc->wd_callout);
944 for (i = 0; i < sc->mge_intr_cnt; ++i) {
945 if (!sc->ih_cookie[i])
948 error = bus_teardown_intr(dev, sc->res[1 + i],
949 sc->ih_cookie[i]);
952 mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i + 1)].description);
956 if (sc->ifp) {
957 ether_ifdetach(sc->ifp);
958 if_free(sc->ifp);
965 bus_release_resources(dev, res_spec, sc->res);
968 mtx_destroy(&sc->receive_lock);
969 mtx_destroy(&sc->transmit_lock);
986 if (!sc->phy_attached) {
987 ifmr->ifm_active = IFM_1000_T | IFM_FDX | IFM_ETHER;
988 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
992 mii = sc->mii;
995 ifmr->ifm_active = mii->mii_media_active;
996 ifmr->ifm_status = mii->mii_media_status;
1046 if (sc->phy_attached) {
1049 sc->mge_media_status = sc->mii->mii_media.ifm_media;
1050 mii_mediachg(sc->mii);
1099 if (sc->mge_ver == 2) {
1105 MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
1106 MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
1107 MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);
1117 MGE_WRITE(sc, sc->mge_mtu, 0);
1128 media_status = sc->mge_media_status;
1129 if (sc->switch_attached) {
1146 MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
1148 sc->rx_desc_start);
1151 sc->tx_desc_curr = 0;
1152 sc->rx_desc_curr = 0;
1153 sc->tx_desc_used_idx = 0;
1154 sc->tx_desc_used_count = 0;
1158 dw = &sc->mge_rx_desc[i];
1159 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1160 dw->mge_desc->buff_size = MCLBYTES;
1161 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1178 if (--count == 0) {
1179 if_printf(sc->ifp, "Timeout on link-up\n");
1194 if (if_getcapenable(sc->ifp) & IFCAP_POLLING)
1201 if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, 0);
1202 if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
1203 sc->wd_timer = 0;
1206 if (sc->phy_attached)
1207 callout_reset(&sc->wd_callout, hz, mge_tick, sc);
1220 if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
1253 ifp = sc->ifp;
1264 ifp = sc->ifp;
1277 if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
1309 mge_intr_rx_locked(sc, -1);
1316 if_t ifp = sc->ifp;
1326 dw = &sc->mge_rx_desc[sc->rx_desc_curr];
1327 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1331 status = dw->mge_desc->cmd_status;
1332 bufsize = dw->mge_desc->buff_size;
1336 if (dw->mge_desc->byte_count &&
1339 bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
1342 mb = m_devget(dw->buffer->m_data,
1343 dw->mge_desc->byte_count - ETHER_CRC_LEN,
1350 mb->m_len -= 2;
1351 mb->m_pkthdr.len -= 2;
1352 mb->m_data += 2;
1354 mb->m_pkthdr.rcvif = ifp;
1365 dw->mge_desc->byte_count = 0;
1366 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1367 sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
1368 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1372 count -= 1;
1386 ifp = sc->ifp;
1399 if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
1418 if_t ifp = sc->ifp;
1427 sc->wd_timer = 0;
1429 while (sc->tx_desc_used_count) {
1431 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1432 desc = dw->mge_desc;
1433 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1437 status = desc->cmd_status;
1442 sc->tx_desc_used_idx =
1443 (sc->tx_desc_used_idx + 1) % MGE_TX_DESC_NUM;
1444 sc->tx_desc_used_count--;
1454 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1456 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1457 m_freem(dw->buffer);
1458 dw->buffer = (struct mbuf*)NULL;
1486 flags = if_getflags(ifp) ^ sc->mge_if_flags;
1499 sc->mge_if_flags = if_getflags(ifp);
1511 mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
1514 if_setcapenablebit(ifp, IFCAP_HWCSUM & ifr->ifr_reqcap, 0);
1522 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1547 if (!sc->phy_attached && (command == SIOCSIFMEDIA))
1549 else if (!sc->phy_attached) {
1550 error = ifmedia_ioctl(ifp, ifr, &sc->mge_ifmedia,
1555 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
1556 && !(ifr->ifr_media & IFM_FDX)) {
1557 device_printf(sc->dev,
1558 "1000baseTX half-duplex unsupported\n");
1561 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1619 if (if_getcapenable(sc->ifp) & IFCAP_POLLING)
1620 ether_poll_deregister(sc->ifp);
1641 desc_no = sc->tx_desc_curr;
1642 dw = &sc->mge_tx_desc[desc_no];
1643 mapp = dw->buffer_dmap;
1646 error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
1655 bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1657 return (-1);
1660 bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1664 dw->mge_desc->byte_count = segs[seg].ds_len;
1665 dw->mge_desc->buffer = segs[seg].ds_addr;
1666 dw->buffer = m0;
1667 dw->mge_desc->cmd_status = 0;
1670 dw->mge_desc->cmd_status |= MGE_TX_LAST | MGE_TX_FIRST |
1675 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1678 sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
1679 sc->tx_desc_used_count++;
1688 KASSERT(sc->phy_attached == 1, ("mge_tick while PHY not attached"));
1695 mii_tick(sc->mii);
1698 if(sc->mge_media_status != sc->mii->mii_media.ifm_media)
1699 mge_ifmedia_upd(sc->ifp);
1704 callout_reset(&sc->wd_callout, hz, mge_tick, sc);
1714 ifp = sc->ifp;
1716 if (sc->wd_timer == 0 || --sc->wd_timer) {
1760 if (m0->m_pkthdr.csum_flags & (CSUM_IP|CSUM_TCP|CSUM_UDP) ||
1761 m0->m_flags & M_VLANTAG) {
1771 if (m0->m_next != NULL) {
1778 if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
1795 sc->wd_timer = 5;
1808 ifp = sc->ifp;
1814 callout_stop(&sc->wd_callout);
1818 sc->wd_timer = 0;
1829 while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
1830 sc->tx_desc_used_count) {
1832 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1833 desc = dw->mge_desc;
1834 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1838 status = desc->cmd_status;
1843 sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
1845 sc->tx_desc_used_count--;
1847 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1849 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1851 m_freem(dw->buffer);
1852 dw->buffer = (struct mbuf*)NULL;
1857 while (count--) {
1897 frame->m_pkthdr.csum_data = 0xFFFF;
1900 frame->m_pkthdr.csum_flags = csum_flags;
1907 struct mbuf *m0 = dw->buffer;
1909 int csum_flags = m0->m_pkthdr.csum_flags;
1915 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1916 etype = ntohs(eh->evl_proto);
1920 etype = ntohs(eh->evl_encap_proto);
1925 if_printf(sc->ifp,
1931 ip = (struct ip *)(m0->m_data + ehlen);
1932 cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
1945 dw->mge_desc->cmd_status |= cmd_status;
2009 while(size--)
2032 ctx->smt[i >> 2] |= v << ((i & 0x03) << 3);
2035 ctx->omt[i >> 2] |= v << ((i & 0x03) << 3);
2044 if_t ifp = sc->ifp;
2068 if (sc->rx_ic_time > sc->mge_rx_ipg_max)
2069 sc->rx_ic_time = sc->mge_rx_ipg_max;
2072 reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
2073 reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
2082 if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
2083 sc->tx_ic_time = sc->mge_tfut_ipg_max;
2086 reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
2087 reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
2098 time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
2105 sc->rx_ic_time = time;
2108 sc->tx_ic_time = time;
2123 ctx = device_get_sysctl_ctx(sc->dev);
2124 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));