/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * $Id: eqos.c 1059 2022-12-08 19:32:32Z sos $
 *
 * DesignWare Ethernet Quality-of-Service controller
 */
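
/*
 * Number of TX descriptors outstanding between the software head and tail
 * indices, accounting for ring wrap-around.  For example (illustrative
 * values only), with TX_DESC_COUNT = 256, head = 2 and tail = 250 this
 * yields (2 - 250 + 256) % 256 = 8 descriptors in flight.
 */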
#define TX_QUEUED(h, t) ((((h) - (t)) + TX_DESC_COUNT) % TX_DESC_COUNT)

#define EQOS_LOCK(sc) mtx_lock(&(sc)->lock)
#define EQOS_UNLOCK(sc) mtx_unlock(&(sc)->lock)
#define EQOS_ASSERT_LOCKED(sc) mtx_assert(&(sc)->lock, MA_OWNED)
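
/* 32-bit register accessors for the controller's memory-mapped window. */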
#define RD4(sc, o) bus_read_4(sc->res[EQOS_RES_MEM], (o))
#define WR4(sc, o, v) bus_write_4(sc->res[EQOS_RES_MEM], (o), (v))

        { -1, 0 }       /* list terminator */

/* eqos_miibus_readreg() */
        addr = sc->csr_clock_range |
            /* ... */
        for (retry = MII_BUSY_RETRY; retry > 0; retry--) {

/* eqos_miibus_writereg() */
        addr = sc->csr_clock_range |
            /* ... */
        for (retry = MII_BUSY_RETRY; retry > 0; retry--) {

/* eqos_miibus_statchg() */
        struct mii_data *mii = device_get_softc(sc->miibus);
        /* ... */
        if (mii->mii_media_status & IFM_ACTIVE)
                sc->link_up = true;
        else
                sc->link_up = false;
        /* ... */
        switch (IFM_SUBTYPE(mii->mii_media_active)) {
        /* ... */
                sc->link_up = false;
        /* ... */
        }

        if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX))
                /* ... */
        IF_EQOS_SET_SPEED(dev, IFM_SUBTYPE(mii->mii_media_active));

        WR4(sc, GMAC_MAC_1US_TIC_COUNTER, (sc->csr_clock / 1000000) - 1);
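
/*
 * GMAC_MAC_1US_TIC_COUNTER provides the MAC's 1 microsecond reference
 * tick: it is programmed with the number of CSR clock cycles per
 * microsecond, minus one, and is rewritten here after a media change.
 */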

/* eqos_media_status() */
        struct mii_data *mii = device_get_softc(sc->miibus);
        /* ... */
        ifmr->ifm_active = mii->mii_media_active;
        ifmr->ifm_status = mii->mii_media_status;

/* eqos_media_change() */
        error = mii_mediachg(device_get_softc(sc->miibus));

/* eqos_setup_txdesc() */
        bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREWRITE);
        sc->tx.desc_ring[index].des0 = htole32((uint32_t)paddr);
        sc->tx.desc_ring[index].des1 = htole32((uint32_t)(paddr >> 32));
        sc->tx.desc_ring[index].des2 = htole32(tdes2 | len);
        sc->tx.desc_ring[index].des3 = htole32(tdes3 | total_len);
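
/*
 * A TX descriptor carries its 64-bit buffer address split across des0
 * (low half) and des1 (high half), with length and control flags in
 * des2/des3; every field is converted to little-endian for the hardware.
 */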

/* eqos_setup_txbuf() */
        int first = sc->tx.head;
        /* ... */
        error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
            sc->tx.buf_map[first].map, m, segs, &nsegs, 0);
        /* ... */
                device_printf(sc->dev, "TX packet too big, trying defrag\n");
                bus_dmamap_unload(sc->tx.buf_tag, sc->tx.buf_map[first].map);
                /* ... */
                error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
                    sc->tx.buf_map[first].map, m, segs, &nsegs, 0);
        /* ... */
        if (TX_QUEUED(sc->tx.head, sc->tx.tail) + nsegs > TX_DESC_COUNT) {
                bus_dmamap_unload(sc->tx.buf_tag, sc->tx.buf_map[first].map);
                device_printf(sc->dev, "no more TX queue space\n");
                /* ... */
        }

        bus_dmamap_sync(sc->tx.buf_tag, sc->tx.buf_map[first].map,
            BUS_DMASYNC_PREWRITE);

        sc->tx.buf_map[first].mbuf = m;

        /* ... */
                if (idx == (nsegs - 1))
                        /* ... */
                eqos_setup_txdesc(sc, sc->tx.head, flags, segs[idx].ds_addr,
                    segs[idx].ds_len, m->m_pkthdr.len);
                /* ... */
                sc->tx.head = TX_NEXT(sc->tx.head);
        /* ... */

        bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREWRITE);
        sc->tx.desc_ring[first].des3 |= htole32(EQOS_TDES3_OWN);
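
/*
 * The OWN bit on the chain's first descriptor is set last, only after all
 * other descriptors have been written and synced, so the DMA engine can
 * never observe a partially built chain.
 */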

/* eqos_setup_rxdesc() */
        sc->rx.desc_ring[index].des0 = htole32((uint32_t)paddr);
        sc->rx.desc_ring[index].des1 = htole32((uint32_t)(paddr >> 32));
        sc->rx.desc_ring[index].des2 = htole32(0);
        bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, BUS_DMASYNC_PREWRITE);
        sc->rx.desc_ring[index].des3 = htole32(EQOS_RDES3_OWN | EQOS_RDES3_IOC |
            /* ... */
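
/*
 * RDES3_OWN hands the descriptor to the hardware and RDES3_IOC requests an
 * interrupt once a received frame lands in the buffer; OWN is written only
 * after the address fields above have been synced out.
 */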

/* eqos_setup_rxbuf() */
        error = bus_dmamap_load_mbuf_sg(sc->rx.buf_tag,
            sc->rx.buf_map[index].map, m, &seg, &nsegs, 0);
        /* ... */
        bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
            BUS_DMASYNC_PREREAD);
        /* ... */
        sc->rx.buf_map[index].mbuf = m;

/* eqos_alloc_mbufcl() */
        m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

/* eqos_setup_rxfilter() */
        if_t ifp = sc->ifp;

/* eqos_reset() */
        for (retry = 2000; retry > 0; retry--) {

/* eqos_init_rings() */
        /* ... */
            (uint32_t)(sc->tx.desc_ring_paddr >> 32));
        /* ... */
            (uint32_t)sc->tx.desc_ring_paddr);
        WR4(sc, GMAC_DMA_CHAN0_TX_RING_LEN, TX_DESC_COUNT - 1);

        /* ... */
            (uint32_t)(sc->rx.desc_ring_paddr >> 32));
        /* ... */
            (uint32_t)sc->rx.desc_ring_paddr);
        WR4(sc, GMAC_DMA_CHAN0_RX_RING_LEN, RX_DESC_COUNT - 1);
        /* ... */
            (uint32_t)sc->rx.desc_ring_paddr + DESC_OFFSET(RX_DESC_COUNT));
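
/*
 * The elided WR4() targets above program the rings' physical base
 * addresses as high/low halves and the ring lengths as count minus one.
 * The final write (apparently the RX tail/end-address register) points one
 * slot past the last RX descriptor, marking every RX slot as available to
 * the hardware.
 */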

/* eqos_init() */
        if_t ifp = sc->ifp;
        struct mii_data *mii = device_get_softc(sc->miibus);
        /* ... */
        WR4(sc, GMAC_MAC_1US_TIC_COUNTER, (sc->csr_clock / 1000000) - 1);
        /* ... */
        val |= ((DESC_ALIGN - 16) / 8) << GMAC_DMA_CHAN0_CONTROL_DSL_SHIFT;
        /* ... */
        callout_reset(&sc->callout, hz, eqos_tick, sc);

/* eqos_start_locked() */
        if (!sc->link_up)
                /* ... */
        /* ... */
                if (TX_QUEUED(sc->tx.head, sc->tx.tail) >=
                    TX_DESC_COUNT - TX_MAX_SEGS) {
                        /* ... */
                }
        /* ... */
        bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
            BUS_DMASYNC_PREWRITE);

        /* Start and run TX DMA */
        /* ... */
            (uint32_t)sc->tx.desc_ring_paddr + DESC_OFFSET(sc->tx.head));
        sc->tx_watchdog = WATCHDOG_TIMEOUT_SECS;
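
/*
 * Transmission is kicked by advancing the channel's TX tail pointer to the
 * current head descriptor; arming tx_watchdog here lets eqos_tick() notice
 * a transmitter that stops making progress.
 */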

/* eqos_stop() */
        if_t ifp = sc->ifp;
        /* ... */
        callout_stop(&sc->callout);
        /* ... */

        /* Flush data in the TX FIFO */
        /* ... */
        for (retry = 10000; retry > 0; retry--) {
        /* ... */
                device_printf(sc->dev, "timeout flushing TX queue\n");

/* eqos_rxintr() */
        if_t ifp = sc->ifp;
        /* ... */
        int error, length;
        /* ... */
                rdes3 = le32toh(sc->rx.desc_ring[sc->rx.head].des3);
                /* ... */
                bus_dmamap_sync(sc->rx.buf_tag,
                    sc->rx.buf_map[sc->rx.head].map, BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->rx.buf_tag,
                    sc->rx.buf_map[sc->rx.head].map);

                length = rdes3 & EQOS_RDES3_LENGTH_MASK;
                if (length) {
                        m = sc->rx.buf_map[sc->rx.head].mbuf;
                        m->m_pkthdr.rcvif = ifp;
                        m->m_pkthdr.len = length;
                        m->m_len = length;
                        m->m_nextpkt = NULL;
                        /* ... */
                        m_adj(m, -ETHER_CRC_LEN);
                        /* ... */
                }
                /* ... */
                if ((error = eqos_setup_rxbuf(sc, sc->rx.head, m)))
                        /* ... */
                /* ... */
                    (uint32_t)sc->rx.desc_ring_paddr + DESC_OFFSET(sc->rx.head));

                sc->rx.head = RX_NEXT(sc->rx.head);
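
/*
 * Each completed RX slot is refilled via eqos_setup_rxbuf() and the RX tail
 * pointer is advanced so the hardware can reuse the descriptor; the frame
 * itself has its trailing CRC trimmed with m_adj(m, -ETHER_CRC_LEN) before
 * being handed up the stack.
 */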

/* eqos_txintr() */
        if_t ifp = sc->ifp;
        /* ... */
        while (sc->tx.tail != sc->tx.head) {
                tdes3 = le32toh(sc->tx.desc_ring[sc->tx.tail].des3);
                /* ... */
                bmap = &sc->tx.buf_map[sc->tx.tail];
                if (bmap->mbuf) {
                        bus_dmamap_sync(sc->tx.buf_tag, bmap->map,
                            BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(sc->tx.buf_tag, bmap->map);
                        m_freem(bmap->mbuf);
                        bmap->mbuf = NULL;
                }
                /* ... */
                eqos_setup_txdesc(sc, sc->tx.tail, 0, 0, 0, 0);
                /* ... */
                sc->tx.tail = TX_NEXT(sc->tx.tail);
        }
        if (sc->tx.tail == sc->tx.head)
                sc->tx_watchdog = 0;
        eqos_start_locked(sc->ifp);
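
/*
 * Reclaim proceeds from tail toward head: once the hardware clears OWN in
 * tdes3, the map is unloaded, the mbuf freed, and the descriptor zeroed
 * for reuse.  The watchdog is disarmed when the ring drains, and
 * eqos_start_locked() requeues any packets that were waiting for space.
 */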

/* eqos_intr_mtl() */
        device_printf(sc->dev,
            /* ... */

/* eqos_tick() */
        struct mii_data *mii = device_get_softc(sc->miibus);
        /* ... */
        if (sc->tx_watchdog > 0)
                if (!--sc->tx_watchdog) {
                        device_printf(sc->dev, "watchdog timeout\n");
                        /* ... */
                }

        link_status = sc->link_up;
        /* ... */
        if (sc->link_up && !link_status)
                eqos_start_locked(sc->ifp);

        callout_reset(&sc->callout, hz, eqos_tick, sc);
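
/*
 * eqos_tick() runs once per second: it counts the TX watchdog down, and by
 * saving link_up before the (elided) MII poll it can detect a link-up
 * transition and restart transmission immediately.
 */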

/* eqos_intr() */
        device_printf(sc->dev, "MAC interrupt\n");
        /* ... */
        device_printf(sc->dev,
            /* ... */
        /* ... */
        device_printf(sc->dev, "RX/TX status interrupt\n");

/* eqos_ioctl() */
                mii = device_get_softc(sc->miibus);
                error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
                /* ... */
                mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);

/* eqos_axi_configure() */
        /* Allowed burst lengths */
        /* ... */
        /* Fixed burst length */
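
/*
 * The AXI bus-mode setup advertises which DMA burst lengths the controller
 * may issue and whether fixed-length bursts are used; longer bursts improve
 * bus efficiency at the cost of latency.
 */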

/* eqos_setup_dma() */
        /* Set up TX descriptor ring, descriptors, and DMA maps */
        if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
            /* ... */
            NULL, NULL, &sc->tx.desc_tag))) {
                device_printf(sc->dev, "could not create TX ring DMA tag\n");
                /* ... */
        }
        if ((error = bus_dmamem_alloc(sc->tx.desc_tag,
            (void **)&sc->tx.desc_ring,
            /* ... */
            &sc->tx.desc_map))) {
                device_printf(sc->dev,
                    "could not allocate TX descriptor ring\n");
                /* ... */
        }
        if ((error = bus_dmamap_load(sc->tx.desc_tag, sc->tx.desc_map,
            sc->tx.desc_ring,
            TX_DESC_SIZE, eqos_get1paddr, &sc->tx.desc_ring_paddr, 0))) {
                device_printf(sc->dev,
                    "could not load TX descriptor ring map\n");
                /* ... */
        }
        if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
            /* ... */
            &sc->tx.buf_tag))) {
                device_printf(sc->dev, "could not create TX buffer DMA tag\n");
                /* ... */
        }
        /* ... */
                if ((error = bus_dmamap_create(sc->tx.buf_tag, BUS_DMA_COHERENT,
                    &sc->tx.buf_map[i].map))) {
                        device_printf(sc->dev, "cannot create TX buffer map\n");
                        /* ... */
                }
        /* ... */
        if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
            /* ... */
            NULL, NULL, &sc->rx.desc_tag))) {
                device_printf(sc->dev, "could not create RX ring DMA tag\n");
                /* ... */
        }
        if ((error = bus_dmamem_alloc(sc->rx.desc_tag,
            (void **)&sc->rx.desc_ring,
            /* ... */
            &sc->rx.desc_map))) {
                device_printf(sc->dev,
                    /* ... */
        }
        if ((error = bus_dmamap_load(sc->rx.desc_tag, sc->rx.desc_map,
            sc->rx.desc_ring, RX_DESC_SIZE, eqos_get1paddr,
            &sc->rx.desc_ring_paddr, 0))) {
                device_printf(sc->dev,
                    /* ... */
        }
        if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
            /* ... */
            &sc->rx.buf_tag))) {
                device_printf(sc->dev, "could not create RX buffer DMA tag\n");
                /* ... */
        }
        /* ... */
                if ((error = bus_dmamap_create(sc->rx.buf_tag, BUS_DMA_COHERENT,
                    &sc->rx.buf_map[i].map))) {
                        device_printf(sc->dev, "cannot create RX buffer map\n");
                        /* ... */
                }
                /* ... */
                        device_printf(sc->dev, "cannot allocate RX mbuf\n");
                /* ... */
                        device_printf(sc->dev, "cannot create RX buffer\n");
        /* ... */

        device_printf(sc->dev, "TX ring @ 0x%lx, RX ring @ 0x%lx\n",
            sc->tx.desc_ring_paddr, sc->rx.desc_ring_paddr);
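
/*
 * Every DMA object above follows the usual busdma sequence: create a tag
 * describing alignment/size constraints, allocate (or load) memory against
 * it, and keep the resulting map for later sync/unload.  A minimal sketch
 * of the idiom, with hypothetical names and most arguments elided:
 *
 *      bus_dma_tag_create(parent, align, 0, BUS_SPACE_MAXADDR, ..., &tag);
 *      bus_dmamem_alloc(tag, &vaddr, BUS_DMA_WAITOK | BUS_DMA_ZERO, &map);
 *      bus_dmamap_load(tag, map, vaddr, size, callback, &paddr, 0);
 */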

/* eqos_attach() */
        if (bus_alloc_resources(dev, eqos_spec, sc->res)) {
                /* ... */
                bus_release_resources(dev, eqos_spec, sc->res);
                /* ... */
        }
        /* ... */
        sc->dev = dev;
        /* ... */
                sc->hw_feature[n] = RD4(sc, GMAC_MAC_HW_FEATURE(n));
        /* ... */
                    sc->hw_feature[0], sc->hw_feature[1],
                    sc->hw_feature[2], sc->hw_feature[3]);
        /* ... */
        mtx_init(&sc->lock, "eqos lock", MTX_NETWORK_LOCK, MTX_DEF);
        callout_init_mtx(&sc->callout, &sc->lock, 0);
        /* ... */
        device_printf(sc->dev, "Ethernet address %6D\n", eaddr, ":");
        /* ... */
                device_printf(sc->dev, "reset timeout!\n");
        /* ... */
                device_printf(sc->dev, "failed to set up DMA descriptors\n");
        /* ... */
        if ((bus_setup_intr(dev, sc->res[EQOS_RES_IRQ0], EQOS_INTR_FLAGS,
            NULL, eqos_intr, sc, &sc->irq_handle))) {
                /* ... */
                bus_release_resources(dev, eqos_spec, sc->res);
                /* ... */
        }

        ifp = sc->ifp = if_alloc(IFT_ETHER);
        /* ... */
        if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
        if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
        /* ... */
        if_setsendqlen(ifp, TX_DESC_COUNT - 1);
        /* ... */
        if ((error = mii_attach(sc->dev, &sc->miibus, ifp, eqos_media_change,
            /* ... */
                device_printf(sc->dev, "PHY attach failed\n");

/* eqos_detach() */
        if_setflagbits(sc->ifp, 0, IFF_UP);
        ether_ifdetach(sc->ifp);
        /* ... */
        if (sc->miibus)
                device_delete_child(dev, sc->miibus);
        /* ... */
        if (sc->irq_handle)
                bus_teardown_intr(dev, sc->res[EQOS_RES_IRQ0],
                    sc->irq_handle);

        if (sc->ifp)
                if_free(sc->ifp);

        bus_release_resources(dev, eqos_spec, sc->res);

        if (sc->tx.desc_tag) {
                if (sc->tx.desc_map) {
                        bus_dmamap_unload(sc->tx.desc_tag, sc->tx.desc_map);
                        bus_dmamem_free(sc->tx.desc_tag, sc->tx.desc_ring,
                            sc->tx.desc_map);
                }
                bus_dma_tag_destroy(sc->tx.desc_tag);
        }
        if (sc->tx.buf_tag) {
                /* ... */
                        m_free(sc->tx.buf_map[i].mbuf);
                        bus_dmamap_destroy(sc->tx.buf_tag,
                            sc->tx.buf_map[i].map);
                /* ... */
                bus_dma_tag_destroy(sc->tx.buf_tag);
        }
        if (sc->rx.desc_tag) {
                if (sc->rx.desc_map) {
                        bus_dmamap_unload(sc->rx.desc_tag, sc->rx.desc_map);
                        bus_dmamem_free(sc->rx.desc_tag, sc->rx.desc_ring,
                            sc->rx.desc_map);
                }
                bus_dma_tag_destroy(sc->rx.desc_tag);
        }
        if (sc->rx.buf_tag) {
                /* ... */
                        m_free(sc->rx.buf_map[i].mbuf);
                        bus_dmamap_destroy(sc->rx.buf_tag,
                            sc->rx.buf_map[i].map);
                /* ... */
                bus_dma_tag_destroy(sc->rx.buf_tag);
        }
        /* ... */
        mtx_destroy(&sc->lock);
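
/*
 * Teardown runs roughly in reverse order of attach: the interface,
 * interrupt handler, and bus resources go first, then the DMA maps and
 * tags, with the softc mutex destroyed last.
 */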