Lines Matching +full:rx +full:- +full:m

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
29 * $Id: eqos.c 1059 2022-12-08 19:32:32Z sos $
33 * DesignWare Ethernet Quality-of-Service controller
84 #define TX_QUEUED(h, t) ((((h) - (t)) + TX_DESC_COUNT) % TX_DESC_COUNT)
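
A hedged, stand-alone check of the occupancy arithmetic above; TX_DESC_COUNT = 256 is an assumption for the example, the real value is defined in eqos.h:

#include <assert.h>

#define TX_DESC_COUNT   256     /* assumed for the example; see eqos.h */
#define TX_QUEUED(h, t) ((((h) - (t)) + TX_DESC_COUNT) % TX_DESC_COUNT)

int
main(void)
{
    assert(TX_QUEUED(10, 10) == 0);     /* empty: head == tail */
    assert(TX_QUEUED(12, 10) == 2);     /* two descriptors in flight */
    assert(TX_QUEUED(3, 250) == 9);     /* head wrapped past index 255 */
    return (0);
}
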
93 #define EQOS_LOCK(sc) mtx_lock(&(sc)->lock)
94 #define EQOS_UNLOCK(sc) mtx_unlock(&(sc)->lock)
95 #define EQOS_ASSERT_LOCKED(sc) mtx_assert(&(sc)->lock, MA_OWNED)
97 #define RD4(sc, o) bus_read_4(sc->res[EQOS_RES_MEM], (o))
98 #define WR4(sc, o, v) bus_write_4(sc->res[EQOS_RES_MEM], (o), (v))
104 { -1, 0 }
117 addr = sc->csr_clock_range | in eqos_miibus_readreg()
125 for (retry = MII_BUSY_RETRY; retry > 0; retry--) { in eqos_miibus_readreg()
150 addr = sc->csr_clock_range | in eqos_miibus_writereg()
158 for (retry = MII_BUSY_RETRY; retry > 0; retry--) { in eqos_miibus_writereg()
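
Both MII accessors share this bounded busy-wait: start the transaction with the busy (GB) bit set, then poll until the controller clears it. A hedged sketch of that shape; eqos_mdio_wait() is illustrative, and the GMAC_MAC_MDIO_ADDRESS register and its GB bit are assumed from eqos_reg.h:

/* Illustrative helper, not a driver function; register/bit names assumed. */
static int
eqos_mdio_wait(struct eqos_softc *sc)
{
    int retry;

    for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
        if ((RD4(sc, GMAC_MAC_MDIO_ADDRESS) &
            GMAC_MAC_MDIO_ADDRESS_GB) == 0)
            return (0);         /* transaction complete */
        DELAY(10);              /* brief pause before re-polling */
    }
    return (ETIMEDOUT);         /* PHY never released the bus */
}
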
176 struct mii_data *mii = device_get_softc(sc->miibus); in eqos_miibus_statchg()
181 if (mii->mii_media_status & IFM_ACTIVE) in eqos_miibus_statchg()
182 sc->link_up = true; in eqos_miibus_statchg()
184 sc->link_up = false; in eqos_miibus_statchg()
188 switch (IFM_SUBTYPE(mii->mii_media_active)) { in eqos_miibus_statchg()
208 sc->link_up = false; in eqos_miibus_statchg()
212 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX)) in eqos_miibus_statchg()
219 IF_EQOS_SET_SPEED(dev, IFM_SUBTYPE(mii->mii_media_active)); in eqos_miibus_statchg()
221 WR4(sc, GMAC_MAC_1US_TIC_COUNTER, (sc->csr_clock / 1000000) - 1); in eqos_miibus_statchg()
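
The register written above holds CSR clock cycles per microsecond, minus one. A worked, hedged example with an assumed 125 MHz CSR clock:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint32_t csr_clock = 125000000;     /* assumed 125 MHz CSR clock */

    /* Prints 124: one less than the cycles-per-microsecond count. */
    printf("1US_TIC_COUNTER = %u\n", (csr_clock / 1000000) - 1);
    return (0);
}
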
228 struct mii_data *mii = device_get_softc(sc->miibus); in eqos_media_status()
232 ifmr->ifm_active = mii->mii_media_active; in eqos_media_status()
233 ifmr->ifm_status = mii->mii_media_status; in eqos_media_status()
244 error = mii_mediachg(device_get_softc(sc->miibus)); in eqos_media_change()
262 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREWRITE); in eqos_setup_txdesc()
263 sc->tx.desc_ring[index].des0 = htole32((uint32_t)paddr); in eqos_setup_txdesc()
264 sc->tx.desc_ring[index].des1 = htole32((uint32_t)(paddr >> 32)); in eqos_setup_txdesc()
265 sc->tx.desc_ring[index].des2 = htole32(tdes2 | len); in eqos_setup_txdesc()
266 sc->tx.desc_ring[index].des3 = htole32(tdes3 | total_len); in eqos_setup_txdesc()
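
eqos_setup_txdesc() splits the 64-bit bus address across two little-endian words. A hedged stand-alone rendering; struct dma_desc and desc_set_paddr() are illustrative, and <sys/endian.h> is FreeBSD's home for htole32():

#include <sys/endian.h>         /* htole32() on FreeBSD */
#include <stdint.h>

/* Illustrative descriptor layout: four 32-bit words, as in the driver. */
struct dma_desc {
    uint32_t des0, des1, des2, des3;
};

static void
desc_set_paddr(struct dma_desc *d, uint64_t paddr)
{
    d->des0 = htole32((uint32_t)paddr);             /* low 32 bits */
    d->des1 = htole32((uint32_t)(paddr >> 32));     /* high 32 bits */
}
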
270 eqos_setup_txbuf(struct eqos_softc *sc, struct mbuf *m) in eqos_setup_txbuf() argument
273 int first = sc->tx.head; in eqos_setup_txbuf()
277 error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag, in eqos_setup_txbuf()
278 sc->tx.buf_map[first].map, m, segs, &nsegs, 0); in eqos_setup_txbuf()
282 device_printf(sc->dev, "TX packet too big, trying defrag\n"); in eqos_setup_txbuf()
283 bus_dmamap_unload(sc->tx.buf_tag, sc->tx.buf_map[first].map); in eqos_setup_txbuf()
284 if (!(mb = m_defrag(m, M_NOWAIT))) in eqos_setup_txbuf()
286 m = mb; in eqos_setup_txbuf()
287 error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag, in eqos_setup_txbuf()
288 sc->tx.buf_map[first].map, m, segs, &nsegs, 0); in eqos_setup_txbuf()
293 if (TX_QUEUED(sc->tx.head, sc->tx.tail) + nsegs > TX_DESC_COUNT) { in eqos_setup_txbuf()
294 bus_dmamap_unload(sc->tx.buf_tag, sc->tx.buf_map[first].map); in eqos_setup_txbuf()
295 device_printf(sc->dev, "no more TX queue space\n"); in eqos_setup_txbuf()
299 bus_dmamap_sync(sc->tx.buf_tag, sc->tx.buf_map[first].map, in eqos_setup_txbuf()
302 sc->tx.buf_map[first].mbuf = m; in eqos_setup_txbuf()
305 if (idx == (nsegs - 1)) in eqos_setup_txbuf()
307 eqos_setup_txdesc(sc, sc->tx.head, flags, segs[idx].ds_addr, in eqos_setup_txbuf()
308 segs[idx].ds_len, m->m_pkthdr.len); in eqos_setup_txbuf()
311 sc->tx.head = TX_NEXT(sc->tx.head); in eqos_setup_txbuf()
318 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREWRITE); in eqos_setup_txbuf()
319 sc->tx.desc_ring[first].des3 |= htole32(EQOS_TDES3_OWN); in eqos_setup_txbuf()
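
Note the handoff ordering above: the whole chain is written and synced before EQOS_TDES3_OWN is OR-ed into the first descriptor, so the DMA engine never fetches a half-built chain. (Hedged reading: the later descriptors presumably carry OWN from the segment loop; only the first is held back until the end.)
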
328 sc->rx.desc_ring[index].des0 = htole32((uint32_t)paddr); in eqos_setup_rxdesc()
329 sc->rx.desc_ring[index].des1 = htole32((uint32_t)(paddr >> 32)); in eqos_setup_rxdesc()
330 sc->rx.desc_ring[index].des2 = htole32(0); in eqos_setup_rxdesc()
331 bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, BUS_DMASYNC_PREWRITE); in eqos_setup_rxdesc()
332 sc->rx.desc_ring[index].des3 = htole32(EQOS_RDES3_OWN | EQOS_RDES3_IOC | in eqos_setup_rxdesc()
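
The RX side follows the same discipline: des0 through des2 are written, the ring is synced, and a single final store of des3 hands the descriptor to hardware with EQOS_RDES3_OWN and EQOS_RDES3_IOC (interrupt on completion) set together.
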
337 eqos_setup_rxbuf(struct eqos_softc *sc, int index, struct mbuf *m) in eqos_setup_rxbuf() argument
342 m_adj(m, ETHER_ALIGN); in eqos_setup_rxbuf()
344 error = bus_dmamap_load_mbuf_sg(sc->rx.buf_tag, in eqos_setup_rxbuf()
345 sc->rx.buf_map[index].map, m, &seg, &nsegs, 0); in eqos_setup_rxbuf()
349 bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map, in eqos_setup_rxbuf()
352 sc->rx.buf_map[index].mbuf = m; in eqos_setup_rxbuf()
361 struct mbuf *m; in eqos_alloc_mbufcl() local
363 if ((m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR))) in eqos_alloc_mbufcl()
364 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; in eqos_alloc_mbufcl()
365 return (m); in eqos_alloc_mbufcl()
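
eqos_alloc_mbufcl() advertises the cluster's full ext_size as its length; eqos_setup_rxbuf() then trims ETHER_ALIGN bytes with the m_adj() call above so the 14-byte Ethernet header leaves the IP header 32-bit aligned. (The alignment rationale is the conventional one, not spelled out in this excerpt.)
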
411 if_t ifp = sc->ifp; in eqos_setup_rxfilter()
459 for (retry = 5000; retry > 0; retry--) { in eqos_reset()
473 (uint32_t)(sc->tx.desc_ring_paddr >> 32)); in eqos_init_rings()
475 (uint32_t)sc->tx.desc_ring_paddr); in eqos_init_rings()
476 WR4(sc, GMAC_DMA_CHAN0_TX_RING_LEN, TX_DESC_COUNT - 1); in eqos_init_rings()
479 (uint32_t)(sc->rx.desc_ring_paddr >> 32)); in eqos_init_rings()
481 (uint32_t)sc->rx.desc_ring_paddr); in eqos_init_rings()
482 WR4(sc, GMAC_DMA_CHAN0_RX_RING_LEN, RX_DESC_COUNT - 1); in eqos_init_rings()
485 (uint32_t)sc->rx.desc_ring_paddr + DESC_OFFSET(RX_DESC_COUNT)); in eqos_init_rings()
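
The store above parks the RX tail pointer just past the last descriptor, marking the whole ring available to hardware. A hedged check of the offset arithmetic; DESC_OFFSET() is assumed to be a byte-offset macro over 16-byte descriptors, and RX_DESC_COUNT = 256 is illustrative:

#include <assert.h>

#define RX_DESC_COUNT   256                 /* assumed for the example */
#define DESC_SIZE       16                  /* four 32-bit words per descriptor */
#define DESC_OFFSET(n)  ((n) * DESC_SIZE)   /* assumed definition */

int
main(void)
{
    assert(DESC_OFFSET(RX_DESC_COUNT) == 4096);     /* one slot past the end */
    return (0);
}
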
492 if_t ifp = sc->ifp; in eqos_init()
493 struct mii_data *mii = device_get_softc(sc->miibus); in eqos_init()
505 WR4(sc, GMAC_MAC_1US_TIC_COUNTER, (sc->csr_clock / 1000000) - 1); in eqos_init()
510 val |= ((DESC_ALIGN - 16) / 8) << GMAC_DMA_CHAN0_CONTROL_DSL_SHIFT; in eqos_init()
511 if (sc->pblx8) in eqos_init()
515 if (sc->txpbl > 0) in eqos_init()
516 val |= (sc->txpbl << GMAC_DMA_CHAN0_TXRX_PBL_SHIFT); in eqos_init()
521 if (sc->rxpbl > 0) in eqos_init()
522 val |= (sc->rxpbl << GMAC_DMA_CHAN0_TXRX_PBL_SHIFT); in eqos_init()
535 if (sc->thresh_dma_mode) { in eqos_init()
536 mtl_tx_val = sc->ttc; in eqos_init()
537 mtl_rx_val = sc->rtc; in eqos_init()
560 /* Set RX queue mode; must be in DCB mode. */ in eqos_init()
579 callout_reset(&sc->callout, hz, eqos_tick, sc); in eqos_init()
588 struct mbuf *m; in eqos_start_locked() local
591 if (!sc->link_up) in eqos_start_locked()
599 if (TX_QUEUED(sc->tx.head, sc->tx.tail) >= in eqos_start_locked()
600 TX_DESC_COUNT - TX_MAX_SEGS) { in eqos_start_locked()
605 if (!(m = if_dequeue(ifp))) in eqos_start_locked()
608 if (eqos_setup_txbuf(sc, m)) { in eqos_start_locked()
609 if_sendq_prepend(ifp, m); in eqos_start_locked()
613 bpf_mtap_if(ifp, m); in eqos_start_locked()
618 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, in eqos_start_locked()
623 (uint32_t)sc->tx.desc_ring_paddr + DESC_OFFSET(sc->tx.head)); in eqos_start_locked()
624 sc->tx_watchdog = WATCHDOG_TIMEOUT_SECS; in eqos_start_locked()
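
The tail-pointer store above is the TX doorbell: after the PREWRITE sync, writing the byte offset of the new head tells the channel's DMA engine how far it may fetch descriptors; arming tx_watchdog lets eqos_tick() flag a stalled queue.
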
641 if_t ifp = sc->ifp; in eqos_stop()
649 callout_stop(&sc->callout); in eqos_stop()
670 for (retry = 10000; retry > 0; retry--) { in eqos_stop()
677 device_printf(sc->dev, "timeout flushing TX queue\n"); in eqos_stop()
692 if_t ifp = sc->ifp; in eqos_rxintr()
693 struct mbuf *m; in eqos_rxintr() local
698 rdes3 = le32toh(sc->rx.desc_ring[sc->rx.head].des3); in eqos_rxintr()
705 bus_dmamap_sync(sc->rx.buf_tag, in eqos_rxintr()
706 sc->rx.buf_map[sc->rx.head].map, BUS_DMASYNC_POSTREAD); in eqos_rxintr()
707 bus_dmamap_unload(sc->rx.buf_tag, in eqos_rxintr()
708 sc->rx.buf_map[sc->rx.head].map); in eqos_rxintr()
712 m = sc->rx.buf_map[sc->rx.head].mbuf; in eqos_rxintr()
713 m->m_pkthdr.rcvif = ifp; in eqos_rxintr()
714 m->m_pkthdr.len = length; in eqos_rxintr()
715 m->m_len = length; in eqos_rxintr()
716 m->m_nextpkt = NULL; in eqos_rxintr()
719 m_adj(m, -ETHER_CRC_LEN); in eqos_rxintr()
722 if_input(ifp, m); in eqos_rxintr()
726 if ((m = eqos_alloc_mbufcl(sc))) { in eqos_rxintr()
727 if ((error = eqos_setup_rxbuf(sc, sc->rx.head, m))) in eqos_rxintr()
728 printf("ERROR: Hole in RX ring!!\n"); in eqos_rxintr()
736 (uint32_t)sc->rx.desc_ring_paddr + DESC_OFFSET(sc->rx.head)); in eqos_rxintr()
738 sc->rx.head = RX_NEXT(sc->rx.head); in eqos_rxintr()
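
Refill happens in place: each reaped slot is rearmed with a fresh cluster and the tail pointer is bumped to return it to hardware; a failed allocation or mapping leaves the slot unarmed, which is the "hole in RX ring" condition reported above.
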
745 if_t ifp = sc->ifp; in eqos_txintr()
751 while (sc->tx.tail != sc->tx.head) { in eqos_txintr()
752 tdes3 = le32toh(sc->tx.desc_ring[sc->tx.tail].des3); in eqos_txintr()
756 bmap = &sc->tx.buf_map[sc->tx.tail]; in eqos_txintr()
757 if (bmap->mbuf) { in eqos_txintr()
758 bus_dmamap_sync(sc->tx.buf_tag, bmap->map, in eqos_txintr()
760 bus_dmamap_unload(sc->tx.buf_tag, bmap->map); in eqos_txintr()
761 m_freem(bmap->mbuf); in eqos_txintr()
762 bmap->mbuf = NULL; in eqos_txintr()
765 eqos_setup_txdesc(sc, sc->tx.tail, 0, 0, 0, 0); in eqos_txintr()
779 sc->tx.tail = TX_NEXT(sc->tx.tail); in eqos_txintr()
781 if (sc->tx.tail == sc->tx.head) in eqos_txintr()
782 sc->tx_watchdog = 0; in eqos_txintr()
783 eqos_start_locked(sc->ifp); in eqos_txintr()
809 device_printf(sc->dev, in eqos_intr_mtl()
819 struct mii_data *mii = device_get_softc(sc->miibus); in eqos_tick()
824 if (sc->tx_watchdog > 0) in eqos_tick()
825 if (!--sc->tx_watchdog) { in eqos_tick()
826 device_printf(sc->dev, "watchdog timeout\n"); in eqos_tick()
830 link_status = sc->link_up; in eqos_tick()
832 if (sc->link_up && !link_status) in eqos_tick()
833 eqos_start_locked(sc->ifp); in eqos_tick()
835 callout_reset(&sc->callout, hz, eqos_tick, sc); in eqos_tick()
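
A hedged reading of the tick handler: link_status snapshots the pre-tick link state (the mii_tick() call between the snapshot and the comparison did not match this query), so eqos_start_locked() runs only on a down-to-up link transition, and the callout rearms itself hz ticks (one second) out.
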
848 device_printf(sc->dev, "MAC interrupt\n"); in eqos_intr()
870 device_printf(sc->dev, in eqos_intr()
877 device_printf(sc->dev, "RX/TX status interrupt\n"); in eqos_intr()
921 mii = device_get_softc(sc->miibus); in eqos_ioctl()
922 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); in eqos_ioctl()
926 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); in eqos_ioctl()
1007 struct mbuf *m; in eqos_setup_dma() local
1011 if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), in eqos_setup_dma()
1016 NULL, NULL, &sc->tx.desc_tag))) { in eqos_setup_dma()
1017 device_printf(sc->dev, "could not create TX ring DMA tag\n"); in eqos_setup_dma()
1021 if ((error = bus_dmamem_alloc(sc->tx.desc_tag, in eqos_setup_dma()
1022 (void**)&sc->tx.desc_ring, in eqos_setup_dma()
1024 &sc->tx.desc_map))) { in eqos_setup_dma()
1025 device_printf(sc->dev, in eqos_setup_dma()
1030 if ((error = bus_dmamap_load(sc->tx.desc_tag, sc->tx.desc_map, in eqos_setup_dma()
1031 sc->tx.desc_ring, in eqos_setup_dma()
1032 TX_DESC_SIZE, eqos_get1paddr, &sc->tx.desc_ring_paddr, 0))) { in eqos_setup_dma()
1033 device_printf(sc->dev, in eqos_setup_dma()
1038 if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, in eqos_setup_dma()
1043 &sc->tx.buf_tag))) { in eqos_setup_dma()
1044 device_printf(sc->dev, "could not create TX buffer DMA tag.\n"); in eqos_setup_dma()
1049 if ((error = bus_dmamap_create(sc->tx.buf_tag, BUS_DMA_COHERENT, in eqos_setup_dma()
1050 &sc->tx.buf_map[i].map))) { in eqos_setup_dma()
1051 device_printf(sc->dev, "cannot create TX buffer map\n"); in eqos_setup_dma()
1057 /* Set up RX descriptor ring, descriptors, dma maps, and mbufs */ in eqos_setup_dma()
1058 if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), in eqos_setup_dma()
1063 NULL, NULL, &sc->rx.desc_tag))) { in eqos_setup_dma()
1064 device_printf(sc->dev, "could not create RX ring DMA tag.\n"); in eqos_setup_dma()
1068 if ((error = bus_dmamem_alloc(sc->rx.desc_tag, in eqos_setup_dma()
1069 (void **)&sc->rx.desc_ring, in eqos_setup_dma()
1071 &sc->rx.desc_map))) { in eqos_setup_dma()
1072 device_printf(sc->dev, in eqos_setup_dma()
1073 "could not allocate RX descriptor ring.\n"); in eqos_setup_dma()
1077 if ((error = bus_dmamap_load(sc->rx.desc_tag, sc->rx.desc_map, in eqos_setup_dma()
1078 sc->rx.desc_ring, RX_DESC_SIZE, eqos_get1paddr, in eqos_setup_dma()
1079 &sc->rx.desc_ring_paddr, 0))) { in eqos_setup_dma()
1080 device_printf(sc->dev, in eqos_setup_dma()
1081 "could not load RX descriptor ring map.\n"); in eqos_setup_dma()
1085 if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, in eqos_setup_dma()
1090 &sc->rx.buf_tag))) { in eqos_setup_dma()
1091 device_printf(sc->dev, "could not create RX buf DMA tag.\n"); in eqos_setup_dma()
1096 if ((error = bus_dmamap_create(sc->rx.buf_tag, BUS_DMA_COHERENT, in eqos_setup_dma()
1097 &sc->rx.buf_map[i].map))) { in eqos_setup_dma()
1098 device_printf(sc->dev, "cannot create RX buffer map\n"); in eqos_setup_dma()
1101 if (!(m = eqos_alloc_mbufcl(sc))) { in eqos_setup_dma()
1102 device_printf(sc->dev, "cannot allocate RX mbuf\n"); in eqos_setup_dma()
1105 if ((error = eqos_setup_rxbuf(sc, i, m))) { in eqos_setup_dma()
1106 device_printf(sc->dev, "cannot create RX buffer\n"); in eqos_setup_dma()
1112 device_printf(sc->dev, "TX ring @ 0x%jx, RX ring @ 0x%jx\n", in eqos_setup_dma()
1113 (uintmax_t)sc->tx.desc_ring_paddr, (uintmax_t)sc->rx.desc_ring_paddr); in eqos_setup_dma()
1129 sc->thresh_dma_mode = false; in eqos_attach()
1130 sc->pblx8 = true; in eqos_attach()
1131 sc->txpbl = 0; in eqos_attach()
1132 sc->rxpbl = 0; in eqos_attach()
1133 sc->ttc = 0x10; in eqos_attach()
1134 sc->rtc = 0; in eqos_attach()
1137 if (bus_alloc_resources(dev, eqos_spec, sc->res)) { in eqos_attach()
1139 bus_release_resources(dev, eqos_spec, sc->res); in eqos_attach()
1146 sc->dev = dev; in eqos_attach()
1159 sc->hw_feature[n] = RD4(sc, GMAC_MAC_HW_FEATURE(n)); in eqos_attach()
1165 sc->hw_feature[0], sc->hw_feature[1], in eqos_attach()
1166 sc->hw_feature[2], sc->hw_feature[3]); in eqos_attach()
1169 mtx_init(&sc->lock, "eqos lock", MTX_NETWORK_LOCK, MTX_DEF); in eqos_attach()
1170 callout_init_mtx(&sc->callout, &sc->lock, 0); in eqos_attach()
1174 device_printf(sc->dev, "Ethernet address %6D\n", eaddr, ":"); in eqos_attach()
1178 device_printf(sc->dev, "reset timeout!\n"); in eqos_attach()
1187 device_printf(sc->dev, "failed to setup DMA descriptors\n"); in eqos_attach()
1192 if ((bus_setup_intr(dev, sc->res[EQOS_RES_IRQ0], EQOS_INTR_FLAGS, in eqos_attach()
1193 NULL, eqos_intr, sc, &sc->irq_handle))) { in eqos_attach()
1195 bus_release_resources(dev, eqos_spec, sc->res); in eqos_attach()
1200 ifp = sc->ifp = if_alloc(IFT_ETHER); in eqos_attach()
1202 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); in eqos_attach()
1203 if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); in eqos_attach()
1207 if_setsendqlen(ifp, TX_DESC_COUNT - 1); in eqos_attach()
1213 if ((error = mii_attach(sc->dev, &sc->miibus, ifp, eqos_media_change, in eqos_attach()
1216 device_printf(sc->dev, "PHY attach failed\n"); in eqos_attach()
1236 if_setflagbits(sc->ifp, 0, IFF_UP); in eqos_detach()
1237 ether_ifdetach(sc->ifp); in eqos_detach()
1242 if (sc->irq_handle) in eqos_detach()
1243 bus_teardown_intr(dev, sc->res[EQOS_RES_IRQ0], in eqos_detach()
1244 sc->irq_handle); in eqos_detach()
1246 if (sc->ifp) in eqos_detach()
1247 if_free(sc->ifp); in eqos_detach()
1249 bus_release_resources(dev, eqos_spec, sc->res); in eqos_detach()
1251 if (sc->tx.desc_tag) { in eqos_detach()
1252 if (sc->tx.desc_map) { in eqos_detach()
1253 bus_dmamap_unload(sc->tx.desc_tag, sc->tx.desc_map); in eqos_detach()
1254 bus_dmamem_free(sc->tx.desc_tag, sc->tx.desc_ring, in eqos_detach()
1255 sc->tx.desc_map); in eqos_detach()
1257 bus_dma_tag_destroy(sc->tx.desc_tag); in eqos_detach()
1259 if (sc->tx.buf_tag) { in eqos_detach()
1261 m_free(sc->tx.buf_map[i].mbuf); in eqos_detach()
1262 bus_dmamap_destroy(sc->tx.buf_tag, in eqos_detach()
1263 sc->tx.buf_map[i].map); in eqos_detach()
1265 bus_dma_tag_destroy(sc->tx.buf_tag); in eqos_detach()
1268 if (sc->rx.desc_tag) { in eqos_detach()
1269 if (sc->rx.desc_map) { in eqos_detach()
1270 bus_dmamap_unload(sc->rx.desc_tag, sc->rx.desc_map); in eqos_detach()
1271 bus_dmamem_free(sc->rx.desc_tag, sc->rx.desc_ring, in eqos_detach()
1272 sc->rx.desc_map); in eqos_detach()
1274 bus_dma_tag_destroy(sc->rx.desc_tag); in eqos_detach()
1276 if (sc->rx.buf_tag) { in eqos_detach()
1278 m_free(sc->rx.buf_map[i].mbuf); in eqos_detach()
1279 bus_dmamap_destroy(sc->rx.buf_tag, in eqos_detach()
1280 sc->rx.buf_map[i].map); in eqos_detach()
1282 bus_dma_tag_destroy(sc->rx.buf_tag); in eqos_detach()
1285 mtx_destroy(&sc->lock); in eqos_detach()