Lines matching full:sc (symbol-search hits for the softc pointer sc; the function and register names indicate the FreeBSD eqos(4) Ethernet driver, sys/dev/eqos/if_eqos.c)

93 #define EQOS_LOCK(sc) mtx_lock(&(sc)->lock) argument
94 #define EQOS_UNLOCK(sc) mtx_unlock(&(sc)->lock) argument
95 #define EQOS_ASSERT_LOCKED(sc) mtx_assert(&(sc)->lock, MA_OWNED) argument
97 #define RD4(sc, o) bus_read_4(sc->res[EQOS_RES_MEM], (o)) argument
98 #define WR4(sc, o, v) bus_write_4(sc->res[EQOS_RES_MEM], (o), (v)) argument
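
The first hits are the driver's convenience macros: EQOS_LOCK/EQOS_UNLOCK/EQOS_ASSERT_LOCKED wrap the per-softc mutex, and RD4/WR4 wrap 4-byte bus-space accesses to the memory-mapped GMAC register window. A minimal usage sketch; the GMAC_MAC_CONFIGURATION_TE bit name is an assumption, everything else appears in hits later in this listing:

    static void
    eqos_enable_tx_example(struct eqos_softc *sc)
    {
        uint32_t val;

        EQOS_LOCK(sc);                          /* acquire sc->lock */
        val = RD4(sc, GMAC_MAC_CONFIGURATION);  /* bus_read_4() behind RD4 */
        val |= GMAC_MAC_CONFIGURATION_TE;       /* assumed: transmitter enable */
        WR4(sc, GMAC_MAC_CONFIGURATION, val);   /* bus_write_4() behind WR4 */
        EQOS_UNLOCK(sc);
    }
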
113 struct eqos_softc *sc = device_get_softc(dev); in eqos_miibus_readreg() local
117 addr = sc->csr_clock_range | in eqos_miibus_readreg()
121 WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr); in eqos_miibus_readreg()
126 addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS); in eqos_miibus_readreg()
128 val = RD4(sc, GMAC_MAC_MDIO_DATA) & 0xFFFF; in eqos_miibus_readreg()
144 struct eqos_softc *sc = device_get_softc(dev); in eqos_miibus_writereg() local
148 WR4(sc, GMAC_MAC_MDIO_DATA, val); in eqos_miibus_writereg()
150 addr = sc->csr_clock_range | in eqos_miibus_writereg()
154 WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr); in eqos_miibus_writereg()
159 addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS); in eqos_miibus_writereg()
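
In both MDIO accessors the lines between the WR4 of GMAC_MAC_MDIO_ADDRESS (121/154) and the later RD4 of the same register (126/159) are elided; they presumably busy-wait for the controller to clear its GMII-busy flag before the data register is trusted. A sketch of that poll for the read path, with the MII_BUSY_RETRY budget and the GMAC_MAC_MDIO_ADDRESS_GB bit name assumed:

    /* Assumed busy-wait between lines 121 and 126. */
    for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
        addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS);
        if ((addr & GMAC_MAC_MDIO_ADDRESS_GB) == 0) {
            val = RD4(sc, GMAC_MAC_MDIO_DATA) & 0xFFFF;  /* line 128 */
            break;
        }
        DELAY(10);
    }
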
175 struct eqos_softc *sc = device_get_softc(dev); in eqos_miibus_statchg() local
176 struct mii_data *mii = device_get_softc(sc->miibus); in eqos_miibus_statchg()
179 EQOS_ASSERT_LOCKED(sc); in eqos_miibus_statchg()
182 sc->link_up = true; in eqos_miibus_statchg()
184 sc->link_up = false; in eqos_miibus_statchg()
186 reg = RD4(sc, GMAC_MAC_CONFIGURATION); in eqos_miibus_statchg()
208 sc->link_up = false; in eqos_miibus_statchg()
217 WR4(sc, GMAC_MAC_CONFIGURATION, reg); in eqos_miibus_statchg()
221 WR4(sc, GMAC_MAC_1US_TIC_COUNTER, (sc->csr_clock / 1000000) - 1); in eqos_miibus_statchg()
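
The elided body of eqos_miibus_statchg() (lines 187 through 216) maps the active mii media word onto speed/duplex bits in the reg value read at line 186, falling back to link-down for unsupported media (line 208). A hedged reconstruction; the PS/FES/DM bit names are assumptions:

    switch (IFM_SUBTYPE(mii->mii_media_active)) {
    case IFM_1000_T:
        reg &= ~(GMAC_MAC_CONFIGURATION_PS |    /* assumed: port select */
            GMAC_MAC_CONFIGURATION_FES);        /* assumed: speed select */
        break;
    case IFM_100_TX:
        reg |= GMAC_MAC_CONFIGURATION_PS | GMAC_MAC_CONFIGURATION_FES;
        break;
    case IFM_10_T:
        reg |= GMAC_MAC_CONFIGURATION_PS;
        reg &= ~GMAC_MAC_CONFIGURATION_FES;
        break;
    default:
        sc->link_up = false;                    /* line 208 */
        break;
    }
    if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
        reg |= GMAC_MAC_CONFIGURATION_DM;       /* assumed: duplex mode */
    else
        reg &= ~GMAC_MAC_CONFIGURATION_DM;
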
227 struct eqos_softc *sc = if_getsoftc(ifp); in eqos_media_status() local
228 struct mii_data *mii = device_get_softc(sc->miibus); in eqos_media_status()
230 EQOS_LOCK(sc); in eqos_media_status()
234 EQOS_UNLOCK(sc); in eqos_media_status()
240 struct eqos_softc *sc = if_getsoftc(ifp); in eqos_media_change() local
243 EQOS_LOCK(sc); in eqos_media_change()
244 error = mii_mediachg(device_get_softc(sc->miibus)); in eqos_media_change()
245 EQOS_UNLOCK(sc); in eqos_media_change()
250 eqos_setup_txdesc(struct eqos_softc *sc, int index, int flags, in eqos_setup_txdesc() argument
262 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREWRITE); in eqos_setup_txdesc()
263 sc->tx.desc_ring[index].des0 = htole32((uint32_t)paddr); in eqos_setup_txdesc()
264 sc->tx.desc_ring[index].des1 = htole32((uint32_t)(paddr >> 32)); in eqos_setup_txdesc()
265 sc->tx.desc_ring[index].des2 = htole32(tdes2 | len); in eqos_setup_txdesc()
266 sc->tx.desc_ring[index].des3 = htole32(tdes3 | total_len); in eqos_setup_txdesc()
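
The four stores above reveal the whole descriptor format; a summary of the layout they imply:

    /*
     * struct eqos_dma_desc layout as used by eqos_setup_txdesc():
     *   des0 - buffer physical address, low 32 bits
     *   des1 - buffer physical address, high 32 bits
     *   des2 - per-segment control flags OR'ed with the segment length
     *   des3 - chain control flags OR'ed with the total frame length
     * Fields are little-endian in memory, hence the htole32() wrappers,
     * and des3 is stored last: its OWN bit, when set, is what hands the
     * descriptor to the DMA engine.
     */
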
270 eqos_setup_txbuf(struct eqos_softc *sc, struct mbuf *m) in eqos_setup_txbuf() argument
273 int first = sc->tx.head; in eqos_setup_txbuf()
277 error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag, in eqos_setup_txbuf()
278 sc->tx.buf_map[first].map, m, segs, &nsegs, 0); in eqos_setup_txbuf()
282 device_printf(sc->dev, "TX packet too big trying defrag\n"); in eqos_setup_txbuf()
283 bus_dmamap_unload(sc->tx.buf_tag, sc->tx.buf_map[first].map); in eqos_setup_txbuf()
287 error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag, in eqos_setup_txbuf()
288 sc->tx.buf_map[first].map, m, segs, &nsegs, 0); in eqos_setup_txbuf()
293 if (TX_QUEUED(sc->tx.head, sc->tx.tail) + nsegs > TX_DESC_COUNT) { in eqos_setup_txbuf()
294 bus_dmamap_unload(sc->tx.buf_tag, sc->tx.buf_map[first].map); in eqos_setup_txbuf()
295 device_printf(sc->dev, "TX packet no more queue space\n"); in eqos_setup_txbuf()
299 bus_dmamap_sync(sc->tx.buf_tag, sc->tx.buf_map[first].map, in eqos_setup_txbuf()
302 sc->tx.buf_map[first].mbuf = m; in eqos_setup_txbuf()
307 eqos_setup_txdesc(sc, sc->tx.head, flags, segs[idx].ds_addr, in eqos_setup_txbuf()
311 sc->tx.head = TX_NEXT(sc->tx.head); in eqos_setup_txbuf()
318 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREWRITE); in eqos_setup_txbuf()
319 sc->tx.desc_ring[first].des3 |= htole32(EQOS_TDES3_OWN); in eqos_setup_txbuf()
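
The loop around line 307 emits one descriptor per DMA segment, but note the ordering at lines 318 and 319: the first descriptor's EQOS_TDES3_OWN bit is withheld until every other descriptor has been written and synced, so the hardware can never start on a half-built chain. The elided loop body presumably looks like this, with the EQOS_TDES3_FD/LD first/last-descriptor flag names assumed:

    flags = EQOS_TDES3_FD;                  /* mark first descriptor */
    for (idx = 0; idx < nsegs; idx++) {
        if (idx == nsegs - 1)
            flags |= EQOS_TDES3_LD;         /* mark last descriptor */
        eqos_setup_txdesc(sc, sc->tx.head, flags, segs[idx].ds_addr,
            segs[idx].ds_len, m->m_pkthdr.len);
        flags &= ~EQOS_TDES3_FD;
        flags |= EQOS_TDES3_OWN;            /* later segments owned at once */
        sc->tx.head = TX_NEXT(sc->tx.head);
    }
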
325 eqos_setup_rxdesc(struct eqos_softc *sc, int index, bus_addr_t paddr) in eqos_setup_rxdesc() argument
328 sc->rx.desc_ring[index].des0 = htole32((uint32_t)paddr); in eqos_setup_rxdesc()
329 sc->rx.desc_ring[index].des1 = htole32((uint32_t)(paddr >> 32)); in eqos_setup_rxdesc()
330 sc->rx.desc_ring[index].des2 = htole32(0); in eqos_setup_rxdesc()
331 bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, BUS_DMASYNC_PREWRITE); in eqos_setup_rxdesc()
332 sc->rx.desc_ring[index].des3 = htole32(EQOS_RDES3_OWN | EQOS_RDES3_IOC | in eqos_setup_rxdesc()
337 eqos_setup_rxbuf(struct eqos_softc *sc, int index, struct mbuf *m) in eqos_setup_rxbuf() argument
344 error = bus_dmamap_load_mbuf_sg(sc->rx.buf_tag, in eqos_setup_rxbuf()
345 sc->rx.buf_map[index].map, m, &seg, &nsegs, 0); in eqos_setup_rxbuf()
349 bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map, in eqos_setup_rxbuf()
352 sc->rx.buf_map[index].mbuf = m; in eqos_setup_rxbuf()
353 eqos_setup_rxdesc(sc, index, seg.ds_addr); in eqos_setup_rxbuf()
359 eqos_alloc_mbufcl(struct eqos_softc *sc) in eqos_alloc_mbufcl() argument
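
eqos_setup_rxdesc() mirrors the TX path: the buffer address goes into des0/des1, and only after the PREWRITE sync does des3 receive OWN together with IOC (interrupt on completion), so ownership again transfers last. The body of eqos_alloc_mbufcl() is not among the hits; it presumably grabs one standard cluster per RX slot, roughly:

    static struct mbuf *
    eqos_alloc_mbufcl(struct eqos_softc *sc)
    {
        struct mbuf *m;

        /* Sketch: length preset to the full cluster size (assumed). */
        if ((m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)) != NULL)
            m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
        return (m);
    }
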
369 eqos_enable_intr(struct eqos_softc *sc) in eqos_enable_intr() argument
372 WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE, in eqos_enable_intr()
379 eqos_disable_intr(struct eqos_softc *sc) in eqos_disable_intr() argument
382 WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE, 0); in eqos_disable_intr()
409 eqos_setup_rxfilter(struct eqos_softc *sc) in eqos_setup_rxfilter() argument
411 if_t ifp = sc->ifp; in eqos_setup_rxfilter()
416 EQOS_ASSERT_LOCKED(sc); in eqos_setup_rxfilter()
418 pfil = RD4(sc, GMAC_MAC_PACKET_FILTER); in eqos_setup_rxfilter()
439 WR4(sc, GMAC_MAC_ADDRESS0_HIGH, val); in eqos_setup_rxfilter()
442 WR4(sc, GMAC_MAC_ADDRESS0_LOW, val); in eqos_setup_rxfilter()
445 WR4(sc, GMAC_MAC_HASH_TABLE_REG0, hash[0]); in eqos_setup_rxfilter()
446 WR4(sc, GMAC_MAC_HASH_TABLE_REG1, hash[1]); in eqos_setup_rxfilter()
449 WR4(sc, GMAC_MAC_PACKET_FILTER, pfil); in eqos_setup_rxfilter()
453 eqos_reset(struct eqos_softc *sc) in eqos_reset() argument
458 WR4(sc, GMAC_DMA_MODE, GMAC_DMA_MODE_SWR); in eqos_reset()
461 val = RD4(sc, GMAC_DMA_MODE); in eqos_reset()
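
eqos_reset() sets the software-reset bit and the elided lines poll for the hardware to clear it; eqos_attach() treats a nonzero return as "reset timeout!" (line 1178). A sketch of the poll, retry budget assumed:

    WR4(sc, GMAC_DMA_MODE, GMAC_DMA_MODE_SWR);
    for (retry = 2000; retry > 0; retry--) {    /* budget assumed */
        DELAY(1000);
        val = RD4(sc, GMAC_DMA_MODE);
        if ((val & GMAC_DMA_MODE_SWR) == 0)
            return (0);                         /* reset complete */
    }
    return (ETIMEDOUT);
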
469 eqos_init_rings(struct eqos_softc *sc) in eqos_init_rings() argument
472 WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR_HI, in eqos_init_rings()
473 (uint32_t)(sc->tx.desc_ring_paddr >> 32)); in eqos_init_rings()
474 WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR, in eqos_init_rings()
475 (uint32_t)sc->tx.desc_ring_paddr); in eqos_init_rings()
476 WR4(sc, GMAC_DMA_CHAN0_TX_RING_LEN, TX_DESC_COUNT - 1); in eqos_init_rings()
478 WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR_HI, in eqos_init_rings()
479 (uint32_t)(sc->rx.desc_ring_paddr >> 32)); in eqos_init_rings()
480 WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR, in eqos_init_rings()
481 (uint32_t)sc->rx.desc_ring_paddr); in eqos_init_rings()
482 WR4(sc, GMAC_DMA_CHAN0_RX_RING_LEN, RX_DESC_COUNT - 1); in eqos_init_rings()
484 WR4(sc, GMAC_DMA_CHAN0_RX_END_ADDR, in eqos_init_rings()
485 (uint32_t)sc->rx.desc_ring_paddr + DESC_OFFSET(RX_DESC_COUNT)); in eqos_init_rings()
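
Ring setup writes each ring's 64-bit base address split across two registers, the ring length minus one, and (for RX) a tail pointer just past the last descriptor. DESC_OFFSET() itself is not among the hits, but its use with RX_DESC_COUNT here and with sc->tx.head at line 623 implies it is simply the byte offset of descriptor n:

    /* Assumed definition, consistent with every use in this listing: */
    #define DESC_OFFSET(n)  ((n) * sizeof(struct eqos_dma_desc))
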
491 struct eqos_softc *sc = if_softc; in eqos_init() local
492 if_t ifp = sc->ifp; in eqos_init()
493 struct mii_data *mii = device_get_softc(sc->miibus); in eqos_init()
499 EQOS_LOCK(sc); in eqos_init()
501 eqos_init_rings(sc); in eqos_init()
503 eqos_setup_rxfilter(sc); in eqos_init()
505 WR4(sc, GMAC_MAC_1US_TIC_COUNTER, (sc->csr_clock / 1000000) - 1); in eqos_init()
508 val = RD4(sc, GMAC_DMA_CHAN0_CONTROL); in eqos_init()
511 if (sc->pblx8) in eqos_init()
513 WR4(sc, GMAC_DMA_CHAN0_CONTROL, val); in eqos_init()
514 val = RD4(sc, GMAC_DMA_CHAN0_TX_CONTROL); in eqos_init()
515 if (sc->txpbl > 0) in eqos_init()
516 val |= (sc->txpbl << GMAC_DMA_CHAN0_TXRX_PBL_SHIFT); in eqos_init()
519 WR4(sc, GMAC_DMA_CHAN0_TX_CONTROL, val); in eqos_init()
520 val = RD4(sc, GMAC_DMA_CHAN0_RX_CONTROL); in eqos_init()
521 if (sc->rxpbl > 0) in eqos_init()
522 val |= (sc->rxpbl << GMAC_DMA_CHAN0_TXRX_PBL_SHIFT); in eqos_init()
526 WR4(sc, GMAC_DMA_CHAN0_RX_CONTROL, val); in eqos_init()
529 WR4(sc, GMAC_MMC_CONTROL, in eqos_init()
535 if (sc->thresh_dma_mode) { in eqos_init()
536 mtl_tx_val = sc->ttc; in eqos_init()
537 mtl_rx_val = sc->rtc; in eqos_init()
543 WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE, in eqos_init()
546 WR4(sc, GMAC_MTL_RXQ0_OPERATION_MODE, in eqos_init()
552 val = RD4(sc, GMAC_MAC_Q0_TX_FLOW_CTRL); in eqos_init()
555 WR4(sc, GMAC_MAC_Q0_TX_FLOW_CTRL, val); in eqos_init()
556 val = RD4(sc, GMAC_MAC_RX_FLOW_CTRL); in eqos_init()
558 WR4(sc, GMAC_MAC_RX_FLOW_CTRL, val); in eqos_init()
561 WR4(sc, GMAC_RXQ_CTRL0, (GMAC_RXQ_CTRL0_EN_MASK << 16) | in eqos_init()
565 val = RD4(sc, GMAC_MAC_CONFIGURATION); in eqos_init()
572 WR4(sc, GMAC_MAC_CONFIGURATION, val); in eqos_init()
574 eqos_enable_intr(sc); in eqos_init()
579 callout_reset(&sc->callout, hz, eqos_tick, sc); in eqos_init()
581 EQOS_UNLOCK(sc); in eqos_init()
587 struct eqos_softc *sc = if_getsoftc(ifp); in eqos_start_locked() local
591 if (!sc->link_up) in eqos_start_locked()
599 if (TX_QUEUED(sc->tx.head, sc->tx.tail) >= in eqos_start_locked()
608 if (eqos_setup_txbuf(sc, m)) { in eqos_start_locked()
618 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, in eqos_start_locked()
622 WR4(sc, GMAC_DMA_CHAN0_TX_END_ADDR, in eqos_start_locked()
623 (uint32_t)sc->tx.desc_ring_paddr + DESC_OFFSET(sc->tx.head)); in eqos_start_locked()
624 sc->tx_watchdog = WATCHDOG_TIMEOUT_SECS; in eqos_start_locked()
631 struct eqos_softc *sc = if_getsoftc(ifp); in eqos_start() local
633 EQOS_LOCK(sc); in eqos_start()
635 EQOS_UNLOCK(sc); in eqos_start()
639 eqos_stop(struct eqos_softc *sc) in eqos_stop() argument
641 if_t ifp = sc->ifp; in eqos_stop()
645 EQOS_LOCK(sc); in eqos_stop()
649 callout_stop(&sc->callout); in eqos_stop()
652 val = RD4(sc, GMAC_MAC_CONFIGURATION); in eqos_stop()
654 WR4(sc, GMAC_MAC_CONFIGURATION, val); in eqos_stop()
657 val = RD4(sc, GMAC_DMA_CHAN0_RX_CONTROL); in eqos_stop()
659 WR4(sc, GMAC_DMA_CHAN0_RX_CONTROL, val); in eqos_stop()
662 val = RD4(sc, GMAC_DMA_CHAN0_TX_CONTROL); in eqos_stop()
664 WR4(sc, GMAC_DMA_CHAN0_TX_CONTROL, val); in eqos_stop()
667 val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE); in eqos_stop()
669 WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE, val); in eqos_stop()
671 val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE); in eqos_stop()
677 device_printf(sc->dev, "timeout flushing TX queue\n"); in eqos_stop()
680 val = RD4(sc, GMAC_MAC_CONFIGURATION); in eqos_stop()
682 WR4(sc, GMAC_MAC_CONFIGURATION, val); in eqos_stop()
684 eqos_disable_intr(sc); in eqos_stop()
686 EQOS_UNLOCK(sc); in eqos_stop()
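
eqos_stop() quiesces in a fixed order: stop the callout, clear the MAC RX enable, halt the DMA RX channel, halt the DMA TX channel, flush the MTL TX queue, and only then clear the MAC TX enable. The read-back at line 671 belongs to a flush poll; a sketch, with the FTQ (flush transmit queue) bit name and retry budget assumed:

    val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE);
    val |= GMAC_MTL_TXQ0_OPERATION_MODE_FTQ;        /* assumed bit name */
    WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE, val);
    for (retry = 10000; retry > 0; retry--) {
        val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE);
        if ((val & GMAC_MTL_TXQ0_OPERATION_MODE_FTQ) == 0)
            break;
        DELAY(10);
    }
    if (retry == 0)
        device_printf(sc->dev, "timeout flushing TX queue\n");  /* line 677 */
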
690 eqos_rxintr(struct eqos_softc *sc) in eqos_rxintr() argument
692 if_t ifp = sc->ifp; in eqos_rxintr()
698 rdes3 = le32toh(sc->rx.desc_ring[sc->rx.head].des3); in eqos_rxintr()
705 bus_dmamap_sync(sc->rx.buf_tag, in eqos_rxintr()
706 sc->rx.buf_map[sc->rx.head].map, BUS_DMASYNC_POSTREAD); in eqos_rxintr()
707 bus_dmamap_unload(sc->rx.buf_tag, in eqos_rxintr()
708 sc->rx.buf_map[sc->rx.head].map); in eqos_rxintr()
712 m = sc->rx.buf_map[sc->rx.head].mbuf; in eqos_rxintr()
721 EQOS_UNLOCK(sc); in eqos_rxintr()
723 EQOS_LOCK(sc); in eqos_rxintr()
726 if ((m = eqos_alloc_mbufcl(sc))) { in eqos_rxintr()
727 if ((error = eqos_setup_rxbuf(sc, sc->rx.head, m))) in eqos_rxintr()
735 WR4(sc, GMAC_DMA_CHAN0_RX_END_ADDR, in eqos_rxintr()
736 (uint32_t)sc->rx.desc_ring_paddr + DESC_OFFSET(sc->rx.head)); in eqos_rxintr()
738 sc->rx.head = RX_NEXT(sc->rx.head); in eqos_rxintr()
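
The RX loop's elided head (lines 699 through 704) is the standard ownership check that terminates the scan; the lock is dropped around the input call (lines 721 through 723) so the network stack never runs under the driver mutex, and each consumed slot is refilled with a fresh cluster before the tail-pointer register advances. The elided check presumably reads:

    if ((rdes3 & EQOS_RDES3_OWN) != 0)
        break;                  /* descriptor still owned by hardware */
    length = rdes3 & EQOS_RDES3_LENGTH_MASK;    /* mask name assumed */
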
743 eqos_txintr(struct eqos_softc *sc) in eqos_txintr() argument
745 if_t ifp = sc->ifp; in eqos_txintr()
749 EQOS_ASSERT_LOCKED(sc); in eqos_txintr()
751 while (sc->tx.tail != sc->tx.head) { in eqos_txintr()
752 tdes3 = le32toh(sc->tx.desc_ring[sc->tx.tail].des3); in eqos_txintr()
756 bmap = &sc->tx.buf_map[sc->tx.tail]; in eqos_txintr()
758 bus_dmamap_sync(sc->tx.buf_tag, bmap->map, in eqos_txintr()
760 bus_dmamap_unload(sc->tx.buf_tag, bmap->map); in eqos_txintr()
765 eqos_setup_txdesc(sc, sc->tx.tail, 0, 0, 0, 0); in eqos_txintr()
779 sc->tx.tail = TX_NEXT(sc->tx.tail); in eqos_txintr()
781 if (sc->tx.tail == sc->tx.head) in eqos_txintr()
782 sc->tx_watchdog = 0; in eqos_txintr()
783 eqos_start_locked(sc->ifp); in eqos_txintr()
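
The TX reclaim loop walks from tail to head, stops at the first descriptor the hardware still owns, unloads and frees the mbuf attached to the slot, and resets the descriptor (line 765). Once the ring drains completely the watchdog is disarmed (lines 781 and 782) and transmission is restarted. The elided checks presumably look like:

    if ((tdes3 & EQOS_TDES3_OWN) != 0)
        break;                  /* hardware not finished with this one */
    if (bmap->mbuf != NULL) {
        m_freem(bmap->mbuf);
        bmap->mbuf = NULL;
    }
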
787 eqos_intr_mtl(struct eqos_softc *sc, uint32_t mtl_status) in eqos_intr_mtl() argument
794 mtl_istat = RD4(sc, GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS); in eqos_intr_mtl()
805 WR4(sc, GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS, mtl_clear); in eqos_intr_mtl()
809 device_printf(sc->dev, in eqos_intr_mtl()
818 struct eqos_softc *sc = softc; in eqos_tick() local
819 struct mii_data *mii = device_get_softc(sc->miibus); in eqos_tick()
822 EQOS_ASSERT_LOCKED(sc); in eqos_tick()
824 if (sc->tx_watchdog > 0) in eqos_tick()
825 if (!--sc->tx_watchdog) { in eqos_tick()
826 device_printf(sc->dev, "watchdog timeout\n"); in eqos_tick()
827 eqos_txintr(sc); in eqos_tick()
830 link_status = sc->link_up; in eqos_tick()
832 if (sc->link_up && !link_status) in eqos_tick()
833 eqos_start_locked(sc->ifp); in eqos_tick()
835 callout_reset(&sc->callout, hz, eqos_tick, sc); in eqos_tick()
841 struct eqos_softc *sc = arg; in eqos_intr() local
844 mac_status = RD4(sc, GMAC_MAC_INTERRUPT_STATUS); in eqos_intr()
845 mac_status &= RD4(sc, GMAC_MAC_INTERRUPT_ENABLE); in eqos_intr()
848 device_printf(sc->dev, "MAC interrupt\n"); in eqos_intr()
850 if ((mtl_status = RD4(sc, GMAC_MTL_INTERRUPT_STATUS))) in eqos_intr()
851 eqos_intr_mtl(sc, mtl_status); in eqos_intr()
853 dma_status = RD4(sc, GMAC_DMA_CHAN0_STATUS); in eqos_intr()
854 dma_status &= RD4(sc, GMAC_DMA_CHAN0_INTR_ENABLE); in eqos_intr()
857 WR4(sc, GMAC_DMA_CHAN0_STATUS, dma_status); in eqos_intr()
859 EQOS_LOCK(sc); in eqos_intr()
862 eqos_rxintr(sc); in eqos_intr()
865 eqos_txintr(sc); in eqos_intr()
867 EQOS_UNLOCK(sc); in eqos_intr()
870 device_printf(sc->dev, in eqos_intr()
872 RD4(sc, GMAC_MAC_INTERRUPT_STATUS), in eqos_intr()
873 RD4(sc, GMAC_MTL_INTERRUPT_STATUS), in eqos_intr()
874 RD4(sc, GMAC_DMA_CHAN0_STATUS)); in eqos_intr()
876 if ((rx_tx_status = RD4(sc, GMAC_MAC_RX_TX_STATUS))) in eqos_intr()
877 device_printf(sc->dev, "RX/TX status interrupt\n"); in eqos_intr()
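
Taken together, the handler's hits show its dispatch structure:

    /*
     * eqos_intr() dispatch, as reconstructed from the hits above:
     *   1. mac_status = MAC_INTERRUPT_STATUS masked by MAC_INTERRUPT_ENABLE;
     *      anything set is only logged (line 848).
     *   2. mtl_status is handed to eqos_intr_mtl() for queue-level events.
     *   3. dma_status = DMA_CHAN0_STATUS masked by DMA_CHAN0_INTR_ENABLE,
     *      acknowledged by writing the value back (line 857); RX/TX
     *      completion bits then drive eqos_rxintr()/eqos_txintr() under
     *      EQOS_LOCK.
     *   4. If nothing was recognized, the three raw status words are
     *      printed, and a pending RX/TX status interrupt is reported
     *      separately (lines 876 and 877).
     */
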
883 struct eqos_softc *sc = if_getsoftc(ifp); in eqos_ioctl() local
895 EQOS_LOCK(sc); in eqos_ioctl()
896 eqos_setup_rxfilter(sc); in eqos_ioctl()
897 EQOS_UNLOCK(sc); in eqos_ioctl()
901 eqos_init(sc); in eqos_ioctl()
906 eqos_stop(sc); in eqos_ioctl()
913 EQOS_LOCK(sc); in eqos_ioctl()
914 eqos_setup_rxfilter(sc); in eqos_ioctl()
915 EQOS_UNLOCK(sc); in eqos_ioctl()
921 mii = device_get_softc(sc->miibus); in eqos_ioctl()
950 eqos_get_eaddr(struct eqos_softc *sc, uint8_t *eaddr) in eqos_get_eaddr() argument
954 maclo = htobe32(RD4(sc, GMAC_MAC_ADDRESS0_LOW)); in eqos_get_eaddr()
955 machi = htobe16(RD4(sc, GMAC_MAC_ADDRESS0_HIGH) & 0xFFFF); in eqos_get_eaddr()
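
The byte-order wrappers at lines 954 and 955 normalize the little-endian ADDRESS0 register pair so the six station-address bytes can be peeled off in order; the elided remainder presumably extracts them and falls back to a generated address when the registers read back all-ones. A sketch, with the shift order and the fallback assumed:

    if (maclo == 0xffffffff && machi == 0xffff) {
        /* Unprogrammed hardware: fall back to a random address. */
        maclo = arc4random();
        machi = arc4random() & 0xffff;
    }
    eaddr[0] = (maclo >> 24) & 0xff;
    eaddr[1] = (maclo >> 16) & 0xff;
    eaddr[2] = (maclo >> 8) & 0xff;
    eaddr[3] = maclo & 0xff;
    eaddr[4] = (machi >> 8) & 0xff;
    eaddr[5] = machi & 0xff;
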
971 eqos_axi_configure(struct eqos_softc *sc) in eqos_axi_configure() argument
975 val = RD4(sc, GMAC_DMA_SYSBUS_MODE); in eqos_axi_configure()
993 WR4(sc, GMAC_DMA_SYSBUS_MODE, val); in eqos_axi_configure()
1005 eqos_setup_dma(struct eqos_softc *sc) in eqos_setup_dma() argument
1011 if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), in eqos_setup_dma()
1016 NULL, NULL, &sc->tx.desc_tag))) { in eqos_setup_dma()
1017 device_printf(sc->dev, "could not create TX ring DMA tag\n"); in eqos_setup_dma()
1021 if ((error = bus_dmamem_alloc(sc->tx.desc_tag, in eqos_setup_dma()
1022 (void**)&sc->tx.desc_ring, in eqos_setup_dma()
1024 &sc->tx.desc_map))) { in eqos_setup_dma()
1025 device_printf(sc->dev, in eqos_setup_dma()
1030 if ((error = bus_dmamap_load(sc->tx.desc_tag, sc->tx.desc_map, in eqos_setup_dma()
1031 sc->tx.desc_ring, in eqos_setup_dma()
1032 TX_DESC_SIZE, eqos_get1paddr, &sc->tx.desc_ring_paddr, 0))) { in eqos_setup_dma()
1033 device_printf(sc->dev, in eqos_setup_dma()
1038 if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, in eqos_setup_dma()
1043 &sc->tx.buf_tag))) { in eqos_setup_dma()
1044 device_printf(sc->dev, "could not create TX buffer DMA tag.\n"); in eqos_setup_dma()
1049 if ((error = bus_dmamap_create(sc->tx.buf_tag, BUS_DMA_COHERENT, in eqos_setup_dma()
1050 &sc->tx.buf_map[i].map))) { in eqos_setup_dma()
1051 device_printf(sc->dev, "cannot create TX buffer map\n"); in eqos_setup_dma()
1054 eqos_setup_txdesc(sc, i, EQOS_TDES3_OWN, 0, 0, 0); in eqos_setup_dma()
1058 if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), in eqos_setup_dma()
1063 NULL, NULL, &sc->rx.desc_tag))) { in eqos_setup_dma()
1064 device_printf(sc->dev, "could not create RX ring DMA tag.\n"); in eqos_setup_dma()
1068 if ((error = bus_dmamem_alloc(sc->rx.desc_tag, in eqos_setup_dma()
1069 (void **)&sc->rx.desc_ring, in eqos_setup_dma()
1071 &sc->rx.desc_map))) { in eqos_setup_dma()
1072 device_printf(sc->dev, in eqos_setup_dma()
1077 if ((error = bus_dmamap_load(sc->rx.desc_tag, sc->rx.desc_map, in eqos_setup_dma()
1078 sc->rx.desc_ring, RX_DESC_SIZE, eqos_get1paddr, in eqos_setup_dma()
1079 &sc->rx.desc_ring_paddr, 0))) { in eqos_setup_dma()
1080 device_printf(sc->dev, in eqos_setup_dma()
1085 if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, in eqos_setup_dma()
1090 &sc->rx.buf_tag))) { in eqos_setup_dma()
1091 device_printf(sc->dev, "could not create RX buf DMA tag.\n"); in eqos_setup_dma()
1096 if ((error = bus_dmamap_create(sc->rx.buf_tag, BUS_DMA_COHERENT, in eqos_setup_dma()
1097 &sc->rx.buf_map[i].map))) { in eqos_setup_dma()
1098 device_printf(sc->dev, "cannot create RX buffer map\n"); in eqos_setup_dma()
1101 if (!(m = eqos_alloc_mbufcl(sc))) { in eqos_setup_dma()
1102 device_printf(sc->dev, "cannot allocate RX mbuf\n"); in eqos_setup_dma()
1105 if ((error = eqos_setup_rxbuf(sc, i, m))) { in eqos_setup_dma()
1106 device_printf(sc->dev, "cannot create RX buffer\n"); in eqos_setup_dma()
1112 device_printf(sc->dev, "TX ring @ 0x%lx, RX ring @ 0x%lx\n", in eqos_setup_dma()
1113 sc->tx.desc_ring_paddr, sc->rx.desc_ring_paddr); in eqos_setup_dma()
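
eqos_setup_dma() repeats one busdma recipe per direction:

    /*
     * Per-direction allocation recipe visible above:
     *   1. bus_dma_tag_create()  - tag for the descriptor ring
     *   2. bus_dmamem_alloc()    - coherent ring memory -> sc->{tx,rx}.desc_ring
     *   3. bus_dmamap_load()     - resolves desc_ring_paddr via eqos_get1paddr()
     *   4. bus_dma_tag_create()  - separate tag for packet buffers
     *   5. bus_dmamap_create()   - one map per ring slot; TX slots are
     *      initialized through eqos_setup_txdesc() (line 1054), RX slots
     *      get an mbuf cluster loaded at once via eqos_setup_rxbuf().
     */
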
1120 struct eqos_softc *sc = device_get_softc(dev); in eqos_attach() local
1129 sc->thresh_dma_mode = false; in eqos_attach()
1130 sc->pblx8 = true; in eqos_attach()
1131 sc->txpbl = 0; in eqos_attach()
1132 sc->rxpbl = 0; in eqos_attach()
1133 sc->ttc = 0x10; in eqos_attach()
1134 sc->rtc = 0; in eqos_attach()
1137 if (bus_alloc_resources(dev, eqos_spec, sc->res)) { in eqos_attach()
1139 bus_release_resources(dev, eqos_spec, sc->res); in eqos_attach()
1146 sc->dev = dev; in eqos_attach()
1147 ver = RD4(sc, GMAC_MAC_VERSION); in eqos_attach()
1159 sc->hw_feature[n] = RD4(sc, GMAC_MAC_HW_FEATURE(n)); in eqos_attach()
1165 sc->hw_feature[0], sc->hw_feature[1], in eqos_attach()
1166 sc->hw_feature[2], sc->hw_feature[3]); in eqos_attach()
1169 mtx_init(&sc->lock, "eqos lock", MTX_NETWORK_LOCK, MTX_DEF); in eqos_attach()
1170 callout_init_mtx(&sc->callout, &sc->lock, 0); in eqos_attach()
1172 eqos_get_eaddr(sc, eaddr); in eqos_attach()
1174 device_printf(sc->dev, "Ethernet address %6D\n", eaddr, ":"); in eqos_attach()
1177 if ((error = eqos_reset(sc))) { in eqos_attach()
1178 device_printf(sc->dev, "reset timeout!\n"); in eqos_attach()
1183 eqos_axi_configure(sc); in eqos_attach()
1186 if (eqos_setup_dma(sc)) { in eqos_attach()
1187 device_printf(sc->dev, "failed to setup DMA descriptors\n"); in eqos_attach()
1192 if ((bus_setup_intr(dev, sc->res[EQOS_RES_IRQ0], EQOS_INTR_FLAGS, in eqos_attach()
1193 NULL, eqos_intr, sc, &sc->irq_handle))) { in eqos_attach()
1195 bus_release_resources(dev, eqos_spec, sc->res); in eqos_attach()
1200 ifp = sc->ifp = if_alloc(IFT_ETHER); in eqos_attach()
1201 if_setsoftc(ifp, sc); in eqos_attach()
1202 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); in eqos_attach()
1203 if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); in eqos_attach()
1213 if ((error = mii_attach(sc->dev, &sc->miibus, ifp, eqos_media_change, in eqos_attach()
1216 device_printf(sc->dev, "PHY attach failed\n"); in eqos_attach()
1229 struct eqos_softc *sc = device_get_softc(dev); in eqos_detach() local
1233 EQOS_LOCK(sc); in eqos_detach()
1234 eqos_stop(sc); in eqos_detach()
1235 EQOS_UNLOCK(sc); in eqos_detach()
1236 if_setflagbits(sc->ifp, 0, IFF_UP); in eqos_detach()
1237 ether_ifdetach(sc->ifp); in eqos_detach()
1242 if (sc->irq_handle) in eqos_detach()
1243 bus_teardown_intr(dev, sc->res[EQOS_RES_IRQ0], in eqos_detach()
1244 sc->irq_handle); in eqos_detach()
1246 if (sc->ifp) in eqos_detach()
1247 if_free(sc->ifp); in eqos_detach()
1249 bus_release_resources(dev, eqos_spec, sc->res); in eqos_detach()
1251 if (sc->tx.desc_tag) { in eqos_detach()
1252 if (sc->tx.desc_map) { in eqos_detach()
1253 bus_dmamap_unload(sc->tx.desc_tag, sc->tx.desc_map); in eqos_detach()
1254 bus_dmamem_free(sc->tx.desc_tag, sc->tx.desc_ring, in eqos_detach()
1255 sc->tx.desc_map); in eqos_detach()
1257 bus_dma_tag_destroy(sc->tx.desc_tag); in eqos_detach()
1259 if (sc->tx.buf_tag) { in eqos_detach()
1261 m_free(sc->tx.buf_map[i].mbuf); in eqos_detach()
1262 bus_dmamap_destroy(sc->tx.buf_tag, in eqos_detach()
1263 sc->tx.buf_map[i].map); in eqos_detach()
1265 bus_dma_tag_destroy(sc->tx.buf_tag); in eqos_detach()
1268 if (sc->rx.desc_tag) { in eqos_detach()
1269 if (sc->rx.desc_map) { in eqos_detach()
1270 bus_dmamap_unload(sc->rx.desc_tag, sc->rx.desc_map); in eqos_detach()
1271 bus_dmamem_free(sc->rx.desc_tag, sc->rx.desc_ring, in eqos_detach()
1272 sc->rx.desc_map); in eqos_detach()
1274 bus_dma_tag_destroy(sc->rx.desc_tag); in eqos_detach()
1276 if (sc->rx.buf_tag) { in eqos_detach()
1278 m_free(sc->rx.buf_map[i].mbuf); in eqos_detach()
1279 bus_dmamap_destroy(sc->rx.buf_tag, in eqos_detach()
1280 sc->rx.buf_map[i].map); in eqos_detach()
1282 bus_dma_tag_destroy(sc->rx.buf_tag); in eqos_detach()
1285 mtx_destroy(&sc->lock); in eqos_detach()