Lines Matching full:sc

93 #define EQOS_LOCK(sc) mtx_lock(&(sc)->lock) argument
94 #define EQOS_UNLOCK(sc) mtx_unlock(&(sc)->lock) argument
95 #define EQOS_ASSERT_LOCKED(sc) mtx_assert(&(sc)->lock, MA_OWNED) argument
97 #define RD4(sc, o) bus_read_4(sc->res[EQOS_RES_MEM], (o)) argument
98 #define WR4(sc, o, v) bus_write_4(sc->res[EQOS_RES_MEM], (o), (v)) argument
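The three lock macros wrap a standard mtx(9) mutex embedded in the softc, and RD4/WR4 wrap bus_read_4/bus_write_4 against the mapped register resource. A typical caller combines them for a locked read-modify-write; a minimal illustrative helper (not part of the driver) might look like:

static void
eqos_set_config_bits(struct eqos_softc *sc, uint32_t bits)
{
        uint32_t val;

        EQOS_LOCK(sc);
        val = RD4(sc, GMAC_MAC_CONFIGURATION);
        val |= bits;                    /* set the requested bits */
        WR4(sc, GMAC_MAC_CONFIGURATION, val);
        EQOS_UNLOCK(sc);
}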
113 struct eqos_softc *sc = device_get_softc(dev); in eqos_miibus_readreg() local
117 addr = sc->csr_clock_range | in eqos_miibus_readreg()
122 WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr); in eqos_miibus_readreg()
127 addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS); in eqos_miibus_readreg()
129 val = RD4(sc, GMAC_MAC_MDIO_DATA) & 0xFFFF; in eqos_miibus_readreg()
145 struct eqos_softc *sc = device_get_softc(dev); in eqos_miibus_writereg() local
149 WR4(sc, GMAC_MAC_MDIO_DATA, val); in eqos_miibus_writereg()
151 addr = sc->csr_clock_range | in eqos_miibus_writereg()
156 WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr); in eqos_miibus_writereg()
161 addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS); in eqos_miibus_writereg()
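Both MDIO paths program GMAC_MAC_MDIO_ADDRESS and then re-read it until the transaction completes; the poll itself does not match "sc" and is elided above. A sketch of the loop implied between lines 122/127 and 156/161, with the busy-bit and retry-count names assumed:

        int retry;

        for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
                /* GMAC_MAC_MDIO_ADDRESS_GB and MII_BUSY_RETRY are assumed names */
                addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS);
                if ((addr & GMAC_MAC_MDIO_ADDRESS_GB) == 0)
                        break;          /* transaction done */
                DELAY(10);
        }
        if (retry == 0)
                device_printf(dev, "phy access timeout, phy=%d reg=%d\n",
                    phy, reg);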
177 struct eqos_softc *sc = device_get_softc(dev); in eqos_miibus_statchg() local
178 struct mii_data *mii = device_get_softc(sc->miibus); in eqos_miibus_statchg()
181 EQOS_ASSERT_LOCKED(sc); in eqos_miibus_statchg()
184 sc->link_up = true; in eqos_miibus_statchg()
186 sc->link_up = false; in eqos_miibus_statchg()
188 reg = RD4(sc, GMAC_MAC_CONFIGURATION); in eqos_miibus_statchg()
210 sc->link_up = false; in eqos_miibus_statchg()
219 WR4(sc, GMAC_MAC_CONFIGURATION, reg); in eqos_miibus_statchg()
223 WR4(sc, GMAC_MAC_1US_TIC_COUNTER, (sc->csr_clock / 1000000) - 1); in eqos_miibus_statchg()
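The link_up assignments at lines 184/186 follow the usual mii(4) convention: the link counts as up only when the PHY reports media status that is both valid and active. A sketch of the elided condition:

        if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
            (IFM_ACTIVE | IFM_AVALID))
                sc->link_up = true;     /* line 184 */
        else
                sc->link_up = false;    /* line 186 */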
229 struct eqos_softc *sc = if_getsoftc(ifp); in eqos_media_status() local
230 struct mii_data *mii = device_get_softc(sc->miibus); in eqos_media_status()
232 EQOS_LOCK(sc); in eqos_media_status()
236 EQOS_UNLOCK(sc); in eqos_media_status()
242 struct eqos_softc *sc = if_getsoftc(ifp); in eqos_media_change() local
245 EQOS_LOCK(sc); in eqos_media_change()
246 error = mii_mediachg(device_get_softc(sc->miibus)); in eqos_media_change()
247 EQOS_UNLOCK(sc); in eqos_media_change()
252 eqos_setup_txdesc(struct eqos_softc *sc, int index, int flags, in eqos_setup_txdesc() argument
264 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREWRITE); in eqos_setup_txdesc()
265 sc->tx.desc_ring[index].des0 = htole32((uint32_t)paddr); in eqos_setup_txdesc()
266 sc->tx.desc_ring[index].des1 = htole32((uint32_t)(paddr >> 32)); in eqos_setup_txdesc()
267 sc->tx.desc_ring[index].des2 = htole32(tdes2 | len); in eqos_setup_txdesc()
268 sc->tx.desc_ring[index].des3 = htole32(tdes3 | total_len); in eqos_setup_txdesc()
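eqos_setup_txdesc() writes the four little-endian words of one DMA descriptor: the 64-bit buffer address split across des0/des1, the buffer length plus control flags in des2, and the total frame length plus control flags (including the OWN bit) in des3. The layout being assumed, inferred from the stores above:

struct eqos_dma_desc {                  /* field roles inferred from this listing */
        uint32_t des0;                  /* buffer address, low 32 bits */
        uint32_t des1;                  /* buffer address, high 32 bits */
        uint32_t des2;                  /* control flags | buffer length */
        uint32_t des3;                  /* control flags (OWN etc.) | total length */
};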
272 eqos_setup_txbuf(struct eqos_softc *sc, struct mbuf *m) in eqos_setup_txbuf() argument
275 int first = sc->tx.head; in eqos_setup_txbuf()
279 error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag, in eqos_setup_txbuf()
280 sc->tx.buf_map[first].map, m, segs, &nsegs, 0); in eqos_setup_txbuf()
284 device_printf(sc->dev, "TX packet too big trying defrag\n"); in eqos_setup_txbuf()
285 bus_dmamap_unload(sc->tx.buf_tag, sc->tx.buf_map[first].map); in eqos_setup_txbuf()
289 error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag, in eqos_setup_txbuf()
290 sc->tx.buf_map[first].map, m, segs, &nsegs, 0); in eqos_setup_txbuf()
295 if (TX_QUEUED(sc->tx.head, sc->tx.tail) + nsegs > TX_DESC_COUNT) { in eqos_setup_txbuf()
296 bus_dmamap_unload(sc->tx.buf_tag, sc->tx.buf_map[first].map); in eqos_setup_txbuf()
297 device_printf(sc->dev, "TX packet no more queue space\n"); in eqos_setup_txbuf()
301 bus_dmamap_sync(sc->tx.buf_tag, sc->tx.buf_map[first].map, in eqos_setup_txbuf()
304 sc->tx.buf_map[first].mbuf = m; in eqos_setup_txbuf()
309 eqos_setup_txdesc(sc, sc->tx.head, flags, segs[idx].ds_addr, in eqos_setup_txbuf()
313 sc->tx.head = TX_NEXT(sc->tx.head); in eqos_setup_txbuf()
320 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREWRITE); in eqos_setup_txbuf()
321 sc->tx.desc_ring[first].des3 |= htole32(EQOS_TDES3_OWN); in eqos_setup_txbuf()
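eqos_setup_txbuf() fills one descriptor per DMA segment, then deliberately sets EQOS_TDES3_OWN on the first descriptor only after a second PREWRITE sync (line 321), so the DMA engine can never see a partially built chain. Reconstructed from lines 309 and 313, the per-segment loop plausibly reads as follows, with EQOS_TDES3_FD/EQOS_TDES3_LD assumed by analogy with EQOS_TDES3_OWN:

        for (idx = 0; idx < nsegs; idx++) {
                flags = (idx == 0) ? EQOS_TDES3_FD : 0;   /* first descriptor */
                if (idx == nsegs - 1)
                        flags |= EQOS_TDES3_LD;           /* last descriptor */
                eqos_setup_txdesc(sc, sc->tx.head, flags, segs[idx].ds_addr,
                    segs[idx].ds_len, m->m_pkthdr.len);   /* line 309 */
                sc->tx.head = TX_NEXT(sc->tx.head);       /* line 313 */
        }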
327 eqos_setup_rxdesc(struct eqos_softc *sc, int index, bus_addr_t paddr) in eqos_setup_rxdesc() argument
330 sc->rx.desc_ring[index].des0 = htole32((uint32_t)paddr); in eqos_setup_rxdesc()
331 sc->rx.desc_ring[index].des1 = htole32((uint32_t)(paddr >> 32)); in eqos_setup_rxdesc()
332 sc->rx.desc_ring[index].des2 = htole32(0); in eqos_setup_rxdesc()
333 bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, BUS_DMASYNC_PREWRITE); in eqos_setup_rxdesc()
334 sc->rx.desc_ring[index].des3 = in eqos_setup_rxdesc()
339 eqos_setup_rxbuf(struct eqos_softc *sc, int index, struct mbuf *m) in eqos_setup_rxbuf() argument
346 error = bus_dmamap_load_mbuf_sg(sc->rx.buf_tag, in eqos_setup_rxbuf()
347 sc->rx.buf_map[index].map, m, &seg, &nsegs, 0); in eqos_setup_rxbuf()
351 bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map, in eqos_setup_rxbuf()
354 sc->rx.buf_map[index].mbuf = m; in eqos_setup_rxbuf()
355 eqos_setup_rxdesc(sc, index, seg.ds_addr); in eqos_setup_rxbuf()
361 eqos_alloc_mbufcl(struct eqos_softc *sc) in eqos_alloc_mbufcl() argument
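eqos_alloc_mbufcl() is a one-call wrapper around m_getcl(9); a minimal sketch consistent with how the RX path uses the result:

static struct mbuf *
eqos_alloc_mbufcl(struct eqos_softc *sc)
{
        struct mbuf *m;

        m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
        if (m != NULL)
                m->m_pkthdr.len = m->m_len = MCLBYTES;  /* expose full cluster */
        return (m);
}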
371 eqos_enable_intr(struct eqos_softc *sc) in eqos_enable_intr() argument
374 WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE, in eqos_enable_intr()
383 eqos_disable_intr(struct eqos_softc *sc) in eqos_disable_intr() argument
386 WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE, 0); in eqos_disable_intr()
413 eqos_setup_rxfilter(struct eqos_softc *sc) in eqos_setup_rxfilter() argument
415 if_t ifp = sc->ifp; in eqos_setup_rxfilter()
420 EQOS_ASSERT_LOCKED(sc); in eqos_setup_rxfilter()
422 pfil = RD4(sc, GMAC_MAC_PACKET_FILTER); in eqos_setup_rxfilter()
443 WR4(sc, GMAC_MAC_ADDRESS0_HIGH, val); in eqos_setup_rxfilter()
445 WR4(sc, GMAC_MAC_ADDRESS0_LOW, val); in eqos_setup_rxfilter()
448 WR4(sc, GMAC_MAC_HASH_TABLE_REG0, hash[0]); in eqos_setup_rxfilter()
449 WR4(sc, GMAC_MAC_HASH_TABLE_REG1, hash[1]); in eqos_setup_rxfilter()
452 WR4(sc, GMAC_MAC_PACKET_FILTER, pfil); in eqos_setup_rxfilter()
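The two registers at lines 448/449 form a 64-bit multicast hash filter: each group address is hashed and one bit is set per address. A sketch of a per-address callback for if_foreach_llmaddr(9), with the exact CRC bit selection an assumption (it is hardware-defined):

static u_int
eqos_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
        uint32_t crc, *hash = arg;

        crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN);
        crc >>= 26;                     /* assumed: top 6 CRC bits index the table */
        hash[crc >> 5] |= 1U << (crc & 0x1f);
        return (1);                     /* count this address */
}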
456 eqos_reset(struct eqos_softc *sc) in eqos_reset() argument
461 WR4(sc, GMAC_DMA_MODE, GMAC_DMA_MODE_SWR); in eqos_reset()
464 val = RD4(sc, GMAC_DMA_MODE); in eqos_reset()
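eqos_reset() sets the self-clearing SWR bit, then polls GMAC_DMA_MODE until hardware clears it; the poll body is elided above. A sketch, with an illustrative retry budget:

        int retry;

        for (retry = 2000; retry > 0; retry--) {
                val = RD4(sc, GMAC_DMA_MODE);           /* line 464 */
                if ((val & GMAC_DMA_MODE_SWR) == 0)
                        return (0);                     /* reset complete */
                DELAY(10);
        }
        return (ETIMEDOUT);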
472 eqos_init_rings(struct eqos_softc *sc) in eqos_init_rings() argument
475 WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR_HI, in eqos_init_rings()
476 (uint32_t)(sc->tx.desc_ring_paddr >> 32)); in eqos_init_rings()
477 WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR, in eqos_init_rings()
478 (uint32_t)sc->tx.desc_ring_paddr); in eqos_init_rings()
479 WR4(sc, GMAC_DMA_CHAN0_TX_RING_LEN, TX_DESC_COUNT - 1); in eqos_init_rings()
481 WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR_HI, in eqos_init_rings()
482 (uint32_t)(sc->rx.desc_ring_paddr >> 32)); in eqos_init_rings()
483 WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR, in eqos_init_rings()
484 (uint32_t)sc->rx.desc_ring_paddr); in eqos_init_rings()
485 WR4(sc, GMAC_DMA_CHAN0_RX_RING_LEN, RX_DESC_COUNT - 1); in eqos_init_rings()
487 WR4(sc, GMAC_DMA_CHAN0_RX_END_ADDR, in eqos_init_rings()
488 (uint32_t)sc->rx.desc_ring_paddr + DESC_OFFSET(RX_DESC_COUNT)); in eqos_init_rings()
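Priming GMAC_DMA_CHAN0_RX_END_ADDR to one element past the ring start marks the entire RX ring as available to hardware. DESC_OFFSET() converts a ring index into a byte offset; a plausible definition:

#define DESC_OFFSET(n)  ((n) * sizeof(struct eqos_dma_desc))   /* assumed */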
494 struct eqos_softc *sc = if_softc; in eqos_init() local
495 if_t ifp = sc->ifp; in eqos_init()
496 struct mii_data *mii = device_get_softc(sc->miibus); in eqos_init()
502 EQOS_LOCK(sc); in eqos_init()
504 eqos_init_rings(sc); in eqos_init()
506 eqos_setup_rxfilter(sc); in eqos_init()
508 WR4(sc, GMAC_MAC_1US_TIC_COUNTER, (sc->csr_clock / 1000000) - 1); in eqos_init()
511 val = RD4(sc, GMAC_DMA_CHAN0_CONTROL); in eqos_init()
514 if (sc->pblx8) in eqos_init()
516 WR4(sc, GMAC_DMA_CHAN0_CONTROL, val); in eqos_init()
517 val = RD4(sc, GMAC_DMA_CHAN0_TX_CONTROL); in eqos_init()
518 if (sc->txpbl > 0) in eqos_init()
519 val |= (sc->txpbl << GMAC_DMA_CHAN0_TXRX_PBL_SHIFT); in eqos_init()
522 WR4(sc, GMAC_DMA_CHAN0_TX_CONTROL, val); in eqos_init()
523 val = RD4(sc, GMAC_DMA_CHAN0_RX_CONTROL); in eqos_init()
524 if (sc->rxpbl > 0) in eqos_init()
525 val |= (sc->rxpbl << GMAC_DMA_CHAN0_TXRX_PBL_SHIFT); in eqos_init()
529 WR4(sc, GMAC_DMA_CHAN0_RX_CONTROL, val); in eqos_init()
532 WR4(sc, GMAC_MMC_CONTROL, in eqos_init()
538 if (sc->thresh_dma_mode) { in eqos_init()
539 mtl_tx_val = sc->ttc; in eqos_init()
540 mtl_rx_val = sc->rtc; in eqos_init()
546 WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE, in eqos_init()
549 WR4(sc, GMAC_MTL_RXQ0_OPERATION_MODE, in eqos_init()
555 val = RD4(sc, GMAC_MAC_Q0_TX_FLOW_CTRL); in eqos_init()
558 WR4(sc, GMAC_MAC_Q0_TX_FLOW_CTRL, val); in eqos_init()
559 val = RD4(sc, GMAC_MAC_RX_FLOW_CTRL); in eqos_init()
561 WR4(sc, GMAC_MAC_RX_FLOW_CTRL, val); in eqos_init()
564 WR4(sc, GMAC_RXQ_CTRL0, (GMAC_RXQ_CTRL0_EN_MASK << 16) | in eqos_init()
568 val = RD4(sc, GMAC_MAC_CONFIGURATION); in eqos_init()
575 WR4(sc, GMAC_MAC_CONFIGURATION, val); in eqos_init()
577 eqos_enable_intr(sc); in eqos_init()
582 callout_reset(&sc->callout, hz, eqos_tick, sc); in eqos_init()
584 EQOS_UNLOCK(sc); in eqos_init()
590 struct eqos_softc *sc = if_getsoftc(ifp); in eqos_start_locked() local
594 if (!sc->link_up) in eqos_start_locked()
602 if (TX_QUEUED(sc->tx.head, sc->tx.tail) >= in eqos_start_locked()
611 if (eqos_setup_txbuf(sc, m)) { in eqos_start_locked()
621 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, in eqos_start_locked()
625 WR4(sc, GMAC_DMA_CHAN0_TX_END_ADDR, in eqos_start_locked()
626 (uint32_t)sc->tx.desc_ring_paddr + DESC_OFFSET(sc->tx.head)); in eqos_start_locked()
627 sc->tx_watchdog = WATCHDOG_TIMEOUT_SECS; in eqos_start_locked()
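The dequeue loop elided between lines 594 and 611 follows the standard if_sendq pattern: stop when the ring cannot hold a worst-case packet, otherwise peek, try to map, and only then commit the dequeue. A sketch, with TX_MAX_SEGS an assumed name for the per-packet segment limit:

        for (;;) {
                if (TX_QUEUED(sc->tx.head, sc->tx.tail) >=
                    TX_DESC_COUNT - TX_MAX_SEGS) {
                        if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
                        break;          /* ring nearly full, back off */
                }
                if ((m = if_sendq_first(ifp)) == NULL)
                        break;          /* queue drained */
                if (eqos_setup_txbuf(sc, m))
                        break;          /* mapping failed; leave m queued */
                (void)if_sendq_dequeue(ifp);
        }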
634 struct eqos_softc *sc = if_getsoftc(ifp); in eqos_start() local
636 EQOS_LOCK(sc); in eqos_start()
638 EQOS_UNLOCK(sc); in eqos_start()
642 eqos_stop(struct eqos_softc *sc) in eqos_stop() argument
644 if_t ifp = sc->ifp; in eqos_stop()
648 EQOS_LOCK(sc); in eqos_stop()
652 callout_stop(&sc->callout); in eqos_stop()
655 val = RD4(sc, GMAC_MAC_CONFIGURATION); in eqos_stop()
657 WR4(sc, GMAC_MAC_CONFIGURATION, val); in eqos_stop()
660 val = RD4(sc, GMAC_DMA_CHAN0_RX_CONTROL); in eqos_stop()
662 WR4(sc, GMAC_DMA_CHAN0_RX_CONTROL, val); in eqos_stop()
665 val = RD4(sc, GMAC_DMA_CHAN0_TX_CONTROL); in eqos_stop()
667 WR4(sc, GMAC_DMA_CHAN0_TX_CONTROL, val); in eqos_stop()
670 val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE); in eqos_stop()
672 WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE, val); in eqos_stop()
674 val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE); in eqos_stop()
680 device_printf(sc->dev, "timeout flushing TX queue\n"); in eqos_stop()
683 val = RD4(sc, GMAC_MAC_CONFIGURATION); in eqos_stop()
685 WR4(sc, GMAC_MAC_CONFIGURATION, val); in eqos_stop()
687 eqos_disable_intr(sc); in eqos_stop()
689 EQOS_UNLOCK(sc); in eqos_stop()
693 eqos_rxintr(struct eqos_softc *sc) in eqos_rxintr() argument
695 if_t ifp = sc->ifp; in eqos_rxintr()
701 rdes3 = le32toh(sc->rx.desc_ring[sc->rx.head].des3); in eqos_rxintr()
708 bus_dmamap_sync(sc->rx.buf_tag, in eqos_rxintr()
709 sc->rx.buf_map[sc->rx.head].map, BUS_DMASYNC_POSTREAD); in eqos_rxintr()
710 bus_dmamap_unload(sc->rx.buf_tag, in eqos_rxintr()
711 sc->rx.buf_map[sc->rx.head].map); in eqos_rxintr()
715 m = sc->rx.buf_map[sc->rx.head].mbuf; in eqos_rxintr()
724 EQOS_UNLOCK(sc); in eqos_rxintr()
726 EQOS_LOCK(sc); in eqos_rxintr()
729 if ((m = eqos_alloc_mbufcl(sc))) { in eqos_rxintr()
730 if ((error = eqos_setup_rxbuf(sc, sc->rx.head, m))) in eqos_rxintr()
737 WR4(sc, GMAC_DMA_CHAN0_RX_END_ADDR, in eqos_rxintr()
738 (uint32_t)sc->rx.desc_ring_paddr + DESC_OFFSET(sc->rx.head)); in eqos_rxintr()
740 sc->rx.head = RX_NEXT(sc->rx.head); in eqos_rxintr()
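The receive hand-off elided around lines 715-726 trims the mbuf to the hardware-reported length and passes it up with the softc lock dropped, since if_input() may re-enter the driver. A sketch, with the length mask name assumed by analogy with the TDES macros:

        length = rdes3 & EQOS_RDES3_LENGTH_MASK;        /* assumed name */
        if (length != 0) {
                m->m_pkthdr.len = m->m_len = length;
                m->m_pkthdr.rcvif = ifp;
                EQOS_UNLOCK(sc);                        /* line 724 */
                if_input(ifp, m);
                EQOS_LOCK(sc);                          /* line 726 */
        }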
745 eqos_txintr(struct eqos_softc *sc) in eqos_txintr() argument
747 if_t ifp = sc->ifp; in eqos_txintr()
751 EQOS_ASSERT_LOCKED(sc); in eqos_txintr()
753 while (sc->tx.tail != sc->tx.head) { in eqos_txintr()
754 tdes3 = le32toh(sc->tx.desc_ring[sc->tx.tail].des3); in eqos_txintr()
758 bmap = &sc->tx.buf_map[sc->tx.tail]; in eqos_txintr()
760 bus_dmamap_sync(sc->tx.buf_tag, bmap->map, in eqos_txintr()
762 bus_dmamap_unload(sc->tx.buf_tag, bmap->map); in eqos_txintr()
767 eqos_setup_txdesc(sc, sc->tx.tail, 0, 0, 0, 0); in eqos_txintr()
781 sc->tx.tail = TX_NEXT(sc->tx.tail); in eqos_txintr()
783 if (sc->tx.tail == sc->tx.head) in eqos_txintr()
784 sc->tx_watchdog = 0; in eqos_txintr()
785 eqos_start_locked(sc->ifp); in eqos_txintr()
789 eqos_intr_mtl(struct eqos_softc *sc, uint32_t mtl_status) in eqos_intr_mtl() argument
796 mtl_istat = RD4(sc, GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS); in eqos_intr_mtl()
807 WR4(sc, GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS, mtl_clear); in eqos_intr_mtl()
811 device_printf(sc->dev, in eqos_intr_mtl()
820 struct eqos_softc *sc = softc; in eqos_tick() local
821 struct mii_data *mii = device_get_softc(sc->miibus); in eqos_tick()
824 EQOS_ASSERT_LOCKED(sc); in eqos_tick()
826 if (sc->tx_watchdog > 0) in eqos_tick()
827 if (!--sc->tx_watchdog) { in eqos_tick()
828 device_printf(sc->dev, "watchdog timeout\n"); in eqos_tick()
829 eqos_txintr(sc); in eqos_tick()
832 link_status = sc->link_up; in eqos_tick()
834 if (sc->link_up && !link_status) in eqos_tick()
835 eqos_start_locked(sc->ifp); in eqos_tick()
837 callout_reset(&sc->callout, hz, eqos_tick, sc); in eqos_tick()
843 struct eqos_softc *sc = arg; in eqos_intr() local
846 mac_status = RD4(sc, GMAC_MAC_INTERRUPT_STATUS); in eqos_intr()
847 mac_status &= RD4(sc, GMAC_MAC_INTERRUPT_ENABLE); in eqos_intr()
850 device_printf(sc->dev, "MAC interrupt\n"); in eqos_intr()
852 if ((mtl_status = RD4(sc, GMAC_MTL_INTERRUPT_STATUS))) in eqos_intr()
853 eqos_intr_mtl(sc, mtl_status); in eqos_intr()
855 dma_status = RD4(sc, GMAC_DMA_CHAN0_STATUS); in eqos_intr()
856 dma_status &= RD4(sc, GMAC_DMA_CHAN0_INTR_ENABLE); in eqos_intr()
859 WR4(sc, GMAC_DMA_CHAN0_STATUS, dma_status); in eqos_intr()
861 EQOS_LOCK(sc); in eqos_intr()
864 eqos_rxintr(sc); in eqos_intr()
867 eqos_txintr(sc); in eqos_intr()
869 EQOS_UNLOCK(sc); in eqos_intr()
872 device_printf(sc->dev, in eqos_intr()
874 RD4(sc, GMAC_MAC_INTERRUPT_STATUS), in eqos_intr()
875 RD4(sc, GMAC_MTL_INTERRUPT_STATUS), in eqos_intr()
876 RD4(sc, GMAC_DMA_CHAN0_STATUS)); in eqos_intr()
878 if ((rx_tx_status = RD4(sc, GMAC_MAC_RX_TX_STATUS))) in eqos_intr()
879 device_printf(sc->dev, "RX/TX status interrupt\n"); in eqos_intr()
885 struct eqos_softc *sc = if_getsoftc(ifp); in eqos_ioctl() local
897 EQOS_LOCK(sc); in eqos_ioctl()
898 eqos_setup_rxfilter(sc); in eqos_ioctl()
899 EQOS_UNLOCK(sc); in eqos_ioctl()
902 eqos_init(sc); in eqos_ioctl()
906 eqos_stop(sc); in eqos_ioctl()
913 EQOS_LOCK(sc); in eqos_ioctl()
914 eqos_setup_rxfilter(sc); in eqos_ioctl()
915 EQOS_UNLOCK(sc); in eqos_ioctl()
921 mii = device_get_softc(sc->miibus); in eqos_ioctl()
950 eqos_get_eaddr(struct eqos_softc *sc, uint8_t *eaddr) in eqos_get_eaddr() argument
954 maclo = htobe32(RD4(sc, GMAC_MAC_ADDRESS0_LOW)); in eqos_get_eaddr()
955 machi = htobe16(RD4(sc, GMAC_MAC_ADDRESS0_HIGH) & 0xFFFF); in eqos_get_eaddr()
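The GMAC keeps octets 0-3 of the station address in ADDRESS0_LOW and octets 4-5 in the low half of ADDRESS0_HIGH; the htobe32/htobe16 at lines 954/955 put the words into wire order so the bytes can be copied out directly. A byte-wise equivalent, for clarity:

        uint32_t lo = RD4(sc, GMAC_MAC_ADDRESS0_LOW);
        uint32_t hi = RD4(sc, GMAC_MAC_ADDRESS0_HIGH) & 0xFFFF;

        for (int i = 0; i < 4; i++)
                eaddr[i] = (lo >> (i * 8)) & 0xFF;      /* octets 0-3 */
        eaddr[4] = hi & 0xFF;                           /* octet 4 */
        eaddr[5] = (hi >> 8) & 0xFF;                    /* octet 5 */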
971 eqos_axi_configure(struct eqos_softc *sc) in eqos_axi_configure() argument
975 val = RD4(sc, GMAC_DMA_SYSBUS_MODE); in eqos_axi_configure()
993 WR4(sc, GMAC_DMA_SYSBUS_MODE, val); in eqos_axi_configure()
1005 eqos_setup_dma(struct eqos_softc *sc) in eqos_setup_dma() argument
1012 bus_get_dma_tag(sc->dev), /* Parent tag */ in eqos_setup_dma()
1021 &sc->tx.desc_tag); in eqos_setup_dma()
1023 device_printf(sc->dev, "could not create TX ring DMA tag\n"); in eqos_setup_dma()
1027 error = bus_dmamem_alloc(sc->tx.desc_tag, in eqos_setup_dma()
1028 (void**)&sc->tx.desc_ring, in eqos_setup_dma()
1030 &sc->tx.desc_map); in eqos_setup_dma()
1032 device_printf(sc->dev, in eqos_setup_dma()
1038 error = bus_dmamap_load(sc->tx.desc_tag, sc->tx.desc_map, in eqos_setup_dma()
1039 sc->tx.desc_ring, TX_DESC_SIZE, in eqos_setup_dma()
1040 eqos_get1paddr, &sc->tx.desc_ring_paddr, in eqos_setup_dma()
1043 device_printf(sc->dev, in eqos_setup_dma()
1049 bus_get_dma_tag(sc->dev), /* Parent tag */ in eqos_setup_dma()
1058 &sc->tx.buf_tag); in eqos_setup_dma()
1060 device_printf(sc->dev, "could not create TX buffer DMA tag.\n"); in eqos_setup_dma()
1065 if ((error = bus_dmamap_create(sc->tx.buf_tag, BUS_DMA_COHERENT, in eqos_setup_dma()
1066 &sc->tx.buf_map[i].map))) { in eqos_setup_dma()
1067 device_printf(sc->dev, "cannot create TX buffer map\n"); in eqos_setup_dma()
1070 eqos_setup_txdesc(sc, i, EQOS_TDES3_OWN, 0, 0, 0); in eqos_setup_dma()
1075 bus_get_dma_tag(sc->dev), /* Parent tag */ in eqos_setup_dma()
1084 &sc->rx.desc_tag); in eqos_setup_dma()
1086 device_printf(sc->dev, "could not create RX ring DMA tag.\n"); in eqos_setup_dma()
1090 error = bus_dmamem_alloc(sc->rx.desc_tag, in eqos_setup_dma()
1091 (void **)&sc->rx.desc_ring, in eqos_setup_dma()
1093 &sc->rx.desc_map); in eqos_setup_dma()
1095 device_printf(sc->dev, in eqos_setup_dma()
1100 error = bus_dmamap_load(sc->rx.desc_tag, sc->rx.desc_map, in eqos_setup_dma()
1101 sc->rx.desc_ring, RX_DESC_SIZE, in eqos_setup_dma()
1102 eqos_get1paddr, &sc->rx.desc_ring_paddr, in eqos_setup_dma()
1105 device_printf(sc->dev, in eqos_setup_dma()
1111 bus_get_dma_tag(sc->dev), /* Parent tag */ in eqos_setup_dma()
1120 &sc->rx.buf_tag); in eqos_setup_dma()
1122 device_printf(sc->dev, "could not create RX buf DMA tag.\n"); in eqos_setup_dma()
1127 if ((error = bus_dmamap_create(sc->rx.buf_tag, BUS_DMA_COHERENT, in eqos_setup_dma()
1128 &sc->rx.buf_map[i].map))) { in eqos_setup_dma()
1129 device_printf(sc->dev, "cannot create RX buffer map\n"); in eqos_setup_dma()
1132 if (!(m = eqos_alloc_mbufcl(sc))) { in eqos_setup_dma()
1133 device_printf(sc->dev, "cannot allocate RX mbuf\n"); in eqos_setup_dma()
1136 if ((error = eqos_setup_rxbuf(sc, i, m))) { in eqos_setup_dma()
1137 device_printf(sc->dev, "cannot create RX buffer\n"); in eqos_setup_dma()
1143 device_printf(sc->dev, "TX ring @ 0x%lx, RX ring @ 0x%lx\n", in eqos_setup_dma()
1144 sc->tx.desc_ring_paddr, sc->rx.desc_ring_paddr); in eqos_setup_dma()
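The eqos_get1paddr callback passed to bus_dmamap_load() at lines 1040 and 1102 is, in all likelihood, the stock single-segment idiom: it copies the lone segment's bus address out through the arg pointer. A sketch:

static void
eqos_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        /* descriptor rings are allocated contiguously, so nsegs == 1 */
        if (error == 0)
                *(bus_addr_t *)arg = segs[0].ds_addr;
}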
1151 struct eqos_softc *sc = device_get_softc(dev); in eqos_attach() local
1160 sc->thresh_dma_mode = false; in eqos_attach()
1161 sc->pblx8 = true; in eqos_attach()
1162 sc->txpbl = 0; in eqos_attach()
1163 sc->rxpbl = 0; in eqos_attach()
1164 sc->ttc = 0x10; in eqos_attach()
1165 sc->rtc = 0; in eqos_attach()
1168 if (bus_alloc_resources(dev, eqos_spec, sc->res)) { in eqos_attach()
1170 bus_release_resources(dev, eqos_spec, sc->res); in eqos_attach()
1177 sc->dev = dev; in eqos_attach()
1178 ver = RD4(sc, GMAC_MAC_VERSION); in eqos_attach()
1190 sc->hw_feature[n] = RD4(sc, GMAC_MAC_HW_FEATURE(n)); in eqos_attach()
1196 sc->hw_feature[0], sc->hw_feature[1], in eqos_attach()
1197 sc->hw_feature[2], sc->hw_feature[3]); in eqos_attach()
1200 mtx_init(&sc->lock, "eqos lock", MTX_NETWORK_LOCK, MTX_DEF); in eqos_attach()
1201 callout_init_mtx(&sc->callout, &sc->lock, 0); in eqos_attach()
1203 eqos_get_eaddr(sc, eaddr); in eqos_attach()
1205 device_printf(sc->dev, "Ethernet address %6D\n", eaddr, ":"); in eqos_attach()
1208 if ((error = eqos_reset(sc))) { in eqos_attach()
1209 device_printf(sc->dev, "reset timeout!\n"); in eqos_attach()
1214 eqos_axi_configure(sc); in eqos_attach()
1217 if (eqos_setup_dma(sc)) { in eqos_attach()
1218 device_printf(sc->dev, "failed to setup DMA descriptors\n"); in eqos_attach()
1223 if ((bus_setup_intr(dev, sc->res[EQOS_RES_IRQ0], EQOS_INTR_FLAGS, in eqos_attach()
1224 NULL, eqos_intr, sc, &sc->irq_handle))) { in eqos_attach()
1226 bus_release_resources(dev, eqos_spec, sc->res); in eqos_attach()
1231 ifp = sc->ifp = if_alloc(IFT_ETHER); in eqos_attach()
1232 if_setsoftc(ifp, sc); in eqos_attach()
1233 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); in eqos_attach()
1234 if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); in eqos_attach()
1244 if ((error = mii_attach(sc->dev, &sc->miibus, ifp, eqos_media_change, in eqos_attach()
1247 device_printf(sc->dev, "PHY attach failed\n"); in eqos_attach()
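The mii_attach(9) call at line 1244 spans several elided lines; its usual shape, with the stock mii(4) wildcard constants assumed rather than read from the source:

        error = mii_attach(sc->dev, &sc->miibus, ifp, eqos_media_change,
            eqos_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY,
            MII_OFFSET_ANY, 0);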
1260 struct eqos_softc *sc = device_get_softc(dev); in eqos_detach() local
1264 EQOS_LOCK(sc); in eqos_detach()
1265 eqos_stop(sc); in eqos_detach()
1266 EQOS_UNLOCK(sc); in eqos_detach()
1267 if_setflagbits(sc->ifp, 0, IFF_UP); in eqos_detach()
1268 ether_ifdetach(sc->ifp); in eqos_detach()
1273 if (sc->irq_handle) in eqos_detach()
1274 bus_teardown_intr(dev, sc->res[EQOS_RES_IRQ0], in eqos_detach()
1275 sc->irq_handle); in eqos_detach()
1277 if (sc->ifp) in eqos_detach()
1278 if_free(sc->ifp); in eqos_detach()
1280 bus_release_resources(dev, eqos_spec, sc->res); in eqos_detach()
1282 if (sc->tx.desc_tag) { in eqos_detach()
1283 if (sc->tx.desc_map) { in eqos_detach()
1284 bus_dmamap_unload(sc->tx.desc_tag, sc->tx.desc_map); in eqos_detach()
1285 bus_dmamem_free(sc->tx.desc_tag, sc->tx.desc_ring, in eqos_detach()
1286 sc->tx.desc_map); in eqos_detach()
1288 bus_dma_tag_destroy(sc->tx.desc_tag); in eqos_detach()
1290 if (sc->tx.buf_tag) { in eqos_detach()
1292 m_free(sc->tx.buf_map[i].mbuf); in eqos_detach()
1293 bus_dmamap_destroy(sc->tx.buf_tag, in eqos_detach()
1294 sc->tx.buf_map[i].map); in eqos_detach()
1296 bus_dma_tag_destroy(sc->tx.buf_tag); in eqos_detach()
1299 if (sc->rx.desc_tag) { in eqos_detach()
1300 if (sc->rx.desc_map) { in eqos_detach()
1301 bus_dmamap_unload(sc->rx.desc_tag, sc->rx.desc_map); in eqos_detach()
1302 bus_dmamem_free(sc->rx.desc_tag, sc->rx.desc_ring, in eqos_detach()
1303 sc->rx.desc_map); in eqos_detach()
1305 bus_dma_tag_destroy(sc->rx.desc_tag); in eqos_detach()
1307 if (sc->rx.buf_tag) { in eqos_detach()
1309 m_free(sc->rx.buf_map[i].mbuf); in eqos_detach()
1310 bus_dmamap_destroy(sc->rx.buf_tag, in eqos_detach()
1311 sc->rx.buf_map[i].map); in eqos_detach()
1313 bus_dma_tag_destroy(sc->rx.buf_tag); in eqos_detach()
1316 mtx_destroy(&sc->lock); in eqos_detach()