Lines Matching full:sc
73 static void tsec_dma_ctl(struct tsec_softc *sc, int state);
74 static void tsec_encap(if_t ifp, struct tsec_softc *sc,
76 static void tsec_free_dma(struct tsec_softc *sc);
84 static void tsec_intrs_ctl(struct tsec_softc *sc, int state);
86 static void tsec_init_locked(struct tsec_softc *sc);
88 static void tsec_reset_mac(struct tsec_softc *sc);
89 static void tsec_setfilter(struct tsec_softc *sc);
90 static void tsec_set_mac_address(struct tsec_softc *sc);
93 static void tsec_stop(struct tsec_softc *sc);
95 static void tsec_watchdog(struct tsec_softc *sc);
96 static void tsec_add_sysctls(struct tsec_softc *sc);
99 static void tsec_set_rxic(struct tsec_softc *sc);
100 static void tsec_set_txic(struct tsec_softc *sc);
101 static int tsec_receive_intr_locked(struct tsec_softc *sc, int count);
102 static void tsec_transmit_intr_locked(struct tsec_softc *sc);
103 static void tsec_error_intr_locked(struct tsec_softc *sc, int count);
104 static void tsec_offload_setup(struct tsec_softc *sc);
105 static void tsec_offload_process_frame(struct tsec_softc *sc,
107 static void tsec_setup_multicast(struct tsec_softc *sc);
108 static int tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu);
117 tsec_attach(struct tsec_softc *sc) in tsec_attach() argument
129 TSEC_TX_RX_COUNTERS_INIT(sc); in tsec_attach()
132 tsec_dma_ctl(sc, 0); in tsec_attach()
135 tsec_reset_mac(sc); in tsec_attach()
138 tsec_intrs_ctl(sc, 0); in tsec_attach()
141 sc->rx_ic_time = 768; in tsec_attach()
142 sc->rx_ic_count = 16; in tsec_attach()
143 sc->tx_ic_time = 768; in tsec_attach()
144 sc->tx_ic_count = 16; in tsec_attach()
145 tsec_set_rxic(sc); in tsec_attach()
146 tsec_set_txic(sc); in tsec_attach()
147 tsec_add_sysctls(sc); in tsec_attach()
150 error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_tx_dtag, in tsec_attach()
151 &sc->tsec_tx_dmap, sizeof(*sc->tsec_tx_vaddr) * TSEC_TX_NUM_DESC, in tsec_attach()
152 (void **)&sc->tsec_tx_vaddr, &sc->tsec_tx_raddr, "TX"); in tsec_attach()
155 tsec_detach(sc); in tsec_attach()
160 error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_rx_dtag, in tsec_attach()
161 &sc->tsec_rx_dmap, sizeof(*sc->tsec_rx_vaddr) * TSEC_RX_NUM_DESC, in tsec_attach()
162 (void **)&sc->tsec_rx_vaddr, &sc->tsec_rx_raddr, "RX"); in tsec_attach()
164 tsec_detach(sc); in tsec_attach()
178 &sc->tsec_tx_mtag); /* dmat */ in tsec_attach()
180 device_printf(sc->dev, "failed to allocate busdma tag " in tsec_attach()
182 tsec_detach(sc); in tsec_attach()
196 &sc->tsec_rx_mtag); /* dmat */ in tsec_attach()
198 device_printf(sc->dev, "failed to allocate busdma tag " in tsec_attach()
200 tsec_detach(sc); in tsec_attach()
206 error = bus_dmamap_create(sc->tsec_tx_mtag, 0, in tsec_attach()
207 &sc->tx_bufmap[i].map); in tsec_attach()
209 device_printf(sc->dev, "failed to init TX ring\n"); in tsec_attach()
210 tsec_detach(sc); in tsec_attach()
213 sc->tx_bufmap[i].map_initialized = 1; in tsec_attach()
218 error = bus_dmamap_create(sc->tsec_rx_mtag, 0, in tsec_attach()
219 &sc->rx_data[i].map); in tsec_attach()
221 device_printf(sc->dev, "failed to init RX ring\n"); in tsec_attach()
222 tsec_detach(sc); in tsec_attach()
225 sc->rx_data[i].mbuf = NULL; in tsec_attach()
230 error = tsec_new_rxbuf(sc->tsec_rx_mtag, sc->rx_data[i].map, in tsec_attach()
231 &sc->rx_data[i].mbuf, &sc->rx_data[i].paddr); in tsec_attach()
233 device_printf(sc->dev, "can't load rx DMA map %d, " in tsec_attach()
235 tsec_detach(sc); in tsec_attach()
241 ifp = sc->tsec_ifp = if_alloc(IFT_ETHER); in tsec_attach()
242 if_setsoftc(ifp, sc); in tsec_attach()
243 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); in tsec_attach()
253 if (sc->is_etsec) in tsec_attach()
264 error = mii_attach(sc->dev, &sc->tsec_miibus, ifp, tsec_ifmedia_upd, in tsec_attach()
265 tsec_ifmedia_sts, BMSR_DEFCAPMASK, sc->phyaddr, MII_OFFSET_ANY, in tsec_attach()
268 device_printf(sc->dev, "attaching PHYs failed\n"); in tsec_attach()
270 sc->tsec_ifp = NULL; in tsec_attach()
271 tsec_detach(sc); in tsec_attach()
274 sc->tsec_mii = device_get_softc(sc->tsec_miibus); in tsec_attach()
277 tsec_get_hwaddr(sc, hwaddr); in tsec_attach()
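The attach lines above call a descriptor-allocation helper, tsec_alloc_dma_desc(), whose body does not match the search term. A minimal sketch of what such a helper conventionally does with the FreeBSD busdma KPI follows; the tag alignment, address limits, and flags are assumptions, not the driver's exact values.

    /* Hypothetical callback: record the single segment's bus address. */
    static void
    tsec_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
    {
            if (error == 0)
                    *(bus_addr_t *)arg = segs[0].ds_addr;
    }

    /* Sketch of a ring allocator matching the call sites at lines 150 and 160. */
    static int
    tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag, bus_dmamap_t *dmap,
        bus_size_t dsize, void **vaddr, bus_addr_t *raddr, const char *dname)
    {
            int error;

            /* One tag sized for the whole descriptor ring, single segment. */
            error = bus_dma_tag_create(bus_get_dma_tag(dev), PAGE_SIZE, 0,
                BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
                dsize, 1, dsize, 0, NULL, NULL, dtag);
            if (error) {
                    device_printf(dev, "failed to create %s DMA tag\n", dname);
                    return (ENXIO);
            }

            /* Coherent memory for the ring, plus a map to load it with. */
            error = bus_dmamem_alloc(*dtag, vaddr,
                BUS_DMA_NOWAIT | BUS_DMA_COHERENT, dmap);
            if (error) {
                    device_printf(dev, "failed to allocate %s DMA memory\n", dname);
                    return (ENXIO);
            }

            /* Resolve the bus address the controller is later programmed with. */
            error = bus_dmamap_load(*dtag, *dmap, *vaddr, dsize,
                tsec_dma_map_addr, raddr, BUS_DMA_NOWAIT);
            if (error) {
                    device_printf(dev, "cannot load %s DMA memory\n", dname);
                    return (ENXIO);
            }

            return (0);
    }

Allocating the whole ring as one single-segment, coherent region is what lets tsec_init_locked() below program TBASE and RBASE with a single bus address (lines 491-492).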
284 tsec_detach(struct tsec_softc *sc) in tsec_detach() argument
287 if (sc->tsec_ifp != NULL) { in tsec_detach()
289 if (if_getcapenable(sc->tsec_ifp) & IFCAP_POLLING) in tsec_detach()
290 ether_poll_deregister(sc->tsec_ifp); in tsec_detach()
294 if (sc->sc_rres) in tsec_detach()
295 tsec_shutdown(sc->dev); in tsec_detach()
298 ether_ifdetach(sc->tsec_ifp); in tsec_detach()
299 if_free(sc->tsec_ifp); in tsec_detach()
300 sc->tsec_ifp = NULL; in tsec_detach()
304 tsec_free_dma(sc); in tsec_detach()
312 struct tsec_softc *sc; in tsec_shutdown() local
314 sc = device_get_softc(dev); in tsec_shutdown()
316 TSEC_GLOBAL_LOCK(sc); in tsec_shutdown()
317 tsec_stop(sc); in tsec_shutdown()
318 TSEC_GLOBAL_UNLOCK(sc); in tsec_shutdown()
341 struct tsec_softc *sc = xsc; in tsec_init() local
343 TSEC_GLOBAL_LOCK(sc); in tsec_init()
344 tsec_init_locked(sc); in tsec_init()
345 TSEC_GLOBAL_UNLOCK(sc); in tsec_init()
349 tsec_mii_wait(struct tsec_softc *sc, uint32_t flags) in tsec_mii_wait() argument
357 TSEC_PHY_READ(sc, TSEC_REG_MIIMIND); in tsec_mii_wait()
360 while ((TSEC_PHY_READ(sc, TSEC_REG_MIIMIND) & flags) && --timeout) in tsec_mii_wait()
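Only the dummy read and the polling loop of tsec_mii_wait() match the search, but they carry the whole idea: read MIIMIND once to flush a possibly stale value, then spin with a bounded budget until the requested bits clear. A completed sketch, with the retry count and per-iteration delay as assumptions:

    static uint32_t
    tsec_mii_wait(struct tsec_softc *sc, uint32_t flags)
    {
            uint32_t timeout = 1000;        /* assumed retry budget */

            /* Dummy read; the first MIIMIND value may be stale. */
            TSEC_PHY_READ(sc, TSEC_REG_MIIMIND);

            /* Spin until every requested bit clears or the budget runs out. */
            while ((TSEC_PHY_READ(sc, TSEC_REG_MIIMIND) & flags) && --timeout)
                    DELAY(10);              /* assumed 10 us per iteration */

            /* Zero means the wait timed out; callers check for that. */
            return (timeout);
    }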
367 tsec_init_locked(struct tsec_softc *sc) in tsec_init_locked() argument
369 struct tsec_desc *tx_desc = sc->tsec_tx_vaddr; in tsec_init_locked()
370 struct tsec_desc *rx_desc = sc->tsec_rx_vaddr; in tsec_init_locked()
371 if_t ifp = sc->tsec_ifp; in tsec_init_locked()
378 TSEC_GLOBAL_LOCK_ASSERT(sc); in tsec_init_locked()
379 tsec_stop(sc); in tsec_init_locked()
387 tsec_reset_mac(sc); in tsec_init_locked()
390 TSEC_WRITE(sc, TSEC_REG_MACCFG2, in tsec_init_locked()
408 i = TSEC_READ(sc, TSEC_REG_ID2); in tsec_init_locked()
412 TSEC_WRITE(sc, TSEC_REG_ECNTRL, TSEC_ECNTRL_STEN | val); in tsec_init_locked()
415 tsec_set_mac_address(sc); in tsec_init_locked()
421 TSEC_WRITE(sc, TSEC_REG_TBIPA, 5); in tsec_init_locked()
423 TSEC_PHY_LOCK(sc); in tsec_init_locked()
426 TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_RESETMGMT); in tsec_init_locked()
429 TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_CLKDIV28); in tsec_init_locked()
432 timeout = tsec_mii_wait(sc, TSEC_MIIMIND_BUSY); in tsec_init_locked()
434 TSEC_PHY_UNLOCK(sc); in tsec_init_locked()
441 mii_mediachg(sc->tsec_mii); in tsec_init_locked()
444 TSEC_WRITE(sc, TSEC_REG_IEVENT, 0xffffffff); in tsec_init_locked()
453 tsec_intrs_ctl(sc, 0); in tsec_init_locked()
456 tsec_intrs_ctl(sc, 1); in tsec_init_locked()
459 TSEC_WRITE(sc, TSEC_REG_IADDR0, 0); in tsec_init_locked()
460 TSEC_WRITE(sc, TSEC_REG_IADDR1, 0); in tsec_init_locked()
461 TSEC_WRITE(sc, TSEC_REG_IADDR2, 0); in tsec_init_locked()
462 TSEC_WRITE(sc, TSEC_REG_IADDR3, 0); in tsec_init_locked()
463 TSEC_WRITE(sc, TSEC_REG_IADDR4, 0); in tsec_init_locked()
464 TSEC_WRITE(sc, TSEC_REG_IADDR5, 0); in tsec_init_locked()
465 TSEC_WRITE(sc, TSEC_REG_IADDR6, 0); in tsec_init_locked()
466 TSEC_WRITE(sc, TSEC_REG_IADDR7, 0); in tsec_init_locked()
469 TSEC_WRITE(sc, TSEC_REG_GADDR0, 0); in tsec_init_locked()
470 TSEC_WRITE(sc, TSEC_REG_GADDR1, 0); in tsec_init_locked()
471 TSEC_WRITE(sc, TSEC_REG_GADDR2, 0); in tsec_init_locked()
472 TSEC_WRITE(sc, TSEC_REG_GADDR3, 0); in tsec_init_locked()
473 TSEC_WRITE(sc, TSEC_REG_GADDR4, 0); in tsec_init_locked()
474 TSEC_WRITE(sc, TSEC_REG_GADDR5, 0); in tsec_init_locked()
475 TSEC_WRITE(sc, TSEC_REG_GADDR6, 0); in tsec_init_locked()
476 TSEC_WRITE(sc, TSEC_REG_GADDR7, 0); in tsec_init_locked()
479 TSEC_WRITE(sc, TSEC_REG_RCTRL, 0); in tsec_init_locked()
482 tsec_dma_ctl(sc, 1); in tsec_init_locked()
485 TSEC_WRITE(sc, TSEC_REG_FIFO_PAUSE_CTRL, TSEC_FIFO_PAUSE_CTRL_EN); in tsec_init_locked()
491 TSEC_WRITE(sc, TSEC_REG_TBASE, sc->tsec_tx_raddr); in tsec_init_locked()
492 TSEC_WRITE(sc, TSEC_REG_RBASE, sc->tsec_rx_raddr); in tsec_init_locked()
500 bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, in tsec_init_locked()
504 rx_desc[i].bufptr = sc->rx_data[i].paddr; in tsec_init_locked()
509 bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, in tsec_init_locked()
513 TSEC_WRITE(sc, TSEC_REG_MRBLR, MCLBYTES); in tsec_init_locked()
516 TSEC_WRITE(sc, TSEC_REG_MINFLR, TSEC_MIN_FRAME_SIZE); in tsec_init_locked()
517 tsec_set_mtu(sc, if_getmtu(ifp)); in tsec_init_locked()
520 TSEC_WRITE(sc, TSEC_REG_ATTR, TSEC_ATTR_RDSEN | TSEC_ATTR_RBDSEN); in tsec_init_locked()
521 TSEC_WRITE(sc, TSEC_REG_ATTRELI, 0); in tsec_init_locked()
524 TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0); in tsec_init_locked()
525 TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0); in tsec_init_locked()
526 TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0); in tsec_init_locked()
527 TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0); in tsec_init_locked()
528 TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0); in tsec_init_locked()
531 TSEC_WRITE(sc, TSEC_REG_MON_CAM1, 0xffffffff); in tsec_init_locked()
532 TSEC_WRITE(sc, TSEC_REG_MON_CAM2, 0xffffffff); in tsec_init_locked()
535 val = TSEC_READ(sc, TSEC_REG_MACCFG1); in tsec_init_locked()
537 TSEC_WRITE(sc, TSEC_REG_MACCFG1, val); in tsec_init_locked()
540 TSEC_TX_RX_COUNTERS_INIT(sc); in tsec_init_locked()
543 if (sc->is_etsec) in tsec_init_locked()
544 tsec_offload_setup(sc); in tsec_init_locked()
547 tsec_setup_multicast(sc); in tsec_init_locked()
552 sc->tsec_if_flags = if_getflags(ifp); in tsec_init_locked()
553 sc->tsec_watchdog = 0; in tsec_init_locked()
556 callout_reset(&sc->tsec_callout, hz, tsec_tick, sc); in tsec_init_locked()
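The init fragments hint at, but do not show, how the RX descriptor ring is rebuilt before the sync at line 509. A sketch of that loop; the buffer-descriptor flag names (empty, interrupt, wrap) are assumed rather than taken from the register header:

    for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
            /* Point each descriptor at its preloaded mbuf cluster. */
            rx_desc[i].bufptr = sc->rx_data[i].paddr;
            rx_desc[i].length = 0;
            /* Empty + interrupt-on-completion; WRAP on the last entry. */
            rx_desc[i].flags = TSEC_RXBD_E | TSEC_RXBD_I |
                ((i == TSEC_RX_NUM_DESC - 1) ? TSEC_RXBD_W : 0);
    }
    /* Publish the ring before DMA is re-enabled (matches line 509). */
    bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);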
560 tsec_set_mac_address(struct tsec_softc *sc) in tsec_set_mac_address() argument
566 TSEC_GLOBAL_LOCK_ASSERT(sc); in tsec_set_mac_address()
573 curmac = (char *)if_getlladdr(sc->tsec_ifp); in tsec_set_mac_address()
580 TSEC_WRITE(sc, TSEC_REG_MACSTNADDR2, macbuf[1]); in tsec_set_mac_address()
581 TSEC_WRITE(sc, TSEC_REG_MACSTNADDR1, macbuf[0]); in tsec_set_mac_address()
590 tsec_dma_ctl(struct tsec_softc *sc, int state) in tsec_dma_ctl() argument
595 dev = sc->dev; in tsec_dma_ctl()
597 dma_flags = TSEC_READ(sc, TSEC_REG_DMACTRL); in tsec_dma_ctl()
602 tsec_dma_ctl(sc, 1000); in tsec_dma_ctl()
621 TSEC_WRITE(sc, TSEC_REG_DMACTRL, dma_flags); in tsec_dma_ctl()
627 while (--timeout && (!(TSEC_READ(sc, TSEC_REG_IEVENT) & in tsec_dma_ctl()
636 TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT); in tsec_dma_ctl()
646 tsec_intrs_ctl(struct tsec_softc *sc, int state) in tsec_intrs_ctl() argument
650 dev = sc->dev; in tsec_intrs_ctl()
654 TSEC_WRITE(sc, TSEC_REG_IMASK, 0); in tsec_intrs_ctl()
657 TSEC_WRITE(sc, TSEC_REG_IMASK, TSEC_IMASK_BREN | in tsec_intrs_ctl()
669 tsec_reset_mac(struct tsec_softc *sc) in tsec_reset_mac() argument
674 maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1); in tsec_reset_mac()
676 TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags); in tsec_reset_mac()
679 maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1); in tsec_reset_mac()
681 TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags); in tsec_reset_mac()
685 tsec_watchdog(struct tsec_softc *sc) in tsec_watchdog() argument
689 TSEC_GLOBAL_LOCK_ASSERT(sc); in tsec_watchdog()
691 if (sc->tsec_watchdog == 0 || --sc->tsec_watchdog > 0) in tsec_watchdog()
694 ifp = sc->tsec_ifp; in tsec_watchdog()
698 tsec_stop(sc); in tsec_watchdog()
699 tsec_init_locked(sc); in tsec_watchdog()
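The watchdog fragments show the countdown idiom without its surroundings. A sketch of the whole routine, assuming the usual arm/disarm protocol: tsec_start_locked() arms it with 5 at line 779, and a fully drained TX ring clears it at line 1479; only when the counter actually reaches zero here is a timeout declared.

    static void
    tsec_watchdog(struct tsec_softc *sc)
    {
            if_t ifp;

            TSEC_GLOBAL_LOCK_ASSERT(sc);

            /* 0 means "not armed"; a positive value is still counting down. */
            if (sc->tsec_watchdog == 0 || --sc->tsec_watchdog > 0)
                    return;

            ifp = sc->tsec_ifp;
            if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
            if_printf(ifp, "watchdog timeout\n");

            /* Recover by reinitializing the controller. */
            tsec_stop(sc);
            tsec_init_locked(sc);
    }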
705 struct tsec_softc *sc = if_getsoftc(ifp); in tsec_start() local
707 TSEC_TRANSMIT_LOCK(sc); in tsec_start()
709 TSEC_TRANSMIT_UNLOCK(sc); in tsec_start()
715 struct tsec_softc *sc; in tsec_start_locked() local
722 sc = if_getsoftc(ifp); in tsec_start_locked()
725 TSEC_TRANSMIT_LOCK_ASSERT(sc); in tsec_start_locked()
727 if (sc->tsec_link == 0) in tsec_start_locked()
730 bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, in tsec_start_locked()
734 if (TSEC_FREE_TX_DESC(sc) < TSEC_TX_MAX_DMA_SEGS) { in tsec_start_locked()
771 tsec_encap(ifp, sc, m0, fcb_flags, &start_tx); in tsec_start_locked()
773 bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, in tsec_start_locked()
778 TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT); in tsec_start_locked()
779 sc->tsec_watchdog = 5; in tsec_start_locked()
784 tsec_encap(if_t ifp, struct tsec_softc *sc, struct mbuf *m0, in tsec_encap() argument
793 TSEC_TRANSMIT_LOCK_ASSERT(sc); in tsec_encap()
795 tx_idx = sc->tx_idx_head; in tsec_encap()
796 tx_bufmap = &sc->tx_bufmap[tx_idx]; in tsec_encap()
799 error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag, tx_bufmap->map, m0, in tsec_encap()
810 error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag, in tsec_encap()
819 bus_dmamap_sync(sc->tsec_tx_mtag, tx_bufmap->map, in tsec_encap()
828 sc->tx_idx_head = tx_idx; in tsec_encap()
834 tx_desc = &sc->tsec_tx_vaddr[tx_idx]; in tsec_encap()
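tsec_encap() loads the mbuf chain twice, at lines 799 and 810, which is the standard "load, m_defrag() on EFBIG, retry" idiom. A hypothetical stand-alone helper showing that shape (tsec_map_mbuf() is illustrative and not part of the driver):

    /* Sketch: map an mbuf chain for TX, linearizing it on EFBIG. */
    static int
    tsec_map_mbuf(struct tsec_softc *sc, bus_dmamap_t map, struct mbuf **m0p,
        bus_dma_segment_t *segs, int *nsegs)
    {
            struct mbuf *m;
            int error;

            error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag, map, *m0p,
                segs, nsegs, BUS_DMA_NOWAIT);
            if (error == EFBIG) {
                    /* Too many segments: collapse the chain and retry once. */
                    m = m_defrag(*m0p, M_NOWAIT);
                    if (m == NULL)
                            return (ENOBUFS);
                    *m0p = m;
                    error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag, map,
                        *m0p, segs, nsegs, BUS_DMA_NOWAIT);
            }
            if (error != 0)
                    return (error);

            /* Flush the payload before handing descriptors to the MAC. */
            bus_dmamap_sync(sc->tsec_tx_mtag, map, BUS_DMASYNC_PREWRITE);
            return (0);
    }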
864 tsec_setfilter(struct tsec_softc *sc) in tsec_setfilter() argument
869 ifp = sc->tsec_ifp; in tsec_setfilter()
870 flags = TSEC_READ(sc, TSEC_REG_RCTRL); in tsec_setfilter()
878 TSEC_WRITE(sc, TSEC_REG_RCTRL, flags); in tsec_setfilter()
888 struct tsec_softc *sc = if_getsoftc(ifp); in tsec_poll() local
893 TSEC_GLOBAL_LOCK(sc); in tsec_poll()
895 TSEC_GLOBAL_UNLOCK(sc); in tsec_poll()
900 tsec_error_intr_locked(sc, count); in tsec_poll()
903 ie = TSEC_READ(sc, TSEC_REG_IEVENT); in tsec_poll()
904 TSEC_WRITE(sc, TSEC_REG_IEVENT, ie); in tsec_poll()
907 tsec_transmit_intr_locked(sc); in tsec_poll()
909 TSEC_GLOBAL_TO_RECEIVE_LOCK(sc); in tsec_poll()
911 rx_npkts = tsec_receive_intr_locked(sc, count); in tsec_poll()
913 TSEC_RECEIVE_UNLOCK(sc); in tsec_poll()
922 struct tsec_softc *sc = if_getsoftc(ifp); in tsec_ioctl() local
928 TSEC_GLOBAL_LOCK(sc); in tsec_ioctl()
929 if (tsec_set_mtu(sc, ifr->ifr_mtu)) in tsec_ioctl()
933 TSEC_GLOBAL_UNLOCK(sc); in tsec_ioctl()
936 TSEC_GLOBAL_LOCK(sc); in tsec_ioctl()
939 if ((sc->tsec_if_flags ^ if_getflags(ifp)) & in tsec_ioctl()
941 tsec_setfilter(sc); in tsec_ioctl()
943 if ((sc->tsec_if_flags ^ if_getflags(ifp)) & in tsec_ioctl()
945 tsec_setup_multicast(sc); in tsec_ioctl()
947 tsec_init_locked(sc); in tsec_ioctl()
949 tsec_stop(sc); in tsec_ioctl()
951 sc->tsec_if_flags = if_getflags(ifp); in tsec_ioctl()
952 TSEC_GLOBAL_UNLOCK(sc); in tsec_ioctl()
957 TSEC_GLOBAL_LOCK(sc); in tsec_ioctl()
958 tsec_setup_multicast(sc); in tsec_ioctl()
959 TSEC_GLOBAL_UNLOCK(sc); in tsec_ioctl()
963 error = ifmedia_ioctl(ifp, ifr, &sc->tsec_mii->mii_media, in tsec_ioctl()
968 if ((mask & IFCAP_HWCSUM) && sc->is_etsec) { in tsec_ioctl()
969 TSEC_GLOBAL_LOCK(sc); in tsec_ioctl()
972 tsec_offload_setup(sc); in tsec_ioctl()
973 TSEC_GLOBAL_UNLOCK(sc); in tsec_ioctl()
982 TSEC_GLOBAL_LOCK(sc); in tsec_ioctl()
984 tsec_intrs_ctl(sc, 0); in tsec_ioctl()
986 TSEC_GLOBAL_UNLOCK(sc); in tsec_ioctl()
989 TSEC_GLOBAL_LOCK(sc); in tsec_ioctl()
991 tsec_intrs_ctl(sc, 1); in tsec_ioctl()
993 TSEC_GLOBAL_UNLOCK(sc); in tsec_ioctl()
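The SIOCSIFFLAGS branch above XORs the cached interface flags against the new ones so that a PROMISC or ALLMULTI toggle only reprograms the receive filter instead of reinitializing the whole controller. A sketch of that branch, with the surrounding up/running checks filled in as assumptions:

    if ((if_getflags(ifp) & IFF_UP) != 0) {
            if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
                    /* Only react to the flags that actually changed. */
                    if ((sc->tsec_if_flags ^ if_getflags(ifp)) & IFF_PROMISC)
                            tsec_setfilter(sc);
                    if ((sc->tsec_if_flags ^ if_getflags(ifp)) & IFF_ALLMULTI)
                            tsec_setup_multicast(sc);
            } else
                    tsec_init_locked(sc);
    } else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
            tsec_stop(sc);

    /* Remember the flags we just acted on for the next delta check. */
    sc->tsec_if_flags = if_getflags(ifp);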
1012 struct tsec_softc *sc = if_getsoftc(ifp); in tsec_ifmedia_upd() local
1015 TSEC_TRANSMIT_LOCK(sc); in tsec_ifmedia_upd()
1017 mii = sc->tsec_mii; in tsec_ifmedia_upd()
1020 TSEC_TRANSMIT_UNLOCK(sc); in tsec_ifmedia_upd()
1027 struct tsec_softc *sc = if_getsoftc(ifp); in tsec_ifmedia_sts() local
1030 TSEC_TRANSMIT_LOCK(sc); in tsec_ifmedia_sts()
1032 mii = sc->tsec_mii; in tsec_ifmedia_sts()
1038 TSEC_TRANSMIT_UNLOCK(sc); in tsec_ifmedia_sts()
1165 tsec_free_dma(struct tsec_softc *sc) in tsec_free_dma() argument
1171 if (sc->tx_bufmap[i].map_initialized) in tsec_free_dma()
1172 bus_dmamap_destroy(sc->tsec_tx_mtag, in tsec_free_dma()
1173 sc->tx_bufmap[i].map); in tsec_free_dma()
1175 bus_dma_tag_destroy(sc->tsec_tx_mtag); in tsec_free_dma()
1179 if (sc->rx_data[i].mbuf) { in tsec_free_dma()
1181 bus_dmamap_sync(sc->tsec_rx_mtag, sc->rx_data[i].map, in tsec_free_dma()
1183 bus_dmamap_unload(sc->tsec_rx_mtag, in tsec_free_dma()
1184 sc->rx_data[i].map); in tsec_free_dma()
1187 m_freem(sc->rx_data[i].mbuf); in tsec_free_dma()
1190 if (sc->rx_data[i].map != NULL) in tsec_free_dma()
1191 bus_dmamap_destroy(sc->tsec_rx_mtag, in tsec_free_dma()
1192 sc->rx_data[i].map); in tsec_free_dma()
1195 bus_dma_tag_destroy(sc->tsec_rx_mtag); in tsec_free_dma()
1198 tsec_free_dma_desc(sc->tsec_tx_dtag, sc->tsec_tx_dmap, in tsec_free_dma()
1199 sc->tsec_tx_vaddr); in tsec_free_dma()
1200 tsec_free_dma_desc(sc->tsec_rx_dtag, sc->tsec_rx_dmap, in tsec_free_dma()
1201 sc->tsec_rx_vaddr); in tsec_free_dma()
1205 tsec_stop(struct tsec_softc *sc) in tsec_stop() argument
1210 TSEC_GLOBAL_LOCK_ASSERT(sc); in tsec_stop()
1212 ifp = sc->tsec_ifp; in tsec_stop()
1215 callout_stop(&sc->tsec_callout); in tsec_stop()
1217 sc->tsec_watchdog = 0; in tsec_stop()
1220 tsec_intrs_ctl(sc, 0); in tsec_stop()
1221 tsec_dma_ctl(sc, 0); in tsec_stop()
1224 while (sc->tx_idx_tail != sc->tx_idx_head) { in tsec_stop()
1225 bus_dmamap_sync(sc->tsec_tx_mtag, in tsec_stop()
1226 sc->tx_bufmap[sc->tx_idx_tail].map, in tsec_stop()
1228 bus_dmamap_unload(sc->tsec_tx_mtag, in tsec_stop()
1229 sc->tx_bufmap[sc->tx_idx_tail].map); in tsec_stop()
1230 m_freem(sc->tx_bufmap[sc->tx_idx_tail].mbuf); in tsec_stop()
1231 sc->tx_idx_tail = (sc->tx_idx_tail + 1) in tsec_stop()
1236 tmpval = TSEC_READ(sc, TSEC_REG_MACCFG1); in tsec_stop()
1238 TSEC_WRITE(sc, TSEC_REG_MACCFG1, tmpval); in tsec_stop()
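tsec_stop() drains every in-flight transmit buffer between the tail and head indices; the wrap arithmetic at line 1231 is cut off by the search. A sketch of the loop, with the ring-size constant and the modulo form assumed (a power-of-two ring could equally use a mask):

    while (sc->tx_idx_tail != sc->tx_idx_head) {
            bus_dmamap_sync(sc->tsec_tx_mtag,
                sc->tx_bufmap[sc->tx_idx_tail].map,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(sc->tsec_tx_mtag,
                sc->tx_bufmap[sc->tx_idx_tail].map);
            m_freem(sc->tx_bufmap[sc->tx_idx_tail].mbuf);
            /* Advance the tail, wrapping at the end of the ring. */
            sc->tx_idx_tail = (sc->tx_idx_tail + 1) % TSEC_TX_NUM_DESC;
    }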
1245 struct tsec_softc *sc = arg; in tsec_tick() local
1249 TSEC_GLOBAL_LOCK(sc); in tsec_tick()
1251 tsec_watchdog(sc); in tsec_tick()
1253 ifp = sc->tsec_ifp; in tsec_tick()
1254 link = sc->tsec_link; in tsec_tick()
1256 mii_tick(sc->tsec_mii); in tsec_tick()
1258 if (link == 0 && sc->tsec_link == 1 && in tsec_tick()
1263 callout_reset(&sc->tsec_callout, hz, tsec_tick, sc); in tsec_tick()
1265 TSEC_GLOBAL_UNLOCK(sc); in tsec_tick()
1275 tsec_receive_intr_locked(struct tsec_softc *sc, int count) in tsec_receive_intr_locked() argument
1285 TSEC_RECEIVE_LOCK_ASSERT(sc); in tsec_receive_intr_locked()
1287 ifp = sc->tsec_ifp; in tsec_receive_intr_locked()
1288 rx_data = sc->rx_data; in tsec_receive_intr_locked()
1291 bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, in tsec_receive_intr_locked()
1298 rx_desc = TSEC_GET_CUR_RX_DESC(sc); in tsec_receive_intr_locked()
1307 TSEC_WRITE(sc, TSEC_REG_IEVENT, in tsec_receive_intr_locked()
1313 TSEC_BACK_CUR_RX_DESC(sc); in tsec_receive_intr_locked()
1323 if (sc->frame != NULL) { in tsec_receive_intr_locked()
1324 m_free(sc->frame); in tsec_receive_intr_locked()
1325 sc->frame = NULL; in tsec_receive_intr_locked()
1332 i = TSEC_GET_CUR_RX_DESC_CNT(sc); in tsec_receive_intr_locked()
1336 if (sc->frame != NULL) { in tsec_receive_intr_locked()
1338 m->m_len -= m_length(sc->frame, NULL); in tsec_receive_intr_locked()
1341 m_cat(sc->frame, m); in tsec_receive_intr_locked()
1343 sc->frame = m; in tsec_receive_intr_locked()
1349 m = sc->frame; in tsec_receive_intr_locked()
1350 sc->frame = NULL; in tsec_receive_intr_locked()
1353 if (tsec_new_rxbuf(sc->tsec_rx_mtag, rx_data[i].map, in tsec_receive_intr_locked()
1360 TSEC_BACK_CUR_RX_DESC(sc); in tsec_receive_intr_locked()
1376 if (sc->is_etsec) in tsec_receive_intr_locked()
1377 tsec_offload_process_frame(sc, m); in tsec_receive_intr_locked()
1379 TSEC_RECEIVE_UNLOCK(sc); in tsec_receive_intr_locked()
1381 TSEC_RECEIVE_LOCK(sc); in tsec_receive_intr_locked()
1386 bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, in tsec_receive_intr_locked()
1397 TSEC_WRITE(sc, TSEC_REG_RSTAT, TSEC_RSTAT_QHLT); in tsec_receive_intr_locked()
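The receive path drops the RX lock around line 1379 before handing a completed frame to the stack and re-takes it afterwards; calling into the network stack with a driver lock held risks lock-order reversals. A sketch of that hand-off (the checksum hook mirrors lines 1376-1377):

    /* Completed frame: stamp the receive interface and any offload bits. */
    m->m_pkthdr.rcvif = ifp;
    if (sc->is_etsec)
            tsec_offload_process_frame(sc, m);

    /* Never call into the stack with the RX lock held. */
    TSEC_RECEIVE_UNLOCK(sc);
    if_input(ifp, m);
    TSEC_RECEIVE_LOCK(sc);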
1404 struct tsec_softc *sc = arg; in tsec_receive_intr() local
1406 TSEC_RECEIVE_LOCK(sc); in tsec_receive_intr()
1409 if (if_getcapenable(sc->tsec_ifp) & IFCAP_POLLING) { in tsec_receive_intr()
1410 TSEC_RECEIVE_UNLOCK(sc); in tsec_receive_intr()
1416 TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXB | TSEC_IEVENT_RXF); in tsec_receive_intr()
1417 tsec_receive_intr_locked(sc, -1); in tsec_receive_intr()
1419 TSEC_RECEIVE_UNLOCK(sc); in tsec_receive_intr()
1423 tsec_transmit_intr_locked(struct tsec_softc *sc) in tsec_transmit_intr_locked() argument
1428 TSEC_TRANSMIT_LOCK_ASSERT(sc); in tsec_transmit_intr_locked()
1430 ifp = sc->tsec_ifp; in tsec_transmit_intr_locked()
1433 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, TSEC_READ(sc, TSEC_REG_MON_TNCL)); in tsec_transmit_intr_locked()
1436 TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0); in tsec_transmit_intr_locked()
1437 TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0); in tsec_transmit_intr_locked()
1438 TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0); in tsec_transmit_intr_locked()
1439 TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0); in tsec_transmit_intr_locked()
1440 TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0); in tsec_transmit_intr_locked()
1442 bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, in tsec_transmit_intr_locked()
1445 tx_idx = sc->tx_idx_tail; in tsec_transmit_intr_locked()
1446 while (tx_idx != sc->tx_idx_head) { in tsec_transmit_intr_locked()
1450 tx_desc = &sc->tsec_tx_vaddr[tx_idx]; in tsec_transmit_intr_locked()
1455 tx_bufmap = &sc->tx_bufmap[tx_idx]; in tsec_transmit_intr_locked()
1463 bus_dmamap_sync(sc->tsec_tx_mtag, tx_bufmap->map, in tsec_transmit_intr_locked()
1465 bus_dmamap_unload(sc->tsec_tx_mtag, tx_bufmap->map); in tsec_transmit_intr_locked()
1471 sc->tx_idx_tail = tx_idx; in tsec_transmit_intr_locked()
1472 bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, in tsec_transmit_intr_locked()
1478 if (sc->tx_idx_tail == sc->tx_idx_head) in tsec_transmit_intr_locked()
1479 sc->tsec_watchdog = 0; in tsec_transmit_intr_locked()
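The transmit-completion walk above advances the tail index until it meets the head or reaches a descriptor the controller still owns. A sketch of that reclaim loop; TSEC_TXBD_R is assumed here as the name of the ready/ownership bit, and the modulo advance stands in for whatever wrap arithmetic the driver uses (locals match the names in the listing):

    tx_idx = sc->tx_idx_tail;
    while (tx_idx != sc->tx_idx_head) {
            tx_desc = &sc->tsec_tx_vaddr[tx_idx];
            if (tx_desc->flags & TSEC_TXBD_R)
                    break;                  /* still owned by the hardware */

            tx_bufmap = &sc->tx_bufmap[tx_idx];
            tx_idx = (tx_idx + 1) % TSEC_TX_NUM_DESC;
            if (tx_bufmap->mbuf == NULL)
                    continue;               /* continuation descriptor */

            /* Completed chain: sync, unload, free, count it as sent. */
            bus_dmamap_sync(sc->tsec_tx_mtag, tx_bufmap->map,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(sc->tsec_tx_mtag, tx_bufmap->map);
            m_freem(tx_bufmap->mbuf);
            tx_bufmap->mbuf = NULL;
            if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
    }
    sc->tx_idx_tail = tx_idx;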
1485 struct tsec_softc *sc = arg; in tsec_transmit_intr() local
1487 TSEC_TRANSMIT_LOCK(sc); in tsec_transmit_intr()
1490 if (if_getcapenable(sc->tsec_ifp) & IFCAP_POLLING) { in tsec_transmit_intr()
1491 TSEC_TRANSMIT_UNLOCK(sc); in tsec_transmit_intr()
1496 TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_TXB | TSEC_IEVENT_TXF); in tsec_transmit_intr()
1497 tsec_transmit_intr_locked(sc); in tsec_transmit_intr()
1499 TSEC_TRANSMIT_UNLOCK(sc); in tsec_transmit_intr()
1503 tsec_error_intr_locked(struct tsec_softc *sc, int count) in tsec_error_intr_locked() argument
1508 TSEC_GLOBAL_LOCK_ASSERT(sc); in tsec_error_intr_locked()
1510 ifp = sc->tsec_ifp; in tsec_error_intr_locked()
1512 eflags = TSEC_READ(sc, TSEC_REG_IEVENT); in tsec_error_intr_locked()
1515 TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXC | TSEC_IEVENT_BSY | in tsec_error_intr_locked()
1527 TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT); in tsec_error_intr_locked()
1542 tsec_init_locked(sc); in tsec_error_intr_locked()
1555 struct tsec_softc *sc = arg; in tsec_error_intr() local
1557 TSEC_GLOBAL_LOCK(sc); in tsec_error_intr()
1558 tsec_error_intr_locked(sc, -1); in tsec_error_intr()
1559 TSEC_GLOBAL_UNLOCK(sc); in tsec_error_intr()
1565 struct tsec_softc *sc; in tsec_miibus_readreg() local
1569 sc = device_get_softc(dev); in tsec_miibus_readreg()
1572 TSEC_PHY_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg); in tsec_miibus_readreg()
1573 TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCOM, 0); in tsec_miibus_readreg()
1574 TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCOM, TSEC_MIIMCOM_READCYCLE); in tsec_miibus_readreg()
1576 timeout = tsec_mii_wait(sc, TSEC_MIIMIND_NOTVALID | TSEC_MIIMIND_BUSY); in tsec_miibus_readreg()
1577 rv = TSEC_PHY_READ(sc, TSEC_REG_MIIMSTAT); in tsec_miibus_readreg()
1589 struct tsec_softc *sc; in tsec_miibus_writereg() local
1592 sc = device_get_softc(dev); in tsec_miibus_writereg()
1595 TSEC_PHY_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg); in tsec_miibus_writereg()
1596 TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCON, value); in tsec_miibus_writereg()
1597 timeout = tsec_mii_wait(sc, TSEC_MIIMIND_BUSY); in tsec_miibus_writereg()
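The MII write cycle is nearly complete above: program MIIMADD with the PHY/register pair, write the value to MIIMCON, then wait for BUSY to clear. A completed sketch, with the PHY locking and the timeout diagnostic added as assumptions:

    static int
    tsec_miibus_writereg(device_t dev, int phy, int reg, int value)
    {
            struct tsec_softc *sc;
            int timeout;

            sc = device_get_softc(dev);

            TSEC_PHY_LOCK(sc);      /* assumed, mirroring the init path */
            TSEC_PHY_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg);
            TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCON, value);
            timeout = tsec_mii_wait(sc, TSEC_MIIMIND_BUSY);
            TSEC_PHY_UNLOCK(sc);

            if (timeout == 0)
                    device_printf(dev, "Timeout while writing to PHY!\n");

            return (0);
    }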
1609 struct tsec_softc *sc; in tsec_miibus_statchg() local
1614 sc = device_get_softc(dev); in tsec_miibus_statchg()
1615 mii = sc->tsec_mii; in tsec_miibus_statchg()
1618 tmp = TSEC_READ(sc, TSEC_REG_MACCFG2) & ~TSEC_MACCFG2_IF; in tsec_miibus_statchg()
1629 sc->tsec_link = link; in tsec_miibus_statchg()
1634 sc->tsec_link = link; in tsec_miibus_statchg()
1640 sc->tsec_link = 0; in tsec_miibus_statchg()
1643 sc->tsec_link = 0; in tsec_miibus_statchg()
1649 TSEC_WRITE(sc, TSEC_REG_MACCFG2, tmp); in tsec_miibus_statchg()
1652 id = TSEC_READ(sc, TSEC_REG_ID2); in tsec_miibus_statchg()
1654 ecntrl = TSEC_READ(sc, TSEC_REG_ECNTRL) & ~TSEC_ECNTRL_R100M; in tsec_miibus_statchg()
1656 TSEC_WRITE(sc, TSEC_REG_ECNTRL, ecntrl); in tsec_miibus_statchg()
1661 tsec_add_sysctls(struct tsec_softc *sc) in tsec_add_sysctls() argument
1667 ctx = device_get_sysctl_ctx(sc->dev); in tsec_add_sysctls()
1668 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); in tsec_add_sysctls()
1674 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, TSEC_IC_RX, in tsec_add_sysctls()
1677 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, TSEC_IC_RX, in tsec_add_sysctls()
1681 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, TSEC_IC_TX, in tsec_add_sysctls()
1684 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, TSEC_IC_TX, in tsec_add_sysctls()
1721 struct tsec_softc *sc = (struct tsec_softc *)arg1; in tsec_sysctl_ic_time() local
1723 time = (arg2 == TSEC_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time; in tsec_sysctl_ic_time()
1732 TSEC_IC_LOCK(sc); in tsec_sysctl_ic_time()
1734 sc->rx_ic_time = time; in tsec_sysctl_ic_time()
1735 tsec_set_rxic(sc); in tsec_sysctl_ic_time()
1737 sc->tx_ic_time = time; in tsec_sysctl_ic_time()
1738 tsec_set_txic(sc); in tsec_sysctl_ic_time()
1740 TSEC_IC_UNLOCK(sc); in tsec_sysctl_ic_time()
1750 struct tsec_softc *sc = (struct tsec_softc *)arg1; in tsec_sysctl_ic_count() local
1752 count = (arg2 == TSEC_IC_RX) ? sc->rx_ic_count : sc->tx_ic_count; in tsec_sysctl_ic_count()
1761 TSEC_IC_LOCK(sc); in tsec_sysctl_ic_count()
1763 sc->rx_ic_count = count; in tsec_sysctl_ic_count()
1764 tsec_set_rxic(sc); in tsec_sysctl_ic_count()
1766 sc->tx_ic_count = count; in tsec_sysctl_ic_count()
1767 tsec_set_txic(sc); in tsec_sysctl_ic_count()
1769 TSEC_IC_UNLOCK(sc); in tsec_sysctl_ic_count()
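Both coalescing sysctl handlers follow the same shape: copy the current value, run sysctl_handle_int(), bail out on error or on a read-only request, validate, then apply under the IC lock. A sketch of the time handler; the 16-bit upper bound is an assumption about the hardware field width:

    static int
    tsec_sysctl_ic_time(SYSCTL_HANDLER_ARGS)
    {
            struct tsec_softc *sc = (struct tsec_softc *)arg1;
            uint32_t time;
            int error;

            time = (arg2 == TSEC_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;

            error = sysctl_handle_int(oidp, &time, 0, req);
            if (error != 0 || req->newptr == NULL)
                    return (error);         /* error, or read-only access */

            if (time > 65535)
                    return (EINVAL);        /* assumed field width */

            TSEC_IC_LOCK(sc);
            if (arg2 == TSEC_IC_RX) {
                    sc->rx_ic_time = time;
                    tsec_set_rxic(sc);
            } else {
                    sc->tx_ic_time = time;
                    tsec_set_txic(sc);
            }
            TSEC_IC_UNLOCK(sc);

            return (0);
    }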
1775 tsec_set_rxic(struct tsec_softc *sc) in tsec_set_rxic() argument
1779 if (sc->rx_ic_count == 0 || sc->rx_ic_time == 0) in tsec_set_rxic()
1784 rxic_val |= (sc->rx_ic_count << 21); in tsec_set_rxic()
1785 rxic_val |= sc->rx_ic_time; in tsec_set_rxic()
1788 TSEC_WRITE(sc, TSEC_REG_RXIC, rxic_val); in tsec_set_rxic()
1792 tsec_set_txic(struct tsec_softc *sc) in tsec_set_txic() argument
1796 if (sc->tx_ic_count == 0 || sc->tx_ic_time == 0) in tsec_set_txic()
1801 txic_val |= (sc->tx_ic_count << 21); in tsec_set_txic()
1802 txic_val |= sc->tx_ic_time; in tsec_set_txic()
1805 TSEC_WRITE(sc, TSEC_REG_TXIC, txic_val); in tsec_set_txic()
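tsec_set_rxic() and tsec_set_txic() pack the coalescing parameters into a single register: the frame-count threshold at bit 21 and up, the timer threshold in the low bits, plus an enable bit; a zero count or time disables coalescing entirely. A sketch of the RX side, with TSEC_IC_ICEN assumed as the enable-bit name:

    static void
    tsec_set_rxic(struct tsec_softc *sc)
    {
            uint32_t rxic_val;

            if (sc->rx_ic_count == 0 || sc->rx_ic_time == 0)
                    rxic_val = 0;                   /* coalescing off */
            else {
                    rxic_val = TSEC_IC_ICEN;        /* assumed enable bit */
                    rxic_val |= (sc->rx_ic_count << 21);
                    rxic_val |= sc->rx_ic_time;
            }
            TSEC_WRITE(sc, TSEC_REG_RXIC, rxic_val);
    }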
1809 tsec_offload_setup(struct tsec_softc *sc) in tsec_offload_setup() argument
1811 if_t ifp = sc->tsec_ifp; in tsec_offload_setup()
1814 TSEC_GLOBAL_LOCK_ASSERT(sc); in tsec_offload_setup()
1816 reg = TSEC_READ(sc, TSEC_REG_TCTRL); in tsec_offload_setup()
1824 TSEC_WRITE(sc, TSEC_REG_TCTRL, reg); in tsec_offload_setup()
1826 reg = TSEC_READ(sc, TSEC_REG_RCTRL); in tsec_offload_setup()
1834 TSEC_WRITE(sc, TSEC_REG_RCTRL, reg); in tsec_offload_setup()
1838 tsec_offload_process_frame(struct tsec_softc *sc, struct mbuf *m) in tsec_offload_process_frame() argument
1844 TSEC_RECEIVE_LOCK_ASSERT(sc); in tsec_offload_process_frame()
1886 tsec_setup_multicast(struct tsec_softc *sc) in tsec_setup_multicast() argument
1889 if_t ifp = sc->tsec_ifp; in tsec_setup_multicast()
1892 TSEC_GLOBAL_LOCK_ASSERT(sc); in tsec_setup_multicast()
1896 TSEC_WRITE(sc, TSEC_REG_GADDR(i), 0xFFFFFFFF); in tsec_setup_multicast()
1904 TSEC_WRITE(sc, TSEC_REG_GADDR(i), hashtable[i]); in tsec_setup_multicast()
1908 tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu) in tsec_set_mtu() argument
1913 TSEC_GLOBAL_LOCK_ASSERT(sc); in tsec_set_mtu()
1916 TSEC_WRITE(sc, TSEC_REG_MAXFRM, mtu); in tsec_set_mtu()