Lines Matching +full:dma +full:safe +full:map

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski
5 * Copyright (C) 2006-2007 Semihalf, Piotr Kruszynski
30 * Freescale integrated Three-Speed Ethernet Controller (TSEC) driver.
80 static int tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
131 /* Stop DMA engine if enabled by firmware */ in tsec_attach()
141 sc->rx_ic_time = 768; in tsec_attach()
142 sc->rx_ic_count = 16; in tsec_attach()
143 sc->tx_ic_time = 768; in tsec_attach()
144 sc->tx_ic_count = 16; in tsec_attach()
149 /* Allocate a busdma tag and DMA safe memory for TX descriptors. */ in tsec_attach()
150 error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_tx_dtag, in tsec_attach()
151 &sc->tsec_tx_dmap, sizeof(*sc->tsec_tx_vaddr) * TSEC_TX_NUM_DESC, in tsec_attach()
152 (void **)&sc->tsec_tx_vaddr, &sc->tsec_tx_raddr, "TX"); in tsec_attach()
159 /* Allocate a busdma tag and DMA safe memory for RX descriptors. */ in tsec_attach()
160 error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_rx_dtag, in tsec_attach()
161 &sc->tsec_rx_dmap, sizeof(*sc->tsec_rx_vaddr) * TSEC_RX_NUM_DESC, in tsec_attach()
162 (void **)&sc->tsec_rx_vaddr, &sc->tsec_rx_raddr, "RX"); in tsec_attach()
174 MCLBYTES * (TSEC_TX_NUM_DESC - 1), /* maxsize */ in tsec_attach()
178 &sc->tsec_tx_mtag); /* dmat */ in tsec_attach()
180 device_printf(sc->dev, "failed to allocate busdma tag " in tsec_attach()
196 &sc->tsec_rx_mtag); /* dmat */ in tsec_attach()
198 device_printf(sc->dev, "failed to allocate busdma tag " in tsec_attach()
206 error = bus_dmamap_create(sc->tsec_tx_mtag, 0, in tsec_attach()
207 &sc->tx_bufmap[i].map); in tsec_attach()
209 device_printf(sc->dev, "failed to init TX ring\n"); in tsec_attach()
213 sc->tx_bufmap[i].map_initialized = 1; in tsec_attach()
218 error = bus_dmamap_create(sc->tsec_rx_mtag, 0, in tsec_attach()
219 &sc->rx_data[i].map); in tsec_attach()
221 device_printf(sc->dev, "failed to init RX ring\n"); in tsec_attach()
225 sc->rx_data[i].mbuf = NULL; in tsec_attach()
230 error = tsec_new_rxbuf(sc->tsec_rx_mtag, sc->rx_data[i].map, in tsec_attach()
231 &sc->rx_data[i].mbuf, &sc->rx_data[i].paddr); in tsec_attach()
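
The attach path above (lines 206-233) is the standard busdma per-buffer setup: create one DMA map per ring slot, then pre-load every RX slot with an initial cluster. A minimal sketch of that loop shape, assuming illustrative names (struct rxslot, new_rxbuf()) rather than the driver's:

    /* Hedged sketch: per-slot DMA maps plus an initial buffer in each
     * RX slot.  struct rxslot and new_rxbuf() are illustrative names,
     * not the driver's. */
    struct rxslot { bus_dmamap_t map; struct mbuf *mbuf; uint32_t paddr; };

    static int
    rx_ring_setup_sketch(bus_dma_tag_t mtag, struct rxslot *rxd, int n)
    {
        int error, i;

        for (i = 0; i < n; i++) {
            error = bus_dmamap_create(mtag, 0, &rxd[i].map);
            if (error != 0)
                return (error);     /* caller tears down prior slots */
            rxd[i].mbuf = NULL;
            /* Allocate and DMA-load the first cluster for this slot. */
            error = new_rxbuf(mtag, rxd[i].map, &rxd[i].mbuf,
                &rxd[i].paddr);
            if (error != 0)
                return (error);
        }
        return (0);
    }
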
233 device_printf(sc->dev, "can't load rx DMA map %d, " in tsec_attach()
241 ifp = sc->tsec_ifp = if_alloc(IFT_ETHER); in tsec_attach()
243 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); in tsec_attach()
249 if_setsendqlen(ifp, TSEC_TX_NUM_DESC - 1); in tsec_attach()
253 if (sc->is_etsec) in tsec_attach()
264 error = mii_attach(sc->dev, &sc->tsec_miibus, ifp, tsec_ifmedia_upd, in tsec_attach()
265 tsec_ifmedia_sts, BMSR_DEFCAPMASK, sc->phyaddr, MII_OFFSET_ANY, in tsec_attach()
268 device_printf(sc->dev, "attaching PHYs failed\n"); in tsec_attach()
270 sc->tsec_ifp = NULL; in tsec_attach()
274 sc->tsec_mii = device_get_softc(sc->tsec_miibus); in tsec_attach()
287 if (sc->tsec_ifp != NULL) { in tsec_detach()
289 if (if_getcapenable(sc->tsec_ifp) & IFCAP_POLLING) in tsec_detach()
290 ether_poll_deregister(sc->tsec_ifp); in tsec_detach()
294 if (sc->sc_rres) in tsec_detach()
295 tsec_shutdown(sc->dev); in tsec_detach()
298 ether_ifdetach(sc->tsec_ifp); in tsec_detach()
299 if_free(sc->tsec_ifp); in tsec_detach()
300 sc->tsec_ifp = NULL; in tsec_detach()
303 /* Free DMA resources */ in tsec_detach()
360 while ((TSEC_PHY_READ(sc, TSEC_REG_MIIMIND) & flags) && --timeout) in tsec_mii_wait()
369 struct tsec_desc *tx_desc = sc->tsec_tx_vaddr; in tsec_init_locked()
370 struct tsec_desc *rx_desc = sc->tsec_rx_vaddr; in tsec_init_locked()
371 if_t ifp = sc->tsec_ifp; in tsec_init_locked()
404 * XXX kludge - use circumstantial evidence to program ECNTRL in tsec_init_locked()
441 mii_mediachg(sc->tsec_mii); in tsec_init_locked()
491 TSEC_WRITE(sc, TSEC_REG_TBASE, sc->tsec_tx_raddr); in tsec_init_locked()
492 TSEC_WRITE(sc, TSEC_REG_RBASE, sc->tsec_rx_raddr); in tsec_init_locked()
497 tx_desc[i].flags = ((i == TSEC_TX_NUM_DESC - 1) ? in tsec_init_locked()
500 bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, in tsec_init_locked()
504 rx_desc[i].bufptr = sc->rx_data[i].paddr; in tsec_init_locked()
507 ((i == TSEC_RX_NUM_DESC - 1) ? TSEC_RXBD_W : 0); in tsec_init_locked()
509 bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, in tsec_init_locked()
542 /* Step 25: Setup TCP/IP Off-Load engine */ in tsec_init_locked()
543 if (sc->is_etsec) in tsec_init_locked()
552 sc->tsec_if_flags = if_getflags(ifp); in tsec_init_locked()
553 sc->tsec_watchdog = 0; in tsec_init_locked()
556 callout_reset(&sc->tsec_callout, hz, tsec_tick, sc); in tsec_init_locked()
573 curmac = (char *)if_getlladdr(sc->tsec_ifp); in tsec_set_mac_address()
577 macbufp[ETHER_ADDR_LEN-i] = curmac[i-1]; in tsec_set_mac_address()
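
The loop at line 577 stores the station address reversed (macbufp[ETHER_ADDR_LEN-i] takes curmac[i-1]), presumably because the MAC station-address registers expect the bytes in the opposite order. A standalone illustration of the same indexing (values and buffer layout are mine, not the driver's):

    #include <stdio.h>
    #define ETHER_ADDR_LEN 6

    int main(void) {
        unsigned char curmac[ETHER_ADDR_LEN] = {0x00, 0x04, 0x9f, 0x01, 0x02, 0x03};
        unsigned char macbuf[8] = {0};   /* padded like a register pair */
        int i;

        /* Same indexing as line 577: reverse the six address bytes. */
        for (i = 1; i <= ETHER_ADDR_LEN; i++)
            macbuf[ETHER_ADDR_LEN - i] = curmac[i - 1];

        for (i = 0; i < ETHER_ADDR_LEN; i++)
            printf("%02x ", macbuf[i]);
        printf("\n");   /* prints: 03 02 01 9f 04 00 */
        return 0;
    }
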
585 * DMA control function; if the state argument is: in tsec_dma_ctl()
586 * 0 - DMA engine will be disabled
587 * 1 - DMA engine will be enabled
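
tsec_dma_ctl() is one helper for both directions of the start/stop handshake; lines 625-627 below show the stop side polling IEVENT with a bounded busy-wait. A hedged sketch of the general shape, with the register and bit names (REG_DMACTRL, DMACTRL_GRS/GTS, the IEVENT bits, the timeout constants) left as placeholders for the chip-specific definitions:

    /* Hedged sketch of a DMA enable/disable helper.  All register and
     * bit names here are illustrative placeholders. */
    static void
    dma_ctl_sketch(struct softc *sc, int state)
    {
        uint32_t dma_flags = REG_READ(sc, REG_DMACTRL);

        if (state == 0)
            dma_flags |= DMACTRL_GRS | DMACTRL_GTS;    /* graceful stop */
        else
            dma_flags &= ~(DMACTRL_GRS | DMACTRL_GTS); /* resume */
        REG_WRITE(sc, REG_DMACTRL, dma_flags);

        if (state == 0) {
            /* Bounded wait for the engine to acknowledge the stop. */
            int timeout = DMA_STOP_TIMEOUT;
            while (--timeout &&
                !(REG_READ(sc, REG_IEVENT) & (IEVENT_GRSC | IEVENT_GTSC)))
                DELAY(10);
        }
    }
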
595 dev = sc->dev; in tsec_dma_ctl()
625 /* Wait for DMA stop */ in tsec_dma_ctl()
627 while (--timeout && (!(TSEC_READ(sc, TSEC_REG_IEVENT) & in tsec_dma_ctl()
642 * 0 - all TSEC interrupts will be masked
643 * 1 - all TSEC interrupts will be unmasked
650 dev = sc->dev; in tsec_intrs_ctl()
691 if (sc->tsec_watchdog == 0 || --sc->tsec_watchdog > 0) in tsec_watchdog()
694 ifp = sc->tsec_ifp; in tsec_watchdog()
727 if (sc->tsec_link == 0) in tsec_start_locked()
730 bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, in tsec_start_locked()
745 /* Insert TCP/IP Off-load frame control block */ in tsec_start_locked()
747 csum_flags = m0->m_pkthdr.csum_flags; in tsec_start_locked()
766 tx_fcb->flags = fcb_flags; in tsec_start_locked()
767 tx_fcb->l3_offset = ETHER_HDR_LEN; in tsec_start_locked()
768 tx_fcb->l4_offset = sizeof(struct ip); in tsec_start_locked()
773 bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, in tsec_start_locked()
779 sc->tsec_watchdog = 5; in tsec_start_locked()
795 tx_idx = sc->tx_idx_head; in tsec_encap()
796 tx_bufmap = &sc->tx_bufmap[tx_idx]; in tsec_encap()
798 /* Create mapping in DMA memory */ in tsec_encap()
799 error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag, tx_bufmap->map, m0, in tsec_encap()
810 error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag, in tsec_encap()
811 tx_bufmap->map, m0, segs, &nsegs, BUS_DMA_NOWAIT); in tsec_encap()
819 bus_dmamap_sync(sc->tsec_tx_mtag, tx_bufmap->map, in tsec_encap()
821 tx_bufmap->mbuf = m0; in tsec_encap()
827 tx_idx = (tx_idx + (uint32_t)nsegs) & (TSEC_TX_NUM_DESC - 1); in tsec_encap()
828 sc->tx_idx_head = tx_idx; in tsec_encap()
830 for (i = nsegs - 1; i >= 0; i--) { in tsec_encap()
833 tx_idx = (tx_idx - 1) & (TSEC_TX_NUM_DESC - 1); in tsec_encap()
834 tx_desc = &sc->tsec_tx_vaddr[tx_idx]; in tsec_encap()
835 tx_desc->length = segs[i].ds_len; in tsec_encap()
836 tx_desc->bufptr = segs[i].ds_addr; in tsec_encap()
847 * - wrap in tsec_encap()
848 * - checksum in tsec_encap()
849 * - ready to send in tsec_encap()
850 * - transmit the CRC sequence after the last data byte in tsec_encap()
851 * - interrupt after the last buffer in tsec_encap()
853 tx_desc->flags = (tx_idx == (TSEC_TX_NUM_DESC - 1) ? in tsec_encap()
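
tsec_encap() relies on TSEC_TX_NUM_DESC being a power of two, so `& (TSEC_TX_NUM_DESC - 1)` wraps the ring index (lines 827-833), and it fills the descriptors last-to-first so that the first segment's ready bit is the final store the hardware can observe. A simplified, self-contained sketch of that walk (struct names and flag values are illustrative; the driver additionally sets checksum/CRC/interrupt bits on the last buffer, as the comment above lists):

    #include <stdint.h>

    #define TX_NUM_DESC 256          /* must be a power of two */
    #define TXBD_W      0x2000       /* illustrative wrap bit   */
    #define TXBD_R      0x8000       /* illustrative ready bit  */

    struct seg    { uint32_t ds_addr; uint16_t ds_len; };
    struct txdesc { uint16_t flags; uint16_t length; uint32_t bufptr; };

    static uint32_t
    fill_backwards(struct txdesc *ring, uint32_t head,
        const struct seg *segs, int nsegs)
    {
        uint32_t idx = (head + (uint32_t)nsegs) & (TX_NUM_DESC - 1);
        uint32_t new_head = idx;
        int i;

        /* Walk last-to-first so the first segment's ready bit is the
         * final store; the hardware never sees a half-built chain. */
        for (i = nsegs - 1; i >= 0; i--) {
            idx = (idx - 1) & (TX_NUM_DESC - 1);
            ring[idx].bufptr = segs[i].ds_addr;
            ring[idx].length = segs[i].ds_len;
            ring[idx].flags = (idx == TX_NUM_DESC - 1 ? TXBD_W : 0) | TXBD_R;
        }
        return (new_head);
    }
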
869 ifp = sc->tsec_ifp; in tsec_setfilter()
929 if (tsec_set_mtu(sc, ifr->ifr_mtu)) in tsec_ioctl()
930 if_setmtu(ifp, ifr->ifr_mtu); in tsec_ioctl()
939 if ((sc->tsec_if_flags ^ if_getflags(ifp)) & in tsec_ioctl()
943 if ((sc->tsec_if_flags ^ if_getflags(ifp)) & in tsec_ioctl()
951 sc->tsec_if_flags = if_getflags(ifp); in tsec_ioctl()
963 error = ifmedia_ioctl(ifp, ifr, &sc->tsec_mii->mii_media, in tsec_ioctl()
967 mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap; in tsec_ioctl()
968 if ((mask & IFCAP_HWCSUM) && sc->is_etsec) { in tsec_ioctl()
971 if_setcapenablebit(ifp, IFCAP_HWCSUM & ifr->ifr_reqcap, 0); in tsec_ioctl()
977 if (ifr->ifr_reqcap & IFCAP_POLLING) { in tsec_ioctl()
1017 mii = sc->tsec_mii; in tsec_ifmedia_upd()
1032 mii = sc->tsec_mii; in tsec_ifmedia_sts()
1035 ifmr->ifm_active = mii->mii_media_active; in tsec_ifmedia_sts()
1036 ifmr->ifm_status = mii->mii_media_status; in tsec_ifmedia_sts()
1042 tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp, in tsec_new_rxbuf() argument
1054 new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size; in tsec_new_rxbuf()
1057 bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD); in tsec_new_rxbuf()
1058 bus_dmamap_unload(tag, map); in tsec_new_rxbuf()
1061 error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs, in tsec_new_rxbuf()
1077 KASSERT(((seg->ds_addr) & (TSEC_RXBUFFER_ALIGNMENT-1)) == 0, in tsec_new_rxbuf()
1080 bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD); in tsec_new_rxbuf()
1083 (*paddr) = seg->ds_addr; in tsec_new_rxbuf()
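
tsec_new_rxbuf() follows the allocate-first refill discipline visible at lines 1054-1083: get the replacement cluster before unloading the old one, so a failed allocation leaves the ring slot with a usable buffer. A hedged sketch of the same flow using standard busdma/mbuf calls (function name is mine; alignment handling abbreviated):

    /* Hedged sketch: allocate a new cluster, swap it into the ring
     * slot, and hand the bus address back to the caller. */
    static int
    new_rxbuf_sketch(bus_dma_tag_t tag, bus_dmamap_t map,
        struct mbuf **mbufp, uint32_t *paddr)
    {
        bus_dma_segment_t seg[1];
        struct mbuf *m;
        int error, nsegs;

        m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);   /* new cluster */
        if (m == NULL)
            return (ENOBUFS);                       /* keep old buffer */
        m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

        if (*mbufp != NULL) {                       /* retire old buffer */
            bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(tag, map);
        }
        error = bus_dmamap_load_mbuf_sg(tag, map, m, seg, &nsegs,
            BUS_DMA_NOWAIT);
        if (error != 0 || nsegs != 1) {
            m_freem(m);
            return (error != 0 ? error : EFBIG);
        }
        bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
        *mbufp = m;
        *paddr = seg->ds_addr;                      /* cf. line 1083 */
        return (0);
    }
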
1094 *paddr = segs->ds_addr; in tsec_map_dma_addr()
1103 /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */ in tsec_alloc_dma_desc()
1124 device_printf(dev, "failed to allocate %s DMA safe memory\n", in tsec_alloc_dma_desc()
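
tsec_map_dma_addr() at line 1094 is the usual single-segment busdma load callback: it just records segs->ds_addr. Together with tsec_alloc_dma_desc() this is the canonical sequence for DMA-safe descriptor memory: create a tag, allocate coherent memory against it, and load it once to learn the bus address. A hedged sketch (alignment and address constraints are illustrative, not the driver's):

    /* Hedged sketch of descriptor-ring allocation with busdma. */
    static void
    map_dma_addr_sketch(void *arg, bus_dma_segment_t *segs, int nseg,
        int error)
    {
        if (error == 0)
            *(bus_addr_t *)arg = segs[0].ds_addr;   /* record bus address */
    }

    static int
    alloc_dma_desc_sketch(device_t dev, bus_dma_tag_t *dtag,
        bus_dmamap_t *dmap, bus_size_t dsize, void **vaddr,
        bus_addr_t *raddr)
    {
        int error;

        error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
            PAGE_SIZE, 0,                  /* alignment, boundary */
            BUS_SPACE_MAXADDR_32BIT,       /* lowaddr */
            BUS_SPACE_MAXADDR,             /* highaddr */
            NULL, NULL,                    /* filter, filterarg */
            dsize, 1, dsize,               /* maxsize, nsegs, maxsegsz */
            0, NULL, NULL, dtag);          /* flags, lock, lockarg, tag */
        if (error != 0)
            return (error);
        error = bus_dmamem_alloc(*dtag, vaddr,
            BUS_DMA_NOWAIT | BUS_DMA_ZERO, dmap);
        if (error != 0)
            return (error);
        /* Single-segment load; the callback stores the bus address. */
        return (bus_dmamap_load(*dtag, *dmap, *vaddr, dsize,
            map_dma_addr_sketch, raddr, BUS_DMA_NOWAIT));
    }
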
1152 /* Unmap descriptors from DMA memory */ in tsec_free_dma_desc()
1171 if (sc->tx_bufmap[i].map_initialized) in tsec_free_dma()
1172 bus_dmamap_destroy(sc->tsec_tx_mtag, in tsec_free_dma()
1173 sc->tx_bufmap[i].map); in tsec_free_dma()
1175 bus_dma_tag_destroy(sc->tsec_tx_mtag); in tsec_free_dma()
1179 if (sc->rx_data[i].mbuf) { in tsec_free_dma()
1180 /* Unload buffer from DMA */ in tsec_free_dma()
1181 bus_dmamap_sync(sc->tsec_rx_mtag, sc->rx_data[i].map, in tsec_free_dma()
1183 bus_dmamap_unload(sc->tsec_rx_mtag, in tsec_free_dma()
1184 sc->rx_data[i].map); in tsec_free_dma()
1187 m_freem(sc->rx_data[i].mbuf); in tsec_free_dma()
1189 /* Destroy map for this buffer */ in tsec_free_dma()
1190 if (sc->rx_data[i].map != NULL) in tsec_free_dma()
1191 bus_dmamap_destroy(sc->tsec_rx_mtag, in tsec_free_dma()
1192 sc->rx_data[i].map); in tsec_free_dma()
1195 bus_dma_tag_destroy(sc->tsec_rx_mtag); in tsec_free_dma()
1198 tsec_free_dma_desc(sc->tsec_tx_dtag, sc->tsec_tx_dmap, in tsec_free_dma()
1199 sc->tsec_tx_vaddr); in tsec_free_dma()
1200 tsec_free_dma_desc(sc->tsec_rx_dtag, sc->tsec_rx_dmap, in tsec_free_dma()
1201 sc->tsec_rx_vaddr); in tsec_free_dma()
1212 ifp = sc->tsec_ifp; in tsec_stop()
1215 callout_stop(&sc->tsec_callout); in tsec_stop()
1217 sc->tsec_watchdog = 0; in tsec_stop()
1219 /* Disable all interrupts and stop DMA */ in tsec_stop()
1224 while (sc->tx_idx_tail != sc->tx_idx_head) { in tsec_stop()
1225 bus_dmamap_sync(sc->tsec_tx_mtag, in tsec_stop()
1226 sc->tx_bufmap[sc->tx_idx_tail].map, in tsec_stop()
1228 bus_dmamap_unload(sc->tsec_tx_mtag, in tsec_stop()
1229 sc->tx_bufmap[sc->tx_idx_tail].map); in tsec_stop()
1230 m_freem(sc->tx_bufmap[sc->tx_idx_tail].mbuf); in tsec_stop()
1231 sc->tx_idx_tail = (sc->tx_idx_tail + 1) in tsec_stop()
1232 & (TSEC_TX_NUM_DESC - 1); in tsec_stop()
1253 ifp = sc->tsec_ifp; in tsec_tick()
1254 link = sc->tsec_link; in tsec_tick()
1256 mii_tick(sc->tsec_mii); in tsec_tick()
1258 if (link == 0 && sc->tsec_link == 1 && in tsec_tick()
1263 callout_reset(&sc->tsec_callout, hz, tsec_tick, sc); in tsec_tick()
1270 * sends data that has been DMA'd into host memory to the upper layer.
1287 ifp = sc->tsec_ifp; in tsec_receive_intr_locked()
1288 rx_data = sc->rx_data; in tsec_receive_intr_locked()
1291 bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, in tsec_receive_intr_locked()
1295 if (count >= 0 && count-- == 0) in tsec_receive_intr_locked()
1299 flags = rx_desc->flags; in tsec_receive_intr_locked()
1319 rx_desc->length = 0; in tsec_receive_intr_locked()
1320 rx_desc->flags = (rx_desc->flags & in tsec_receive_intr_locked()
1323 if (sc->frame != NULL) { in tsec_receive_intr_locked()
1324 m_free(sc->frame); in tsec_receive_intr_locked()
1325 sc->frame = NULL; in tsec_receive_intr_locked()
1334 m->m_len = rx_desc->length; in tsec_receive_intr_locked()
1336 if (sc->frame != NULL) { in tsec_receive_intr_locked()
1338 m->m_len -= m_length(sc->frame, NULL); in tsec_receive_intr_locked()
1340 m->m_flags &= ~M_PKTHDR; in tsec_receive_intr_locked()
1341 m_cat(sc->frame, m); in tsec_receive_intr_locked()
1343 sc->frame = m; in tsec_receive_intr_locked()
1349 m = sc->frame; in tsec_receive_intr_locked()
1350 sc->frame = NULL; in tsec_receive_intr_locked()
1353 if (tsec_new_rxbuf(sc->tsec_rx_mtag, rx_data[i].map, in tsec_receive_intr_locked()
1365 rx_desc->bufptr = rx_data[i].paddr; in tsec_receive_intr_locked()
1366 rx_desc->length = 0; in tsec_receive_intr_locked()
1367 rx_desc->flags = (rx_desc->flags & ~TSEC_RXBD_ZEROONINIT) | in tsec_receive_intr_locked()
1371 m->m_pkthdr.rcvif = ifp; in tsec_receive_intr_locked()
1374 m_adj(m, -ETHER_CRC_LEN); in tsec_receive_intr_locked()
1376 if (sc->is_etsec) in tsec_receive_intr_locked()
1386 bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, in tsec_receive_intr_locked()
1409 if (if_getcapenable(sc->tsec_ifp) & IFCAP_POLLING) { in tsec_receive_intr()
1417 tsec_receive_intr_locked(sc, -1); in tsec_receive_intr()
1430 ifp = sc->tsec_ifp; in tsec_transmit_intr_locked()
1442 bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, in tsec_transmit_intr_locked()
1445 tx_idx = sc->tx_idx_tail; in tsec_transmit_intr_locked()
1446 while (tx_idx != sc->tx_idx_head) { in tsec_transmit_intr_locked()
1450 tx_desc = &sc->tsec_tx_vaddr[tx_idx]; in tsec_transmit_intr_locked()
1451 if (tx_desc->flags & TSEC_TXBD_R) { in tsec_transmit_intr_locked()
1455 tx_bufmap = &sc->tx_bufmap[tx_idx]; in tsec_transmit_intr_locked()
1456 tx_idx = (tx_idx + 1) & (TSEC_TX_NUM_DESC - 1); in tsec_transmit_intr_locked()
1457 if (tx_bufmap->mbuf == NULL) in tsec_transmit_intr_locked()
1463 bus_dmamap_sync(sc->tsec_tx_mtag, tx_bufmap->map, in tsec_transmit_intr_locked()
1465 bus_dmamap_unload(sc->tsec_tx_mtag, tx_bufmap->map); in tsec_transmit_intr_locked()
1466 m_freem(tx_bufmap->mbuf); in tsec_transmit_intr_locked()
1467 tx_bufmap->mbuf = NULL; in tsec_transmit_intr_locked()
1471 sc->tx_idx_tail = tx_idx; in tsec_transmit_intr_locked()
1472 bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, in tsec_transmit_intr_locked()
1478 if (sc->tx_idx_tail == sc->tx_idx_head) in tsec_transmit_intr_locked()
1479 sc->tsec_watchdog = 0; in tsec_transmit_intr_locked()
1490 if (if_getcapenable(sc->tsec_ifp) & IFCAP_POLLING) { in tsec_transmit_intr()
1510 ifp = sc->tsec_ifp; in tsec_error_intr_locked()
1541 "DMA transaction (flags: 0x%x)\n", eflags); in tsec_error_intr_locked()
1558 tsec_error_intr_locked(sc, -1); in tsec_error_intr()
1615 mii = sc->tsec_mii; in tsec_miibus_statchg()
1616 link = ((mii->mii_media_status & IFM_ACTIVE) ? 1 : 0); in tsec_miibus_statchg()
1620 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) in tsec_miibus_statchg()
1625 switch (IFM_SUBTYPE(mii->mii_media_active)) { in tsec_miibus_statchg()
1629 sc->tsec_link = link; in tsec_miibus_statchg()
1634 sc->tsec_link = link; in tsec_miibus_statchg()
1640 sc->tsec_link = 0; in tsec_miibus_statchg()
1643 sc->tsec_link = 0; in tsec_miibus_statchg()
1645 IFM_SUBTYPE(mii->mii_media_active), in tsec_miibus_statchg()
1651 /* XXX kludge - use circumstantial evidence for reduced mode. */ in tsec_miibus_statchg()
1667 ctx = device_get_sysctl_ctx(sc->dev); in tsec_add_sysctls()
1668 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); in tsec_add_sysctls()
1675 tsec_sysctl_ic_time, "I", "IC RX time threshold (0-65535)"); in tsec_add_sysctls()
1678 tsec_sysctl_ic_count, "I", "IC RX frame count threshold (0-255)"); in tsec_add_sysctls()
1682 tsec_sysctl_ic_time, "I", "IC TX time threshold (0-65535)"); in tsec_add_sysctls()
1685 tsec_sysctl_ic_count, "I", "IC TX frame count threshold (0-255)"); in tsec_add_sysctls()
1692 * - a threshold-defined period of time has elapsed, or
1693 * - a threshold-defined number of frames has been received/transmitted,
1705 * - 0 for either time or count disables IC on the given TX/RX path
1707 * - count: 1-255 (expresses frame count number; note that value of 1 is
1710 * - time: 1-65535 (value corresponds to a real time period and is
1723 time = (arg2 == TSEC_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time; in tsec_sysctl_ic_time()
1734 sc->rx_ic_time = time; in tsec_sysctl_ic_time()
1737 sc->tx_ic_time = time; in tsec_sysctl_ic_time()
1752 count = (arg2 == TSEC_IC_RX) ? sc->rx_ic_count : sc->tx_ic_count; in tsec_sysctl_ic_count()
1763 sc->rx_ic_count = count; in tsec_sysctl_ic_count()
1766 sc->tx_ic_count = count; in tsec_sysctl_ic_count()
1779 if (sc->rx_ic_count == 0 || sc->rx_ic_time == 0) in tsec_set_rxic()
1784 rxic_val |= (sc->rx_ic_count << 21); in tsec_set_rxic()
1785 rxic_val |= sc->rx_ic_time; in tsec_set_rxic()
1796 if (sc->tx_ic_count == 0 || sc->tx_ic_time == 0) in tsec_set_txic()
1801 txic_val |= (sc->tx_ic_count << 21); in tsec_set_txic()
1802 txic_val |= sc->tx_ic_time; in tsec_set_txic()
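
Lines 1784-1785 and 1801-1802 show how the coalescing registers pack the two thresholds: the frame count lands at bit 21 and the timer in the low bits, with coalescing disabled entirely when either threshold is zero. A standalone check of the packing, using the driver's defaults from lines 141-144 (the enable-bit position is my assumption, not taken from the listing):

    #include <stdio.h>
    #include <stdint.h>

    int main(void) {
        uint32_t ic_count = 16, ic_time = 768;   /* driver defaults */
        uint32_t icen = 1u << 31;                /* enable bit, assumed */

        uint32_t val = icen | (ic_count << 21) | ic_time;
        printf("IC register: 0x%08x\n", val);    /* prints 0x82000300 */
        return 0;
    }
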
1811 if_t ifp = sc->tsec_ifp; in tsec_offload_setup()
1861 m->m_pkthdr.csum_data = 0xFFFF; in tsec_offload_process_frame()
1864 m->m_pkthdr.csum_flags = csum_flags; in tsec_offload_process_frame()
1867 m->m_pkthdr.ether_vtag = rx_fcb.vlan; in tsec_offload_process_frame()
1868 m->m_flags |= M_VLANTAG; in tsec_offload_process_frame()
1880 hashtable[(h >> 5)] |= 1 << (0x1F - (h & 0x1F)); in tsec_hash_maddr()
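
The expression at line 1880 treats the group-address hash registers as one big-endian bit string: h >> 5 selects a 32-bit word and 0x1F - (h & 0x1F) flips the bit order within it, so bit 0 of the string is the MSB of word 0. A standalone illustration (function and table names are mine):

    #include <stdio.h>
    #include <stdint.h>

    /* Set bit 'h' in a table where bit 0 of the overall string maps
     * to the MSB of word 0, matching the hardware's bit ordering. */
    static void hash_set(uint32_t *tbl, uint8_t h) {
        tbl[h >> 5] |= 1u << (0x1F - (h & 0x1F));
    }

    int main(void) {
        uint32_t tbl[8] = {0};
        hash_set(tbl, 0);     /* word 0, MSB    */
        hash_set(tbl, 37);    /* word 1, bit 26 */
        printf("%08x %08x\n", tbl[0], tbl[1]);   /* 80000000 04000000 */
        return 0;
    }
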
1889 if_t ifp = sc->tsec_ifp; in tsec_setup_multicast()