Lines matching "+full:rx +full:- +full:m" (excerpts from what appears to be FreeBSD's dwc(4) DesignWare GMAC DMA code)

/*-
 * ...
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ...
 */
/* TX descriptors - TDESC0 is almost unified */
	...
/* TX descriptors - TDESC0 extended format only */
	...
/* TX descriptors - TDESC1 normal format */
	...
/* TX descriptors - TDESC1 extended format */
	...

/* RX descriptor - RDESC0 is unified */
	...
#define	RDESC0_RXMA		(1U << 0)	/* Rx MAC Address */

/* RX descriptors - RDESC1 normal format */
	...
/* RX descriptors - RDESC1 enhanced format */
	...
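All of the code below manipulates four-word DMA descriptors through the
fields desc0/desc1/addr1/addr2. A minimal sketch of the layout these
fragments imply (the struct name and the OWN bit position are assumptions
based on the usual DesignWare GMAC format; they are not shown in the
excerpt):

	#include <stdint.h>

	/* Sketch: one DesignWare GMAC DMA descriptor (4 x 32-bit words). */
	struct dwc_hwdesc {
		uint32_t desc0;		/* status; bit 31 is OWN (assumed) */
		uint32_t desc1;		/* control / buffer sizes */
		uint32_t addr1;		/* buffer physical address */
		uint32_t addr2;		/* next descriptor when chained */
	};

	#define	TDESC0_OWN	(1U << 31)	/* assumed bit position */
	#define	RDESC0_OWN	(1U << 31)	/* assumed bit position */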
In txdesc_clear():
	sc->tx_desccount--;
	sc->txdesc_ring[idx].addr1 = (uint32_t)(0);
	sc->txdesc_ring[idx].desc0 = 0;
	sc->txdesc_ring[idx].desc1 = 0;
In txdesc_setup():
	if (!sc->dma_ext_desc) {
		...
	}
	...
	++sc->tx_desccount;
	sc->txdesc_ring[idx].addr1 = (uint32_t)(paddr);
	sc->txdesc_ring[idx].desc0 = desc0;
	sc->txdesc_ring[idx].desc1 = desc1;
	...
	sc->txdesc_ring[idx].desc0 |= TDESC0_OWN;
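Note the ordering: the address and both descriptor words are written
first, and TDESC0_OWN is or'd in as the very last store, which is what
hands the descriptor to the DMA engine. A standalone sketch of this
publish pattern (the barrier placement is an assumption; the excerpt
does not show one):

	#include <stdint.h>

	#define	OWN	(1U << 31)

	struct desc { volatile uint32_t desc0, desc1, addr1, addr2; };

	static void
	desc_publish(struct desc *d, uint32_t paddr, uint32_t ctl, uint32_t sts)
	{
		d->addr1 = paddr;
		d->desc1 = ctl;
		d->desc0 = sts;		/* everything except OWN */
		/* a write barrier (wmb()) would go here in kernel code */
		d->desc0 |= OWN;	/* hardware may now use the descriptor */
	}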
In rxdesc_setup():
	sc->rxdesc_ring[idx].addr1 = (uint32_t)paddr;
	...
	sc->rxdesc_ring[idx].addr2 = sc->rxdesc_ring_paddr +
	    ...
	if (!sc->dma_ext_desc)
		sc->rxdesc_ring[idx].desc1 = NRDESC1_RCH |
		    ...
	else
		sc->rxdesc_ring[idx].desc1 = ERDESC1_RCH |
		    ...
	...
	sc->rxdesc_ring[idx].desc0 = RDESC0_OWN;
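The RCH ("second address chained") bit in desc1 plus addr2 implement a
chained ring: each descriptor's second address holds the physical
address of the next descriptor, wrapping at the end. A sketch of what
the truncated addr2 computation presumably does (the ring size and
struct name are assumptions; next_rxidx() is visible later in the
excerpt):

	#include <stdint.h>

	#define	RX_DESC_COUNT	1024		/* assumed ring size */

	struct dwc_hwdesc { uint32_t desc0, desc1, addr1, addr2; };

	static inline int
	next_rxidx(int idx)
	{
		return ((idx + 1) % RX_DESC_COUNT);	/* wrap around */
	}

	/* Physical address of the next descriptor in the chained ring. */
	static inline uint32_t
	chain_addr(uint32_t ring_paddr, int idx)
	{
		return (ring_paddr +
		    sizeof(struct dwc_hwdesc) * next_rxidx(idx));
	}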
In dma1000_setup_txbuf():
	struct mbuf *m;
	...
	error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
	    ...
	...
	bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
	if ((m = m_defrag(*mp, M_NOWAIT)) == NULL)
		...
	*mp = m;
	error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
	    ...
	...
	if (sc->tx_desccount + nsegs > TX_DESC_COUNT) {
		bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
		...
	}
	m = *mp;
	...
	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
		if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0) {
			if (!sc->dma_ext_desc)
				...
		}
		...
		if (!sc->dma_ext_desc)
			...
	}
	...
	bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map,
	    ...
	sc->txbuf_map[idx].mbuf = m;
	...
	txdesc_setup(sc, sc->tx_desc_head,
	    ...
	    (i == nsegs - 1));
	last = sc->tx_desc_head;
	sc->tx_desc_head = next_txidx(sc, sc->tx_desc_head);
	...
	sc->txbuf_map[idx].last_desc_idx = last;
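The two bus_dmamap_load_mbuf_sg() calls are the standard FreeBSD
pattern for an mbuf chain with too many segments: on EFBIG, collapse
the chain with m_defrag() and load exactly once more. A hedged sketch
of that control flow (the wrapper name and return conventions are
invented; only the API calls appear in the excerpt):

	static int
	load_tx_mbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mp,
	    bus_dma_segment_t *segs, int *nsegs)
	{
		struct mbuf *m;
		int error;

		error = bus_dmamap_load_mbuf_sg(tag, map, *mp, segs, nsegs, 0);
		if (error == EFBIG) {
			/* Too many segments: compact the chain, retry once. */
			if ((m = m_defrag(*mp, M_NOWAIT)) == NULL)
				return (ENOMEM);
			*mp = m;
			error = bus_dmamap_load_mbuf_sg(tag, map, *mp, segs,
			    nsegs, 0);
		}
		return (error);
	}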
In dma1000_setup_rxbuf(struct dwc_softc *sc, int idx, struct mbuf *m):
	...
	m_adj(m, ETHER_ALIGN);
	...
	error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
	    m, &seg, &nsegs, 0);
	...
	bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
	    ...
	sc->rxbuf_map[idx].mbuf = m;
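m_adj(m, ETHER_ALIGN) trims ETHER_ALIGN (2) bytes from the front of the
fresh cluster so that, after the 14-byte Ethernet header, the IP header
starts on a 4-byte boundary. The arithmetic, as a runnable check:

	#include <assert.h>

	#define	ETHER_ALIGN	2	/* pad so the IP header lands aligned */
	#define	ETHER_HDR_LEN	14

	int
	main(void)
	{
		/* 2 + 14 = 16: the payload following the Ethernet
		 * header begins on a 4-byte boundary. */
		assert((ETHER_ALIGN + ETHER_HDR_LEN) % 4 == 0);
		return (0);
	}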
In dwc_alloc_mbufcl():
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m != NULL)
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	return (m);
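m_getcl() returns a packet-header mbuf backed by a standard cluster;
presetting m_len and m_pkthdr.len to m_ext.ext_size exposes the whole
cluster to the DMA engine, and the lengths are trimmed down to the real
frame size at receive completion (see dwc_rxfinish_one() below).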
In dwc_rxfinish_one():
	struct mbuf *m, *m0;
	...
	m = map->mbuf;
	ifp = sc->ifp;
	rdesc0 = desc->desc0;
	...
		device_printf(sc->dev,
		    "%s: RX descriptor without FIRST and LAST bit set: 0x%08X",
		    ...
	...
		if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1);
	...
	bus_dmamap_sync(sc->rxbuf_tag, map->map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->rxbuf_tag, map->map);
	...
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = len;
	m->m_len = len;
	...
	m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
	...
	m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
	...
	m->m_pkthdr.csum_flags |=
	    ...
	m->m_pkthdr.csum_data = 0xffff;
	...
	m_adj(m, -ETHER_CRC_LEN);
	...
	if_input(ifp, m);
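The csum_flags sequence follows the standard FreeBSD RX offload
contract: mark the IP header checked/valid, and for a verified TCP or
UDP payload set CSUM_DATA_VALID | CSUM_PSEUDO_HDR with csum_data =
0xffff so the stack skips software verification. A sketch under the
usual kernel headers (ip_ok and l4_ok stand in for whatever RDESC0
status bits the driver actually tests, which the excerpt elides):

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/mbuf.h>

	static void
	rx_set_csum_flags(struct mbuf *m, bool ip_ok, bool l4_ok)
	{
		m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
		if (ip_ok)
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		if (l4_ok) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;	/* "checksum good" */
		}
	}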
In dma1000_txfinish_locked():
	ifp = sc->ifp;
	...
	while (sc->tx_map_tail != sc->tx_map_head) {
		...
		bmap = &sc->txbuf_map[sc->tx_map_tail];
		idx = sc->tx_desc_tail;
		last_idx = next_txidx(sc, bmap->last_desc_idx);
		...
		desc = &sc->txdesc_ring[idx];
		if ((desc->desc0 & TDESC0_OWN) != 0) {
			...
		}
		...
		bus_dmamap_sync(sc->txbuf_tag, bmap->map,
		    ...
		bus_dmamap_unload(sc->txbuf_tag, bmap->map);
		m_freem(bmap->mbuf);
		bmap->mbuf = NULL;
		sc->tx_mapcount--;
		while (sc->tx_desc_tail != last_idx) {
			txdesc_clear(sc, sc->tx_desc_tail);
			sc->tx_desc_tail = next_txidx(sc, sc->tx_desc_tail);
		}
		sc->tx_map_tail = next_txidx(sc, sc->tx_map_tail);
		...
	}
	...
	if (sc->tx_desc_tail == sc->tx_desc_head) {
		sc->tx_watchdog_count = 0;
		...
	}
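Completion walks from the software tail toward the head, stopping at
the first descriptor the hardware still owns; only then are a packet's
mbuf and its descriptors reclaimed, and the watchdog is disarmed once
the ring drains completely. A simplified standalone sketch that checks
OWN per descriptor (the driver checks it once per packet via the map's
last_desc_idx; names and ring size are invented):

	#include <stdint.h>

	#define	OWN		(1U << 31)
	#define	RING_SIZE	64

	struct desc { volatile uint32_t status; };

	/* Reclaim descriptors hardware has finished with; returns how
	 * many were handed back to software. */
	static int
	reclaim(struct desc ring[], int *tail, int head)
	{
		int n = 0;

		while (*tail != head && (ring[*tail].status & OWN) == 0) {
			ring[*tail].status = 0;	/* txdesc_clear() analogue */
			*tail = (*tail + 1) % RING_SIZE;
			n++;
		}
		return (n);
	}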
In dma1000_txstart():
	struct mbuf *m;
	...
	if (sc->tx_desccount > (TX_DESC_COUNT - TX_MAP_MAX_SEGS + 1)) {
		if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
		...
	}
	if (sc->tx_mapcount == (TX_MAP_COUNT - 1)) {
		if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
		...
	}
	...
	m = if_dequeue(sc->ifp);
	if (m == NULL)
		...
	if (dma1000_setup_txbuf(sc, sc->tx_map_head, &m) != 0) {
		if_sendq_prepend(sc->ifp, m);
		if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
		...
	}
	bpf_mtap_if(sc->ifp, m);
	sc->tx_map_head = next_txidx(sc, sc->tx_map_head);
	sc->tx_mapcount++;
	...
	sc->tx_watchdog_count = WATCHDOG_TIMEOUT_SECS;
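All three early exits assert IFF_DRV_OACTIVE, the flag that tells the
network stack to stop handing the driver packets until TX completion
clears it again; a failed dma1000_setup_txbuf() additionally puts the
mbuf back at the head of the send queue so nothing is lost. The shape
of the loop, sketched assuming the driver's own headers and helpers;
ring_has_room() is invented shorthand for the two capacity checks:

	static void
	txstart_sketch(struct dwc_softc *sc)
	{
		struct mbuf *m;

		for (;;) {
			if (!ring_has_room(sc)) {	/* descs or maps low */
				if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
				return;		/* txfinish re-enables us */
			}
			if ((m = if_dequeue(sc->ifp)) == NULL)
				return;		/* send queue drained */
			if (dma1000_setup_txbuf(sc, sc->tx_map_head, &m) != 0) {
				if_sendq_prepend(sc->ifp, m); /* retry later */
				if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
				return;
			}
			bpf_mtap_if(sc->ifp, m);	/* packet capture tap */
			sc->tx_map_head = next_txidx(sc, sc->tx_map_head);
			sc->tx_mapcount++;
		}
	}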
In dma1000_rxfinish_locked():
	struct mbuf *m;
	...
		idx = sc->rx_idx;
		desc = sc->rxdesc_ring + idx;
		if ((desc->desc0 & RDESC0_OWN) != 0)
			...
		m = dwc_rxfinish_one(sc, desc, sc->rxbuf_map + idx);
		if (m == NULL) {
			...
			desc->desc0 = RDESC0_OWN;
		} else {
			/* We cannot create a hole in the RX ring. */
			error = dma1000_setup_rxbuf(sc, idx, m);
			...
		}
		sc->rx_idx = next_rxidx(sc, sc->rx_idx);
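If dwc_rxfinish_one() could not allocate a replacement cluster it
returns NULL and the frame is dropped (the IFCOUNTER_IQDROPS bump seen
earlier): the old buffer is simply handed back to the hardware by
restoring RDESC0_OWN. Every slot therefore always carries a usable
buffer, which is what the "cannot create a hole" comment means. A
standalone sketch of the policy (all names invented):

	#include <stddef.h>

	struct desc { unsigned int status; };
	#define	OWN	(1U << 31)

	/* Return the buffer to deliver upstream, or NULL on a drop. */
	static void *
	refill_slot(struct desc *d, void *old_buf, void *new_buf,
	    unsigned long *drops)
	{
		if (new_buf == NULL) {
			(*drops)++;		/* frame is lost */
			d->status = OWN;	/* recycle the old buffer */
			return (NULL);		/* nothing for the stack */
		}
		d->status = OWN;		/* slot re-armed with new_buf */
		return (old_buf);		/* deliver this one */
	}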
In dma1000_stop():
	/* Stop DMA RX */
	...
In dma1000_init():
	struct mbuf *m;
	...
	if (!sc->nopblx8)
		...
	reg |= (sc->txpbl << BUS_MODE_PBL_SHIFT);
	reg |= (sc->rxpbl << BUS_MODE_RPBL_SHIFT);
	if (sc->fixed_burst)
		...
	if (sc->mixed_burst)
		...
	if (sc->aal)
		...
	...
	sc->dma_ext_desc = true;
	...
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    ...
	    &sc->txdesc_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    ...
	}
	error = bus_dmamem_alloc(sc->txdesc_tag, (void**)&sc->txdesc_ring,
	    ..., &sc->txdesc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    ...
	}
	error = bus_dmamap_load(sc->txdesc_tag, sc->txdesc_map,
	    sc->txdesc_ring, TX_DESC_SIZE, dwc_get1paddr,
	    &sc->txdesc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    ...
	}
	...
	sc->txdesc_ring[idx].addr2 = sc->txdesc_ring_paddr +
	    ...
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    ...
	    &sc->txbuf_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    ...
	}
	error = bus_dmamap_create(sc->txbuf_tag, BUS_DMA_COHERENT,
	    &sc->txbuf_map[idx].map);
	if (error != 0) {
		device_printf(sc->dev,
		    ...
	}
	...
	WRITE4(sc, TX_DESCR_LIST_ADDR, sc->txdesc_ring_paddr);
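Both bus_dmamap_load() calls pass dwc_get1paddr as the callback that
captures the ring's physical address. Its body is not in the excerpt,
but it almost certainly follows the standard FreeBSD single-segment
callback idiom:

	static void
	dwc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
	{
		/* One contiguous segment is expected; publish its
		 * physical address through the caller's pointer. */
		if (error == 0)
			*(bus_addr_t *)arg = segs[0].ds_addr;
	}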
	/*
	 * Set up RX descriptor ring, descriptors, dma maps, and mbufs.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    ...
	    &sc->rxdesc_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create RX ring DMA tag.\n");
		...
	}
	error = bus_dmamem_alloc(sc->rxdesc_tag, (void **)&sc->rxdesc_ring,
	    ..., &sc->rxdesc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate RX descriptor ring.\n");
		...
	}
	error = bus_dmamap_load(sc->rxdesc_tag, sc->rxdesc_map,
	    sc->rxdesc_ring, RX_DESC_SIZE, dwc_get1paddr,
	    &sc->rxdesc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load RX descriptor ring map.\n");
		...
	}
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    ...
	    &sc->rxbuf_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create RX buf DMA tag.\n");
		...
	}
	...
	error = bus_dmamap_create(sc->rxbuf_tag, BUS_DMA_COHERENT,
	    &sc->rxbuf_map[idx].map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create RX buffer DMA map.\n");
		...
	}
	if ((m = dwc_alloc_mbufcl(sc)) == NULL) {
		device_printf(sc->dev, "Could not alloc mbuf\n");
		...
	}
	if ((error = dma1000_setup_rxbuf(sc, idx, m)) != 0) {
		device_printf(sc->dev,
		    "could not create new RX buffer.\n");
		...
	}
	...
	WRITE4(sc, RX_DESCR_LIST_ADDR, sc->rxdesc_ring_paddr);
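Each ring goes through the same three-step bus_dma ladder before its
physical address is programmed into the controller: create a tag
describing the allocation's constraints, allocate coherent memory
against it, then load the map to learn the physical address. A hedged,
fuller sketch of one such ladder (alignment, sizes, and the helper name
are assumptions; the excerpt elides the tag parameters):

	static int
	alloc_desc_ring(device_t dev, bus_size_t size, bus_dma_tag_t *tag,
	    bus_dmamap_t *map, void **vaddr, bus_addr_t *paddr)
	{
		int error;

		error = bus_dma_tag_create(
		    bus_get_dma_tag(dev),	/* parent */
		    64, 0,			/* alignment, boundary (assumed) */
		    BUS_SPACE_MAXADDR_32BIT,	/* 32-bit DMA addresses */
		    BUS_SPACE_MAXADDR,
		    NULL, NULL,			/* no filter */
		    size, 1, size,		/* one contiguous segment */
		    0, NULL, NULL, tag);
		if (error != 0)
			return (error);
		error = bus_dmamem_alloc(*tag, vaddr,
		    BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
		if (error != 0)
			return (error);
		return (bus_dmamap_load(*tag, *map, *vaddr, size,
		    dwc_get1paddr, paddr, 0));
	}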
In dma1000_free():
	/* Clean up RX DMA resources and free mbufs. */
	...
	if ((map = sc->rxbuf_map[idx].map) != NULL) {
		bus_dmamap_unload(sc->rxbuf_tag, map);
		bus_dmamap_destroy(sc->rxbuf_tag, map);
		m_freem(sc->rxbuf_map[idx].mbuf);
	}
	...
	if (sc->rxbuf_tag != NULL)
		bus_dma_tag_destroy(sc->rxbuf_tag);
	if (sc->rxdesc_map != NULL) {
		bus_dmamap_unload(sc->rxdesc_tag, sc->rxdesc_map);
		bus_dmamem_free(sc->rxdesc_tag, sc->rxdesc_ring,
		    sc->rxdesc_map);
	}
	if (sc->rxdesc_tag != NULL)
		bus_dma_tag_destroy(sc->rxdesc_tag);
	...
	if ((map = sc->txbuf_map[idx].map) != NULL) {
		...
		bus_dmamap_destroy(sc->txbuf_tag, map);
	}
	...
	if (sc->txbuf_tag != NULL)
		bus_dma_tag_destroy(sc->txbuf_tag);
	if (sc->txdesc_map != NULL) {
		bus_dmamap_unload(sc->txdesc_tag, sc->txdesc_map);
		bus_dmamem_free(sc->txdesc_tag, sc->txdesc_ring,
		    sc->txdesc_map);
	}
	if (sc->txdesc_tag != NULL)
		bus_dma_tag_destroy(sc->txdesc_tag);
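Teardown mirrors dma1000_init() in reverse: per-buffer maps are
unloaded and destroyed (and their mbufs freed) before the buffer tag
goes away, and each descriptor ring is unloaded and freed before its
tag is destroyed. The NULL checks make the function safe to call on a
partially initialized softc, which is presumably how dma1000_init()'s
error paths unwind.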