Lines Matching +full:rx +full:- +full:tx
111 /* Rx/Tx Queue Control */
159 /* Tx Subroutines */
166 /* Rx Subroutines */
191 #define mvneta_sc_lock(sc) mtx_lock(&sc->mtx)
192 #define mvneta_sc_unlock(sc) mtx_unlock(&sc->mtx)
270 "rx_good_oct", "Good Octets Rx"},
272 "rx_bad_oct", "Bad Octets Rx"},
276 "rx_good_frame", "Good Frames Rx"},
278 "rx_bad_frame", "Bad Frames Rx"},
280 "rx_bcast_frame", "Broadcast Frames Rx"},
282 "rx_mcast_frame", "Multicast Frames Rx"},
284 "rx_frame_1_64", "Frame Size 1 - 64"},
286 "rx_frame_65_127", "Frame Size 65 - 127"},
288 "rx_frame_128_255", "Frame Size 128 - 255"},
290 "rx_frame_256_511", "Frame Size 256 - 511"},
292 "rx_frame_512_1023", "Frame Size 512 - 1023"},
294 "rx_fame_1024_max", "Frame Size 1024 - Max"},
296 "tx_good_oct", "Good Octets Tx"},
298 "tx_good_frame", "Good Frames Tx"},
302 "tx_mcast_frame", "Multicast Frames Tx"},
304 "tx_bcast_frame", "Broadcast Frames Tx"},
308 "fc_tx", "Flow Control Tx"},
310 "fc_rx_good", "Good Flow Control Rx"},
312 "fc_rx_bad", "Bad Flow Control Rx"},
314 "pkt_undersize", "Undersized Packets Rx"},
316 "pkt_fragment", "Fragmented Packets Rx"},
318 "pkt_oversize", "Oversized Packets Rx"},
320 "pkt_jabber", "Jabber Packets Rx"},
322 "mac_rx_err", "MAC Rx Errors"},
334 { -1, 0}
369 * Fall back -- use the currently programmed address. in mvneta_get_mac_address()
375 * Generate pseudo-random MAC. in mvneta_get_mac_address()
379 mac_l |= device_get_unit(sc->dev) & 0xff; in mvneta_get_mac_address()
383 device_printf(sc->dev, in mvneta_get_mac_address()
416 * Create Tx DMA in mvneta_dma_create()
421 bus_get_dma_tag(sc->dev), /* parent */ in mvneta_dma_create()
431 &sc->tx_dtag); /* dmat */ in mvneta_dma_create()
433 device_printf(sc->dev, in mvneta_dma_create()
434 "Failed to create DMA tag for Tx descriptors.\n"); in mvneta_dma_create()
438 bus_get_dma_tag(sc->dev), /* parent */ in mvneta_dma_create()
448 &sc->txmbuf_dtag); in mvneta_dma_create()
450 device_printf(sc->dev, in mvneta_dma_create()
451 "Failed to create DMA tag for Tx mbufs.\n"); in mvneta_dma_create()
458 device_printf(sc->dev, in mvneta_dma_create()
465 * Create Rx DMA. in mvneta_dma_create()
467 /* Create tag for Rx descriptors */ in mvneta_dma_create()
469 bus_get_dma_tag(sc->dev), /* parent */ in mvneta_dma_create()
479 &sc->rx_dtag); /* dmat */ in mvneta_dma_create()
481 device_printf(sc->dev, in mvneta_dma_create()
482 "Failed to create DMA tag for Rx descriptors.\n"); in mvneta_dma_create()
486 /* Create tag for Rx buffers */ in mvneta_dma_create()
488 bus_get_dma_tag(sc->dev), /* parent */ in mvneta_dma_create()
497 &sc->rxbuf_dtag); /* dmat */ in mvneta_dma_create()
499 device_printf(sc->dev, in mvneta_dma_create()
500 "Failed to create DMA tag for Rx buffers.\n"); in mvneta_dma_create()
506 device_printf(sc->dev, in mvneta_dma_create()
514 mvneta_detach(sc->dev); in mvneta_dma_create()
534 sc->dev = self; in mvneta_attach()
536 mtx_init(&sc->mtx, "mvneta_sc", NULL, MTX_DEF); in mvneta_attach()
538 error = bus_alloc_resources(self, res_spec, sc->res); in mvneta_attach()
544 sc->version = MVNETA_READ(sc, MVNETA_PV); in mvneta_attach()
545 device_printf(self, "version is %x\n", sc->version); in mvneta_attach()
546 callout_init(&sc->tick_ch, 0); in mvneta_attach()
554 error = clk_get_by_ofw_index(sc->dev, ofw_bus_get_node(sc->dev), 0, in mvneta_attach()
558 device_printf(sc->dev, in mvneta_attach()
561 sc->clk_freq = A3700_TCLK_250MHZ; in mvneta_attach()
563 device_printf(sc->dev, in mvneta_attach()
565 sc->clk_freq = get_tclk(); in mvneta_attach()
568 error = clk_get_freq(clk, &sc->clk_freq); in mvneta_attach()
570 device_printf(sc->dev, in mvneta_attach()
572 bus_release_resources(sc->dev, res_spec, sc->res); in mvneta_attach()
592 error = bus_setup_intr(self, sc->res[1], in mvneta_attach()
594 &sc->ih_cookie[0]); in mvneta_attach()
605 if (mvneta_get_mac_address(sc, sc->enaddr)) { in mvneta_attach()
609 mvneta_set_mac_address(sc, sc->enaddr); in mvneta_attach()
614 ifp = sc->ifp = if_alloc(IFT_ETHER); in mvneta_attach()
618 * We can support 802.1Q VLAN-sized frames and jumbo in mvneta_attach()
630 if_setsendqlen(ifp, MVNETA_TX_RING_CNT - 1); in mvneta_attach()
656 * - Support for Large Receive Offload in mvneta_attach()
662 sc->rx_frame_size = MCLBYTES; /* ether_ifattach() always sets normal mtu */ in mvneta_attach()
715 if ((sc->phy_addr != MII_PHY_ANY) && (!sc->use_inband_status)) { in mvneta_attach()
716 error = mii_attach(self, &sc->miibus, ifp, mvneta_mediachange, in mvneta_attach()
717 mvneta_mediastatus, BMSR_DEFCAPMASK, sc->phy_addr, in mvneta_attach()
722 ether_ifdetach(sc->ifp); in mvneta_attach()
726 sc->mii = device_get_softc(sc->miibus); in mvneta_attach()
727 sc->phy_attached = 1; in mvneta_attach()
729 /* Disable auto-negotiation in MAC - rely on PHY layer */ in mvneta_attach()
731 } else if (sc->use_inband_status == TRUE) { in mvneta_attach()
732 /* In-band link status */ in mvneta_attach()
733 ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange, in mvneta_attach()
737 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX, in mvneta_attach()
739 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL); in mvneta_attach()
740 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, in mvneta_attach()
742 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL); in mvneta_attach()
743 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, in mvneta_attach()
745 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); in mvneta_attach()
746 ifmedia_set(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO); in mvneta_attach()
748 /* Enable auto-negotiation */ in mvneta_attach()
759 /* Fixed-link, use predefined values */ in mvneta_attach()
761 ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange, in mvneta_attach()
765 switch (sc->phy_speed) { in mvneta_attach()
767 if (sc->phy_mode != MVNETA_PHY_SGMII && in mvneta_attach()
768 sc->phy_mode != MVNETA_PHY_QSGMII) { in mvneta_attach()
771 ether_ifdetach(sc->ifp); in mvneta_attach()
787 ether_ifdetach(sc->ifp); in mvneta_attach()
792 if (sc->phy_fdx) in mvneta_attach()
797 ifmedia_add(&sc->mvneta_ifmedia, ifm_target, 0, NULL); in mvneta_attach()
798 ifmedia_set(&sc->mvneta_ifmedia, ifm_target); in mvneta_attach()
799 if_link_state_change(sc->ifp, LINK_STATE_UP); in mvneta_attach()
804 child = device_add_child(sc->dev, "mdio", DEVICE_UNIT_ANY); in mvneta_attach()
806 ether_ifdetach(sc->ifp); in mvneta_attach()
810 bus_attach_children(sc->dev); in mvneta_attach()
818 ether_ifattach(ifp, sc->enaddr); in mvneta_attach()
820 callout_reset(&sc->tick_ch, 0, mvneta_tick, sc); in mvneta_attach()
837 callout_drain(&sc->tick_ch); in mvneta_detach()
838 ether_ifdetach(sc->ifp); in mvneta_detach()
848 if (sc->ih_cookie[0] != NULL) in mvneta_detach()
849 bus_teardown_intr(dev, sc->res[1], sc->ih_cookie[0]); in mvneta_detach()
851 if (sc->tx_dtag != NULL) in mvneta_detach()
852 bus_dma_tag_destroy(sc->tx_dtag); in mvneta_detach()
853 if (sc->rx_dtag != NULL) in mvneta_detach()
854 bus_dma_tag_destroy(sc->rx_dtag); in mvneta_detach()
855 if (sc->txmbuf_dtag != NULL) in mvneta_detach()
856 bus_dma_tag_destroy(sc->txmbuf_dtag); in mvneta_detach()
857 if (sc->rxbuf_dtag != NULL) in mvneta_detach()
858 bus_dma_tag_destroy(sc->rxbuf_dtag); in mvneta_detach()
860 bus_release_resources(dev, res_spec, sc->res); in mvneta_detach()
862 if (sc->ifp) in mvneta_detach()
863 if_free(sc->ifp); in mvneta_detach()
865 if (mtx_initialized(&sc->mtx)) in mvneta_detach()
866 mtx_destroy(&sc->mtx); in mvneta_detach()
883 ifp = sc->ifp; in mvneta_miibus_readreg()
895 return (-1); in mvneta_miibus_readreg()
911 return (-1); in mvneta_miibus_readreg()
923 return (-1); in mvneta_miibus_readreg()
951 ifp = sc->ifp; in mvneta_miibus_writereg()
995 mvneta_rx_queue_enable(sc->ifp, q); in mvneta_portup()
1001 mvneta_tx_queue_enable(sc->ifp, q); in mvneta_portup()
1010 struct mvneta_rx_ring *rx; in mvneta_portdown() local
1011 struct mvneta_tx_ring *tx; in mvneta_portdown() local
1016 rx = MVNETA_RX_RING(sc, q); in mvneta_portdown()
1018 rx->queue_status = MVNETA_QUEUE_DISABLED; in mvneta_portdown()
1023 tx = MVNETA_TX_RING(sc, q); in mvneta_portdown()
1025 tx->queue_status = MVNETA_QUEUE_DISABLED; in mvneta_portdown()
1029 /* Wait for all Rx activity to terminate. */ in mvneta_portdown()
1036 if_printf(sc->ifp, in mvneta_portdown()
1037 "timeout for RX stopped. rqc 0x%x\n", reg); in mvneta_portdown()
1044 /* Wait for all Tx activity to terminate. */ in mvneta_portdown()
1059 if_printf(sc->ifp, in mvneta_portdown()
1060 "timeout for TX stopped. tqc 0x%x\n", reg); in mvneta_portdown()
1067 /* Wait until the Tx FIFO is empty */ in mvneta_portdown()
1071 if_printf(sc->ifp, in mvneta_portdown()
1072 "timeout for TX FIFO drained. ps0 0x%x\n", reg); in mvneta_portdown()
1103 /* Init TX/RX Queue Registers */ in mvneta_initreg()
1107 device_printf(sc->dev, in mvneta_initreg()
1117 device_printf(sc->dev, in mvneta_initreg()
1126 * Ethernet Unit Control - disable automatic PHY management by HW. in mvneta_initreg()
1127 * In case the port uses SMI-controlled PHY, poll its status with in mvneta_initreg()
1153 switch (sc->phy_mode) { in mvneta_initreg()
1171 /* Port Configuration Extended: enable Tx CRC generation */ in mvneta_initreg()
1197 *(bus_addr_t *)arg = segs->ds_addr; in mvneta_dmamap_cb()
1203 struct mvneta_rx_ring *rx; in mvneta_ring_alloc_rx_queue() local
1211 rx = MVNETA_RX_RING(sc, q); in mvneta_ring_alloc_rx_queue()
1212 mtx_init(&rx->ring_mtx, "mvneta_rx", NULL, MTX_DEF); in mvneta_ring_alloc_rx_queue()
1213 /* Allocate DMA memory for Rx descriptors */ in mvneta_ring_alloc_rx_queue()
1214 error = bus_dmamem_alloc(sc->rx_dtag, in mvneta_ring_alloc_rx_queue()
1215 (void**)&(rx->desc), in mvneta_ring_alloc_rx_queue()
1217 &rx->desc_map); in mvneta_ring_alloc_rx_queue()
1218 if (error != 0 || rx->desc == NULL) in mvneta_ring_alloc_rx_queue()
1220 error = bus_dmamap_load(sc->rx_dtag, rx->desc_map, in mvneta_ring_alloc_rx_queue()
1221 rx->desc, in mvneta_ring_alloc_rx_queue()
1223 mvneta_dmamap_cb, &rx->desc_pa, BUS_DMA_NOWAIT); in mvneta_ring_alloc_rx_queue()
1228 error = bus_dmamap_create(sc->rxbuf_dtag, 0, &dmap); in mvneta_ring_alloc_rx_queue()
1230 device_printf(sc->dev, in mvneta_ring_alloc_rx_queue()
1231 "Failed to create DMA map for Rx buffer num: %d\n", i); in mvneta_ring_alloc_rx_queue()
1234 rxbuf = &rx->rxbuf[i]; in mvneta_ring_alloc_rx_queue()
1235 rxbuf->dmap = dmap; in mvneta_ring_alloc_rx_queue()
1236 rxbuf->m = NULL; in mvneta_ring_alloc_rx_queue()
1245 device_printf(sc->dev, "DMA Ring buffer allocation failure.\n"); in mvneta_ring_alloc_rx_queue()
1252 struct mvneta_tx_ring *tx; in mvneta_ring_alloc_tx_queue() local
1257 tx = MVNETA_TX_RING(sc, q); in mvneta_ring_alloc_tx_queue()
1258 mtx_init(&tx->ring_mtx, "mvneta_tx", NULL, MTX_DEF); in mvneta_ring_alloc_tx_queue()
1259 error = bus_dmamem_alloc(sc->tx_dtag, in mvneta_ring_alloc_tx_queue()
1260 (void**)&(tx->desc), in mvneta_ring_alloc_tx_queue()
1262 &tx->desc_map); in mvneta_ring_alloc_tx_queue()
1263 if (error != 0 || tx->desc == NULL) in mvneta_ring_alloc_tx_queue()
1265 error = bus_dmamap_load(sc->tx_dtag, tx->desc_map, in mvneta_ring_alloc_tx_queue()
1266 tx->desc, in mvneta_ring_alloc_tx_queue()
1268 mvneta_dmamap_cb, &tx->desc_pa, BUS_DMA_NOWAIT); in mvneta_ring_alloc_tx_queue()
1273 tx->br = buf_ring_alloc(MVNETA_BUFRING_SIZE, M_DEVBUF, M_NOWAIT, in mvneta_ring_alloc_tx_queue()
1274 &tx->ring_mtx); in mvneta_ring_alloc_tx_queue()
1275 if (tx->br == NULL) { in mvneta_ring_alloc_tx_queue()
1276 device_printf(sc->dev, in mvneta_ring_alloc_tx_queue()
1289 device_printf(sc->dev, "DMA Ring buffer allocation failure.\n"); in mvneta_ring_alloc_tx_queue()
1296 struct mvneta_tx_ring *tx; in mvneta_ring_dealloc_tx_queue() local
1304 tx = MVNETA_TX_RING(sc, q); in mvneta_ring_dealloc_tx_queue()
1306 if (tx->taskq != NULL) { in mvneta_ring_dealloc_tx_queue()
1308 while (taskqueue_cancel(tx->taskq, &tx->task, NULL) != 0) in mvneta_ring_dealloc_tx_queue()
1309 taskqueue_drain(tx->taskq, &tx->task); in mvneta_ring_dealloc_tx_queue()
1312 if (tx->br != NULL) in mvneta_ring_dealloc_tx_queue()
1313 drbr_free(tx->br, M_DEVBUF); in mvneta_ring_dealloc_tx_queue()
1316 if (sc->txmbuf_dtag != NULL) { in mvneta_ring_dealloc_tx_queue()
1318 txbuf = &tx->txbuf[i]; in mvneta_ring_dealloc_tx_queue()
1319 if (txbuf->dmap != NULL) { in mvneta_ring_dealloc_tx_queue()
1320 error = bus_dmamap_destroy(sc->txmbuf_dtag, in mvneta_ring_dealloc_tx_queue()
1321 txbuf->dmap); in mvneta_ring_dealloc_tx_queue()
1323 panic("%s: map busy for Tx descriptor (Q%d, %d)", in mvneta_ring_dealloc_tx_queue()
1330 if (tx->desc_pa != 0) in mvneta_ring_dealloc_tx_queue()
1331 bus_dmamap_unload(sc->tx_dtag, tx->desc_map); in mvneta_ring_dealloc_tx_queue()
1333 kva = (void *)tx->desc; in mvneta_ring_dealloc_tx_queue()
1335 bus_dmamem_free(sc->tx_dtag, tx->desc, tx->desc_map); in mvneta_ring_dealloc_tx_queue()
1337 if (mtx_name(&tx->ring_mtx) != NULL) in mvneta_ring_dealloc_tx_queue()
1338 mtx_destroy(&tx->ring_mtx); in mvneta_ring_dealloc_tx_queue()
1340 memset(tx, 0, sizeof(*tx)); in mvneta_ring_dealloc_tx_queue()
1346 struct mvneta_rx_ring *rx; in mvneta_ring_dealloc_rx_queue() local
1353 rx = MVNETA_RX_RING(sc, q); in mvneta_ring_dealloc_rx_queue()
1355 if (rx->desc_pa != 0) in mvneta_ring_dealloc_rx_queue()
1356 bus_dmamap_unload(sc->rx_dtag, rx->desc_map); in mvneta_ring_dealloc_rx_queue()
1358 kva = (void *)rx->desc; in mvneta_ring_dealloc_rx_queue()
1360 bus_dmamem_free(sc->rx_dtag, rx->desc, rx->desc_map); in mvneta_ring_dealloc_rx_queue()
1362 lro = &rx->lro; in mvneta_ring_dealloc_rx_queue()
1365 if (mtx_name(&rx->ring_mtx) != NULL) in mvneta_ring_dealloc_rx_queue()
1366 mtx_destroy(&rx->ring_mtx); in mvneta_ring_dealloc_rx_queue()
1368 memset(rx, 0, sizeof(*rx)); in mvneta_ring_dealloc_rx_queue()
1374 struct mvneta_rx_ring *rx; in mvneta_ring_init_rx_queue() local
1381 rx = MVNETA_RX_RING(sc, q); in mvneta_ring_init_rx_queue()
1382 rx->dma = rx->cpu = 0; in mvneta_ring_init_rx_queue()
1383 rx->queue_th_received = MVNETA_RXTH_COUNT; in mvneta_ring_init_rx_queue()
1384 rx->queue_th_time = (sc->clk_freq / 1000) / 10; /* 0.1 [ms] */ in mvneta_ring_init_rx_queue()
1387 rx->lro_enabled = FALSE; in mvneta_ring_init_rx_queue()
1388 if ((if_getcapenable(sc->ifp) & IFCAP_LRO) != 0) { in mvneta_ring_init_rx_queue()
1389 lro = &rx->lro; in mvneta_ring_init_rx_queue()
1392 device_printf(sc->dev, "LRO Initialization failed!\n"); in mvneta_ring_init_rx_queue()
1394 rx->lro_enabled = TRUE; in mvneta_ring_init_rx_queue()
1395 lro->ifp = sc->ifp; in mvneta_ring_init_rx_queue()
1405 struct mvneta_tx_ring *tx; in mvneta_ring_init_tx_queue() local
1412 tx = MVNETA_TX_RING(sc, q); in mvneta_ring_init_tx_queue()
1414 /* Tx handle */ in mvneta_ring_init_tx_queue()
1416 txbuf = &tx->txbuf[i]; in mvneta_ring_init_tx_queue()
1417 txbuf->m = NULL; in mvneta_ring_init_tx_queue()
1418 /* Tx handle needs DMA map for busdma_load_mbuf() */ in mvneta_ring_init_tx_queue()
1419 error = bus_dmamap_create(sc->txmbuf_dtag, 0, in mvneta_ring_init_tx_queue()
1420 &txbuf->dmap); in mvneta_ring_init_tx_queue()
1422 device_printf(sc->dev, in mvneta_ring_init_tx_queue()
1423 "can't create dma map (tx ring %d)\n", i); in mvneta_ring_init_tx_queue()
1427 tx->dma = tx->cpu = 0; in mvneta_ring_init_tx_queue()
1428 tx->used = 0; in mvneta_ring_init_tx_queue()
1429 tx->drv_error = 0; in mvneta_ring_init_tx_queue()
1430 tx->queue_status = MVNETA_QUEUE_DISABLED; in mvneta_ring_init_tx_queue()
1431 tx->queue_hung = FALSE; in mvneta_ring_init_tx_queue()
1433 tx->ifp = sc->ifp; in mvneta_ring_init_tx_queue()
1434 tx->qidx = q; in mvneta_ring_init_tx_queue()
1435 TASK_INIT(&tx->task, 0, mvneta_tx_task, tx); in mvneta_ring_init_tx_queue()
1436 tx->taskq = taskqueue_create_fast("mvneta_tx_taskq", M_WAITOK, in mvneta_ring_init_tx_queue()
1437 taskqueue_thread_enqueue, &tx->taskq); in mvneta_ring_init_tx_queue()
1438 taskqueue_start_threads(&tx->taskq, 1, PI_NET, "%s: tx_taskq(%d)", in mvneta_ring_init_tx_queue()
1439 device_get_nameunit(sc->dev), q); in mvneta_ring_init_tx_queue()
1447 struct mvneta_tx_ring *tx; in mvneta_ring_flush_tx_queue() local
1451 tx = MVNETA_TX_RING(sc, q); in mvneta_ring_flush_tx_queue()
1454 /* Tx handle */ in mvneta_ring_flush_tx_queue()
1456 txbuf = &tx->txbuf[i]; in mvneta_ring_flush_tx_queue()
1457 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap); in mvneta_ring_flush_tx_queue()
1458 if (txbuf->m != NULL) { in mvneta_ring_flush_tx_queue()
1459 m_freem(txbuf->m); in mvneta_ring_flush_tx_queue()
1460 txbuf->m = NULL; in mvneta_ring_flush_tx_queue()
1463 tx->dma = tx->cpu = 0; in mvneta_ring_flush_tx_queue()
1464 tx->used = 0; in mvneta_ring_flush_tx_queue()
1470 struct mvneta_rx_ring *rx; in mvneta_ring_flush_rx_queue() local
1474 rx = MVNETA_RX_RING(sc, q); in mvneta_ring_flush_rx_queue()
1477 /* Rx handle */ in mvneta_ring_flush_rx_queue()
1479 rxbuf = &rx->rxbuf[i]; in mvneta_ring_flush_rx_queue()
1482 rx->dma = rx->cpu = 0; in mvneta_ring_flush_rx_queue()
1486 * Rx/Tx Queue Control
1492 struct mvneta_rx_ring *rx; in mvneta_rx_queue_init() local
1497 rx = MVNETA_RX_RING(sc, q); in mvneta_rx_queue_init()
1498 DASSERT(rx->desc_pa != 0); in mvneta_rx_queue_init()
1501 MVNETA_WRITE(sc, MVNETA_PRXDQA(q), rx->desc_pa); in mvneta_rx_queue_init()
1503 /* Rx buffer size and descriptor ring size */ in mvneta_rx_queue_init()
1504 reg = MVNETA_PRXDQS_BUFFERSIZE(sc->rx_frame_size >> 3); in mvneta_rx_queue_init()
1511 /* Rx packet offset address */ in mvneta_rx_queue_init()
1520 DASSERT(MVNETA_READ(sc, MVNETA_PRXDQA(q)) == rx->desc_pa); in mvneta_rx_queue_init()
1528 struct mvneta_tx_ring *tx; in mvneta_tx_queue_init() local
1533 tx = MVNETA_TX_RING(sc, q); in mvneta_tx_queue_init()
1534 DASSERT(tx->desc_pa != 0); in mvneta_tx_queue_init()
1537 MVNETA_WRITE(sc, MVNETA_PTXDQA(q), tx->desc_pa); in mvneta_tx_queue_init()
1544 DASSERT(MVNETA_READ(sc, MVNETA_PTXDQA(q)) == tx->desc_pa); in mvneta_tx_queue_init()
1552 struct mvneta_rx_ring *rx; in mvneta_rx_queue_enable() local
1556 rx = MVNETA_RX_RING(sc, q); in mvneta_rx_queue_enable()
1559 /* Set Rx interrupt threshold */ in mvneta_rx_queue_enable()
1560 reg = MVNETA_PRXDQTH_ODT(rx->queue_th_received); in mvneta_rx_queue_enable()
1563 reg = MVNETA_PRXITTH_RITT(rx->queue_th_time); in mvneta_rx_queue_enable()
1568 reg |= MVNETA_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalesce */ in mvneta_rx_queue_enable()
1571 /* Enable Rx queue */ in mvneta_rx_queue_enable()
1576 rx->queue_status = MVNETA_QUEUE_WORKING; in mvneta_rx_queue_enable()
1584 struct mvneta_tx_ring *tx; in mvneta_tx_queue_enable() local
1587 tx = MVNETA_TX_RING(sc, q); in mvneta_tx_queue_enable()
1590 /* Enable Tx queue */ in mvneta_tx_queue_enable()
1593 tx->queue_status = MVNETA_QUEUE_IDLE; in mvneta_tx_queue_enable()
1594 tx->queue_hung = FALSE; in mvneta_tx_queue_enable()
1604 mtx_lock(&sc->rx_ring[q].ring_mtx); in mvneta_rx_lockq()
1613 mtx_unlock(&sc->rx_ring[q].ring_mtx); in mvneta_rx_unlockq()
1622 return (mtx_trylock(&sc->tx_ring[q].ring_mtx)); in mvneta_tx_trylockq()
1631 mtx_lock(&sc->tx_ring[q].ring_mtx); in mvneta_tx_lockq()
1640 mtx_unlock(&sc->tx_ring[q].ring_mtx); in mvneta_tx_unlockq()
1671 if (!sc->phy_attached || sc->use_inband_status) { in mvneta_enable_intr()
1692 ifp = sc->ifp; in mvneta_rxtxth_intr()
1703 (!sc->phy_attached || sc->use_inband_status))) { in mvneta_rxtxth_intr()
1716 /* At the moment the driver supports only one RX queue. in mvneta_rxtxth_intr()
1729 CTR1(KTR_SPARE2, "%s got MISC_INTR", if_name(sc->ifp)); in mvneta_misc_intr()
1752 struct mvneta_tx_ring *tx; in mvneta_tick() local
1753 struct mvneta_rx_ring *rx; in mvneta_tick() local
1765 /* Extract previous flow-control frame received counter. */ in mvneta_tick()
1766 fc_prev = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter; in mvneta_tick()
1769 /* Extract current flow-control frame received counter. */ in mvneta_tick()
1770 fc_curr = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter; in mvneta_tick()
1773 if (sc->phy_attached && if_getflags(sc->ifp) & IFF_UP) { in mvneta_tick()
1775 mii_tick(sc->mii); in mvneta_tick()
1783 * We were unable to refill the rx queue and left the rx func, leaving in mvneta_tick()
1787 rx = MVNETA_RX_RING(sc, q); in mvneta_tick()
1788 if (rx->needs_refill == TRUE) { in mvneta_tick()
1797 * - check if queue is marked as hung. in mvneta_tick()
1798 * - ignore hung status if we received some pause frame in mvneta_tick()
1807 tx = MVNETA_TX_RING(sc, q); in mvneta_tick()
1809 if (tx->queue_hung && (fc_curr - fc_prev) == 0) in mvneta_tick()
1813 callout_schedule(&sc->tick_ch, hz); in mvneta_tick()
1817 if_printf(sc->ifp, "watchdog timeout\n"); in mvneta_tick()
1820 sc->counter_watchdog++; in mvneta_tick()
1821 sc->counter_watchdog_mib++; in mvneta_tick()
1833 struct mvneta_tx_ring *tx; in mvneta_qflush() local
1840 tx = MVNETA_TX_RING(sc, q); in mvneta_qflush()
1842 while ((m = buf_ring_dequeue_sc(tx->br)) != NULL) in mvneta_qflush()
1854 struct mvneta_tx_ring *tx; in mvneta_tx_task() local
1858 tx = arg; in mvneta_tx_task()
1859 ifp = tx->ifp; in mvneta_tx_task()
1862 mvneta_tx_lockq(sc, tx->qidx); in mvneta_tx_task()
1863 error = mvneta_xmit_locked(sc, tx->qidx); in mvneta_tx_task()
1864 mvneta_tx_unlockq(sc, tx->qidx); in mvneta_tx_task()
1869 taskqueue_enqueue(tx->taskq, &tx->task); in mvneta_tx_task()
1876 struct mvneta_tx_ring *tx; in mvneta_xmitfast_locked() local
1881 tx = MVNETA_TX_RING(sc, q); in mvneta_xmitfast_locked()
1884 ifp = sc->ifp; in mvneta_xmitfast_locked()
1887 if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED)) { in mvneta_xmitfast_locked()
1894 if (__predict_true(tx->used > MVNETA_TX_RECLAIM_COUNT)) in mvneta_xmitfast_locked()
1898 if (__predict_false(tx->used > in mvneta_xmitfast_locked()
1899 MVNETA_TX_RING_CNT - MVNETA_TX_SEGLIMIT)) in mvneta_xmitfast_locked()
1910 tx->watchdog_time = ticks; in mvneta_xmitfast_locked()
1911 tx->queue_status = MVNETA_QUEUE_WORKING; in mvneta_xmitfast_locked()
1921 struct mvneta_tx_ring *tx; in mvneta_transmit() local
1929 q = m->m_pkthdr.flowid % MVNETA_TX_QNUM_MAX; in mvneta_transmit()
1933 tx = MVNETA_TX_RING(sc, q); in mvneta_transmit()
1936 if (buf_ring_full(tx->br)) { in mvneta_transmit()
1946 if (buf_ring_empty(tx->br) && mvneta_tx_trylockq(sc, q) != 0) { in mvneta_transmit()
1958 error = drbr_enqueue(ifp, tx->br, m); in mvneta_transmit()
1962 taskqueue_enqueue(tx->taskq, &tx->task); in mvneta_transmit()
1970 struct mvneta_tx_ring *tx; in mvneta_xmit_locked() local
1975 ifp = sc->ifp; in mvneta_xmit_locked()
1976 tx = MVNETA_TX_RING(sc, q); in mvneta_xmit_locked()
1979 while ((m = drbr_peek(ifp, tx->br)) != NULL) { in mvneta_xmit_locked()
1983 drbr_putback(ifp, tx->br, m); in mvneta_xmit_locked()
1985 drbr_advance(ifp, tx->br); in mvneta_xmit_locked()
1988 drbr_advance(ifp, tx->br); in mvneta_xmit_locked()
1998 struct mvneta_tx_ring *tx; in mvneta_start() local
2002 tx = MVNETA_TX_RING(sc, 0); in mvneta_start()
2009 taskqueue_enqueue(tx->taskq, &tx->task); in mvneta_start()
2020 ifp = sc->ifp; in mvneta_xmit_locked()
2044 struct mvneta_rx_ring *rx; in mvneta_ioctl() local
2060 flags = if_getflags(ifp) ^ sc->mvneta_if_flags; in mvneta_ioctl()
2063 sc->mvneta_if_flags = if_getflags(ifp); in mvneta_ioctl()
2069 sc->mvneta_if_flags = if_getflags(ifp); in mvneta_ioctl()
2070 if (sc->phy_attached) in mvneta_ioctl()
2071 mii_mediachg(sc->mii); in mvneta_ioctl()
2078 sc->mvneta_if_flags = if_getflags(ifp); in mvneta_ioctl()
2082 if (if_getmtu(ifp) > sc->tx_csum_limit && in mvneta_ioctl()
2083 ifr->ifr_reqcap & IFCAP_TXCSUM) in mvneta_ioctl()
2084 ifr->ifr_reqcap &= ~IFCAP_TXCSUM; in mvneta_ioctl()
2085 mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap; in mvneta_ioctl()
2087 if_setcapenablebit(ifp, IFCAP_HWCSUM & ifr->ifr_reqcap, in mvneta_ioctl()
2100 rx = MVNETA_RX_RING(sc, q); in mvneta_ioctl()
2101 rx->lro_enabled = !rx->lro_enabled; in mvneta_ioctl()
2109 if ((IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T || in mvneta_ioctl()
2110 IFM_SUBTYPE(ifr->ifr_media) == IFM_2500_T) && in mvneta_ioctl()
2111 (ifr->ifr_media & IFM_FDX) == 0) { in mvneta_ioctl()
2112 device_printf(sc->dev, in mvneta_ioctl()
2113 "%s half-duplex unsupported\n", in mvneta_ioctl()
2114 IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ? in mvneta_ioctl()
2115 "1000Base-T" : in mvneta_ioctl()
2116 "2500Base-T"); in mvneta_ioctl()
2122 if (!sc->phy_attached) in mvneta_ioctl()
2123 error = ifmedia_ioctl(ifp, ifr, &sc->mvneta_ifmedia, in mvneta_ioctl()
2126 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, in mvneta_ioctl()
2130 if (ifr->ifr_mtu < 68 || ifr->ifr_mtu > MVNETA_MAX_FRAME - in mvneta_ioctl()
2134 if_setmtu(ifp, ifr->ifr_mtu); in mvneta_ioctl()
2137 sc->rx_frame_size = MCLBYTES; in mvneta_ioctl()
2139 sc->rx_frame_size = MJUM9BYTES; in mvneta_ioctl()
2141 if (if_getmtu(ifp) > sc->tx_csum_limit) { in mvneta_ioctl()
2150 * Reinitialize RX queues. in mvneta_ioctl()
2151 * We need to update RX descriptor size. in mvneta_ioctl()
2161 device_printf(sc->dev, in mvneta_ioctl()
2194 ifp = sc->ifp; in mvneta_init_locked()
2196 if (!device_is_attached(sc->dev) || in mvneta_init_locked()
2201 callout_stop(&sc->tick_ch); in mvneta_init_locked()
2204 bcopy(if_getlladdr(ifp), sc->enaddr, ETHER_ADDR_LEN); in mvneta_init_locked()
2205 mvneta_set_mac_address(sc, sc->enaddr); in mvneta_init_locked()
2231 if (!sc->phy_attached) in mvneta_init_locked()
2238 callout_schedule(&sc->tick_ch, hz); in mvneta_init_locked()
2251 if (sc->phy_attached) in mvneta_init()
2252 mii_mediachg(sc->mii); in mvneta_init()
2264 ifp = sc->ifp; in mvneta_stop_locked()
2270 callout_stop(&sc->tick_ch); in mvneta_stop_locked()
2275 if (sc->linkup == TRUE) in mvneta_stop_locked()
2320 if (!sc->phy_attached && !sc->use_inband_status) { in mvneta_mediachange()
2322 if_printf(ifp, "Cannot change media in fixed-link mode!\n"); in mvneta_mediachange()
2326 if (sc->use_inband_status) { in mvneta_mediachange()
2327 mvneta_update_media(sc, sc->mvneta_ifmedia.ifm_media); in mvneta_mediachange()
2334 mii_mediachg(sc->mii); in mvneta_mediachange()
2350 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_1000_T); in mvneta_get_media()
2352 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_100_TX); in mvneta_get_media()
2354 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_10_T); in mvneta_get_media()
2358 ifmr->ifm_active |= IFM_FDX; in mvneta_get_media()
2361 ifmr->ifm_status = IFM_AVALID; in mvneta_get_media()
2363 ifmr->ifm_status |= IFM_ACTIVE; in mvneta_get_media()
2374 if (!sc->phy_attached && !sc->use_inband_status) { in mvneta_mediastatus()
2375 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; in mvneta_mediastatus()
2381 if (sc->use_inband_status) { in mvneta_mediastatus()
2387 mii = sc->mii; in mvneta_mediastatus()
2390 ifmr->ifm_active = mii->mii_media_active; in mvneta_mediastatus()
2391 ifmr->ifm_status = mii->mii_media_status; in mvneta_mediastatus()
2448 running = (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0; in mvneta_update_media()
2452 sc->autoneg = (IFM_SUBTYPE(media) == IFM_AUTO); in mvneta_update_media()
2454 if (!sc->phy_attached || sc->use_inband_status) in mvneta_update_media()
2468 device_printf(sc->dev, in mvneta_update_media()
2469 "%s half-duplex unsupported\n", in mvneta_update_media()
2471 "1000Base-T" : in mvneta_update_media()
2472 "2500Base-T"); in mvneta_update_media()
2503 phy_linkup = (sc->mii->mii_media_status & in mvneta_adjust_link()
2506 if (sc->linkup != phy_linkup) in mvneta_adjust_link()
2514 if (sc->mvneta_media != sc->mii->mii_media_active) { in mvneta_adjust_link()
2515 sc->mvneta_media = sc->mii->mii_media_active; in mvneta_adjust_link()
2521 if (IFM_SUBTYPE(sc->mvneta_media) == IFM_1000_T || in mvneta_adjust_link()
2522 IFM_SUBTYPE(sc->mvneta_media) == IFM_2500_T) { in mvneta_adjust_link()
2524 } else if (IFM_SUBTYPE(sc->mvneta_media) == IFM_100_TX) in mvneta_adjust_link()
2527 if (sc->mvneta_media & IFM_FDX) in mvneta_adjust_link()
2542 if (sc->linkup == linkup) in mvneta_link_isr()
2551 device_printf(sc->dev, in mvneta_link_isr()
2552 "%s: link %s\n", if_name(sc->ifp), linkup ? "up" : "down"); in mvneta_link_isr()
2568 device_printf(sc->dev, in mvneta_linkupdate()
2569 "%s: link %s\n", if_name(sc->ifp), linkup ? "up" : "down"); in mvneta_linkupdate()
2582 if (sc->cf_lpi) in mvneta_update_eee()
2597 if (sc->cf_fc) { in mvneta_update_fc()
2617 if (!sc->phy_attached || !sc->use_inband_status) { in mvneta_linkup()
2624 mvneta_qflush(sc->ifp); in mvneta_linkup()
2626 sc->linkup = TRUE; in mvneta_linkup()
2627 if_link_state_change(sc->ifp, LINK_STATE_UP); in mvneta_linkup()
2637 if (!sc->phy_attached || !sc->use_inband_status) { in mvneta_linkdown()
2645 mvneta_qflush(sc->ifp); in mvneta_linkdown()
2646 sc->linkup = FALSE; in mvneta_linkdown()
2647 if_link_state_change(sc->ifp, LINK_STATE_DOWN); in mvneta_linkdown()
2655 if (sc->phy_attached) { in mvneta_linkreset()
2657 mii = LIST_FIRST(&sc->mii->mii_phys); in mvneta_linkreset()
2664 * Tx Subroutines
2672 struct mvneta_tx_ring *tx; in mvneta_tx_queue() local
2679 tx = MVNETA_TX_RING(sc, q); in mvneta_tx_queue()
2680 DASSERT(tx->used >= 0); in mvneta_tx_queue()
2681 DASSERT(tx->used <= MVNETA_TX_RING_CNT); in mvneta_tx_queue()
2683 ifp = sc->ifp; in mvneta_tx_queue()
2685 if (__predict_false(mbuf->m_flags & M_VLANTAG)) { in mvneta_tx_queue()
2686 mbuf = ether_vlanencap(mbuf, mbuf->m_pkthdr.ether_vtag); in mvneta_tx_queue()
2688 tx->drv_error++; in mvneta_tx_queue()
2692 mbuf->m_flags &= ~M_VLANTAG; in mvneta_tx_queue()
2696 if (__predict_false(mbuf->m_next != NULL && in mvneta_tx_queue()
2697 (mbuf->m_pkthdr.csum_flags & in mvneta_tx_queue()
2703 tx->drv_error++; in mvneta_tx_queue()
2712 txbuf = &tx->txbuf[tx->cpu]; in mvneta_tx_queue()
2713 error = bus_dmamap_load_mbuf_sg(sc->txmbuf_dtag, in mvneta_tx_queue()
2714 txbuf->dmap, mbuf, txsegs, &txnsegs, in mvneta_tx_queue()
2722 tx->drv_error++; in mvneta_tx_queue()
2731 || (txnsegs + tx->used) > MVNETA_TX_RING_CNT)) { in mvneta_tx_queue()
2737 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap); in mvneta_tx_queue()
2740 DASSERT(txbuf->m == NULL); in mvneta_tx_queue()
2743 txbuf->m = mbuf; in mvneta_tx_queue()
2744 bus_dmamap_sync(sc->txmbuf_dtag, txbuf->dmap, in mvneta_tx_queue()
2747 /* load to tx descriptors */ in mvneta_tx_queue()
2750 t = &tx->desc[tx->cpu]; in mvneta_tx_queue()
2751 t->command = 0; in mvneta_tx_queue()
2752 t->l4ichk = 0; in mvneta_tx_queue()
2753 t->flags = 0; in mvneta_tx_queue()
2756 t->command |= MVNETA_TX_CMD_W_PACKET_OFFSET(0); in mvneta_tx_queue()
2757 t->command |= MVNETA_TX_CMD_F; in mvneta_tx_queue()
2760 t->bufptr_pa = txsegs[i].ds_addr; in mvneta_tx_queue()
2761 t->bytecnt = txsegs[i].ds_len; in mvneta_tx_queue()
2762 tx->cpu = tx_counter_adv(tx->cpu, 1); in mvneta_tx_queue()
2764 tx->used++; in mvneta_tx_queue()
2769 t->command |= MVNETA_TX_CMD_L|MVNETA_TX_CMD_PADDING; in mvneta_tx_queue()
2771 bus_dmamap_sync(sc->tx_dtag, tx->desc_map, in mvneta_tx_queue()
2777 used -= 255; in mvneta_tx_queue()
2797 csum_flags = if_gethwassist(ifp) & m->m_pkthdr.csum_flags; in mvneta_tx_set_csumflag()
2800 switch (ntohs(eh->ether_type)) { in mvneta_tx_set_csumflag()
2807 if (ntohs(evh->evl_proto) == ETHERTYPE_VLAN) in mvneta_tx_set_csumflag()
2815 ip = (struct ip *)(m->m_data + ipoff); in mvneta_tx_set_csumflag()
2816 iphl = ip->ip_hl<<2; in mvneta_tx_set_csumflag()
2817 t->command |= MVNETA_TX_CMD_L3_IP4; in mvneta_tx_set_csumflag()
2819 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE; in mvneta_tx_set_csumflag()
2826 t->command |= MVNETA_TX_CMD_IP4_CHECKSUM; in mvneta_tx_set_csumflag()
2831 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG; in mvneta_tx_set_csumflag()
2832 t->command |= MVNETA_TX_CMD_L4_TCP; in mvneta_tx_set_csumflag()
2834 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG; in mvneta_tx_set_csumflag()
2835 t->command |= MVNETA_TX_CMD_L4_UDP; in mvneta_tx_set_csumflag()
2837 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE; in mvneta_tx_set_csumflag()
2839 t->l4ichk = 0; in mvneta_tx_set_csumflag()
2840 t->command |= MVNETA_TX_CMD_IP_HEADER_LEN(iphl >> 2); in mvneta_tx_set_csumflag()
2841 t->command |= MVNETA_TX_CMD_L3_OFFSET(ipoff); in mvneta_tx_set_csumflag()
2847 struct mvneta_tx_ring *tx; in mvneta_tx_queue_complete() local
2855 tx = MVNETA_TX_RING(sc, q); in mvneta_tx_queue_complete()
2856 if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED)) in mvneta_tx_queue_complete()
2863 if (tx->used == 0) in mvneta_tx_queue_complete()
2864 tx->queue_status = MVNETA_QUEUE_IDLE; in mvneta_tx_queue_complete()
2865 else if (tx->queue_status == MVNETA_QUEUE_WORKING && in mvneta_tx_queue_complete()
2866 ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG)) in mvneta_tx_queue_complete()
2867 tx->queue_hung = TRUE; in mvneta_tx_queue_complete()
2873 if_name(sc->ifp), q, ndesc); in mvneta_tx_queue_complete()
2876 bus_dmamap_sync(sc->tx_dtag, tx->desc_map, in mvneta_tx_queue_complete()
2880 t = &tx->desc[tx->dma]; in mvneta_tx_queue_complete()
2882 if (t->flags & MVNETA_TX_F_ES) in mvneta_tx_queue_complete()
2883 CTR3(KTR_SPARE2, "%s tx error queue %d desc %d", in mvneta_tx_queue_complete()
2884 if_name(sc->ifp), q, tx->dma); in mvneta_tx_queue_complete()
2886 txbuf = &tx->txbuf[tx->dma]; in mvneta_tx_queue_complete()
2887 if (__predict_true(txbuf->m != NULL)) { in mvneta_tx_queue_complete()
2888 DASSERT((t->command & MVNETA_TX_CMD_F) != 0); in mvneta_tx_queue_complete()
2889 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap); in mvneta_tx_queue_complete()
2890 m_freem(txbuf->m); in mvneta_tx_queue_complete()
2891 txbuf->m = NULL; in mvneta_tx_queue_complete()
2894 DASSERT((t->flags & MVNETA_TX_CMD_F) == 0); in mvneta_tx_queue_complete()
2895 tx->dma = tx_counter_adv(tx->dma, 1); in mvneta_tx_queue_complete()
2896 tx->used--; in mvneta_tx_queue_complete()
2898 DASSERT(tx->used >= 0); in mvneta_tx_queue_complete()
2899 DASSERT(tx->used <= MVNETA_TX_RING_CNT); in mvneta_tx_queue_complete()
2903 ndesc -= 255; in mvneta_tx_queue_complete()
2911 if_name(sc->ifp), q, tx->cpu, tx->dma, tx->used); in mvneta_tx_queue_complete()
2914 tx->watchdog_time = ticks; in mvneta_tx_queue_complete()
2916 if (tx->used == 0) in mvneta_tx_queue_complete()
2917 tx->queue_status = MVNETA_QUEUE_IDLE; in mvneta_tx_queue_complete()
2921 * Do a final TX complete when TX is idle.
2926 struct mvneta_tx_ring *tx; in mvneta_tx_drain() local
2930 * Handle trailing mbuf on TX queue. in mvneta_tx_drain()
2931 * Check is done locklessly to avoid TX path contention. in mvneta_tx_drain()
2934 tx = MVNETA_TX_RING(sc, q); in mvneta_tx_drain()
2935 if ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG_TXCOMP && in mvneta_tx_drain()
2936 tx->used > 0) { in mvneta_tx_drain()
2945 * Rx Subroutines
2982 processed -= 255; in mvneta_prxsu_update()
2999 struct mvneta_rx_ring *rx; in mvneta_rx_queue() local
3008 ifp = sc->ifp; in mvneta_rx_queue()
3009 rx = MVNETA_RX_RING(sc, q); in mvneta_rx_queue()
3012 if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED)) in mvneta_rx_queue()
3015 bus_dmamap_sync(sc->rx_dtag, rx->desc_map, in mvneta_rx_queue()
3020 ndma = rx_counter_adv(rx->dma, 1); in mvneta_rx_queue()
3021 mvneta_prefetch(&rx->desc[ndma]); in mvneta_rx_queue()
3022 mvneta_prefetch(&rx->rxbuf[ndma]); in mvneta_rx_queue()
3025 r = &rx->desc[rx->dma]; in mvneta_rx_queue()
3026 rxbuf = &rx->rxbuf[rx->dma]; in mvneta_rx_queue()
3027 m = rxbuf->m; in mvneta_rx_queue()
3028 rxbuf->m = NULL; in mvneta_rx_queue()
3030 bus_dmamap_sync(sc->rxbuf_dtag, rxbuf->dmap, in mvneta_rx_queue()
3032 bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap); in mvneta_rx_queue()
3038 DASSERT((r->status & (MVNETA_RX_F|MVNETA_RX_L)) == in mvneta_rx_queue()
3040 if (__predict_false((r->status & MVNETA_RX_ES) || in mvneta_rx_queue()
3041 (r->status & (MVNETA_RX_F|MVNETA_RX_L)) != in mvneta_rx_queue()
3049 pktlen = r->bytecnt - ETHER_CRC_LEN - MVNETA_HWHEADER_SIZE; in mvneta_rx_queue()
3050 pktbuf = (uint8_t *)rx->rxbuf_virt_addr[rx->dma] + MVNETA_PACKET_OFFSET + in mvneta_rx_queue()
3057 m->m_data = pktbuf; in mvneta_rx_queue()
3058 m->m_len = m->m_pkthdr.len = pktlen; in mvneta_rx_queue()
3059 m->m_pkthdr.rcvif = ifp; in mvneta_rx_queue()
3063 rx->dma = ndma; in mvneta_rx_queue()
3065 if (__predict_false(rx->lro_enabled && in mvneta_rx_queue()
3066 ((r->status & MVNETA_RX_L3_IP) != 0) && in mvneta_rx_queue()
3067 ((r->status & MVNETA_RX_L4_MASK) == MVNETA_RX_L4_TCP) && in mvneta_rx_queue()
3068 (m->m_pkthdr.csum_flags & in mvneta_rx_queue()
3071 if (rx->lro.lro_cnt != 0) { in mvneta_rx_queue()
3072 if (tcp_lro_rx(&rx->lro, m, 0) == 0) in mvneta_rx_queue()
3084 if(__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED)) in mvneta_rx_queue()
3096 rx->dma = ndma; in mvneta_rx_queue()
3116 tcp_lro_flush_all(&rx->lro); in mvneta_rx_queue()
3123 bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap); in mvneta_rx_buf_free()
3125 m_freem(rxbuf->m); in mvneta_rx_buf_free()
3131 struct mvneta_rx_ring *rx; in mvneta_rx_queue_refill() local
3141 rx = MVNETA_RX_RING(sc, q); in mvneta_rx_queue_refill()
3144 refill = MVNETA_RX_RING_CNT - ndesc; in mvneta_rx_queue_refill()
3146 CTR3(KTR_SPARE2, "%s:%u refill %u packets", if_name(sc->ifp), q, in mvneta_rx_queue_refill()
3153 rxbuf = &rx->rxbuf[rx->cpu]; in mvneta_rx_queue_refill()
3154 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, sc->rx_frame_size); in mvneta_rx_queue_refill()
3159 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; in mvneta_rx_queue_refill()
3161 error = bus_dmamap_load_mbuf_sg(sc->rxbuf_dtag, rxbuf->dmap, in mvneta_rx_queue_refill()
3164 KASSERT(1, ("Failed to load Rx mbuf DMA map")); in mvneta_rx_queue_refill()
3170 rxbuf->m = m; in mvneta_rx_queue_refill()
3171 r = &rx->desc[rx->cpu]; in mvneta_rx_queue_refill()
3172 r->bufptr_pa = segs.ds_addr; in mvneta_rx_queue_refill()
3173 rx->rxbuf_virt_addr[rx->cpu] = m->m_data; in mvneta_rx_queue_refill()
3175 rx->cpu = rx_counter_adv(rx->cpu, 1); in mvneta_rx_queue_refill()
3179 rx->needs_refill = TRUE; in mvneta_rx_queue_refill()
3183 rx->needs_refill = FALSE; in mvneta_rx_queue_refill()
3184 bus_dmamap_sync(sc->rx_dtag, rx->desc_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); in mvneta_rx_queue_refill()
3189 npkt -= 255; in mvneta_rx_queue_refill()
3204 if (__predict_false((r->status & in mvneta_rx_set_csumflag()
3209 if (__predict_true((r->status & MVNETA_RX_IP_HEADER_OK) == in mvneta_rx_set_csumflag()
3213 if (__predict_true((r->status & (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) == in mvneta_rx_set_csumflag()
3216 switch (r->status & MVNETA_RX_L4_MASK) { in mvneta_rx_set_csumflag()
3220 if (__predict_true((r->status & in mvneta_rx_set_csumflag()
3223 m->m_pkthdr.csum_data = htons(0xffff); in mvneta_rx_set_csumflag()
3231 m->m_pkthdr.csum_flags = csum_flags; in mvneta_rx_set_csumflag()
3251 ifp = sc->ifp; in mvneta_filter_setup()
3266 pxc |= MVNETA_PXC_RXQ(MVNETA_RX_QNUM_MAX-1); in mvneta_filter_setup()
3267 pxc |= MVNETA_PXC_RXQARP(MVNETA_RX_QNUM_MAX-1); in mvneta_filter_setup()
3268 pxc |= MVNETA_PXC_TCPQ(MVNETA_RX_QNUM_MAX-1); in mvneta_filter_setup()
3269 pxc |= MVNETA_PXC_UDPQ(MVNETA_RX_QNUM_MAX-1); in mvneta_filter_setup()
3270 pxc |= MVNETA_PXC_BPDUQ(MVNETA_RX_QNUM_MAX-1); in mvneta_filter_setup()
3291 i = sc->enaddr[5] & 0xf; /* last nibble */ in mvneta_filter_setup()
3315 sc = arg->sc; in sysctl_read_mib()
3318 if (arg->index < 0 || arg->index > MVNETA_PORTMIB_NOCOUNTER) in sysctl_read_mib()
3322 val = arg->counter; in sysctl_read_mib()
3359 struct mvneta_rx_ring *rx; in sysctl_set_queue_rxthtime() local
3364 rx = NULL; in sysctl_set_queue_rxthtime()
3368 if (arg->queue < 0 || arg->queue > MVNETA_RX_RING_CNT) in sysctl_set_queue_rxthtime()
3370 if (arg->rxtx != MVNETA_SYSCTL_RX) in sysctl_set_queue_rxthtime()
3373 sc = arg->sc; in sysctl_set_queue_rxthtime()
3379 mvneta_rx_lockq(sc, arg->queue); in sysctl_set_queue_rxthtime()
3380 rx = MVNETA_RX_RING(sc, arg->queue); in sysctl_set_queue_rxthtime()
3381 time_mvtclk = rx->queue_th_time; in sysctl_set_queue_rxthtime()
3382 time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / sc->clk_freq; in sysctl_set_queue_rxthtime()
3383 mvneta_rx_unlockq(sc, arg->queue); in sysctl_set_queue_rxthtime()
3391 mvneta_rx_lockq(sc, arg->queue); in sysctl_set_queue_rxthtime()
3393 /* update queue length (0[sec] - 1[sec]) */ in sysctl_set_queue_rxthtime()
3395 mvneta_rx_unlockq(sc, arg->queue); in sysctl_set_queue_rxthtime()
3399 time_mvtclk = sc->clk_freq * (uint64_t)time_us / (1000ULL * 1000ULL); in sysctl_set_queue_rxthtime()
3400 rx->queue_th_time = time_mvtclk; in sysctl_set_queue_rxthtime()
3401 reg = MVNETA_PRXITTH_RITT(rx->queue_th_time); in sysctl_set_queue_rxthtime()
3402 MVNETA_WRITE(sc, MVNETA_PRXITTH(arg->queue), reg); in sysctl_set_queue_rxthtime()
3403 mvneta_rx_unlockq(sc, arg->queue); in sysctl_set_queue_rxthtime()
3440 ctx = device_get_sysctl_ctx(sc->dev); in sysctl_mvneta_init()
3441 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); in sysctl_mvneta_init()
3443 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rx", in sysctl_mvneta_init()
3444 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "NETA RX"); in sysctl_mvneta_init()
3452 CTLFLAG_RW, &sc->cf_fc, 0, "flow control"); in sysctl_mvneta_init()
3454 CTLFLAG_RW, &sc->cf_lpi, 0, "Low Power Idle"); in sysctl_mvneta_init()
3461 struct mvneta_sysctl_mib *mib_arg = &sc->sysctl_mib[i]; in sysctl_mvneta_init()
3463 mib_arg->sc = sc; in sysctl_mvneta_init()
3464 mib_arg->index = i; in sysctl_mvneta_init()
3472 CTLFLAG_RD, &sc->counter_pdfc, "Port Rx Discard Frame Counter"); in sysctl_mvneta_init()
3474 CTLFLAG_RD, &sc->counter_pofc, "Port Overrun Frame Counter"); in sysctl_mvneta_init()
3476 CTLFLAG_RD, &sc->counter_watchdog, 0, "TX Watchdog Counter"); in sysctl_mvneta_init()
3483 rxarg = &sc->sysctl_rx_queue[q]; in sysctl_mvneta_init()
3485 rxarg->sc = sc; in sysctl_mvneta_init()
3486 rxarg->queue = q; in sysctl_mvneta_init()
3487 rxarg->rxtx = MVNETA_SYSCTL_RX; in sysctl_mvneta_init()
3489 /* hw.mvneta.mvneta[unit].rx.[queue] */ in sysctl_mvneta_init()
3495 /* hw.mvneta.mvneta[unit].rx.[queue].threshold_timer_us */ in sysctl_mvneta_init()
3513 val = MVNETA_READ_MIB(sc, mib->regnum); in mvneta_read_mib()
3514 if (mib->reg64) in mvneta_read_mib()
3515 val |= (uint64_t)MVNETA_READ_MIB(sc, mib->regnum + 4) << 32; in mvneta_read_mib()
3528 sc->sysctl_mib[i].counter = 0; in mvneta_clear_mib()
3531 sc->counter_pdfc = 0; in mvneta_clear_mib()
3533 sc->counter_pofc = 0; in mvneta_clear_mib()
3534 sc->counter_watchdog = 0; in mvneta_clear_mib()
3540 struct mvneta_tx_ring *tx; in mvneta_update_mib() local
3551 sc->sysctl_mib[i].counter += val; in mvneta_update_mib()
3554 if_inc_counter(sc->ifp, IFCOUNTER_IBYTES, val); in mvneta_update_mib()
3557 if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, val); in mvneta_update_mib()
3560 if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, val); in mvneta_update_mib()
3563 if_inc_counter(sc->ifp, IFCOUNTER_IMCASTS, val); in mvneta_update_mib()
3566 if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, val); in mvneta_update_mib()
3569 if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, val); in mvneta_update_mib()
3572 if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, val); in mvneta_update_mib()
3575 if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, val); in mvneta_update_mib()
3580 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, val); in mvneta_update_mib()
3586 sc->counter_pdfc += reg; in mvneta_update_mib()
3587 if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg); in mvneta_update_mib()
3589 sc->counter_pofc += reg; in mvneta_update_mib()
3590 if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg); in mvneta_update_mib()
3592 /* TX watchdog. */ in mvneta_update_mib()
3593 if (sc->counter_watchdog_mib > 0) { in mvneta_update_mib()
3594 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, sc->counter_watchdog_mib); in mvneta_update_mib()
3595 sc->counter_watchdog_mib = 0; in mvneta_update_mib()
3598 * TX driver errors: in mvneta_update_mib()
3599 * We do not take queue locks to not disrupt TX path. in mvneta_update_mib()
3601 * next mib update. We may also clear counter when TX path in mvneta_update_mib()
3606 tx = MVNETA_TX_RING(sc, i); in mvneta_update_mib()
3608 if (tx->drv_error > 0) { in mvneta_update_mib()
3609 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, tx->drv_error); in mvneta_update_mib()
3610 tx->drv_error = 0; in mvneta_update_mib()
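
Note: several matches above advance ring indices through helpers such as tx_counter_adv() and rx_counter_adv() (e.g. on tx->cpu, tx->dma, rx->cpu and rx->dma), but the helper bodies did not match the query and are not listed. As a rough sketch only, assuming the usual wrap-around advance over a fixed-size descriptor ring and not copied from if_mvneta.c, such a helper could look like this:

    /*
     * Illustrative sketch (assumption, not the driver's actual code):
     * advance a descriptor-ring index by n slots and wrap it back into
     * the range [0, MVNETA_TX_RING_CNT). The ring-size macro appears in
     * the matches above; the value used here is only a placeholder.
     */
    #define MVNETA_TX_RING_CNT	512	/* placeholder ring size for the example */

    static inline int
    tx_counter_adv(int ctr, int n)
    {
    	ctr += n;
    	while (ctr >= MVNETA_TX_RING_CNT)
    		ctr -= MVNETA_TX_RING_CNT;	/* wrap past the end of the ring */
    	return (ctr);
    }

The same wrap-around pattern would presumably apply to rx_counter_adv() with the Rx ring size.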