Lines Matching full:tx

111 /* Rx/Tx Queue Control */
159 /* Tx Subroutines */
296 "tx_good_oct", "Good Octets Tx"},
298 "tx_good_frame", "Good Frames Tx"},
302 "tx_mcast_frame", "Multicast Frames Tx"},
304 "tx_bcast_frame", "Broadcast Frames Tx"},
308 "fc_tx", "Flow Control Tx"},
416 * Create Tx DMA in mvneta_dma_create()
434 "Failed to create DMA tag for Tx descriptors.\n"); in mvneta_dma_create()
451 "Failed to create DMA tag for Tx mbufs.\n"); in mvneta_dma_create()
1011 struct mvneta_tx_ring *tx; in mvneta_portdown() local
1023 tx = MVNETA_TX_RING(sc, q); in mvneta_portdown()
1025 tx->queue_status = MVNETA_QUEUE_DISABLED; in mvneta_portdown()
1044 /* Wait for all Tx activity to terminate. */ in mvneta_portdown()
1060 "timeout for TX stopped. tqc 0x%x\n", reg); in mvneta_portdown()
1067 /* Wait until the Tx FIFO is empty */ in mvneta_portdown()
1072 "timeout for TX FIFO drained. ps0 0x%x\n", reg); in mvneta_portdown()
1103 /* Init TX/RX Queue Registers */ in mvneta_initreg()
1171 /* Port Configuration Extended: enable Tx CRC generation */ in mvneta_initreg()
1252 struct mvneta_tx_ring *tx; in mvneta_ring_alloc_tx_queue() local
1257 tx = MVNETA_TX_RING(sc, q); in mvneta_ring_alloc_tx_queue()
1258 mtx_init(&tx->ring_mtx, "mvneta_tx", NULL, MTX_DEF); in mvneta_ring_alloc_tx_queue()
1260 (void**)&(tx->desc), in mvneta_ring_alloc_tx_queue()
1262 &tx->desc_map); in mvneta_ring_alloc_tx_queue()
1263 if (error != 0 || tx->desc == NULL) in mvneta_ring_alloc_tx_queue()
1265 error = bus_dmamap_load(sc->tx_dtag, tx->desc_map, in mvneta_ring_alloc_tx_queue()
1266 tx->desc, in mvneta_ring_alloc_tx_queue()
1268 mvneta_dmamap_cb, &tx->desc_pa, BUS_DMA_NOWAIT); in mvneta_ring_alloc_tx_queue()
1273 tx->br = buf_ring_alloc(MVNETA_BUFRING_SIZE, M_DEVBUF, M_NOWAIT, in mvneta_ring_alloc_tx_queue()
1274 &tx->ring_mtx); in mvneta_ring_alloc_tx_queue()
1275 if (tx->br == NULL) { in mvneta_ring_alloc_tx_queue()
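mvneta_ring_alloc_tx_queue() allocates DMA-able memory for the descriptor ring, loads it to learn its bus address, and attaches a buf_ring(9) as the software transmit queue. A rough sketch of that sequence follows; the ring sizes, the callback, and the example_* names are assumptions, and buf_ring_alloc() is shown with the lock argument used in the listing above.

/*
 * Rough sketch of per-queue Tx allocation: DMA-able descriptor memory,
 * a loaded map to obtain the bus address, and a buf_ring.  Sizes and
 * example_* names are illustrative assumptions.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/buf_ring.h>
#include <machine/bus.h>

static void
example_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        bus_addr_t *pa = arg;

        if (error == 0 && nseg == 1)
                *pa = segs[0].ds_addr;          /* remember the ring's bus address */
}

static int
example_alloc_tx_queue(bus_dma_tag_t dtag, struct mtx *ring_mtx, void **desc,
    bus_dmamap_t *map, bus_addr_t *desc_pa, struct buf_ring **br)
{
        int error;

        error = bus_dmamem_alloc(dtag, desc, BUS_DMA_NOWAIT, map);
        if (error != 0 || *desc == NULL)
                return (ENOMEM);
        error = bus_dmamap_load(dtag, *map, *desc,
            512 * 32 /* assumed ring size in bytes */,
            example_dmamap_cb, desc_pa, BUS_DMA_NOWAIT);
        if (error != 0)
                return (error);
        /* Software transmit queue; lock argument as in the listing above. */
        *br = buf_ring_alloc(512, M_DEVBUF, M_NOWAIT, ring_mtx);
        if (*br == NULL)
                return (ENOMEM);
        return (0);
}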
1296 struct mvneta_tx_ring *tx; in mvneta_ring_dealloc_tx_queue() local
1304 tx = MVNETA_TX_RING(sc, q); in mvneta_ring_dealloc_tx_queue()
1306 if (tx->taskq != NULL) { in mvneta_ring_dealloc_tx_queue()
1308 while (taskqueue_cancel(tx->taskq, &tx->task, NULL) != 0) in mvneta_ring_dealloc_tx_queue()
1309 taskqueue_drain(tx->taskq, &tx->task); in mvneta_ring_dealloc_tx_queue()
1312 if (tx->br != NULL) in mvneta_ring_dealloc_tx_queue()
1313 drbr_free(tx->br, M_DEVBUF); in mvneta_ring_dealloc_tx_queue()
1318 txbuf = &tx->txbuf[i]; in mvneta_ring_dealloc_tx_queue()
1323 panic("%s: map busy for Tx descriptor (Q%d, %d)", in mvneta_ring_dealloc_tx_queue()
1330 if (tx->desc_pa != 0) in mvneta_ring_dealloc_tx_queue()
1331 bus_dmamap_unload(sc->tx_dtag, tx->desc_map); in mvneta_ring_dealloc_tx_queue()
1333 kva = (void *)tx->desc; in mvneta_ring_dealloc_tx_queue()
1335 bus_dmamem_free(sc->tx_dtag, tx->desc, tx->desc_map); in mvneta_ring_dealloc_tx_queue()
1337 if (mtx_name(&tx->ring_mtx) != NULL) in mvneta_ring_dealloc_tx_queue()
1338 mtx_destroy(&tx->ring_mtx); in mvneta_ring_dealloc_tx_queue()
1340 memset(tx, 0, sizeof(*tx)); in mvneta_ring_dealloc_tx_queue()
1405 struct mvneta_tx_ring *tx; in mvneta_ring_init_tx_queue() local
1412 tx = MVNETA_TX_RING(sc, q); in mvneta_ring_init_tx_queue()
1414 /* Tx handle */ in mvneta_ring_init_tx_queue()
1416 txbuf = &tx->txbuf[i]; in mvneta_ring_init_tx_queue()
1418 /* Tx handle needs a DMA map for bus_dmamap_load_mbuf() */ in mvneta_ring_init_tx_queue()
1423 "can't create dma map (tx ring %d)\n", i); in mvneta_ring_init_tx_queue()
1427 tx->dma = tx->cpu = 0; in mvneta_ring_init_tx_queue()
1428 tx->used = 0; in mvneta_ring_init_tx_queue()
1429 tx->drv_error = 0; in mvneta_ring_init_tx_queue()
1430 tx->queue_status = MVNETA_QUEUE_DISABLED; in mvneta_ring_init_tx_queue()
1431 tx->queue_hung = FALSE; in mvneta_ring_init_tx_queue()
1433 tx->ifp = sc->ifp; in mvneta_ring_init_tx_queue()
1434 tx->qidx = q; in mvneta_ring_init_tx_queue()
1435 TASK_INIT(&tx->task, 0, mvneta_tx_task, tx); in mvneta_ring_init_tx_queue()
1436 tx->taskq = taskqueue_create_fast("mvneta_tx_taskq", M_WAITOK, in mvneta_ring_init_tx_queue()
1437 taskqueue_thread_enqueue, &tx->taskq); in mvneta_ring_init_tx_queue()
1438 taskqueue_start_threads(&tx->taskq, 1, PI_NET, "%s: tx_taskq(%d)", in mvneta_ring_init_tx_queue()
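mvneta_ring_init_tx_queue() creates the per-queue DMA maps for the Tx handles and wires up the deferred-transmit task. A minimal sketch of the taskqueue(9) setup pattern visible above follows; everything other than the taskqueue(9) KPIs themselves (TASK_INIT, taskqueue_create_fast, taskqueue_thread_enqueue, taskqueue_start_threads) is a placeholder.

/*
 * Minimal sketch of per-queue deferred-transmit task setup.
 * struct example_tx_ring and example_tx_task() are placeholders.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/priority.h>
#include <sys/taskqueue.h>

struct example_tx_ring {
        struct task             task;
        struct taskqueue        *taskq;
        int                     qidx;
};

static void
example_tx_task(void *arg, int pending)
{
        /* A real handler would lock the ring and drain its buf_ring here. */
        (void)arg;
        (void)pending;
}

static void
example_init_tx_task(struct example_tx_ring *tx, const char *ifname, int q)
{
        tx->qidx = q;
        TASK_INIT(&tx->task, 0, example_tx_task, tx);
        /* One fast taskqueue per Tx queue, drained by a dedicated thread. */
        tx->taskq = taskqueue_create_fast("example_tx_taskq", M_WAITOK,
            taskqueue_thread_enqueue, &tx->taskq);
        taskqueue_start_threads(&tx->taskq, 1, PI_NET, "%s: tx_taskq(%d)",
            ifname, q);
}

Passing &tx->taskq as the enqueue context is what lets taskqueue_thread_enqueue() wake the queue's own thread once taskqueue_start_threads() has created it.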
1447 struct mvneta_tx_ring *tx; in mvneta_ring_flush_tx_queue() local
1451 tx = MVNETA_TX_RING(sc, q); in mvneta_ring_flush_tx_queue()
1454 /* Tx handle */ in mvneta_ring_flush_tx_queue()
1456 txbuf = &tx->txbuf[i]; in mvneta_ring_flush_tx_queue()
1463 tx->dma = tx->cpu = 0; in mvneta_ring_flush_tx_queue()
1464 tx->used = 0; in mvneta_ring_flush_tx_queue()
1486 * Rx/Tx Queue Control
1528 struct mvneta_tx_ring *tx; in mvneta_tx_queue_init() local
1533 tx = MVNETA_TX_RING(sc, q); in mvneta_tx_queue_init()
1534 DASSERT(tx->desc_pa != 0); in mvneta_tx_queue_init()
1537 MVNETA_WRITE(sc, MVNETA_PTXDQA(q), tx->desc_pa); in mvneta_tx_queue_init()
1544 DASSERT(MVNETA_READ(sc, MVNETA_PTXDQA(q)) == tx->desc_pa); in mvneta_tx_queue_init()
1584 struct mvneta_tx_ring *tx; in mvneta_tx_queue_enable() local
1587 tx = MVNETA_TX_RING(sc, q); in mvneta_tx_queue_enable()
1590 /* Enable Tx queue */ in mvneta_tx_queue_enable()
1593 tx->queue_status = MVNETA_QUEUE_IDLE; in mvneta_tx_queue_enable()
1594 tx->queue_hung = FALSE; in mvneta_tx_queue_enable()
1752 struct mvneta_tx_ring *tx; in mvneta_tick() local
1807 tx = MVNETA_TX_RING(sc, q); in mvneta_tick()
1809 if (tx->queue_hung && (fc_curr - fc_prev) == 0) in mvneta_tick()
1833 struct mvneta_tx_ring *tx; in mvneta_qflush() local
1840 tx = MVNETA_TX_RING(sc, q); in mvneta_qflush()
1842 while ((m = buf_ring_dequeue_sc(tx->br)) != NULL) in mvneta_qflush()
1854 struct mvneta_tx_ring *tx; in mvneta_tx_task() local
1858 tx = arg; in mvneta_tx_task()
1859 ifp = tx->ifp; in mvneta_tx_task()
1862 mvneta_tx_lockq(sc, tx->qidx); in mvneta_tx_task()
1863 error = mvneta_xmit_locked(sc, tx->qidx); in mvneta_tx_task()
1864 mvneta_tx_unlockq(sc, tx->qidx); in mvneta_tx_task()
1869 taskqueue_enqueue(tx->taskq, &tx->task); in mvneta_tx_task()
1876 struct mvneta_tx_ring *tx; in mvneta_xmitfast_locked() local
1881 tx = MVNETA_TX_RING(sc, q); in mvneta_xmitfast_locked()
1887 if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED)) { in mvneta_xmitfast_locked()
1894 if (__predict_true(tx->used > MVNETA_TX_RECLAIM_COUNT)) in mvneta_xmitfast_locked()
1898 if (__predict_false(tx->used > in mvneta_xmitfast_locked()
1910 tx->watchdog_time = ticks; in mvneta_xmitfast_locked()
1911 tx->queue_status = MVNETA_QUEUE_WORKING; in mvneta_xmitfast_locked()
1921 struct mvneta_tx_ring *tx; in mvneta_transmit() local
1933 tx = MVNETA_TX_RING(sc, q); in mvneta_transmit()
1936 if (buf_ring_full(tx->br)) { in mvneta_transmit()
1946 if (buf_ring_empty(tx->br) && mvneta_tx_trylockq(sc, q) != 0) { in mvneta_transmit()
1958 error = drbr_enqueue(ifp, tx->br, m); in mvneta_transmit()
1962 taskqueue_enqueue(tx->taskq, &tx->task); in mvneta_transmit()
1970 struct mvneta_tx_ring *tx; in mvneta_xmit_locked() local
1976 tx = MVNETA_TX_RING(sc, q); in mvneta_xmit_locked()
1979 while ((m = drbr_peek(ifp, tx->br)) != NULL) { in mvneta_xmit_locked()
1983 drbr_putback(ifp, tx->br, m); in mvneta_xmit_locked()
1985 drbr_advance(ifp, tx->br); in mvneta_xmit_locked()
1988 drbr_advance(ifp, tx->br); in mvneta_xmit_locked()
1998 struct mvneta_tx_ring *tx; in mvneta_start() local
2002 tx = MVNETA_TX_RING(sc, 0); in mvneta_start()
2009 taskqueue_enqueue(tx->taskq, &tx->task); in mvneta_start()
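Taken together, mvneta_transmit(), mvneta_xmit_locked(), mvneta_tx_task() and mvneta_start() follow the common buf_ring(9)/drbr transmit pattern: the if_transmit handler enqueues the mbuf on the per-queue buf_ring and either drains it immediately when the ring lock is available or defers to the per-queue task, while the locked drain loop peeks at the head, tries to hand it to the hardware, and advances or puts it back. A condensed, hedged sketch of that pattern is below; the example_* names, the ring structure, and the hardware-enqueue stub are placeholders, and the real driver's ordering and error handling differ in detail.

/*
 * Condensed sketch of a drbr/buf_ring transmit path.  Placeholder types
 * and example_hw_enqueue() stand in for driver specifics.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/buf_ring.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_var.h>

struct example_tx_ring {
        struct buf_ring         *br;
        struct mtx              ring_mtx;
        struct taskqueue        *taskq;
        struct task             task;
};

/* Placeholder: map *m and fill hardware descriptors; may free/replace *m. */
static int
example_hw_enqueue(struct ifnet *ifp, struct mbuf **m)
{
        (void)ifp;
        (void)m;
        return (0);
}

static int
example_xmit_locked(struct ifnet *ifp, struct example_tx_ring *tx)
{
        struct mbuf *m;
        int error = 0;

        mtx_assert(&tx->ring_mtx, MA_OWNED);
        while ((m = drbr_peek(ifp, tx->br)) != NULL) {
                error = example_hw_enqueue(ifp, &m);
                if (error != 0) {
                        if (m != NULL)
                                drbr_putback(ifp, tx->br, m);   /* keep it for a retry */
                        else
                                drbr_advance(ifp, tx->br);      /* mbuf was consumed */
                        break;
                }
                drbr_advance(ifp, tx->br);                      /* committed to hardware */
        }
        return (error);
}

static int
example_transmit(struct ifnet *ifp, struct example_tx_ring *tx, struct mbuf *m)
{
        int error;

        error = drbr_enqueue(ifp, tx->br, m);
        if (error != 0)
                return (error);
        /* Drain now if the ring lock is free; otherwise let the task do it. */
        if (mtx_trylock(&tx->ring_mtx)) {
                example_xmit_locked(ifp, tx);
                mtx_unlock(&tx->ring_mtx);
        } else
                taskqueue_enqueue(tx->taskq, &tx->task);
        return (0);
}

The trylock keeps the if_transmit caller from blocking on a busy ring: whichever context already holds the lock will pick up the newly enqueued mbuf, and the taskqueue covers the case where nobody else is draining.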
2664 * Tx Subroutines
2672 struct mvneta_tx_ring *tx; in mvneta_tx_queue() local
2679 tx = MVNETA_TX_RING(sc, q); in mvneta_tx_queue()
2680 DASSERT(tx->used >= 0); in mvneta_tx_queue()
2681 DASSERT(tx->used <= MVNETA_TX_RING_CNT); in mvneta_tx_queue()
2688 tx->drv_error++; in mvneta_tx_queue()
2703 tx->drv_error++; in mvneta_tx_queue()
2712 txbuf = &tx->txbuf[tx->cpu]; in mvneta_tx_queue()
2722 tx->drv_error++; in mvneta_tx_queue()
2731 || (txnsegs + tx->used) > MVNETA_TX_RING_CNT)) { in mvneta_tx_queue()
2747 /* load to tx descriptors */ in mvneta_tx_queue()
2750 t = &tx->desc[tx->cpu]; in mvneta_tx_queue()
2762 tx->cpu = tx_counter_adv(tx->cpu, 1); in mvneta_tx_queue()
2764 tx->used++; in mvneta_tx_queue()
2771 bus_dmamap_sync(sc->tx_dtag, tx->desc_map, in mvneta_tx_queue()
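In mvneta_tx_queue() the outgoing mbuf chain is loaded with bus_dmamap_load_mbuf_sg(), one descriptor is written per DMA segment while the producer index (tx->cpu) advances, and the ring is synced before the hardware is told about the new frames. A rough sketch of that mapping-and-fill step follows; the descriptor layout, flag values, and example_* names are invented for illustration, and only the busdma calls mirror real KPIs.

/*
 * Rough sketch of mapping an mbuf chain and filling Tx descriptors.
 * struct example_tx_desc and the command flag are invented.
 */
#include <sys/param.h>
#include <sys/mbuf.h>
#include <machine/bus.h>

#define EXAMPLE_TX_SEGS         16      /* assumed per-packet segment limit */
#define EXAMPLE_CMD_LAST        0x1     /* invented "last segment" flag */

struct example_tx_desc {
        uint32_t        cmd;
        uint32_t        bufsize;
        uint64_t        bufptr;
};

static int
example_map_and_fill(bus_dma_tag_t mtag, bus_dmamap_t map, struct mbuf *m,
    struct example_tx_desc *ring, int *cpu_idx, int ring_cnt)
{
        bus_dma_segment_t segs[EXAMPLE_TX_SEGS];
        int error, i, nsegs;

        error = bus_dmamap_load_mbuf_sg(mtag, map, m, segs, &nsegs,
            BUS_DMA_NOWAIT);
        if (error != 0)
                return (error);         /* caller would bump drv_error */

        for (i = 0; i < nsegs; i++) {
                struct example_tx_desc *t = &ring[*cpu_idx];

                t->bufptr = segs[i].ds_addr;            /* DMA address of the segment */
                t->bufsize = segs[i].ds_len;
                t->cmd = (i == nsegs - 1) ? EXAMPLE_CMD_LAST : 0;
                *cpu_idx = (*cpu_idx + 1) % ring_cnt;   /* advance producer index */
        }
        /* Make the buffer data visible to the device before kicking the queue. */
        bus_dmamap_sync(mtag, map, BUS_DMASYNC_PREWRITE);
        return (0);
}

The listing above additionally shows the driver rejecting a frame when the mapped segment count would overrun the ring (the txnsegs + tx->used check); that bookkeeping is omitted from the sketch.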
2847 struct mvneta_tx_ring *tx; in mvneta_tx_queue_complete() local
2855 tx = MVNETA_TX_RING(sc, q); in mvneta_tx_queue_complete()
2856 if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED)) in mvneta_tx_queue_complete()
2863 if (tx->used == 0) in mvneta_tx_queue_complete()
2864 tx->queue_status = MVNETA_QUEUE_IDLE; in mvneta_tx_queue_complete()
2865 else if (tx->queue_status == MVNETA_QUEUE_WORKING && in mvneta_tx_queue_complete()
2866 ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG)) in mvneta_tx_queue_complete()
2867 tx->queue_hung = TRUE; in mvneta_tx_queue_complete()
2876 bus_dmamap_sync(sc->tx_dtag, tx->desc_map, in mvneta_tx_queue_complete()
2880 t = &tx->desc[tx->dma]; in mvneta_tx_queue_complete()
2883 CTR3(KTR_SPARE2, "%s tx error queue %d desc %d", in mvneta_tx_queue_complete()
2884 if_name(sc->ifp), q, tx->dma); in mvneta_tx_queue_complete()
2886 txbuf = &tx->txbuf[tx->dma]; in mvneta_tx_queue_complete()
2895 tx->dma = tx_counter_adv(tx->dma, 1); in mvneta_tx_queue_complete()
2896 tx->used--; in mvneta_tx_queue_complete()
2898 DASSERT(tx->used >= 0); in mvneta_tx_queue_complete()
2899 DASSERT(tx->used <= MVNETA_TX_RING_CNT); in mvneta_tx_queue_complete()
2911 if_name(sc->ifp), q, tx->cpu, tx->dma, tx->used); in mvneta_tx_queue_complete()
2914 tx->watchdog_time = ticks; in mvneta_tx_queue_complete()
2916 if (tx->used == 0) in mvneta_tx_queue_complete()
2917 tx->queue_status = MVNETA_QUEUE_IDLE; in mvneta_tx_queue_complete()
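mvneta_tx_queue_complete() walks the descriptors the hardware has finished, syncs and unloads each buffer map, frees the mbuf, and advances the consumer index (tx->dma) with tx_counter_adv() while decrementing tx->used. A hedged sketch of such a reclaim loop follows; example_counter_adv() is only an assumption about what a modular index helper looks like, and the buffer structure is invented.

/*
 * Sketch of a Tx completion/reclaim loop with a modular index helper.
 * The structures and the helper are illustrative assumptions.
 */
#include <sys/param.h>
#include <sys/mbuf.h>
#include <machine/bus.h>

#define EXAMPLE_TX_RING_CNT     512     /* hypothetical ring size */

struct example_txbuf {
        struct mbuf     *m;
        bus_dmamap_t    dmap;
};

static __inline int
example_counter_adv(int ctr, int n)
{
        return ((ctr + n) % EXAMPLE_TX_RING_CNT);       /* wrap around the ring */
}

static void
example_tx_complete(bus_dma_tag_t mtag, struct example_txbuf *bufs,
    int *dma_idx, int *used, int ndesc)
{
        struct example_txbuf *txbuf;

        while (ndesc-- > 0 && *used > 0) {
                txbuf = &bufs[*dma_idx];
                if (txbuf->m != NULL) {
                        bus_dmamap_sync(mtag, txbuf->dmap, BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(mtag, txbuf->dmap);   /* release the mapping */
                        m_freem(txbuf->m);                      /* free the sent mbuf */
                        txbuf->m = NULL;
                }
                *dma_idx = example_counter_adv(*dma_idx, 1);    /* advance consumer index */
                (*used)--;
        }
}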
2921 * Do a final TX complete when TX is idle.
2926 struct mvneta_tx_ring *tx; in mvneta_tx_drain() local
2930 * Handle trailing mbuf on TX queue. in mvneta_tx_drain()
2931 * The check is done locklessly to avoid TX path contention. in mvneta_tx_drain()
2934 tx = MVNETA_TX_RING(sc, q); in mvneta_tx_drain()
2935 if ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG_TXCOMP && in mvneta_tx_drain()
2936 tx->used > 0) { in mvneta_tx_drain()
3476 CTLFLAG_RD, &sc->counter_watchdog, 0, "TX Watchdog Counter"); in sysctl_mvneta_init()
3540 struct mvneta_tx_ring *tx; in mvneta_update_mib() local
3592 /* TX watchdog. */ in mvneta_update_mib()
3598 * TX driver errors: in mvneta_update_mib()
3599 * We do not take queue locks so as not to disrupt the TX path. in mvneta_update_mib()
3601 * next mib update. We may also clear counter when TX path in mvneta_update_mib()
3606 tx = MVNETA_TX_RING(sc, i); in mvneta_update_mib()
3608 if (tx->drv_error > 0) { in mvneta_update_mib()
3609 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, tx->drv_error); in mvneta_update_mib()
3610 tx->drv_error = 0; in mvneta_update_mib()