Lines Matching full:tx

10  * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
100 /* handle TX/RX queue 0 interrupt */ in tsnep_irq()
116 /* handle TX/RX queue interrupt */ in tsnep_irq_txrx()
278 static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx) in tsnep_tx_ring_cleanup() argument
280 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_ring_cleanup()
283 memset(tx->entry, 0, sizeof(tx->entry)); in tsnep_tx_ring_cleanup()
286 if (tx->page[i]) { in tsnep_tx_ring_cleanup()
287 dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i], in tsnep_tx_ring_cleanup()
288 tx->page_dma[i]); in tsnep_tx_ring_cleanup()
289 tx->page[i] = NULL; in tsnep_tx_ring_cleanup()
290 tx->page_dma[i] = 0; in tsnep_tx_ring_cleanup()
295 static int tsnep_tx_ring_create(struct tsnep_tx *tx) in tsnep_tx_ring_create() argument
297 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_ring_create()
304 tx->page[i] = in tsnep_tx_ring_create()
305 dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i], in tsnep_tx_ring_create()
307 if (!tx->page[i]) { in tsnep_tx_ring_create()
312 entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j]; in tsnep_tx_ring_create()
314 (((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j); in tsnep_tx_ring_create()
317 entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j; in tsnep_tx_ring_create()
322 entry = &tx->entry[i]; in tsnep_tx_ring_create()
323 next_entry = &tx->entry[(i + 1) & TSNEP_RING_MASK]; in tsnep_tx_ring_create()
330 tsnep_tx_ring_cleanup(tx); in tsnep_tx_ring_create()
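
tsnep_tx_ring_create() builds the descriptor ring out of DMA-coherent pages: each page is carved into TSNEP_RING_ENTRIES_PER_PAGE descriptors, every entry records the CPU and bus address of its descriptor, and entry i is chained to entry (i + 1) & TSNEP_RING_MASK. Below is a minimal userspace sketch of that index arithmetic; the concrete sizes (4 KiB pages, 256-byte descriptors, 256 ring entries) are illustrative assumptions, not the driver's authoritative values, and calloc() merely stands in for dma_alloc_coherent().

/* sketch of the tsnep_tx_ring_create() layout math, userspace only */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE_SK		4096			/* assumed page size */
#define DESC_SIZE_SK		256			/* assumed TSNEP_DESC_SIZE */
#define RING_SIZE_SK		256			/* assumed TSNEP_RING_SIZE */
#define RING_MASK_SK		(RING_SIZE_SK - 1)
#define ENTRIES_PER_PAGE_SK	(PAGE_SIZE_SK / DESC_SIZE_SK)
#define PAGE_COUNT_SK		(RING_SIZE_SK / ENTRIES_PER_PAGE_SK)

struct entry_sk {
	void *desc;		/* CPU address of the descriptor */
	uint64_t desc_dma;	/* bus address of the descriptor */
};

int main(void)
{
	static struct entry_sk entry[RING_SIZE_SK];
	void *page[PAGE_COUNT_SK];
	uint64_t page_dma[PAGE_COUNT_SK];
	int i, j;

	for (i = 0; i < PAGE_COUNT_SK; i++) {
		/* stands in for dma_alloc_coherent(dmadev, PAGE_SIZE, ...) */
		page[i] = calloc(1, PAGE_SIZE_SK);
		page_dma[i] = 0x80000000ull + (uint64_t)i * PAGE_SIZE_SK;

		for (j = 0; j < ENTRIES_PER_PAGE_SK; j++) {
			struct entry_sk *e = &entry[ENTRIES_PER_PAGE_SK * i + j];

			e->desc = (uint8_t *)page[i] + DESC_SIZE_SK * j;
			e->desc_dma = page_dma[i] + DESC_SIZE_SK * j;
		}
	}

	/* every entry is followed by (index + 1) & RING_MASK, so the last
	 * entry wraps back to entry 0
	 */
	printf("entry 0:   desc_dma=0x%llx next=%d\n",
	       (unsigned long long)entry[0].desc_dma, (0 + 1) & RING_MASK_SK);
	printf("entry %d: desc_dma=0x%llx next=%d\n",
	       RING_SIZE_SK - 1,
	       (unsigned long long)entry[RING_SIZE_SK - 1].desc_dma,
	       RING_SIZE_SK & RING_MASK_SK);

	for (i = 0; i < PAGE_COUNT_SK; i++)
		free(page[i]);
	return 0;
}
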
334 static void tsnep_tx_init(struct tsnep_tx *tx) in tsnep_tx_init() argument
338 dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; in tsnep_tx_init()
339 iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW); in tsnep_tx_init()
340 iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH); in tsnep_tx_init()
341 tx->write = 0; in tsnep_tx_init()
342 tx->read = 0; in tsnep_tx_init()
343 tx->owner_counter = 1; in tsnep_tx_init()
344 tx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_tx_init()
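
tsnep_tx_init() hands the descriptor ring base to the hardware as two 32-bit writes (TSNEP_TX_DESC_ADDR_LOW/HIGH), ORing in the reset-owner-counter flag, and resets the software state: empty ring, owner counter starting at 1, next owner-counter bump at the last ring index. The sketch below models DMA_ADDR_LOW()/DMA_ADDR_HIGH() as a plain mask and shift and uses placeholder values for the flag bit and ring size; both are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

/* assumed shape of DMA_ADDR_LOW()/DMA_ADDR_HIGH() */
#define ADDR_LOW(dma)	((uint32_t)((dma) & 0xffffffffu))
#define ADDR_HIGH(dma)	((uint32_t)((dma) >> 32))

int main(void)
{
	uint64_t desc_dma = 0x00000001c0001000ull;	/* example ring base */
	uint64_t reset_owner_counter = 0x1;		/* placeholder for TSNEP_RESET_OWNER_COUNTER */
	uint64_t dma = desc_dma | reset_owner_counter;

	/* the driver writes these to TSNEP_TX_DESC_ADDR_LOW/HIGH via iowrite32() */
	printf("DESC_ADDR_LOW  = 0x%08x\n", ADDR_LOW(dma));
	printf("DESC_ADDR_HIGH = 0x%08x\n", ADDR_HIGH(dma));

	/* software state after init */
	unsigned int write = 0, read = 0, owner_counter = 1;
	int increment_owner_counter = 256 - 1;		/* assumed TSNEP_RING_SIZE of 256 */

	printf("write=%u read=%u owner_counter=%u increment_owner_counter=%d\n",
	       write, read, owner_counter, increment_owner_counter);
	return 0;
}
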
347 static void tsnep_tx_enable(struct tsnep_tx *tx) in tsnep_tx_enable() argument
351 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_enable()
358 static void tsnep_tx_disable(struct tsnep_tx *tx, struct napi_struct *napi) in tsnep_tx_disable() argument
363 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_disable()
369 /* wait until TX is done in hardware */ in tsnep_tx_disable()
370 readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val, in tsnep_tx_disable()
374 /* wait until TX is also done in software */ in tsnep_tx_disable()
375 while (READ_ONCE(tx->read) != tx->write) { in tsnep_tx_disable()
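
tsnep_tx_disable() waits in two stages: readx_poll_timeout() polls TSNEP_CONTROL until the hardware reports the TX engine idle, and a second loop waits until the software completion index (tx->read) has caught up with tx->write. The sketch below reproduces the poll-with-timeout pattern in userspace; read_control() is a stub for the ioread32() of TSNEP_CONTROL and the "busy" bit is an assumption.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* stub for ioread32(tx->addr + TSNEP_CONTROL); the "hardware" goes idle
 * after a few polls
 */
static unsigned int read_control(void)
{
	static int polls;

	return (++polls < 5) ? 0x1 /* assumed TX busy bit */ : 0x0;
}

/* rough userspace equivalent of readx_poll_timeout() */
static int poll_timeout_us(bool (*done)(unsigned int), unsigned int *val,
			   long sleep_us, long timeout_us)
{
	struct timespec start, now;
	long elapsed;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		*val = read_control();
		if (done(*val))
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		elapsed = (now.tv_sec - start.tv_sec) * 1000000L +
			  (now.tv_nsec - start.tv_nsec) / 1000L;
		if (elapsed > timeout_us)
			return -1;
		usleep(sleep_us);
	}
}

static bool tx_idle(unsigned int val)
{
	return (val & 0x1) == 0;
}

int main(void)
{
	unsigned int val;

	if (poll_timeout_us(tx_idle, &val, 100, 100000))
		printf("timed out, CONTROL=0x%x\n", val);
	else
		printf("TX idle, CONTROL=0x%x\n", val);
	return 0;
}
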
381 static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length, in tsnep_tx_activate() argument
384 struct tsnep_tx_entry *entry = &tx->entry[index]; in tsnep_tx_activate()
416 if (index == tx->increment_owner_counter) { in tsnep_tx_activate()
417 tx->owner_counter++; in tsnep_tx_activate()
418 if (tx->owner_counter == 4) in tsnep_tx_activate()
419 tx->owner_counter = 1; in tsnep_tx_activate()
420 tx->increment_owner_counter--; in tsnep_tx_activate()
421 if (tx->increment_owner_counter < 0) in tsnep_tx_activate()
422 tx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_tx_activate()
425 (tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & in tsnep_tx_activate()
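
The owner counter that tsnep_tx_activate() folds into each descriptor cycles through the values 1..3 and is bumped whenever the activated index reaches tx->increment_owner_counter, which itself walks backwards through the ring and wraps to TSNEP_RING_SIZE - 1 (the usual generation-counter trick for telling freshly written descriptors from stale ones across ring wraps). A standalone sketch of that progression, with a ring size of 256 assumed:

#include <stdio.h>

#define RING_SIZE	256		/* assumed TSNEP_RING_SIZE */
#define RING_MASK	(RING_SIZE - 1)

int main(void)
{
	int owner_counter = 1;			/* tsnep_tx_init() starts at 1 */
	int increment_owner_counter = RING_SIZE - 1;
	int write = 0;
	int n;

	for (n = 0; n < 4 * RING_SIZE; n++) {
		int index = write;

		/* same bookkeeping as tsnep_tx_activate() */
		if (index == increment_owner_counter) {
			owner_counter++;
			if (owner_counter == 4)
				owner_counter = 1;
			increment_owner_counter--;
			if (increment_owner_counter < 0)
				increment_owner_counter = RING_SIZE - 1;
			printf("activation %4d: owner counter -> %d, next bump at index %d\n",
			       n, owner_counter, increment_owner_counter);
		}
		write = (write + 1) & RING_MASK;
	}
	return 0;
}
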
442 static int tsnep_tx_desc_available(struct tsnep_tx *tx) in tsnep_tx_desc_available() argument
444 if (tx->read <= tx->write) in tsnep_tx_desc_available()
445 return TSNEP_RING_SIZE - tx->write + tx->read - 1; in tsnep_tx_desc_available()
447 return tx->read - tx->write - 1; in tsnep_tx_desc_available()
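
tsnep_tx_desc_available() is the standard "keep one slot empty" circular-buffer computation: when read trails or equals write, the free space is the ring size minus the in-flight span minus one; when read is ahead (the write index has wrapped), it is the gap between them minus one. A quick standalone check of both branches, with the ring size assumed to be 256:

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 256	/* assumed TSNEP_RING_SIZE */

static int desc_available(int read, int write)
{
	if (read <= write)
		return RING_SIZE - write + read - 1;
	else
		return read - write - 1;
}

int main(void)
{
	assert(desc_available(0, 0) == RING_SIZE - 1);	/* empty ring */
	assert(desc_available(10, 9) == 0);		/* completely full */
	assert(desc_available(100, 10) == 89);		/* write has wrapped */

	printf("empty=%d wrapped=%d\n",
	       desc_available(0, 0), desc_available(200, 50));
	return 0;
}

The "minus one" keeps write from ever catching up with read, so a full ring and an empty ring stay distinguishable.
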
467 memcpy(&entry->desc->tx, fragdata, len); in tsnep_tx_map_frag()
472 memcpy(&entry->desc->tx, fragdata + skb_frag_off(frag), in tsnep_tx_map_frag()
483 static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count, in tsnep_tx_map() argument
486 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_map()
494 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; in tsnep_tx_map()
506 memcpy(&entry->desc->tx, skb->data, len); in tsnep_tx_map()
525 entry->desc->tx = __cpu_to_le64(dma); in tsnep_tx_map()
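
tsnep_tx_map() and tsnep_tx_map_frag() show two ways of handing data to the hardware: small pieces are memcpy()ed straight into the descriptor (the copies into &entry->desc->tx above), larger ones are DMA mapped and only their bus address is written into the descriptor. The sketch below models that decision; the inline threshold and the descriptor layout are hypothetical stand-ins, as the listing does not show the driver's actual cut-off.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define INLINE_MAX 128	/* hypothetical inline-copy threshold */

struct desc_sk {
	uint64_t tx;			/* DMA address, or start of inline data */
	uint8_t data_after[248];	/* illustrative space following the address field */
};

/* returns 1 if the payload was inlined, 0 if it would be DMA mapped */
static int map_piece(struct desc_sk *desc, const void *buf, size_t len,
		     uint64_t fake_dma)
{
	if (len <= INLINE_MAX) {
		/* small payload: copy directly into the descriptor */
		memcpy(&desc->tx, buf, len);
		return 1;
	}
	/* large payload: hardware fetches it through the DMA address */
	desc->tx = fake_dma;	/* __cpu_to_le64() in the driver */
	return 0;
}

int main(void)
{
	struct desc_sk desc = { 0 };
	char small[64] = "short frame";
	char big[1500] = { 0 };

	printf("small: inlined=%d\n", map_piece(&desc, small, sizeof(small), 0));
	printf("big:   inlined=%d\n",
	       map_piece(&desc, big, sizeof(big), 0x90001000ull));
	return 0;
}

Inlining avoids a map/unmap round trip for payloads that fit into the descriptor anyway.
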
534 static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count) in tsnep_tx_unmap() argument
536 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_unmap()
542 entry = &tx->entry[(index + i) & TSNEP_RING_MASK]; in tsnep_tx_unmap()
564 struct tsnep_tx *tx) in tsnep_xmit_frame_ring() argument
576 if (tsnep_tx_desc_available(tx) < count) { in tsnep_xmit_frame_ring()
580 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index); in tsnep_xmit_frame_ring()
585 entry = &tx->entry[tx->write]; in tsnep_xmit_frame_ring()
589 tx->adapter->hwtstamp_config.tx_type == HWTSTAMP_TX_ON) { in tsnep_xmit_frame_ring()
594 retval = tsnep_tx_map(skb, tx, count, do_tstamp); in tsnep_xmit_frame_ring()
596 tsnep_tx_unmap(tx, tx->write, count); in tsnep_xmit_frame_ring()
600 tx->dropped++; in tsnep_xmit_frame_ring()
607 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, in tsnep_xmit_frame_ring()
609 tx->write = (tx->write + count) & TSNEP_RING_MASK; in tsnep_xmit_frame_ring()
616 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); in tsnep_xmit_frame_ring()
618 if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) { in tsnep_xmit_frame_ring()
620 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index); in tsnep_xmit_frame_ring()
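
tsnep_xmit_frame_ring() needs one descriptor for the linear part of the skb plus, in essence, one per page fragment; if fewer are free it stops the subqueue and reports busy, and after queuing it stops the subqueue again as soon as fewer than MAX_SKB_FRAGS + 1 descriptors remain, so a maximally fragmented skb always fits while the queue is running. A standalone model of that bookkeeping (ring size and descriptors-per-skb are illustrative assumptions):

#include <stdio.h>

#define RING_SIZE	256	/* assumed TSNEP_RING_SIZE */
#define RING_MASK	(RING_SIZE - 1)
#define MAX_SKB_FRAGS	17	/* typical kernel value, configuration dependent */

static int desc_available(int read, int write)
{
	return (read <= write) ? RING_SIZE - write + read - 1
			       : read - write - 1;
}

int main(void)
{
	int read = 0, write = 0;
	int nr_frags = 3;
	int count = 1 + nr_frags;	/* linear part + one descriptor per fragment */

	if (desc_available(read, write) < count) {
		/* driver: netif_stop_subqueue() and NETDEV_TX_BUSY */
		printf("ring full: stop queue, retry later\n");
		return 0;
	}

	/* descriptors would be mapped and activated here */
	write = (write + count) & RING_MASK;

	if (desc_available(read, write) < MAX_SKB_FRAGS + 1)
		printf("low on descriptors: stop queue until TX completion\n");
	else
		printf("queued %d descriptors, %d still free\n",
		       count, desc_available(read, write));
	return 0;
}
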
626 static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx, in tsnep_xdp_tx_map() argument
629 struct device *dmadev = tx->adapter->dmadev; in tsnep_xdp_tx_map()
642 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; in tsnep_xdp_tx_map()
668 entry->desc->tx = __cpu_to_le64(dma); in tsnep_xdp_tx_map()
683 struct tsnep_tx *tx, u32 type) in tsnep_xdp_xmit_frame_ring() argument
693 /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS in tsnep_xdp_xmit_frame_ring()
694 * will be available for normal TX path and queue is stopped there if in tsnep_xdp_xmit_frame_ring()
697 if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count)) in tsnep_xdp_xmit_frame_ring()
700 entry = &tx->entry[tx->write]; in tsnep_xdp_xmit_frame_ring()
703 retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, type); in tsnep_xdp_xmit_frame_ring()
705 tsnep_tx_unmap(tx, tx->write, count); in tsnep_xdp_xmit_frame_ring()
708 tx->dropped++; in tsnep_xdp_xmit_frame_ring()
715 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, in tsnep_xdp_xmit_frame_ring()
717 tx->write = (tx->write + count) & TSNEP_RING_MASK; in tsnep_xdp_xmit_frame_ring()
725 static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx) in tsnep_xdp_xmit_flush() argument
727 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); in tsnep_xdp_xmit_flush()
732 struct netdev_queue *tx_nq, struct tsnep_tx *tx, in tsnep_xdp_xmit_back() argument
750 xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, type); in tsnep_xdp_xmit_back()
761 static int tsnep_xdp_tx_map_zc(struct xdp_desc *xdpd, struct tsnep_tx *tx) in tsnep_xdp_tx_map_zc() argument
766 entry = &tx->entry[tx->write]; in tsnep_xdp_tx_map_zc()
769 dma = xsk_buff_raw_get_dma(tx->xsk_pool, xdpd->addr); in tsnep_xdp_tx_map_zc()
770 xsk_buff_raw_dma_sync_for_device(tx->xsk_pool, dma, xdpd->len); in tsnep_xdp_tx_map_zc()
775 entry->desc->tx = __cpu_to_le64(dma); in tsnep_xdp_tx_map_zc()
781 struct tsnep_tx *tx) in tsnep_xdp_xmit_frame_ring_zc() argument
785 length = tsnep_xdp_tx_map_zc(xdpd, tx); in tsnep_xdp_xmit_frame_ring_zc()
787 tsnep_tx_activate(tx, tx->write, length, true); in tsnep_xdp_xmit_frame_ring_zc()
788 tx->write = (tx->write + 1) & TSNEP_RING_MASK; in tsnep_xdp_xmit_frame_ring_zc()
791 static void tsnep_xdp_xmit_zc(struct tsnep_tx *tx) in tsnep_xdp_xmit_zc() argument
793 int desc_available = tsnep_tx_desc_available(tx); in tsnep_xdp_xmit_zc()
794 struct xdp_desc *descs = tx->xsk_pool->tx_descs; in tsnep_xdp_xmit_zc()
797 /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS in tsnep_xdp_xmit_zc()
798 * will be available for normal TX path and queue is stopped there if in tsnep_xdp_xmit_zc()
805 batch = xsk_tx_peek_release_desc_batch(tx->xsk_pool, desc_available); in tsnep_xdp_xmit_zc()
807 tsnep_xdp_xmit_frame_ring_zc(&descs[i], tx); in tsnep_xdp_xmit_zc()
815 tsnep_xdp_xmit_flush(tx); in tsnep_xdp_xmit_zc()
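
For AF_XDP zero-copy transmit, tsnep_xdp_xmit_zc() starts from the free descriptor count, keeps MAX_SKB_FRAGS + 1 of it back for the normal TX path (the reserve described by the in-code comments above), and then asks xsk_tx_peek_release_desc_batch() for at most that many descriptors; a single doorbell flush follows the whole batch. The sketch below reproduces the budget computation; the exact clamp and the pool behaviour are stubbed assumptions.

#include <stdio.h>

#define RING_SIZE	256	/* assumed TSNEP_RING_SIZE */
#define MAX_SKB_FRAGS	17	/* typical kernel value */

static int desc_available(int read, int write)
{
	return (read <= write) ? RING_SIZE - write + read - 1
			       : read - write - 1;
}

/* stands in for xsk_tx_peek_release_desc_batch(): at most 'budget'
 * descriptors, capped by what the socket actually queued
 */
static int peek_batch(int queued_in_pool, int budget)
{
	return queued_in_pool < budget ? queued_in_pool : budget;
}

int main(void)
{
	int read = 10, write = 200;	/* example ring state */
	int budget = desc_available(read, write);

	/* keep MAX_SKB_FRAGS + 1 descriptors back for the normal TX path */
	budget -= MAX_SKB_FRAGS + 1;
	if (budget <= 0) {
		printf("no zero-copy budget left\n");
		return 0;
	}

	printf("free=%d budget=%d batch=%d\n",
	       desc_available(read, write), budget, peek_batch(40, budget));
	/* each batched descriptor is mapped, activated and tx->write advanced;
	 * one tsnep_xdp_xmit_flush() then kicks the hardware
	 */
	return 0;
}
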
819 static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) in tsnep_tx_poll() argument
828 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_poll()
832 if (tx->read == tx->write) in tsnep_tx_poll()
835 entry = &tx->entry[tx->read]; in tsnep_tx_poll()
854 length = tsnep_tx_unmap(tx, tx->read, count); in tsnep_tx_poll()
885 tx->read = (tx->read + count) & TSNEP_RING_MASK; in tsnep_tx_poll()
887 tx->packets++; in tsnep_tx_poll()
888 tx->bytes += length + ETH_FCS_LEN; in tsnep_tx_poll()
893 if (tx->xsk_pool) { in tsnep_tx_poll()
895 xsk_tx_completed(tx->xsk_pool, xsk_frames); in tsnep_tx_poll()
896 if (xsk_uses_need_wakeup(tx->xsk_pool)) in tsnep_tx_poll()
897 xsk_set_tx_need_wakeup(tx->xsk_pool); in tsnep_tx_poll()
898 tsnep_xdp_xmit_zc(tx); in tsnep_tx_poll()
901 if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) && in tsnep_tx_poll()
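
The completion side, tsnep_tx_poll(), walks from tx->read towards tx->write, unmaps each finished frame, and credits packets and bytes; ETH_FCS_LEN is added so the byte counter reflects wire bytes including the 4-byte frame check sequence the hardware appends. A stopped subqueue is only restarted once at least 2 * (MAX_SKB_FRAGS + 1) descriptors are free again, which gives the stop/wake decision some hysteresis. A compressed standalone model (frame sizes and descriptor counts are made up for illustration):

#include <stdio.h>

#define RING_SIZE	256	/* assumed TSNEP_RING_SIZE */
#define RING_MASK	(RING_SIZE - 1)
#define MAX_SKB_FRAGS	17
#define ETH_FCS_LEN	4

static int desc_available(int read, int write)
{
	return (read <= write) ? RING_SIZE - write + read - 1
			       : read - write - 1;
}

int main(void)
{
	int read = 0, write = 60;	/* 20 pending frames, 3 descriptors each */
	unsigned long packets = 0, bytes = 0;

	while (read != write) {
		int count = 3;		/* descriptors consumed by this frame */
		int length = 1000;	/* bytes reported by the unmap step */

		read = (read + count) & RING_MASK;
		packets++;
		bytes += length + ETH_FCS_LEN;	/* wire bytes include the FCS */
	}

	printf("completed %lu packets, %lu bytes\n", packets, bytes);

	if (desc_available(read, write) >= 2 * (MAX_SKB_FRAGS + 1))
		printf("enough headroom again: wake the subqueue\n");
	return 0;
}
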
911 static bool tsnep_tx_pending(struct tsnep_tx *tx) in tsnep_tx_pending() argument
917 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_pending()
920 if (tx->read != tx->write) { in tsnep_tx_pending()
921 entry = &tx->entry[tx->read]; in tsnep_tx_pending()
933 static int tsnep_tx_open(struct tsnep_tx *tx) in tsnep_tx_open() argument
937 retval = tsnep_tx_ring_create(tx); in tsnep_tx_open()
941 tsnep_tx_init(tx); in tsnep_tx_open()
946 static void tsnep_tx_close(struct tsnep_tx *tx) in tsnep_tx_close() argument
948 tsnep_tx_ring_cleanup(tx); in tsnep_tx_close()
1288 struct netdev_queue *tx_nq, struct tsnep_tx *tx) in tsnep_xdp_run_prog() argument
1301 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false)) in tsnep_xdp_run_prog()
1333 struct tsnep_tx *tx) in tsnep_xdp_run_prog_zc() argument
1351 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true)) in tsnep_xdp_run_prog_zc()
1369 struct netdev_queue *tx_nq, struct tsnep_tx *tx) in tsnep_finalize_xdp() argument
1373 tsnep_xdp_xmit_flush(tx); in tsnep_finalize_xdp()
1443 struct tsnep_tx *tx; in tsnep_rx_poll() local
1455 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll()
1516 &xdp_status, tx_nq, tx); in tsnep_rx_poll()
1532 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll()
1546 struct tsnep_tx *tx; in tsnep_rx_poll_zc() local
1558 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll_zc()
1616 &xdp_status, tx_nq, tx); in tsnep_rx_poll_zc()
1641 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll_zc()
1798 if (queue->tx && tsnep_tx_pending(queue->tx)) in tsnep_pending()
1814 if (queue->tx) in tsnep_poll()
1815 complete = tsnep_tx_poll(queue->tx, budget); in tsnep_poll()
1861 if (queue->tx && queue->rx) in tsnep_request_irq()
1864 else if (queue->tx) in tsnep_request_irq()
1865 snprintf(queue->name, sizeof(queue->name), "%s-tx-%d", in tsnep_request_irq()
1866 name, queue->tx->queue_index); in tsnep_request_irq()
1919 struct tsnep_tx *tx = queue->tx; in tsnep_queue_open() local
1925 /* choose TX queue for XDP_TX */ in tsnep_queue_open()
1926 if (tx) in tsnep_queue_open()
1927 rx->tx_queue_index = tx->queue_index; in tsnep_queue_open()
1982 if (queue->tx) { in tsnep_queue_enable()
1983 netif_queue_set_napi(adapter->netdev, queue->tx->queue_index, in tsnep_queue_enable()
1985 tsnep_tx_enable(queue->tx); in tsnep_queue_enable()
2003 if (queue->tx) { in tsnep_queue_disable()
2004 tsnep_tx_disable(queue->tx, &queue->napi); in tsnep_queue_disable()
2005 netif_queue_set_napi(adapter->netdev, queue->tx->queue_index, in tsnep_queue_disable()
2025 if (adapter->queue[i].tx) { in tsnep_netdev_open()
2026 retval = tsnep_tx_open(adapter->queue[i].tx); in tsnep_netdev_open()
2068 if (adapter->queue[i].tx) in tsnep_netdev_open()
2069 tsnep_tx_close(adapter->queue[i].tx); in tsnep_netdev_open()
2089 if (adapter->queue[i].tx) in tsnep_netdev_close()
2090 tsnep_tx_close(adapter->queue[i].tx); in tsnep_netdev_close()
2125 queue->tx->xsk_pool = pool; in tsnep_enable_xsk()
2146 queue->tx->xsk_pool = NULL; in tsnep_disable_xsk()
2168 return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]); in tsnep_netdev_xmit_frame()
2206 stats->tx_packets += adapter->tx[i].packets; in tsnep_netdev_get_stats64()
2207 stats->tx_bytes += adapter->tx[i].bytes; in tsnep_netdev_get_stats64()
2208 stats->tx_dropped += adapter->tx[i].dropped; in tsnep_netdev_get_stats64()
2324 return &adapter->tx[cpu]; in tsnep_xdp_get_tx()
2333 struct tsnep_tx *tx; in tsnep_netdev_xdp_xmit() local
2340 tx = tsnep_xdp_get_tx(adapter, cpu); in tsnep_netdev_xdp_xmit()
2341 nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index); in tsnep_netdev_xdp_xmit()
2346 xmit = tsnep_xdp_xmit_frame_ring(xdp[nxmit], tx, in tsnep_netdev_xdp_xmit()
2358 tsnep_xdp_xmit_flush(tx); in tsnep_netdev_xdp_xmit()
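
For ndo_xdp_xmit bulk transmit, tsnep_netdev_xdp_xmit() selects a TX ring from the executing CPU, holds that ring's netdev TX queue for the duration of the bulk, tries to queue each frame, and finishes with a single tsnep_xdp_xmit_flush() doorbell; the return value is the number of frames accepted. The sketch below keeps only that shape: the CPU-to-queue mapping, the locking and the per-frame mapping are stubbed assumptions, since the listing only shows tx[] being indexed by CPU.

#include <stdbool.h>
#include <stdio.h>

#define NUM_TX_QUEUES 4		/* assumed number of TX rings */

/* stands in for tsnep_xdp_xmit_frame_ring(): fails once the simulated ring
 * runs out of descriptors
 */
static bool try_queue_frame(int queue, int frame)
{
	static int free_descs = 5;

	(void)queue;
	(void)frame;
	return free_descs-- > 0;
}

int main(void)
{
	int cpu = 6;
	int queue = cpu % NUM_TX_QUEUES;	/* assumed CPU -> queue mapping */
	int n = 8, nxmit;

	/* driver: lock the selected netdev TX queue for the whole bulk */
	for (nxmit = 0; nxmit < n; nxmit++) {
		if (!try_queue_frame(queue, nxmit))
			break;			/* stop at the first rejected frame */
	}
	/* driver: one doorbell flush, then unlock */

	printf("queue %d accepted %d of %d frames\n", queue, nxmit, n);
	return 0;
}
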
2502 /* one TX/RX queue pair for netdev is mandatory */ in tsnep_queue_init()
2514 adapter->queue[0].tx = &adapter->tx[0]; in tsnep_queue_init()
2515 adapter->queue[0].tx->adapter = adapter; in tsnep_queue_init()
2516 adapter->queue[0].tx->addr = adapter->addr + TSNEP_QUEUE(0); in tsnep_queue_init()
2517 adapter->queue[0].tx->queue_index = 0; in tsnep_queue_init()
2531 /* add additional TX/RX queue pairs only if dedicated interrupt is in tsnep_queue_init()
2545 adapter->queue[i].tx = &adapter->tx[i]; in tsnep_queue_init()
2546 adapter->queue[i].tx->adapter = adapter; in tsnep_queue_init()
2547 adapter->queue[i].tx->addr = adapter->addr + TSNEP_QUEUE(i); in tsnep_queue_init()
2548 adapter->queue[i].tx->queue_index = i; in tsnep_queue_init()
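
tsnep_queue_init() wires each TX ring to its own slice of the register space, adapter->addr + TSNEP_QUEUE(i), and gives it queue_index i; queue pair 0 is mandatory, additional TX/RX pairs are only created when a dedicated interrupt is available for them. A small sketch of the per-queue base-address computation; the stride behind TSNEP_QUEUE() is a placeholder, not the driver's real value.

#include <stdint.h>
#include <stdio.h>

#define QUEUE_STRIDE	0x100			/* hypothetical register-block stride */
#define TSNEP_QUEUE_SK(i) ((i) * QUEUE_STRIDE)	/* stand-in for TSNEP_QUEUE() */

int main(void)
{
	uintptr_t adapter_addr = 0xa0000000;	/* example MMIO base */
	int num_queues = 4;
	int i;

	for (i = 0; i < num_queues; i++)
		printf("tx[%d]: addr=0x%lx queue_index=%d%s\n",
		       i, (unsigned long)(adapter_addr + TSNEP_QUEUE_SK(i)), i,
		       i == 0 ? " (mandatory first TX/RX pair)"
			      : " (only with a dedicated interrupt)");
	return 0;
}
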