Lines Matching +full:rx +full:- +full:tx
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
6 * The TSN endpoint Ethernet MAC is an FPGA-based network device for real-time
10 * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
14 * - www.embedded-experts.at/tsn
15 * - www.engleder-embedded.com
36 #define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
38 /* XSK buffer shall store at least a Q-in-Q frame */
52 ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)
78 iowrite32(mask, adapter->addr + ECM_INT_ENABLE); in tsnep_enable_irq()
84 iowrite32(mask, adapter->addr + ECM_INT_ENABLE); in tsnep_disable_irq()
90 u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE); in tsnep_irq()
94 iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE); in tsnep_irq()
98 phy_mac_interrupt(adapter->netdev->phydev); in tsnep_irq()
100 /* handle TX/RX queue 0 interrupt */ in tsnep_irq()
101 if ((active & adapter->queue[0].irq_mask) != 0) { in tsnep_irq()
102 if (napi_schedule_prep(&adapter->queue[0].napi)) { in tsnep_irq()
103 tsnep_disable_irq(adapter, adapter->queue[0].irq_mask); in tsnep_irq()
105 __napi_schedule(&adapter->queue[0].napi); in tsnep_irq()
116 /* handle TX/RX queue interrupt */ in tsnep_irq_txrx()
117 if (napi_schedule_prep(&queue->napi)) { in tsnep_irq_txrx()
118 tsnep_disable_irq(queue->adapter, queue->irq_mask); in tsnep_irq_txrx()
120 __napi_schedule(&queue->napi); in tsnep_irq_txrx()
129 return -ERANGE; in tsnep_set_irq_coalesce()
135 queue->irq_delay &= ~ECM_INT_DELAY_MASK; in tsnep_set_irq_coalesce()
136 queue->irq_delay |= usecs; in tsnep_set_irq_coalesce()
137 iowrite8(queue->irq_delay, queue->irq_delay_addr); in tsnep_set_irq_coalesce()
146 usecs = (queue->irq_delay & ECM_INT_DELAY_MASK); in tsnep_get_irq_coalesce()
155 struct tsnep_adapter *adapter = bus->priv; in tsnep_mdiobus_read()
160 if (!adapter->suppress_preamble) in tsnep_mdiobus_read()
164 iowrite32(md, adapter->addr + ECM_MD_CONTROL); in tsnep_mdiobus_read()
165 retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md, in tsnep_mdiobus_read()
176 struct tsnep_adapter *adapter = bus->priv; in tsnep_mdiobus_write()
181 if (!adapter->suppress_preamble) in tsnep_mdiobus_write()
186 iowrite32(md, adapter->addr + ECM_MD_CONTROL); in tsnep_mdiobus_write()
187 retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md, in tsnep_mdiobus_write()
199 switch (adapter->phydev->speed) { in tsnep_set_link_mode()
210 iowrite32(mode, adapter->addr + ECM_STATUS); in tsnep_set_link_mode()
216 struct phy_device *phydev = netdev->phydev; in tsnep_phy_link_status_change()
218 if (phydev->link) in tsnep_phy_link_status_change()
221 phy_print_status(netdev->phydev); in tsnep_phy_link_status_change()
229 if (adapter->phydev->autoneg == AUTONEG_DISABLE && in tsnep_phy_loopback()
230 adapter->phydev->speed == SPEED_100) in tsnep_phy_loopback()
238 return phy_loopback(adapter->phydev, enable, speed); in tsnep_phy_loopback()
247 retval = phy_connect_direct(adapter->netdev, adapter->phydev, in tsnep_phy_open()
249 adapter->phy_mode); in tsnep_phy_open()
252 phydev = adapter->netdev->phydev; in tsnep_phy_open()
264 phy_ethtool_set_eee(adapter->phydev, &ethtool_keee); in tsnep_phy_open()
266 adapter->phydev->irq = PHY_MAC_INTERRUPT; in tsnep_phy_open()
267 phy_start(adapter->phydev); in tsnep_phy_open()
274 phy_stop(adapter->netdev->phydev); in tsnep_phy_close()
275 phy_disconnect(adapter->netdev->phydev); in tsnep_phy_close()
278 static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx) in tsnep_tx_ring_cleanup() argument
280 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_ring_cleanup()
283 memset(tx->entry, 0, sizeof(tx->entry)); in tsnep_tx_ring_cleanup()
286 if (tx->page[i]) { in tsnep_tx_ring_cleanup()
287 dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i], in tsnep_tx_ring_cleanup()
288 tx->page_dma[i]); in tsnep_tx_ring_cleanup()
289 tx->page[i] = NULL; in tsnep_tx_ring_cleanup()
290 tx->page_dma[i] = 0; in tsnep_tx_ring_cleanup()
295 static int tsnep_tx_ring_create(struct tsnep_tx *tx) in tsnep_tx_ring_create() argument
297 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_ring_create()
304 tx->page[i] = in tsnep_tx_ring_create()
305 dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i], in tsnep_tx_ring_create()
307 if (!tx->page[i]) { in tsnep_tx_ring_create()
308 retval = -ENOMEM; in tsnep_tx_ring_create()
312 entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j]; in tsnep_tx_ring_create()
313 entry->desc_wb = (struct tsnep_tx_desc_wb *) in tsnep_tx_ring_create()
314 (((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j); in tsnep_tx_ring_create()
315 entry->desc = (struct tsnep_tx_desc *) in tsnep_tx_ring_create()
316 (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET); in tsnep_tx_ring_create()
317 entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j; in tsnep_tx_ring_create()
318 entry->owner_user_flag = false; in tsnep_tx_ring_create()
322 entry = &tx->entry[i]; in tsnep_tx_ring_create()
323 next_entry = &tx->entry[(i + 1) & TSNEP_RING_MASK]; in tsnep_tx_ring_create()
324 entry->desc->next = __cpu_to_le64(next_entry->desc_dma); in tsnep_tx_ring_create()
330 tsnep_tx_ring_cleanup(tx); in tsnep_tx_ring_create()
334 static void tsnep_tx_init(struct tsnep_tx *tx) in tsnep_tx_init() argument
338 dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; in tsnep_tx_init()
339 iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW); in tsnep_tx_init()
340 iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH); in tsnep_tx_init()
341 tx->write = 0; in tsnep_tx_init()
342 tx->read = 0; in tsnep_tx_init()
343 tx->owner_counter = 1; in tsnep_tx_init()
344 tx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_tx_init()
347 static void tsnep_tx_enable(struct tsnep_tx *tx) in tsnep_tx_enable() argument
351 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_enable()
358 static void tsnep_tx_disable(struct tsnep_tx *tx, struct napi_struct *napi) in tsnep_tx_disable() argument
363 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_disable()
369 /* wait until TX is done in hardware */ in tsnep_tx_disable()
370 readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val, in tsnep_tx_disable()
374 /* wait until TX is also done in software */ in tsnep_tx_disable()
375 while (READ_ONCE(tx->read) != tx->write) { in tsnep_tx_disable()
381 static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length, in tsnep_tx_activate() argument
384 struct tsnep_tx_entry *entry = &tx->entry[index]; in tsnep_tx_activate()
386 entry->properties = 0; in tsnep_tx_activate()
388 if (entry->skb) { in tsnep_tx_activate()
389 entry->properties = length & TSNEP_DESC_LENGTH_MASK; in tsnep_tx_activate()
390 entry->properties |= TSNEP_DESC_INTERRUPT_FLAG; in tsnep_tx_activate()
391 if ((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP) in tsnep_tx_activate()
392 entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG; in tsnep_tx_activate()
412 entry->owner_user_flag = !entry->owner_user_flag; in tsnep_tx_activate()
415 entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG; in tsnep_tx_activate()
416 if (index == tx->increment_owner_counter) { in tsnep_tx_activate()
417 tx->owner_counter++; in tsnep_tx_activate()
418 if (tx->owner_counter == 4) in tsnep_tx_activate()
419 tx->owner_counter = 1; in tsnep_tx_activate()
420 tx->increment_owner_counter--; in tsnep_tx_activate()
421 if (tx->increment_owner_counter < 0) in tsnep_tx_activate()
422 tx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_tx_activate()
424 entry->properties |= in tsnep_tx_activate()
425 (tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & in tsnep_tx_activate()
427 if (entry->owner_user_flag) in tsnep_tx_activate()
428 entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG; in tsnep_tx_activate()
429 entry->desc->more_properties = in tsnep_tx_activate()
430 __cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK); in tsnep_tx_activate()
431 if (entry->type & TSNEP_TX_TYPE_INLINE) in tsnep_tx_activate()
432 entry->properties |= TSNEP_TX_DESC_DATA_AFTER_DESC_FLAG; in tsnep_tx_activate()
439 entry->desc->properties = __cpu_to_le32(entry->properties); in tsnep_tx_activate()
442 static int tsnep_tx_desc_available(struct tsnep_tx *tx) in tsnep_tx_desc_available() argument
444 if (tx->read <= tx->write) in tsnep_tx_desc_available()
445 return TSNEP_RING_SIZE - tx->write + tx->read - 1; in tsnep_tx_desc_available()
447 return tx->read - tx->write - 1; in tsnep_tx_desc_available()
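
The two branches above implement the usual circular-buffer free count: read == write means the ring is empty, and one slot is always left unused so that full and empty can be told apart. A minimal standalone sketch of the same arithmetic follows; the ring size of 256 and the helper name desc_available() are illustrative assumptions (TSNEP_RING_MASK above only implies that TSNEP_RING_SIZE is a power of two).

#include <stdio.h>

#define RING_SIZE 256	/* assumed power-of-two ring size, for illustration only */

/* Same computation as tsnep_tx_desc_available()/tsnep_rx_desc_available():
 * how many descriptors software may still fill before the ring counts as
 * full (one slot is intentionally kept free).
 */
static int desc_available(int read, int write)
{
	if (read <= write)
		return RING_SIZE - write + read - 1;

	return read - write - 1;
}

int main(void)
{
	printf("%d\n", desc_available(0, 0));    /* empty ring -> 255 */
	printf("%d\n", desc_available(10, 9));   /* write just behind read -> 0 (full) */
	printf("%d\n", desc_available(100, 40)); /* wrapped case -> 59 */
	return 0;
}
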
460 return -ENOMEM; in tsnep_tx_map_frag()
461 entry->type = TSNEP_TX_TYPE_SKB_FRAG_MAP_PAGE; in tsnep_tx_map_frag()
467 memcpy(&entry->desc->tx, fragdata, len); in tsnep_tx_map_frag()
472 memcpy(&entry->desc->tx, fragdata + skb_frag_off(frag), in tsnep_tx_map_frag()
476 entry->type = TSNEP_TX_TYPE_SKB_FRAG_INLINE; in tsnep_tx_map_frag()
483 static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count, in tsnep_tx_map() argument
486 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_map()
494 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; in tsnep_tx_map()
499 dma = dma_map_single(dmadev, skb->data, len, in tsnep_tx_map()
502 return -ENOMEM; in tsnep_tx_map()
503 entry->type = TSNEP_TX_TYPE_SKB_MAP; in tsnep_tx_map()
506 memcpy(&entry->desc->tx, skb->data, len); in tsnep_tx_map()
507 entry->type = TSNEP_TX_TYPE_SKB_INLINE; in tsnep_tx_map()
512 entry->type |= TSNEP_TX_TYPE_TSTAMP; in tsnep_tx_map()
514 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; in tsnep_tx_map()
522 entry->len = len; in tsnep_tx_map()
525 entry->desc->tx = __cpu_to_le64(dma); in tsnep_tx_map()
534 static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count) in tsnep_tx_unmap() argument
536 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_unmap()
542 entry = &tx->entry[(index + i) & TSNEP_RING_MASK]; in tsnep_tx_unmap()
544 if (entry->len) { in tsnep_tx_unmap()
545 if (entry->type & TSNEP_TX_TYPE_MAP) in tsnep_tx_unmap()
550 else if (entry->type & TSNEP_TX_TYPE_MAP_PAGE) in tsnep_tx_unmap()
555 map_len += entry->len; in tsnep_tx_unmap()
556 entry->len = 0; in tsnep_tx_unmap()
564 struct tsnep_tx *tx) in tsnep_xmit_frame_ring() argument
573 if (skb_shinfo(skb)->nr_frags > 0) in tsnep_xmit_frame_ring()
574 count += skb_shinfo(skb)->nr_frags; in tsnep_xmit_frame_ring()
576 if (tsnep_tx_desc_available(tx) < count) { in tsnep_xmit_frame_ring()
580 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index); in tsnep_xmit_frame_ring()
585 entry = &tx->entry[tx->write]; in tsnep_xmit_frame_ring()
586 entry->skb = skb; in tsnep_xmit_frame_ring()
588 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in tsnep_xmit_frame_ring()
589 tx->adapter->hwtstamp_config.tx_type == HWTSTAMP_TX_ON) { in tsnep_xmit_frame_ring()
590 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in tsnep_xmit_frame_ring()
594 retval = tsnep_tx_map(skb, tx, count, do_tstamp); in tsnep_xmit_frame_ring()
596 tsnep_tx_unmap(tx, tx->write, count); in tsnep_xmit_frame_ring()
597 dev_kfree_skb_any(entry->skb); in tsnep_xmit_frame_ring()
598 entry->skb = NULL; in tsnep_xmit_frame_ring()
600 tx->dropped++; in tsnep_xmit_frame_ring()
607 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, in tsnep_xmit_frame_ring()
608 i == count - 1); in tsnep_xmit_frame_ring()
609 tx->write = (tx->write + count) & TSNEP_RING_MASK; in tsnep_xmit_frame_ring()
616 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); in tsnep_xmit_frame_ring()
618 if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) { in tsnep_xmit_frame_ring()
620 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index); in tsnep_xmit_frame_ring()
626 static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx, in tsnep_xdp_tx_map() argument
629 struct device *dmadev = tx->adapter->dmadev; in tsnep_xdp_tx_map()
640 len = xdpf->len; in tsnep_xdp_tx_map()
642 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; in tsnep_xdp_tx_map()
645 xdpf->data; in tsnep_xdp_tx_map()
648 return -ENOMEM; in tsnep_xdp_tx_map()
650 entry->type = TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE; in tsnep_xdp_tx_map()
653 virt_to_page(xdpf->data); in tsnep_xdp_tx_map()
658 dma += sizeof(*xdpf) + xdpf->headroom; in tsnep_xdp_tx_map()
662 entry->type = TSNEP_TX_TYPE_XDP_TX; in tsnep_xdp_tx_map()
665 entry->len = len; in tsnep_xdp_tx_map()
668 entry->desc->tx = __cpu_to_le64(dma); in tsnep_xdp_tx_map()
673 frag = &shinfo->frags[i]; in tsnep_xdp_tx_map()
683 struct tsnep_tx *tx, u32 type) in tsnep_xdp_xmit_frame_ring() argument
691 count += shinfo->nr_frags; in tsnep_xdp_xmit_frame_ring()
693 /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS in tsnep_xdp_xmit_frame_ring()
694 * will be available for normal TX path and queue is stopped there if in tsnep_xdp_xmit_frame_ring()
697 if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count)) in tsnep_xdp_xmit_frame_ring()
700 entry = &tx->entry[tx->write]; in tsnep_xdp_xmit_frame_ring()
701 entry->xdpf = xdpf; in tsnep_xdp_xmit_frame_ring()
703 retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, type); in tsnep_xdp_xmit_frame_ring()
705 tsnep_tx_unmap(tx, tx->write, count); in tsnep_xdp_xmit_frame_ring()
706 entry->xdpf = NULL; in tsnep_xdp_xmit_frame_ring()
708 tx->dropped++; in tsnep_xdp_xmit_frame_ring()
715 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, in tsnep_xdp_xmit_frame_ring()
716 i == count - 1); in tsnep_xdp_xmit_frame_ring()
717 tx->write = (tx->write + count) & TSNEP_RING_MASK; in tsnep_xdp_xmit_frame_ring()
725 static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx) in tsnep_xdp_xmit_flush() argument
727 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); in tsnep_xdp_xmit_flush()
732 struct netdev_queue *tx_nq, struct tsnep_tx *tx, in tsnep_xdp_xmit_back() argument
750 xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, type); in tsnep_xdp_xmit_back()
761 static int tsnep_xdp_tx_map_zc(struct xdp_desc *xdpd, struct tsnep_tx *tx) in tsnep_xdp_tx_map_zc() argument
766 entry = &tx->entry[tx->write]; in tsnep_xdp_tx_map_zc()
767 entry->zc = true; in tsnep_xdp_tx_map_zc()
769 dma = xsk_buff_raw_get_dma(tx->xsk_pool, xdpd->addr); in tsnep_xdp_tx_map_zc()
770 xsk_buff_raw_dma_sync_for_device(tx->xsk_pool, dma, xdpd->len); in tsnep_xdp_tx_map_zc()
772 entry->type = TSNEP_TX_TYPE_XSK; in tsnep_xdp_tx_map_zc()
773 entry->len = xdpd->len; in tsnep_xdp_tx_map_zc()
775 entry->desc->tx = __cpu_to_le64(dma); in tsnep_xdp_tx_map_zc()
777 return xdpd->len; in tsnep_xdp_tx_map_zc()
781 struct tsnep_tx *tx) in tsnep_xdp_xmit_frame_ring_zc() argument
785 length = tsnep_xdp_tx_map_zc(xdpd, tx); in tsnep_xdp_xmit_frame_ring_zc()
787 tsnep_tx_activate(tx, tx->write, length, true); in tsnep_xdp_xmit_frame_ring_zc()
788 tx->write = (tx->write + 1) & TSNEP_RING_MASK; in tsnep_xdp_xmit_frame_ring_zc()
791 static void tsnep_xdp_xmit_zc(struct tsnep_tx *tx) in tsnep_xdp_xmit_zc() argument
793 int desc_available = tsnep_tx_desc_available(tx); in tsnep_xdp_xmit_zc()
794 struct xdp_desc *descs = tx->xsk_pool->tx_descs; in tsnep_xdp_xmit_zc()
797 /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS in tsnep_xdp_xmit_zc()
798 * will be available for normal TX path and queue is stopped there if in tsnep_xdp_xmit_zc()
803 desc_available -= MAX_SKB_FRAGS + 1; in tsnep_xdp_xmit_zc()
805 batch = xsk_tx_peek_release_desc_batch(tx->xsk_pool, desc_available); in tsnep_xdp_xmit_zc()
807 tsnep_xdp_xmit_frame_ring_zc(&descs[i], tx); in tsnep_xdp_xmit_zc()
815 tsnep_xdp_xmit_flush(tx); in tsnep_xdp_xmit_zc()
819 static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) in tsnep_tx_poll() argument
828 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_poll()
832 if (tx->read == tx->write) in tsnep_tx_poll()
835 entry = &tx->entry[tx->read]; in tsnep_tx_poll()
836 if ((__le32_to_cpu(entry->desc_wb->properties) & in tsnep_tx_poll()
838 (entry->properties & TSNEP_TX_DESC_OWNER_MASK)) in tsnep_tx_poll()
847 if ((entry->type & TSNEP_TX_TYPE_SKB) && in tsnep_tx_poll()
848 skb_shinfo(entry->skb)->nr_frags > 0) in tsnep_tx_poll()
849 count += skb_shinfo(entry->skb)->nr_frags; in tsnep_tx_poll()
850 else if ((entry->type & TSNEP_TX_TYPE_XDP) && in tsnep_tx_poll()
851 xdp_frame_has_frags(entry->xdpf)) in tsnep_tx_poll()
852 count += xdp_get_shared_info_from_frame(entry->xdpf)->nr_frags; in tsnep_tx_poll()
854 length = tsnep_tx_unmap(tx, tx->read, count); in tsnep_tx_poll()
856 if (((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP) && in tsnep_tx_poll()
857 (__le32_to_cpu(entry->desc_wb->properties) & in tsnep_tx_poll()
862 if (entry->skb->sk && in tsnep_tx_poll()
863 READ_ONCE(entry->skb->sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC) in tsnep_tx_poll()
865 __le64_to_cpu(entry->desc_wb->counter); in tsnep_tx_poll()
868 __le64_to_cpu(entry->desc_wb->timestamp); in tsnep_tx_poll()
873 skb_tstamp_tx(entry->skb, &hwtstamps); in tsnep_tx_poll()
876 if (entry->type & TSNEP_TX_TYPE_SKB) in tsnep_tx_poll()
877 napi_consume_skb(entry->skb, napi_budget); in tsnep_tx_poll()
878 else if (entry->type & TSNEP_TX_TYPE_XDP) in tsnep_tx_poll()
879 xdp_return_frame_rx_napi(entry->xdpf); in tsnep_tx_poll()
883 entry->skb = NULL; in tsnep_tx_poll()
885 tx->read = (tx->read + count) & TSNEP_RING_MASK; in tsnep_tx_poll()
887 tx->packets++; in tsnep_tx_poll()
888 tx->bytes += length + ETH_FCS_LEN; in tsnep_tx_poll()
890 budget--; in tsnep_tx_poll()
893 if (tx->xsk_pool) { in tsnep_tx_poll()
895 xsk_tx_completed(tx->xsk_pool, xsk_frames); in tsnep_tx_poll()
896 if (xsk_uses_need_wakeup(tx->xsk_pool)) in tsnep_tx_poll()
897 xsk_set_tx_need_wakeup(tx->xsk_pool); in tsnep_tx_poll()
898 tsnep_xdp_xmit_zc(tx); in tsnep_tx_poll()
901 if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) && in tsnep_tx_poll()
911 static bool tsnep_tx_pending(struct tsnep_tx *tx) in tsnep_tx_pending() argument
917 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_pending()
920 if (tx->read != tx->write) { in tsnep_tx_pending()
921 entry = &tx->entry[tx->read]; in tsnep_tx_pending()
922 if ((__le32_to_cpu(entry->desc_wb->properties) & in tsnep_tx_pending()
924 (entry->properties & TSNEP_TX_DESC_OWNER_MASK)) in tsnep_tx_pending()
933 static int tsnep_tx_open(struct tsnep_tx *tx) in tsnep_tx_open() argument
937 retval = tsnep_tx_ring_create(tx); in tsnep_tx_open()
941 tsnep_tx_init(tx); in tsnep_tx_open()
946 static void tsnep_tx_close(struct tsnep_tx *tx) in tsnep_tx_close() argument
948 tsnep_tx_ring_cleanup(tx); in tsnep_tx_close()
951 static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx) in tsnep_rx_ring_cleanup() argument
953 struct device *dmadev = rx->adapter->dmadev; in tsnep_rx_ring_cleanup()
958 entry = &rx->entry[i]; in tsnep_rx_ring_cleanup()
959 if (!rx->xsk_pool && entry->page) in tsnep_rx_ring_cleanup()
960 page_pool_put_full_page(rx->page_pool, entry->page, in tsnep_rx_ring_cleanup()
962 if (rx->xsk_pool && entry->xdp) in tsnep_rx_ring_cleanup()
963 xsk_buff_free(entry->xdp); in tsnep_rx_ring_cleanup()
965 entry->page = NULL; in tsnep_rx_ring_cleanup()
968 if (rx->page_pool) in tsnep_rx_ring_cleanup()
969 page_pool_destroy(rx->page_pool); in tsnep_rx_ring_cleanup()
971 memset(rx->entry, 0, sizeof(rx->entry)); in tsnep_rx_ring_cleanup()
974 if (rx->page[i]) { in tsnep_rx_ring_cleanup()
975 dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i], in tsnep_rx_ring_cleanup()
976 rx->page_dma[i]); in tsnep_rx_ring_cleanup()
977 rx->page[i] = NULL; in tsnep_rx_ring_cleanup()
978 rx->page_dma[i] = 0; in tsnep_rx_ring_cleanup()
983 static int tsnep_rx_ring_create(struct tsnep_rx *rx) in tsnep_rx_ring_create() argument
985 struct device *dmadev = rx->adapter->dmadev; in tsnep_rx_ring_create()
993 rx->page[i] = in tsnep_rx_ring_create()
994 dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i], in tsnep_rx_ring_create()
996 if (!rx->page[i]) { in tsnep_rx_ring_create()
997 retval = -ENOMEM; in tsnep_rx_ring_create()
1001 entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j]; in tsnep_rx_ring_create()
1002 entry->desc_wb = (struct tsnep_rx_desc_wb *) in tsnep_rx_ring_create()
1003 (((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j); in tsnep_rx_ring_create()
1004 entry->desc = (struct tsnep_rx_desc *) in tsnep_rx_ring_create()
1005 (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET); in tsnep_rx_ring_create()
1006 entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j; in tsnep_rx_ring_create()
1018 rx->page_pool = page_pool_create(&pp_params); in tsnep_rx_ring_create()
1019 if (IS_ERR(rx->page_pool)) { in tsnep_rx_ring_create()
1020 retval = PTR_ERR(rx->page_pool); in tsnep_rx_ring_create()
1021 rx->page_pool = NULL; in tsnep_rx_ring_create()
1026 entry = &rx->entry[i]; in tsnep_rx_ring_create()
1027 next_entry = &rx->entry[(i + 1) & TSNEP_RING_MASK]; in tsnep_rx_ring_create()
1028 entry->desc->next = __cpu_to_le64(next_entry->desc_dma); in tsnep_rx_ring_create()
1034 tsnep_rx_ring_cleanup(rx); in tsnep_rx_ring_create()
1038 static void tsnep_rx_init(struct tsnep_rx *rx) in tsnep_rx_init() argument
1042 dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; in tsnep_rx_init()
1043 iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW); in tsnep_rx_init()
1044 iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH); in tsnep_rx_init()
1045 rx->write = 0; in tsnep_rx_init()
1046 rx->read = 0; in tsnep_rx_init()
1047 rx->owner_counter = 1; in tsnep_rx_init()
1048 rx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_rx_init()
1051 static void tsnep_rx_enable(struct tsnep_rx *rx) in tsnep_rx_enable() argument
1056 iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL); in tsnep_rx_enable()
1059 static void tsnep_rx_disable(struct tsnep_rx *rx) in tsnep_rx_disable() argument
1063 iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL); in tsnep_rx_disable()
1064 readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val, in tsnep_rx_disable()
1069 static int tsnep_rx_desc_available(struct tsnep_rx *rx) in tsnep_rx_desc_available() argument
1071 if (rx->read <= rx->write) in tsnep_rx_desc_available()
1072 return TSNEP_RING_SIZE - rx->write + rx->read - 1; in tsnep_rx_desc_available()
1074 return rx->read - rx->write - 1; in tsnep_rx_desc_available()
1077 static void tsnep_rx_free_page_buffer(struct tsnep_rx *rx) in tsnep_rx_free_page_buffer() argument
1084 page = rx->page_buffer; in tsnep_rx_free_page_buffer()
1086 page_pool_put_full_page(rx->page_pool, *page, false); in tsnep_rx_free_page_buffer()
1092 static int tsnep_rx_alloc_page_buffer(struct tsnep_rx *rx) in tsnep_rx_alloc_page_buffer() argument
1099 for (i = 0; i < TSNEP_RING_SIZE - 1; i++) { in tsnep_rx_alloc_page_buffer()
1100 rx->page_buffer[i] = page_pool_dev_alloc_pages(rx->page_pool); in tsnep_rx_alloc_page_buffer()
1101 if (!rx->page_buffer[i]) { in tsnep_rx_alloc_page_buffer()
1102 tsnep_rx_free_page_buffer(rx); in tsnep_rx_alloc_page_buffer()
1104 return -ENOMEM; in tsnep_rx_alloc_page_buffer()
1111 static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry, in tsnep_rx_set_page() argument
1114 entry->page = page; in tsnep_rx_set_page()
1115 entry->len = TSNEP_MAX_RX_BUF_SIZE; in tsnep_rx_set_page()
1116 entry->dma = page_pool_get_dma_addr(entry->page); in tsnep_rx_set_page()
1117 entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_RX_OFFSET); in tsnep_rx_set_page()
1120 static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index) in tsnep_rx_alloc_buffer() argument
1122 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_alloc_buffer()
1125 page = page_pool_dev_alloc_pages(rx->page_pool); in tsnep_rx_alloc_buffer()
1127 return -ENOMEM; in tsnep_rx_alloc_buffer()
1128 tsnep_rx_set_page(rx, entry, page); in tsnep_rx_alloc_buffer()
1133 static void tsnep_rx_reuse_buffer(struct tsnep_rx *rx, int index) in tsnep_rx_reuse_buffer() argument
1135 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_reuse_buffer()
1136 struct tsnep_rx_entry *read = &rx->entry[rx->read]; in tsnep_rx_reuse_buffer()
1138 tsnep_rx_set_page(rx, entry, read->page); in tsnep_rx_reuse_buffer()
1139 read->page = NULL; in tsnep_rx_reuse_buffer()
1142 static void tsnep_rx_activate(struct tsnep_rx *rx, int index) in tsnep_rx_activate() argument
1144 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_activate()
1147 entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK; in tsnep_rx_activate()
1148 entry->properties |= TSNEP_DESC_INTERRUPT_FLAG; in tsnep_rx_activate()
1149 if (index == rx->increment_owner_counter) { in tsnep_rx_activate()
1150 rx->owner_counter++; in tsnep_rx_activate()
1151 if (rx->owner_counter == 4) in tsnep_rx_activate()
1152 rx->owner_counter = 1; in tsnep_rx_activate()
1153 rx->increment_owner_counter--; in tsnep_rx_activate()
1154 if (rx->increment_owner_counter < 0) in tsnep_rx_activate()
1155 rx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_rx_activate()
1157 entry->properties |= in tsnep_rx_activate()
1158 (rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & in tsnep_rx_activate()
1166 entry->desc->properties = __cpu_to_le32(entry->properties); in tsnep_rx_activate()
1169 static int tsnep_rx_alloc(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_alloc() argument
1175 index = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc()
1177 if (unlikely(tsnep_rx_alloc_buffer(rx, index))) { in tsnep_rx_alloc()
1178 rx->alloc_failed++; in tsnep_rx_alloc()
1183 tsnep_rx_reuse_buffer(rx, index); in tsnep_rx_alloc()
1188 tsnep_rx_activate(rx, index); in tsnep_rx_alloc()
1192 rx->write = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc()
1197 static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_refill() argument
1201 desc_refilled = tsnep_rx_alloc(rx, count, reuse); in tsnep_rx_refill()
1203 tsnep_rx_enable(rx); in tsnep_rx_refill()
1208 static void tsnep_rx_set_xdp(struct tsnep_rx *rx, struct tsnep_rx_entry *entry, in tsnep_rx_set_xdp() argument
1211 entry->xdp = xdp; in tsnep_rx_set_xdp()
1212 entry->len = TSNEP_XSK_RX_BUF_SIZE; in tsnep_rx_set_xdp()
1213 entry->dma = xsk_buff_xdp_get_dma(entry->xdp); in tsnep_rx_set_xdp()
1214 entry->desc->rx = __cpu_to_le64(entry->dma); in tsnep_rx_set_xdp()
1217 static void tsnep_rx_reuse_buffer_zc(struct tsnep_rx *rx, int index) in tsnep_rx_reuse_buffer_zc() argument
1219 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_reuse_buffer_zc()
1220 struct tsnep_rx_entry *read = &rx->entry[rx->read]; in tsnep_rx_reuse_buffer_zc()
1222 tsnep_rx_set_xdp(rx, entry, read->xdp); in tsnep_rx_reuse_buffer_zc()
1223 read->xdp = NULL; in tsnep_rx_reuse_buffer_zc()
1226 static int tsnep_rx_alloc_zc(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_alloc_zc() argument
1231 allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, count); in tsnep_rx_alloc_zc()
1233 int index = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc_zc()
1234 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_alloc_zc()
1236 tsnep_rx_set_xdp(rx, entry, rx->xdp_batch[i]); in tsnep_rx_alloc_zc()
1237 tsnep_rx_activate(rx, index); in tsnep_rx_alloc_zc()
1240 rx->alloc_failed++; in tsnep_rx_alloc_zc()
1243 tsnep_rx_reuse_buffer_zc(rx, rx->write); in tsnep_rx_alloc_zc()
1244 tsnep_rx_activate(rx, rx->write); in tsnep_rx_alloc_zc()
1249 rx->write = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc_zc()
1254 static void tsnep_rx_free_zc(struct tsnep_rx *rx) in tsnep_rx_free_zc() argument
1259 struct tsnep_rx_entry *entry = &rx->entry[i]; in tsnep_rx_free_zc()
1261 if (entry->xdp) in tsnep_rx_free_zc()
1262 xsk_buff_free(entry->xdp); in tsnep_rx_free_zc()
1263 entry->xdp = NULL; in tsnep_rx_free_zc()
1267 static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_refill_zc() argument
1271 desc_refilled = tsnep_rx_alloc_zc(rx, count, reuse); in tsnep_rx_refill_zc()
1273 tsnep_rx_enable(rx); in tsnep_rx_refill_zc()
1278 static void tsnep_xsk_rx_need_wakeup(struct tsnep_rx *rx, int desc_available) in tsnep_xsk_rx_need_wakeup() argument
1281 xsk_set_rx_need_wakeup(rx->xsk_pool); in tsnep_xsk_rx_need_wakeup()
1283 xsk_clear_rx_need_wakeup(rx->xsk_pool); in tsnep_xsk_rx_need_wakeup()
1286 static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog, in tsnep_xdp_run_prog() argument
1288 struct netdev_queue *tx_nq, struct tsnep_tx *tx) in tsnep_xdp_run_prog() argument
1294 length = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM; in tsnep_xdp_run_prog()
1301 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false)) in tsnep_xdp_run_prog()
1306 if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0) in tsnep_xdp_run_prog()
1311 bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog()
1315 trace_xdp_exception(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog()
1321 sync = xdp->data_end - xdp->data_hard_start - in tsnep_xdp_run_prog()
1324 page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data), in tsnep_xdp_run_prog()
1330 static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog, in tsnep_xdp_run_prog_zc() argument
1333 struct tsnep_tx *tx) in tsnep_xdp_run_prog_zc() argument
1339 /* XDP_REDIRECT is the main action for zero-copy */ in tsnep_xdp_run_prog_zc()
1341 if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0) in tsnep_xdp_run_prog_zc()
1351 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true)) in tsnep_xdp_run_prog_zc()
1356 bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog_zc()
1360 trace_xdp_exception(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog_zc()
1369 struct netdev_queue *tx_nq, struct tsnep_tx *tx) in tsnep_finalize_xdp() argument
1373 tsnep_xdp_xmit_flush(tx); in tsnep_finalize_xdp()
1381 static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page, in tsnep_build_skb() argument
1392 __skb_put(skb, length - ETH_FCS_LEN); in tsnep_build_skb()
1394 if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) { in tsnep_build_skb()
1400 skb_shinfo(skb)->tx_flags |= in tsnep_build_skb()
1403 hwtstamps->netdev_data = rx_inline; in tsnep_build_skb()
1406 skb_record_rx_queue(skb, rx->queue_index); in tsnep_build_skb()
1407 skb->protocol = eth_type_trans(skb, rx->adapter->netdev); in tsnep_build_skb()
1412 static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi, in tsnep_rx_page() argument
1417 skb = tsnep_build_skb(rx, page, length); in tsnep_rx_page()
1421 rx->packets++; in tsnep_rx_page()
1422 rx->bytes += length; in tsnep_rx_page()
1423 if (skb->pkt_type == PACKET_MULTICAST) in tsnep_rx_page()
1424 rx->multicast++; in tsnep_rx_page()
1428 page_pool_recycle_direct(rx->page_pool, page); in tsnep_rx_page()
1430 rx->dropped++; in tsnep_rx_page()
1434 static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, in tsnep_rx_poll() argument
1437 struct device *dmadev = rx->adapter->dmadev; in tsnep_rx_poll()
1443 struct tsnep_tx *tx; in tsnep_rx_poll() local
1449 desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_poll()
1450 dma_dir = page_pool_get_dma_dir(rx->page_pool); in tsnep_rx_poll()
1451 prog = READ_ONCE(rx->adapter->xdp_prog); in tsnep_rx_poll()
1453 tx_nq = netdev_get_tx_queue(rx->adapter->netdev, in tsnep_rx_poll()
1454 rx->tx_queue_index); in tsnep_rx_poll()
1455 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll()
1457 xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq); in tsnep_rx_poll()
1460 while (likely(done < budget) && (rx->read != rx->write)) { in tsnep_rx_poll()
1461 entry = &rx->entry[rx->read]; in tsnep_rx_poll()
1462 if ((__le32_to_cpu(entry->desc_wb->properties) & in tsnep_rx_poll()
1464 (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK)) in tsnep_rx_poll()
1471 desc_available -= tsnep_rx_refill(rx, desc_available, in tsnep_rx_poll()
1473 if (!entry->page) { in tsnep_rx_poll()
1475 * empty RX ring, thus buffer cannot be used for in tsnep_rx_poll()
1476 * RX processing in tsnep_rx_poll()
1478 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll()
1481 rx->dropped++; in tsnep_rx_poll()
1492 prefetch(page_address(entry->page) + TSNEP_RX_OFFSET); in tsnep_rx_poll()
1493 length = __le32_to_cpu(entry->desc_wb->properties) & in tsnep_rx_poll()
1495 dma_sync_single_range_for_cpu(dmadev, entry->dma, in tsnep_rx_poll()
1498 /* RX metadata with timestamps is in front of actual data, in tsnep_rx_poll()
1500 * consider metadata size as offset of actual data during RX in tsnep_rx_poll()
1503 length -= TSNEP_RX_INLINE_METADATA_SIZE; in tsnep_rx_poll()
1505 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll()
1511 xdp_prepare_buff(&xdp, page_address(entry->page), in tsnep_rx_poll()
1513 length - ETH_FCS_LEN, false); in tsnep_rx_poll()
1515 consume = tsnep_xdp_run_prog(rx, prog, &xdp, in tsnep_rx_poll()
1516 &xdp_status, tx_nq, tx); in tsnep_rx_poll()
1518 rx->packets++; in tsnep_rx_poll()
1519 rx->bytes += length; in tsnep_rx_poll()
1521 entry->page = NULL; in tsnep_rx_poll()
1527 tsnep_rx_page(rx, napi, entry->page, length); in tsnep_rx_poll()
1528 entry->page = NULL; in tsnep_rx_poll()
1532 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll()
1535 tsnep_rx_refill(rx, desc_available, false); in tsnep_rx_poll()
1540 static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi, in tsnep_rx_poll_zc() argument
1546 struct tsnep_tx *tx; in tsnep_rx_poll_zc() local
1553 desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_poll_zc()
1554 prog = READ_ONCE(rx->adapter->xdp_prog); in tsnep_rx_poll_zc()
1556 tx_nq = netdev_get_tx_queue(rx->adapter->netdev, in tsnep_rx_poll_zc()
1557 rx->tx_queue_index); in tsnep_rx_poll_zc()
1558 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll_zc()
1561 while (likely(done < budget) && (rx->read != rx->write)) { in tsnep_rx_poll_zc()
1562 entry = &rx->entry[rx->read]; in tsnep_rx_poll_zc()
1563 if ((__le32_to_cpu(entry->desc_wb->properties) & in tsnep_rx_poll_zc()
1565 (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK)) in tsnep_rx_poll_zc()
1572 desc_available -= tsnep_rx_refill_zc(rx, desc_available, in tsnep_rx_poll_zc()
1574 if (!entry->xdp) { in tsnep_rx_poll_zc()
1576 * empty RX ring, thus buffer cannot be used for in tsnep_rx_poll_zc()
1577 * RX processing in tsnep_rx_poll_zc()
1579 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll_zc()
1582 rx->dropped++; in tsnep_rx_poll_zc()
1593 prefetch(entry->xdp->data); in tsnep_rx_poll_zc()
1594 length = __le32_to_cpu(entry->desc_wb->properties) & in tsnep_rx_poll_zc()
1596 xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN); in tsnep_rx_poll_zc()
1597 xsk_buff_dma_sync_for_cpu(entry->xdp); in tsnep_rx_poll_zc()
1599 /* RX metadata with timestamps is in front of actual data, in tsnep_rx_poll_zc()
1601 * consider metadata size as offset of actual data during RX in tsnep_rx_poll_zc()
1604 length -= TSNEP_RX_INLINE_METADATA_SIZE; in tsnep_rx_poll_zc()
1606 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll_zc()
1612 entry->xdp->data += TSNEP_RX_INLINE_METADATA_SIZE; in tsnep_rx_poll_zc()
1613 entry->xdp->data_meta += TSNEP_RX_INLINE_METADATA_SIZE; in tsnep_rx_poll_zc()
1615 consume = tsnep_xdp_run_prog_zc(rx, prog, entry->xdp, in tsnep_rx_poll_zc()
1616 &xdp_status, tx_nq, tx); in tsnep_rx_poll_zc()
1618 rx->packets++; in tsnep_rx_poll_zc()
1619 rx->bytes += length; in tsnep_rx_poll_zc()
1621 entry->xdp = NULL; in tsnep_rx_poll_zc()
1627 page = page_pool_dev_alloc_pages(rx->page_pool); in tsnep_rx_poll_zc()
1630 entry->xdp->data - TSNEP_RX_INLINE_METADATA_SIZE, in tsnep_rx_poll_zc()
1632 tsnep_rx_page(rx, napi, page, length); in tsnep_rx_poll_zc()
1634 rx->dropped++; in tsnep_rx_poll_zc()
1636 xsk_buff_free(entry->xdp); in tsnep_rx_poll_zc()
1637 entry->xdp = NULL; in tsnep_rx_poll_zc()
1641 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll_zc()
1644 desc_available -= tsnep_rx_refill_zc(rx, desc_available, false); in tsnep_rx_poll_zc()
1646 if (xsk_uses_need_wakeup(rx->xsk_pool)) { in tsnep_rx_poll_zc()
1647 tsnep_xsk_rx_need_wakeup(rx, desc_available); in tsnep_rx_poll_zc()
1655 static bool tsnep_rx_pending(struct tsnep_rx *rx) in tsnep_rx_pending() argument
1659 if (rx->read != rx->write) { in tsnep_rx_pending()
1660 entry = &rx->entry[rx->read]; in tsnep_rx_pending()
1661 if ((__le32_to_cpu(entry->desc_wb->properties) & in tsnep_rx_pending()
1663 (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK)) in tsnep_rx_pending()
1670 static int tsnep_rx_open(struct tsnep_rx *rx) in tsnep_rx_open() argument
1675 retval = tsnep_rx_ring_create(rx); in tsnep_rx_open()
1679 tsnep_rx_init(rx); in tsnep_rx_open()
1681 desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_open()
1682 if (rx->xsk_pool) in tsnep_rx_open()
1683 retval = tsnep_rx_alloc_zc(rx, desc_available, false); in tsnep_rx_open()
1685 retval = tsnep_rx_alloc(rx, desc_available, false); in tsnep_rx_open()
1687 retval = -ENOMEM; in tsnep_rx_open()
1695 if (rx->xsk_pool) { in tsnep_rx_open()
1696 retval = tsnep_rx_alloc_page_buffer(rx); in tsnep_rx_open()
1704 tsnep_rx_ring_cleanup(rx); in tsnep_rx_open()
1708 static void tsnep_rx_close(struct tsnep_rx *rx) in tsnep_rx_close() argument
1710 if (rx->xsk_pool) in tsnep_rx_close()
1711 tsnep_rx_free_page_buffer(rx); in tsnep_rx_close()
1713 tsnep_rx_ring_cleanup(rx); in tsnep_rx_close()
1716 static void tsnep_rx_reopen(struct tsnep_rx *rx) in tsnep_rx_reopen() argument
1718 struct page **page = rx->page_buffer; in tsnep_rx_reopen()
1721 tsnep_rx_init(rx); in tsnep_rx_reopen()
1724 struct tsnep_rx_entry *entry = &rx->entry[i]; in tsnep_rx_reopen()
1729 entry->desc->properties = 0; in tsnep_rx_reopen()
1730 entry->desc_wb->properties = 0; in tsnep_rx_reopen()
1734 tsnep_rx_set_page(rx, entry, *page); in tsnep_rx_reopen()
1735 tsnep_rx_activate(rx, rx->write); in tsnep_rx_reopen()
1736 rx->write++; in tsnep_rx_reopen()
1744 static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx) in tsnep_rx_reopen_xsk() argument
1746 struct page **page = rx->page_buffer; in tsnep_rx_reopen_xsk()
1750 tsnep_rx_init(rx); in tsnep_rx_reopen_xsk()
1756 allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, in tsnep_rx_reopen_xsk()
1757 TSNEP_RING_SIZE - 1); in tsnep_rx_reopen_xsk()
1760 struct tsnep_rx_entry *entry = &rx->entry[i]; in tsnep_rx_reopen_xsk()
1765 if (entry->page) { in tsnep_rx_reopen_xsk()
1766 *page = entry->page; in tsnep_rx_reopen_xsk()
1767 entry->page = NULL; in tsnep_rx_reopen_xsk()
1775 entry->desc->properties = 0; in tsnep_rx_reopen_xsk()
1776 entry->desc_wb->properties = 0; in tsnep_rx_reopen_xsk()
1779 tsnep_rx_set_xdp(rx, entry, in tsnep_rx_reopen_xsk()
1780 rx->xdp_batch[allocated - 1]); in tsnep_rx_reopen_xsk()
1781 tsnep_rx_activate(rx, rx->write); in tsnep_rx_reopen_xsk()
1782 rx->write++; in tsnep_rx_reopen_xsk()
1784 allocated--; in tsnep_rx_reopen_xsk()
1792 if (xsk_uses_need_wakeup(rx->xsk_pool)) in tsnep_rx_reopen_xsk()
1793 tsnep_xsk_rx_need_wakeup(rx, tsnep_rx_desc_available(rx)); in tsnep_rx_reopen_xsk()
1798 if (queue->tx && tsnep_tx_pending(queue->tx)) in tsnep_pending()
1801 if (queue->rx && tsnep_rx_pending(queue->rx)) in tsnep_pending()
1814 if (queue->tx) in tsnep_poll()
1815 complete = tsnep_tx_poll(queue->tx, budget); in tsnep_poll()
1821 if (queue->rx) { in tsnep_poll()
1822 done = queue->rx->xsk_pool ? in tsnep_poll()
1823 tsnep_rx_poll_zc(queue->rx, napi, budget) : in tsnep_poll()
1824 tsnep_rx_poll(queue->rx, napi, budget); in tsnep_poll()
1834 tsnep_enable_irq(queue->adapter, queue->irq_mask); in tsnep_poll()
1841 tsnep_disable_irq(queue->adapter, queue->irq_mask); in tsnep_poll()
1846 return min(done, budget - 1); in tsnep_poll()
1851 const char *name = netdev_name(queue->adapter->netdev); in tsnep_request_irq()
1857 sprintf(queue->name, "%s-mac", name); in tsnep_request_irq()
1859 dev = queue->adapter; in tsnep_request_irq()
1861 if (queue->tx && queue->rx) in tsnep_request_irq()
1862 snprintf(queue->name, sizeof(queue->name), "%s-txrx-%d", in tsnep_request_irq()
1863 name, queue->rx->queue_index); in tsnep_request_irq()
1864 else if (queue->tx) in tsnep_request_irq()
1865 snprintf(queue->name, sizeof(queue->name), "%s-tx-%d", in tsnep_request_irq()
1866 name, queue->tx->queue_index); in tsnep_request_irq()
1868 snprintf(queue->name, sizeof(queue->name), "%s-rx-%d", in tsnep_request_irq()
1869 name, queue->rx->queue_index); in tsnep_request_irq()
1874 retval = request_irq(queue->irq, handler, 0, queue->name, dev); in tsnep_request_irq()
1877 memset(queue->name, 0, sizeof(queue->name)); in tsnep_request_irq()
1887 if (!strlen(queue->name)) in tsnep_free_irq()
1891 dev = queue->adapter; in tsnep_free_irq()
1895 free_irq(queue->irq, dev); in tsnep_free_irq()
1896 memset(queue->name, 0, sizeof(queue->name)); in tsnep_free_irq()
1901 struct tsnep_rx *rx = queue->rx; in tsnep_queue_close() local
1905 if (rx) { in tsnep_queue_close()
1906 if (xdp_rxq_info_is_reg(&rx->xdp_rxq)) in tsnep_queue_close()
1907 xdp_rxq_info_unreg(&rx->xdp_rxq); in tsnep_queue_close()
1908 if (xdp_rxq_info_is_reg(&rx->xdp_rxq_zc)) in tsnep_queue_close()
1909 xdp_rxq_info_unreg(&rx->xdp_rxq_zc); in tsnep_queue_close()
1912 netif_napi_del(&queue->napi); in tsnep_queue_close()
1918 struct tsnep_rx *rx = queue->rx; in tsnep_queue_open() local
1919 struct tsnep_tx *tx = queue->tx; in tsnep_queue_open() local
1922 netif_napi_add(adapter->netdev, &queue->napi, tsnep_poll); in tsnep_queue_open()
1924 if (rx) { in tsnep_queue_open()
1925 /* choose TX queue for XDP_TX */ in tsnep_queue_open()
1926 if (tx) in tsnep_queue_open()
1927 rx->tx_queue_index = tx->queue_index; in tsnep_queue_open()
1928 else if (rx->queue_index < adapter->num_tx_queues) in tsnep_queue_open()
1929 rx->tx_queue_index = rx->queue_index; in tsnep_queue_open()
1931 rx->tx_queue_index = 0; in tsnep_queue_open()
1937 retval = xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev, in tsnep_queue_open()
1938 rx->queue_index, queue->napi.napi_id); in tsnep_queue_open()
1941 retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, in tsnep_queue_open()
1943 rx->page_pool); in tsnep_queue_open()
1946 retval = xdp_rxq_info_reg(&rx->xdp_rxq_zc, adapter->netdev, in tsnep_queue_open()
1947 rx->queue_index, queue->napi.napi_id); in tsnep_queue_open()
1950 retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq_zc, in tsnep_queue_open()
1955 if (rx->xsk_pool) in tsnep_queue_open()
1956 xsk_pool_set_rxq_info(rx->xsk_pool, &rx->xdp_rxq_zc); in tsnep_queue_open()
1961 netif_err(adapter, drv, adapter->netdev, in tsnep_queue_open()
1962 "can't get assigned irq %d.\n", queue->irq); in tsnep_queue_open()
1976 struct tsnep_adapter *adapter = queue->adapter; in tsnep_queue_enable()
1978 netif_napi_set_irq(&queue->napi, queue->irq); in tsnep_queue_enable()
1979 napi_enable(&queue->napi); in tsnep_queue_enable()
1980 tsnep_enable_irq(adapter, queue->irq_mask); in tsnep_queue_enable()
1982 if (queue->tx) { in tsnep_queue_enable()
1983 netif_queue_set_napi(adapter->netdev, queue->tx->queue_index, in tsnep_queue_enable()
1984 NETDEV_QUEUE_TYPE_TX, &queue->napi); in tsnep_queue_enable()
1985 tsnep_tx_enable(queue->tx); in tsnep_queue_enable()
1988 if (queue->rx) { in tsnep_queue_enable()
1989 netif_queue_set_napi(adapter->netdev, queue->rx->queue_index, in tsnep_queue_enable()
1990 NETDEV_QUEUE_TYPE_RX, &queue->napi); in tsnep_queue_enable()
1991 tsnep_rx_enable(queue->rx); in tsnep_queue_enable()
1997 struct tsnep_adapter *adapter = queue->adapter; in tsnep_queue_disable()
1999 if (queue->rx) in tsnep_queue_disable()
2000 netif_queue_set_napi(adapter->netdev, queue->rx->queue_index, in tsnep_queue_disable()
2003 if (queue->tx) { in tsnep_queue_disable()
2004 tsnep_tx_disable(queue->tx, &queue->napi); in tsnep_queue_disable()
2005 netif_queue_set_napi(adapter->netdev, queue->tx->queue_index, in tsnep_queue_disable()
2009 napi_disable(&queue->napi); in tsnep_queue_disable()
2010 tsnep_disable_irq(adapter, queue->irq_mask); in tsnep_queue_disable()
2012 /* disable RX after NAPI polling has been disabled, because RX can be in tsnep_queue_disable()
2015 if (queue->rx) in tsnep_queue_disable()
2016 tsnep_rx_disable(queue->rx); in tsnep_queue_disable()
2024 for (i = 0; i < adapter->num_queues; i++) { in tsnep_netdev_open()
2025 if (adapter->queue[i].tx) { in tsnep_netdev_open()
2026 retval = tsnep_tx_open(adapter->queue[i].tx); in tsnep_netdev_open()
2030 if (adapter->queue[i].rx) { in tsnep_netdev_open()
2031 retval = tsnep_rx_open(adapter->queue[i].rx); in tsnep_netdev_open()
2036 retval = tsnep_queue_open(adapter, &adapter->queue[i], i == 0); in tsnep_netdev_open()
2041 retval = netif_set_real_num_tx_queues(adapter->netdev, in tsnep_netdev_open()
2042 adapter->num_tx_queues); in tsnep_netdev_open()
2045 retval = netif_set_real_num_rx_queues(adapter->netdev, in tsnep_netdev_open()
2046 adapter->num_rx_queues); in tsnep_netdev_open()
2055 for (i = 0; i < adapter->num_queues; i++) in tsnep_netdev_open()
2056 tsnep_queue_enable(&adapter->queue[i]); in tsnep_netdev_open()
2063 for (i = 0; i < adapter->num_queues; i++) { in tsnep_netdev_open()
2064 tsnep_queue_close(&adapter->queue[i], i == 0); in tsnep_netdev_open()
2066 if (adapter->queue[i].rx) in tsnep_netdev_open()
2067 tsnep_rx_close(adapter->queue[i].rx); in tsnep_netdev_open()
2068 if (adapter->queue[i].tx) in tsnep_netdev_open()
2069 tsnep_tx_close(adapter->queue[i].tx); in tsnep_netdev_open()
2082 for (i = 0; i < adapter->num_queues; i++) { in tsnep_netdev_close()
2083 tsnep_queue_disable(&adapter->queue[i]); in tsnep_netdev_close()
2085 tsnep_queue_close(&adapter->queue[i], i == 0); in tsnep_netdev_close()
2087 if (adapter->queue[i].rx) in tsnep_netdev_close()
2088 tsnep_rx_close(adapter->queue[i].rx); in tsnep_netdev_close()
2089 if (adapter->queue[i].tx) in tsnep_netdev_close()
2090 tsnep_tx_close(adapter->queue[i].tx); in tsnep_netdev_close()
2098 bool running = netif_running(queue->adapter->netdev); in tsnep_enable_xsk()
2103 return -EOPNOTSUPP; in tsnep_enable_xsk()
2105 queue->rx->page_buffer = kcalloc(TSNEP_RING_SIZE, in tsnep_enable_xsk()
2106 sizeof(*queue->rx->page_buffer), in tsnep_enable_xsk()
2108 if (!queue->rx->page_buffer) in tsnep_enable_xsk()
2109 return -ENOMEM; in tsnep_enable_xsk()
2110 queue->rx->xdp_batch = kcalloc(TSNEP_RING_SIZE, in tsnep_enable_xsk()
2111 sizeof(*queue->rx->xdp_batch), in tsnep_enable_xsk()
2113 if (!queue->rx->xdp_batch) { in tsnep_enable_xsk()
2114 kfree(queue->rx->page_buffer); in tsnep_enable_xsk()
2115 queue->rx->page_buffer = NULL; in tsnep_enable_xsk()
2117 return -ENOMEM; in tsnep_enable_xsk()
2120 xsk_pool_set_rxq_info(pool, &queue->rx->xdp_rxq_zc); in tsnep_enable_xsk()
2125 queue->tx->xsk_pool = pool; in tsnep_enable_xsk()
2126 queue->rx->xsk_pool = pool; in tsnep_enable_xsk()
2129 tsnep_rx_reopen_xsk(queue->rx); in tsnep_enable_xsk()
2138 bool running = netif_running(queue->adapter->netdev); in tsnep_disable_xsk()
2143 tsnep_rx_free_zc(queue->rx); in tsnep_disable_xsk()
2145 queue->rx->xsk_pool = NULL; in tsnep_disable_xsk()
2146 queue->tx->xsk_pool = NULL; in tsnep_disable_xsk()
2149 tsnep_rx_reopen(queue->rx); in tsnep_disable_xsk()
2153 kfree(queue->rx->xdp_batch); in tsnep_disable_xsk()
2154 queue->rx->xdp_batch = NULL; in tsnep_disable_xsk()
2155 kfree(queue->rx->page_buffer); in tsnep_disable_xsk()
2156 queue->rx->page_buffer = NULL; in tsnep_disable_xsk()
2165 if (queue_mapping >= adapter->num_tx_queues) in tsnep_netdev_xmit_frame()
2168 return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]); in tsnep_netdev_xmit_frame()
2175 return -EINVAL; in tsnep_netdev_ioctl()
2178 return phy_mii_ioctl(netdev->phydev, ifr, cmd); in tsnep_netdev_ioctl()
2188 if (netdev->flags & IFF_PROMISC) { in tsnep_netdev_set_multicast()
2191 } else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) { in tsnep_netdev_set_multicast()
2194 iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER); in tsnep_netdev_set_multicast()
2205 for (i = 0; i < adapter->num_tx_queues; i++) { in tsnep_netdev_get_stats64()
2206 stats->tx_packets += adapter->tx[i].packets; in tsnep_netdev_get_stats64()
2207 stats->tx_bytes += adapter->tx[i].bytes; in tsnep_netdev_get_stats64()
2208 stats->tx_dropped += adapter->tx[i].dropped; in tsnep_netdev_get_stats64()
2210 for (i = 0; i < adapter->num_rx_queues; i++) { in tsnep_netdev_get_stats64()
2211 stats->rx_packets += adapter->rx[i].packets; in tsnep_netdev_get_stats64()
2212 stats->rx_bytes += adapter->rx[i].bytes; in tsnep_netdev_get_stats64()
2213 stats->rx_dropped += adapter->rx[i].dropped; in tsnep_netdev_get_stats64()
2214 stats->multicast += adapter->rx[i].multicast; in tsnep_netdev_get_stats64()
2216 reg = ioread32(adapter->addr + TSNEP_QUEUE(i) + in tsnep_netdev_get_stats64()
2220 stats->rx_dropped += val; in tsnep_netdev_get_stats64()
2223 stats->rx_dropped += val; in tsnep_netdev_get_stats64()
2226 stats->rx_errors += val; in tsnep_netdev_get_stats64()
2227 stats->rx_fifo_errors += val; in tsnep_netdev_get_stats64()
2230 stats->rx_errors += val; in tsnep_netdev_get_stats64()
2231 stats->rx_frame_errors += val; in tsnep_netdev_get_stats64()
2234 reg = ioread32(adapter->addr + ECM_STAT); in tsnep_netdev_get_stats64()
2236 stats->rx_errors += val; in tsnep_netdev_get_stats64()
2238 stats->rx_errors += val; in tsnep_netdev_get_stats64()
2239 stats->rx_crc_errors += val; in tsnep_netdev_get_stats64()
2241 stats->rx_errors += val; in tsnep_netdev_get_stats64()
2246 iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW); in tsnep_mac_set_address()
2248 adapter->addr + TSNEP_MAC_ADDRESS_HIGH); in tsnep_mac_set_address()
2250 ether_addr_copy(adapter->mac_address, addr); in tsnep_mac_set_address()
2251 netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n", in tsnep_mac_set_address()
2264 eth_hw_addr_set(netdev, sock_addr->sa_data); in tsnep_netdev_set_mac_address()
2265 tsnep_mac_set_address(adapter, sock_addr->sa_data); in tsnep_netdev_set_mac_address()
2274 netdev_features_t changed = netdev->features ^ features; in tsnep_netdev_set_features()
2290 struct tsnep_rx_inline *rx_inline = hwtstamps->netdev_data; in tsnep_netdev_get_tstamp()
2294 timestamp = __le64_to_cpu(rx_inline->counter); in tsnep_netdev_get_tstamp()
2296 timestamp = __le64_to_cpu(rx_inline->timestamp); in tsnep_netdev_get_tstamp()
2305 switch (bpf->command) { in tsnep_netdev_bpf()
2307 return tsnep_xdp_setup_prog(adapter, bpf->prog, bpf->extack); in tsnep_netdev_bpf()
2309 return tsnep_xdp_setup_pool(adapter, bpf->xsk.pool, in tsnep_netdev_bpf()
2310 bpf->xsk.queue_id); in tsnep_netdev_bpf()
2312 return -EOPNOTSUPP; in tsnep_netdev_bpf()
2319 cpu &= TSNEP_MAX_QUEUES - 1; in tsnep_xdp_get_tx()
2321 while (cpu >= adapter->num_tx_queues) in tsnep_xdp_get_tx()
2322 cpu -= adapter->num_tx_queues; in tsnep_xdp_get_tx()
2324 return &adapter->tx[cpu]; in tsnep_xdp_get_tx()
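
tsnep_xdp_get_tx() above folds the current CPU number into the range of configured TX queues, so XDP transmit (ndo_xdp_xmit) is spread across queues by CPU. A small standalone sketch of that mapping, under stated assumptions: MAX_QUEUES stands in for TSNEP_MAX_QUEUES and is assumed to be a power of two, and xdp_tx_queue() is a hypothetical helper name.

#include <stdio.h>

#define MAX_QUEUES 8	/* stands in for TSNEP_MAX_QUEUES, assumed power of two */

/* Fold a CPU number into the range of configured TX queues, mirroring the
 * masking and subtraction in tsnep_xdp_get_tx() above.  num_tx_queues is
 * at least 1, since TX/RX queue pair 0 is mandatory for the netdev.
 */
static int xdp_tx_queue(int cpu, int num_tx_queues)
{
	cpu &= MAX_QUEUES - 1;

	while (cpu >= num_tx_queues)
		cpu -= num_tx_queues;

	return cpu;
}

int main(void)
{
	/* with two configured TX queues, CPUs 0..7 alternate between queue 0 and 1 */
	for (int cpu = 0; cpu < 8; cpu++)
		printf("cpu %d -> tx queue %d\n", cpu, xdp_tx_queue(cpu, 2));

	return 0;
}
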
2333 struct tsnep_tx *tx; in tsnep_netdev_xdp_xmit() local
2338 return -EINVAL; in tsnep_netdev_xdp_xmit()
2340 tx = tsnep_xdp_get_tx(adapter, cpu); in tsnep_netdev_xdp_xmit()
2341 nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index); in tsnep_netdev_xdp_xmit()
2346 xmit = tsnep_xdp_xmit_frame_ring(xdp[nxmit], tx, in tsnep_netdev_xdp_xmit()
2358 tsnep_xdp_xmit_flush(tx); in tsnep_netdev_xdp_xmit()
2371 if (queue_id >= adapter->num_rx_queues || in tsnep_netdev_xsk_wakeup()
2372 queue_id >= adapter->num_tx_queues) in tsnep_netdev_xsk_wakeup()
2373 return -EINVAL; in tsnep_netdev_xsk_wakeup()
2375 queue = &adapter->queue[queue_id]; in tsnep_netdev_xsk_wakeup()
2377 if (!napi_if_scheduled_mark_missed(&queue->napi)) in tsnep_netdev_xsk_wakeup()
2378 napi_schedule(&queue->napi); in tsnep_netdev_xsk_wakeup()
2403 /* initialize RX filtering, at least configured MAC address and in tsnep_mac_init()
2406 iowrite16(0, adapter->addr + TSNEP_RX_FILTER); in tsnep_mac_init()
2409 * - device tree in tsnep_mac_init()
2410 * - valid MAC address already set in tsnep_mac_init()
2411 * - MAC address register if valid in tsnep_mac_init()
2412 * - random MAC address in tsnep_mac_init()
2414 retval = of_get_mac_address(adapter->pdev->dev.of_node, in tsnep_mac_init()
2415 adapter->mac_address); in tsnep_mac_init()
2416 if (retval == -EPROBE_DEFER) in tsnep_mac_init()
2418 if (retval && !is_valid_ether_addr(adapter->mac_address)) { in tsnep_mac_init()
2419 *(u32 *)adapter->mac_address = in tsnep_mac_init()
2420 ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW); in tsnep_mac_init()
2421 *(u16 *)(adapter->mac_address + sizeof(u32)) = in tsnep_mac_init()
2422 ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH); in tsnep_mac_init()
2423 if (!is_valid_ether_addr(adapter->mac_address)) in tsnep_mac_init()
2424 eth_random_addr(adapter->mac_address); in tsnep_mac_init()
2427 tsnep_mac_set_address(adapter, adapter->mac_address); in tsnep_mac_init()
2428 eth_hw_addr_set(adapter->netdev, adapter->mac_address); in tsnep_mac_init()
2435 struct device_node *np = adapter->pdev->dev.of_node; in tsnep_mdio_init()
2443 adapter->suppress_preamble = in tsnep_mdio_init()
2444 of_property_read_bool(np, "suppress-preamble"); in tsnep_mdio_init()
2447 adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev); in tsnep_mdio_init()
2448 if (!adapter->mdiobus) { in tsnep_mdio_init()
2449 retval = -ENOMEM; in tsnep_mdio_init()
2454 adapter->mdiobus->priv = (void *)adapter; in tsnep_mdio_init()
2455 adapter->mdiobus->parent = &adapter->pdev->dev; in tsnep_mdio_init()
2456 adapter->mdiobus->read = tsnep_mdiobus_read; in tsnep_mdio_init()
2457 adapter->mdiobus->write = tsnep_mdiobus_write; in tsnep_mdio_init()
2458 adapter->mdiobus->name = TSNEP "-mdiobus"; in tsnep_mdio_init()
2459 snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s", in tsnep_mdio_init()
2460 adapter->pdev->name); in tsnep_mdio_init()
2463 adapter->mdiobus->phy_mask = 0x0000001; in tsnep_mdio_init()
2465 retval = of_mdiobus_register(adapter->mdiobus, np); in tsnep_mdio_init()
2478 retval = of_get_phy_mode(adapter->pdev->dev.of_node, in tsnep_phy_init()
2479 &adapter->phy_mode); in tsnep_phy_init()
2481 adapter->phy_mode = PHY_INTERFACE_MODE_GMII; in tsnep_phy_init()
2483 phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle", in tsnep_phy_init()
2485 adapter->phydev = of_phy_find_device(phy_node); in tsnep_phy_init()
2487 if (!adapter->phydev && adapter->mdiobus) in tsnep_phy_init()
2488 adapter->phydev = phy_find_first(adapter->mdiobus); in tsnep_phy_init()
2489 if (!adapter->phydev) in tsnep_phy_init()
2490 return -EIO; in tsnep_phy_init()
2502 /* one TX/RX queue pair for netdev is mandatory */ in tsnep_queue_init()
2503 if (platform_irq_count(adapter->pdev) == 1) in tsnep_queue_init()
2504 retval = platform_get_irq(adapter->pdev, 0); in tsnep_queue_init()
2506 retval = platform_get_irq_byname(adapter->pdev, "mac"); in tsnep_queue_init()
2509 adapter->num_tx_queues = 1; in tsnep_queue_init()
2510 adapter->num_rx_queues = 1; in tsnep_queue_init()
2511 adapter->num_queues = 1; in tsnep_queue_init()
2512 adapter->queue[0].adapter = adapter; in tsnep_queue_init()
2513 adapter->queue[0].irq = retval; in tsnep_queue_init()
2514 adapter->queue[0].tx = &adapter->tx[0]; in tsnep_queue_init()
2515 adapter->queue[0].tx->adapter = adapter; in tsnep_queue_init()
2516 adapter->queue[0].tx->addr = adapter->addr + TSNEP_QUEUE(0); in tsnep_queue_init()
2517 adapter->queue[0].tx->queue_index = 0; in tsnep_queue_init()
2518 adapter->queue[0].rx = &adapter->rx[0]; in tsnep_queue_init()
2519 adapter->queue[0].rx->adapter = adapter; in tsnep_queue_init()
2520 adapter->queue[0].rx->addr = adapter->addr + TSNEP_QUEUE(0); in tsnep_queue_init()
2521 adapter->queue[0].rx->queue_index = 0; in tsnep_queue_init()
2522 adapter->queue[0].irq_mask = irq_mask; in tsnep_queue_init()
2523 adapter->queue[0].irq_delay_addr = adapter->addr + ECM_INT_DELAY; in tsnep_queue_init()
2524 retval = tsnep_set_irq_coalesce(&adapter->queue[0], in tsnep_queue_init()
2529 adapter->netdev->irq = adapter->queue[0].irq; in tsnep_queue_init()
2531 /* add additional TX/RX queue pairs only if dedicated interrupt is in tsnep_queue_init()
2535 sprintf(name, "txrx-%d", i); in tsnep_queue_init()
2536 retval = platform_get_irq_byname_optional(adapter->pdev, name); in tsnep_queue_init()
2540 adapter->num_tx_queues++; in tsnep_queue_init()
2541 adapter->num_rx_queues++; in tsnep_queue_init()
2542 adapter->num_queues++; in tsnep_queue_init()
2543 adapter->queue[i].adapter = adapter; in tsnep_queue_init()
2544 adapter->queue[i].irq = retval; in tsnep_queue_init()
2545 adapter->queue[i].tx = &adapter->tx[i]; in tsnep_queue_init()
2546 adapter->queue[i].tx->adapter = adapter; in tsnep_queue_init()
2547 adapter->queue[i].tx->addr = adapter->addr + TSNEP_QUEUE(i); in tsnep_queue_init()
2548 adapter->queue[i].tx->queue_index = i; in tsnep_queue_init()
2549 adapter->queue[i].rx = &adapter->rx[i]; in tsnep_queue_init()
2550 adapter->queue[i].rx->adapter = adapter; in tsnep_queue_init()
2551 adapter->queue[i].rx->addr = adapter->addr + TSNEP_QUEUE(i); in tsnep_queue_init()
2552 adapter->queue[i].rx->queue_index = i; in tsnep_queue_init()
2553 adapter->queue[i].irq_mask = in tsnep_queue_init()
2555 adapter->queue[i].irq_delay_addr = in tsnep_queue_init()
2556 adapter->addr + ECM_INT_DELAY + ECM_INT_DELAY_OFFSET * i; in tsnep_queue_init()
2557 retval = tsnep_set_irq_coalesce(&adapter->queue[i], in tsnep_queue_init()
2577 netdev = devm_alloc_etherdev_mqs(&pdev->dev, in tsnep_probe()
2581 return -ENODEV; in tsnep_probe()
2582 SET_NETDEV_DEV(netdev, &pdev->dev); in tsnep_probe()
2585 adapter->pdev = pdev; in tsnep_probe()
2586 adapter->dmadev = &pdev->dev; in tsnep_probe()
2587 adapter->netdev = netdev; in tsnep_probe()
2588 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE | in tsnep_probe()
2592 netdev->min_mtu = ETH_MIN_MTU; in tsnep_probe()
2593 netdev->max_mtu = TSNEP_MAX_FRAME_SIZE; in tsnep_probe()
2595 mutex_init(&adapter->gate_control_lock); in tsnep_probe()
2596 mutex_init(&adapter->rxnfc_lock); in tsnep_probe()
2597 INIT_LIST_HEAD(&adapter->rxnfc_rules); in tsnep_probe()
2599 adapter->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &io); in tsnep_probe()
2600 if (IS_ERR(adapter->addr)) in tsnep_probe()
2601 return PTR_ERR(adapter->addr); in tsnep_probe()
2602 netdev->mem_start = io->start; in tsnep_probe()
2603 netdev->mem_end = io->end; in tsnep_probe()
2605 type = ioread32(adapter->addr + ECM_TYPE); in tsnep_probe()
2609 adapter->gate_control = type & ECM_GATE_CONTROL; in tsnep_probe()
2610 adapter->rxnfc_max = TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT; in tsnep_probe()
2618 retval = dma_set_mask_and_coherent(&adapter->pdev->dev, in tsnep_probe()
2621 dev_err(&adapter->pdev->dev, "no usable DMA configuration.\n"); in tsnep_probe()
2649 netdev->netdev_ops = &tsnep_netdev_ops; in tsnep_probe()
2650 netdev->ethtool_ops = &tsnep_ethtool_ops; in tsnep_probe()
2651 netdev->features = NETIF_F_SG; in tsnep_probe()
2652 netdev->hw_features = netdev->features | NETIF_F_LOOPBACK; in tsnep_probe()
2654 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in tsnep_probe()
2666 dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version, in tsnep_probe()
2668 if (adapter->gate_control) in tsnep_probe()
2669 dev_info(&adapter->pdev->dev, "gate control detected\n"); in tsnep_probe()
2681 if (adapter->mdiobus) in tsnep_probe()
2682 mdiobus_unregister(adapter->mdiobus); in tsnep_probe()
2691 unregister_netdev(adapter->netdev); in tsnep_remove()
2699 if (adapter->mdiobus) in tsnep_remove()
2700 mdiobus_unregister(adapter->mdiobus); in tsnep_remove()
2721 MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");