Lines Matching +full:tx +full:- +full:rx
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
6 * The TSN endpoint Ethernet MAC is an FPGA-based network device for real-time
10 * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
14 * - www.embedded-experts.at/tsn
15 * - www.engleder-embedded.com
36 #define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
38 /* XSK buffer shall store at least a Q-in-Q frame */
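(Editorial aside on the requirement above: a Q-in-Q, i.e. 802.1ad double-tagged, frame is at most the standard 1518-byte maximum Ethernet frame plus two 4-byte VLAN tags, 1526 bytes in total, which is the lower bound the XSK buffer size referred to here has to satisfy; the driver's actual constant is defined on surrounding lines not shown in this listing.)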
52 ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)
78 iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
84 iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
90 u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE);
94 iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);
98 phy_mac_interrupt(adapter->netdev->phydev);
100 /* handle TX/RX queue 0 interrupt */
101 if ((active & adapter->queue[0].irq_mask) != 0) {
102 if (napi_schedule_prep(&adapter->queue[0].napi)) {
103 tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
105 __napi_schedule(&adapter->queue[0].napi);
116 /* handle TX/RX queue interrupt */
117 if (napi_schedule_prep(&queue->napi)) {
118 tsnep_disable_irq(queue->adapter, queue->irq_mask);
120 __napi_schedule(&queue->napi);
129 return -ERANGE;
135 queue->irq_delay &= ~ECM_INT_DELAY_MASK;
136 queue->irq_delay |= usecs;
137 iowrite8(queue->irq_delay, queue->irq_delay_addr);
146 usecs = (queue->irq_delay & ECM_INT_DELAY_MASK);
155 struct tsnep_adapter *adapter = bus->priv;
160 if (!adapter->suppress_preamble)
164 iowrite32(md, adapter->addr + ECM_MD_CONTROL);
165 retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
176 struct tsnep_adapter *adapter = bus->priv;
181 if (!adapter->suppress_preamble)
186 iowrite32(md, adapter->addr + ECM_MD_CONTROL);
187 retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
199 switch (adapter->phydev->speed) {
210 iowrite32(mode, adapter->addr + ECM_STATUS);
216 struct phy_device *phydev = netdev->phydev;
218 if (phydev->link)
221 phy_print_status(netdev->phydev);
229 if (adapter->phydev->autoneg == AUTONEG_DISABLE &&
230 adapter->phydev->speed == SPEED_100)
238 return phy_loopback(adapter->phydev, enable, speed);
247 retval = phy_connect_direct(adapter->netdev, adapter->phydev,
249 adapter->phy_mode);
252 phydev = adapter->netdev->phydev;
264 phy_ethtool_set_eee(adapter->phydev, &ethtool_keee);
266 adapter->phydev->irq = PHY_MAC_INTERRUPT;
267 phy_start(adapter->phydev);
274 phy_stop(adapter->netdev->phydev);
275 phy_disconnect(adapter->netdev->phydev);
278 static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)
280 struct device *dmadev = tx->adapter->dmadev;
283 memset(tx->entry, 0, sizeof(tx->entry));
286 if (tx->page[i]) {
287 dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i],
288 tx->page_dma[i]);
289 tx->page[i] = NULL;
290 tx->page_dma[i] = 0;
295 static int tsnep_tx_ring_create(struct tsnep_tx *tx)
297 struct device *dmadev = tx->adapter->dmadev;
304 tx->page[i] =
305 dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i],
307 if (!tx->page[i]) {
308 retval = -ENOMEM;
312 entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
313 entry->desc_wb = (struct tsnep_tx_desc_wb *)
314 (((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j);
315 entry->desc = (struct tsnep_tx_desc *)
316 (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
317 entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j;
318 entry->owner_user_flag = false;
322 entry = &tx->entry[i];
323 next_entry = &tx->entry[(i + 1) & TSNEP_RING_MASK];
324 entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
330 tsnep_tx_ring_cleanup(tx);
334 static void tsnep_tx_init(struct tsnep_tx *tx)
338 dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
339 iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW);
340 iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH);
341 tx->write = 0;
342 tx->read = 0;
343 tx->owner_counter = 1;
344 tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
347 static void tsnep_tx_enable(struct tsnep_tx *tx)
351 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
358 static void tsnep_tx_disable(struct tsnep_tx *tx, struct napi_struct *napi)
363 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
369 /* wait until TX is done in hardware */
370 readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
374 /* wait until TX is also done in software */
375 while (READ_ONCE(tx->read) != tx->write) {
381 static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
384 struct tsnep_tx_entry *entry = &tx->entry[index];
386 entry->properties = 0;
388 if (entry->skb) {
389 entry->properties = length & TSNEP_DESC_LENGTH_MASK;
390 entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
391 if ((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP)
392 entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
412 entry->owner_user_flag = !entry->owner_user_flag;
415 entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG;
416 if (index == tx->increment_owner_counter) {
417 tx->owner_counter++;
418 if (tx->owner_counter == 4)
419 tx->owner_counter = 1;
420 tx->increment_owner_counter--;
421 if (tx->increment_owner_counter < 0)
422 tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
424 entry->properties |=
425 (tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
427 if (entry->owner_user_flag)
428 entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG;
429 entry->desc->more_properties =
430 __cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK);
431 if (entry->type & TSNEP_TX_TYPE_INLINE)
432 entry->properties |= TSNEP_TX_DESC_DATA_AFTER_DESC_FLAG;
439 entry->desc->properties = __cpu_to_le32(entry->properties);
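(Editorial sketch: the owner-counter rotation above is compact but non-obvious. The standalone program below, an editorial addition with an illustrative ring size rather than the driver's TSNEP_RING_SIZE, reproduces just that logic; it shows that the counter cycles through 1..3, advances exactly once per pass over the ring at a position that moves back by one slot each pass, so consecutive passes write different counter values into the same slot.)

#include <stdio.h>

#define RING_SIZE 8	/* illustrative only, not the driver's constant */

int main(void)
{
	int owner_counter = 1;			/* initial value as in tsnep_tx_init() */
	int increment_at = RING_SIZE - 1;	/* initial value as in tsnep_tx_init() */
	int i;

	for (i = 0; i < 3 * RING_SIZE; i++) {
		int index = i % RING_SIZE;

		/* same rotation as tsnep_tx_activate() */
		if (index == increment_at) {
			if (++owner_counter == 4)
				owner_counter = 1;
			if (--increment_at < 0)
				increment_at = RING_SIZE - 1;
		}
		printf("slot %d gets owner counter %d\n", index, owner_counter);
	}
	return 0;
}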
442 static int tsnep_tx_desc_available(struct tsnep_tx *tx)
444 if (tx->read <= tx->write)
445 return TSNEP_RING_SIZE - tx->write + tx->read - 1;
447 return tx->read - tx->write - 1;
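(Editorial note: the free-descriptor calculation just above is the usual circular-ring arithmetic with one slot deliberately kept unused, so that read == write can only mean "empty". A few worked values, assuming an illustrative ring size of 256 rather than the driver's constant:

	read == 0,   write == 0   ->  256 - 0 + 0 - 1  = 255 free (empty ring)
	read == 10,  write == 9   ->  10 - 9 - 1       = 0 free (ring full)
	read == 100, write == 50  ->  100 - 50 - 1     = 49 free)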
460 return -ENOMEM;
461 entry->type = TSNEP_TX_TYPE_SKB_FRAG_MAP_PAGE;
467 memcpy(&entry->desc->tx, fragdata, len);
472 memcpy(&entry->desc->tx, fragdata + skb_frag_off(frag),
476 entry->type = TSNEP_TX_TYPE_SKB_FRAG_INLINE;
483 static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count,
486 struct device *dmadev = tx->adapter->dmadev;
494 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK];
499 dma = dma_map_single(dmadev, skb->data, len,
502 return -ENOMEM;
503 entry->type = TSNEP_TX_TYPE_SKB_MAP;
506 memcpy(&entry->desc->tx, skb->data, len);
507 entry->type = TSNEP_TX_TYPE_SKB_INLINE;
512 entry->type |= TSNEP_TX_TYPE_TSTAMP;
514 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
522 entry->len = len;
525 entry->desc->tx = __cpu_to_le64(dma);
534 static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
536 struct device *dmadev = tx->adapter->dmadev;
542 entry = &tx->entry[(index + i) & TSNEP_RING_MASK];
544 if (entry->len) {
545 if (entry->type & TSNEP_TX_TYPE_MAP)
550 else if (entry->type & TSNEP_TX_TYPE_MAP_PAGE)
555 map_len += entry->len;
556 entry->len = 0;
564 struct tsnep_tx *tx)
573 if (skb_shinfo(skb)->nr_frags > 0)
574 count += skb_shinfo(skb)->nr_frags;
576 if (tsnep_tx_desc_available(tx) < count) {
580 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
585 entry = &tx->entry[tx->write];
586 entry->skb = skb;
588 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
589 tx->adapter->hwtstamp_config.tx_type == HWTSTAMP_TX_ON) {
590 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
594 retval = tsnep_tx_map(skb, tx, count, do_tstamp);
596 tsnep_tx_unmap(tx, tx->write, count);
597 dev_kfree_skb_any(entry->skb);
598 entry->skb = NULL;
600 tx->dropped++;
607 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length,
608 i == count - 1);
609 tx->write = (tx->write + count) & TSNEP_RING_MASK;
616 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);
618 if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
620 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
626 static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx,
629 struct device *dmadev = tx->adapter->dmadev;
640 len = xdpf->len;
642 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK];
645 xdpf->data;
648 return -ENOMEM;
650 entry->type = TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE;
653 virt_to_page(xdpf->data);
658 dma += sizeof(*xdpf) + xdpf->headroom;
662 entry->type = TSNEP_TX_TYPE_XDP_TX;
665 entry->len = len;
668 entry->desc->tx = __cpu_to_le64(dma);
673 frag = &shinfo->frags[i];
683 struct tsnep_tx *tx, u32 type)
691 count += shinfo->nr_frags;
693 /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS
694 * will be available for normal TX path and queue is stopped there if
697 if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count))
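(Editorial note to make the reservation above concrete; MAX_SKB_FRAGS is typically 17 on a default kernel configuration: an XDP frame needing count = 3 descriptors is only queued if at least 17 + 1 + 3 = 21 descriptors are free, so the MAX_SKB_FRAGS + 1 = 18 descriptors a maximally fragmented skb may need always remain for the normal TX path, which, per the comment above, is where the queue is stopped and reactivated when it runs low.)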
700 entry = &tx->entry[tx->write];
701 entry->xdpf = xdpf;
703 retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, type);
705 tsnep_tx_unmap(tx, tx->write, count);
706 entry->xdpf = NULL;
708 tx->dropped++;
715 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length,
716 i == count - 1);
717 tx->write = (tx->write + count) & TSNEP_RING_MASK;
725 static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx)
727 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);
732 struct netdev_queue *tx_nq, struct tsnep_tx *tx,
750 xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, type);
761 static int tsnep_xdp_tx_map_zc(struct xdp_desc *xdpd, struct tsnep_tx *tx)
766 entry = &tx->entry[tx->write];
767 entry->zc = true;
769 dma = xsk_buff_raw_get_dma(tx->xsk_pool, xdpd->addr);
770 xsk_buff_raw_dma_sync_for_device(tx->xsk_pool, dma, xdpd->len);
772 entry->type = TSNEP_TX_TYPE_XSK;
773 entry->len = xdpd->len;
775 entry->desc->tx = __cpu_to_le64(dma);
777 return xdpd->len;
781 struct tsnep_tx *tx)
785 length = tsnep_xdp_tx_map_zc(xdpd, tx);
787 tsnep_tx_activate(tx, tx->write, length, true);
788 tx->write = (tx->write + 1) & TSNEP_RING_MASK;
791 static void tsnep_xdp_xmit_zc(struct tsnep_tx *tx)
793 int desc_available = tsnep_tx_desc_available(tx);
794 struct xdp_desc *descs = tx->xsk_pool->tx_descs;
797 /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS
798 * will be available for normal TX path and queue is stopped there if
803 desc_available -= MAX_SKB_FRAGS + 1;
805 batch = xsk_tx_peek_release_desc_batch(tx->xsk_pool, desc_available);
807 tsnep_xdp_xmit_frame_ring_zc(&descs[i], tx);
815 tsnep_xdp_xmit_flush(tx);
819 static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
828 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
832 if (tx->read == tx->write)
835 entry = &tx->entry[tx->read];
836 if ((__le32_to_cpu(entry->desc_wb->properties) &
838 (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
847 if ((entry->type & TSNEP_TX_TYPE_SKB) &&
848 skb_shinfo(entry->skb)->nr_frags > 0)
849 count += skb_shinfo(entry->skb)->nr_frags;
850 else if ((entry->type & TSNEP_TX_TYPE_XDP) &&
851 xdp_frame_has_frags(entry->xdpf))
852 count += xdp_get_shared_info_from_frame(entry->xdpf)->nr_frags;
854 length = tsnep_tx_unmap(tx, tx->read, count);
856 if (((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP) &&
857 (__le32_to_cpu(entry->desc_wb->properties) &
862 if (entry->skb->sk &&
863 READ_ONCE(entry->skb->sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC)
865 __le64_to_cpu(entry->desc_wb->counter);
868 __le64_to_cpu(entry->desc_wb->timestamp);
873 skb_tstamp_tx(entry->skb, &hwtstamps);
876 if (entry->type & TSNEP_TX_TYPE_SKB)
877 napi_consume_skb(entry->skb, napi_budget);
878 else if (entry->type & TSNEP_TX_TYPE_XDP)
879 xdp_return_frame_rx_napi(entry->xdpf);
883 entry->skb = NULL;
885 tx->read = (tx->read + count) & TSNEP_RING_MASK;
887 tx->packets++;
888 tx->bytes += length + ETH_FCS_LEN;
890 budget--;
893 if (tx->xsk_pool) {
895 xsk_tx_completed(tx->xsk_pool, xsk_frames);
896 if (xsk_uses_need_wakeup(tx->xsk_pool))
897 xsk_set_tx_need_wakeup(tx->xsk_pool);
898 tsnep_xdp_xmit_zc(tx);
901 if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
911 static bool tsnep_tx_pending(struct tsnep_tx *tx)
917 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
920 if (tx->read != tx->write) {
921 entry = &tx->entry[tx->read];
922 if ((__le32_to_cpu(entry->desc_wb->properties) &
924 (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
933 static int tsnep_tx_open(struct tsnep_tx *tx)
937 retval = tsnep_tx_ring_create(tx);
941 tsnep_tx_init(tx);
946 static void tsnep_tx_close(struct tsnep_tx *tx)
948 tsnep_tx_ring_cleanup(tx);
951 static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
953 struct device *dmadev = rx->adapter->dmadev;
958 entry = &rx->entry[i];
959 if (!rx->xsk_pool && entry->page)
960 page_pool_put_full_page(rx->page_pool, entry->page,
962 if (rx->xsk_pool && entry->xdp)
963 xsk_buff_free(entry->xdp);
965 entry->page = NULL;
968 if (rx->page_pool)
969 page_pool_destroy(rx->page_pool);
971 memset(rx->entry, 0, sizeof(rx->entry));
974 if (rx->page[i]) {
975 dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i],
976 rx->page_dma[i]);
977 rx->page[i] = NULL;
978 rx->page_dma[i] = 0;
983 static int tsnep_rx_ring_create(struct tsnep_rx *rx)
985 struct device *dmadev = rx->adapter->dmadev;
993 rx->page[i] =
994 dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i],
996 if (!rx->page[i]) {
997 retval = -ENOMEM;
1001 entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
1002 entry->desc_wb = (struct tsnep_rx_desc_wb *)
1003 (((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j);
1004 entry->desc = (struct tsnep_rx_desc *)
1005 (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
1006 entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
1018 rx->page_pool = page_pool_create(&pp_params);
1019 if (IS_ERR(rx->page_pool)) {
1020 retval = PTR_ERR(rx->page_pool);
1021 rx->page_pool = NULL;
1026 entry = &rx->entry[i];
1027 next_entry = &rx->entry[(i + 1) & TSNEP_RING_MASK];
1028 entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
1034 tsnep_rx_ring_cleanup(rx);
1038 static void tsnep_rx_init(struct tsnep_rx *rx)
1042 dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
1043 iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW);
1044 iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH);
1045 rx->write = 0;
1046 rx->read = 0;
1047 rx->owner_counter = 1;
1048 rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
1051 static void tsnep_rx_enable(struct tsnep_rx *rx)
1056 iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
1059 static void tsnep_rx_disable(struct tsnep_rx *rx)
1063 iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
1064 readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
1069 static int tsnep_rx_desc_available(struct tsnep_rx *rx)
1071 if (rx->read <= rx->write)
1072 return TSNEP_RING_SIZE - rx->write + rx->read - 1;
1074 return rx->read - rx->write - 1;
1077 static void tsnep_rx_free_page_buffer(struct tsnep_rx *rx)
1084 page = rx->page_buffer;
1086 page_pool_put_full_page(rx->page_pool, *page, false);
1092 static int tsnep_rx_alloc_page_buffer(struct tsnep_rx *rx)
1099 for (i = 0; i < TSNEP_RING_SIZE - 1; i++) {
1100 rx->page_buffer[i] = page_pool_dev_alloc_pages(rx->page_pool);
1101 if (!rx->page_buffer[i]) {
1102 tsnep_rx_free_page_buffer(rx);
1104 return -ENOMEM;
1111 static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
1114 entry->page = page;
1115 entry->len = TSNEP_MAX_RX_BUF_SIZE;
1116 entry->dma = page_pool_get_dma_addr(entry->page);
1117 entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_RX_OFFSET);
1120 static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index)
1122 struct tsnep_rx_entry *entry = &rx->entry[index];
1125 page = page_pool_dev_alloc_pages(rx->page_pool);
1127 return -ENOMEM;
1128 tsnep_rx_set_page(rx, entry, page);
1133 static void tsnep_rx_reuse_buffer(struct tsnep_rx *rx, int index)
1135 struct tsnep_rx_entry *entry = &rx->entry[index];
1136 struct tsnep_rx_entry *read = &rx->entry[rx->read];
1138 tsnep_rx_set_page(rx, entry, read->page);
1139 read->page = NULL;
1142 static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
1144 struct tsnep_rx_entry *entry = &rx->entry[index];
1147 entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
1148 entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
1149 if (index == rx->increment_owner_counter) {
1150 rx->owner_counter++;
1151 if (rx->owner_counter == 4)
1152 rx->owner_counter = 1;
1153 rx->increment_owner_counter--;
1154 if (rx->increment_owner_counter < 0)
1155 rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
1157 entry->properties |=
1158 (rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
1166 entry->desc->properties = __cpu_to_le32(entry->properties);
1169 static int tsnep_rx_alloc(struct tsnep_rx *rx, int count, bool reuse)
1175 index = (rx->write + i) & TSNEP_RING_MASK;
1177 if (unlikely(tsnep_rx_alloc_buffer(rx, index))) {
1178 rx->alloc_failed++;
1183 tsnep_rx_reuse_buffer(rx, index);
1188 tsnep_rx_activate(rx, index);
1192 rx->write = (rx->write + i) & TSNEP_RING_MASK;
1197 static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse)
1201 desc_refilled = tsnep_rx_alloc(rx, count, reuse);
1203 tsnep_rx_enable(rx);
1208 static void tsnep_rx_set_xdp(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
1211 entry->xdp = xdp;
1212 entry->len = TSNEP_XSK_RX_BUF_SIZE;
1213 entry->dma = xsk_buff_xdp_get_dma(entry->xdp);
1214 entry->desc->rx = __cpu_to_le64(entry->dma);
1217 static void tsnep_rx_reuse_buffer_zc(struct tsnep_rx *rx, int index)
1219 struct tsnep_rx_entry *entry = &rx->entry[index];
1220 struct tsnep_rx_entry *read = &rx->entry[rx->read];
1222 tsnep_rx_set_xdp(rx, entry, read->xdp);
1223 read->xdp = NULL;
1226 static int tsnep_rx_alloc_zc(struct tsnep_rx *rx, int count, bool reuse)
1231 allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, count);
1233 int index = (rx->write + i) & TSNEP_RING_MASK;
1234 struct tsnep_rx_entry *entry = &rx->entry[index];
1236 tsnep_rx_set_xdp(rx, entry, rx->xdp_batch[i]);
1237 tsnep_rx_activate(rx, index);
1240 rx->alloc_failed++;
1243 tsnep_rx_reuse_buffer_zc(rx, rx->write);
1244 tsnep_rx_activate(rx, rx->write);
1249 rx->write = (rx->write + i) & TSNEP_RING_MASK;
1254 static void tsnep_rx_free_zc(struct tsnep_rx *rx)
1259 struct tsnep_rx_entry *entry = &rx->entry[i];
1261 if (entry->xdp)
1262 xsk_buff_free(entry->xdp);
1263 entry->xdp = NULL;
1267 static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse)
1271 desc_refilled = tsnep_rx_alloc_zc(rx, count, reuse);
1273 tsnep_rx_enable(rx);
1278 static void tsnep_xsk_rx_need_wakeup(struct tsnep_rx *rx, int desc_available)
1281 xsk_set_rx_need_wakeup(rx->xsk_pool);
1283 xsk_clear_rx_need_wakeup(rx->xsk_pool);
1286 static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
1288 struct netdev_queue *tx_nq, struct tsnep_tx *tx)
1294 length = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM;
1301 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false))
1306 if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
1311 bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
1315 trace_xdp_exception(rx->adapter->netdev, prog, act);
1321 sync = xdp->data_end - xdp->data_hard_start -
1324 page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data),
1330 static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog,
1333 struct tsnep_tx *tx)
1339 /* XDP_REDIRECT is the main action for zero-copy */
1341 if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
1351 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true))
1356 bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
1360 trace_xdp_exception(rx->adapter->netdev, prog, act);
1369 struct netdev_queue *tx_nq, struct tsnep_tx *tx)
1373 tsnep_xdp_xmit_flush(tx);
1381 static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
1392 __skb_put(skb, length - ETH_FCS_LEN);
1394 if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
1400 skb_shinfo(skb)->tx_flags |=
1403 hwtstamps->netdev_data = rx_inline;
1406 skb_record_rx_queue(skb, rx->queue_index);
1407 skb->protocol = eth_type_trans(skb, rx->adapter->netdev);
1412 static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi,
1417 skb = tsnep_build_skb(rx, page, length);
1421 rx->packets++;
1422 rx->bytes += length;
1423 if (skb->pkt_type == PACKET_MULTICAST)
1424 rx->multicast++;
1428 page_pool_recycle_direct(rx->page_pool, page);
1430 rx->dropped++;
1434 static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
1437 struct device *dmadev = rx->adapter->dmadev;
1443 struct tsnep_tx *tx;
1449 desc_available = tsnep_rx_desc_available(rx);
1450 dma_dir = page_pool_get_dma_dir(rx->page_pool);
1451 prog = READ_ONCE(rx->adapter->xdp_prog);
1453 tx_nq = netdev_get_tx_queue(rx->adapter->netdev,
1454 rx->tx_queue_index);
1455 tx = &rx->adapter->tx[rx->tx_queue_index];
1457 xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq);
1460 while (likely(done < budget) && (rx->read != rx->write)) {
1461 entry = &rx->entry[rx->read];
1462 if ((__le32_to_cpu(entry->desc_wb->properties) &
1464 (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
1471 desc_available -= tsnep_rx_refill(rx, desc_available,
1473 if (!entry->page) {
1475 * empty RX ring, thus buffer cannot be used for
1476 * RX processing
1478 rx->read = (rx->read + 1) & TSNEP_RING_MASK;
1481 rx->dropped++;
1492 prefetch(page_address(entry->page) + TSNEP_RX_OFFSET);
1493 length = __le32_to_cpu(entry->desc_wb->properties) &
1495 dma_sync_single_range_for_cpu(dmadev, entry->dma,
1498 /* RX metadata with timestamps is in front of actual data,
1500 * consider metadata size as offset of actual data during RX
1503 length -= TSNEP_RX_INLINE_METADATA_SIZE;
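(Editorial sketch of the buffer layout the comment above refers to; constant names are the driver's, the picture itself is only an illustration:

	page + TSNEP_RX_OFFSET
	|-- RX inline metadata (timestamps) --|-- Ethernet frame --|-- FCS --|

The length written back by the hardware covers all three parts, so the metadata size is subtracted here and reused as the offset of the actual frame data when the skb or XDP buffer is built further down.)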
1505 rx->read = (rx->read + 1) & TSNEP_RING_MASK;
1511 xdp_prepare_buff(&xdp, page_address(entry->page),
1513 length - ETH_FCS_LEN, false);
1515 consume = tsnep_xdp_run_prog(rx, prog, &xdp,
1516 &xdp_status, tx_nq, tx);
1518 rx->packets++;
1519 rx->bytes += length;
1521 entry->page = NULL;
1527 tsnep_rx_page(rx, napi, entry->page, length);
1528 entry->page = NULL;
1532 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx);
1535 tsnep_rx_refill(rx, desc_available, false);
1540 static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
1546 struct tsnep_tx *tx;
1553 desc_available = tsnep_rx_desc_available(rx);
1554 prog = READ_ONCE(rx->adapter->xdp_prog);
1556 tx_nq = netdev_get_tx_queue(rx->adapter->netdev,
1557 rx->tx_queue_index);
1558 tx = &rx->adapter->tx[rx->tx_queue_index];
1561 while (likely(done < budget) && (rx->read != rx->write)) {
1562 entry = &rx->entry[rx->read];
1563 if ((__le32_to_cpu(entry->desc_wb->properties) &
1565 (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
1572 desc_available -= tsnep_rx_refill_zc(rx, desc_available,
1574 if (!entry->xdp) {
1576 * empty RX ring, thus buffer cannot be used for
1577 * RX processing
1579 rx->read = (rx->read + 1) & TSNEP_RING_MASK;
1582 rx->dropped++;
1593 prefetch(entry->xdp->data);
1594 length = __le32_to_cpu(entry->desc_wb->properties) &
1596 xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN);
1597 xsk_buff_dma_sync_for_cpu(entry->xdp);
1599 /* RX metadata with timestamps is in front of actual data,
1601 * consider metadata size as offset of actual data during RX
1604 length -= TSNEP_RX_INLINE_METADATA_SIZE;
1606 rx->read = (rx->read + 1) & TSNEP_RING_MASK;
1612 entry->xdp->data += TSNEP_RX_INLINE_METADATA_SIZE;
1613 entry->xdp->data_meta += TSNEP_RX_INLINE_METADATA_SIZE;
1615 consume = tsnep_xdp_run_prog_zc(rx, prog, entry->xdp,
1616 &xdp_status, tx_nq, tx);
1618 rx->packets++;
1619 rx->bytes += length;
1621 entry->xdp = NULL;
1627 page = page_pool_dev_alloc_pages(rx->page_pool);
1630 entry->xdp->data - TSNEP_RX_INLINE_METADATA_SIZE,
1632 tsnep_rx_page(rx, napi, page, length);
1634 rx->dropped++;
1636 xsk_buff_free(entry->xdp);
1637 entry->xdp = NULL;
1641 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx);
1644 desc_available -= tsnep_rx_refill_zc(rx, desc_available, false);
1646 if (xsk_uses_need_wakeup(rx->xsk_pool)) {
1647 tsnep_xsk_rx_need_wakeup(rx, desc_available);
1655 static bool tsnep_rx_pending(struct tsnep_rx *rx)
1659 if (rx->read != rx->write) {
1660 entry = &rx->entry[rx->read];
1661 if ((__le32_to_cpu(entry->desc_wb->properties) &
1663 (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
1670 static int tsnep_rx_open(struct tsnep_rx *rx)
1675 retval = tsnep_rx_ring_create(rx);
1679 tsnep_rx_init(rx);
1681 desc_available = tsnep_rx_desc_available(rx);
1682 if (rx->xsk_pool)
1683 retval = tsnep_rx_alloc_zc(rx, desc_available, false);
1685 retval = tsnep_rx_alloc(rx, desc_available, false);
1687 retval = -ENOMEM;
1695 if (rx->xsk_pool) {
1696 retval = tsnep_rx_alloc_page_buffer(rx);
1704 tsnep_rx_ring_cleanup(rx);
1708 static void tsnep_rx_close(struct tsnep_rx *rx)
1710 if (rx->xsk_pool)
1711 tsnep_rx_free_page_buffer(rx);
1713 tsnep_rx_ring_cleanup(rx);
1716 static void tsnep_rx_reopen(struct tsnep_rx *rx)
1718 struct page **page = rx->page_buffer;
1721 tsnep_rx_init(rx);
1724 struct tsnep_rx_entry *entry = &rx->entry[i];
1729 entry->desc->properties = 0;
1730 entry->desc_wb->properties = 0;
1734 tsnep_rx_set_page(rx, entry, *page);
1735 tsnep_rx_activate(rx, rx->write);
1736 rx->write++;
1744 static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx)
1746 struct page **page = rx->page_buffer;
1750 tsnep_rx_init(rx);
1756 allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch,
1757 TSNEP_RING_SIZE - 1);
1760 struct tsnep_rx_entry *entry = &rx->entry[i];
1765 if (entry->page) {
1766 *page = entry->page;
1767 entry->page = NULL;
1775 entry->desc->properties = 0;
1776 entry->desc_wb->properties = 0;
1779 tsnep_rx_set_xdp(rx, entry,
1780 rx->xdp_batch[allocated - 1]);
1781 tsnep_rx_activate(rx, rx->write);
1782 rx->write++;
1784 allocated--;
1792 if (xsk_uses_need_wakeup(rx->xsk_pool))
1793 tsnep_xsk_rx_need_wakeup(rx, tsnep_rx_desc_available(rx));
1798 if (queue->tx && tsnep_tx_pending(queue->tx))
1801 if (queue->rx && tsnep_rx_pending(queue->rx))
1814 if (queue->tx)
1815 complete = tsnep_tx_poll(queue->tx, budget);
1821 if (queue->rx) {
1822 done = queue->rx->xsk_pool ?
1823 tsnep_rx_poll_zc(queue->rx, napi, budget) :
1824 tsnep_rx_poll(queue->rx, napi, budget);
1834 tsnep_enable_irq(queue->adapter, queue->irq_mask);
1841 tsnep_disable_irq(queue->adapter, queue->irq_mask);
1846 return min(done, budget - 1);
1851 const char *name = netdev_name(queue->adapter->netdev);
1857 sprintf(queue->name, "%s-mac", name);
1859 dev = queue->adapter;
1861 if (queue->tx && queue->rx)
1862 snprintf(queue->name, sizeof(queue->name), "%s-txrx-%d",
1863 name, queue->rx->queue_index);
1864 else if (queue->tx)
1865 snprintf(queue->name, sizeof(queue->name), "%s-tx-%d",
1866 name, queue->tx->queue_index);
1868 snprintf(queue->name, sizeof(queue->name), "%s-rx-%d",
1869 name, queue->rx->queue_index);
1874 retval = request_irq(queue->irq, handler, 0, queue->name, dev);
1877 memset(queue->name, 0, sizeof(queue->name));
1887 if (!strlen(queue->name))
1891 dev = queue->adapter;
1895 free_irq(queue->irq, dev);
1896 memset(queue->name, 0, sizeof(queue->name));
1901 struct tsnep_rx *rx = queue->rx;
1905 if (rx) {
1906 if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
1907 xdp_rxq_info_unreg(&rx->xdp_rxq);
1908 if (xdp_rxq_info_is_reg(&rx->xdp_rxq_zc))
1909 xdp_rxq_info_unreg(&rx->xdp_rxq_zc);
1912 netif_napi_del(&queue->napi);
1918 struct tsnep_rx *rx = queue->rx;
1919 struct tsnep_tx *tx = queue->tx;
1922 netif_napi_add(adapter->netdev, &queue->napi, tsnep_poll);
1924 if (rx) {
1925 /* choose TX queue for XDP_TX */
1926 if (tx)
1927 rx->tx_queue_index = tx->queue_index;
1928 else if (rx->queue_index < adapter->num_tx_queues)
1929 rx->tx_queue_index = rx->queue_index;
1931 rx->tx_queue_index = 0;
1937 retval = xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev,
1938 rx->queue_index, queue->napi.napi_id);
1941 retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
1943 rx->page_pool);
1946 retval = xdp_rxq_info_reg(&rx->xdp_rxq_zc, adapter->netdev,
1947 rx->queue_index, queue->napi.napi_id);
1950 retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq_zc,
1955 if (rx->xsk_pool)
1956 xsk_pool_set_rxq_info(rx->xsk_pool, &rx->xdp_rxq_zc);
1961 netif_err(adapter, drv, adapter->netdev,
1962 "can't get assigned irq %d.\n", queue->irq);
1976 struct tsnep_adapter *adapter = queue->adapter;
1978 netif_napi_set_irq(&queue->napi, queue->irq);
1979 napi_enable(&queue->napi);
1980 tsnep_enable_irq(adapter, queue->irq_mask);
1982 if (queue->tx) {
1983 netif_queue_set_napi(adapter->netdev, queue->tx->queue_index,
1984 NETDEV_QUEUE_TYPE_TX, &queue->napi);
1985 tsnep_tx_enable(queue->tx);
1988 if (queue->rx) {
1989 netif_queue_set_napi(adapter->netdev, queue->rx->queue_index,
1990 NETDEV_QUEUE_TYPE_RX, &queue->napi);
1991 tsnep_rx_enable(queue->rx);
1997 struct tsnep_adapter *adapter = queue->adapter;
1999 if (queue->rx)
2000 netif_queue_set_napi(adapter->netdev, queue->rx->queue_index,
2003 if (queue->tx) {
2004 tsnep_tx_disable(queue->tx, &queue->napi);
2005 netif_queue_set_napi(adapter->netdev, queue->tx->queue_index,
2009 napi_disable(&queue->napi);
2010 tsnep_disable_irq(adapter, queue->irq_mask);
2012 /* disable RX after NAPI polling has been disabled, because RX can be
2015 if (queue->rx)
2016 tsnep_rx_disable(queue->rx);
2024 for (i = 0; i < adapter->num_queues; i++) {
2025 if (adapter->queue[i].tx) {
2026 retval = tsnep_tx_open(adapter->queue[i].tx);
2030 if (adapter->queue[i].rx) {
2031 retval = tsnep_rx_open(adapter->queue[i].rx);
2036 retval = tsnep_queue_open(adapter, &adapter->queue[i], i == 0);
2041 retval = netif_set_real_num_tx_queues(adapter->netdev,
2042 adapter->num_tx_queues);
2045 retval = netif_set_real_num_rx_queues(adapter->netdev,
2046 adapter->num_rx_queues);
2055 for (i = 0; i < adapter->num_queues; i++)
2056 tsnep_queue_enable(&adapter->queue[i]);
2063 for (i = 0; i < adapter->num_queues; i++) {
2064 tsnep_queue_close(&adapter->queue[i], i == 0);
2066 if (adapter->queue[i].rx)
2067 tsnep_rx_close(adapter->queue[i].rx);
2068 if (adapter->queue[i].tx)
2069 tsnep_tx_close(adapter->queue[i].tx);
2082 for (i = 0; i < adapter->num_queues; i++) {
2083 tsnep_queue_disable(&adapter->queue[i]);
2085 tsnep_queue_close(&adapter->queue[i], i == 0);
2087 if (adapter->queue[i].rx)
2088 tsnep_rx_close(adapter->queue[i].rx);
2089 if (adapter->queue[i].tx)
2090 tsnep_tx_close(adapter->queue[i].tx);
2098 bool running = netif_running(queue->adapter->netdev);
2103 return -EOPNOTSUPP;
2105 queue->rx->page_buffer = kcalloc(TSNEP_RING_SIZE,
2106 sizeof(*queue->rx->page_buffer),
2108 if (!queue->rx->page_buffer)
2109 return -ENOMEM;
2110 queue->rx->xdp_batch = kcalloc(TSNEP_RING_SIZE,
2111 sizeof(*queue->rx->xdp_batch),
2113 if (!queue->rx->xdp_batch) {
2114 kfree(queue->rx->page_buffer);
2115 queue->rx->page_buffer = NULL;
2117 return -ENOMEM;
2120 xsk_pool_set_rxq_info(pool, &queue->rx->xdp_rxq_zc);
2125 queue->tx->xsk_pool = pool;
2126 queue->rx->xsk_pool = pool;
2129 tsnep_rx_reopen_xsk(queue->rx);
2138 bool running = netif_running(queue->adapter->netdev);
2143 tsnep_rx_free_zc(queue->rx);
2145 queue->rx->xsk_pool = NULL;
2146 queue->tx->xsk_pool = NULL;
2149 tsnep_rx_reopen(queue->rx);
2153 kfree(queue->rx->xdp_batch);
2154 queue->rx->xdp_batch = NULL;
2155 kfree(queue->rx->page_buffer);
2156 queue->rx->page_buffer = NULL;
2165 if (queue_mapping >= adapter->num_tx_queues)
2168 return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]);
2178 if (netdev->flags & IFF_PROMISC) {
2181 } else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) {
2184 iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER);
2195 for (i = 0; i < adapter->num_tx_queues; i++) {
2196 stats->tx_packets += adapter->tx[i].packets;
2197 stats->tx_bytes += adapter->tx[i].bytes;
2198 stats->tx_dropped += adapter->tx[i].dropped;
2200 for (i = 0; i < adapter->num_rx_queues; i++) {
2201 stats->rx_packets += adapter->rx[i].packets;
2202 stats->rx_bytes += adapter->rx[i].bytes;
2203 stats->rx_dropped += adapter->rx[i].dropped;
2204 stats->multicast += adapter->rx[i].multicast;
2206 reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
2210 stats->rx_dropped += val;
2213 stats->rx_dropped += val;
2216 stats->rx_errors += val;
2217 stats->rx_fifo_errors += val;
2220 stats->rx_errors += val;
2221 stats->rx_frame_errors += val;
2224 reg = ioread32(adapter->addr + ECM_STAT);
2226 stats->rx_errors += val;
2228 stats->rx_errors += val;
2229 stats->rx_crc_errors += val;
2231 stats->rx_errors += val;
2236 iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW);
2238 adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
2240 ether_addr_copy(adapter->mac_address, addr);
2241 netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n",
2254 eth_hw_addr_set(netdev, sock_addr->sa_data);
2255 tsnep_mac_set_address(adapter, sock_addr->sa_data);
2264 netdev_features_t changed = netdev->features ^ features;
2280 struct tsnep_rx_inline *rx_inline = hwtstamps->netdev_data;
2284 timestamp = __le64_to_cpu(rx_inline->counter);
2286 timestamp = __le64_to_cpu(rx_inline->timestamp);
2295 switch (bpf->command) {
2297 return tsnep_xdp_setup_prog(adapter, bpf->prog, bpf->extack);
2299 return tsnep_xdp_setup_pool(adapter, bpf->xsk.pool,
2300 bpf->xsk.queue_id);
2302 return -EOPNOTSUPP;
2309 cpu &= TSNEP_MAX_QUEUES - 1;
2311 while (cpu >= adapter->num_tx_queues)
2312 cpu -= adapter->num_tx_queues;
2314 return &adapter->tx[cpu];
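(Editorial worked example of the CPU-to-queue mapping above, with illustrative values: assuming TSNEP_MAX_QUEUES is 4 and two TX queues exist, CPU 7 is first masked to 7 & 3 = 3 and then reduced to 3 - 2 = 1, so the caller transmits on adapter->tx[1].)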
2323 struct tsnep_tx *tx;
2328 return -EINVAL;
2330 tx = tsnep_xdp_get_tx(adapter, cpu);
2331 nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index);
2336 xmit = tsnep_xdp_xmit_frame_ring(xdp[nxmit], tx,
2348 tsnep_xdp_xmit_flush(tx);
2361 if (queue_id >= adapter->num_rx_queues ||
2362 queue_id >= adapter->num_tx_queues)
2363 return -EINVAL;
2365 queue = &adapter->queue[queue_id];
2367 if (!napi_if_scheduled_mark_missed(&queue->napi))
2368 napi_schedule(&queue->napi);
2395 /* initialize RX filtering, at least configured MAC address and
2398 iowrite16(0, adapter->addr + TSNEP_RX_FILTER);
2401 * - device tree
2402 * - valid MAC address already set
2403 * - MAC address register if valid
2404 * - random MAC address
2406 retval = of_get_mac_address(adapter->pdev->dev.of_node,
2407 adapter->mac_address);
2408 if (retval == -EPROBE_DEFER)
2410 if (retval && !is_valid_ether_addr(adapter->mac_address)) {
2411 *(u32 *)adapter->mac_address =
2412 ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW);
2413 *(u16 *)(adapter->mac_address + sizeof(u32)) =
2414 ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
2415 if (!is_valid_ether_addr(adapter->mac_address))
2416 eth_random_addr(adapter->mac_address);
2419 tsnep_mac_set_address(adapter, adapter->mac_address);
2420 eth_hw_addr_set(adapter->netdev, adapter->mac_address);
2427 struct device_node *np = adapter->pdev->dev.of_node;
2435 adapter->suppress_preamble =
2436 of_property_read_bool(np, "suppress-preamble");
2439 adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
2440 if (!adapter->mdiobus) {
2441 retval = -ENOMEM;
2446 adapter->mdiobus->priv = (void *)adapter;
2447 adapter->mdiobus->parent = &adapter->pdev->dev;
2448 adapter->mdiobus->read = tsnep_mdiobus_read;
2449 adapter->mdiobus->write = tsnep_mdiobus_write;
2450 adapter->mdiobus->name = TSNEP "-mdiobus";
2451 snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s",
2452 adapter->pdev->name);
2455 adapter->mdiobus->phy_mask = 0x0000001;
2457 retval = of_mdiobus_register(adapter->mdiobus, np);
2470 retval = of_get_phy_mode(adapter->pdev->dev.of_node,
2471 &adapter->phy_mode);
2473 adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
2475 phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle",
2477 adapter->phydev = of_phy_find_device(phy_node);
2479 if (!adapter->phydev && adapter->mdiobus)
2480 adapter->phydev = phy_find_first(adapter->mdiobus);
2481 if (!adapter->phydev)
2482 return -EIO;
2494 /* one TX/RX queue pair for netdev is mandatory */
2495 if (platform_irq_count(adapter->pdev) == 1)
2496 retval = platform_get_irq(adapter->pdev, 0);
2498 retval = platform_get_irq_byname(adapter->pdev, "mac");
2501 adapter->num_tx_queues = 1;
2502 adapter->num_rx_queues = 1;
2503 adapter->num_queues = 1;
2504 adapter->queue[0].adapter = adapter;
2505 adapter->queue[0].irq = retval;
2506 adapter->queue[0].tx = &adapter->tx[0];
2507 adapter->queue[0].tx->adapter = adapter;
2508 adapter->queue[0].tx->addr = adapter->addr + TSNEP_QUEUE(0);
2509 adapter->queue[0].tx->queue_index = 0;
2510 adapter->queue[0].rx = &adapter->rx[0];
2511 adapter->queue[0].rx->adapter = adapter;
2512 adapter->queue[0].rx->addr = adapter->addr + TSNEP_QUEUE(0);
2513 adapter->queue[0].rx->queue_index = 0;
2514 adapter->queue[0].irq_mask = irq_mask;
2515 adapter->queue[0].irq_delay_addr = adapter->addr + ECM_INT_DELAY;
2516 retval = tsnep_set_irq_coalesce(&adapter->queue[0],
2521 adapter->netdev->irq = adapter->queue[0].irq;
2523 /* add additional TX/RX queue pairs only if dedicated interrupt is
2527 sprintf(name, "txrx-%d", i);
2528 retval = platform_get_irq_byname_optional(adapter->pdev, name);
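(Editorial note on the interrupt naming set up just above: queue pair 0 always uses the device's single IRQ, requested either as the plain platform IRQ or by the name "mac", while each additional TX/RX pair i is only created if an IRQ named "txrx-<i>" (e.g. "txrx-1", "txrx-2") can be looked up by name; a pair without such a dedicated interrupt is simply not instantiated.)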
2532 adapter->num_tx_queues++;
2533 adapter->num_rx_queues++;
2534 adapter->num_queues++;
2535 adapter->queue[i].adapter = adapter;
2536 adapter->queue[i].irq = retval;
2537 adapter->queue[i].tx = &adapter->tx[i];
2538 adapter->queue[i].tx->adapter = adapter;
2539 adapter->queue[i].tx->addr = adapter->addr + TSNEP_QUEUE(i);
2540 adapter->queue[i].tx->queue_index = i;
2541 adapter->queue[i].rx = &adapter->rx[i];
2542 adapter->queue[i].rx->adapter = adapter;
2543 adapter->queue[i].rx->addr = adapter->addr + TSNEP_QUEUE(i);
2544 adapter->queue[i].rx->queue_index = i;
2545 adapter->queue[i].irq_mask =
2547 adapter->queue[i].irq_delay_addr =
2548 adapter->addr + ECM_INT_DELAY + ECM_INT_DELAY_OFFSET * i;
2549 retval = tsnep_set_irq_coalesce(&adapter->queue[i],
2569 netdev = devm_alloc_etherdev_mqs(&pdev->dev,
2573 return -ENODEV;
2574 SET_NETDEV_DEV(netdev, &pdev->dev);
2577 adapter->pdev = pdev;
2578 adapter->dmadev = &pdev->dev;
2579 adapter->netdev = netdev;
2580 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
2584 netdev->min_mtu = ETH_MIN_MTU;
2585 netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;
2587 mutex_init(&adapter->gate_control_lock);
2588 mutex_init(&adapter->rxnfc_lock);
2589 INIT_LIST_HEAD(&adapter->rxnfc_rules);
2591 adapter->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &io);
2592 if (IS_ERR(adapter->addr))
2593 return PTR_ERR(adapter->addr);
2594 netdev->mem_start = io->start;
2595 netdev->mem_end = io->end;
2597 type = ioread32(adapter->addr + ECM_TYPE);
2601 adapter->gate_control = type & ECM_GATE_CONTROL;
2602 adapter->rxnfc_max = TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT;
2610 retval = dma_set_mask_and_coherent(&adapter->pdev->dev,
2613 dev_err(&adapter->pdev->dev, "no usable DMA configuration.\n");
2641 netdev->netdev_ops = &tsnep_netdev_ops;
2642 netdev->ethtool_ops = &tsnep_ethtool_ops;
2643 netdev->features = NETIF_F_SG;
2644 netdev->hw_features = netdev->features | NETIF_F_LOOPBACK;
2646 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
2658 dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version,
2660 if (adapter->gate_control)
2661 dev_info(&adapter->pdev->dev, "gate control detected\n");
2673 if (adapter->mdiobus)
2674 mdiobus_unregister(adapter->mdiobus);
2683 unregister_netdev(adapter->netdev);
2691 if (adapter->mdiobus)
2692 mdiobus_unregister(adapter->mdiobus);
2713 MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");