Lines matching "+full:ether +full:-r8a7790" in the Renesas Ethernet AVB (ravb) driver, drivers/net/ethernet/renesas/ravb_main.c. Each entry below gives the source line number, the matched line, and the enclosing function.

1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (C) 2014-2019 Renesas Electronics Corporation
6 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
14 #include <linux/dma-mapping.h>
58 return -ETIMEDOUT; in ravb_wait()
90 switch (priv->speed) { in ravb_set_rate_gbeth()
107 switch (priv->speed) { in ravb_set_rate_rcar()
148 ravb_modify(priv->ndev, PIR, mask, set ? mask : 0); in ravb_mdio_ctrl()
175 return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0; in ravb_get_mdio_data()
191 return priv->rx_ring[q].raw + priv->info->rx_desc_size * i; in ravb_rx_get_desc()
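
ravb_rx_get_desc() indexes the RX ring by byte offset rather than by array subscript because the descriptor size is only known at runtime (rx_desc_size differs between the normal and the extended, timestamp-carrying layout). A minimal standalone model of that pattern; the struct and field names here are illustrative, not the driver's:

#include <stddef.h>
#include <stdint.h>

/* Common prefix shared by both descriptor layouts (illustrative). */
struct rx_desc_common {
	uint16_t ds_cc;		/* data size / content control */
	uint8_t  msc;		/* MAC status code */
	uint8_t  die_dt;	/* interrupt enable + descriptor type */
	uint32_t dptr;		/* buffer address */
};

/* Element i of a ring whose element size is a runtime value. */
static inline struct rx_desc_common *
rx_get_desc(void *raw, size_t desc_size, unsigned int i)
{
	return (struct rx_desc_common *)((uint8_t *)raw + desc_size * i);
}
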
194 /* Free TX skb function for AVB-IP */
198 struct net_device_stats *stats = &priv->stats[q]; in ravb_tx_free()
199 unsigned int num_tx_desc = priv->num_tx_desc; in ravb_tx_free()
205 for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { in ravb_tx_free()
208 entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * in ravb_tx_free()
210 desc = &priv->tx_ring[q][entry]; in ravb_tx_free()
211 txed = desc->die_dt == DT_FEMPTY; in ravb_tx_free()
216 size = le16_to_cpu(desc->ds_tagl) & TX_DS; in ravb_tx_free()
218 if (priv->tx_skb[q][entry / num_tx_desc]) { in ravb_tx_free()
219 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), in ravb_tx_free()
222 if (entry % num_tx_desc == num_tx_desc - 1) { in ravb_tx_free()
224 dev_kfree_skb_any(priv->tx_skb[q][entry]); in ravb_tx_free()
225 priv->tx_skb[q][entry] = NULL; in ravb_tx_free()
227 stats->tx_packets++; in ravb_tx_free()
232 stats->tx_bytes += size; in ravb_tx_free()
233 desc->die_dt = DT_EEMPTY; in ravb_tx_free()
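
cur_tx and dirty_tx in ravb_tx_free() are free-running counters: cur_tx grows when descriptors are queued, dirty_tx when they are reclaimed, and the ring slot is the counter modulo the ring length scaled by num_tx_desc (one skb can occupy two descriptors on parts with the 4-byte TX alignment restriction mentioned near the end of this listing). A runnable model of that accounting, with made-up sizes:

#include <stdio.h>

#define NUM_TX_RING	4	/* assumed ring length, in frames */
#define NUM_TX_DESC	2	/* descriptors per frame (aligned_tx case) */

int main(void)
{
	unsigned int cur_tx = 0, dirty_tx = 0;

	cur_tx += 3 * NUM_TX_DESC;	/* queue three frames */

	/* reclaim in order, as the ravb_tx_free() loop walks the ring */
	for (; cur_tx - dirty_tx > 0; dirty_tx++) {
		unsigned int entry = dirty_tx % (NUM_TX_RING * NUM_TX_DESC);

		if (entry % NUM_TX_DESC == NUM_TX_DESC - 1)
			printf("frame %u complete (slot %u)\n",
			       entry / NUM_TX_DESC, entry);
	}
	return 0;
}
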
243 if (!priv->rx_ring[q].raw) in ravb_rx_ring_free()
246 ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1); in ravb_rx_ring_free()
247 dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].raw, in ravb_rx_ring_free()
248 priv->rx_desc_dma[q]); in ravb_rx_ring_free()
249 priv->rx_ring[q].raw = NULL; in ravb_rx_ring_free()
256 unsigned int num_tx_desc = priv->num_tx_desc; in ravb_ring_free()
262 if (priv->tx_ring[q]) { in ravb_ring_free()
266 (priv->num_tx_ring[q] * num_tx_desc + 1); in ravb_ring_free()
267 dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q], in ravb_ring_free()
268 priv->tx_desc_dma[q]); in ravb_ring_free()
269 priv->tx_ring[q] = NULL; in ravb_ring_free()
273 for (i = 0; i < priv->num_rx_ring[q]; i++) { in ravb_ring_free()
274 if (priv->rx_buffers[q][i].page) in ravb_ring_free()
275 page_pool_put_page(priv->rx_pool[q], in ravb_ring_free()
276 priv->rx_buffers[q][i].page, in ravb_ring_free()
279 kfree(priv->rx_buffers[q]); in ravb_ring_free()
280 priv->rx_buffers[q] = NULL; in ravb_ring_free()
281 page_pool_destroy(priv->rx_pool[q]); in ravb_ring_free()
284 kfree(priv->tx_align[q]); in ravb_ring_free()
285 priv->tx_align[q] = NULL; in ravb_ring_free()
290 kfree(priv->tx_skb[q]); in ravb_ring_free()
291 priv->tx_skb[q] = NULL; in ravb_ring_free()
299 const struct ravb_hw_info *info = priv->info; in ravb_alloc_rx_buffer()
304 rx_buff = &priv->rx_buffers[q][entry]; in ravb_alloc_rx_buffer()
305 size = info->rx_buffer_size; in ravb_alloc_rx_buffer()
306 rx_buff->page = page_pool_alloc(priv->rx_pool[q], &rx_buff->offset, in ravb_alloc_rx_buffer()
308 if (unlikely(!rx_buff->page)) { in ravb_alloc_rx_buffer()
312 rx_desc->ds_cc = cpu_to_le16(0); in ravb_alloc_rx_buffer()
313 return -ENOMEM; in ravb_alloc_rx_buffer()
316 dma_addr = page_pool_get_dma_addr(rx_buff->page) + rx_buff->offset; in ravb_alloc_rx_buffer()
317 dma_sync_single_for_device(ndev->dev.parent, dma_addr, in ravb_alloc_rx_buffer()
318 info->rx_buffer_size, DMA_FROM_DEVICE); in ravb_alloc_rx_buffer()
319 rx_desc->dptr = cpu_to_le32(dma_addr); in ravb_alloc_rx_buffer()
324 rx_desc->ds_cc = cpu_to_le16(info->rx_buffer_size - in ravb_alloc_rx_buffer()
325 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) - in ravb_alloc_rx_buffer()
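
The size written into ds_cc, which is what the hardware is allowed to fill, is smaller than the page-pool buffer: napi_build_skb() later wraps the same buffer into an skb and needs SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) of tail room (the further trailer terms on the truncated line above are elided by this listing). Illustrative arithmetic with assumed constants:

#include <stdio.h>

/* Stand-ins for kernel constants; values assumed for the demo. */
#define SKB_DATA_ALIGN(x)	(((x) + 63UL) & ~63UL)	/* cacheline align */
#define SHINFO_SIZE		320UL	/* ~sizeof(struct skb_shared_info) */

int main(void)
{
	unsigned long rx_buffer_size = 2048;
	unsigned long hw_data_size =
		rx_buffer_size - SKB_DATA_ALIGN(SHINFO_SIZE);

	printf("buffer %lu bytes -> ds_cc %lu bytes (tail room %lu)\n",
	       rx_buffer_size, hw_data_size,
	       rx_buffer_size - hw_data_size);
	return 0;
}
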
338 entry = (priv->dirty_rx[q] + i) % priv->num_rx_ring[q]; in ravb_rx_ring_refill()
341 if (!priv->rx_buffers[q][entry].page) { in ravb_rx_ring_refill()
348 rx_desc->die_dt = DT_FEMPTY; in ravb_rx_ring_refill()
358 unsigned int num_tx_desc = priv->num_tx_desc; in ravb_ring_format()
362 unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] * in ravb_ring_format()
366 priv->cur_rx[q] = 0; in ravb_ring_format()
367 priv->cur_tx[q] = 0; in ravb_ring_format()
368 priv->dirty_rx[q] = 0; in ravb_ring_format()
369 priv->dirty_tx[q] = 0; in ravb_ring_format()
375 rx_desc = ravb_rx_get_desc(priv, q, priv->num_rx_ring[q]); in ravb_ring_format()
376 rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); in ravb_ring_format()
377 rx_desc->die_dt = DT_LINKFIX; /* type */ in ravb_ring_format()
379 memset(priv->tx_ring[q], 0, tx_ring_size); in ravb_ring_format()
381 for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q]; in ravb_ring_format()
383 tx_desc->die_dt = DT_EEMPTY; in ravb_ring_format()
386 tx_desc->die_dt = DT_EEMPTY; in ravb_ring_format()
389 tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); in ravb_ring_format()
390 tx_desc->die_dt = DT_LINKFIX; /* type */ in ravb_ring_format()
393 desc = &priv->desc_bat[RX_QUEUE_OFFSET + q]; in ravb_ring_format()
394 desc->die_dt = DT_LINKFIX; /* type */ in ravb_ring_format()
395 desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); in ravb_ring_format()
398 desc = &priv->desc_bat[q]; in ravb_ring_format()
399 desc->die_dt = DT_LINKFIX; /* type */ in ravb_ring_format()
400 desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); in ravb_ring_format()
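
Each ring is closed by one extra DT_LINKFIX descriptor whose pointer aims back at the ring base, and the descriptor base address table entry (desc_bat) for the queue is itself a DT_LINKFIX pointing at the ring head, so the DMAC simply follows links and never needs software wrap logic. A compact standalone model of that shape; the type codes are illustrative:

#include <stdint.h>
#include <stdio.h>

enum { DT_EEMPTY = 1, DT_LINKFIX = 2 };	/* illustrative codes */

struct desc {
	uint8_t  die_dt;	/* descriptor type */
	uint32_t dptr;		/* buffer address or, for LINKFIX, next desc */
};

int main(void)
{
	struct desc ring[4 + 1];	/* N data descriptors + 1 link */
	uint32_t ring_dma = 0x1000;	/* pretend DMA address of ring[0] */
	int i;

	for (i = 0; i < 4; i++)
		ring[i].die_dt = DT_EEMPTY;	/* empty, owned by HW */

	ring[4].die_dt = DT_LINKFIX;	/* last entry links back... */
	ring[4].dptr = ring_dma;	/* ...to the start of the ring */

	printf("link descriptor -> 0x%x\n", ring[4].dptr);
	return 0;
}
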
408 ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1); in ravb_alloc_rx_desc()
410 priv->rx_ring[q].raw = dma_alloc_coherent(ndev->dev.parent, ring_size, in ravb_alloc_rx_desc()
411 &priv->rx_desc_dma[q], in ravb_alloc_rx_desc()
414 return priv->rx_ring[q].raw; in ravb_alloc_rx_desc()
421 unsigned int num_tx_desc = priv->num_tx_desc; in ravb_ring_init()
425 .pool_size = priv->num_rx_ring[q], in ravb_ring_init()
427 .dev = ndev->dev.parent, in ravb_ring_init()
434 priv->rx_pool[q] = page_pool_create(&params); in ravb_ring_init()
435 if (IS_ERR(priv->rx_pool[q])) in ravb_ring_init()
439 priv->rx_buffers[q] = kcalloc(priv->num_rx_ring[q], in ravb_ring_init()
440 sizeof(*priv->rx_buffers[q]), GFP_KERNEL); in ravb_ring_init()
441 if (!priv->rx_buffers[q]) in ravb_ring_init()
445 priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q], in ravb_ring_init()
446 sizeof(*priv->tx_skb[q]), GFP_KERNEL); in ravb_ring_init()
447 if (!priv->tx_skb[q]) in ravb_ring_init()
455 priv->dirty_rx[q] = 0; in ravb_ring_init()
456 ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q]; in ravb_ring_init()
457 memset(priv->rx_ring[q].raw, 0, ring_size); in ravb_ring_init()
458 num_filled = ravb_rx_ring_refill(ndev, q, priv->num_rx_ring[q], in ravb_ring_init()
460 if (num_filled != priv->num_rx_ring[q]) in ravb_ring_init()
465 priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] + in ravb_ring_init()
466 DPTR_ALIGN - 1, GFP_KERNEL); in ravb_ring_init()
467 if (!priv->tx_align[q]) in ravb_ring_init()
473 (priv->num_tx_ring[q] * num_tx_desc + 1); in ravb_ring_init()
474 priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, in ravb_ring_init()
475 &priv->tx_desc_dma[q], in ravb_ring_init()
477 if (!priv->tx_ring[q]) in ravb_ring_init()
485 return -ENOMEM; in ravb_ring_init()
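
ravb_ring_init() creates one page pool per RX queue; the listing above shows only two of the page_pool_params fields (.pool_size and .dev). A hedged sketch of a comparable page_pool_create() call; the flag and direction choices below are typical for a DMA-mapped RX pool, not necessarily the driver's exact ones, and example_rx_pool() is a hypothetical helper:

#include <net/page_pool/helpers.h>

static struct page_pool *example_rx_pool(struct device *dev,
					 unsigned int pool_size)
{
	struct page_pool_params params = {
		.order		= 0,			/* single pages */
		.flags		= PP_FLAG_DMA_MAP,	/* pool maps pages */
		.pool_size	= pool_size,		/* one per ring entry */
		.nid		= NUMA_NO_NODE,
		.dev		= dev,			/* for DMA mapping */
		.dma_dir	= DMA_FROM_DEVICE,	/* RX only */
	};

	return page_pool_create(&params);	/* ERR_PTR() on failure */
}
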
490 bool tx_enable = ndev->features & NETIF_F_HW_CSUM; in ravb_csum_init_gbeth()
491 bool rx_enable = ndev->features & NETIF_F_RXCSUM; in ravb_csum_init_gbeth()
501 ndev->features &= ~NETIF_F_HW_CSUM; in ravb_csum_init_gbeth()
504 ndev->features &= ~NETIF_F_RXCSUM; in ravb_csum_init_gbeth()
521 if (priv->phy_interface == PHY_INTERFACE_MODE_MII) { in ravb_emac_init_gbeth()
531 ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR); in ravb_emac_init_gbeth()
534 ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) | in ravb_emac_init_gbeth()
542 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | in ravb_emac_init_gbeth()
543 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); in ravb_emac_init_gbeth()
544 ravb_write(ndev, (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); in ravb_emac_init_gbeth()
546 /* E-MAC status register clear */ in ravb_emac_init_gbeth()
551 /* E-MAC interrupt enable register */ in ravb_emac_init_gbeth()
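
Both E-MAC init variants program the station address as two registers: MAHR takes bytes 0-3 and MALR bytes 4-5, in the byte order visible on the MAHR/MALR lines above. A runnable check of the packing, using an example locally administered address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };
	uint32_t mahr = (mac[0] << 24) | (mac[1] << 16) |
			(mac[2] << 8) | mac[3];
	uint32_t malr = (mac[4] << 8) | mac[5];

	printf("MAHR=0x%08x MALR=0x%04x\n", mahr, malr);	/* 02005e10 / 2030 */
	return 0;
}
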
566 ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR); in ravb_emac_init_rcar()
570 (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) | in ravb_emac_init_rcar()
577 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | in ravb_emac_init_rcar()
578 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); in ravb_emac_init_rcar()
580 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); in ravb_emac_init_rcar()
582 /* E-MAC status register clear */ in ravb_emac_init_rcar()
585 /* E-MAC interrupt enable register */ in ravb_emac_init_rcar()
592 bool mii = priv->phy_interface == PHY_INTERFACE_MODE_MII; in ravb_emac_init_rcar_gen4()
599 /* E-MAC init function */
603 const struct ravb_hw_info *info = priv->info; in ravb_emac_init()
605 info->emac_init(ndev); in ravb_emac_init()
624 ravb_write(ndev, 0x7ffc0000 | priv->info->rx_max_frame_size, RTC); in ravb_dmac_init_gbeth()
646 const struct ravb_hw_info *info = priv->info; in ravb_dmac_init_rcar()
673 if (info->multi_irqs) { in ravb_dmac_init_rcar()
695 const struct ravb_hw_info *info = priv->info; in ravb_dmac_init()
703 error = info->dmac_init(ndev); in ravb_dmac_init()
707 /* Setting the control will start the AVB-DMAC process. */ in ravb_dmac_init()
723 while (count--) { in ravb_get_tx_tstamp()
731 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, in ravb_get_tx_tstamp()
733 skb = ts_skb->skb; in ravb_get_tx_tstamp()
734 tag = ts_skb->tag; in ravb_get_tx_tstamp()
735 list_del(&ts_skb->list); in ravb_get_tx_tstamp()
759 * ignore this as it will always be re-checked in inet_gro_receive(). in ravb_rx_csum_gbeth()
765 if (unlikely(skb->len < csum_len)) in ravb_rx_csum_gbeth()
769 skb_frag_t *last_frag = &shinfo->frags[shinfo->nr_frags - 1]; in ravb_rx_csum_gbeth()
776 skb_trim(skb, skb->len - csum_len); in ravb_rx_csum_gbeth()
779 if (!get_unaligned(--hw_csum)) in ravb_rx_csum_gbeth()
780 skb->ip_summed = CHECKSUM_UNNECESSARY; in ravb_rx_csum_gbeth()
790 if (unlikely(skb->len < sizeof(__sum16))) in ravb_rx_csum()
792 hw_csum = skb_tail_pointer(skb) - sizeof(__sum16); in ravb_rx_csum()
793 skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); in ravb_rx_csum()
794 skb->ip_summed = CHECKSUM_COMPLETE; in ravb_rx_csum()
795 skb_trim(skb, skb->len - sizeof(__sum16)); in ravb_rx_csum()
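
In both RX checksum paths the hardware appends the checksum it computed after the received data, so the driver reads it from the tail of the skb and trims it off (the GbEth variant walks back from the last fragment; the R-Car variant reads the final two bytes and reports CHECKSUM_COMPLETE). A standalone model of pulling a little-endian 16-bit trailer off a buffer; pull_hw_csum() is a hypothetical helper, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* Return the 2-byte checksum appended after the frame and shrink *len. */
static uint16_t pull_hw_csum(const uint8_t *buf, size_t *len)
{
	*len -= 2;
	/* little-endian 16-bit load, alignment- and endianness-safe */
	return (uint16_t)(buf[*len] | (buf[*len + 1] << 8));
}

int main(void)
{
	uint8_t frame[] = { 0xde, 0xad, 0xbe, 0xef, 0x34, 0x12 };
	size_t len = sizeof(frame);
	uint16_t csum = pull_hw_csum(frame, &len);

	printf("payload %zu bytes, hw csum 0x%04x\n", len, csum);
	return 0;
}
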
802 const struct ravb_hw_info *info = priv->info; in ravb_rx_gbeth()
814 limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q]; in ravb_rx_gbeth()
815 stats = &priv->stats[q]; in ravb_rx_gbeth()
817 for (i = 0; i < limit; i++, priv->cur_rx[q]++) { in ravb_rx_gbeth()
818 entry = priv->cur_rx[q] % priv->num_rx_ring[q]; in ravb_rx_gbeth()
819 desc = &priv->rx_ring[q].desc[entry]; in ravb_rx_gbeth()
820 if (rx_packets == budget || desc->die_dt == DT_FEMPTY) in ravb_rx_gbeth()
825 desc_status = desc->msc; in ravb_rx_gbeth()
826 desc_len = le16_to_cpu(desc->ds_cc) & RX_DS; in ravb_rx_gbeth()
828 /* We use 0-byte descriptors to mark the DMA mapping errors */ in ravb_rx_gbeth()
833 stats->multicast++; in ravb_rx_gbeth()
836 stats->rx_errors++; in ravb_rx_gbeth()
838 stats->rx_crc_errors++; in ravb_rx_gbeth()
840 stats->rx_frame_errors++; in ravb_rx_gbeth()
842 stats->rx_length_errors++; in ravb_rx_gbeth()
844 stats->rx_missed_errors++; in ravb_rx_gbeth()
849 rx_buff = &priv->rx_buffers[q][entry]; in ravb_rx_gbeth()
850 rx_addr = page_address(rx_buff->page) + rx_buff->offset; in ravb_rx_gbeth()
851 die_dt = desc->die_dt & 0xF0; in ravb_rx_gbeth()
852 dma_sync_single_for_cpu(ndev->dev.parent, in ravb_rx_gbeth()
853 le32_to_cpu(desc->dptr), in ravb_rx_gbeth()
861 info->rx_buffer_size); in ravb_rx_gbeth()
863 stats->rx_errors++; in ravb_rx_gbeth()
864 page_pool_put_page(priv->rx_pool[q], in ravb_rx_gbeth()
865 rx_buff->page, 0, in ravb_rx_gbeth()
876 priv->rx_1st_skb = skb; in ravb_rx_gbeth()
887 * multi-descriptor packet. in ravb_rx_gbeth()
889 if (unlikely(!priv->rx_1st_skb)) { in ravb_rx_gbeth()
890 stats->rx_errors++; in ravb_rx_gbeth()
891 page_pool_put_page(priv->rx_pool[q], in ravb_rx_gbeth()
892 rx_buff->page, 0, in ravb_rx_gbeth()
901 skb_add_rx_frag(priv->rx_1st_skb, in ravb_rx_gbeth()
902 skb_shinfo(priv->rx_1st_skb)->nr_frags, in ravb_rx_gbeth()
903 rx_buff->page, rx_buff->offset, in ravb_rx_gbeth()
904 desc_len, info->rx_buffer_size); in ravb_rx_gbeth()
910 skb = priv->rx_1st_skb; in ravb_rx_gbeth()
920 skb->protocol = eth_type_trans(skb, ndev); in ravb_rx_gbeth()
921 if (ndev->features & NETIF_F_RXCSUM) in ravb_rx_gbeth()
923 stats->rx_bytes += skb->len; in ravb_rx_gbeth()
924 napi_gro_receive(&priv->napi[q], skb); in ravb_rx_gbeth()
928 * non-NULL when valid. in ravb_rx_gbeth()
930 priv->rx_1st_skb = NULL; in ravb_rx_gbeth()
934 rx_buff->page = NULL; in ravb_rx_gbeth()
940 priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q, in ravb_rx_gbeth()
941 priv->cur_rx[q] - priv->dirty_rx[q], in ravb_rx_gbeth()
944 stats->rx_packets += rx_packets; in ravb_rx_gbeth()
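
A frame larger than one buffer arrives as an FSTART descriptor, zero or more middle descriptors, and an FEND descriptor; ravb_rx_gbeth() builds the skb from the first buffer, parks it in rx_1st_skb, attaches the later buffers as page fragments, and only delivers on FEND (FSINGLE frames complete in one step). The control flow reduced to a standalone skeleton; the type names are illustrative:

#include <stdio.h>

enum frag_type { FSINGLE, FSTART, FMID, FEND };	/* illustrative */

int main(void)
{
	enum frag_type pkt[] = { FSTART, FMID, FEND, FSINGLE };
	int in_progress = 0;	/* models priv->rx_1st_skb != NULL */
	unsigned int i, completed = 0;

	for (i = 0; i < sizeof(pkt) / sizeof(pkt[0]); i++) {
		switch (pkt[i]) {
		case FSINGLE:	/* whole frame in one buffer */
			completed++;
			break;
		case FSTART:	/* head: build the skb */
			in_progress = 1;
			break;
		case FMID:	/* middle: add a page fragment */
			break;
		case FEND:	/* tail: add fragment, deliver */
			if (in_progress) {
				completed++;
				in_progress = 0;
			}
			break;
		}
	}
	printf("%u frames delivered\n", completed);	/* prints 2 */
	return 0;
}
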
952 const struct ravb_hw_info *info = priv->info; in ravb_rx_rcar()
953 struct net_device_stats *stats = &priv->stats[q]; in ravb_rx_rcar()
963 limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q]; in ravb_rx_rcar()
964 for (i = 0; i < limit; i++, priv->cur_rx[q]++) { in ravb_rx_rcar()
965 entry = priv->cur_rx[q] % priv->num_rx_ring[q]; in ravb_rx_rcar()
966 desc = &priv->rx_ring[q].ex_desc[entry]; in ravb_rx_rcar()
967 if (rx_packets == budget || desc->die_dt == DT_FEMPTY) in ravb_rx_rcar()
972 desc_status = desc->msc; in ravb_rx_rcar()
973 pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS; in ravb_rx_rcar()
975 /* We use 0-byte descriptors to mark the DMA mapping errors */ in ravb_rx_rcar()
980 stats->multicast++; in ravb_rx_rcar()
984 stats->rx_errors++; in ravb_rx_rcar()
986 stats->rx_crc_errors++; in ravb_rx_rcar()
988 stats->rx_frame_errors++; in ravb_rx_rcar()
990 stats->rx_length_errors++; in ravb_rx_rcar()
992 stats->rx_missed_errors++; in ravb_rx_rcar()
994 u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE; in ravb_rx_rcar()
998 rx_buff = &priv->rx_buffers[q][entry]; in ravb_rx_rcar()
999 rx_addr = page_address(rx_buff->page) + rx_buff->offset; in ravb_rx_rcar()
1000 dma_sync_single_for_cpu(ndev->dev.parent, in ravb_rx_rcar()
1001 le32_to_cpu(desc->dptr), in ravb_rx_rcar()
1004 skb = napi_build_skb(rx_addr, info->rx_buffer_size); in ravb_rx_rcar()
1006 stats->rx_errors++; in ravb_rx_rcar()
1007 page_pool_put_page(priv->rx_pool[q], in ravb_rx_rcar()
1008 rx_buff->page, 0, true); in ravb_rx_rcar()
1020 ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) << in ravb_rx_rcar()
1021 32) | le32_to_cpu(desc->ts_sl); in ravb_rx_rcar()
1022 ts.tv_nsec = le32_to_cpu(desc->ts_n); in ravb_rx_rcar()
1023 shhwtstamps->hwtstamp = timespec64_to_ktime(ts); in ravb_rx_rcar()
1027 skb->protocol = eth_type_trans(skb, ndev); in ravb_rx_rcar()
1028 if (ndev->features & NETIF_F_RXCSUM) in ravb_rx_rcar()
1030 napi_gro_receive(&priv->napi[q], skb); in ravb_rx_rcar()
1032 stats->rx_bytes += pkt_len; in ravb_rx_rcar()
1035 rx_buff->page = NULL; in ravb_rx_rcar()
1040 priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q, in ravb_rx_rcar()
1041 priv->cur_rx[q] - priv->dirty_rx[q], in ravb_rx_rcar()
1044 stats->rx_packets += rx_packets; in ravb_rx_rcar()
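
The RX hardware timestamp spans three descriptor fields, assembled exactly as on the ts_sh/ts_sl lines above: a 16-bit high word and a 32-bit low word of seconds, plus a 32-bit nanoseconds field. A runnable reconstruction with example register values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t ts_sh = 0x0001;	/* seconds, bits 47:32 */
	uint32_t ts_sl = 0x80000000;	/* seconds, bits 31:0 */
	uint32_t ts_n  = 123456789;	/* nanoseconds */
	uint64_t sec = ((uint64_t)ts_sh << 32) | ts_sl;

	printf("hw timestamp: %llu.%09u s\n",
	       (unsigned long long)sec, (unsigned)ts_n);
	return 0;
}
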
1052 const struct ravb_hw_info *info = priv->info; in ravb_rx()
1054 return info->receive(ndev, budget, q); in ravb_rx()
1073 const struct ravb_hw_info *info = priv->info; in ravb_stop_dma()
1077 error = ravb_wait(ndev, TCCR, info->tccr_mask, 0); in ravb_stop_dma()
1087 /* Stop the E-MAC's RX/TX processes. */ in ravb_stop_dma()
1095 /* Stop AVB-DMAC process */ in ravb_stop_dma()
1099 /* E-MAC interrupt handler */
1109 pm_wakeup_event(&priv->pdev->dev, 0); in ravb_emac_interrupt_unlocked()
1111 ndev->stats.tx_carrier_errors++; in ravb_emac_interrupt_unlocked()
1114 if (priv->no_avb_link) in ravb_emac_interrupt_unlocked()
1117 if (priv->avb_link_active_low) in ravb_emac_interrupt_unlocked()
1133 struct device *dev = &priv->pdev->dev; in ravb_emac_interrupt()
1143 spin_lock(&priv->lock); in ravb_emac_interrupt()
1145 spin_unlock(&priv->lock); in ravb_emac_interrupt()
1167 priv->stats[RAVB_BE].rx_over_errors++; in ravb_error_interrupt()
1171 priv->stats[RAVB_NC].rx_over_errors++; in ravb_error_interrupt()
1175 priv->rx_fifo_errors++; in ravb_error_interrupt()
1182 const struct ravb_hw_info *info = priv->info; in ravb_queue_interrupt()
1189 if (napi_schedule_prep(&priv->napi[q])) { in ravb_queue_interrupt()
1191 if (!info->irq_en_dis) { in ravb_queue_interrupt()
1198 __napi_schedule(&priv->napi[q]); in ravb_queue_interrupt()
1228 const struct ravb_hw_info *info = priv->info; in ravb_interrupt()
1229 struct device *dev = &priv->pdev->dev; in ravb_interrupt()
1238 spin_lock(&priv->lock); in ravb_interrupt()
1251 if (info->nc_queues) { in ravb_interrupt()
1252 for (q = RAVB_NC; q >= RAVB_BE; q--) { in ravb_interrupt()
1262 /* E-MAC status summary */ in ravb_interrupt()
1280 spin_unlock(&priv->lock); in ravb_interrupt()
1292 struct device *dev = &priv->pdev->dev; in ravb_multi_interrupt()
1301 spin_lock(&priv->lock); in ravb_multi_interrupt()
1321 spin_unlock(&priv->lock); in ravb_multi_interrupt()
1332 struct device *dev = &priv->pdev->dev; in ravb_dma_interrupt()
1340 spin_lock(&priv->lock); in ravb_dma_interrupt()
1346 spin_unlock(&priv->lock); in ravb_dma_interrupt()
1365 struct net_device *ndev = napi->dev; in ravb_poll()
1367 const struct ravb_hw_info *info = priv->info; in ravb_poll()
1369 int q = napi - priv->napi; in ravb_poll()
1379 spin_lock_irqsave(&priv->lock, flags); in ravb_poll()
1384 spin_unlock_irqrestore(&priv->lock, flags); in ravb_poll()
1387 priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors; in ravb_poll()
1388 if (info->nc_queues) in ravb_poll()
1389 priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; in ravb_poll()
1390 if (priv->rx_over_errors != ndev->stats.rx_over_errors) in ravb_poll()
1391 ndev->stats.rx_over_errors = priv->rx_over_errors; in ravb_poll()
1392 if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) in ravb_poll()
1393 ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; in ravb_poll()
1396 /* Re-enable RX/TX interrupts */ in ravb_poll()
1397 spin_lock_irqsave(&priv->lock, flags); in ravb_poll()
1398 if (!info->irq_en_dis) { in ravb_poll()
1405 spin_unlock_irqrestore(&priv->lock, flags); in ravb_poll()
1415 ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0); in ravb_set_duplex_gbeth()
1422 const struct ravb_hw_info *info = priv->info; in ravb_adjust_link()
1423 struct phy_device *phydev = ndev->phydev; in ravb_adjust_link()
1427 spin_lock_irqsave(&priv->lock, flags); in ravb_adjust_link()
1429 /* Disable TX and RX here if the E-MAC link change is ignored */ in ravb_adjust_link()
1430 if (priv->no_avb_link) in ravb_adjust_link()
1433 if (phydev->link) { in ravb_adjust_link()
1434 if (info->half_duplex && phydev->duplex != priv->duplex) { in ravb_adjust_link()
1436 priv->duplex = phydev->duplex; in ravb_adjust_link()
1440 if (phydev->speed != priv->speed) { in ravb_adjust_link()
1442 priv->speed = phydev->speed; in ravb_adjust_link()
1443 info->set_rate(ndev); in ravb_adjust_link()
1445 if (!priv->link) { in ravb_adjust_link()
1448 priv->link = phydev->link; in ravb_adjust_link()
1450 } else if (priv->link) { in ravb_adjust_link()
1452 priv->link = 0; in ravb_adjust_link()
1453 priv->speed = 0; in ravb_adjust_link()
1454 if (info->half_duplex) in ravb_adjust_link()
1455 priv->duplex = -1; in ravb_adjust_link()
1458 /* Enable TX and RX here if the E-MAC link change is ignored */ in ravb_adjust_link()
1459 if (priv->no_avb_link && phydev->link) in ravb_adjust_link()
1462 spin_unlock_irqrestore(&priv->lock, flags); in ravb_adjust_link()
1471 struct device_node *np = ndev->dev.parent->of_node; in ravb_phy_init()
1473 const struct ravb_hw_info *info = priv->info; in ravb_phy_init()
1479 priv->link = 0; in ravb_phy_init()
1480 priv->speed = 0; in ravb_phy_init()
1481 priv->duplex = -1; in ravb_phy_init()
1484 pn = of_parse_phandle(np, "phy-handle", 0); in ravb_phy_init()
1497 iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII in ravb_phy_init()
1498 : priv->phy_interface; in ravb_phy_init()
1503 err = -ENOENT; in ravb_phy_init()
1507 if (!info->half_duplex) { in ravb_phy_init()
1539 phy_start(ndev->phydev); in ravb_phy_start()
1548 return priv->msg_enable; in ravb_get_msglevel()
1555 priv->msg_enable = value; in ravb_set_msglevel()
1613 const struct ravb_hw_info *info = priv->info; in ravb_get_sset_count()
1617 return info->stats_len; in ravb_get_sset_count()
1619 return -EOPNOTSUPP; in ravb_get_sset_count()
1627 const struct ravb_hw_info *info = priv->info; in ravb_get_ethtool_stats()
1632 num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1; in ravb_get_ethtool_stats()
1633 /* Device-specific stats */ in ravb_get_ethtool_stats()
1635 struct net_device_stats *stats = &priv->stats[q]; in ravb_get_ethtool_stats()
1637 data[i++] = priv->cur_rx[q]; in ravb_get_ethtool_stats()
1638 data[i++] = priv->cur_tx[q]; in ravb_get_ethtool_stats()
1639 data[i++] = priv->dirty_rx[q]; in ravb_get_ethtool_stats()
1640 data[i++] = priv->dirty_tx[q]; in ravb_get_ethtool_stats()
1641 data[i++] = stats->rx_packets; in ravb_get_ethtool_stats()
1642 data[i++] = stats->tx_packets; in ravb_get_ethtool_stats()
1643 data[i++] = stats->rx_bytes; in ravb_get_ethtool_stats()
1644 data[i++] = stats->tx_bytes; in ravb_get_ethtool_stats()
1645 data[i++] = stats->multicast; in ravb_get_ethtool_stats()
1646 data[i++] = stats->rx_errors; in ravb_get_ethtool_stats()
1647 data[i++] = stats->rx_crc_errors; in ravb_get_ethtool_stats()
1648 data[i++] = stats->rx_frame_errors; in ravb_get_ethtool_stats()
1649 data[i++] = stats->rx_length_errors; in ravb_get_ethtool_stats()
1650 data[i++] = stats->rx_missed_errors; in ravb_get_ethtool_stats()
1651 data[i++] = stats->rx_over_errors; in ravb_get_ethtool_stats()
1658 const struct ravb_hw_info *info = priv->info; in ravb_get_strings()
1662 memcpy(data, info->gstrings_stats, info->gstrings_size); in ravb_get_strings()
1674 ring->rx_max_pending = BE_RX_RING_MAX; in ravb_get_ringparam()
1675 ring->tx_max_pending = BE_TX_RING_MAX; in ravb_get_ringparam()
1676 ring->rx_pending = priv->num_rx_ring[RAVB_BE]; in ravb_get_ringparam()
1677 ring->tx_pending = priv->num_tx_ring[RAVB_BE]; in ravb_get_ringparam()
1686 const struct ravb_hw_info *info = priv->info; in ravb_set_ringparam()
1689 if (ring->tx_pending > BE_TX_RING_MAX || in ravb_set_ringparam()
1690 ring->rx_pending > BE_RX_RING_MAX || in ravb_set_ringparam()
1691 ring->tx_pending < BE_TX_RING_MIN || in ravb_set_ringparam()
1692 ring->rx_pending < BE_RX_RING_MIN) in ravb_set_ringparam()
1693 return -EINVAL; in ravb_set_ringparam()
1694 if (ring->rx_mini_pending || ring->rx_jumbo_pending) in ravb_set_ringparam()
1695 return -EINVAL; in ravb_set_ringparam()
1700 if (info->gptp) in ravb_set_ringparam()
1709 synchronize_irq(ndev->irq); in ravb_set_ringparam()
1713 if (info->nc_queues) in ravb_set_ringparam()
1718 priv->num_rx_ring[RAVB_BE] = ring->rx_pending; in ravb_set_ringparam()
1719 priv->num_tx_ring[RAVB_BE] = ring->tx_pending; in ravb_set_ringparam()
1733 if (info->gptp) in ravb_set_ringparam()
1734 ravb_ptp_init(ndev, priv->pdev); in ravb_set_ringparam()
1746 const struct ravb_hw_info *hw_info = priv->info; in ravb_get_ts_info()
1748 if (hw_info->gptp || hw_info->ccc_gac) { in ravb_get_ts_info()
1749 info->so_timestamping = in ravb_get_ts_info()
1754 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); in ravb_get_ts_info()
1755 info->rx_filters = in ravb_get_ts_info()
1759 info->phc_index = ptp_clock_index(priv->ptp.clock); in ravb_get_ts_info()
1769 wol->supported = WAKE_MAGIC; in ravb_get_wol()
1770 wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0; in ravb_get_wol()
1776 const struct ravb_hw_info *info = priv->info; in ravb_set_wol()
1778 if (!info->magic_pkt || (wol->wolopts & ~WAKE_MAGIC)) in ravb_set_wol()
1779 return -EOPNOTSUPP; in ravb_set_wol()
1781 priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); in ravb_set_wol()
1783 device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled); in ravb_set_wol()
1808 const struct ravb_hw_info *info = priv->info; in ravb_set_config_mode()
1811 if (info->gptp) { in ravb_set_config_mode()
1817 } else if (info->ccc_gac) { in ravb_set_config_mode()
1829 const struct ravb_hw_info *info = priv->info; in ravb_set_gti()
1831 if (!(info->gptp || info->ccc_gac)) in ravb_set_gti()
1834 ravb_write(ndev, priv->gti_tiv, GTI); in ravb_set_gti()
1843 const struct ravb_hw_info *info = priv->info; in ravb_compute_gti()
1844 struct device *dev = ndev->dev.parent; in ravb_compute_gti()
1848 if (!(info->gptp || info->ccc_gac)) in ravb_compute_gti()
1851 if (info->gptp_ref_clk) in ravb_compute_gti()
1852 rate = clk_get_rate(priv->gptp_clk); in ravb_compute_gti()
1854 rate = clk_get_rate(priv->clk); in ravb_compute_gti()
1856 return -EINVAL; in ravb_compute_gti()
1861 dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n", in ravb_compute_gti()
1863 return -EINVAL; in ravb_compute_gti()
1865 priv->gti_tiv = inc; in ravb_compute_gti()
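
GTI.TIV is the gPTP timer increment per reference-clock tick, kept in 2^-20 ns units, so the driver computes roughly (10^9 << 20) / rate and rejects values outside the hardware's accepted range (the dev_err above). A worked example for an assumed 133 MHz clock:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t rate = 133000000;			/* Hz, assumed */
	uint64_t inc = (1000000000ULL << 20) / rate;	/* 2^-20 ns units */

	printf("GTI.TIV = 0x%llx (~%.3f ns/tick)\n",
	       (unsigned long long)inc, inc / (double)(1 << 20));
	return 0;
}
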
1877 if (!priv->info->internal_delay) in ravb_parse_delay_mode()
1880 if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) { in ravb_parse_delay_mode()
1882 priv->rxcidm = !!delay; in ravb_parse_delay_mode()
1885 if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) { in ravb_parse_delay_mode()
1887 priv->txcidm = !!delay; in ravb_parse_delay_mode()
1894 /* Fall back to legacy rgmii-*id behavior */ in ravb_parse_delay_mode()
1895 if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || in ravb_parse_delay_mode()
1896 priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) { in ravb_parse_delay_mode()
1897 priv->rxcidm = 1; in ravb_parse_delay_mode()
1898 priv->rgmii_override = 1; in ravb_parse_delay_mode()
1901 if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || in ravb_parse_delay_mode()
1902 priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) { in ravb_parse_delay_mode()
1903 priv->txcidm = 1; in ravb_parse_delay_mode()
1904 priv->rgmii_override = 1; in ravb_parse_delay_mode()
1913 if (!priv->info->internal_delay) in ravb_set_delay_mode()
1916 if (priv->rxcidm) in ravb_set_delay_mode()
1918 if (priv->txcidm) in ravb_set_delay_mode()
1927 const struct ravb_hw_info *info = priv->info; in ravb_open()
1928 struct device *dev = &priv->pdev->dev; in ravb_open()
1931 napi_enable(&priv->napi[RAVB_BE]); in ravb_open()
1932 if (info->nc_queues) in ravb_open()
1933 napi_enable(&priv->napi[RAVB_NC]); in ravb_open()
1945 ravb_write(ndev, priv->desc_bat_dma, DBAT); in ravb_open()
1957 if (info->gptp || info->ccc_gac) in ravb_open()
1958 ravb_ptp_init(ndev, priv->pdev); in ravb_open()
1971 if (info->gptp || info->ccc_gac) in ravb_open()
1980 if (info->nc_queues) in ravb_open()
1981 napi_disable(&priv->napi[RAVB_NC]); in ravb_open()
1982 napi_disable(&priv->napi[RAVB_BE]); in ravb_open()
1996 ndev->stats.tx_errors++; in ravb_tx_timeout()
1998 schedule_work(&priv->work); in ravb_tx_timeout()
2005 const struct ravb_hw_info *info = priv->info; in ravb_tx_timeout_work()
2006 struct net_device *ndev = priv->ndev; in ravb_tx_timeout_work()
2011 schedule_work(&priv->work); in ravb_tx_timeout_work()
2018 if (info->gptp) in ravb_tx_timeout_work()
2028 * re-enables the TX and RX and skip the following in ravb_tx_timeout_work()
2029 * re-initialization procedure. in ravb_tx_timeout_work()
2036 if (info->nc_queues) in ravb_tx_timeout_work()
2043 * should return here to avoid re-enabling the TX and RX in in ravb_tx_timeout_work()
2054 if (info->gptp) in ravb_tx_timeout_work()
2055 ravb_ptp_init(ndev, priv->pdev); in ravb_tx_timeout_work()
2065 u16 net_protocol = ntohs(skb->protocol); in ravb_can_tx_csum_gbeth()
2069 * - there are zero or one VLAN headers with TPID=0x8100 in ravb_can_tx_csum_gbeth()
2070 * - the network protocol is IPv4 or IPv6 in ravb_can_tx_csum_gbeth()
2071 * - the transport protocol is TCP, UDP or ICMP in ravb_can_tx_csum_gbeth()
2072 * - the packet is not fragmented in ravb_can_tx_csum_gbeth()
2082 net_protocol = ntohs(vh->h_vlan_encapsulated_proto); in ravb_can_tx_csum_gbeth()
2087 inner_protocol = ip_hdr(skb)->protocol; in ravb_can_tx_csum_gbeth()
2090 inner_protocol = ipv6_hdr(skb)->nexthdr; in ravb_can_tx_csum_gbeth()
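
The eligibility test above peels at most one VLAN header and then checks the network and transport protocols. A toy standalone classifier capturing just the protocol part of that decision (no VLAN peel, no fragment check; can_offload() is hypothetical, protocol numbers per IANA):

#include <stdio.h>
#include <stdint.h>

static int can_offload(uint16_t ethertype, uint8_t l4proto)
{
	if (ethertype != 0x0800 && ethertype != 0x86DD)	/* IPv4 / IPv6 */
		return 0;

	switch (l4proto) {
	case 1:		/* ICMP */
	case 6:		/* TCP */
	case 17:	/* UDP */
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	printf("IPv4/TCP: %d, IPv4/SCTP: %d\n",
	       can_offload(0x0800, 6), can_offload(0x0800, 132));
	return 0;
}
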
2109 const struct ravb_hw_info *info = priv->info; in ravb_start_xmit()
2110 unsigned int num_tx_desc = priv->num_tx_desc; in ravb_start_xmit()
2120 if (skb->ip_summed == CHECKSUM_PARTIAL && !ravb_can_tx_csum_gbeth(skb)) in ravb_start_xmit()
2123 spin_lock_irqsave(&priv->lock, flags); in ravb_start_xmit()
2124 if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) * in ravb_start_xmit()
2129 spin_unlock_irqrestore(&priv->lock, flags); in ravb_start_xmit()
2136 entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc); in ravb_start_xmit()
2137 priv->tx_skb[q][entry / num_tx_desc] = skb; in ravb_start_xmit()
2140 buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + in ravb_start_xmit()
2142 len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; in ravb_start_xmit()
2153 * length of the second DMA descriptor (skb->len - len) in ravb_start_xmit()
2159 memcpy(buffer, skb->data, len); in ravb_start_xmit()
2160 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, in ravb_start_xmit()
2162 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_start_xmit()
2165 desc = &priv->tx_ring[q][entry]; in ravb_start_xmit()
2166 desc->ds_tagl = cpu_to_le16(len); in ravb_start_xmit()
2167 desc->dptr = cpu_to_le32(dma_addr); in ravb_start_xmit()
2169 buffer = skb->data + len; in ravb_start_xmit()
2170 len = skb->len - len; in ravb_start_xmit()
2171 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, in ravb_start_xmit()
2173 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_start_xmit()
2178 desc = &priv->tx_ring[q][entry]; in ravb_start_xmit()
2179 len = skb->len; in ravb_start_xmit()
2180 dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, in ravb_start_xmit()
2182 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_start_xmit()
2185 desc->ds_tagl = cpu_to_le16(len); in ravb_start_xmit()
2186 desc->dptr = cpu_to_le32(dma_addr); in ravb_start_xmit()
2189 if (info->gptp || info->ccc_gac) { in ravb_start_xmit()
2194 desc--; in ravb_start_xmit()
2195 dma_unmap_single(ndev->dev.parent, dma_addr, in ravb_start_xmit()
2200 ts_skb->skb = skb_get(skb); in ravb_start_xmit()
2201 ts_skb->tag = priv->ts_skb_tag++; in ravb_start_xmit()
2202 priv->ts_skb_tag &= 0x3ff; in ravb_start_xmit()
2203 list_add_tail(&ts_skb->list, &priv->ts_skb_list); in ravb_start_xmit()
2206 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in ravb_start_xmit()
2207 desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR; in ravb_start_xmit()
2208 desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12); in ravb_start_xmit()
2216 desc->die_dt = DT_FEND; in ravb_start_xmit()
2217 desc--; in ravb_start_xmit()
2218 desc->die_dt = DT_FSTART; in ravb_start_xmit()
2220 desc->die_dt = DT_FSINGLE; in ravb_start_xmit()
2224 priv->cur_tx[q] += num_tx_desc; in ravb_start_xmit()
2225 if (priv->cur_tx[q] - priv->dirty_tx[q] > in ravb_start_xmit()
2226 (priv->num_tx_ring[q] - 1) * num_tx_desc && in ravb_start_xmit()
2231 spin_unlock_irqrestore(&priv->lock, flags); in ravb_start_xmit()
2235 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), in ravb_start_xmit()
2236 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE); in ravb_start_xmit()
2239 priv->tx_skb[q][entry / num_tx_desc] = NULL; in ravb_start_xmit()
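
Note the publication order at the end of ravb_start_xmit(): for a two-descriptor frame DT_FEND is written first and DT_FSTART last, because flipping the first descriptor's type away from "empty" is what hands the whole chain to the DMAC (in the full source a dma_wmb() sits between the descriptor writes and that final store). A standalone model of the release pattern using a C11 fence; this is an illustration, not kernel code:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

enum { DT_EEMPTY = 0, DT_FSTART = 1, DT_FEND = 2 };	/* illustrative */

struct desc {
	uint32_t dptr;
	_Atomic uint8_t die_dt;
};

int main(void)
{
	struct desc ring[2] = { { .die_dt = DT_EEMPTY },
				{ .die_dt = DT_EEMPTY } };

	ring[0].dptr = 0x1000;	/* fill both descriptors first */
	ring[1].dptr = 0x2000;
	atomic_store_explicit(&ring[1].die_dt, DT_FEND,
			      memory_order_relaxed);

	/* dma_wmb() analogue: everything above becomes visible before... */
	atomic_thread_fence(memory_order_release);

	/* ...the head descriptor is armed, publishing the frame. */
	atomic_store_explicit(&ring[0].die_dt, DT_FSTART,
			      memory_order_relaxed);

	printf("frame published\n");
	return 0;
}
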
2247 return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC : in ravb_select_queue()
2255 const struct ravb_hw_info *info = priv->info; in ravb_get_stats()
2257 struct device *dev = &priv->pdev->dev; in ravb_get_stats()
2259 nstats = &ndev->stats; in ravb_get_stats()
2266 stats0 = &priv->stats[RAVB_BE]; in ravb_get_stats()
2268 if (info->tx_counters) { in ravb_get_stats()
2269 nstats->tx_dropped += ravb_read(ndev, TROCR); in ravb_get_stats()
2273 if (info->carrier_counters) { in ravb_get_stats()
2274 nstats->collisions += ravb_read(ndev, CXR41); in ravb_get_stats()
2276 nstats->tx_carrier_errors += ravb_read(ndev, CXR42); in ravb_get_stats()
2280 nstats->rx_packets = stats0->rx_packets; in ravb_get_stats()
2281 nstats->tx_packets = stats0->tx_packets; in ravb_get_stats()
2282 nstats->rx_bytes = stats0->rx_bytes; in ravb_get_stats()
2283 nstats->tx_bytes = stats0->tx_bytes; in ravb_get_stats()
2284 nstats->multicast = stats0->multicast; in ravb_get_stats()
2285 nstats->rx_errors = stats0->rx_errors; in ravb_get_stats()
2286 nstats->rx_crc_errors = stats0->rx_crc_errors; in ravb_get_stats()
2287 nstats->rx_frame_errors = stats0->rx_frame_errors; in ravb_get_stats()
2288 nstats->rx_length_errors = stats0->rx_length_errors; in ravb_get_stats()
2289 nstats->rx_missed_errors = stats0->rx_missed_errors; in ravb_get_stats()
2290 nstats->rx_over_errors = stats0->rx_over_errors; in ravb_get_stats()
2291 if (info->nc_queues) { in ravb_get_stats()
2292 stats1 = &priv->stats[RAVB_NC]; in ravb_get_stats()
2294 nstats->rx_packets += stats1->rx_packets; in ravb_get_stats()
2295 nstats->tx_packets += stats1->tx_packets; in ravb_get_stats()
2296 nstats->rx_bytes += stats1->rx_bytes; in ravb_get_stats()
2297 nstats->tx_bytes += stats1->tx_bytes; in ravb_get_stats()
2298 nstats->multicast += stats1->multicast; in ravb_get_stats()
2299 nstats->rx_errors += stats1->rx_errors; in ravb_get_stats()
2300 nstats->rx_crc_errors += stats1->rx_crc_errors; in ravb_get_stats()
2301 nstats->rx_frame_errors += stats1->rx_frame_errors; in ravb_get_stats()
2302 nstats->rx_length_errors += stats1->rx_length_errors; in ravb_get_stats()
2303 nstats->rx_missed_errors += stats1->rx_missed_errors; in ravb_get_stats()
2304 nstats->rx_over_errors += stats1->rx_over_errors; in ravb_get_stats()
2318 spin_lock_irqsave(&priv->lock, flags); in ravb_set_rx_mode()
2320 ndev->flags & IFF_PROMISC ? ECMR_PRM : 0); in ravb_set_rx_mode()
2321 spin_unlock_irqrestore(&priv->lock, flags); in ravb_set_rx_mode()
2327 struct device_node *np = ndev->dev.parent->of_node; in ravb_close()
2329 const struct ravb_hw_info *info = priv->info; in ravb_close()
2331 struct device *dev = &priv->pdev->dev; in ravb_close()
2342 if (ndev->phydev) { in ravb_close()
2343 phy_stop(ndev->phydev); in ravb_close()
2344 phy_disconnect(ndev->phydev); in ravb_close()
2350 if (info->gptp || info->ccc_gac) in ravb_close()
2353 /* Set the config mode to stop the AVB-DMAC's processes */ in ravb_close()
2359 if (info->gptp || info->ccc_gac) { in ravb_close()
2360 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) { in ravb_close()
2361 list_del(&ts_skb->list); in ravb_close()
2362 kfree_skb(ts_skb->skb); in ravb_close()
2367 cancel_work_sync(&priv->work); in ravb_close()
2369 if (info->nc_queues) in ravb_close()
2370 napi_disable(&priv->napi[RAVB_NC]); in ravb_close()
2371 napi_disable(&priv->napi[RAVB_BE]); in ravb_close()
2375 if (info->nc_queues) in ravb_close()
2398 config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : in ravb_hwtstamp_get()
2400 switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) { in ravb_hwtstamp_get()
2411 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? in ravb_hwtstamp_get()
2412 -EFAULT : 0; in ravb_hwtstamp_get()
2423 if (copy_from_user(&config, req->ifr_data, sizeof(config))) in ravb_hwtstamp_set()
2424 return -EFAULT; in ravb_hwtstamp_set()
2434 return -ERANGE; in ravb_hwtstamp_set()
2449 priv->tstamp_tx_ctrl = tstamp_tx_ctrl; in ravb_hwtstamp_set()
2450 priv->tstamp_rx_ctrl = tstamp_rx_ctrl; in ravb_hwtstamp_set()
2452 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? in ravb_hwtstamp_set()
2453 -EFAULT : 0; in ravb_hwtstamp_set()
2459 struct phy_device *phydev = ndev->phydev; in ravb_do_ioctl()
2462 return -EINVAL; in ravb_do_ioctl()
2465 return -ENODEV; in ravb_do_ioctl()
2481 WRITE_ONCE(ndev->mtu, new_mtu); in ravb_change_mtu()
2484 synchronize_irq(priv->emac_irq); in ravb_change_mtu()
2498 spin_lock_irqsave(&priv->lock, flags); in ravb_set_rx_csum()
2509 spin_unlock_irqrestore(&priv->lock, flags); in ravb_set_rx_csum()
2531 netdev_features_t changed = ndev->features ^ features; in ravb_set_features_gbeth()
2537 spin_lock_irqsave(&priv->lock, flags); in ravb_set_features_gbeth()
2561 spin_unlock_irqrestore(&priv->lock, flags); in ravb_set_features_gbeth()
2569 netdev_features_t changed = ndev->features ^ features; in ravb_set_features_rcar()
2581 const struct ravb_hw_info *info = priv->info; in ravb_set_features()
2582 struct device *dev = &priv->pdev->dev; in ravb_set_features()
2588 ret = info->set_feature(ndev, features); in ravb_set_features()
2597 ndev->features = features; in ravb_set_features()
2620 struct platform_device *pdev = priv->pdev; in ravb_mdio_init()
2621 struct device *dev = &pdev->dev; in ravb_mdio_init()
2628 priv->mdiobb.ops = &bb_ops; in ravb_mdio_init()
2631 priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb); in ravb_mdio_init()
2632 if (!priv->mii_bus) in ravb_mdio_init()
2633 return -ENOMEM; in ravb_mdio_init()
2636 priv->mii_bus->name = "ravb_mii"; in ravb_mdio_init()
2637 priv->mii_bus->parent = dev; in ravb_mdio_init()
2638 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in ravb_mdio_init()
2639 pdev->name, pdev->id); in ravb_mdio_init()
2642 mdio_node = of_get_child_by_name(dev->of_node, "mdio"); in ravb_mdio_init()
2645 mdio_node = of_node_get(dev->of_node); in ravb_mdio_init()
2647 error = of_mdiobus_register(priv->mii_bus, mdio_node); in ravb_mdio_init()
2652 pn = of_parse_phandle(dev->of_node, "phy-handle", 0); in ravb_mdio_init()
2655 phydev->mac_managed_pm = true; in ravb_mdio_init()
2656 put_device(&phydev->mdio.dev); in ravb_mdio_init()
2663 free_mdio_bitbang(priv->mii_bus); in ravb_mdio_init()
2671 mdiobus_unregister(priv->mii_bus); in ravb_mdio_release()
2674 free_mdio_bitbang(priv->mii_bus); in ravb_mdio_release()
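
The MDIO bus is bit-banged through the PIR register: ravb_mdio_ctrl() near the top of this listing sets or clears one PIR bit at a time, ravb_get_mdio_data() samples PIR_MDI, and alloc_mdio_bitbang() layers the MDIO frame protocol on top. A hedged sketch of the ops table such a driver hands to alloc_mdio_bitbang(); the sketch_* names are hypothetical, while the PIR bit roles follow the lines shown above:

#include <linux/mdio-bitbang.h>

/* Each callback only toggles one PIR bit; mdio-bitbang.c does the rest. */
static void sketch_set_mdc(struct mdiobb_ctrl *ctrl, int level)
{
	ravb_mdio_ctrl(ctrl, PIR_MDC, level);	/* clock line */
}

static void sketch_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
{
	ravb_mdio_ctrl(ctrl, PIR_MMD, output);	/* pin direction */
}

static void sketch_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
{
	ravb_mdio_ctrl(ctrl, PIR_MDO, value);	/* data out */
}

static const struct mdiobb_ops sketch_bb_ops = {
	.owner		= THIS_MODULE,
	.set_mdc	= sketch_set_mdc,
	.set_mdio_dir	= sketch_set_mdio_dir,
	.set_mdio_data	= sketch_set_mdio_data,
	.get_mdio_data	= ravb_get_mdio_data,	/* samples PIR_MDI */
};
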
2803 { .compatible = "renesas,etheravb-r8a7790", .data = &ravb_gen2_hw_info },
2804 { .compatible = "renesas,etheravb-r8a7794", .data = &ravb_gen2_hw_info },
2805 { .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
2806 { .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
2807 { .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
2808 { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen4_hw_info },
2809 { .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
2810 { .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
2818 struct platform_device *pdev = priv->pdev; in ravb_setup_irq()
2819 struct net_device *ndev = priv->ndev; in ravb_setup_irq()
2820 struct device *dev = &pdev->dev; in ravb_setup_irq()
2828 return -ENOMEM; in ravb_setup_irq()
2851 const struct ravb_hw_info *info = priv->info; in ravb_setup_irqs()
2852 struct net_device *ndev = priv->ndev; in ravb_setup_irqs()
2856 if (!info->multi_irqs) in ravb_setup_irqs()
2857 return ravb_setup_irq(priv, NULL, NULL, &ndev->irq, ravb_interrupt); in ravb_setup_irqs()
2859 if (info->err_mgmt_irqs) { in ravb_setup_irqs()
2867 error = ravb_setup_irq(priv, irq_name, "ch22:multi", &ndev->irq, ravb_multi_interrupt); in ravb_setup_irqs()
2871 error = ravb_setup_irq(priv, emac_irq_name, "ch24:emac", &priv->emac_irq, in ravb_setup_irqs()
2876 if (info->err_mgmt_irqs) { in ravb_setup_irqs()
2903 struct device_node *np = pdev->dev.of_node; in ravb_probe()
2912 dev_err(&pdev->dev, in ravb_probe()
2914 return -EINVAL; in ravb_probe()
2917 rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); in ravb_probe()
2919 return dev_err_probe(&pdev->dev, PTR_ERR(rstc), in ravb_probe()
2925 return -ENOMEM; in ravb_probe()
2927 info = of_device_get_match_data(&pdev->dev); in ravb_probe()
2929 ndev->features = info->net_features; in ravb_probe()
2930 ndev->hw_features = info->net_hw_features; in ravb_probe()
2931 ndev->vlan_features = info->vlan_features; in ravb_probe()
2937 SET_NETDEV_DEV(ndev, &pdev->dev); in ravb_probe()
2940 priv->info = info; in ravb_probe()
2941 priv->rstc = rstc; in ravb_probe()
2942 priv->ndev = ndev; in ravb_probe()
2943 priv->pdev = pdev; in ravb_probe()
2944 priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE; in ravb_probe()
2945 priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE; in ravb_probe()
2946 if (info->nc_queues) { in ravb_probe()
2947 priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE; in ravb_probe()
2948 priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE; in ravb_probe()
2955 priv->clk = devm_clk_get(&pdev->dev, NULL); in ravb_probe()
2956 if (IS_ERR(priv->clk)) { in ravb_probe()
2957 error = PTR_ERR(priv->clk); in ravb_probe()
2961 if (info->gptp_ref_clk) { in ravb_probe()
2962 priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp"); in ravb_probe()
2963 if (IS_ERR(priv->gptp_clk)) { in ravb_probe()
2964 error = PTR_ERR(priv->gptp_clk); in ravb_probe()
2969 priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk"); in ravb_probe()
2970 if (IS_ERR(priv->refclk)) { in ravb_probe()
2971 error = PTR_ERR(priv->refclk); in ravb_probe()
2974 clk_prepare(priv->refclk); in ravb_probe()
2977 pm_runtime_set_autosuspend_delay(&pdev->dev, 100); in ravb_probe()
2978 pm_runtime_use_autosuspend(&pdev->dev); in ravb_probe()
2979 pm_runtime_enable(&pdev->dev); in ravb_probe()
2980 error = pm_runtime_resume_and_get(&pdev->dev); in ravb_probe()
2984 priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res); in ravb_probe()
2985 if (IS_ERR(priv->addr)) { in ravb_probe()
2986 error = PTR_ERR(priv->addr); in ravb_probe()
2990 /* The Ether-specific entries in the device structure. */ in ravb_probe()
2991 ndev->base_addr = res->start; in ravb_probe()
2993 spin_lock_init(&priv->lock); in ravb_probe()
2994 INIT_WORK(&priv->work, ravb_tx_timeout_work); in ravb_probe()
2996 error = of_get_phy_mode(np, &priv->phy_interface); in ravb_probe()
2997 if (error && error != -ENODEV) in ravb_probe()
3000 priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link"); in ravb_probe()
3001 priv->avb_link_active_low = in ravb_probe()
3002 of_property_read_bool(np, "renesas,ether-link-active-low"); in ravb_probe()
3004 ndev->max_mtu = info->tx_max_frame_size - in ravb_probe()
3006 ndev->min_mtu = ETH_MIN_MTU; in ravb_probe()
3008 /* FIXME: R-Car Gen2 has 4byte alignment restriction for tx buffer in ravb_probe()
3013 priv->num_tx_desc = info->aligned_tx ? 2 : 1; in ravb_probe()
3016 ndev->netdev_ops = &ravb_netdev_ops; in ravb_probe()
3017 ndev->ethtool_ops = &ravb_ethtool_ops; in ravb_probe()
3026 priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM; in ravb_probe()
3027 priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size, in ravb_probe()
3028 &priv->desc_bat_dma, GFP_KERNEL); in ravb_probe()
3029 if (!priv->desc_bat) { in ravb_probe()
3030 dev_err(&pdev->dev, in ravb_probe()
3032 priv->desc_bat_size); in ravb_probe()
3033 error = -ENOMEM; in ravb_probe()
3037 priv->desc_bat[q].die_dt = DT_EOS; in ravb_probe()
3040 INIT_LIST_HEAD(&priv->ts_skb_list); in ravb_probe()
3043 priv->msg_enable = RAVB_DEF_MSG_ENABLE; in ravb_probe()
3052 if (!is_valid_ether_addr(ndev->dev_addr)) { in ravb_probe()
3053 dev_warn(&pdev->dev, in ravb_probe()
3061 dev_err(&pdev->dev, "failed to initialize MDIO\n"); in ravb_probe()
3070 netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll); in ravb_probe()
3071 if (info->nc_queues) in ravb_probe()
3072 netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll); in ravb_probe()
3074 if (info->coalesce_irqs) { in ravb_probe()
3085 device_set_wakeup_capable(&pdev->dev, 1); in ravb_probe()
3089 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); in ravb_probe()
3091 pm_runtime_mark_last_busy(&pdev->dev); in ravb_probe()
3092 pm_runtime_put_autosuspend(&pdev->dev); in ravb_probe()
3097 if (info->nc_queues) in ravb_probe()
3098 netif_napi_del(&priv->napi[RAVB_NC]); in ravb_probe()
3100 netif_napi_del(&priv->napi[RAVB_BE]); in ravb_probe()
3105 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, in ravb_probe()
3106 priv->desc_bat_dma); in ravb_probe()
3108 pm_runtime_put(&pdev->dev); in ravb_probe()
3110 pm_runtime_disable(&pdev->dev); in ravb_probe()
3111 pm_runtime_dont_use_autosuspend(&pdev->dev); in ravb_probe()
3112 clk_unprepare(priv->refclk); in ravb_probe()
3124 const struct ravb_hw_info *info = priv->info; in ravb_remove()
3125 struct device *dev = &priv->pdev->dev; in ravb_remove()
3133 if (info->nc_queues) in ravb_remove()
3134 netif_napi_del(&priv->napi[RAVB_NC]); in ravb_remove()
3135 netif_napi_del(&priv->napi[RAVB_BE]); in ravb_remove()
3139 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, in ravb_remove()
3140 priv->desc_bat_dma); in ravb_remove()
3142 pm_runtime_put_sync_suspend(&pdev->dev); in ravb_remove()
3143 pm_runtime_disable(&pdev->dev); in ravb_remove()
3145 clk_unprepare(priv->refclk); in ravb_remove()
3146 reset_control_assert(priv->rstc); in ravb_remove()
3154 const struct ravb_hw_info *info = priv->info; in ravb_wol_setup()
3162 synchronize_irq(priv->emac_irq); in ravb_wol_setup()
3163 if (info->nc_queues) in ravb_wol_setup()
3164 napi_disable(&priv->napi[RAVB_NC]); in ravb_wol_setup()
3165 napi_disable(&priv->napi[RAVB_BE]); in ravb_wol_setup()
3171 if (priv->info->ccc_gac) in ravb_wol_setup()
3174 return enable_irq_wake(priv->emac_irq); in ravb_wol_setup()
3180 const struct ravb_hw_info *info = priv->info; in ravb_wol_restore()
3193 if (priv->info->ccc_gac) in ravb_wol_restore()
3194 ravb_ptp_init(ndev, priv->pdev); in ravb_wol_restore()
3196 if (info->nc_queues) in ravb_wol_restore()
3197 napi_enable(&priv->napi[RAVB_NC]); in ravb_wol_restore()
3198 napi_enable(&priv->napi[RAVB_BE]); in ravb_wol_restore()
3205 return disable_irq_wake(priv->emac_irq); in ravb_wol_restore()
3219 if (priv->wol_enabled) in ravb_suspend()
3226 ret = pm_runtime_force_suspend(&priv->pdev->dev); in ravb_suspend()
3231 return reset_control_assert(priv->rstc); in ravb_suspend()
3240 ret = reset_control_deassert(priv->rstc); in ravb_resume()
3248 if (priv->wol_enabled) { in ravb_resume()
3269 if (!priv->wol_enabled) { in ravb_resume()
3282 clk_disable(priv->refclk); in ravb_runtime_suspend()
3292 return clk_enable(priv->refclk); in ravb_runtime_resume()