Lines matching +full:rcar +full:- +full:dmac (from drivers/net/ethernet/renesas/ravb_main.c)

1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (C) 2014-2019 Renesas Electronics Corporation
6 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
14 #include <linux/dma-mapping.h>
58 return -ETIMEDOUT; in ravb_wait()
90 switch (priv->speed) { in ravb_set_rate_gbeth()
107 switch (priv->speed) { in ravb_set_rate_rcar()
148 ravb_modify(priv->ndev, PIR, mask, set ? mask : 0); in ravb_mdio_ctrl()
175 return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0; in ravb_get_mdio_data()
191 return priv->rx_ring[q].raw + priv->info->rx_desc_size * i; in ravb_rx_get_desc()
194 /* Free TX skb function for AVB-IP */
198 struct net_device_stats *stats = &priv->stats[q]; in ravb_tx_free()
199 unsigned int num_tx_desc = priv->num_tx_desc; in ravb_tx_free()
205 for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { in ravb_tx_free()
208 entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * in ravb_tx_free()
210 desc = &priv->tx_ring[q][entry]; in ravb_tx_free()
211 txed = desc->die_dt == DT_FEMPTY; in ravb_tx_free()
216 size = le16_to_cpu(desc->ds_tagl) & TX_DS; in ravb_tx_free()
218 if (priv->tx_skb[q][entry / num_tx_desc]) { in ravb_tx_free()
219 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), in ravb_tx_free()
222 if (entry % num_tx_desc == num_tx_desc - 1) { in ravb_tx_free()
224 dev_kfree_skb_any(priv->tx_skb[q][entry]); in ravb_tx_free()
225 priv->tx_skb[q][entry] = NULL; in ravb_tx_free()
227 stats->tx_packets++; in ravb_tx_free()
232 stats->tx_bytes += size; in ravb_tx_free()
233 desc->die_dt = DT_EEMPTY; in ravb_tx_free()
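The reclaim loop above runs on free-running cur_tx/dirty_tx counters and derives the ring slot with a modulo only at the point of use. A minimal stand-alone sketch of that accounting, with hypothetical names rather than the driver's structures:

    #include <stdint.h>
    #include <stdio.h>

    #define RING_LEN 4                      /* slots in the descriptor ring */

    static uint32_t cur_tx, dirty_tx;       /* free-running, never reset */

    static unsigned int slot(uint32_t counter)
    {
        /* cf. cur_tx[q] % (num_tx_ring[q] * num_tx_desc) above */
        return counter % RING_LEN;
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++)         /* queue three frames */
            printf("xmit -> slot %u\n", slot(cur_tx++));

        for (; cur_tx - dirty_tx > 0; dirty_tx++)   /* completion reclaim */
            printf("free -> slot %u\n", slot(dirty_tx));
        return 0;
    }

Because the counters are unsigned, cur_tx - dirty_tx remains a correct in-flight count even across 32-bit wraparound, which is why the driver never resets them between packets.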
243 if (!priv->rx_ring[q].raw) in ravb_rx_ring_free()
246 ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1); in ravb_rx_ring_free()
247 dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].raw, in ravb_rx_ring_free()
248 priv->rx_desc_dma[q]); in ravb_rx_ring_free()
249 priv->rx_ring[q].raw = NULL; in ravb_rx_ring_free()
256 unsigned int num_tx_desc = priv->num_tx_desc; in ravb_ring_free()
262 if (priv->tx_ring[q]) { in ravb_ring_free()
266 (priv->num_tx_ring[q] * num_tx_desc + 1); in ravb_ring_free()
267 dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q], in ravb_ring_free()
268 priv->tx_desc_dma[q]); in ravb_ring_free()
269 priv->tx_ring[q] = NULL; in ravb_ring_free()
273 for (i = 0; i < priv->num_rx_ring[q]; i++) { in ravb_ring_free()
274 if (priv->rx_buffers[q][i].page) in ravb_ring_free()
275 page_pool_put_page(priv->rx_pool[q], in ravb_ring_free()
276 priv->rx_buffers[q][i].page, in ravb_ring_free()
279 kfree(priv->rx_buffers[q]); in ravb_ring_free()
280 priv->rx_buffers[q] = NULL; in ravb_ring_free()
281 page_pool_destroy(priv->rx_pool[q]); in ravb_ring_free()
284 kfree(priv->tx_align[q]); in ravb_ring_free()
285 priv->tx_align[q] = NULL; in ravb_ring_free()
290 kfree(priv->tx_skb[q]); in ravb_ring_free()
291 priv->tx_skb[q] = NULL; in ravb_ring_free()
299 const struct ravb_hw_info *info = priv->info; in ravb_alloc_rx_buffer()
304 rx_buff = &priv->rx_buffers[q][entry]; in ravb_alloc_rx_buffer()
305 size = info->rx_buffer_size; in ravb_alloc_rx_buffer()
306 rx_buff->page = page_pool_alloc(priv->rx_pool[q], &rx_buff->offset, in ravb_alloc_rx_buffer()
308 if (unlikely(!rx_buff->page)) { in ravb_alloc_rx_buffer()
312 rx_desc->ds_cc = cpu_to_le16(0); in ravb_alloc_rx_buffer()
313 return -ENOMEM; in ravb_alloc_rx_buffer()
316 dma_addr = page_pool_get_dma_addr(rx_buff->page) + rx_buff->offset; in ravb_alloc_rx_buffer()
317 dma_sync_single_for_device(ndev->dev.parent, dma_addr, in ravb_alloc_rx_buffer()
318 info->rx_buffer_size, DMA_FROM_DEVICE); in ravb_alloc_rx_buffer()
319 rx_desc->dptr = cpu_to_le32(dma_addr); in ravb_alloc_rx_buffer()
324 rx_desc->ds_cc = cpu_to_le16(info->rx_buffer_size - in ravb_alloc_rx_buffer()
325 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) - in ravb_alloc_rx_buffer()
338 entry = (priv->dirty_rx[q] + i) % priv->num_rx_ring[q]; in ravb_rx_ring_refill()
341 if (!priv->rx_buffers[q][entry].page) { in ravb_rx_ring_refill()
348 rx_desc->die_dt = DT_FEMPTY; in ravb_rx_ring_refill()
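On allocation failure the code above leaves the descriptor in place but writes ds_cc = 0, so the RX paths can later recognise the slot as unusable (see the "0-byte descriptors ... DMA mapping errors" comments further down). A stand-alone sketch of that sentinel convention, with hypothetical types:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct rx_desc {
        uint16_t ds_cc;     /* buffer size; 0 flags a dead slot */
        void *buf;
    };

    static bool refill(struct rx_desc *d, size_t size)
    {
        d->buf = malloc(size);
        if (!d->buf) {
            d->ds_cc = 0;   /* sentinel: RX path skips 0-byte slots */
            return false;
        }
        d->ds_cc = (uint16_t)size;
        return true;
    }

    int main(void)
    {
        struct rx_desc d;
        if (refill(&d, 2048))
            printf("slot armed, %u bytes\n", d.ds_cc);
        else
            fprintf(stderr, "slot marked dead\n");
        free(d.buf);
        return 0;
    }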
358 unsigned int num_tx_desc = priv->num_tx_desc; in ravb_ring_format()
362 unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] * in ravb_ring_format()
366 priv->cur_rx[q] = 0; in ravb_ring_format()
367 priv->cur_tx[q] = 0; in ravb_ring_format()
368 priv->dirty_rx[q] = 0; in ravb_ring_format()
369 priv->dirty_tx[q] = 0; in ravb_ring_format()
375 rx_desc = ravb_rx_get_desc(priv, q, priv->num_rx_ring[q]); in ravb_ring_format()
376 rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); in ravb_ring_format()
377 rx_desc->die_dt = DT_LINKFIX; /* type */ in ravb_ring_format()
379 memset(priv->tx_ring[q], 0, tx_ring_size); in ravb_ring_format()
381 for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q]; in ravb_ring_format()
383 tx_desc->die_dt = DT_EEMPTY; in ravb_ring_format()
386 tx_desc->die_dt = DT_EEMPTY; in ravb_ring_format()
389 tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); in ravb_ring_format()
390 tx_desc->die_dt = DT_LINKFIX; /* type */ in ravb_ring_format()
393 desc = &priv->desc_bat[RX_QUEUE_OFFSET + q]; in ravb_ring_format()
394 desc->die_dt = DT_LINKFIX; /* type */ in ravb_ring_format()
395 desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); in ravb_ring_format()
398 desc = &priv->desc_bat[q]; in ravb_ring_format()
399 desc->die_dt = DT_LINKFIX; /* type */ in ravb_ring_format()
400 desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); in ravb_ring_format()
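ravb_ring_format() terminates each chain with a DT_LINKFIX descriptor whose pointer leads back to the ring base, so the DMAC loops over the ring indefinitely; the same link type registers each ring in the descriptor base-address table (desc_bat). A minimal sketch of that ring topology, with hypothetical values:

    #include <stdint.h>
    #include <stdio.h>

    enum die_dt { DT_EEMPTY, DT_LINKFIX };  /* values are illustrative */

    struct desc {
        enum die_dt die_dt;
        uint32_t dptr;      /* bus address of a buffer, or of the ring head */
    };

    #define N 4

    int main(void)
    {
        struct desc ring[N + 1];            /* N data slots + 1 link slot */
        uint32_t ring_base = 0x1000;        /* pretend DMA address */

        for (int i = 0; i < N; i++)
            ring[i].die_dt = DT_EEMPTY;     /* empty, owned by the DMAC */
        ring[N].die_dt = DT_LINKFIX;        /* tail descriptor... */
        ring[N].dptr = ring_base;           /* ...chains back to the head */

        printf("tail -> 0x%x: the DMAC cycles forever\n",
               (unsigned)ring[N].dptr);
        return 0;
    }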
408 ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1); in ravb_alloc_rx_desc()
410 priv->rx_ring[q].raw = dma_alloc_coherent(ndev->dev.parent, ring_size, in ravb_alloc_rx_desc()
411 &priv->rx_desc_dma[q], in ravb_alloc_rx_desc()
414 return priv->rx_ring[q].raw; in ravb_alloc_rx_desc()
421 unsigned int num_tx_desc = priv->num_tx_desc; in ravb_ring_init()
425 .pool_size = priv->num_rx_ring[q], in ravb_ring_init()
427 .dev = ndev->dev.parent, in ravb_ring_init()
434 priv->rx_pool[q] = page_pool_create(&params); in ravb_ring_init()
435 if (IS_ERR(priv->rx_pool[q])) in ravb_ring_init()
439 priv->rx_buffers[q] = kcalloc(priv->num_rx_ring[q], in ravb_ring_init()
440 sizeof(*priv->rx_buffers[q]), GFP_KERNEL); in ravb_ring_init()
441 if (!priv->rx_buffers[q]) in ravb_ring_init()
445 priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q], in ravb_ring_init()
446 sizeof(*priv->tx_skb[q]), GFP_KERNEL); in ravb_ring_init()
447 if (!priv->tx_skb[q]) in ravb_ring_init()
455 priv->dirty_rx[q] = 0; in ravb_ring_init()
456 ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q]; in ravb_ring_init()
457 memset(priv->rx_ring[q].raw, 0, ring_size); in ravb_ring_init()
458 num_filled = ravb_rx_ring_refill(ndev, q, priv->num_rx_ring[q], in ravb_ring_init()
460 if (num_filled != priv->num_rx_ring[q]) in ravb_ring_init()
465 priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] + in ravb_ring_init()
466 DPTR_ALIGN - 1, GFP_KERNEL); in ravb_ring_init()
467 if (!priv->tx_align[q]) in ravb_ring_init()
473 (priv->num_tx_ring[q] * num_tx_desc + 1); in ravb_ring_init()
474 priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, in ravb_ring_init()
475 &priv->tx_desc_dma[q], in ravb_ring_init()
477 if (!priv->tx_ring[q]) in ravb_ring_init()
485 return -ENOMEM; in ravb_ring_init()
490 bool tx_enable = ndev->features & NETIF_F_HW_CSUM; in ravb_csum_init_gbeth()
491 bool rx_enable = ndev->features & NETIF_F_RXCSUM; in ravb_csum_init_gbeth()
501 ndev->features &= ~NETIF_F_HW_CSUM; in ravb_csum_init_gbeth()
504 ndev->features &= ~NETIF_F_RXCSUM; in ravb_csum_init_gbeth()
521 if (priv->phy_interface == PHY_INTERFACE_MODE_MII) { in ravb_emac_init_gbeth()
531 ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR); in ravb_emac_init_gbeth()
534 ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) | in ravb_emac_init_gbeth()
542 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | in ravb_emac_init_gbeth()
543 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); in ravb_emac_init_gbeth()
544 ravb_write(ndev, (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); in ravb_emac_init_gbeth()
546 /* E-MAC status register clear */ in ravb_emac_init_gbeth()
551 /* E-MAC interrupt enable register */ in ravb_emac_init_gbeth()
566 ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR); in ravb_emac_init_rcar()
570 (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) | in ravb_emac_init_rcar()
577 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | in ravb_emac_init_rcar()
578 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); in ravb_emac_init_rcar()
580 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); in ravb_emac_init_rcar()
582 /* E-MAC status register clear */ in ravb_emac_init_rcar()
585 /* E-MAC interrupt enable register */ in ravb_emac_init_rcar()
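Both E-MAC init paths above program the station address as two registers: the first four bytes, big-endian, into MAHR and the remaining two into MALR. The packing in isolation, as a stand-alone sketch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint8_t a[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

        uint32_t mahr = (uint32_t)a[0] << 24 | (uint32_t)a[1] << 16 |
                        (uint32_t)a[2] << 8  | a[3];
        uint32_t malr = (uint32_t)a[4] << 8  | a[5];

        /* expected: MAHR=0x02112233 MALR=0x4455 */
        printf("MAHR=0x%08x MALR=0x%04x\n", (unsigned)mahr, (unsigned)malr);
        return 0;
    }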
592 bool mii = priv->phy_interface == PHY_INTERFACE_MODE_MII; in ravb_emac_init_rcar_gen4()
599 /* E-MAC init function */
603 const struct ravb_hw_info *info = priv->info; in ravb_emac_init()
605 info->emac_init(ndev); in ravb_emac_init()
620 /* Set DMAC RX */ in ravb_dmac_init_gbeth()
624 ravb_write(ndev, 0x7ffc0000 | priv->info->rx_max_frame_size, RTC); in ravb_dmac_init_gbeth()
646 const struct ravb_hw_info *info = priv->info; in ravb_dmac_init_rcar()
673 if (info->multi_irqs) { in ravb_dmac_init_rcar()
695 const struct ravb_hw_info *info = priv->info; in ravb_dmac_init()
703 error = info->dmac_init(ndev); in ravb_dmac_init()
707 /* Setting the control will start the AVB-DMAC process. */ in ravb_dmac_init()
723 while (count--) { in ravb_get_tx_tstamp()
731 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, in ravb_get_tx_tstamp()
733 skb = ts_skb->skb; in ravb_get_tx_tstamp()
734 tag = ts_skb->tag; in ravb_get_tx_tstamp()
735 list_del(&ts_skb->list); in ravb_get_tx_tstamp()
759 * ignore this as it will always be re-checked in inet_gro_receive(). in ravb_rx_csum_gbeth()
765 if (unlikely(skb->len < csum_len)) in ravb_rx_csum_gbeth()
769 skb_frag_t *last_frag = &shinfo->frags[shinfo->nr_frags - 1]; in ravb_rx_csum_gbeth()
776 skb_trim(skb, skb->len - csum_len); in ravb_rx_csum_gbeth()
779 if (!get_unaligned(--hw_csum)) in ravb_rx_csum_gbeth()
780 skb->ip_summed = CHECKSUM_UNNECESSARY; in ravb_rx_csum_gbeth()
790 if (unlikely(skb->len < sizeof(__sum16))) in ravb_rx_csum()
792 hw_csum = skb_tail_pointer(skb) - sizeof(__sum16); in ravb_rx_csum()
793 skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); in ravb_rx_csum()
794 skb->ip_summed = CHECKSUM_COMPLETE; in ravb_rx_csum()
795 skb_trim(skb, skb->len - sizeof(__sum16)); in ravb_rx_csum()
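ravb_rx_csum() relies on the hardware appending its 16-bit checksum after the frame data: the driver reads the trailing __sum16, records it with CHECKSUM_COMPLETE, and trims it off. A stand-alone model of that trailer handling, with a hypothetical frame:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* payload followed by the 2-byte checksum the MAC appended */
        uint8_t frame[] = { 0xde, 0xad, 0xbe, 0xef, 0x34, 0x12 };
        size_t len = sizeof(frame);

        if (len < sizeof(uint16_t))
            return 1;                   /* runt frame: nothing to strip */

        uint16_t csum;
        memcpy(&csum, frame + len - 2, sizeof(csum)); /* unaligned-safe */
        len -= sizeof(uint16_t);        /* trim, like skb_trim() above */

        /* the driver decodes little-endian via get_unaligned_le16(); this
           sketch assumes a little-endian host for brevity */
        printf("payload %zu bytes, hw csum 0x%04x\n", len, csum);
        return 0;
    }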
802 const struct ravb_hw_info *info = priv->info; in ravb_rx_gbeth()
813 limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q]; in ravb_rx_gbeth()
814 stats = &priv->stats[q]; in ravb_rx_gbeth()
816 for (i = 0; i < limit; i++, priv->cur_rx[q]++) { in ravb_rx_gbeth()
819 entry = priv->cur_rx[q] % priv->num_rx_ring[q]; in ravb_rx_gbeth()
820 desc = &priv->rx_ring[q].desc[entry]; in ravb_rx_gbeth()
821 if (rx_packets == budget || desc->die_dt == DT_FEMPTY) in ravb_rx_gbeth()
826 desc_status = desc->msc; in ravb_rx_gbeth()
827 desc_len = le16_to_cpu(desc->ds_cc) & RX_DS; in ravb_rx_gbeth()
829 /* We use 0-byte descriptors to mark the DMA mapping errors */ in ravb_rx_gbeth()
834 stats->multicast++; in ravb_rx_gbeth()
837 stats->rx_errors++; in ravb_rx_gbeth()
839 stats->rx_crc_errors++; in ravb_rx_gbeth()
841 stats->rx_frame_errors++; in ravb_rx_gbeth()
843 stats->rx_length_errors++; in ravb_rx_gbeth()
845 stats->rx_missed_errors++; in ravb_rx_gbeth()
850 rx_buff = &priv->rx_buffers[q][entry]; in ravb_rx_gbeth()
851 rx_addr = page_address(rx_buff->page) + rx_buff->offset; in ravb_rx_gbeth()
852 die_dt = desc->die_dt & 0xF0; in ravb_rx_gbeth()
853 dma_sync_single_for_cpu(ndev->dev.parent, in ravb_rx_gbeth()
854 le32_to_cpu(desc->dptr), in ravb_rx_gbeth()
862 info->rx_buffer_size); in ravb_rx_gbeth()
864 stats->rx_errors++; in ravb_rx_gbeth()
865 page_pool_put_page(priv->rx_pool[q], in ravb_rx_gbeth()
866 rx_buff->page, 0, in ravb_rx_gbeth()
877 priv->rx_1st_skb = skb; in ravb_rx_gbeth()
888 * multi-descriptor packet. in ravb_rx_gbeth()
890 if (unlikely(!priv->rx_1st_skb)) { in ravb_rx_gbeth()
891 stats->rx_errors++; in ravb_rx_gbeth()
892 page_pool_put_page(priv->rx_pool[q], in ravb_rx_gbeth()
893 rx_buff->page, 0, in ravb_rx_gbeth()
902 skb_add_rx_frag(priv->rx_1st_skb, in ravb_rx_gbeth()
903 skb_shinfo(priv->rx_1st_skb)->nr_frags, in ravb_rx_gbeth()
904 rx_buff->page, rx_buff->offset, in ravb_rx_gbeth()
905 desc_len, info->rx_buffer_size); in ravb_rx_gbeth()
911 skb = priv->rx_1st_skb; in ravb_rx_gbeth()
921 skb->protocol = eth_type_trans(skb, ndev); in ravb_rx_gbeth()
922 if (ndev->features & NETIF_F_RXCSUM) in ravb_rx_gbeth()
924 stats->rx_bytes += skb->len; in ravb_rx_gbeth()
925 napi_gro_receive(&priv->napi[q], skb); in ravb_rx_gbeth()
929 * non-NULL when valid. in ravb_rx_gbeth()
931 priv->rx_1st_skb = NULL; in ravb_rx_gbeth()
935 rx_buff->page = NULL; in ravb_rx_gbeth()
941 priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q, in ravb_rx_gbeth()
942 priv->cur_rx[q] - priv->dirty_rx[q], in ravb_rx_gbeth()
945 stats->rx_packets += rx_packets; in ravb_rx_gbeth()
953 const struct ravb_hw_info *info = priv->info; in ravb_rx_rcar()
954 struct net_device_stats *stats = &priv->stats[q]; in ravb_rx_rcar()
964 limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q]; in ravb_rx_rcar()
965 for (i = 0; i < limit; i++, priv->cur_rx[q]++) { in ravb_rx_rcar()
966 entry = priv->cur_rx[q] % priv->num_rx_ring[q]; in ravb_rx_rcar()
967 desc = &priv->rx_ring[q].ex_desc[entry]; in ravb_rx_rcar()
968 if (rx_packets == budget || desc->die_dt == DT_FEMPTY) in ravb_rx_rcar()
973 desc_status = desc->msc; in ravb_rx_rcar()
974 pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS; in ravb_rx_rcar()
976 /* We use 0-byte descriptors to mark the DMA mapping errors */ in ravb_rx_rcar()
981 stats->multicast++; in ravb_rx_rcar()
985 stats->rx_errors++; in ravb_rx_rcar()
987 stats->rx_crc_errors++; in ravb_rx_rcar()
989 stats->rx_frame_errors++; in ravb_rx_rcar()
991 stats->rx_length_errors++; in ravb_rx_rcar()
993 stats->rx_missed_errors++; in ravb_rx_rcar()
995 u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE; in ravb_rx_rcar()
999 rx_buff = &priv->rx_buffers[q][entry]; in ravb_rx_rcar()
1000 rx_addr = page_address(rx_buff->page) + rx_buff->offset; in ravb_rx_rcar()
1001 dma_sync_single_for_cpu(ndev->dev.parent, in ravb_rx_rcar()
1002 le32_to_cpu(desc->dptr), in ravb_rx_rcar()
1005 skb = napi_build_skb(rx_addr, info->rx_buffer_size); in ravb_rx_rcar()
1007 stats->rx_errors++; in ravb_rx_rcar()
1008 page_pool_put_page(priv->rx_pool[q], in ravb_rx_rcar()
1009 rx_buff->page, 0, true); in ravb_rx_rcar()
1021 ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) << in ravb_rx_rcar()
1022 32) | le32_to_cpu(desc->ts_sl); in ravb_rx_rcar()
1023 ts.tv_nsec = le32_to_cpu(desc->ts_n); in ravb_rx_rcar()
1024 shhwtstamps->hwtstamp = timespec64_to_ktime(ts); in ravb_rx_rcar()
1028 skb->protocol = eth_type_trans(skb, ndev); in ravb_rx_rcar()
1029 if (ndev->features & NETIF_F_RXCSUM) in ravb_rx_rcar()
1031 napi_gro_receive(&priv->napi[q], skb); in ravb_rx_rcar()
1033 stats->rx_bytes += pkt_len; in ravb_rx_rcar()
1036 rx_buff->page = NULL; in ravb_rx_rcar()
1041 priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q, in ravb_rx_rcar()
1042 priv->cur_rx[q] - priv->dirty_rx[q], in ravb_rx_rcar()
1045 stats->rx_packets += rx_packets; in ravb_rx_rcar()
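The gPTP timestamp in the extended RX descriptor above is split into a 16-bit high word and a 32-bit low word of seconds plus a nanoseconds field, which the code splices back together before handing the value to the stack. The splice in isolation, with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t ts_sh = 0x0001;        /* seconds, bits 47..32 */
        uint32_t ts_sl = 0x00000002;    /* seconds, bits 31..0  */
        uint32_t ts_n  = 500000000;     /* nanoseconds */

        uint64_t sec = (uint64_t)ts_sh << 32 | ts_sl;
        printf("%llu.%09u s\n", (unsigned long long)sec, (unsigned)ts_n);
        return 0;
    }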
1053 const struct ravb_hw_info *info = priv->info; in ravb_rx()
1055 return info->receive(ndev, budget, q); in ravb_rx()
1074 const struct ravb_hw_info *info = priv->info; in ravb_stop_dma()
1078 error = ravb_wait(ndev, TCCR, info->tccr_mask, 0); in ravb_stop_dma()
1088 /* Stop the E-MAC's RX/TX processes. */ in ravb_stop_dma()
1096 /* Stop AVB-DMAC process */ in ravb_stop_dma()
1100 /* E-MAC interrupt handler */
1110 pm_wakeup_event(&priv->pdev->dev, 0); in ravb_emac_interrupt_unlocked()
1112 ndev->stats.tx_carrier_errors++; in ravb_emac_interrupt_unlocked()
1115 if (priv->no_avb_link) in ravb_emac_interrupt_unlocked()
1118 if (priv->avb_link_active_low) in ravb_emac_interrupt_unlocked()
1134 struct device *dev = &priv->pdev->dev; in ravb_emac_interrupt()
1144 spin_lock(&priv->lock); in ravb_emac_interrupt()
1146 spin_unlock(&priv->lock); in ravb_emac_interrupt()
1168 priv->stats[RAVB_BE].rx_over_errors++; in ravb_error_interrupt()
1172 priv->stats[RAVB_NC].rx_over_errors++; in ravb_error_interrupt()
1176 priv->rx_fifo_errors++; in ravb_error_interrupt()
1183 const struct ravb_hw_info *info = priv->info; in ravb_queue_interrupt()
1190 if (napi_schedule_prep(&priv->napi[q])) { in ravb_queue_interrupt()
1192 if (!info->irq_en_dis) { in ravb_queue_interrupt()
1199 __napi_schedule(&priv->napi[q]); in ravb_queue_interrupt()
1229 const struct ravb_hw_info *info = priv->info; in ravb_interrupt()
1230 struct device *dev = &priv->pdev->dev; in ravb_interrupt()
1239 spin_lock(&priv->lock); in ravb_interrupt()
1252 if (info->nc_queues) { in ravb_interrupt()
1253 for (q = RAVB_NC; q >= RAVB_BE; q--) { in ravb_interrupt()
1263 /* E-MAC status summary */ in ravb_interrupt()
1281 spin_unlock(&priv->lock); in ravb_interrupt()
1293 struct device *dev = &priv->pdev->dev; in ravb_multi_interrupt()
1302 spin_lock(&priv->lock); in ravb_multi_interrupt()
1322 spin_unlock(&priv->lock); in ravb_multi_interrupt()
1333 struct device *dev = &priv->pdev->dev; in ravb_dma_interrupt()
1341 spin_lock(&priv->lock); in ravb_dma_interrupt()
1347 spin_unlock(&priv->lock); in ravb_dma_interrupt()
1366 struct net_device *ndev = napi->dev; in ravb_poll()
1368 const struct ravb_hw_info *info = priv->info; in ravb_poll()
1370 int q = napi - priv->napi; in ravb_poll()
1380 spin_lock_irqsave(&priv->lock, flags); in ravb_poll()
1385 spin_unlock_irqrestore(&priv->lock, flags); in ravb_poll()
1388 priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors; in ravb_poll()
1389 if (info->nc_queues) in ravb_poll()
1390 priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; in ravb_poll()
1391 if (priv->rx_over_errors != ndev->stats.rx_over_errors) in ravb_poll()
1392 ndev->stats.rx_over_errors = priv->rx_over_errors; in ravb_poll()
1393 if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) in ravb_poll()
1394 ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; in ravb_poll()
1397 /* Re-enable RX/TX interrupts */ in ravb_poll()
1398 spin_lock_irqsave(&priv->lock, flags); in ravb_poll()
1399 if (!info->irq_en_dis) { in ravb_poll()
1406 spin_unlock_irqrestore(&priv->lock, flags); in ravb_poll()
1416 ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0); in ravb_set_duplex_gbeth()
1423 const struct ravb_hw_info *info = priv->info; in ravb_adjust_link()
1424 struct phy_device *phydev = ndev->phydev; in ravb_adjust_link()
1428 spin_lock_irqsave(&priv->lock, flags); in ravb_adjust_link()
1430 /* Disable TX and RX right over here, if E-MAC change is ignored */ in ravb_adjust_link()
1431 if (priv->no_avb_link) in ravb_adjust_link()
1434 if (phydev->link) { in ravb_adjust_link()
1435 if (info->half_duplex && phydev->duplex != priv->duplex) { in ravb_adjust_link()
1437 priv->duplex = phydev->duplex; in ravb_adjust_link()
1441 if (phydev->speed != priv->speed) { in ravb_adjust_link()
1443 priv->speed = phydev->speed; in ravb_adjust_link()
1444 info->set_rate(ndev); in ravb_adjust_link()
1446 if (!priv->link) { in ravb_adjust_link()
1449 priv->link = phydev->link; in ravb_adjust_link()
1451 } else if (priv->link) { in ravb_adjust_link()
1453 priv->link = 0; in ravb_adjust_link()
1454 priv->speed = 0; in ravb_adjust_link()
1455 if (info->half_duplex) in ravb_adjust_link()
1456 priv->duplex = -1; in ravb_adjust_link()
1459 /* Enable TX and RX right over here, if E-MAC change is ignored */ in ravb_adjust_link()
1460 if (priv->no_avb_link && phydev->link) in ravb_adjust_link()
1463 spin_unlock_irqrestore(&priv->lock, flags); in ravb_adjust_link()
1472 struct device_node *np = ndev->dev.parent->of_node; in ravb_phy_init()
1474 const struct ravb_hw_info *info = priv->info; in ravb_phy_init()
1480 priv->link = 0; in ravb_phy_init()
1481 priv->speed = 0; in ravb_phy_init()
1482 priv->duplex = -1; in ravb_phy_init()
1485 pn = of_parse_phandle(np, "phy-handle", 0); in ravb_phy_init()
1498 iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII in ravb_phy_init()
1499 : priv->phy_interface; in ravb_phy_init()
1504 err = -ENOENT; in ravb_phy_init()
1508 if (!info->half_duplex) { in ravb_phy_init()
1540 phy_start(ndev->phydev); in ravb_phy_start()
1549 return priv->msg_enable; in ravb_get_msglevel()
1556 priv->msg_enable = value; in ravb_set_msglevel()
1614 const struct ravb_hw_info *info = priv->info; in ravb_get_sset_count()
1618 return info->stats_len; in ravb_get_sset_count()
1620 return -EOPNOTSUPP; in ravb_get_sset_count()
1628 const struct ravb_hw_info *info = priv->info; in ravb_get_ethtool_stats()
1633 num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1; in ravb_get_ethtool_stats()
1634 /* Device-specific stats */ in ravb_get_ethtool_stats()
1636 struct net_device_stats *stats = &priv->stats[q]; in ravb_get_ethtool_stats()
1638 data[i++] = priv->cur_rx[q]; in ravb_get_ethtool_stats()
1639 data[i++] = priv->cur_tx[q]; in ravb_get_ethtool_stats()
1640 data[i++] = priv->dirty_rx[q]; in ravb_get_ethtool_stats()
1641 data[i++] = priv->dirty_tx[q]; in ravb_get_ethtool_stats()
1642 data[i++] = stats->rx_packets; in ravb_get_ethtool_stats()
1643 data[i++] = stats->tx_packets; in ravb_get_ethtool_stats()
1644 data[i++] = stats->rx_bytes; in ravb_get_ethtool_stats()
1645 data[i++] = stats->tx_bytes; in ravb_get_ethtool_stats()
1646 data[i++] = stats->multicast; in ravb_get_ethtool_stats()
1647 data[i++] = stats->rx_errors; in ravb_get_ethtool_stats()
1648 data[i++] = stats->rx_crc_errors; in ravb_get_ethtool_stats()
1649 data[i++] = stats->rx_frame_errors; in ravb_get_ethtool_stats()
1650 data[i++] = stats->rx_length_errors; in ravb_get_ethtool_stats()
1651 data[i++] = stats->rx_missed_errors; in ravb_get_ethtool_stats()
1652 data[i++] = stats->rx_over_errors; in ravb_get_ethtool_stats()
1659 const struct ravb_hw_info *info = priv->info; in ravb_get_strings()
1663 memcpy(data, info->gstrings_stats, info->gstrings_size); in ravb_get_strings()
1675 ring->rx_max_pending = BE_RX_RING_MAX; in ravb_get_ringparam()
1676 ring->tx_max_pending = BE_TX_RING_MAX; in ravb_get_ringparam()
1677 ring->rx_pending = priv->num_rx_ring[RAVB_BE]; in ravb_get_ringparam()
1678 ring->tx_pending = priv->num_tx_ring[RAVB_BE]; in ravb_get_ringparam()
1687 const struct ravb_hw_info *info = priv->info; in ravb_set_ringparam()
1690 if (ring->tx_pending > BE_TX_RING_MAX || in ravb_set_ringparam()
1691 ring->rx_pending > BE_RX_RING_MAX || in ravb_set_ringparam()
1692 ring->tx_pending < BE_TX_RING_MIN || in ravb_set_ringparam()
1693 ring->rx_pending < BE_RX_RING_MIN) in ravb_set_ringparam()
1694 return -EINVAL; in ravb_set_ringparam()
1695 if (ring->rx_mini_pending || ring->rx_jumbo_pending) in ravb_set_ringparam()
1696 return -EINVAL; in ravb_set_ringparam()
1701 if (info->gptp) in ravb_set_ringparam()
1710 synchronize_irq(ndev->irq); in ravb_set_ringparam()
1714 if (info->nc_queues) in ravb_set_ringparam()
1719 priv->num_rx_ring[RAVB_BE] = ring->rx_pending; in ravb_set_ringparam()
1720 priv->num_tx_ring[RAVB_BE] = ring->tx_pending; in ravb_set_ringparam()
1734 if (info->gptp) in ravb_set_ringparam()
1735 ravb_ptp_init(ndev, priv->pdev); in ravb_set_ringparam()
1747 const struct ravb_hw_info *hw_info = priv->info; in ravb_get_ts_info()
1749 if (hw_info->gptp || hw_info->ccc_gac) { in ravb_get_ts_info()
1750 info->so_timestamping = in ravb_get_ts_info()
1755 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); in ravb_get_ts_info()
1756 info->rx_filters = in ravb_get_ts_info()
1760 info->phc_index = ptp_clock_index(priv->ptp.clock); in ravb_get_ts_info()
1770 wol->supported = WAKE_MAGIC; in ravb_get_wol()
1771 wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0; in ravb_get_wol()
1777 const struct ravb_hw_info *info = priv->info; in ravb_set_wol()
1779 if (!info->magic_pkt || (wol->wolopts & ~WAKE_MAGIC)) in ravb_set_wol()
1780 return -EOPNOTSUPP; in ravb_set_wol()
1782 priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); in ravb_set_wol()
1784 device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled); in ravb_set_wol()
1809 const struct ravb_hw_info *info = priv->info; in ravb_set_config_mode()
1812 if (info->gptp) { in ravb_set_config_mode()
1818 } else if (info->ccc_gac) { in ravb_set_config_mode()
1830 const struct ravb_hw_info *info = priv->info; in ravb_set_gti()
1832 if (!(info->gptp || info->ccc_gac)) in ravb_set_gti()
1835 ravb_write(ndev, priv->gti_tiv, GTI); in ravb_set_gti()
1844 const struct ravb_hw_info *info = priv->info; in ravb_compute_gti()
1845 struct device *dev = ndev->dev.parent; in ravb_compute_gti()
1849 if (!(info->gptp || info->ccc_gac)) in ravb_compute_gti()
1852 if (info->gptp_ref_clk) in ravb_compute_gti()
1853 rate = clk_get_rate(priv->gptp_clk); in ravb_compute_gti()
1855 rate = clk_get_rate(priv->clk); in ravb_compute_gti()
1857 return -EINVAL; in ravb_compute_gti()
1862 dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n", in ravb_compute_gti()
1864 return -EINVAL; in ravb_compute_gti()
1866 priv->gti_tiv = inc; in ravb_compute_gti()
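ravb_compute_gti() above derives the gPTP timer increment from the clock rate and range-checks it (the dev_err prints the violating value). Assuming the usual 2^-20 ns fixed-point representation of the tick period, the arithmetic reduces to inc = (10^9 << 20) / rate; a stand-alone reconstruction of that computation, not quoted from the driver:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t rate = 133000000;  /* example gPTP clock in Hz (hypothetical) */

        /* tick period in 2^-20 ns units: (1e9 << 20) / rate */
        uint64_t inc = (1000000000ULL << 20) / rate;

        printf("GTI.TIV = 0x%llx\n", (unsigned long long)inc);
        return 0;
    }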
1878 if (!priv->info->internal_delay) in ravb_parse_delay_mode()
1881 if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) { in ravb_parse_delay_mode()
1883 priv->rxcidm = !!delay; in ravb_parse_delay_mode()
1886 if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) { in ravb_parse_delay_mode()
1888 priv->txcidm = !!delay; in ravb_parse_delay_mode()
1895 /* Fall back to legacy rgmii-*id behavior */ in ravb_parse_delay_mode()
1896 if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || in ravb_parse_delay_mode()
1897 priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) { in ravb_parse_delay_mode()
1898 priv->rxcidm = 1; in ravb_parse_delay_mode()
1899 priv->rgmii_override = 1; in ravb_parse_delay_mode()
1902 if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || in ravb_parse_delay_mode()
1903 priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) { in ravb_parse_delay_mode()
1904 priv->txcidm = 1; in ravb_parse_delay_mode()
1905 priv->rgmii_override = 1; in ravb_parse_delay_mode()
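The parsing above honours explicit rx-internal-delay-ps / tx-internal-delay-ps properties first and only then falls back to the legacy rgmii-*id interface modes, setting rgmii_override so later code knows the mode was rewritten. A stand-alone model of that decision, with a hypothetical enum rather than the kernel's phy_interface_t:

    #include <stdbool.h>
    #include <stdio.h>

    enum phy_mode { RGMII, RGMII_ID, RGMII_RXID, RGMII_TXID };

    int main(void)
    {
        enum phy_mode mode = RGMII_RXID;
        bool rxcidm = false, txcidm = false, rgmii_override = false;
        bool explicit_ps = false;   /* true if *-internal-delay-ps present */

        if (!explicit_ps) {         /* legacy rgmii-*id fallback */
            if (mode == RGMII_ID || mode == RGMII_RXID) {
                rxcidm = true;
                rgmii_override = true;
            }
            if (mode == RGMII_ID || mode == RGMII_TXID) {
                txcidm = true;
                rgmii_override = true;
            }
        }
        printf("rx=%d tx=%d override=%d\n", rxcidm, txcidm, rgmii_override);
        return 0;
    }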
1914 if (!priv->info->internal_delay) in ravb_set_delay_mode()
1917 if (priv->rxcidm) in ravb_set_delay_mode()
1919 if (priv->txcidm) in ravb_set_delay_mode()
1928 const struct ravb_hw_info *info = priv->info; in ravb_open()
1929 struct device *dev = &priv->pdev->dev; in ravb_open()
1932 napi_enable(&priv->napi[RAVB_BE]); in ravb_open()
1933 if (info->nc_queues) in ravb_open()
1934 napi_enable(&priv->napi[RAVB_NC]); in ravb_open()
1946 ravb_write(ndev, priv->desc_bat_dma, DBAT); in ravb_open()
1958 if (info->gptp || info->ccc_gac) in ravb_open()
1959 ravb_ptp_init(ndev, priv->pdev); in ravb_open()
1972 if (info->gptp || info->ccc_gac) in ravb_open()
1981 if (info->nc_queues) in ravb_open()
1982 napi_disable(&priv->napi[RAVB_NC]); in ravb_open()
1983 napi_disable(&priv->napi[RAVB_BE]); in ravb_open()
1997 ndev->stats.tx_errors++; in ravb_tx_timeout()
1999 schedule_work(&priv->work); in ravb_tx_timeout()
2006 const struct ravb_hw_info *info = priv->info; in ravb_tx_timeout_work()
2007 struct net_device *ndev = priv->ndev; in ravb_tx_timeout_work()
2012 schedule_work(&priv->work); in ravb_tx_timeout_work()
2019 if (info->gptp) in ravb_tx_timeout_work()
2029 * re-enables the TX and RX and skip the following in ravb_tx_timeout_work()
2030 * re-initialization procedure. in ravb_tx_timeout_work()
2037 if (info->nc_queues) in ravb_tx_timeout_work()
2044 * should return here to avoid re-enabling the TX and RX in in ravb_tx_timeout_work()
2055 if (info->gptp) in ravb_tx_timeout_work()
2056 ravb_ptp_init(ndev, priv->pdev); in ravb_tx_timeout_work()
2066 u16 net_protocol = ntohs(skb->protocol); in ravb_can_tx_csum_gbeth()
2070 * - there are zero or one VLAN headers with TPID=0x8100 in ravb_can_tx_csum_gbeth()
2071 * - the network protocol is IPv4 or IPv6 in ravb_can_tx_csum_gbeth()
2072 * - the transport protocol is TCP, UDP or ICMP in ravb_can_tx_csum_gbeth()
2073 * - the packet is not fragmented in ravb_can_tx_csum_gbeth()
2083 net_protocol = ntohs(vh->h_vlan_encapsulated_proto); in ravb_can_tx_csum_gbeth()
2088 inner_protocol = ip_hdr(skb)->protocol; in ravb_can_tx_csum_gbeth()
2091 inner_protocol = ipv6_hdr(skb)->nexthdr; in ravb_can_tx_csum_gbeth()
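Per the comment block above, GbEth TX checksum offload is only attempted for at most one 802.1Q tag, an IPv4 or IPv6 network header, a TCP/UDP/ICMP transport header, and an unfragmented packet. The shape of that gate, reduced to stand-alone C with hypothetical constants (not the driver's exact checks):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ETH_P_8021Q 0x8100
    #define ETH_P_IP    0x0800
    #define ETH_P_IPV6  0x86DD

    static bool can_tx_csum(uint16_t net_proto, uint8_t inner, bool fragged)
    {
        /* caller already unwrapped one 0x8100 tag; a second one fails */
        if (net_proto == ETH_P_8021Q)
            return false;
        if (net_proto != ETH_P_IP && net_proto != ETH_P_IPV6)
            return false;
        if (fragged)
            return false;
        return inner == 6 /* TCP */ || inner == 17 /* UDP */ ||
               inner == 1 /* ICMP */;
    }

    int main(void)
    {
        printf("%d\n", can_tx_csum(ETH_P_IP, 6, false)); /* 1: TCP/IPv4 ok */
        printf("%d\n", can_tx_csum(ETH_P_IP, 6, true));  /* 0: fragmented */
        return 0;
    }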
2110 const struct ravb_hw_info *info = priv->info; in ravb_start_xmit()
2111 unsigned int num_tx_desc = priv->num_tx_desc; in ravb_start_xmit()
2121 if (skb->ip_summed == CHECKSUM_PARTIAL && !ravb_can_tx_csum_gbeth(skb)) in ravb_start_xmit()
2124 spin_lock_irqsave(&priv->lock, flags); in ravb_start_xmit()
2125 if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) * in ravb_start_xmit()
2130 spin_unlock_irqrestore(&priv->lock, flags); in ravb_start_xmit()
2137 entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc); in ravb_start_xmit()
2138 priv->tx_skb[q][entry / num_tx_desc] = skb; in ravb_start_xmit()
2141 buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + in ravb_start_xmit()
2143 len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; in ravb_start_xmit()
2154 * length of the second DMA descriptor (skb->len - len) in ravb_start_xmit()
2160 memcpy(buffer, skb->data, len); in ravb_start_xmit()
2161 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, in ravb_start_xmit()
2163 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_start_xmit()
2166 desc = &priv->tx_ring[q][entry]; in ravb_start_xmit()
2167 desc->ds_tagl = cpu_to_le16(len); in ravb_start_xmit()
2168 desc->dptr = cpu_to_le32(dma_addr); in ravb_start_xmit()
2170 buffer = skb->data + len; in ravb_start_xmit()
2171 len = skb->len - len; in ravb_start_xmit()
2172 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, in ravb_start_xmit()
2174 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_start_xmit()
2179 desc = &priv->tx_ring[q][entry]; in ravb_start_xmit()
2180 len = skb->len; in ravb_start_xmit()
2181 dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, in ravb_start_xmit()
2183 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_start_xmit()
2186 desc->ds_tagl = cpu_to_le16(len); in ravb_start_xmit()
2187 desc->dptr = cpu_to_le32(dma_addr); in ravb_start_xmit()
2190 if (info->gptp || info->ccc_gac) { in ravb_start_xmit()
2195 desc--; in ravb_start_xmit()
2196 dma_unmap_single(ndev->dev.parent, dma_addr, in ravb_start_xmit()
2201 ts_skb->skb = skb_get(skb); in ravb_start_xmit()
2202 ts_skb->tag = priv->ts_skb_tag++; in ravb_start_xmit()
2203 priv->ts_skb_tag &= 0x3ff; in ravb_start_xmit()
2204 list_add_tail(&ts_skb->list, &priv->ts_skb_list); in ravb_start_xmit()
2207 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in ravb_start_xmit()
2208 desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR; in ravb_start_xmit()
2209 desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12); in ravb_start_xmit()
2216 desc->die_dt = DT_FEND; in ravb_start_xmit()
2217 desc--; in ravb_start_xmit()
2218 /* When using multi-descriptors, DT_FEND needs to get written in ravb_start_xmit()
2229 desc->die_dt = DT_FSTART; in ravb_start_xmit()
2233 desc->die_dt = DT_FSINGLE; in ravb_start_xmit()
2245 priv->cur_tx[q] += num_tx_desc; in ravb_start_xmit()
2246 if (priv->cur_tx[q] - priv->dirty_tx[q] > in ravb_start_xmit()
2247 (priv->num_tx_ring[q] - 1) * num_tx_desc && in ravb_start_xmit()
2252 spin_unlock_irqrestore(&priv->lock, flags); in ravb_start_xmit()
2256 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), in ravb_start_xmit()
2257 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE); in ravb_start_xmit()
2260 priv->tx_skb[q][entry / num_tx_desc] = NULL; in ravb_start_xmit()
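When num_tx_desc is 2 (the R-Car Gen2 4-byte TX alignment restriction flagged by the FIXME near the probe code), the transmit path above splits each skb: the unaligned head is memcpy'd into a per-slot aligned bounce buffer for the first descriptor, the remainder is DMA-mapped in place for the second, and DT_FEND is written before DT_FSTART so the DMAC never sees a half-built frame. A sketch of the split arithmetic, with hypothetical sizes:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define DPTR_ALIGN 4

    int main(void)
    {
        uint8_t skb_data[64];
        memset(skb_data, 0xab, sizeof(skb_data));
        size_t skb_len = sizeof(skb_data);

        /* bytes to the next DPTR_ALIGN boundary, cf. PTR_ALIGN(skb->data,
           DPTR_ALIGN) - skb->data in the driver */
        uintptr_t p = (uintptr_t)skb_data;
        size_t head = (DPTR_ALIGN - (p % DPTR_ALIGN)) % DPTR_ALIGN;

        /* desc 1 carries the copied head, desc 2 the in-place remainder */
        printf("desc1 len=%zu (bounced), desc2 len=%zu (mapped)\n",
               head, skb_len - head);
        return 0;
    }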
2268 return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC : in ravb_select_queue()
2276 const struct ravb_hw_info *info = priv->info; in ravb_get_stats()
2278 struct device *dev = &priv->pdev->dev; in ravb_get_stats()
2280 nstats = &ndev->stats; in ravb_get_stats()
2287 stats0 = &priv->stats[RAVB_BE]; in ravb_get_stats()
2289 if (info->tx_counters) { in ravb_get_stats()
2290 nstats->tx_dropped += ravb_read(ndev, TROCR); in ravb_get_stats()
2294 if (info->carrier_counters) { in ravb_get_stats()
2295 nstats->collisions += ravb_read(ndev, CXR41); in ravb_get_stats()
2297 nstats->tx_carrier_errors += ravb_read(ndev, CXR42); in ravb_get_stats()
2301 nstats->rx_packets = stats0->rx_packets; in ravb_get_stats()
2302 nstats->tx_packets = stats0->tx_packets; in ravb_get_stats()
2303 nstats->rx_bytes = stats0->rx_bytes; in ravb_get_stats()
2304 nstats->tx_bytes = stats0->tx_bytes; in ravb_get_stats()
2305 nstats->multicast = stats0->multicast; in ravb_get_stats()
2306 nstats->rx_errors = stats0->rx_errors; in ravb_get_stats()
2307 nstats->rx_crc_errors = stats0->rx_crc_errors; in ravb_get_stats()
2308 nstats->rx_frame_errors = stats0->rx_frame_errors; in ravb_get_stats()
2309 nstats->rx_length_errors = stats0->rx_length_errors; in ravb_get_stats()
2310 nstats->rx_missed_errors = stats0->rx_missed_errors; in ravb_get_stats()
2311 nstats->rx_over_errors = stats0->rx_over_errors; in ravb_get_stats()
2312 if (info->nc_queues) { in ravb_get_stats()
2313 stats1 = &priv->stats[RAVB_NC]; in ravb_get_stats()
2315 nstats->rx_packets += stats1->rx_packets; in ravb_get_stats()
2316 nstats->tx_packets += stats1->tx_packets; in ravb_get_stats()
2317 nstats->rx_bytes += stats1->rx_bytes; in ravb_get_stats()
2318 nstats->tx_bytes += stats1->tx_bytes; in ravb_get_stats()
2319 nstats->multicast += stats1->multicast; in ravb_get_stats()
2320 nstats->rx_errors += stats1->rx_errors; in ravb_get_stats()
2321 nstats->rx_crc_errors += stats1->rx_crc_errors; in ravb_get_stats()
2322 nstats->rx_frame_errors += stats1->rx_frame_errors; in ravb_get_stats()
2323 nstats->rx_length_errors += stats1->rx_length_errors; in ravb_get_stats()
2324 nstats->rx_missed_errors += stats1->rx_missed_errors; in ravb_get_stats()
2325 nstats->rx_over_errors += stats1->rx_over_errors; in ravb_get_stats()
2339 spin_lock_irqsave(&priv->lock, flags); in ravb_set_rx_mode()
2341 ndev->flags & IFF_PROMISC ? ECMR_PRM : 0); in ravb_set_rx_mode()
2342 spin_unlock_irqrestore(&priv->lock, flags); in ravb_set_rx_mode()
2348 struct device_node *np = ndev->dev.parent->of_node; in ravb_close()
2350 const struct ravb_hw_info *info = priv->info; in ravb_close()
2352 struct device *dev = &priv->pdev->dev; in ravb_close()
2363 if (ndev->phydev) { in ravb_close()
2364 phy_stop(ndev->phydev); in ravb_close()
2365 phy_disconnect(ndev->phydev); in ravb_close()
2371 if (info->gptp || info->ccc_gac) in ravb_close()
2374 /* Set the config mode to stop the AVB-DMAC's processes */ in ravb_close()
2380 if (info->gptp || info->ccc_gac) { in ravb_close()
2381 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) { in ravb_close()
2382 list_del(&ts_skb->list); in ravb_close()
2383 kfree_skb(ts_skb->skb); in ravb_close()
2388 cancel_work_sync(&priv->work); in ravb_close()
2390 if (info->nc_queues) in ravb_close()
2391 napi_disable(&priv->napi[RAVB_NC]); in ravb_close()
2392 napi_disable(&priv->napi[RAVB_BE]); in ravb_close()
2396 if (info->nc_queues) in ravb_close()
2419 config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : in ravb_hwtstamp_get()
2421 switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) { in ravb_hwtstamp_get()
2432 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? in ravb_hwtstamp_get()
2433 -EFAULT : 0; in ravb_hwtstamp_get()
2444 if (copy_from_user(&config, req->ifr_data, sizeof(config))) in ravb_hwtstamp_set()
2445 return -EFAULT; in ravb_hwtstamp_set()
2455 return -ERANGE; in ravb_hwtstamp_set()
2470 priv->tstamp_tx_ctrl = tstamp_tx_ctrl; in ravb_hwtstamp_set()
2471 priv->tstamp_rx_ctrl = tstamp_rx_ctrl; in ravb_hwtstamp_set()
2473 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? in ravb_hwtstamp_set()
2474 -EFAULT : 0; in ravb_hwtstamp_set()
2480 struct phy_device *phydev = ndev->phydev; in ravb_do_ioctl()
2483 return -EINVAL; in ravb_do_ioctl()
2486 return -ENODEV; in ravb_do_ioctl()
2502 WRITE_ONCE(ndev->mtu, new_mtu); in ravb_change_mtu()
2505 synchronize_irq(priv->emac_irq); in ravb_change_mtu()
2519 spin_lock_irqsave(&priv->lock, flags); in ravb_set_rx_csum()
2530 spin_unlock_irqrestore(&priv->lock, flags); in ravb_set_rx_csum()
2552 netdev_features_t changed = ndev->features ^ features; in ravb_set_features_gbeth()
2558 spin_lock_irqsave(&priv->lock, flags); in ravb_set_features_gbeth()
2582 spin_unlock_irqrestore(&priv->lock, flags); in ravb_set_features_gbeth()
2590 netdev_features_t changed = ndev->features ^ features; in ravb_set_features_rcar()
2602 const struct ravb_hw_info *info = priv->info; in ravb_set_features()
2603 struct device *dev = &priv->pdev->dev; in ravb_set_features()
2609 ret = info->set_feature(ndev, features); in ravb_set_features()
2618 ndev->features = features; in ravb_set_features()
2641 struct platform_device *pdev = priv->pdev; in ravb_mdio_init()
2642 struct device *dev = &pdev->dev; in ravb_mdio_init()
2649 priv->mdiobb.ops = &bb_ops; in ravb_mdio_init()
2652 priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb); in ravb_mdio_init()
2653 if (!priv->mii_bus) in ravb_mdio_init()
2654 return -ENOMEM; in ravb_mdio_init()
2657 priv->mii_bus->name = "ravb_mii"; in ravb_mdio_init()
2658 priv->mii_bus->parent = dev; in ravb_mdio_init()
2659 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in ravb_mdio_init()
2660 pdev->name, pdev->id); in ravb_mdio_init()
2663 mdio_node = of_get_child_by_name(dev->of_node, "mdio"); in ravb_mdio_init()
2666 mdio_node = of_node_get(dev->of_node); in ravb_mdio_init()
2668 error = of_mdiobus_register(priv->mii_bus, mdio_node); in ravb_mdio_init()
2673 pn = of_parse_phandle(dev->of_node, "phy-handle", 0); in ravb_mdio_init()
2676 phydev->mac_managed_pm = true; in ravb_mdio_init()
2677 put_device(&phydev->mdio.dev); in ravb_mdio_init()
2684 free_mdio_bitbang(priv->mii_bus); in ravb_mdio_init()
2692 mdiobus_unregister(priv->mii_bus); in ravb_mdio_release()
2695 free_mdio_bitbang(priv->mii_bus); in ravb_mdio_release()
2825 { .compatible = "renesas,etheravb-r8a7790", .data = &ravb_gen2_hw_info },
2826 { .compatible = "renesas,etheravb-r8a7794", .data = &ravb_gen2_hw_info },
2827 { .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
2828 { .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
2829 { .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
2830 { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen4_hw_info },
2831 { .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
2832 { .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
2840 struct platform_device *pdev = priv->pdev; in ravb_setup_irq()
2841 struct net_device *ndev = priv->ndev; in ravb_setup_irq()
2842 struct device *dev = &pdev->dev; in ravb_setup_irq()
2850 return -ENOMEM; in ravb_setup_irq()
2873 const struct ravb_hw_info *info = priv->info; in ravb_setup_irqs()
2874 struct net_device *ndev = priv->ndev; in ravb_setup_irqs()
2878 if (!info->multi_irqs) in ravb_setup_irqs()
2879 return ravb_setup_irq(priv, NULL, NULL, &ndev->irq, ravb_interrupt); in ravb_setup_irqs()
2881 if (info->err_mgmt_irqs) { in ravb_setup_irqs()
2889 error = ravb_setup_irq(priv, irq_name, "ch22:multi", &ndev->irq, ravb_multi_interrupt); in ravb_setup_irqs()
2893 error = ravb_setup_irq(priv, emac_irq_name, "ch24:emac", &priv->emac_irq, in ravb_setup_irqs()
2898 if (info->err_mgmt_irqs) { in ravb_setup_irqs()
2925 struct device_node *np = pdev->dev.of_node; in ravb_probe()
2934 dev_err(&pdev->dev, in ravb_probe()
2936 return -EINVAL; in ravb_probe()
2939 rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); in ravb_probe()
2941 return dev_err_probe(&pdev->dev, PTR_ERR(rstc), in ravb_probe()
2947 return -ENOMEM; in ravb_probe()
2949 info = of_device_get_match_data(&pdev->dev); in ravb_probe()
2951 ndev->features = info->net_features; in ravb_probe()
2952 ndev->hw_features = info->net_hw_features; in ravb_probe()
2953 ndev->vlan_features = info->vlan_features; in ravb_probe()
2959 SET_NETDEV_DEV(ndev, &pdev->dev); in ravb_probe()
2962 priv->info = info; in ravb_probe()
2963 priv->rstc = rstc; in ravb_probe()
2964 priv->ndev = ndev; in ravb_probe()
2965 priv->pdev = pdev; in ravb_probe()
2966 priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE; in ravb_probe()
2967 priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE; in ravb_probe()
2968 if (info->nc_queues) { in ravb_probe()
2969 priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE; in ravb_probe()
2970 priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE; in ravb_probe()
2977 priv->clk = devm_clk_get(&pdev->dev, NULL); in ravb_probe()
2978 if (IS_ERR(priv->clk)) { in ravb_probe()
2979 error = PTR_ERR(priv->clk); in ravb_probe()
2983 if (info->gptp_ref_clk) { in ravb_probe()
2984 priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp"); in ravb_probe()
2985 if (IS_ERR(priv->gptp_clk)) { in ravb_probe()
2986 error = PTR_ERR(priv->gptp_clk); in ravb_probe()
2991 priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk"); in ravb_probe()
2992 if (IS_ERR(priv->refclk)) { in ravb_probe()
2993 error = PTR_ERR(priv->refclk); in ravb_probe()
2996 clk_prepare(priv->refclk); in ravb_probe()
2999 pm_runtime_set_autosuspend_delay(&pdev->dev, 100); in ravb_probe()
3000 pm_runtime_use_autosuspend(&pdev->dev); in ravb_probe()
3001 pm_runtime_enable(&pdev->dev); in ravb_probe()
3002 error = pm_runtime_resume_and_get(&pdev->dev); in ravb_probe()
3006 priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res); in ravb_probe()
3007 if (IS_ERR(priv->addr)) { in ravb_probe()
3008 error = PTR_ERR(priv->addr); in ravb_probe()
3012 /* The Ether-specific entries in the device structure. */ in ravb_probe()
3013 ndev->base_addr = res->start; in ravb_probe()
3015 spin_lock_init(&priv->lock); in ravb_probe()
3016 INIT_WORK(&priv->work, ravb_tx_timeout_work); in ravb_probe()
3018 error = of_get_phy_mode(np, &priv->phy_interface); in ravb_probe()
3019 if (error && error != -ENODEV) in ravb_probe()
3022 priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link"); in ravb_probe()
3023 priv->avb_link_active_low = in ravb_probe()
3024 of_property_read_bool(np, "renesas,ether-link-active-low"); in ravb_probe()
3026 ndev->max_mtu = info->tx_max_frame_size - in ravb_probe()
3028 ndev->min_mtu = ETH_MIN_MTU; in ravb_probe()
3030 /* FIXME: R-Car Gen2 has 4byte alignment restriction for tx buffer in ravb_probe()
3035 priv->num_tx_desc = info->aligned_tx ? 2 : 1; in ravb_probe()
3038 ndev->netdev_ops = &ravb_netdev_ops; in ravb_probe()
3039 ndev->ethtool_ops = &ravb_ethtool_ops; in ravb_probe()
3048 priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM; in ravb_probe()
3049 priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size, in ravb_probe()
3050 &priv->desc_bat_dma, GFP_KERNEL); in ravb_probe()
3051 if (!priv->desc_bat) { in ravb_probe()
3052 dev_err(&pdev->dev, in ravb_probe()
3054 priv->desc_bat_size); in ravb_probe()
3055 error = -ENOMEM; in ravb_probe()
3059 priv->desc_bat[q].die_dt = DT_EOS; in ravb_probe()
3062 INIT_LIST_HEAD(&priv->ts_skb_list); in ravb_probe()
3065 priv->msg_enable = RAVB_DEF_MSG_ENABLE; in ravb_probe()
3074 if (!is_valid_ether_addr(ndev->dev_addr)) { in ravb_probe()
3075 dev_warn(&pdev->dev, in ravb_probe()
3083 dev_err(&pdev->dev, "failed to initialize MDIO\n"); in ravb_probe()
3092 netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll); in ravb_probe()
3093 if (info->nc_queues) in ravb_probe()
3094 netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll); in ravb_probe()
3096 if (info->coalesce_irqs) { in ravb_probe()
3107 device_set_wakeup_capable(&pdev->dev, 1); in ravb_probe()
3111 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); in ravb_probe()
3113 pm_runtime_mark_last_busy(&pdev->dev); in ravb_probe()
3114 pm_runtime_put_autosuspend(&pdev->dev); in ravb_probe()
3119 if (info->nc_queues) in ravb_probe()
3120 netif_napi_del(&priv->napi[RAVB_NC]); in ravb_probe()
3122 netif_napi_del(&priv->napi[RAVB_BE]); in ravb_probe()
3127 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, in ravb_probe()
3128 priv->desc_bat_dma); in ravb_probe()
3130 pm_runtime_put(&pdev->dev); in ravb_probe()
3132 pm_runtime_disable(&pdev->dev); in ravb_probe()
3133 pm_runtime_dont_use_autosuspend(&pdev->dev); in ravb_probe()
3134 clk_unprepare(priv->refclk); in ravb_probe()
3146 const struct ravb_hw_info *info = priv->info; in ravb_remove()
3147 struct device *dev = &priv->pdev->dev; in ravb_remove()
3155 if (info->nc_queues) in ravb_remove()
3156 netif_napi_del(&priv->napi[RAVB_NC]); in ravb_remove()
3157 netif_napi_del(&priv->napi[RAVB_BE]); in ravb_remove()
3161 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, in ravb_remove()
3162 priv->desc_bat_dma); in ravb_remove()
3164 pm_runtime_put_sync_suspend(&pdev->dev); in ravb_remove()
3165 pm_runtime_disable(&pdev->dev); in ravb_remove()
3167 clk_unprepare(priv->refclk); in ravb_remove()
3168 reset_control_assert(priv->rstc); in ravb_remove()
3176 const struct ravb_hw_info *info = priv->info; in ravb_wol_setup()
3184 synchronize_irq(priv->emac_irq); in ravb_wol_setup()
3185 if (info->nc_queues) in ravb_wol_setup()
3186 napi_disable(&priv->napi[RAVB_NC]); in ravb_wol_setup()
3187 napi_disable(&priv->napi[RAVB_BE]); in ravb_wol_setup()
3193 if (priv->info->ccc_gac) in ravb_wol_setup()
3196 return enable_irq_wake(priv->emac_irq); in ravb_wol_setup()
3202 const struct ravb_hw_info *info = priv->info; in ravb_wol_restore()
3215 if (priv->info->ccc_gac) in ravb_wol_restore()
3216 ravb_ptp_init(ndev, priv->pdev); in ravb_wol_restore()
3218 if (info->nc_queues) in ravb_wol_restore()
3219 napi_enable(&priv->napi[RAVB_NC]); in ravb_wol_restore()
3220 napi_enable(&priv->napi[RAVB_BE]); in ravb_wol_restore()
3227 return disable_irq_wake(priv->emac_irq); in ravb_wol_restore()
3242 if (priv->wol_enabled) { in ravb_suspend()
3253 ret = pm_runtime_force_suspend(&priv->pdev->dev); in ravb_suspend()
3258 return reset_control_assert(priv->rstc); in ravb_suspend()
3267 ret = reset_control_deassert(priv->rstc); in ravb_resume()
3276 if (priv->wol_enabled) in ravb_resume()
3297 if (!priv->wol_enabled) { in ravb_resume()
3310 clk_disable(priv->refclk); in ravb_runtime_suspend()
3320 return clk_enable(priv->refclk); in ravb_runtime_resume()