Lines Matching +full:num +full:- +full:rxq
1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * This driver is designed for the non-CPM ethernet controllers
13 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
20 * B-V +1.62
25 * is therefore conveyed through an OF-style device tree.
45 * pre-allocated skb, and so after the skb is passed up to the
93 #include <linux/dma-mapping.h>
114 bdp->bufPtr = cpu_to_be32(buf); in gfar_init_rxbdp()
117 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) in gfar_init_rxbdp()
122 bdp->lstatus = cpu_to_be32(lstatus); in gfar_init_rxbdp()
127 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_init_tx_rx_base()
131 baddr = &regs->tbase0; in gfar_init_tx_rx_base()
132 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_init_tx_rx_base()
133 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base); in gfar_init_tx_rx_base()
137 baddr = &regs->rbase0; in gfar_init_tx_rx_base()
138 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_init_tx_rx_base()
139 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base); in gfar_init_tx_rx_base()
146 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_init_rqprm()
150 baddr = &regs->rqprm0; in gfar_init_rqprm()
151 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_init_rqprm()
152 gfar_write(baddr, priv->rx_queue[i]->rx_ring_size | in gfar_init_rqprm()
161 priv->uses_rxfcb = 0; in gfar_rx_offload_en()
163 if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) in gfar_rx_offload_en()
164 priv->uses_rxfcb = 1; in gfar_rx_offload_en()
166 if (priv->hwts_rx_en || priv->rx_filer_enable) in gfar_rx_offload_en()
167 priv->uses_rxfcb = 1; in gfar_rx_offload_en()
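When uses_rxfcb is set, the controller prepends an RX Frame Control Block to every received frame. A minimal sketch of the consumer side, mirroring gfar_process_frame() further down and assuming GMAC_FCB_LEN (the 8-byte FCB length) from gianfar.h:

	/* strip the hardware-prepended FCB before the Ethernet header */
	if (priv->uses_rxfcb)
		skb_pull(skb, GMAC_FCB_LEN);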
172 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_mac_rx_config()
175 if (priv->rx_filer_enable) { in gfar_mac_rx_config()
178 gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0); in gfar_mac_rx_config()
182 if (priv->ndev->flags & IFF_PROMISC) in gfar_mac_rx_config()
185 if (priv->ndev->features & NETIF_F_RXCSUM) in gfar_mac_rx_config()
188 if (priv->extended_hash) in gfar_mac_rx_config()
191 if (priv->padding) { in gfar_mac_rx_config()
193 rctrl |= RCTRL_PADDING(priv->padding); in gfar_mac_rx_config()
197 if (priv->hwts_rx_en) in gfar_mac_rx_config()
200 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) in gfar_mac_rx_config()
204 gfar_write(&regs->rctrl, rctrl); in gfar_mac_rx_config()
207 gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL); in gfar_mac_rx_config()
211 gfar_write(&regs->rctrl, rctrl); in gfar_mac_rx_config()
216 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_mac_tx_config()
219 if (priv->ndev->features & NETIF_F_IP_CSUM) in gfar_mac_tx_config()
222 if (priv->prio_sched_en) in gfar_mac_tx_config()
226 gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT); in gfar_mac_tx_config()
227 gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT); in gfar_mac_tx_config()
230 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) in gfar_mac_tx_config()
233 gfar_write(&regs->tctrl, tctrl); in gfar_mac_tx_config()
239 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_configure_coalescing()
242 if (priv->mode == MQ_MG_MODE) { in gfar_configure_coalescing()
245 baddr = &regs->txic0; in gfar_configure_coalescing()
246 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { in gfar_configure_coalescing()
248 if (likely(priv->tx_queue[i]->txcoalescing)) in gfar_configure_coalescing()
249 gfar_write(baddr + i, priv->tx_queue[i]->txic); in gfar_configure_coalescing()
252 baddr = &regs->rxic0; in gfar_configure_coalescing()
253 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) { in gfar_configure_coalescing()
255 if (likely(priv->rx_queue[i]->rxcoalescing)) in gfar_configure_coalescing()
256 gfar_write(baddr + i, priv->rx_queue[i]->rxic); in gfar_configure_coalescing()
259 /* Backward compatible case -- even if we enable in gfar_configure_coalescing()
262 gfar_write(&regs->txic, 0); in gfar_configure_coalescing()
263 if (likely(priv->tx_queue[0]->txcoalescing)) in gfar_configure_coalescing()
264 gfar_write(&regs->txic, priv->tx_queue[0]->txic); in gfar_configure_coalescing()
266 gfar_write(&regs->rxic, 0); in gfar_configure_coalescing()
267 if (unlikely(priv->rx_queue[0]->rxcoalescing)) in gfar_configure_coalescing()
268 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic); in gfar_configure_coalescing()
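The txic/rxic values written above are interrupt-coalescing thresholds. A hedged sketch of how such a value is composed, assuming the mk_ic_value() helper and DEFAULT_* constants from gianfar.h:

	/* enable coalescing: interrupt after 'count' frames or 'time' ticks */
	priv->tx_queue[0]->txic = mk_ic_value(DEFAULT_TXCOUNT, DEFAULT_TXTIME);
	priv->rx_queue[0]->rxic = mk_ic_value(DEFAULT_RXCOUNT, DEFAULT_RXTIME);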
282 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_get_stats64()
283 stats->rx_packets += priv->rx_queue[i]->stats.rx_packets; in gfar_get_stats64()
284 stats->rx_bytes += priv->rx_queue[i]->stats.rx_bytes; in gfar_get_stats64()
285 stats->rx_dropped += priv->rx_queue[i]->stats.rx_dropped; in gfar_get_stats64()
288 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_get_stats64()
289 stats->tx_bytes += priv->tx_queue[i]->stats.tx_bytes; in gfar_get_stats64()
290 stats->tx_packets += priv->tx_queue[i]->stats.tx_packets; in gfar_get_stats64()
293 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { in gfar_get_stats64()
294 struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon; in gfar_get_stats64()
299 spin_lock_irqsave(&priv->rmon_overflow.lock, flags); in gfar_get_stats64()
300 car = gfar_read(&rmon->car1) & CAR1_C1RDR; in gfar_get_stats64()
303 rdrp = gfar_read(&rmon->rdrp); in gfar_get_stats64()
304 car = gfar_read(&rmon->car1) & CAR1_C1RDR; in gfar_get_stats64()
307 priv->rmon_overflow.rdrp++; in gfar_get_stats64()
308 gfar_write(&rmon->car1, car); in gfar_get_stats64()
310 rdrp_offset = priv->rmon_overflow.rdrp; in gfar_get_stats64()
311 spin_unlock_irqrestore(&priv->rmon_overflow.lock, flags); in gfar_get_stats64()
313 stats->rx_missed_errors = rdrp + (rdrp_offset << 16); in gfar_get_stats64()
322 * 2) Use the 8 most significant bits as a hash into a 256-entry
323 * table. The table is controlled through 8 32-bit registers:
324 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
336 int width = priv->hash_width; in gfar_set_hash_for_addr()
337 u8 whichbit = (result >> (32 - width)) & 0x1f; in gfar_set_hash_for_addr()
338 u8 whichreg = result >> (32 - width + 5); in gfar_set_hash_for_addr()
339 u32 value = (1 << (31-whichbit)); in gfar_set_hash_for_addr()
341 tempval = gfar_read(priv->hash_regs[whichreg]); in gfar_set_hash_for_addr()
343 gfar_write(priv->hash_regs[whichreg], tempval); in gfar_set_hash_for_addr()
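A worked example of the indexing math above, for the non-extended case (hash_width == 8, eight 32-bit group-address registers):

	/* result = 0xa3000000 (top 8 bits 0b10100011):
	 *   whichbit = (result >> 24) & 0x1f = 0xa3 & 0x1f = 3
	 *   whichreg = result >> 29           = 0b101      = 5
	 *   value    = 1 << (31 - 3)          = 0x10000000
	 * i.e. bit 28 of hash_regs[5] (gaddr5) gets set.
	 */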
349 static void gfar_set_mac_for_addr(struct net_device *dev, int num, in gfar_set_mac_for_addr() argument
353 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_set_mac_for_addr()
355 u32 __iomem *macptr = &regs->macstnaddr1; in gfar_set_mac_for_addr()
357 macptr += num*2; in gfar_set_mac_for_addr()
381 gfar_set_mac_for_addr(dev, 0, dev->dev_addr); in gfar_set_mac_addr()
389 for (i = 0; i < priv->num_grps; i++) { in gfar_ints_disable()
390 struct gfar __iomem *regs = priv->gfargrp[i].regs; in gfar_ints_disable()
392 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); in gfar_ints_disable()
395 gfar_write(&regs->imask, IMASK_INIT_CLEAR); in gfar_ints_disable()
402 for (i = 0; i < priv->num_grps; i++) { in gfar_ints_enable()
403 struct gfar __iomem *regs = priv->gfargrp[i].regs; in gfar_ints_enable()
405 gfar_write(&regs->imask, in gfar_ints_enable()
406 IMASK_DEFAULT | priv->rmon_overflow.imask); in gfar_ints_enable()
414 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_alloc_tx_queues()
415 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), in gfar_alloc_tx_queues()
417 if (!priv->tx_queue[i]) in gfar_alloc_tx_queues()
418 return -ENOMEM; in gfar_alloc_tx_queues()
420 priv->tx_queue[i]->tx_skbuff = NULL; in gfar_alloc_tx_queues()
421 priv->tx_queue[i]->qindex = i; in gfar_alloc_tx_queues()
422 priv->tx_queue[i]->dev = priv->ndev; in gfar_alloc_tx_queues()
423 spin_lock_init(&(priv->tx_queue[i]->txlock)); in gfar_alloc_tx_queues()
432 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_alloc_rx_queues()
433 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), in gfar_alloc_rx_queues()
435 if (!priv->rx_queue[i]) in gfar_alloc_rx_queues()
436 return -ENOMEM; in gfar_alloc_rx_queues()
438 priv->rx_queue[i]->qindex = i; in gfar_alloc_rx_queues()
439 priv->rx_queue[i]->ndev = priv->ndev; in gfar_alloc_rx_queues()
448 for (i = 0; i < priv->num_tx_queues; i++) in gfar_free_tx_queues()
449 kfree(priv->tx_queue[i]); in gfar_free_tx_queues()
456 for (i = 0; i < priv->num_rx_queues; i++) in gfar_free_rx_queues()
457 kfree(priv->rx_queue[i]); in gfar_free_rx_queues()
465 if (priv->gfargrp[i].regs) in unmap_group_regs()
466 iounmap(priv->gfargrp[i].regs); in unmap_group_regs()
473 for (i = 0; i < priv->num_grps; i++) in free_gfar_dev()
475 kfree(priv->gfargrp[i].irqinfo[j]); in free_gfar_dev()
476 priv->gfargrp[i].irqinfo[j] = NULL; in free_gfar_dev()
479 free_netdev(priv->ndev); in free_gfar_dev()
486 for (i = 0; i < priv->num_grps; i++) { in disable_napi()
487 napi_disable(&priv->gfargrp[i].napi_rx); in disable_napi()
488 napi_disable(&priv->gfargrp[i].napi_tx); in disable_napi()
496 for (i = 0; i < priv->num_grps; i++) { in enable_napi()
497 napi_enable(&priv->gfargrp[i].napi_rx); in enable_napi()
498 napi_enable(&priv->gfargrp[i].napi_tx); in enable_napi()
505 struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps]; in gfar_parse_group()
509 grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo), in gfar_parse_group()
511 if (!grp->irqinfo[i]) in gfar_parse_group()
512 return -ENOMEM; in gfar_parse_group()
515 grp->regs = of_iomap(np, 0); in gfar_parse_group()
516 if (!grp->regs) in gfar_parse_group()
517 return -ENOMEM; in gfar_parse_group()
519 gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0); in gfar_parse_group()
523 gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1); in gfar_parse_group()
524 gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2); in gfar_parse_group()
525 if (!gfar_irq(grp, TX)->irq || in gfar_parse_group()
526 !gfar_irq(grp, RX)->irq || in gfar_parse_group()
527 !gfar_irq(grp, ER)->irq) in gfar_parse_group()
528 return -EINVAL; in gfar_parse_group()
531 grp->priv = priv; in gfar_parse_group()
532 spin_lock_init(&grp->grplock); in gfar_parse_group()
533 if (priv->mode == MQ_MG_MODE) { in gfar_parse_group()
535 grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); in gfar_parse_group()
536 grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); in gfar_parse_group()
538 grp->rx_bit_map = 0xFF; in gfar_parse_group()
539 grp->tx_bit_map = 0xFF; in gfar_parse_group()
545 grp->rx_bit_map = bitrev8(grp->rx_bit_map); in gfar_parse_group()
546 grp->tx_bit_map = bitrev8(grp->tx_bit_map); in gfar_parse_group()
551 for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) { in gfar_parse_group()
552 if (!grp->rx_queue) in gfar_parse_group()
553 grp->rx_queue = priv->rx_queue[i]; in gfar_parse_group()
554 grp->num_rx_queues++; in gfar_parse_group()
555 grp->rstat |= (RSTAT_CLEAR_RHALT >> i); in gfar_parse_group()
556 priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i); in gfar_parse_group()
557 priv->rx_queue[i]->grp = grp; in gfar_parse_group()
560 for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) { in gfar_parse_group()
561 if (!grp->tx_queue) in gfar_parse_group()
562 grp->tx_queue = priv->tx_queue[i]; in gfar_parse_group()
563 grp->num_tx_queues++; in gfar_parse_group()
564 grp->tstat |= (TSTAT_CLEAR_THALT >> i); in gfar_parse_group()
565 priv->tqueue |= (TQUEUE_EN0 >> i); in gfar_parse_group()
566 priv->tx_queue[i]->grp = grp; in gfar_parse_group()
569 priv->num_grps++; in gfar_parse_group()
577 int num = 0; in gfar_of_group_count() local
580 if (of_node_name_eq(child, "queue-group")) in gfar_of_group_count()
581 num++; in gfar_of_group_count()
583 return num; in gfar_of_group_count()
592 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_get_interface()
595 ecntrl = gfar_read(&regs->ecntrl); in gfar_get_interface()
612 phy_interface_t interface = priv->interface; in gfar_get_interface()
624 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) in gfar_get_interface()
637 struct device_node *np = ofdev->dev.of_node; in gfar_of_init()
645 return -ENODEV; in gfar_of_init()
660 dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", in gfar_of_init()
663 return -EINVAL; in gfar_of_init()
667 num_rx_qs = num_grps; /* one rxq per int group */ in gfar_of_init()
674 return -EINVAL; in gfar_of_init()
681 return -EINVAL; in gfar_of_init()
687 return -ENOMEM; in gfar_of_init()
690 priv->ndev = dev; in gfar_of_init()
692 priv->mode = mode; in gfar_of_init()
694 priv->num_tx_queues = num_tx_qs; in gfar_of_init()
696 priv->num_rx_queues = num_rx_qs; in gfar_of_init()
713 INIT_LIST_HEAD(&priv->rx_list.list); in gfar_of_init()
714 priv->rx_list.count = 0; in gfar_of_init()
715 mutex_init(&priv->rx_queue_access); in gfar_of_init()
718 priv->gfargrp[i].regs = NULL; in gfar_of_init()
721 if (priv->mode == MQ_MG_MODE) { in gfar_of_init()
723 if (!of_node_name_eq(child, "queue-group")) in gfar_of_init()
738 if (of_property_read_bool(np, "bd-stash")) { in gfar_of_init()
739 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; in gfar_of_init()
740 priv->bd_stash_en = 1; in gfar_of_init()
743 err = of_property_read_u32(np, "rx-stash-len", &stash_len); in gfar_of_init()
746 priv->rx_stash_size = stash_len; in gfar_of_init()
748 err = of_property_read_u32(np, "rx-stash-idx", &stash_idx); in gfar_of_init()
751 priv->rx_stash_index = stash_idx; in gfar_of_init()
754 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; in gfar_of_init()
757 if (err == -EPROBE_DEFER) in gfar_of_init()
761 dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr); in gfar_of_init()
765 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | in gfar_of_init()
771 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | in gfar_of_init()
783 * rgmii-id really needs to be specified. Other types can be in gfar_of_init()
788 priv->interface = interface; in gfar_of_init()
790 priv->interface = gfar_get_interface(dev); in gfar_of_init()
792 if (of_property_read_bool(np, "fsl,magic-packet")) in gfar_of_init()
793 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; in gfar_of_init()
795 if (of_property_read_bool(np, "fsl,wake-on-filer")) in gfar_of_init()
796 priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER; in gfar_of_init()
798 priv->phy_node = of_parse_phandle(np, "phy-handle", 0); in gfar_of_init()
803 if (!priv->phy_node && of_phy_is_fixed_link(np)) { in gfar_of_init()
808 priv->phy_node = of_node_get(np); in gfar_of_init()
812 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); in gfar_of_init()
832 rqfar--; in cluster_entry_per_class()
834 priv->ftp_rqfpr[rqfar] = rqfpr; in cluster_entry_per_class()
835 priv->ftp_rqfcr[rqfar] = rqfcr; in cluster_entry_per_class()
838 rqfar--; in cluster_entry_per_class()
840 priv->ftp_rqfpr[rqfar] = rqfpr; in cluster_entry_per_class()
841 priv->ftp_rqfcr[rqfar] = rqfcr; in cluster_entry_per_class()
844 rqfar--; in cluster_entry_per_class()
847 priv->ftp_rqfcr[rqfar] = rqfcr; in cluster_entry_per_class()
848 priv->ftp_rqfpr[rqfar] = rqfpr; in cluster_entry_per_class()
851 rqfar--; in cluster_entry_per_class()
854 priv->ftp_rqfcr[rqfar] = rqfcr; in cluster_entry_per_class()
855 priv->ftp_rqfpr[rqfar] = rqfpr; in cluster_entry_per_class()
870 priv->ftp_rqfcr[rqfar] = rqfcr; in gfar_init_filer_table()
871 priv->ftp_rqfpr[rqfar] = rqfpr; in gfar_init_filer_table()
881 /* cur_filer_idx indicates the first non-masked rule */ in gfar_init_filer_table()
882 priv->cur_filer_idx = rqfar; in gfar_init_filer_table()
887 priv->ftp_rqfcr[i] = rqfcr; in gfar_init_filer_table()
888 priv->ftp_rqfpr[i] = rqfpr; in gfar_init_filer_table()
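The ftp_rqfcr/ftp_rqfpr arrays shadow the hardware filer table. A hedged sketch of how one entry reaches the hardware, assuming the gfar_write_filer() helper from gianfar.h (rule address, control and property registers written in sequence):

	static inline void gfar_write_filer(struct gfar_private *priv,
					    unsigned int far, unsigned int fcr,
					    unsigned int fpr)
	{
		struct gfar __iomem *regs = priv->gfargrp[0].regs;

		gfar_write(&regs->rqfar, far);
		gfar_write(&regs->rqfcr, fcr);
		gfar_write(&regs->rqfpr, fpr);
	}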
904 priv->errata |= GFAR_ERRATA_74; in __gfar_detect_errata_83xx()
909 priv->errata |= GFAR_ERRATA_76; in __gfar_detect_errata_83xx()
913 priv->errata |= GFAR_ERRATA_12; in __gfar_detect_errata_83xx()
921 priv->errata |= GFAR_ERRATA_12; in __gfar_detect_errata_85xx()
926 priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */ in __gfar_detect_errata_85xx()
932 struct device *dev = &priv->ofdev->dev; in gfar_detect_errata()
935 priv->errata |= GFAR_ERRATA_A002; in gfar_detect_errata()
940 else /* non-mpc85xx parts, i.e. e300 core based */ in gfar_detect_errata()
944 if (priv->errata) in gfar_detect_errata()
946 priv->errata); in gfar_detect_errata()
951 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_init_addr_hash_table()
953 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { in gfar_init_addr_hash_table()
954 priv->extended_hash = 1; in gfar_init_addr_hash_table()
955 priv->hash_width = 9; in gfar_init_addr_hash_table()
957 priv->hash_regs[0] = &regs->igaddr0; in gfar_init_addr_hash_table()
958 priv->hash_regs[1] = &regs->igaddr1; in gfar_init_addr_hash_table()
959 priv->hash_regs[2] = &regs->igaddr2; in gfar_init_addr_hash_table()
960 priv->hash_regs[3] = &regs->igaddr3; in gfar_init_addr_hash_table()
961 priv->hash_regs[4] = &regs->igaddr4; in gfar_init_addr_hash_table()
962 priv->hash_regs[5] = &regs->igaddr5; in gfar_init_addr_hash_table()
963 priv->hash_regs[6] = &regs->igaddr6; in gfar_init_addr_hash_table()
964 priv->hash_regs[7] = &regs->igaddr7; in gfar_init_addr_hash_table()
965 priv->hash_regs[8] = &regs->gaddr0; in gfar_init_addr_hash_table()
966 priv->hash_regs[9] = &regs->gaddr1; in gfar_init_addr_hash_table()
967 priv->hash_regs[10] = &regs->gaddr2; in gfar_init_addr_hash_table()
968 priv->hash_regs[11] = &regs->gaddr3; in gfar_init_addr_hash_table()
969 priv->hash_regs[12] = &regs->gaddr4; in gfar_init_addr_hash_table()
970 priv->hash_regs[13] = &regs->gaddr5; in gfar_init_addr_hash_table()
971 priv->hash_regs[14] = &regs->gaddr6; in gfar_init_addr_hash_table()
972 priv->hash_regs[15] = &regs->gaddr7; in gfar_init_addr_hash_table()
975 priv->extended_hash = 0; in gfar_init_addr_hash_table()
976 priv->hash_width = 8; in gfar_init_addr_hash_table()
978 priv->hash_regs[0] = &regs->gaddr0; in gfar_init_addr_hash_table()
979 priv->hash_regs[1] = &regs->gaddr1; in gfar_init_addr_hash_table()
980 priv->hash_regs[2] = &regs->gaddr2; in gfar_init_addr_hash_table()
981 priv->hash_regs[3] = &regs->gaddr3; in gfar_init_addr_hash_table()
982 priv->hash_regs[4] = &regs->gaddr4; in gfar_init_addr_hash_table()
983 priv->hash_regs[5] = &regs->gaddr5; in gfar_init_addr_hash_table()
984 priv->hash_regs[6] = &regs->gaddr6; in gfar_init_addr_hash_table()
985 priv->hash_regs[7] = &regs->gaddr7; in gfar_init_addr_hash_table()
999 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are in __gfar_is_rx_idle()
1000 * the same as bits 23-30, the eTSEC Rx is assumed to be idle in __gfar_is_rx_idle()
1003 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c); in __gfar_is_rx_idle()
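A sketch of the bit comparison the comment describes, assuming the mask-and-compare form used by the driver (0x7f807f80 keeps bits 7-14 and 23-30, which are then compared as two halfwords):

	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return true;	/* Rx side is idle */
	return false;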
1014 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_halt_nodisable()
1025 tempval = gfar_read(&regs->dmactrl); in gfar_halt_nodisable()
1027 gfar_write(&regs->dmactrl, tempval); in gfar_halt_nodisable()
1033 timeout--; in gfar_halt_nodisable()
1047 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_halt()
1051 gfar_write(&regs->rqueue, 0); in gfar_halt()
1052 gfar_write(&regs->tqueue, 0); in gfar_halt()
1059 tempval = gfar_read(&regs->maccfg1); in gfar_halt()
1061 gfar_write(&regs->maccfg1, tempval); in gfar_halt()
1067 struct gfar_private *priv = netdev_priv(tx_queue->dev); in free_skb_tx_queue()
1070 txbdp = tx_queue->tx_bd_base; in free_skb_tx_queue()
1072 for (i = 0; i < tx_queue->tx_ring_size; i++) { in free_skb_tx_queue()
1073 if (!tx_queue->tx_skbuff[i]) in free_skb_tx_queue()
1076 dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr), in free_skb_tx_queue()
1077 be16_to_cpu(txbdp->length), DMA_TO_DEVICE); in free_skb_tx_queue()
1078 txbdp->lstatus = 0; in free_skb_tx_queue()
1079 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; in free_skb_tx_queue()
1082 dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr), in free_skb_tx_queue()
1083 be16_to_cpu(txbdp->length), in free_skb_tx_queue()
1087 dev_kfree_skb_any(tx_queue->tx_skbuff[i]); in free_skb_tx_queue()
1088 tx_queue->tx_skbuff[i] = NULL; in free_skb_tx_queue()
1090 kfree(tx_queue->tx_skbuff); in free_skb_tx_queue()
1091 tx_queue->tx_skbuff = NULL; in free_skb_tx_queue()
1098 struct rxbd8 *rxbdp = rx_queue->rx_bd_base; in free_skb_rx_queue()
1100 dev_kfree_skb(rx_queue->skb); in free_skb_rx_queue()
1102 for (i = 0; i < rx_queue->rx_ring_size; i++) { in free_skb_rx_queue()
1103 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i]; in free_skb_rx_queue()
1105 rxbdp->lstatus = 0; in free_skb_rx_queue()
1106 rxbdp->bufPtr = 0; in free_skb_rx_queue()
1109 if (!rxb->page) in free_skb_rx_queue()
1112 dma_unmap_page(rx_queue->dev, rxb->dma, in free_skb_rx_queue()
1114 __free_page(rxb->page); in free_skb_rx_queue()
1116 rxb->page = NULL; in free_skb_rx_queue()
1119 kfree(rx_queue->rx_buff); in free_skb_rx_queue()
1120 rx_queue->rx_buff = NULL; in free_skb_rx_queue()
1133 for (i = 0; i < priv->num_tx_queues; i++) { in free_skb_resources()
1136 tx_queue = priv->tx_queue[i]; in free_skb_resources()
1137 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex); in free_skb_resources()
1138 if (tx_queue->tx_skbuff) in free_skb_resources()
1143 for (i = 0; i < priv->num_rx_queues; i++) { in free_skb_resources()
1144 rx_queue = priv->rx_queue[i]; in free_skb_resources()
1145 if (rx_queue->rx_buff) in free_skb_resources()
1149 dma_free_coherent(priv->dev, in free_skb_resources()
1150 sizeof(struct txbd8) * priv->total_tx_ring_size + in free_skb_resources()
1151 sizeof(struct rxbd8) * priv->total_rx_ring_size, in free_skb_resources()
1152 priv->tx_queue[0]->tx_bd_base, in free_skb_resources()
1153 priv->tx_queue[0]->tx_bd_dma_base); in free_skb_resources()
1163 set_bit(GFAR_DOWN, &priv->state); in stop_gfar()
1171 phy_stop(dev->phydev); in stop_gfar()
1178 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_start()
1183 gfar_write(&regs->rqueue, priv->rqueue); in gfar_start()
1184 gfar_write(&regs->tqueue, priv->tqueue); in gfar_start()
1187 tempval = gfar_read(&regs->dmactrl); in gfar_start()
1189 gfar_write(&regs->dmactrl, tempval); in gfar_start()
1192 tempval = gfar_read(&regs->dmactrl); in gfar_start()
1194 gfar_write(&regs->dmactrl, tempval); in gfar_start()
1196 for (i = 0; i < priv->num_grps; i++) { in gfar_start()
1197 regs = priv->gfargrp[i].regs; in gfar_start()
1199 gfar_write(&regs->tstat, priv->gfargrp[i].tstat); in gfar_start()
1200 gfar_write(&regs->rstat, priv->gfargrp[i].rstat); in gfar_start()
1204 tempval = gfar_read(&regs->maccfg1); in gfar_start()
1206 gfar_write(&regs->maccfg1, tempval); in gfar_start()
1210 netif_trans_update(priv->ndev); /* prevent tx timeout */ in gfar_start()
1213 static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb) in gfar_new_page() argument
1222 addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); in gfar_new_page()
1223 if (unlikely(dma_mapping_error(rxq->dev, addr))) { in gfar_new_page()
1229 rxb->dma = addr; in gfar_new_page()
1230 rxb->page = page; in gfar_new_page()
1231 rxb->page_offset = 0; in gfar_new_page()
1238 struct gfar_private *priv = netdev_priv(rx_queue->ndev); in gfar_rx_alloc_err()
1239 struct gfar_extra_stats *estats = &priv->extra_stats; in gfar_rx_alloc_err()
1241 netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n"); in gfar_rx_alloc_err()
1242 atomic64_inc(&estats->rx_alloc_err); in gfar_rx_alloc_err()
1252 i = rx_queue->next_to_use; in gfar_alloc_rx_buffs()
1253 bdp = &rx_queue->rx_bd_base[i]; in gfar_alloc_rx_buffs()
1254 rxb = &rx_queue->rx_buff[i]; in gfar_alloc_rx_buffs()
1256 while (alloc_cnt--) { in gfar_alloc_rx_buffs()
1258 if (unlikely(!rxb->page)) { in gfar_alloc_rx_buffs()
1267 rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT); in gfar_alloc_rx_buffs()
1273 if (unlikely(++i == rx_queue->rx_ring_size)) { in gfar_alloc_rx_buffs()
1275 bdp = rx_queue->rx_bd_base; in gfar_alloc_rx_buffs()
1276 rxb = rx_queue->rx_buff; in gfar_alloc_rx_buffs()
1280 rx_queue->next_to_use = i; in gfar_alloc_rx_buffs()
1281 rx_queue->next_to_alloc = i; in gfar_alloc_rx_buffs()
1287 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_init_bds()
1294 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_init_bds()
1295 tx_queue = priv->tx_queue[i]; in gfar_init_bds()
1297 tx_queue->num_txbdfree = tx_queue->tx_ring_size; in gfar_init_bds()
1298 tx_queue->dirty_tx = tx_queue->tx_bd_base; in gfar_init_bds()
1299 tx_queue->cur_tx = tx_queue->tx_bd_base; in gfar_init_bds()
1300 tx_queue->skb_curtx = 0; in gfar_init_bds()
1301 tx_queue->skb_dirtytx = 0; in gfar_init_bds()
1304 txbdp = tx_queue->tx_bd_base; in gfar_init_bds()
1305 for (j = 0; j < tx_queue->tx_ring_size; j++) { in gfar_init_bds()
1306 txbdp->lstatus = 0; in gfar_init_bds()
1307 txbdp->bufPtr = 0; in gfar_init_bds()
1312 txbdp--; in gfar_init_bds()
1313 txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) | in gfar_init_bds()
1317 rfbptr = &regs->rfbptr0; in gfar_init_bds()
1318 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_init_bds()
1319 rx_queue = priv->rx_queue[i]; in gfar_init_bds()
1321 rx_queue->next_to_clean = 0; in gfar_init_bds()
1322 rx_queue->next_to_use = 0; in gfar_init_bds()
1323 rx_queue->next_to_alloc = 0; in gfar_init_bds()
1330 rx_queue->rfbptr = rfbptr; in gfar_init_bds()
1341 struct device *dev = priv->dev; in gfar_alloc_skb_resources()
1345 priv->total_tx_ring_size = 0; in gfar_alloc_skb_resources()
1346 for (i = 0; i < priv->num_tx_queues; i++) in gfar_alloc_skb_resources()
1347 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; in gfar_alloc_skb_resources()
1349 priv->total_rx_ring_size = 0; in gfar_alloc_skb_resources()
1350 for (i = 0; i < priv->num_rx_queues; i++) in gfar_alloc_skb_resources()
1351 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; in gfar_alloc_skb_resources()
1355 (priv->total_tx_ring_size * in gfar_alloc_skb_resources()
1357 (priv->total_rx_ring_size * in gfar_alloc_skb_resources()
1361 return -ENOMEM; in gfar_alloc_skb_resources()
1363 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_alloc_skb_resources()
1364 tx_queue = priv->tx_queue[i]; in gfar_alloc_skb_resources()
1365 tx_queue->tx_bd_base = vaddr; in gfar_alloc_skb_resources()
1366 tx_queue->tx_bd_dma_base = addr; in gfar_alloc_skb_resources()
1367 tx_queue->dev = ndev; in gfar_alloc_skb_resources()
1369 addr += sizeof(struct txbd8) * tx_queue->tx_ring_size; in gfar_alloc_skb_resources()
1370 vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size; in gfar_alloc_skb_resources()
1374 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_alloc_skb_resources()
1375 rx_queue = priv->rx_queue[i]; in gfar_alloc_skb_resources()
1376 rx_queue->rx_bd_base = vaddr; in gfar_alloc_skb_resources()
1377 rx_queue->rx_bd_dma_base = addr; in gfar_alloc_skb_resources()
1378 rx_queue->ndev = ndev; in gfar_alloc_skb_resources()
1379 rx_queue->dev = dev; in gfar_alloc_skb_resources()
1380 addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; in gfar_alloc_skb_resources()
1381 vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; in gfar_alloc_skb_resources()
1385 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_alloc_skb_resources()
1386 tx_queue = priv->tx_queue[i]; in gfar_alloc_skb_resources()
1387 tx_queue->tx_skbuff = in gfar_alloc_skb_resources()
1388 kmalloc_array(tx_queue->tx_ring_size, in gfar_alloc_skb_resources()
1389 sizeof(*tx_queue->tx_skbuff), in gfar_alloc_skb_resources()
1391 if (!tx_queue->tx_skbuff) in gfar_alloc_skb_resources()
1394 for (j = 0; j < tx_queue->tx_ring_size; j++) in gfar_alloc_skb_resources()
1395 tx_queue->tx_skbuff[j] = NULL; in gfar_alloc_skb_resources()
1398 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_alloc_skb_resources()
1399 rx_queue = priv->rx_queue[i]; in gfar_alloc_skb_resources()
1400 rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size, in gfar_alloc_skb_resources()
1401 sizeof(*rx_queue->rx_buff), in gfar_alloc_skb_resources()
1403 if (!rx_queue->rx_buff) in gfar_alloc_skb_resources()
1413 return -ENOMEM; in gfar_alloc_skb_resources()
1431 clear_bit(GFAR_DOWN, &priv->state); in startup_gfar()
1438 priv->oldlink = 0; in startup_gfar()
1439 priv->oldspeed = 0; in startup_gfar()
1440 priv->oldduplex = -1; in startup_gfar()
1442 phy_start(ndev->phydev); in startup_gfar()
1453 struct net_device *ndev = priv->ndev; in gfar_get_flowctrl_cfg()
1454 struct phy_device *phydev = ndev->phydev; in gfar_get_flowctrl_cfg()
1457 if (!phydev->duplex) in gfar_get_flowctrl_cfg()
1460 if (!priv->pause_aneg_en) { in gfar_get_flowctrl_cfg()
1461 if (priv->tx_pause_en) in gfar_get_flowctrl_cfg()
1463 if (priv->rx_pause_en) in gfar_get_flowctrl_cfg()
1470 if (phydev->pause) in gfar_get_flowctrl_cfg()
1472 if (phydev->asym_pause) in gfar_get_flowctrl_cfg()
1475 lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising); in gfar_get_flowctrl_cfg()
1488 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_update_link_state()
1489 struct net_device *ndev = priv->ndev; in gfar_update_link_state()
1490 struct phy_device *phydev = ndev->phydev; in gfar_update_link_state()
1494 if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) in gfar_update_link_state()
1497 if (phydev->link) { in gfar_update_link_state()
1498 u32 tempval1 = gfar_read(&regs->maccfg1); in gfar_update_link_state()
1499 u32 tempval = gfar_read(&regs->maccfg2); in gfar_update_link_state()
1500 u32 ecntrl = gfar_read(&regs->ecntrl); in gfar_update_link_state()
1503 if (phydev->duplex != priv->oldduplex) { in gfar_update_link_state()
1504 if (!(phydev->duplex)) in gfar_update_link_state()
1509 priv->oldduplex = phydev->duplex; in gfar_update_link_state()
1512 if (phydev->speed != priv->oldspeed) { in gfar_update_link_state()
1513 switch (phydev->speed) { in gfar_update_link_state()
1528 if (phydev->speed == SPEED_100) in gfar_update_link_state()
1534 netif_warn(priv, link, priv->ndev, in gfar_update_link_state()
1536 phydev->speed); in gfar_update_link_state()
1540 priv->oldspeed = phydev->speed; in gfar_update_link_state()
1548 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_update_link_state()
1551 rx_queue = priv->rx_queue[i]; in gfar_update_link_state()
1553 gfar_write(rx_queue->rfbptr, bdp_dma); in gfar_update_link_state()
1556 priv->tx_actual_en = 1; in gfar_update_link_state()
1560 priv->tx_actual_en = 0; in gfar_update_link_state()
1562 gfar_write(&regs->maccfg1, tempval1); in gfar_update_link_state()
1563 gfar_write(&regs->maccfg2, tempval); in gfar_update_link_state()
1564 gfar_write(&regs->ecntrl, ecntrl); in gfar_update_link_state()
1566 if (!priv->oldlink) in gfar_update_link_state()
1567 priv->oldlink = 1; in gfar_update_link_state()
1569 } else if (priv->oldlink) { in gfar_update_link_state()
1570 priv->oldlink = 0; in gfar_update_link_state()
1571 priv->oldspeed = 0; in gfar_update_link_state()
1572 priv->oldduplex = -1; in gfar_update_link_state()
1588 struct phy_device *phydev = dev->phydev; in adjust_link()
1590 if (unlikely(phydev->link != priv->oldlink || in adjust_link()
1591 (phydev->link && (phydev->duplex != priv->oldduplex || in adjust_link()
1592 phydev->speed != priv->oldspeed)))) in adjust_link()
1609 if (!priv->tbi_node) { in gfar_configure_serdes()
1610 dev_warn(&dev->dev, "error: SGMII mode requires that the " in gfar_configure_serdes()
1611 "device tree specify a tbi-handle\n"); in gfar_configure_serdes()
1615 tbiphy = of_phy_find_device(priv->tbi_node); in gfar_configure_serdes()
1617 dev_err(&dev->dev, "error: Could not get TBI device\n"); in gfar_configure_serdes()
1622 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured in gfar_configure_serdes()
1627 put_device(&tbiphy->mdio.dev); in gfar_configure_serdes()
1642 put_device(&tbiphy->mdio.dev); in gfar_configure_serdes()
1651 phy_interface_t interface = priv->interface; in init_phy()
1655 priv->oldlink = 0; in init_phy()
1656 priv->oldspeed = 0; in init_phy()
1657 priv->oldduplex = -1; in init_phy()
1659 phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, in init_phy()
1662 dev_err(&dev->dev, "could not attach to PHY\n"); in init_phy()
1663 return -ENODEV; in init_phy()
1669 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)) in init_phy()
1695 * payload. We set it to checksum, using a pseudo-header in gfar_tx_checksum()
1703 if (ip_hdr(skb)->protocol == IPPROTO_UDP) { in gfar_tx_checksum()
1705 fcb->phcs = (__force __be16)(udp_hdr(skb)->check); in gfar_tx_checksum()
1707 fcb->phcs = (__force __be16)(tcp_hdr(skb)->check); in gfar_tx_checksum()
1710 * frame (skb->data) and the start of the IP hdr. in gfar_tx_checksum()
1714 fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length); in gfar_tx_checksum()
1715 fcb->l4os = skb_network_header_len(skb); in gfar_tx_checksum()
1717 fcb->flags = flags; in gfar_tx_checksum()
1722 fcb->flags |= TXFCB_VLN; in gfar_tx_vlan()
1723 fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb)); in gfar_tx_vlan()
1731 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd; in skip_txbd()
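next_txbd(), used throughout gfar_start_xmit() and gfar_clean_tx_ring() below, is presumably just the single-step wrapper around skip_txbd() (a hedged sketch):

	static inline struct txbd8 *next_txbd(struct txbd8 *bdp,
					      struct txbd8 *base, int ring_size)
	{
		return skip_txbd(bdp, 1, base, ring_size);
	}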
1759 * It is pointed to by the dev->hard_start_xmit function pointer
1776 rq = skb->queue_mapping; in gfar_start_xmit()
1777 tx_queue = priv->tx_queue[rq]; in gfar_start_xmit()
1779 base = tx_queue->tx_bd_base; in gfar_start_xmit()
1780 regs = tx_queue->grp->regs; in gfar_start_xmit()
1782 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed); in gfar_start_xmit()
1784 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in gfar_start_xmit()
1785 priv->hwts_tx_en; in gfar_start_xmit()
1797 dev->stats.tx_errors++; in gfar_start_xmit()
1804 nr_frags = skb_shinfo(skb)->nr_frags; in gfar_start_xmit()
1813 if (nr_txbds > tx_queue->num_txbdfree) { in gfar_start_xmit()
1816 dev->stats.tx_fifo_errors++; in gfar_start_xmit()
1821 bytes_sent = skb->len; in gfar_start_xmit()
1822 tx_queue->stats.tx_bytes += bytes_sent; in gfar_start_xmit()
1824 GFAR_CB(skb)->bytes_sent = bytes_sent; in gfar_start_xmit()
1825 tx_queue->stats.tx_packets++; in gfar_start_xmit()
1827 txbdp = txbdp_start = tx_queue->cur_tx; in gfar_start_xmit()
1828 lstatus = be32_to_cpu(txbdp->lstatus); in gfar_start_xmit()
1833 memset(skb->data, 0, GMAC_TXPAL_LEN); in gfar_start_xmit()
1847 unlikely(gfar_csum_errata_76(priv, skb->len))) { in gfar_start_xmit()
1864 bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb), in gfar_start_xmit()
1866 if (unlikely(dma_mapping_error(priv->dev, bufaddr))) in gfar_start_xmit()
1869 txbdp_start->bufPtr = cpu_to_be32(bufaddr); in gfar_start_xmit()
1874 tx_queue->tx_ring_size); in gfar_start_xmit()
1883 frag = &skb_shinfo(skb)->frags[0]; in gfar_start_xmit()
1888 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); in gfar_start_xmit()
1892 lstatus = be32_to_cpu(txbdp->lstatus) | size | in gfar_start_xmit()
1896 if (i == nr_frags - 1) in gfar_start_xmit()
1899 bufaddr = skb_frag_dma_map(priv->dev, frag, 0, in gfar_start_xmit()
1901 if (unlikely(dma_mapping_error(priv->dev, bufaddr))) in gfar_start_xmit()
1905 txbdp->bufPtr = cpu_to_be32(bufaddr); in gfar_start_xmit()
1906 txbdp->lstatus = cpu_to_be32(lstatus); in gfar_start_xmit()
1918 u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus); in gfar_start_xmit()
1920 bufaddr = be32_to_cpu(txbdp_start->bufPtr); in gfar_start_xmit()
1924 (skb_headlen(skb) - fcb_len); in gfar_start_xmit()
1928 txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr); in gfar_start_xmit()
1929 txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts); in gfar_start_xmit()
1933 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in gfar_start_xmit()
1934 fcb->ptp = 1; in gfar_start_xmit()
1944 txbdp_start->lstatus = cpu_to_be32(lstatus); in gfar_start_xmit()
1948 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; in gfar_start_xmit()
1953 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & in gfar_start_xmit()
1954 TX_RING_MOD_MASK(tx_queue->tx_ring_size); in gfar_start_xmit()
1956 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); in gfar_start_xmit()
1963 spin_lock_bh(&tx_queue->txlock); in gfar_start_xmit()
1965 tx_queue->num_txbdfree -= (nr_txbds); in gfar_start_xmit()
1966 spin_unlock_bh(&tx_queue->txlock); in gfar_start_xmit()
1971 if (!tx_queue->num_txbdfree) { in gfar_start_xmit()
1974 dev->stats.tx_fifo_errors++; in gfar_start_xmit()
1978 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); in gfar_start_xmit()
1983 txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size); in gfar_start_xmit()
1985 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); in gfar_start_xmit()
1987 lstatus = be32_to_cpu(txbdp->lstatus); in gfar_start_xmit()
1992 txbdp->lstatus = cpu_to_be32(lstatus); in gfar_start_xmit()
1993 bufaddr = be32_to_cpu(txbdp->bufPtr); in gfar_start_xmit()
1994 dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length), in gfar_start_xmit()
1996 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); in gfar_start_xmit()
2006 gfar_set_mac_for_addr(dev, 0, dev->dev_addr); in gfar_set_mac_address()
2015 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) in gfar_change_mtu()
2018 if (dev->flags & IFF_UP) in gfar_change_mtu()
2021 WRITE_ONCE(dev->mtu, new_mtu); in gfar_change_mtu()
2023 if (dev->flags & IFF_UP) in gfar_change_mtu()
2026 clear_bit_unlock(GFAR_RESETTING, &priv->state); in gfar_change_mtu()
2035 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) in reset_gfar()
2041 clear_bit_unlock(GFAR_RESETTING, &priv->state); in reset_gfar()
2053 reset_gfar(priv->ndev); in gfar_reset_task()
2060 dev->stats.tx_errors++; in gfar_timeout()
2061 schedule_work(&priv->reset_task); in gfar_timeout()
2069 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) in gfar_hwtstamp_set()
2070 return -EFAULT; in gfar_hwtstamp_set()
2074 priv->hwts_tx_en = 0; in gfar_hwtstamp_set()
2077 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) in gfar_hwtstamp_set()
2078 return -ERANGE; in gfar_hwtstamp_set()
2079 priv->hwts_tx_en = 1; in gfar_hwtstamp_set()
2082 return -ERANGE; in gfar_hwtstamp_set()
2087 if (priv->hwts_rx_en) { in gfar_hwtstamp_set()
2088 priv->hwts_rx_en = 0; in gfar_hwtstamp_set()
2093 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) in gfar_hwtstamp_set()
2094 return -ERANGE; in gfar_hwtstamp_set()
2095 if (!priv->hwts_rx_en) { in gfar_hwtstamp_set()
2096 priv->hwts_rx_en = 1; in gfar_hwtstamp_set()
2103 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? in gfar_hwtstamp_set()
2104 -EFAULT : 0; in gfar_hwtstamp_set()
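From userspace, gfar_hwtstamp_set() is reached through the standard SIOCSHWTSTAMP ioctl. A hedged usage sketch (the interface name and socket setup are illustrative):

	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>

	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;
	ioctl(sockfd, SIOCSHWTSTAMP, &ifr);	/* sockfd: any AF_INET socket */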
2113 config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; in gfar_hwtstamp_get()
2114 config.rx_filter = (priv->hwts_rx_en ? in gfar_hwtstamp_get()
2117 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? in gfar_hwtstamp_get()
2118 -EFAULT : 0; in gfar_hwtstamp_get()
2123 struct phy_device *phydev = dev->phydev; in gfar_ioctl()
2126 return -EINVAL; in gfar_ioctl()
2134 return -ENODEV; in gfar_ioctl()
2142 struct net_device *dev = tx_queue->dev; in gfar_clean_tx_ring()
2147 struct txbd8 *base = tx_queue->tx_bd_base; in gfar_clean_tx_ring()
2150 int tx_ring_size = tx_queue->tx_ring_size; in gfar_clean_tx_ring()
2154 int tqi = tx_queue->qindex; in gfar_clean_tx_ring()
2160 bdp = tx_queue->dirty_tx; in gfar_clean_tx_ring()
2161 skb_dirtytx = tx_queue->skb_dirtytx; in gfar_clean_tx_ring()
2163 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { in gfar_clean_tx_ring()
2166 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in gfar_clean_tx_ring()
2167 priv->hwts_tx_en; in gfar_clean_tx_ring()
2169 frags = skb_shinfo(skb)->nr_frags; in gfar_clean_tx_ring()
2179 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); in gfar_clean_tx_ring()
2181 lstatus = be32_to_cpu(lbdp->lstatus); in gfar_clean_tx_ring()
2190 buflen = be16_to_cpu(next->length) + in gfar_clean_tx_ring()
2193 buflen = be16_to_cpu(bdp->length); in gfar_clean_tx_ring()
2195 dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr), in gfar_clean_tx_ring()
2202 ns = (__be64 *)(((uintptr_t)skb->data + 0x10) & ~0x7UL); in gfar_clean_tx_ring()
2216 dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr), in gfar_clean_tx_ring()
2217 be16_to_cpu(bdp->length), in gfar_clean_tx_ring()
2223 bytes_sent += GFAR_CB(skb)->bytes_sent; in gfar_clean_tx_ring()
2227 tx_queue->tx_skbuff[skb_dirtytx] = NULL; in gfar_clean_tx_ring()
2233 spin_lock(&tx_queue->txlock); in gfar_clean_tx_ring()
2234 tx_queue->num_txbdfree += nr_txbds; in gfar_clean_tx_ring()
2235 spin_unlock(&tx_queue->txlock); in gfar_clean_tx_ring()
2239 if (tx_queue->num_txbdfree && in gfar_clean_tx_ring()
2241 !(test_bit(GFAR_DOWN, &priv->state))) in gfar_clean_tx_ring()
2242 netif_wake_subqueue(priv->ndev, tqi); in gfar_clean_tx_ring()
2245 tx_queue->skb_dirtytx = skb_dirtytx; in gfar_clean_tx_ring()
2246 tx_queue->dirty_tx = bdp; in gfar_clean_tx_ring()
2254 struct net_device_stats *stats = &ndev->stats; in count_errors()
2255 struct gfar_extra_stats *estats = &priv->extra_stats; in count_errors()
2259 stats->rx_length_errors++; in count_errors()
2261 atomic64_inc(&estats->rx_trunc); in count_errors()
2267 stats->rx_length_errors++; in count_errors()
2270 atomic64_inc(&estats->rx_large); in count_errors()
2272 atomic64_inc(&estats->rx_short); in count_errors()
2275 stats->rx_frame_errors++; in count_errors()
2276 atomic64_inc(&estats->rx_nonoctet); in count_errors()
2279 atomic64_inc(&estats->rx_crcerr); in count_errors()
2280 stats->rx_crc_errors++; in count_errors()
2283 atomic64_inc(&estats->rx_overrun); in count_errors()
2284 stats->rx_over_errors++; in count_errors()
2294 ievent = gfar_read(&grp->regs->ievent); in gfar_receive()
2297 gfar_write(&grp->regs->ievent, IEVENT_FGPI); in gfar_receive()
2301 if (likely(napi_schedule_prep(&grp->napi_rx))) { in gfar_receive()
2302 spin_lock_irqsave(&grp->grplock, flags); in gfar_receive()
2303 imask = gfar_read(&grp->regs->imask); in gfar_receive()
2304 imask &= IMASK_RX_DISABLED | grp->priv->rmon_overflow.imask; in gfar_receive()
2305 gfar_write(&grp->regs->imask, imask); in gfar_receive()
2306 spin_unlock_irqrestore(&grp->grplock, flags); in gfar_receive()
2307 __napi_schedule(&grp->napi_rx); in gfar_receive()
2312 gfar_write(&grp->regs->ievent, IEVENT_RX_MASK); in gfar_receive()
2325 if (likely(napi_schedule_prep(&grp->napi_tx))) { in gfar_transmit()
2326 spin_lock_irqsave(&grp->grplock, flags); in gfar_transmit()
2327 imask = gfar_read(&grp->regs->imask); in gfar_transmit()
2328 imask &= IMASK_TX_DISABLED | grp->priv->rmon_overflow.imask; in gfar_transmit()
2329 gfar_write(&grp->regs->imask, imask); in gfar_transmit()
2330 spin_unlock_irqrestore(&grp->grplock, flags); in gfar_transmit()
2331 __napi_schedule(&grp->napi_tx); in gfar_transmit()
2336 gfar_write(&grp->regs->ievent, IEVENT_TX_MASK); in gfar_transmit()
2346 struct page *page = rxb->page; in gfar_add_rx_frag()
2353 size -= skb->len; in gfar_add_rx_frag()
2359 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, in gfar_add_rx_frag()
2360 rxb->page_offset + RXBUF_ALIGNMENT, in gfar_add_rx_frag()
2369 rxb->page_offset ^= GFAR_RXB_TRUESIZE; in gfar_add_rx_frag()
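The XOR above implements half-page buffer recycling: each page holds two RX buffers, and flipping the offset lets the other half be re-posted to the ring while the stack still owns the first. A sketch, assuming GFAR_RXB_TRUESIZE == 2048 on a 4 KiB page:

	/* page layout: [ buffer A: 0..2047 ][ buffer B: 2048..4095 ]
	 * page_offset ^= GFAR_RXB_TRUESIZE toggles A <-> B
	 */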
2376 static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq, in gfar_reuse_rx_page() argument
2380 u16 nta = rxq->next_to_alloc; in gfar_reuse_rx_page()
2382 new_rxb = &rxq->rx_buff[nta]; in gfar_reuse_rx_page()
2386 rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0; in gfar_reuse_rx_page()
2392 dma_sync_single_range_for_device(rxq->dev, old_rxb->dma, in gfar_reuse_rx_page()
2393 old_rxb->page_offset, in gfar_reuse_rx_page()
2400 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean]; in gfar_get_next_rxbuff()
2401 struct page *page = rxb->page; in gfar_get_next_rxbuff()
2405 void *buff_addr = page_address(page) + rxb->page_offset; in gfar_get_next_rxbuff()
2416 dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset, in gfar_get_next_rxbuff()
2424 dma_unmap_page(rx_queue->dev, rxb->dma, in gfar_get_next_rxbuff()
2429 rxb->page = NULL; in gfar_get_next_rxbuff()
2440 if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) == in gfar_rx_checksum()
2442 skb->ip_summed = CHECKSUM_UNNECESSARY; in gfar_rx_checksum()
2447 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2454 fcb = (struct rxfcb *)skb->data; in gfar_process_frame()
2459 if (priv->uses_rxfcb) in gfar_process_frame()
2463 if (priv->hwts_rx_en) { in gfar_process_frame()
2465 __be64 *ns = (__be64 *)skb->data; in gfar_process_frame()
2468 shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns)); in gfar_process_frame()
2471 if (priv->padding) in gfar_process_frame()
2472 skb_pull(skb, priv->padding); in gfar_process_frame()
2475 pskb_trim(skb, skb->len - ETH_FCS_LEN); in gfar_process_frame()
2477 if (ndev->features & NETIF_F_RXCSUM) in gfar_process_frame()
2484 if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX && in gfar_process_frame()
2485 be16_to_cpu(fcb->flags) & RXFCB_VLN) in gfar_process_frame()
2487 be16_to_cpu(fcb->vlctl)); in gfar_process_frame()
2490 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2497 struct net_device *ndev = rx_queue->ndev; in gfar_clean_rx_ring()
2501 struct sk_buff *skb = rx_queue->skb; in gfar_clean_rx_ring()
2506 i = rx_queue->next_to_clean; in gfar_clean_rx_ring()
2508 while (rx_work_limit--) { in gfar_clean_rx_ring()
2516 bdp = &rx_queue->rx_bd_base[i]; in gfar_clean_rx_ring()
2517 lstatus = be32_to_cpu(bdp->lstatus); in gfar_clean_rx_ring()
2527 rx_queue->stats.rx_dropped++; in gfar_clean_rx_ring()
2543 if (unlikely(++i == rx_queue->rx_ring_size)) in gfar_clean_rx_ring()
2546 rx_queue->next_to_clean = i; in gfar_clean_rx_ring()
2558 rx_queue->stats.rx_dropped++; in gfar_clean_rx_ring()
2566 total_bytes += skb->len; in gfar_clean_rx_ring()
2568 skb_record_rx_queue(skb, rx_queue->qindex); in gfar_clean_rx_ring()
2570 skb->protocol = eth_type_trans(skb, ndev); in gfar_clean_rx_ring()
2573 napi_gro_receive(&rx_queue->grp->napi_rx, skb); in gfar_clean_rx_ring()
2579 rx_queue->skb = skb; in gfar_clean_rx_ring()
2581 rx_queue->stats.rx_packets += total_pkts; in gfar_clean_rx_ring()
2582 rx_queue->stats.rx_bytes += total_bytes; in gfar_clean_rx_ring()
2588 if (unlikely(priv->tx_actual_en)) { in gfar_clean_rx_ring()
2591 gfar_write(rx_queue->rfbptr, bdp_dma); in gfar_clean_rx_ring()
2601 struct gfar __iomem *regs = gfargrp->regs; in gfar_poll_rx_sq()
2602 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue; in gfar_poll_rx_sq()
2608 gfar_write(&regs->ievent, IEVENT_RX_MASK); in gfar_poll_rx_sq()
2616 gfar_write(&regs->rstat, gfargrp->rstat); in gfar_poll_rx_sq()
2618 spin_lock_irq(&gfargrp->grplock); in gfar_poll_rx_sq()
2619 imask = gfar_read(&regs->imask); in gfar_poll_rx_sq()
2621 gfar_write(&regs->imask, imask); in gfar_poll_rx_sq()
2622 spin_unlock_irq(&gfargrp->grplock); in gfar_poll_rx_sq()
2632 struct gfar __iomem *regs = gfargrp->regs; in gfar_poll_tx_sq()
2633 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue; in gfar_poll_tx_sq()
2639 gfar_write(&regs->ievent, IEVENT_TX_MASK); in gfar_poll_tx_sq()
2642 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) in gfar_poll_tx_sq()
2647 spin_lock_irq(&gfargrp->grplock); in gfar_poll_tx_sq()
2648 imask = gfar_read(&regs->imask); in gfar_poll_tx_sq()
2650 gfar_write(&regs->imask, imask); in gfar_poll_tx_sq()
2651 spin_unlock_irq(&gfargrp->grplock); in gfar_poll_tx_sq()
2660 struct gfar __iomem *regs = gfargrp->regs; in gfar_error()
2661 struct gfar_private *priv = gfargrp->priv; in gfar_error()
2662 struct net_device *dev = priv->ndev; in gfar_error()
2665 u32 events = gfar_read(&regs->ievent); in gfar_error()
2668 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK); in gfar_error()
2671 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && in gfar_error()
2679 events, gfar_read(&regs->imask)); in gfar_error()
2683 dev->stats.tx_errors++; in gfar_error()
2686 dev->stats.tx_window_errors++; in gfar_error()
2688 dev->stats.tx_aborted_errors++; in gfar_error()
2692 dev->stats.tx_dropped++; in gfar_error()
2693 atomic64_inc(&priv->extra_stats.tx_underrun); in gfar_error()
2695 schedule_work(&priv->reset_task); in gfar_error()
2700 struct rmon_mib __iomem *rmon = &regs->rmon; in gfar_error()
2703 spin_lock(&priv->rmon_overflow.lock); in gfar_error()
2704 car = gfar_read(&rmon->car1) & CAR1_C1RDR; in gfar_error()
2706 priv->rmon_overflow.rdrp++; in gfar_error()
2707 gfar_write(&rmon->car1, car); in gfar_error()
2709 spin_unlock(&priv->rmon_overflow.lock); in gfar_error()
2712 dev->stats.rx_over_errors++; in gfar_error()
2713 atomic64_inc(&priv->extra_stats.rx_bsy); in gfar_error()
2716 gfar_read(&regs->rstat)); in gfar_error()
2719 dev->stats.rx_errors++; in gfar_error()
2720 atomic64_inc(&priv->extra_stats.rx_babr); in gfar_error()
2725 atomic64_inc(&priv->extra_stats.eberr); in gfar_error()
2732 atomic64_inc(&priv->extra_stats.tx_babt); in gfar_error()
2744 u32 events = gfar_read(&gfargrp->regs->ievent); in gfar_interrupt()
2762 /* Polling 'interrupt' - used by things like netconsole to send skbs
2763 * without having to re-enable interrupts. It's not called while
2772 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { in gfar_netpoll()
2773 for (i = 0; i < priv->num_grps; i++) { in gfar_netpoll()
2774 struct gfar_priv_grp *grp = &priv->gfargrp[i]; in gfar_netpoll()
2776 disable_irq(gfar_irq(grp, TX)->irq); in gfar_netpoll()
2777 disable_irq(gfar_irq(grp, RX)->irq); in gfar_netpoll()
2778 disable_irq(gfar_irq(grp, ER)->irq); in gfar_netpoll()
2779 gfar_interrupt(gfar_irq(grp, TX)->irq, grp); in gfar_netpoll()
2780 enable_irq(gfar_irq(grp, ER)->irq); in gfar_netpoll()
2781 enable_irq(gfar_irq(grp, RX)->irq); in gfar_netpoll()
2782 enable_irq(gfar_irq(grp, TX)->irq); in gfar_netpoll()
2785 for (i = 0; i < priv->num_grps; i++) { in gfar_netpoll()
2786 struct gfar_priv_grp *grp = &priv->gfargrp[i]; in gfar_netpoll()
2788 disable_irq(gfar_irq(grp, TX)->irq); in gfar_netpoll()
2789 gfar_interrupt(gfar_irq(grp, TX)->irq, grp); in gfar_netpoll()
2790 enable_irq(gfar_irq(grp, TX)->irq); in gfar_netpoll()
2798 free_irq(gfar_irq(grp, TX)->irq, grp); in free_grp_irqs()
2799 free_irq(gfar_irq(grp, RX)->irq, grp); in free_grp_irqs()
2800 free_irq(gfar_irq(grp, ER)->irq, grp); in free_grp_irqs()
2805 struct gfar_private *priv = grp->priv; in register_grp_irqs()
2806 struct net_device *dev = priv->ndev; in register_grp_irqs()
2812 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { in register_grp_irqs()
2816 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, in register_grp_irqs()
2817 gfar_irq(grp, ER)->name, grp); in register_grp_irqs()
2820 gfar_irq(grp, ER)->irq); in register_grp_irqs()
2824 enable_irq_wake(gfar_irq(grp, ER)->irq); in register_grp_irqs()
2826 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, in register_grp_irqs()
2827 gfar_irq(grp, TX)->name, grp); in register_grp_irqs()
2830 gfar_irq(grp, TX)->irq); in register_grp_irqs()
2833 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0, in register_grp_irqs()
2834 gfar_irq(grp, RX)->name, grp); in register_grp_irqs()
2837 gfar_irq(grp, RX)->irq); in register_grp_irqs()
2840 enable_irq_wake(gfar_irq(grp, RX)->irq); in register_grp_irqs()
2843 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, in register_grp_irqs()
2844 gfar_irq(grp, TX)->name, grp); in register_grp_irqs()
2847 gfar_irq(grp, TX)->irq); in register_grp_irqs()
2850 enable_irq_wake(gfar_irq(grp, TX)->irq); in register_grp_irqs()
2856 free_irq(gfar_irq(grp, TX)->irq, grp); in register_grp_irqs()
2858 free_irq(gfar_irq(grp, ER)->irq, grp); in register_grp_irqs()
2869 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { in gfar_free_irq()
2870 for (i = 0; i < priv->num_grps; i++) in gfar_free_irq()
2871 free_grp_irqs(&priv->gfargrp[i]); in gfar_free_irq()
2873 for (i = 0; i < priv->num_grps; i++) in gfar_free_irq()
2874 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, in gfar_free_irq()
2875 &priv->gfargrp[i]); in gfar_free_irq()
2883 for (i = 0; i < priv->num_grps; i++) { in gfar_request_irq()
2884 err = register_grp_irqs(&priv->gfargrp[i]); in gfar_request_irq()
2887 free_grp_irqs(&priv->gfargrp[j]); in gfar_request_irq()
2923 cancel_work_sync(&priv->reset_task); in gfar_close()
2927 phy_disconnect(dev->phydev); in gfar_close()
2949 * whenever dev->flags is changed
2955 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_set_multi()
2958 if (dev->flags & IFF_PROMISC) { in gfar_set_multi()
2960 tempval = gfar_read(&regs->rctrl); in gfar_set_multi()
2962 gfar_write(&regs->rctrl, tempval); in gfar_set_multi()
2965 tempval = gfar_read(&regs->rctrl); in gfar_set_multi()
2967 gfar_write(&regs->rctrl, tempval); in gfar_set_multi()
2970 if (dev->flags & IFF_ALLMULTI) { in gfar_set_multi()
2972 gfar_write(&regs->igaddr0, 0xffffffff); in gfar_set_multi()
2973 gfar_write(&regs->igaddr1, 0xffffffff); in gfar_set_multi()
2974 gfar_write(&regs->igaddr2, 0xffffffff); in gfar_set_multi()
2975 gfar_write(&regs->igaddr3, 0xffffffff); in gfar_set_multi()
2976 gfar_write(&regs->igaddr4, 0xffffffff); in gfar_set_multi()
2977 gfar_write(&regs->igaddr5, 0xffffffff); in gfar_set_multi()
2978 gfar_write(&regs->igaddr6, 0xffffffff); in gfar_set_multi()
2979 gfar_write(&regs->igaddr7, 0xffffffff); in gfar_set_multi()
2980 gfar_write(&regs->gaddr0, 0xffffffff); in gfar_set_multi()
2981 gfar_write(&regs->gaddr1, 0xffffffff); in gfar_set_multi()
2982 gfar_write(&regs->gaddr2, 0xffffffff); in gfar_set_multi()
2983 gfar_write(&regs->gaddr3, 0xffffffff); in gfar_set_multi()
2984 gfar_write(&regs->gaddr4, 0xffffffff); in gfar_set_multi()
2985 gfar_write(&regs->gaddr5, 0xffffffff); in gfar_set_multi()
2986 gfar_write(&regs->gaddr6, 0xffffffff); in gfar_set_multi()
2987 gfar_write(&regs->gaddr7, 0xffffffff); in gfar_set_multi()
2993 gfar_write(&regs->igaddr0, 0x0); in gfar_set_multi()
2994 gfar_write(&regs->igaddr1, 0x0); in gfar_set_multi()
2995 gfar_write(&regs->igaddr2, 0x0); in gfar_set_multi()
2996 gfar_write(&regs->igaddr3, 0x0); in gfar_set_multi()
2997 gfar_write(&regs->igaddr4, 0x0); in gfar_set_multi()
2998 gfar_write(&regs->igaddr5, 0x0); in gfar_set_multi()
2999 gfar_write(&regs->igaddr6, 0x0); in gfar_set_multi()
3000 gfar_write(&regs->igaddr7, 0x0); in gfar_set_multi()
3001 gfar_write(&regs->gaddr0, 0x0); in gfar_set_multi()
3002 gfar_write(&regs->gaddr1, 0x0); in gfar_set_multi()
3003 gfar_write(&regs->gaddr2, 0x0); in gfar_set_multi()
3004 gfar_write(&regs->gaddr3, 0x0); in gfar_set_multi()
3005 gfar_write(&regs->gaddr4, 0x0); in gfar_set_multi()
3006 gfar_write(&regs->gaddr5, 0x0); in gfar_set_multi()
3007 gfar_write(&regs->gaddr6, 0x0); in gfar_set_multi()
3008 gfar_write(&regs->gaddr7, 0x0); in gfar_set_multi()
3014 if (priv->extended_hash) { in gfar_set_multi()
3029 gfar_set_mac_for_addr(dev, idx, ha->addr); in gfar_set_multi()
3032 gfar_set_hash_for_addr(dev, ha->addr); in gfar_set_multi()
3039 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_mac_reset()
3043 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET); in gfar_mac_reset()
3048 /* the soft reset bit is not self-resetting, so we need to in gfar_mac_reset()
3051 gfar_write(&regs->maccfg1, 0); in gfar_mac_reset()
3058 gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE); in gfar_mac_reset()
3059 gfar_write(&regs->mrblr, GFAR_RXB_SIZE); in gfar_mac_reset()
3062 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS); in gfar_mac_reset()
3067 /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1 in gfar_mac_reset()
3074 gfar_write(&regs->maccfg2, tempval); in gfar_mac_reset()
3077 gfar_write(&regs->igaddr0, 0); in gfar_mac_reset()
3078 gfar_write(&regs->igaddr1, 0); in gfar_mac_reset()
3079 gfar_write(&regs->igaddr2, 0); in gfar_mac_reset()
3080 gfar_write(&regs->igaddr3, 0); in gfar_mac_reset()
3081 gfar_write(&regs->igaddr4, 0); in gfar_mac_reset()
3082 gfar_write(&regs->igaddr5, 0); in gfar_mac_reset()
3083 gfar_write(&regs->igaddr6, 0); in gfar_mac_reset()
3084 gfar_write(&regs->igaddr7, 0); in gfar_mac_reset()
3086 gfar_write(&regs->gaddr0, 0); in gfar_mac_reset()
3087 gfar_write(&regs->gaddr1, 0); in gfar_mac_reset()
3088 gfar_write(&regs->gaddr2, 0); in gfar_mac_reset()
3089 gfar_write(&regs->gaddr3, 0); in gfar_mac_reset()
3090 gfar_write(&regs->gaddr4, 0); in gfar_mac_reset()
3091 gfar_write(&regs->gaddr5, 0); in gfar_mac_reset()
3092 gfar_write(&regs->gaddr6, 0); in gfar_mac_reset()
3093 gfar_write(&regs->gaddr7, 0); in gfar_mac_reset()
3095 if (priv->extended_hash) in gfar_mac_reset()
3096 gfar_clear_exact_match(priv->ndev); in gfar_mac_reset()
3102 gfar_set_mac_address(priv->ndev); in gfar_mac_reset()
3104 gfar_set_multi(priv->ndev); in gfar_mac_reset()
3115 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_hw_init()
3126 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { in gfar_hw_init()
3127 memset_io(&regs->rmon, 0, offsetof(struct rmon_mib, car1)); in gfar_hw_init()
3130 gfar_write(&regs->rmon.cam1, 0xffffffff); in gfar_hw_init()
3131 gfar_write(&regs->rmon.cam2, 0xffffffff); in gfar_hw_init()
3133 gfar_write(&regs->rmon.car1, 0xffffffff); in gfar_hw_init()
3134 gfar_write(&regs->rmon.car2, 0xffffffff); in gfar_hw_init()
3138 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS); in gfar_hw_init()
3141 attrs = ATTRELI_EL(priv->rx_stash_size) | in gfar_hw_init()
3142 ATTRELI_EI(priv->rx_stash_index); in gfar_hw_init()
3144 gfar_write(&regs->attreli, attrs); in gfar_hw_init()
3151 if (priv->bd_stash_en) in gfar_hw_init()
3154 if (priv->rx_stash_size != 0) in gfar_hw_init()
3157 gfar_write(&regs->attr, attrs); in gfar_hw_init()
3160 gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR); in gfar_hw_init()
3161 gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE); in gfar_hw_init()
3162 gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF); in gfar_hw_init()
3165 if (priv->num_grps > 1) in gfar_hw_init()
3192 struct device_node *np = ofdev->dev.of_node; in gfar_probe()
3203 priv->ndev = dev; in gfar_probe()
3204 priv->ofdev = ofdev; in gfar_probe()
3205 priv->dev = &ofdev->dev; in gfar_probe()
3206 SET_NETDEV_DEV(dev, &ofdev->dev); in gfar_probe()
3208 INIT_WORK(&priv->reset_task, gfar_reset_task); in gfar_probe()
3214 /* Set the dev->base_addr to the gfar reg region */ in gfar_probe()
3215 dev->base_addr = (unsigned long) priv->gfargrp[0].regs; in gfar_probe()
3218 dev->watchdog_timeo = TX_TIMEOUT; in gfar_probe()
3219 /* MTU range: 50 - 9586 */ in gfar_probe()
3220 dev->mtu = 1500; in gfar_probe()
3221 dev->min_mtu = 50; in gfar_probe()
3222 dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN; in gfar_probe()
3223 dev->netdev_ops = &gfar_netdev_ops; in gfar_probe()
3224 dev->ethtool_ops = &gfar_ethtool_ops; in gfar_probe()
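/* For illustration only: the shape of the ops table installed above.
 * Apart from gfar_set_multi (shown earlier in this file), the callback
 * names below are hypothetical placeholders:
 *
 *	static const struct net_device_ops demo_netdev_ops = {
 *		.ndo_open	 = demo_open,
 *		.ndo_stop	 = demo_stop,
 *		.ndo_start_xmit	 = demo_start_xmit,
 *		.ndo_set_rx_mode = gfar_set_multi,
 *		.ndo_tx_timeout	 = demo_timeout,
 *	};
 */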
3227 for (i = 0; i < priv->num_grps; i++) { in gfar_probe()
3228 netif_napi_add(dev, &priv->gfargrp[i].napi_rx, in gfar_probe()
3230 netif_napi_add_tx_weight(dev, &priv->gfargrp[i].napi_tx, in gfar_probe()
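/* Each interrupt group gets its own Rx and Tx NAPI context; the Tx side
 * is registered via netif_napi_add_tx_weight(), so transmit completion
 * is polled independently of receive processing.
 */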
3234 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { in gfar_probe()
3235 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | in gfar_probe()
3237 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | in gfar_probe()
3241 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { in gfar_probe()
3242 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | in gfar_probe()
3244 dev->features |= NETIF_F_HW_VLAN_CTAG_RX; in gfar_probe()
3247 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; in gfar_probe()
3254 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) in gfar_probe()
3255 priv->padding = 8 + DEFAULT_PADDING; in gfar_probe()
3257 if (dev->features & NETIF_F_IP_CSUM || in gfar_probe()
3258 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) in gfar_probe()
3259 dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN; in gfar_probe()
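/* The headroom reserved above covers the frame control block plus the
 * Tx padding/alignment area prepended to each frame. Assuming the
 * gianfar.h values GMAC_FCB_LEN = 8 and GMAC_TXPAL_LEN = 16, that is
 * 24 bytes of needed_headroom.
 */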
3262 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_probe()
3263 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; in gfar_probe()
3264 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; in gfar_probe()
3265 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; in gfar_probe()
3266 priv->tx_queue[i]->txic = DEFAULT_TXIC; in gfar_probe()
3269 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_probe()
3270 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; in gfar_probe()
3271 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; in gfar_probe()
3272 priv->rx_queue[i]->rxic = DEFAULT_RXIC; in gfar_probe()
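/* These ring-size and coalescing defaults are only starting points; they
 * can be changed at runtime from userspace with standard ethtool options,
 * e.g. (interface name is just an example):
 *
 *	ethtool -G eth0 rx 512 tx 512	# resize the BD rings
 *	ethtool -C eth0 rx-usecs 30	# tune Rx interrupt coalescing
 */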
3276 priv->rx_filer_enable = in gfar_probe()
3277 (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0; in gfar_probe()
3279 priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1; in gfar_probe()
3281 if (priv->num_tx_queues == 1) in gfar_probe()
3282 priv->prio_sched_en = 1; in gfar_probe()
3284 set_bit(GFAR_DOWN, &priv->state); in gfar_probe()
3288 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { in gfar_probe()
3289 struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon; in gfar_probe()
3291 spin_lock_init(&priv->rmon_overflow.lock); in gfar_probe()
3292 priv->rmon_overflow.imask = IMASK_MSRO; in gfar_probe()
3293 gfar_write(&rmon->cam1, gfar_read(&rmon->cam1) & ~CAM1_M1RDR); in gfar_probe()
3302 pr_err("%s: Cannot register net device, aborting\n", dev->name); in gfar_probe()
3306 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) in gfar_probe()
3307 priv->wol_supported |= GFAR_WOL_MAGIC; in gfar_probe()
3309 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) && in gfar_probe()
3310 priv->rx_filer_enable) in gfar_probe()
3311 priv->wol_supported |= GFAR_WOL_FILER_UCAST; in gfar_probe()
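/* Magic-packet wake is advertised whenever the hardware flag is set;
 * filer-based unicast wake additionally requires the Rx filer, since it
 * reuses the flow filer (see gfar_filer_config_wol() below) to recognize
 * the wake frame.
 */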
3313 device_set_wakeup_capable(&ofdev->dev, priv->wol_supported); in gfar_probe()
3316 for (i = 0; i < priv->num_grps; i++) { in gfar_probe()
3317 struct gfar_priv_grp *grp = &priv->gfargrp[i]; in gfar_probe()
3318 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { in gfar_probe()
3319 sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s", in gfar_probe()
3320 dev->name, "_g", '0' + i, "_tx"); in gfar_probe()
3321 sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s", in gfar_probe()
3322 dev->name, "_g", '0' + i, "_rx"); in gfar_probe()
3323 sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s", in gfar_probe()
3324 dev->name, "_g", '0' + i, "_er"); in gfar_probe()
3326 strcpy(gfar_irq(grp, TX)->name, dev->name); in gfar_probe()
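/* With multiple interrupt groups the sprintf() calls above yield names
 * such as "eth0_g0_tx", "eth0_g0_rx" and "eth0_g0_er"; with a single
 * combined interrupt only the device name is used.
 */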
3333 netdev_info(dev, "mac: %pM\n", dev->dev_addr); in gfar_probe()
3339 for (i = 0; i < priv->num_rx_queues; i++) in gfar_probe()
3341 i, priv->rx_queue[i]->rx_ring_size); in gfar_probe()
3342 for (i = 0; i < priv->num_tx_queues; i++) in gfar_probe()
3344 i, priv->tx_queue[i]->tx_ring_size); in gfar_probe()
3354 of_node_put(priv->phy_node); in gfar_probe()
3355 of_node_put(priv->tbi_node); in gfar_probe()
3363 struct device_node *np = ofdev->dev.of_node; in gfar_remove()
3365 of_node_put(priv->phy_node); in gfar_remove()
3366 of_node_put(priv->tbi_node); in gfar_remove()
3368 unregister_netdev(priv->ndev); in gfar_remove()
3383 struct gfar __iomem *regs = priv->gfargrp[0].regs; in __gfar_filer_disable()
3386 temp = gfar_read(&regs->rctrl); in __gfar_filer_disable()
3388 gfar_write(&regs->rctrl, temp); in __gfar_filer_disable()
3393 struct gfar __iomem *regs = priv->gfargrp[0].regs; in __gfar_filer_enable()
3396 temp = gfar_read(&regs->rctrl); in __gfar_filer_enable()
3398 gfar_write(&regs->rctrl, temp); in __gfar_filer_enable()
3415 if (priv->wol_opts & GFAR_WOL_FILER_UCAST) { in gfar_filer_config_wol()
3417 struct net_device *ndev = priv->ndev; in gfar_filer_config_wol()
3419 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex; in gfar_filer_config_wol()
3420 u32 dest_mac_addr = (ndev->dev_addr[0] << 16) | in gfar_filer_config_wol()
3421 (ndev->dev_addr[1] << 8) | in gfar_filer_config_wol()
3422 ndev->dev_addr[2]; in gfar_filer_config_wol()
3429 dest_mac_addr = (ndev->dev_addr[3] << 16) | in gfar_filer_config_wol()
3430 (ndev->dev_addr[4] << 8) | in gfar_filer_config_wol()
3431 ndev->dev_addr[5]; in gfar_filer_config_wol()
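/* The unicast wake filter matches the station address as two 24-bit
 * halves (bytes 0-2, then bytes 3-5), which is why dev_addr is packed a
 * byte at a time above. A hypothetical helper equivalent to the
 * open-coded packing:
 *
 *	static inline u32 demo_pack_mac_half(const u8 *p)
 *	{
 *		return (p[0] << 16) | (p[1] << 8) | p[2];
 *	}
 */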
3448 rqfcr = priv->ftp_rqfcr[i]; in gfar_filer_restore_table()
3449 rqfpr = priv->ftp_rqfpr[i]; in gfar_filer_restore_table()
3459 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_start_wol_filer()
3464 gfar_write(&regs->rqueue, priv->rqueue); in gfar_start_wol_filer()
3467 tempval = gfar_read(&regs->dmactrl); in gfar_start_wol_filer()
3469 gfar_write(&regs->dmactrl, tempval); in gfar_start_wol_filer()
3472 tempval = gfar_read(&regs->dmactrl); in gfar_start_wol_filer()
3474 gfar_write(&regs->dmactrl, tempval); in gfar_start_wol_filer()
3476 for (i = 0; i < priv->num_grps; i++) { in gfar_start_wol_filer()
3477 regs = priv->gfargrp[i].regs; in gfar_start_wol_filer()
3479 gfar_write(&regs->rstat, priv->gfargrp[i].rstat); in gfar_start_wol_filer()
3481 gfar_write(&regs->imask, IMASK_FGPI); in gfar_start_wol_filer()
3485 tempval = gfar_read(&regs->maccfg1); in gfar_start_wol_filer()
3487 gfar_write(&regs->maccfg1, tempval); in gfar_start_wol_filer()
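/* The dmactrl and maccfg1 updates above all follow the same MMIO
 * read-modify-write pattern. A minimal sketch of that pattern as a
 * helper -- demo_rmw() is a hypothetical name, not a driver API:
 */
static inline void demo_rmw(u32 __iomem *reg, u32 clear, u32 set)
{
	u32 val = gfar_read(reg);

	val &= ~clear;
	val |= set;
	gfar_write(reg, val);
}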
3493 struct net_device *ndev = priv->ndev; in gfar_suspend()
3494 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_suspend()
3496 u16 wol = priv->wol_opts; in gfar_suspend()
3510 gfar_write(&regs->imask, IMASK_MAG); in gfar_suspend()
3513 tempval = gfar_read(&regs->maccfg2); in gfar_suspend()
3515 gfar_write(&regs->maccfg2, tempval); in gfar_suspend()
3517 /* re-enable the Rx block */ in gfar_suspend()
3518 tempval = gfar_read(&regs->maccfg1); in gfar_suspend()
3520 gfar_write(&regs->maccfg1, tempval); in gfar_suspend()
3527 phy_stop(ndev->phydev); in gfar_suspend()
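/* For magic-packet wake the suspend path above unmasks only IMASK_MAG,
 * sets the magic-packet enable bit in MACCFG2 (presumably MACCFG2_MPEN
 * in the elided mask), keeps the Rx block running so the wake frame can
 * be received, and stops the PHY to save power.
 */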
3536 struct net_device *ndev = priv->ndev; in gfar_resume()
3537 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_resume()
3539 u16 wol = priv->wol_opts; in gfar_resume()
3546 tempval = gfar_read(&regs->maccfg2); in gfar_resume()
3548 gfar_write(&regs->maccfg2, tempval); in gfar_resume()
3556 phy_start(ndev->phydev); in gfar_resume()
3570 struct net_device *ndev = priv->ndev; in gfar_restore()
3586 priv->oldlink = 0; in gfar_restore()
3587 priv->oldspeed = 0; in gfar_restore()
3588 priv->oldduplex = -1; in gfar_restore()
3590 if (ndev->phydev) in gfar_restore()
3591 phy_start(ndev->phydev); in gfar_restore()
3631 .name = "fsl-gianfar",