1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * This driver is designed for the non-CPM ethernet controllers
13 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
20 * B-V +1.62
25 * is therefore conveyed through an OF-style device tree.
45 * pre-allocated skb, and so after the skb is passed up to the
93 #include <linux/dma-mapping.h>
115 bdp->bufPtr = cpu_to_be32(buf); in gfar_init_rxbdp()
118 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) in gfar_init_rxbdp()
123 bdp->lstatus = cpu_to_be32(lstatus); in gfar_init_rxbdp()
128 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_init_tx_rx_base()
132 baddr = &regs->tbase0; in gfar_init_tx_rx_base()
133 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_init_tx_rx_base()
134 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base); in gfar_init_tx_rx_base()
138 baddr = &regs->rbase0; in gfar_init_tx_rx_base()
139 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_init_tx_rx_base()
140 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base); in gfar_init_tx_rx_base()
147 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_init_rqprm()
151 baddr = &regs->rqprm0; in gfar_init_rqprm()
152 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_init_rqprm()
153 gfar_write(baddr, priv->rx_queue[i]->rx_ring_size | in gfar_init_rqprm()
162 priv->uses_rxfcb = 0; in gfar_rx_offload_en()
164 if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) in gfar_rx_offload_en()
165 priv->uses_rxfcb = 1; in gfar_rx_offload_en()
167 if (priv->hwts_rx_en || priv->rx_filer_enable) in gfar_rx_offload_en()
168 priv->uses_rxfcb = 1; in gfar_rx_offload_en()
173 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_mac_rx_config()
176 if (priv->rx_filer_enable) { in gfar_mac_rx_config()
179 gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0); in gfar_mac_rx_config()
183 if (priv->ndev->flags & IFF_PROMISC) in gfar_mac_rx_config()
186 if (priv->ndev->features & NETIF_F_RXCSUM) in gfar_mac_rx_config()
189 if (priv->extended_hash) in gfar_mac_rx_config()
192 if (priv->padding) { in gfar_mac_rx_config()
194 rctrl |= RCTRL_PADDING(priv->padding); in gfar_mac_rx_config()
198 if (priv->hwts_rx_en) in gfar_mac_rx_config()
201 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) in gfar_mac_rx_config()
205 gfar_write(&regs->rctrl, rctrl); in gfar_mac_rx_config()
208 gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL); in gfar_mac_rx_config()
212 gfar_write(&regs->rctrl, rctrl); in gfar_mac_rx_config()
217 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_mac_tx_config()
220 if (priv->ndev->features & NETIF_F_IP_CSUM) in gfar_mac_tx_config()
223 if (priv->prio_sched_en) in gfar_mac_tx_config()
227 gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT); in gfar_mac_tx_config()
228 gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT); in gfar_mac_tx_config()
231 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) in gfar_mac_tx_config()
234 gfar_write(&regs->tctrl, tctrl); in gfar_mac_tx_config()
240 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_configure_coalescing()
243 if (priv->mode == MQ_MG_MODE) { in gfar_configure_coalescing()
246 baddr = &regs->txic0; in gfar_configure_coalescing()
247 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { in gfar_configure_coalescing()
249 if (likely(priv->tx_queue[i]->txcoalescing)) in gfar_configure_coalescing()
250 gfar_write(baddr + i, priv->tx_queue[i]->txic); in gfar_configure_coalescing()
253 baddr = &regs->rxic0; in gfar_configure_coalescing()
254 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) { in gfar_configure_coalescing()
256 if (likely(priv->rx_queue[i]->rxcoalescing)) in gfar_configure_coalescing()
257 gfar_write(baddr + i, priv->rx_queue[i]->rxic); in gfar_configure_coalescing()
260 /* Backward compatible case -- even if we enable in gfar_configure_coalescing()
263 gfar_write(&regs->txic, 0); in gfar_configure_coalescing()
264 if (likely(priv->tx_queue[0]->txcoalescing)) in gfar_configure_coalescing()
265 gfar_write(&regs->txic, priv->tx_queue[0]->txic); in gfar_configure_coalescing()
267 gfar_write(&regs->rxic, 0); in gfar_configure_coalescing()
268 if (unlikely(priv->rx_queue[0]->rxcoalescing)) in gfar_configure_coalescing()
269 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic); in gfar_configure_coalescing()
283 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_get_stats64()
284 stats->rx_packets += priv->rx_queue[i]->stats.rx_packets; in gfar_get_stats64()
285 stats->rx_bytes += priv->rx_queue[i]->stats.rx_bytes; in gfar_get_stats64()
286 stats->rx_dropped += priv->rx_queue[i]->stats.rx_dropped; in gfar_get_stats64()
289 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_get_stats64()
290 stats->tx_bytes += priv->tx_queue[i]->stats.tx_bytes; in gfar_get_stats64()
291 stats->tx_packets += priv->tx_queue[i]->stats.tx_packets; in gfar_get_stats64()
294 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { in gfar_get_stats64()
295 struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon; in gfar_get_stats64()
300 spin_lock_irqsave(&priv->rmon_overflow.lock, flags); in gfar_get_stats64()
301 car = gfar_read(&rmon->car1) & CAR1_C1RDR; in gfar_get_stats64()
304 rdrp = gfar_read(&rmon->rdrp); in gfar_get_stats64()
305 car = gfar_read(&rmon->car1) & CAR1_C1RDR; in gfar_get_stats64()
308 priv->rmon_overflow.rdrp++; in gfar_get_stats64()
309 gfar_write(&rmon->car1, car); in gfar_get_stats64()
311 rdrp_offset = priv->rmon_overflow.rdrp; in gfar_get_stats64()
312 spin_unlock_irqrestore(&priv->rmon_overflow.lock, flags); in gfar_get_stats64()
314 stats->rx_missed_errors = rdrp + (rdrp_offset << 16); in gfar_get_stats64()
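An aside on the RDRP bookkeeping above: the hardware drop counter is only 16 bits wide and raises a carry bit when it wraps, so the driver counts wraps in software and scales them by 2^16 when reporting. A minimal standalone sketch of that idiom (the names here are hypothetical, not gianfar.c API):

#include <stdint.h>

struct ovf_ctr {
	uint64_t wraps;	/* software count of hardware carry events */
};

/* hw: current 16-bit counter reading; carry_set: hardware wrap flag,
 * which the real driver also clears by writing it back to CAR1.
 */
static uint64_t ovf_ctr_read(struct ovf_ctr *c, uint16_t hw, int carry_set)
{
	if (carry_set)
		c->wraps++;
	return (c->wraps << 16) + hw;	/* mirrors rdrp + (rdrp_offset << 16) */
}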
323 * 2) Use the 8 most significant bits as a hash into a 256-entry
324 * table. The table is controlled through 8 32-bit registers:
325 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
337 int width = priv->hash_width; in gfar_set_hash_for_addr()
338 u8 whichbit = (result >> (32 - width)) & 0x1f; in gfar_set_hash_for_addr()
339 u8 whichreg = result >> (32 - width + 5); in gfar_set_hash_for_addr()
340 u32 value = (1 << (31-whichbit)); in gfar_set_hash_for_addr()
342 tempval = gfar_read(priv->hash_regs[whichreg]); in gfar_set_hash_for_addr()
344 gfar_write(priv->hash_regs[whichreg], tempval); in gfar_set_hash_for_addr()
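For clarity, the index arithmetic in gfar_set_hash_for_addr() splits the top `width` CRC bits into a register number and an MSB-first bit number. A standalone sketch with a worked example (gfar_hash_split is a hypothetical name, not part of gianfar.c):

#include <stdint.h>

static void gfar_hash_split(uint32_t crc, int width,
			    unsigned *reg, uint32_t *mask)
{
	uint8_t bit = (crc >> (32 - width)) & 0x1f;	/* low 5 of the top bits */
	*reg  = crc >> (32 - width + 5);		/* remaining top bits */
	*mask = 1u << (31 - bit);			/* bit 0 is the register MSB */
}

/* Example, width 8 (256-entry table, 8 registers of 32 bits):
 * crc = 0xA3000000 -> top byte 0xA3 = 0b101'00011 -> reg 5, bit 3,
 * mask = 1 << (31 - 3) = 0x10000000.
 */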
354 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_set_mac_for_addr()
356 u32 __iomem *macptr = &regs->macstnaddr1; in gfar_set_mac_for_addr()
382 gfar_set_mac_for_addr(dev, 0, dev->dev_addr); in gfar_set_mac_addr()
390 for (i = 0; i < priv->num_grps; i++) { in gfar_ints_disable()
391 struct gfar __iomem *regs = priv->gfargrp[i].regs; in gfar_ints_disable()
393 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); in gfar_ints_disable()
396 gfar_write(&regs->imask, IMASK_INIT_CLEAR); in gfar_ints_disable()
403 for (i = 0; i < priv->num_grps; i++) { in gfar_ints_enable()
404 struct gfar __iomem *regs = priv->gfargrp[i].regs; in gfar_ints_enable()
406 gfar_write(&regs->imask, in gfar_ints_enable()
407 IMASK_DEFAULT | priv->rmon_overflow.imask); in gfar_ints_enable()
415 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_alloc_tx_queues()
416 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), in gfar_alloc_tx_queues()
418 if (!priv->tx_queue[i]) in gfar_alloc_tx_queues()
419 return -ENOMEM; in gfar_alloc_tx_queues()
421 priv->tx_queue[i]->tx_skbuff = NULL; in gfar_alloc_tx_queues()
422 priv->tx_queue[i]->qindex = i; in gfar_alloc_tx_queues()
423 priv->tx_queue[i]->dev = priv->ndev; in gfar_alloc_tx_queues()
424 spin_lock_init(&(priv->tx_queue[i]->txlock)); in gfar_alloc_tx_queues()
433 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_alloc_rx_queues()
434 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), in gfar_alloc_rx_queues()
436 if (!priv->rx_queue[i]) in gfar_alloc_rx_queues()
437 return -ENOMEM; in gfar_alloc_rx_queues()
439 priv->rx_queue[i]->qindex = i; in gfar_alloc_rx_queues()
440 priv->rx_queue[i]->ndev = priv->ndev; in gfar_alloc_rx_queues()
449 for (i = 0; i < priv->num_tx_queues; i++) in gfar_free_tx_queues()
450 kfree(priv->tx_queue[i]); in gfar_free_tx_queues()
457 for (i = 0; i < priv->num_rx_queues; i++) in gfar_free_rx_queues()
458 kfree(priv->rx_queue[i]); in gfar_free_rx_queues()
466 if (priv->gfargrp[i].regs) in unmap_group_regs()
467 iounmap(priv->gfargrp[i].regs); in unmap_group_regs()
474 for (i = 0; i < priv->num_grps; i++) in free_gfar_dev()
476 kfree(priv->gfargrp[i].irqinfo[j]); in free_gfar_dev()
477 priv->gfargrp[i].irqinfo[j] = NULL; in free_gfar_dev()
480 free_netdev(priv->ndev); in free_gfar_dev()
487 for (i = 0; i < priv->num_grps; i++) { in disable_napi()
488 napi_disable(&priv->gfargrp[i].napi_rx); in disable_napi()
489 napi_disable(&priv->gfargrp[i].napi_tx); in disable_napi()
497 for (i = 0; i < priv->num_grps; i++) { in enable_napi()
498 napi_enable(&priv->gfargrp[i].napi_rx); in enable_napi()
499 napi_enable(&priv->gfargrp[i].napi_tx); in enable_napi()
506 struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps]; in gfar_parse_group()
510 grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo), in gfar_parse_group()
512 if (!grp->irqinfo[i]) in gfar_parse_group()
513 return -ENOMEM; in gfar_parse_group()
516 grp->regs = of_iomap(np, 0); in gfar_parse_group()
517 if (!grp->regs) in gfar_parse_group()
518 return -ENOMEM; in gfar_parse_group()
520 gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0); in gfar_parse_group()
524 gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1); in gfar_parse_group()
525 gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2); in gfar_parse_group()
526 if (!gfar_irq(grp, TX)->irq || in gfar_parse_group()
527 !gfar_irq(grp, RX)->irq || in gfar_parse_group()
528 !gfar_irq(grp, ER)->irq) in gfar_parse_group()
529 return -EINVAL; in gfar_parse_group()
532 grp->priv = priv; in gfar_parse_group()
533 spin_lock_init(&grp->grplock); in gfar_parse_group()
534 if (priv->mode == MQ_MG_MODE) { in gfar_parse_group()
536 grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); in gfar_parse_group()
537 grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); in gfar_parse_group()
539 grp->rx_bit_map = 0xFF; in gfar_parse_group()
540 grp->tx_bit_map = 0xFF; in gfar_parse_group()
546 grp->rx_bit_map = bitrev8(grp->rx_bit_map); in gfar_parse_group()
547 grp->tx_bit_map = bitrev8(grp->tx_bit_map); in gfar_parse_group()
552 for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) { in gfar_parse_group()
553 if (!grp->rx_queue) in gfar_parse_group()
554 grp->rx_queue = priv->rx_queue[i]; in gfar_parse_group()
555 grp->num_rx_queues++; in gfar_parse_group()
556 grp->rstat |= (RSTAT_CLEAR_RHALT >> i); in gfar_parse_group()
557 priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i); in gfar_parse_group()
558 priv->rx_queue[i]->grp = grp; in gfar_parse_group()
561 for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) { in gfar_parse_group()
562 if (!grp->tx_queue) in gfar_parse_group()
563 grp->tx_queue = priv->tx_queue[i]; in gfar_parse_group()
564 grp->num_tx_queues++; in gfar_parse_group()
565 grp->tstat |= (TSTAT_CLEAR_THALT >> i); in gfar_parse_group()
566 priv->tqueue |= (TQUEUE_EN0 >> i); in gfar_parse_group()
567 priv->tx_queue[i]->grp = grp; in gfar_parse_group()
570 priv->num_grps++; in gfar_parse_group()
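A note on the bit maps above: per the in-tree comment elided from this excerpt, rx/tx_bit_map keep queue 0 in the MSB of the byte, while for_each_set_bit() walks from the LSB, hence the bitrev8() mirror at lines 546-547. A standalone sketch of the reversal (the kernel's own helper lives in <linux/bitrev.h>):

#include <stdint.h>

static uint8_t bitrev8_sketch(uint8_t b)
{
	uint8_t r = 0;
	int i;

	for (i = 0; i < 8; i++)		/* mirror bit i into bit 7 - i */
		if (b & (1u << i))
			r |= 1u << (7 - i);
	return r;
}
/* e.g. bitrev8_sketch(0x80) == 0x01: queue 0 (MSB) lands on bit 0 */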
581 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_get_interface()
584 ecntrl = gfar_read(&regs->ecntrl); in gfar_get_interface()
601 phy_interface_t interface = priv->interface; in gfar_get_interface()
613 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) in gfar_get_interface()
626 struct device_node *np = ofdev->dev.of_node; in gfar_of_init()
634 return -ENODEV; in gfar_of_init()
648 num_grps = device_get_named_child_node_count(&ofdev->dev, in gfar_of_init()
649 "queue-group"); in gfar_of_init()
651 dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", in gfar_of_init()
654 return -EINVAL; in gfar_of_init()
665 return -EINVAL; in gfar_of_init()
672 return -EINVAL; in gfar_of_init()
678 return -ENOMEM; in gfar_of_init()
681 priv->ndev = dev; in gfar_of_init()
683 priv->mode = mode; in gfar_of_init()
685 priv->num_tx_queues = num_tx_qs; in gfar_of_init()
687 priv->num_rx_queues = num_rx_qs; in gfar_of_init()
704 INIT_LIST_HEAD(&priv->rx_list.list); in gfar_of_init()
705 priv->rx_list.count = 0; in gfar_of_init()
706 mutex_init(&priv->rx_queue_access); in gfar_of_init()
709 priv->gfargrp[i].regs = NULL; in gfar_of_init()
712 if (priv->mode == MQ_MG_MODE) { in gfar_of_init()
714 if (!of_node_name_eq(child, "queue-group")) in gfar_of_init()
729 if (of_property_read_bool(np, "bd-stash")) { in gfar_of_init()
730 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; in gfar_of_init()
731 priv->bd_stash_en = 1; in gfar_of_init()
734 err = of_property_read_u32(np, "rx-stash-len", &stash_len); in gfar_of_init()
737 priv->rx_stash_size = stash_len; in gfar_of_init()
739 err = of_property_read_u32(np, "rx-stash-idx", &stash_idx); in gfar_of_init()
742 priv->rx_stash_index = stash_idx; in gfar_of_init()
745 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; in gfar_of_init()
748 if (err == -EPROBE_DEFER) in gfar_of_init()
752 dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr); in gfar_of_init()
756 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | in gfar_of_init()
762 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | in gfar_of_init()
774 * rgmii-id really needs to be specified. Other types can be in gfar_of_init()
779 priv->interface = interface; in gfar_of_init()
781 priv->interface = gfar_get_interface(dev); in gfar_of_init()
783 if (of_property_read_bool(np, "fsl,magic-packet")) in gfar_of_init()
784 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; in gfar_of_init()
786 if (of_property_read_bool(np, "fsl,wake-on-filer")) in gfar_of_init()
787 priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER; in gfar_of_init()
789 priv->phy_node = of_parse_phandle(np, "phy-handle", 0); in gfar_of_init()
794 if (!priv->phy_node && of_phy_is_fixed_link(np)) { in gfar_of_init()
799 priv->phy_node = of_node_get(np); in gfar_of_init()
803 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); in gfar_of_init()
823 rqfar--; in cluster_entry_per_class()
825 priv->ftp_rqfpr[rqfar] = rqfpr; in cluster_entry_per_class()
826 priv->ftp_rqfcr[rqfar] = rqfcr; in cluster_entry_per_class()
829 rqfar--; in cluster_entry_per_class()
831 priv->ftp_rqfpr[rqfar] = rqfpr; in cluster_entry_per_class()
832 priv->ftp_rqfcr[rqfar] = rqfcr; in cluster_entry_per_class()
835 rqfar--; in cluster_entry_per_class()
838 priv->ftp_rqfcr[rqfar] = rqfcr; in cluster_entry_per_class()
839 priv->ftp_rqfpr[rqfar] = rqfpr; in cluster_entry_per_class()
842 rqfar--; in cluster_entry_per_class()
845 priv->ftp_rqfcr[rqfar] = rqfcr; in cluster_entry_per_class()
846 priv->ftp_rqfpr[rqfar] = rqfpr; in cluster_entry_per_class()
861 priv->ftp_rqfcr[rqfar] = rqfcr; in gfar_init_filer_table()
862 priv->ftp_rqfpr[rqfar] = rqfpr; in gfar_init_filer_table()
872 /* cur_filer_idx indicates the first non-masked rule */ in gfar_init_filer_table()
873 priv->cur_filer_idx = rqfar; in gfar_init_filer_table()
878 priv->ftp_rqfcr[i] = rqfcr; in gfar_init_filer_table()
879 priv->ftp_rqfpr[i] = rqfpr; in gfar_init_filer_table()
895 priv->errata |= GFAR_ERRATA_74; in __gfar_detect_errata_83xx()
900 priv->errata |= GFAR_ERRATA_76; in __gfar_detect_errata_83xx()
904 priv->errata |= GFAR_ERRATA_12; in __gfar_detect_errata_83xx()
912 priv->errata |= GFAR_ERRATA_12; in __gfar_detect_errata_85xx()
917 priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */ in __gfar_detect_errata_85xx()
923 struct device *dev = &priv->ofdev->dev; in gfar_detect_errata()
926 priv->errata |= GFAR_ERRATA_A002; in gfar_detect_errata()
931 else /* non-mpc85xx parts, i.e. e300 core based */ in gfar_detect_errata()
935 if (priv->errata) in gfar_detect_errata()
937 priv->errata); in gfar_detect_errata()
942 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_init_addr_hash_table()
944 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { in gfar_init_addr_hash_table()
945 priv->extended_hash = 1; in gfar_init_addr_hash_table()
946 priv->hash_width = 9; in gfar_init_addr_hash_table()
948 priv->hash_regs[0] = &regs->igaddr0; in gfar_init_addr_hash_table()
949 priv->hash_regs[1] = &regs->igaddr1; in gfar_init_addr_hash_table()
950 priv->hash_regs[2] = &regs->igaddr2; in gfar_init_addr_hash_table()
951 priv->hash_regs[3] = &regs->igaddr3; in gfar_init_addr_hash_table()
952 priv->hash_regs[4] = &regs->igaddr4; in gfar_init_addr_hash_table()
953 priv->hash_regs[5] = &regs->igaddr5; in gfar_init_addr_hash_table()
954 priv->hash_regs[6] = &regs->igaddr6; in gfar_init_addr_hash_table()
955 priv->hash_regs[7] = &regs->igaddr7; in gfar_init_addr_hash_table()
956 priv->hash_regs[8] = &regs->gaddr0; in gfar_init_addr_hash_table()
957 priv->hash_regs[9] = &regs->gaddr1; in gfar_init_addr_hash_table()
958 priv->hash_regs[10] = &regs->gaddr2; in gfar_init_addr_hash_table()
959 priv->hash_regs[11] = &regs->gaddr3; in gfar_init_addr_hash_table()
960 priv->hash_regs[12] = &regs->gaddr4; in gfar_init_addr_hash_table()
961 priv->hash_regs[13] = &regs->gaddr5; in gfar_init_addr_hash_table()
962 priv->hash_regs[14] = &regs->gaddr6; in gfar_init_addr_hash_table()
963 priv->hash_regs[15] = &regs->gaddr7; in gfar_init_addr_hash_table()
966 priv->extended_hash = 0; in gfar_init_addr_hash_table()
967 priv->hash_width = 8; in gfar_init_addr_hash_table()
969 priv->hash_regs[0] = &regs->gaddr0; in gfar_init_addr_hash_table()
970 priv->hash_regs[1] = &regs->gaddr1; in gfar_init_addr_hash_table()
971 priv->hash_regs[2] = &regs->gaddr2; in gfar_init_addr_hash_table()
972 priv->hash_regs[3] = &regs->gaddr3; in gfar_init_addr_hash_table()
973 priv->hash_regs[4] = &regs->gaddr4; in gfar_init_addr_hash_table()
974 priv->hash_regs[5] = &regs->gaddr5; in gfar_init_addr_hash_table()
975 priv->hash_regs[6] = &regs->gaddr6; in gfar_init_addr_hash_table()
976 priv->hash_regs[7] = &regs->gaddr7; in gfar_init_addr_hash_table()
990 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are in __gfar_is_rx_idle()
991 * the same as bits 23-30, the eTSEC Rx is assumed to be idle in __gfar_is_rx_idle()
994 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c); in __gfar_is_rx_idle()
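The masking that implements the comparison described in the comment above is elided from this excerpt; in-tree the driver masks the value with 0x7f807f80 and compares the two 16-bit halves. An equivalent, more explicit standalone sketch:

#include <stdint.h>

static int rx_idle_fields_match(uint32_t reg)
{
	uint32_t lo = (reg >> 7)  & 0xff;	/* bits 7-14  */
	uint32_t hi = (reg >> 23) & 0xff;	/* bits 23-30 */

	return lo == hi;	/* equal fields => Rx assumed idle */
}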
1005 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_halt_nodisable()
1007 unsigned int timeout; in gfar_halt_nodisable() local
1016 tempval = gfar_read(&regs->dmactrl); in gfar_halt_nodisable()
1018 gfar_write(&regs->dmactrl, tempval); in gfar_halt_nodisable()
1021 timeout = 1000; in gfar_halt_nodisable()
1022 while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) { in gfar_halt_nodisable()
1024 timeout--; in gfar_halt_nodisable()
1027 if (!timeout) in gfar_halt_nodisable()
1038 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_halt()
1042 gfar_write(&regs->rqueue, 0); in gfar_halt()
1043 gfar_write(&regs->tqueue, 0); in gfar_halt()
1050 tempval = gfar_read(&regs->maccfg1); in gfar_halt()
1052 gfar_write(&regs->maccfg1, tempval); in gfar_halt()
1058 struct gfar_private *priv = netdev_priv(tx_queue->dev); in free_skb_tx_queue()
1061 txbdp = tx_queue->tx_bd_base; in free_skb_tx_queue()
1063 for (i = 0; i < tx_queue->tx_ring_size; i++) { in free_skb_tx_queue()
1064 if (!tx_queue->tx_skbuff[i]) in free_skb_tx_queue()
1067 dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr), in free_skb_tx_queue()
1068 be16_to_cpu(txbdp->length), DMA_TO_DEVICE); in free_skb_tx_queue()
1069 txbdp->lstatus = 0; in free_skb_tx_queue()
1070 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; in free_skb_tx_queue()
1073 dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr), in free_skb_tx_queue()
1074 be16_to_cpu(txbdp->length), in free_skb_tx_queue()
1078 dev_kfree_skb_any(tx_queue->tx_skbuff[i]); in free_skb_tx_queue()
1079 tx_queue->tx_skbuff[i] = NULL; in free_skb_tx_queue()
1081 kfree(tx_queue->tx_skbuff); in free_skb_tx_queue()
1082 tx_queue->tx_skbuff = NULL; in free_skb_tx_queue()
1089 struct rxbd8 *rxbdp = rx_queue->rx_bd_base; in free_skb_rx_queue()
1091 dev_kfree_skb(rx_queue->skb); in free_skb_rx_queue()
1093 for (i = 0; i < rx_queue->rx_ring_size; i++) { in free_skb_rx_queue()
1094 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i]; in free_skb_rx_queue()
1096 rxbdp->lstatus = 0; in free_skb_rx_queue()
1097 rxbdp->bufPtr = 0; in free_skb_rx_queue()
1100 if (!rxb->page) in free_skb_rx_queue()
1103 dma_unmap_page(rx_queue->dev, rxb->dma, in free_skb_rx_queue()
1105 __free_page(rxb->page); in free_skb_rx_queue()
1107 rxb->page = NULL; in free_skb_rx_queue()
1110 kfree(rx_queue->rx_buff); in free_skb_rx_queue()
1111 rx_queue->rx_buff = NULL; in free_skb_rx_queue()
1124 for (i = 0; i < priv->num_tx_queues; i++) { in free_skb_resources()
1127 tx_queue = priv->tx_queue[i]; in free_skb_resources()
1128 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex); in free_skb_resources()
1129 if (tx_queue->tx_skbuff) in free_skb_resources()
1134 for (i = 0; i < priv->num_rx_queues; i++) { in free_skb_resources()
1135 rx_queue = priv->rx_queue[i]; in free_skb_resources()
1136 if (rx_queue->rx_buff) in free_skb_resources()
1140 dma_free_coherent(priv->dev, in free_skb_resources()
1141 sizeof(struct txbd8) * priv->total_tx_ring_size + in free_skb_resources()
1142 sizeof(struct rxbd8) * priv->total_rx_ring_size, in free_skb_resources()
1143 priv->tx_queue[0]->tx_bd_base, in free_skb_resources()
1144 priv->tx_queue[0]->tx_bd_dma_base); in free_skb_resources()
1154 set_bit(GFAR_DOWN, &priv->state); in stop_gfar()
1162 phy_stop(dev->phydev); in stop_gfar()
1169 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_start()
1174 gfar_write(&regs->rqueue, priv->rqueue); in gfar_start()
1175 gfar_write(&regs->tqueue, priv->tqueue); in gfar_start()
1178 tempval = gfar_read(&regs->dmactrl); in gfar_start()
1180 gfar_write(&regs->dmactrl, tempval); in gfar_start()
1183 tempval = gfar_read(&regs->dmactrl); in gfar_start()
1185 gfar_write(&regs->dmactrl, tempval); in gfar_start()
1187 for (i = 0; i < priv->num_grps; i++) { in gfar_start()
1188 regs = priv->gfargrp[i].regs; in gfar_start()
1190 gfar_write(&regs->tstat, priv->gfargrp[i].tstat); in gfar_start()
1191 gfar_write(&regs->rstat, priv->gfargrp[i].rstat); in gfar_start()
1195 tempval = gfar_read(&regs->maccfg1); in gfar_start()
1197 gfar_write(&regs->maccfg1, tempval); in gfar_start()
1201 netif_trans_update(priv->ndev); /* prevent tx timeout */ in gfar_start()
1213 addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); in gfar_new_page()
1214 if (unlikely(dma_mapping_error(rxq->dev, addr))) { in gfar_new_page()
1220 rxb->dma = addr; in gfar_new_page()
1221 rxb->page = page; in gfar_new_page()
1222 rxb->page_offset = 0; in gfar_new_page()
1229 struct gfar_private *priv = netdev_priv(rx_queue->ndev); in gfar_rx_alloc_err()
1230 struct gfar_extra_stats *estats = &priv->extra_stats; in gfar_rx_alloc_err()
1232 netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n"); in gfar_rx_alloc_err()
1233 atomic64_inc(&estats->rx_alloc_err); in gfar_rx_alloc_err()
1243 i = rx_queue->next_to_use; in gfar_alloc_rx_buffs()
1244 bdp = &rx_queue->rx_bd_base[i]; in gfar_alloc_rx_buffs()
1245 rxb = &rx_queue->rx_buff[i]; in gfar_alloc_rx_buffs()
1247 while (alloc_cnt--) { in gfar_alloc_rx_buffs()
1249 if (unlikely(!rxb->page)) { in gfar_alloc_rx_buffs()
1258 rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT); in gfar_alloc_rx_buffs()
1264 if (unlikely(++i == rx_queue->rx_ring_size)) { in gfar_alloc_rx_buffs()
1266 bdp = rx_queue->rx_bd_base; in gfar_alloc_rx_buffs()
1267 rxb = rx_queue->rx_buff; in gfar_alloc_rx_buffs()
1271 rx_queue->next_to_use = i; in gfar_alloc_rx_buffs()
1272 rx_queue->next_to_alloc = i; in gfar_alloc_rx_buffs()
1278 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_init_bds()
1285 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_init_bds()
1286 tx_queue = priv->tx_queue[i]; in gfar_init_bds()
1288 tx_queue->num_txbdfree = tx_queue->tx_ring_size; in gfar_init_bds()
1289 tx_queue->dirty_tx = tx_queue->tx_bd_base; in gfar_init_bds()
1290 tx_queue->cur_tx = tx_queue->tx_bd_base; in gfar_init_bds()
1291 tx_queue->skb_curtx = 0; in gfar_init_bds()
1292 tx_queue->skb_dirtytx = 0; in gfar_init_bds()
1295 txbdp = tx_queue->tx_bd_base; in gfar_init_bds()
1296 for (j = 0; j < tx_queue->tx_ring_size; j++) { in gfar_init_bds()
1297 txbdp->lstatus = 0; in gfar_init_bds()
1298 txbdp->bufPtr = 0; in gfar_init_bds()
1303 txbdp--; in gfar_init_bds()
1304 txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) | in gfar_init_bds()
1308 rfbptr = &regs->rfbptr0; in gfar_init_bds()
1309 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_init_bds()
1310 rx_queue = priv->rx_queue[i]; in gfar_init_bds()
1312 rx_queue->next_to_clean = 0; in gfar_init_bds()
1313 rx_queue->next_to_use = 0; in gfar_init_bds()
1314 rx_queue->next_to_alloc = 0; in gfar_init_bds()
1321 rx_queue->rfbptr = rfbptr; in gfar_init_bds()
1332 struct device *dev = priv->dev; in gfar_alloc_skb_resources()
1336 priv->total_tx_ring_size = 0; in gfar_alloc_skb_resources()
1337 for (i = 0; i < priv->num_tx_queues; i++) in gfar_alloc_skb_resources()
1338 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; in gfar_alloc_skb_resources()
1340 priv->total_rx_ring_size = 0; in gfar_alloc_skb_resources()
1341 for (i = 0; i < priv->num_rx_queues; i++) in gfar_alloc_skb_resources()
1342 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; in gfar_alloc_skb_resources()
1346 (priv->total_tx_ring_size * in gfar_alloc_skb_resources()
1348 (priv->total_rx_ring_size * in gfar_alloc_skb_resources()
1352 return -ENOMEM; in gfar_alloc_skb_resources()
1354 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_alloc_skb_resources()
1355 tx_queue = priv->tx_queue[i]; in gfar_alloc_skb_resources()
1356 tx_queue->tx_bd_base = vaddr; in gfar_alloc_skb_resources()
1357 tx_queue->tx_bd_dma_base = addr; in gfar_alloc_skb_resources()
1358 tx_queue->dev = ndev; in gfar_alloc_skb_resources()
1360 addr += sizeof(struct txbd8) * tx_queue->tx_ring_size; in gfar_alloc_skb_resources()
1361 vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size; in gfar_alloc_skb_resources()
1365 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_alloc_skb_resources()
1366 rx_queue = priv->rx_queue[i]; in gfar_alloc_skb_resources()
1367 rx_queue->rx_bd_base = vaddr; in gfar_alloc_skb_resources()
1368 rx_queue->rx_bd_dma_base = addr; in gfar_alloc_skb_resources()
1369 rx_queue->ndev = ndev; in gfar_alloc_skb_resources()
1370 rx_queue->dev = dev; in gfar_alloc_skb_resources()
1371 addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; in gfar_alloc_skb_resources()
1372 vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; in gfar_alloc_skb_resources()
1376 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_alloc_skb_resources()
1377 tx_queue = priv->tx_queue[i]; in gfar_alloc_skb_resources()
1378 tx_queue->tx_skbuff = in gfar_alloc_skb_resources()
1379 kmalloc_array(tx_queue->tx_ring_size, in gfar_alloc_skb_resources()
1380 sizeof(*tx_queue->tx_skbuff), in gfar_alloc_skb_resources()
1382 if (!tx_queue->tx_skbuff) in gfar_alloc_skb_resources()
1385 for (j = 0; j < tx_queue->tx_ring_size; j++) in gfar_alloc_skb_resources()
1386 tx_queue->tx_skbuff[j] = NULL; in gfar_alloc_skb_resources()
1389 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_alloc_skb_resources()
1390 rx_queue = priv->rx_queue[i]; in gfar_alloc_skb_resources()
1391 rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size, in gfar_alloc_skb_resources()
1392 sizeof(*rx_queue->rx_buff), in gfar_alloc_skb_resources()
1394 if (!rx_queue->rx_buff) in gfar_alloc_skb_resources()
1404 return -ENOMEM; in gfar_alloc_skb_resources()
1422 clear_bit(GFAR_DOWN, &priv->state); in startup_gfar()
1429 priv->oldlink = 0; in startup_gfar()
1430 priv->oldspeed = 0; in startup_gfar()
1431 priv->oldduplex = -1; in startup_gfar()
1433 phy_start(ndev->phydev); in startup_gfar()
1444 struct net_device *ndev = priv->ndev; in gfar_get_flowctrl_cfg()
1445 struct phy_device *phydev = ndev->phydev; in gfar_get_flowctrl_cfg()
1448 if (!phydev->duplex) in gfar_get_flowctrl_cfg()
1451 if (!priv->pause_aneg_en) { in gfar_get_flowctrl_cfg()
1452 if (priv->tx_pause_en) in gfar_get_flowctrl_cfg()
1454 if (priv->rx_pause_en) in gfar_get_flowctrl_cfg()
1461 if (phydev->pause) in gfar_get_flowctrl_cfg()
1463 if (phydev->asym_pause) in gfar_get_flowctrl_cfg()
1466 lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising); in gfar_get_flowctrl_cfg()
1479 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_update_link_state()
1480 struct net_device *ndev = priv->ndev; in gfar_update_link_state()
1481 struct phy_device *phydev = ndev->phydev; in gfar_update_link_state()
1485 if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) in gfar_update_link_state()
1488 if (phydev->link) { in gfar_update_link_state()
1489 u32 tempval1 = gfar_read(&regs->maccfg1); in gfar_update_link_state()
1490 u32 tempval = gfar_read(&regs->maccfg2); in gfar_update_link_state()
1491 u32 ecntrl = gfar_read(&regs->ecntrl); in gfar_update_link_state()
1494 if (phydev->duplex != priv->oldduplex) { in gfar_update_link_state()
1495 if (!(phydev->duplex)) in gfar_update_link_state()
1500 priv->oldduplex = phydev->duplex; in gfar_update_link_state()
1503 if (phydev->speed != priv->oldspeed) { in gfar_update_link_state()
1504 switch (phydev->speed) { in gfar_update_link_state()
1519 if (phydev->speed == SPEED_100) in gfar_update_link_state()
1525 netif_warn(priv, link, priv->ndev, in gfar_update_link_state()
1527 phydev->speed); in gfar_update_link_state()
1531 priv->oldspeed = phydev->speed; in gfar_update_link_state()
1539 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_update_link_state()
1542 rx_queue = priv->rx_queue[i]; in gfar_update_link_state()
1544 gfar_write(rx_queue->rfbptr, bdp_dma); in gfar_update_link_state()
1547 priv->tx_actual_en = 1; in gfar_update_link_state()
1551 priv->tx_actual_en = 0; in gfar_update_link_state()
1553 gfar_write(&regs->maccfg1, tempval1); in gfar_update_link_state()
1554 gfar_write(&regs->maccfg2, tempval); in gfar_update_link_state()
1555 gfar_write(&regs->ecntrl, ecntrl); in gfar_update_link_state()
1557 if (!priv->oldlink) in gfar_update_link_state()
1558 priv->oldlink = 1; in gfar_update_link_state()
1560 } else if (priv->oldlink) { in gfar_update_link_state()
1561 priv->oldlink = 0; in gfar_update_link_state()
1562 priv->oldspeed = 0; in gfar_update_link_state()
1563 priv->oldduplex = -1; in gfar_update_link_state()
1579 struct phy_device *phydev = dev->phydev; in adjust_link()
1581 if (unlikely(phydev->link != priv->oldlink || in adjust_link()
1582 (phydev->link && (phydev->duplex != priv->oldduplex || in adjust_link()
1583 phydev->speed != priv->oldspeed)))) in adjust_link()
1600 if (!priv->tbi_node) { in gfar_configure_serdes()
1601 dev_warn(&dev->dev, "error: SGMII mode requires that the " in gfar_configure_serdes()
1602 "device tree specify a tbi-handle\n"); in gfar_configure_serdes()
1606 tbiphy = of_phy_find_device(priv->tbi_node); in gfar_configure_serdes()
1608 dev_err(&dev->dev, "error: Could not get TBI device\n"); in gfar_configure_serdes()
1613 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured in gfar_configure_serdes()
1614 * everything for us? Resetting it takes the link down and requires in gfar_configure_serdes()
1618 put_device(&tbiphy->mdio.dev); in gfar_configure_serdes()
1633 put_device(&tbiphy->mdio.dev); in gfar_configure_serdes()
1642 phy_interface_t interface = priv->interface; in init_phy()
1646 priv->oldlink = 0; in init_phy()
1647 priv->oldspeed = 0; in init_phy()
1648 priv->oldduplex = -1; in init_phy()
1650 phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, in init_phy()
1653 dev_err(&dev->dev, "could not attach to PHY\n"); in init_phy()
1654 return -ENODEV; in init_phy()
1660 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)) in init_phy()
1686 * payload. We set it to checksum, using a pseudo-header in gfar_tx_checksum()
1694 if (ip_hdr(skb)->protocol == IPPROTO_UDP) { in gfar_tx_checksum()
1696 fcb->phcs = (__force __be16)(udp_hdr(skb)->check); in gfar_tx_checksum()
1698 fcb->phcs = (__force __be16)(tcp_hdr(skb)->check); in gfar_tx_checksum()
1701 * frame (skb->data) and the start of the IP hdr. in gfar_tx_checksum()
1705 fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length); in gfar_tx_checksum()
1706 fcb->l4os = skb_network_header_len(skb); in gfar_tx_checksum()
1708 fcb->flags = flags; in gfar_tx_checksum()
1713 fcb->flags |= TXFCB_VLN; in gfar_tx_vlan()
1714 fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb)); in gfar_tx_vlan()
1722 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd; in skip_txbd()
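skip_txbd() above wraps a descriptor pointer by subtraction rather than modulo, which is valid as long as the stride never exceeds the ring size. A standalone sketch of the idiom (struct bd stands in for txbd8):

#include <stddef.h>

struct bd { unsigned int lstatus, bufPtr; };

/* Advance by `stride` descriptors, wrapping at the end of the ring;
 * assumes stride <= ring_size, as in the driver.
 */
static struct bd *bd_skip(struct bd *bdp, size_t stride,
			  struct bd *base, size_t ring_size)
{
	struct bd *next = bdp + stride;

	return (next >= base + ring_size) ? next - ring_size : next;
}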
1750 * It is pointed to by the dev->hard_start_xmit function pointer
1767 rq = skb->queue_mapping; in gfar_start_xmit()
1768 tx_queue = priv->tx_queue[rq]; in gfar_start_xmit()
1770 base = tx_queue->tx_bd_base; in gfar_start_xmit()
1771 regs = tx_queue->grp->regs; in gfar_start_xmit()
1773 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed); in gfar_start_xmit()
1775 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in gfar_start_xmit()
1776 priv->hwts_tx_en; in gfar_start_xmit()
1788 dev->stats.tx_errors++; in gfar_start_xmit()
1795 nr_frags = skb_shinfo(skb)->nr_frags; in gfar_start_xmit()
1804 if (nr_txbds > tx_queue->num_txbdfree) { in gfar_start_xmit()
1807 dev->stats.tx_fifo_errors++; in gfar_start_xmit()
1812 bytes_sent = skb->len; in gfar_start_xmit()
1813 tx_queue->stats.tx_bytes += bytes_sent; in gfar_start_xmit()
1815 GFAR_CB(skb)->bytes_sent = bytes_sent; in gfar_start_xmit()
1816 tx_queue->stats.tx_packets++; in gfar_start_xmit()
1818 txbdp = txbdp_start = tx_queue->cur_tx; in gfar_start_xmit()
1819 lstatus = be32_to_cpu(txbdp->lstatus); in gfar_start_xmit()
1824 memset(skb->data, 0, GMAC_TXPAL_LEN); in gfar_start_xmit()
1838 unlikely(gfar_csum_errata_76(priv, skb->len))) { in gfar_start_xmit()
1855 bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb), in gfar_start_xmit()
1857 if (unlikely(dma_mapping_error(priv->dev, bufaddr))) in gfar_start_xmit()
1860 txbdp_start->bufPtr = cpu_to_be32(bufaddr); in gfar_start_xmit()
1865 tx_queue->tx_ring_size); in gfar_start_xmit()
1874 frag = &skb_shinfo(skb)->frags[0]; in gfar_start_xmit()
1879 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); in gfar_start_xmit()
1883 lstatus = be32_to_cpu(txbdp->lstatus) | size | in gfar_start_xmit()
1887 if (i == nr_frags - 1) in gfar_start_xmit()
1890 bufaddr = skb_frag_dma_map(priv->dev, frag, 0, in gfar_start_xmit()
1892 if (unlikely(dma_mapping_error(priv->dev, bufaddr))) in gfar_start_xmit()
1896 txbdp->bufPtr = cpu_to_be32(bufaddr); in gfar_start_xmit()
1897 txbdp->lstatus = cpu_to_be32(lstatus); in gfar_start_xmit()
1909 u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus); in gfar_start_xmit()
1911 bufaddr = be32_to_cpu(txbdp_start->bufPtr); in gfar_start_xmit()
1915 (skb_headlen(skb) - fcb_len); in gfar_start_xmit()
1919 txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr); in gfar_start_xmit()
1920 txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts); in gfar_start_xmit()
1924 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in gfar_start_xmit()
1925 fcb->ptp = 1; in gfar_start_xmit()
1935 txbdp_start->lstatus = cpu_to_be32(lstatus); in gfar_start_xmit()
1939 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; in gfar_start_xmit()
1944 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & in gfar_start_xmit()
1945 TX_RING_MOD_MASK(tx_queue->tx_ring_size); in gfar_start_xmit()
1947 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); in gfar_start_xmit()
1954 spin_lock_bh(&tx_queue->txlock); in gfar_start_xmit()
1956 tx_queue->num_txbdfree -= (nr_txbds); in gfar_start_xmit()
1957 spin_unlock_bh(&tx_queue->txlock); in gfar_start_xmit()
1960 * are full. We need to tell the kernel to stop sending us stuff. in gfar_start_xmit()
1962 if (!tx_queue->num_txbdfree) { in gfar_start_xmit()
1965 dev->stats.tx_fifo_errors++; in gfar_start_xmit()
1969 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); in gfar_start_xmit()
1974 txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size); in gfar_start_xmit()
1976 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); in gfar_start_xmit()
1978 lstatus = be32_to_cpu(txbdp->lstatus); in gfar_start_xmit()
1983 txbdp->lstatus = cpu_to_be32(lstatus); in gfar_start_xmit()
1984 bufaddr = be32_to_cpu(txbdp->bufPtr); in gfar_start_xmit()
1985 dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length), in gfar_start_xmit()
1987 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); in gfar_start_xmit()
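The index side of the same bookkeeping, the skb_curtx update at lines 1944-1945 above, wraps with TX_RING_MOD_MASK(), which (per gianfar.h) is just size - 1, so the increment-and-mask trick below only works for power-of-two ring sizes:

/* standalone sketch of the (i + 1) & (size - 1) wrap used for skb_curtx */
static unsigned int ring_next(unsigned int i, unsigned int ring_size)
{
	return (i + 1) & (ring_size - 1);	/* e.g. (255 + 1) & 255 == 0 */
}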
1997 gfar_set_mac_for_addr(dev, 0, dev->dev_addr); in gfar_set_mac_address()
2006 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) in gfar_change_mtu()
2009 if (dev->flags & IFF_UP) in gfar_change_mtu()
2012 WRITE_ONCE(dev->mtu, new_mtu); in gfar_change_mtu()
2014 if (dev->flags & IFF_UP) in gfar_change_mtu()
2017 clear_bit_unlock(GFAR_RESETTING, &priv->state); in gfar_change_mtu()
2026 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) in reset_gfar()
2032 clear_bit_unlock(GFAR_RESETTING, &priv->state); in reset_gfar()
2044 reset_gfar(priv->ndev); in gfar_reset_task()
2051 dev->stats.tx_errors++; in gfar_timeout()
2052 schedule_work(&priv->reset_task); in gfar_timeout()
2061 switch (config->tx_type) { in gfar_hwtstamp_set()
2063 priv->hwts_tx_en = 0; in gfar_hwtstamp_set()
2066 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) in gfar_hwtstamp_set()
2067 return -ERANGE; in gfar_hwtstamp_set()
2068 priv->hwts_tx_en = 1; in gfar_hwtstamp_set()
2071 return -ERANGE; in gfar_hwtstamp_set()
2074 switch (config->rx_filter) { in gfar_hwtstamp_set()
2076 if (priv->hwts_rx_en) { in gfar_hwtstamp_set()
2077 priv->hwts_rx_en = 0; in gfar_hwtstamp_set()
2082 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) in gfar_hwtstamp_set()
2083 return -ERANGE; in gfar_hwtstamp_set()
2084 if (!priv->hwts_rx_en) { in gfar_hwtstamp_set()
2085 priv->hwts_rx_en = 1; in gfar_hwtstamp_set()
2088 config->rx_filter = HWTSTAMP_FILTER_ALL; in gfar_hwtstamp_set()
2100 config->tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; in gfar_hwtstamp_get()
2101 config->rx_filter = priv->hwts_rx_en ? HWTSTAMP_FILTER_ALL : in gfar_hwtstamp_get()
2110 struct net_device *dev = tx_queue->dev; in gfar_clean_tx_ring()
2115 struct txbd8 *base = tx_queue->tx_bd_base; in gfar_clean_tx_ring()
2118 int tx_ring_size = tx_queue->tx_ring_size; in gfar_clean_tx_ring()
2122 int tqi = tx_queue->qindex; in gfar_clean_tx_ring()
2128 bdp = tx_queue->dirty_tx; in gfar_clean_tx_ring()
2129 skb_dirtytx = tx_queue->skb_dirtytx; in gfar_clean_tx_ring()
2131 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { in gfar_clean_tx_ring()
2134 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in gfar_clean_tx_ring()
2135 priv->hwts_tx_en; in gfar_clean_tx_ring()
2137 frags = skb_shinfo(skb)->nr_frags; in gfar_clean_tx_ring()
2147 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); in gfar_clean_tx_ring()
2149 lstatus = be32_to_cpu(lbdp->lstatus); in gfar_clean_tx_ring()
2158 buflen = be16_to_cpu(next->length) + in gfar_clean_tx_ring()
2161 buflen = be16_to_cpu(bdp->length); in gfar_clean_tx_ring()
2163 dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr), in gfar_clean_tx_ring()
2170 ns = (__be64 *)(((uintptr_t)skb->data + 0x10) & ~0x7UL); in gfar_clean_tx_ring()
2184 dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr), in gfar_clean_tx_ring()
2185 be16_to_cpu(bdp->length), in gfar_clean_tx_ring()
2191 bytes_sent += GFAR_CB(skb)->bytes_sent; in gfar_clean_tx_ring()
2195 tx_queue->tx_skbuff[skb_dirtytx] = NULL; in gfar_clean_tx_ring()
2201 spin_lock(&tx_queue->txlock); in gfar_clean_tx_ring()
2202 tx_queue->num_txbdfree += nr_txbds; in gfar_clean_tx_ring()
2203 spin_unlock(&tx_queue->txlock); in gfar_clean_tx_ring()
2207 if (tx_queue->num_txbdfree && in gfar_clean_tx_ring()
2209 !(test_bit(GFAR_DOWN, &priv->state))) in gfar_clean_tx_ring()
2210 netif_wake_subqueue(priv->ndev, tqi); in gfar_clean_tx_ring()
2213 tx_queue->skb_dirtytx = skb_dirtytx; in gfar_clean_tx_ring()
2214 tx_queue->dirty_tx = bdp; in gfar_clean_tx_ring()
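The timestamp lookup in gfar_clean_tx_ring() (line 2170 above) uses an align-down idiom: (p + 0x10) & ~0x7 lands on the highest 8-byte boundary at or below p + 16, where the hardware deposited the 64-bit timestamp. A standalone sketch:

#include <stdint.h>

static inline uintptr_t align_down(uintptr_t addr, uintptr_t align)
{
	return addr & ~(align - 1);	/* align must be a power of two */
}
/* e.g. align_down(0x1003 + 0x10, 8) == 0x1010 */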
2222 struct net_device_stats *stats = &ndev->stats; in count_errors()
2223 struct gfar_extra_stats *estats = &priv->extra_stats; in count_errors()
2227 stats->rx_length_errors++; in count_errors()
2229 atomic64_inc(&estats->rx_trunc); in count_errors()
2235 stats->rx_length_errors++; in count_errors()
2238 atomic64_inc(&estats->rx_large); in count_errors()
2240 atomic64_inc(&estats->rx_short); in count_errors()
2243 stats->rx_frame_errors++; in count_errors()
2244 atomic64_inc(&estats->rx_nonoctet); in count_errors()
2247 atomic64_inc(&estats->rx_crcerr); in count_errors()
2248 stats->rx_crc_errors++; in count_errors()
2251 atomic64_inc(&estats->rx_overrun); in count_errors()
2252 stats->rx_over_errors++; in count_errors()
2262 ievent = gfar_read(&grp->regs->ievent); in gfar_receive()
2265 gfar_write(&grp->regs->ievent, IEVENT_FGPI); in gfar_receive()
2269 if (likely(napi_schedule_prep(&grp->napi_rx))) { in gfar_receive()
2270 spin_lock_irqsave(&grp->grplock, flags); in gfar_receive()
2271 imask = gfar_read(&grp->regs->imask); in gfar_receive()
2272 imask &= IMASK_RX_DISABLED | grp->priv->rmon_overflow.imask; in gfar_receive()
2273 gfar_write(&grp->regs->imask, imask); in gfar_receive()
2274 spin_unlock_irqrestore(&grp->grplock, flags); in gfar_receive()
2275 __napi_schedule(&grp->napi_rx); in gfar_receive()
2280 gfar_write(&grp->regs->ievent, IEVENT_RX_MASK); in gfar_receive()
2293 if (likely(napi_schedule_prep(&grp->napi_tx))) { in gfar_transmit()
2294 spin_lock_irqsave(&grp->grplock, flags); in gfar_transmit()
2295 imask = gfar_read(&grp->regs->imask); in gfar_transmit()
2296 imask &= IMASK_TX_DISABLED | grp->priv->rmon_overflow.imask; in gfar_transmit()
2297 gfar_write(&grp->regs->imask, imask); in gfar_transmit()
2298 spin_unlock_irqrestore(&grp->grplock, flags); in gfar_transmit()
2299 __napi_schedule(&grp->napi_tx); in gfar_transmit()
2304 gfar_write(&grp->regs->ievent, IEVENT_TX_MASK); in gfar_transmit()
2314 struct page *page = rxb->page; in gfar_add_rx_frag()
2321 size -= skb->len; in gfar_add_rx_frag()
2327 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, in gfar_add_rx_frag()
2328 rxb->page_offset + RXBUF_ALIGNMENT, in gfar_add_rx_frag()
2337 rxb->page_offset ^= GFAR_RXB_TRUESIZE; in gfar_add_rx_frag()
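The XOR above is the RX page-recycling trick: each page is split into two buffer halves of GFAR_RXB_TRUESIZE bytes (2048 in gianfar.h, i.e. half a 4 KiB page), and flipping the offset lets the driver hand one half to the stack while re-arming the other. A standalone sketch, assuming that truesize value:

#define RXB_TRUESIZE 2048u	/* assumed value of GFAR_RXB_TRUESIZE */

static unsigned int flip_buf_half(unsigned int page_offset)
{
	return page_offset ^ RXB_TRUESIZE;	/* 0 <-> 2048 */
}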
2348 u16 nta = rxq->next_to_alloc; in gfar_reuse_rx_page()
2350 new_rxb = &rxq->rx_buff[nta]; in gfar_reuse_rx_page()
2354 rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0; in gfar_reuse_rx_page()
2360 dma_sync_single_range_for_device(rxq->dev, old_rxb->dma, in gfar_reuse_rx_page()
2361 old_rxb->page_offset, in gfar_reuse_rx_page()
2368 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean]; in gfar_get_next_rxbuff()
2369 struct page *page = rxb->page; in gfar_get_next_rxbuff()
2373 void *buff_addr = page_address(page) + rxb->page_offset; in gfar_get_next_rxbuff()
2384 dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset, in gfar_get_next_rxbuff()
2392 dma_unmap_page(rx_queue->dev, rxb->dma, in gfar_get_next_rxbuff()
2397 rxb->page = NULL; in gfar_get_next_rxbuff()
2408 if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) == in gfar_rx_checksum()
2410 skb->ip_summed = CHECKSUM_UNNECESSARY; in gfar_rx_checksum()
2415 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2422 fcb = (struct rxfcb *)skb->data; in gfar_process_frame()
2427 if (priv->uses_rxfcb) in gfar_process_frame()
2431 if (priv->hwts_rx_en) { in gfar_process_frame()
2433 __be64 *ns = (__be64 *)skb->data; in gfar_process_frame()
2436 shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns)); in gfar_process_frame()
2439 if (priv->padding) in gfar_process_frame()
2440 skb_pull(skb, priv->padding); in gfar_process_frame()
2443 pskb_trim(skb, skb->len - ETH_FCS_LEN); in gfar_process_frame()
2445 if (ndev->features & NETIF_F_RXCSUM) in gfar_process_frame()
2452 if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX && in gfar_process_frame()
2453 be16_to_cpu(fcb->flags) & RXFCB_VLN) in gfar_process_frame()
2455 be16_to_cpu(fcb->vlctl)); in gfar_process_frame()
2458 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2465 struct net_device *ndev = rx_queue->ndev; in gfar_clean_rx_ring()
2469 struct sk_buff *skb = rx_queue->skb; in gfar_clean_rx_ring()
2474 i = rx_queue->next_to_clean; in gfar_clean_rx_ring()
2476 while (rx_work_limit--) { in gfar_clean_rx_ring()
2484 bdp = &rx_queue->rx_bd_base[i]; in gfar_clean_rx_ring()
2485 lstatus = be32_to_cpu(bdp->lstatus); in gfar_clean_rx_ring()
2495 rx_queue->stats.rx_dropped++; in gfar_clean_rx_ring()
2511 if (unlikely(++i == rx_queue->rx_ring_size)) in gfar_clean_rx_ring()
2514 rx_queue->next_to_clean = i; in gfar_clean_rx_ring()
2526 rx_queue->stats.rx_dropped++; in gfar_clean_rx_ring()
2534 total_bytes += skb->len; in gfar_clean_rx_ring()
2536 skb_record_rx_queue(skb, rx_queue->qindex); in gfar_clean_rx_ring()
2538 skb->protocol = eth_type_trans(skb, ndev); in gfar_clean_rx_ring()
2541 napi_gro_receive(&rx_queue->grp->napi_rx, skb); in gfar_clean_rx_ring()
2547 rx_queue->skb = skb; in gfar_clean_rx_ring()
2549 rx_queue->stats.rx_packets += total_pkts; in gfar_clean_rx_ring()
2550 rx_queue->stats.rx_bytes += total_bytes; in gfar_clean_rx_ring()
2556 if (unlikely(priv->tx_actual_en)) { in gfar_clean_rx_ring()
2559 gfar_write(rx_queue->rfbptr, bdp_dma); in gfar_clean_rx_ring()
2569 struct gfar __iomem *regs = gfargrp->regs; in gfar_poll_rx_sq()
2570 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue; in gfar_poll_rx_sq()
2576 gfar_write(&regs->ievent, IEVENT_RX_MASK); in gfar_poll_rx_sq()
2584 gfar_write(&regs->rstat, gfargrp->rstat); in gfar_poll_rx_sq()
2586 spin_lock_irq(&gfargrp->grplock); in gfar_poll_rx_sq()
2587 imask = gfar_read(&regs->imask); in gfar_poll_rx_sq()
2589 gfar_write(&regs->imask, imask); in gfar_poll_rx_sq()
2590 spin_unlock_irq(&gfargrp->grplock); in gfar_poll_rx_sq()
2600 struct gfar __iomem *regs = gfargrp->regs; in gfar_poll_tx_sq()
2601 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue; in gfar_poll_tx_sq()
2607 gfar_write(&regs->ievent, IEVENT_TX_MASK); in gfar_poll_tx_sq()
2610 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) in gfar_poll_tx_sq()
2615 spin_lock_irq(&gfargrp->grplock); in gfar_poll_tx_sq()
2616 imask = gfar_read(&regs->imask); in gfar_poll_tx_sq()
2618 gfar_write(&regs->imask, imask); in gfar_poll_tx_sq()
2619 spin_unlock_irq(&gfargrp->grplock); in gfar_poll_tx_sq()
2628 struct gfar __iomem *regs = gfargrp->regs; in gfar_error()
2629 struct gfar_private *priv = gfargrp->priv; in gfar_error()
2630 struct net_device *dev = priv->ndev; in gfar_error()
2633 u32 events = gfar_read(&regs->ievent); in gfar_error()
2636 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK); in gfar_error()
2639 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && in gfar_error()
2647 events, gfar_read(&regs->imask)); in gfar_error()
2651 dev->stats.tx_errors++; in gfar_error()
2654 dev->stats.tx_window_errors++; in gfar_error()
2656 dev->stats.tx_aborted_errors++; in gfar_error()
2660 dev->stats.tx_dropped++; in gfar_error()
2661 atomic64_inc(&priv->extra_stats.tx_underrun); in gfar_error()
2663 schedule_work(&priv->reset_task); in gfar_error()
2668 struct rmon_mib __iomem *rmon = &regs->rmon; in gfar_error()
2671 spin_lock(&priv->rmon_overflow.lock); in gfar_error()
2672 car = gfar_read(&rmon->car1) & CAR1_C1RDR; in gfar_error()
2674 priv->rmon_overflow.rdrp++; in gfar_error()
2675 gfar_write(&rmon->car1, car); in gfar_error()
2677 spin_unlock(&priv->rmon_overflow.lock); in gfar_error()
2680 dev->stats.rx_over_errors++; in gfar_error()
2681 atomic64_inc(&priv->extra_stats.rx_bsy); in gfar_error()
2684 gfar_read(&regs->rstat)); in gfar_error()
2687 dev->stats.rx_errors++; in gfar_error()
2688 atomic64_inc(&priv->extra_stats.rx_babr); in gfar_error()
2693 atomic64_inc(&priv->extra_stats.eberr); in gfar_error()
2700 atomic64_inc(&priv->extra_stats.tx_babt); in gfar_error()
2712 u32 events = gfar_read(&gfargrp->regs->ievent); in gfar_interrupt()
2730 /* Polling 'interrupt' - used by things like netconsole to send skbs
2731 * without having to re-enable interrupts. It's not called while
2740 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { in gfar_netpoll()
2741 for (i = 0; i < priv->num_grps; i++) { in gfar_netpoll()
2742 struct gfar_priv_grp *grp = &priv->gfargrp[i]; in gfar_netpoll()
2744 disable_irq(gfar_irq(grp, TX)->irq); in gfar_netpoll()
2745 disable_irq(gfar_irq(grp, RX)->irq); in gfar_netpoll()
2746 disable_irq(gfar_irq(grp, ER)->irq); in gfar_netpoll()
2747 gfar_interrupt(gfar_irq(grp, TX)->irq, grp); in gfar_netpoll()
2748 enable_irq(gfar_irq(grp, ER)->irq); in gfar_netpoll()
2749 enable_irq(gfar_irq(grp, RX)->irq); in gfar_netpoll()
2750 enable_irq(gfar_irq(grp, TX)->irq); in gfar_netpoll()
2753 for (i = 0; i < priv->num_grps; i++) { in gfar_netpoll()
2754 struct gfar_priv_grp *grp = &priv->gfargrp[i]; in gfar_netpoll()
2756 disable_irq(gfar_irq(grp, TX)->irq); in gfar_netpoll()
2757 gfar_interrupt(gfar_irq(grp, TX)->irq, grp); in gfar_netpoll()
2758 enable_irq(gfar_irq(grp, TX)->irq); in gfar_netpoll()
2766 free_irq(gfar_irq(grp, TX)->irq, grp); in free_grp_irqs()
2767 free_irq(gfar_irq(grp, RX)->irq, grp); in free_grp_irqs()
2768 free_irq(gfar_irq(grp, ER)->irq, grp); in free_grp_irqs()
2773 struct gfar_private *priv = grp->priv; in register_grp_irqs()
2774 struct net_device *dev = priv->ndev; in register_grp_irqs()
2780 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { in register_grp_irqs()
2784 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, in register_grp_irqs()
2785 gfar_irq(grp, ER)->name, grp); in register_grp_irqs()
2788 gfar_irq(grp, ER)->irq); in register_grp_irqs()
2792 enable_irq_wake(gfar_irq(grp, ER)->irq); in register_grp_irqs()
2794 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, in register_grp_irqs()
2795 gfar_irq(grp, TX)->name, grp); in register_grp_irqs()
2798 gfar_irq(grp, TX)->irq); in register_grp_irqs()
2801 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0, in register_grp_irqs()
2802 gfar_irq(grp, RX)->name, grp); in register_grp_irqs()
2805 gfar_irq(grp, RX)->irq); in register_grp_irqs()
2808 enable_irq_wake(gfar_irq(grp, RX)->irq); in register_grp_irqs()
2811 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, in register_grp_irqs()
2812 gfar_irq(grp, TX)->name, grp); in register_grp_irqs()
2815 gfar_irq(grp, TX)->irq); in register_grp_irqs()
2818 enable_irq_wake(gfar_irq(grp, TX)->irq); in register_grp_irqs()
2824 free_irq(gfar_irq(grp, TX)->irq, grp); in register_grp_irqs()
2826 free_irq(gfar_irq(grp, ER)->irq, grp); in register_grp_irqs()
2837 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { in gfar_free_irq()
2838 for (i = 0; i < priv->num_grps; i++) in gfar_free_irq()
2839 free_grp_irqs(&priv->gfargrp[i]); in gfar_free_irq()
2841 for (i = 0; i < priv->num_grps; i++) in gfar_free_irq()
2842 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, in gfar_free_irq()
2843 &priv->gfargrp[i]); in gfar_free_irq()
2851 for (i = 0; i < priv->num_grps; i++) { in gfar_request_irq()
2852 err = register_grp_irqs(&priv->gfargrp[i]); in gfar_request_irq()
2855 free_grp_irqs(&priv->gfargrp[j]); in gfar_request_irq()
2891 cancel_work_sync(&priv->reset_task); in gfar_close()
2895 phy_disconnect(dev->phydev); in gfar_close()
2917 * whenever dev->flags is changed
2923 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_set_multi()
2926 if (dev->flags & IFF_PROMISC) { in gfar_set_multi()
2928 tempval = gfar_read(&regs->rctrl); in gfar_set_multi()
2930 gfar_write(&regs->rctrl, tempval); in gfar_set_multi()
2933 tempval = gfar_read(&regs->rctrl); in gfar_set_multi()
2935 gfar_write(&regs->rctrl, tempval); in gfar_set_multi()
2938 if (dev->flags & IFF_ALLMULTI) { in gfar_set_multi()
2940 gfar_write(&regs->igaddr0, 0xffffffff); in gfar_set_multi()
2941 gfar_write(&regs->igaddr1, 0xffffffff); in gfar_set_multi()
2942 gfar_write(&regs->igaddr2, 0xffffffff); in gfar_set_multi()
2943 gfar_write(&regs->igaddr3, 0xffffffff); in gfar_set_multi()
2944 gfar_write(&regs->igaddr4, 0xffffffff); in gfar_set_multi()
2945 gfar_write(&regs->igaddr5, 0xffffffff); in gfar_set_multi()
2946 gfar_write(&regs->igaddr6, 0xffffffff); in gfar_set_multi()
2947 gfar_write(&regs->igaddr7, 0xffffffff); in gfar_set_multi()
2948 gfar_write(&regs->gaddr0, 0xffffffff); in gfar_set_multi()
2949 gfar_write(&regs->gaddr1, 0xffffffff); in gfar_set_multi()
2950 gfar_write(&regs->gaddr2, 0xffffffff); in gfar_set_multi()
2951 gfar_write(&regs->gaddr3, 0xffffffff); in gfar_set_multi()
2952 gfar_write(&regs->gaddr4, 0xffffffff); in gfar_set_multi()
2953 gfar_write(&regs->gaddr5, 0xffffffff); in gfar_set_multi()
2954 gfar_write(&regs->gaddr6, 0xffffffff); in gfar_set_multi()
2955 gfar_write(&regs->gaddr7, 0xffffffff); in gfar_set_multi()
2961 gfar_write(&regs->igaddr0, 0x0); in gfar_set_multi()
2962 gfar_write(&regs->igaddr1, 0x0); in gfar_set_multi()
2963 gfar_write(&regs->igaddr2, 0x0); in gfar_set_multi()
2964 gfar_write(&regs->igaddr3, 0x0); in gfar_set_multi()
2965 gfar_write(&regs->igaddr4, 0x0); in gfar_set_multi()
2966 gfar_write(&regs->igaddr5, 0x0); in gfar_set_multi()
2967 gfar_write(&regs->igaddr6, 0x0); in gfar_set_multi()
2968 gfar_write(&regs->igaddr7, 0x0); in gfar_set_multi()
2969 gfar_write(&regs->gaddr0, 0x0); in gfar_set_multi()
2970 gfar_write(&regs->gaddr1, 0x0); in gfar_set_multi()
2971 gfar_write(&regs->gaddr2, 0x0); in gfar_set_multi()
2972 gfar_write(&regs->gaddr3, 0x0); in gfar_set_multi()
2973 gfar_write(&regs->gaddr4, 0x0); in gfar_set_multi()
2974 gfar_write(&regs->gaddr5, 0x0); in gfar_set_multi()
2975 gfar_write(&regs->gaddr6, 0x0); in gfar_set_multi()
2976 gfar_write(&regs->gaddr7, 0x0); in gfar_set_multi()
2982 if (priv->extended_hash) { in gfar_set_multi()
2997 gfar_set_mac_for_addr(dev, idx, ha->addr); in gfar_set_multi()
3000 gfar_set_hash_for_addr(dev, ha->addr); in gfar_set_multi()
3007 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_mac_reset()
3011 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET); in gfar_mac_reset()
3016 /* the soft reset bit is not self-resetting, so we need to in gfar_mac_reset()
3019 gfar_write(&regs->maccfg1, 0); in gfar_mac_reset()
3026 gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE); in gfar_mac_reset()
3027 gfar_write(&regs->mrblr, GFAR_RXB_SIZE); in gfar_mac_reset()
3030 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS); in gfar_mac_reset()
3035 /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1 in gfar_mac_reset()
3042 gfar_write(&regs->maccfg2, tempval); in gfar_mac_reset()
3045 gfar_write(&regs->igaddr0, 0); in gfar_mac_reset()
3046 gfar_write(&regs->igaddr1, 0); in gfar_mac_reset()
3047 gfar_write(&regs->igaddr2, 0); in gfar_mac_reset()
3048 gfar_write(&regs->igaddr3, 0); in gfar_mac_reset()
3049 gfar_write(&regs->igaddr4, 0); in gfar_mac_reset()
3050 gfar_write(&regs->igaddr5, 0); in gfar_mac_reset()
3051 gfar_write(&regs->igaddr6, 0); in gfar_mac_reset()
3052 gfar_write(&regs->igaddr7, 0); in gfar_mac_reset()
3054 gfar_write(&regs->gaddr0, 0); in gfar_mac_reset()
3055 gfar_write(&regs->gaddr1, 0); in gfar_mac_reset()
3056 gfar_write(&regs->gaddr2, 0); in gfar_mac_reset()
3057 gfar_write(&regs->gaddr3, 0); in gfar_mac_reset()
3058 gfar_write(&regs->gaddr4, 0); in gfar_mac_reset()
3059 gfar_write(&regs->gaddr5, 0); in gfar_mac_reset()
3060 gfar_write(&regs->gaddr6, 0); in gfar_mac_reset()
3061 gfar_write(&regs->gaddr7, 0); in gfar_mac_reset()
3063 if (priv->extended_hash) in gfar_mac_reset()
3064 gfar_clear_exact_match(priv->ndev); in gfar_mac_reset()
3070 gfar_set_mac_address(priv->ndev); in gfar_mac_reset()
3072 gfar_set_multi(priv->ndev); in gfar_mac_reset()
3083 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_hw_init()
3094 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { in gfar_hw_init()
3095 memset_io(&regs->rmon, 0, offsetof(struct rmon_mib, car1)); in gfar_hw_init()
3098 gfar_write(&regs->rmon.cam1, 0xffffffff); in gfar_hw_init()
3099 gfar_write(&regs->rmon.cam2, 0xffffffff); in gfar_hw_init()
3101 gfar_write(&regs->rmon.car1, 0xffffffff); in gfar_hw_init()
3102 gfar_write(&regs->rmon.car2, 0xffffffff); in gfar_hw_init()
3106 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS); in gfar_hw_init()
3109 attrs = ATTRELI_EL(priv->rx_stash_size) | in gfar_hw_init()
3110 ATTRELI_EI(priv->rx_stash_index); in gfar_hw_init()
3112 gfar_write(&regs->attreli, attrs); in gfar_hw_init()
3119 if (priv->bd_stash_en) in gfar_hw_init()
3122 if (priv->rx_stash_size != 0) in gfar_hw_init()
3125 gfar_write(&regs->attr, attrs); in gfar_hw_init()
3128 gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR); in gfar_hw_init()
3129 gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE); in gfar_hw_init()
3130 gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF); in gfar_hw_init()
3133 if (priv->num_grps > 1) in gfar_hw_init()
3162 struct device_node *np = ofdev->dev.of_node; in gfar_probe()
3173 priv->ndev = dev; in gfar_probe()
3174 priv->ofdev = ofdev; in gfar_probe()
3175 priv->dev = &ofdev->dev; in gfar_probe()
3176 SET_NETDEV_DEV(dev, &ofdev->dev); in gfar_probe()
3178 INIT_WORK(&priv->reset_task, gfar_reset_task); in gfar_probe()
3184 /* Set the dev->base_addr to the gfar reg region */ in gfar_probe()
3185 dev->base_addr = (unsigned long) priv->gfargrp[0].regs; in gfar_probe()
3188 dev->watchdog_timeo = TX_TIMEOUT; in gfar_probe()
3189 /* MTU range: 50 - 9586 */ in gfar_probe()
3190 dev->mtu = 1500; in gfar_probe()
3191 dev->min_mtu = 50; in gfar_probe()
3192 dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN; in gfar_probe()
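/* Editor's note: with GFAR_JUMBO_FRAME_SIZE = 9600 and ETH_HLEN = 14, the
 * maximum MTU works out to 9600 - 14 = 9586, matching the "MTU range:
 * 50 - 9586" comment above.
 */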
3193 dev->netdev_ops = &gfar_netdev_ops; in gfar_probe()
3194 dev->ethtool_ops = &gfar_ethtool_ops; in gfar_probe()
3197 for (i = 0; i < priv->num_grps; i++) { in gfar_probe()
3198 netif_napi_add(dev, &priv->gfargrp[i].napi_rx, in gfar_probe()
3200 netif_napi_add_tx_weight(dev, &priv->gfargrp[i].napi_tx, in gfar_probe()
3204 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { in gfar_probe()
3205 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | in gfar_probe()
3207 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | in gfar_probe()
3211 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { in gfar_probe()
3212 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | in gfar_probe()
3214 dev->features |= NETIF_F_HW_VLAN_CTAG_RX; in gfar_probe()
3217 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; in gfar_probe()
3224 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) in gfar_probe()
3225 priv->padding = 8 + DEFAULT_PADDING; in gfar_probe()
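/* Editor's note: when the eTSEC timer block is present, the extra eight
 * bytes of Rx padding leave room for the hardware-inserted receive
 * timestamp ahead of the frame payload.
 */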
3227 if (dev->features & NETIF_F_IP_CSUM || in gfar_probe()
3228 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) in gfar_probe()
3229 dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN; in gfar_probe()
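/* Editor's note: GMAC_FCB_LEN reserves headroom for the hardware frame
 * control block (used for Tx checksum offload and timestamping) and
 * GMAC_TXPAL_LEN for the Tx padding/alignment area, so the stack hands the
 * driver skbs it can prepend to without reallocating.
 */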
3232 for (i = 0; i < priv->num_tx_queues; i++) { in gfar_probe()
3233 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; in gfar_probe()
3234 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; in gfar_probe()
3235 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; in gfar_probe()
3236 priv->tx_queue[i]->txic = DEFAULT_TXIC; in gfar_probe()
3239 for (i = 0; i < priv->num_rx_queues; i++) { in gfar_probe()
3240 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; in gfar_probe()
3241 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; in gfar_probe()
3242 priv->rx_queue[i]->rxic = DEFAULT_RXIC; in gfar_probe()
3246 priv->rx_filer_enable = in gfar_probe()
3247 (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0; in gfar_probe()
3249 priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1; in gfar_probe()
3251 if (priv->num_tx_queues == 1) in gfar_probe()
3252 priv->prio_sched_en = 1; in gfar_probe()
3254 set_bit(GFAR_DOWN, &priv->state); in gfar_probe()
3258 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { in gfar_probe()
3259 struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon; in gfar_probe()
3261 spin_lock_init(&priv->rmon_overflow.lock); in gfar_probe()
3262 priv->rmon_overflow.imask = IMASK_MSRO; in gfar_probe()
3263 gfar_write(&rmon->cam1, gfar_read(&rmon->cam1) & ~CAM1_M1RDR); in gfar_probe()
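/* Editor's note: clearing CAM1_M1RDR unmasks the carry of the 16-bit RDRP
 * (receive drop) counter; together with the IMASK_MSRO setting above this
 * lets the driver accumulate counter overflows in software via
 * rmon_overflow.
 */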
3272 pr_err("%s: Cannot register net device, aborting\n", dev->name); in gfar_probe()
3276 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) in gfar_probe()
3277 priv->wol_supported |= GFAR_WOL_MAGIC; in gfar_probe()
3279 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) && in gfar_probe()
3280 priv->rx_filer_enable) in gfar_probe()
3281 priv->wol_supported |= GFAR_WOL_FILER_UCAST; in gfar_probe()
3283 device_set_wakeup_capable(&ofdev->dev, priv->wol_supported); in gfar_probe()
3286 for (i = 0; i < priv->num_grps; i++) { in gfar_probe()
3287 struct gfar_priv_grp *grp = &priv->gfargrp[i]; in gfar_probe()
3288 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { in gfar_probe()
3289 sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s", in gfar_probe()
3290 dev->name, "_g", '0' + i, "_tx"); in gfar_probe()
3291 sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s", in gfar_probe()
3292 dev->name, "_g", '0' + i, "_rx"); in gfar_probe()
3293 sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s", in gfar_probe()
3294 dev->name, "_g", '0' + i, "_er"); in gfar_probe()
3296 strcpy(gfar_irq(grp, TX)->name, dev->name); in gfar_probe()
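/* Editor's note: with multiple interrupts per group the format strings above
 * produce names such as "eth0_g0_tx", "eth0_g0_rx" and "eth0_g0_er", while
 * single-interrupt devices (this else branch) simply reuse the netdev name.
 */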
3303 netdev_info(dev, "mac: %pM\n", dev->dev_addr); in gfar_probe()
3309 for (i = 0; i < priv->num_rx_queues; i++) in gfar_probe()
3311 i, priv->rx_queue[i]->rx_ring_size); in gfar_probe()
3312 for (i = 0; i < priv->num_tx_queues; i++) in gfar_probe()
3314 i, priv->tx_queue[i]->tx_ring_size); in gfar_probe()
3324 of_node_put(priv->phy_node); in gfar_probe()
3325 of_node_put(priv->tbi_node); in gfar_probe()
3333 struct device_node *np = ofdev->dev.of_node; in gfar_remove()
3335 of_node_put(priv->phy_node); in gfar_remove()
3336 of_node_put(priv->tbi_node); in gfar_remove()
3338 unregister_netdev(priv->ndev); in gfar_remove()
3353 struct gfar __iomem *regs = priv->gfargrp[0].regs; in __gfar_filer_disable()
3356 temp = gfar_read(&regs->rctrl); in __gfar_filer_disable()
3358 gfar_write(&regs->rctrl, temp); in __gfar_filer_disable()
3363 struct gfar __iomem *regs = priv->gfargrp[0].regs; in __gfar_filer_enable()
3366 temp = gfar_read(&regs->rctrl); in __gfar_filer_enable()
3368 gfar_write(&regs->rctrl, temp); in __gfar_filer_enable()
3385 if (priv->wol_opts & GFAR_WOL_FILER_UCAST) { in gfar_filer_config_wol()
3387 struct net_device *ndev = priv->ndev; in gfar_filer_config_wol()
3389 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex; in gfar_filer_config_wol()
3390 u32 dest_mac_addr = (ndev->dev_addr[0] << 16) | in gfar_filer_config_wol()
3391 (ndev->dev_addr[1] << 8) | in gfar_filer_config_wol()
3392 ndev->dev_addr[2]; in gfar_filer_config_wol()
3399 dest_mac_addr = (ndev->dev_addr[3] << 16) | in gfar_filer_config_wol()
3400 (ndev->dev_addr[4] << 8) | in gfar_filer_config_wol()
3401 ndev->dev_addr[5]; in gfar_filer_config_wol()
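/* Editor's note: the station MAC address is split into two 24-bit halves
 * because, assuming the usual DAH/DAL filer properties, each rule can match
 * at most three bytes of the destination address; the elided lines write one
 * RQFCR/RQFPR rule pair per half, steering matching unicast wake frames to
 * the rx_queue selected by qindex.
 */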
3418 rqfcr = priv->ftp_rqfcr[i]; in gfar_filer_restore_table()
3419 rqfpr = priv->ftp_rqfpr[i]; in gfar_filer_restore_table()
3429 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_start_wol_filer()
3434 gfar_write(&regs->rqueue, priv->rqueue); in gfar_start_wol_filer()
3437 tempval = gfar_read(&regs->dmactrl); in gfar_start_wol_filer()
3439 gfar_write(&regs->dmactrl, tempval); in gfar_start_wol_filer()
3442 tempval = gfar_read(&regs->dmactrl); in gfar_start_wol_filer()
3444 gfar_write(&regs->dmactrl, tempval); in gfar_start_wol_filer()
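/* Editor's note: the two read-modify-write passes above mirror the normal
 * start path: the first applies the DMACTRL init settings, the second clears
 * the graceful Rx/Tx stop bits so DMA resumes before the filer
 * general-purpose interrupt (IMASK_FGPI) is unmasked below.
 */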
3446 for (i = 0; i < priv->num_grps; i++) { in gfar_start_wol_filer()
3447 regs = priv->gfargrp[i].regs; in gfar_start_wol_filer()
3449 gfar_write(&regs->rstat, priv->gfargrp[i].rstat); in gfar_start_wol_filer()
3451 gfar_write(&regs->imask, IMASK_FGPI); in gfar_start_wol_filer()
3455 tempval = gfar_read(&regs->maccfg1); in gfar_start_wol_filer()
3457 gfar_write(&regs->maccfg1, tempval); in gfar_start_wol_filer()
3463 struct net_device *ndev = priv->ndev; in gfar_suspend()
3464 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_suspend()
3466 u16 wol = priv->wol_opts; in gfar_suspend()
3480 gfar_write(&regs->imask, IMASK_MAG); in gfar_suspend()
3483 tempval = gfar_read(&regs->maccfg2); in gfar_suspend()
3485 gfar_write(&regs->maccfg2, tempval); in gfar_suspend()
3487 /* re-enable the Rx block */ in gfar_suspend()
3488 tempval = gfar_read(&regs->maccfg1); in gfar_suspend()
3490 gfar_write(&regs->maccfg1, tempval); in gfar_suspend()
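/* Editor's note: for magic-packet wake the receiver must stay alive: every
 * interrupt source except MAG is masked, MACCFG2's magic-packet mode is
 * enabled, and the Rx block is re-enabled so the MAC can recognize the wake
 * frame while the rest of the controller remains halted.
 */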
3497 phy_stop(ndev->phydev); in gfar_suspend()
3506 struct net_device *ndev = priv->ndev; in gfar_resume()
3507 struct gfar __iomem *regs = priv->gfargrp[0].regs; in gfar_resume()
3509 u16 wol = priv->wol_opts; in gfar_resume()
3516 tempval = gfar_read(&regs->maccfg2); in gfar_resume()
3518 gfar_write(&regs->maccfg2, tempval); in gfar_resume()
3526 phy_start(ndev->phydev); in gfar_resume()
3540 struct net_device *ndev = priv->ndev; in gfar_restore()
3556 priv->oldlink = 0; in gfar_restore()
3557 priv->oldspeed = 0; in gfar_restore()
3558 priv->oldduplex = -1; in gfar_restore()
3560 if (ndev->phydev) in gfar_restore()
3561 phy_start(ndev->phydev); in gfar_restore()
3572 .freeze = gfar_suspend,
3601 .name = "fsl-gianfar",