Lines Matching +full:tx +full:- +full:csum +full:- +full:limit
1 // SPDX-License-Identifier: GPL-2.0-only
7 * Copyright (c) 2009 - 2010 Guo-Fu Tseng <cooldavid@cooldavid.org>
9 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
35 static int force_pseudohp = -1;
36 static int no_pseudohp = -1;
37 static int no_extplug = -1;
40 "Enable pseudo hot-plug feature manually by driver instead of BIOS.");
42 MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature.");
45 "Do not use external plug signal for pseudo hot-plug.");
59 for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { in jme_mdio_read()
71 if (again--) in jme_mdio_read()
89 for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { in jme_mdio_write()
104 jme_mdio_write(jme->dev, in jme_reset_phy_processor()
105 jme->mii_if.phy_id, in jme_reset_phy_processor()
109 if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) in jme_reset_phy_processor()
110 jme_mdio_write(jme->dev, in jme_reset_phy_processor()
111 jme->mii_if.phy_id, in jme_reset_phy_processor()
115 val = jme_mdio_read(jme->dev, in jme_reset_phy_processor()
116 jme->mii_if.phy_id, in jme_reset_phy_processor()
119 jme_mdio_write(jme->dev, in jme_reset_phy_processor()
120 jme->mii_if.phy_id, in jme_reset_phy_processor()
154 jme->reg_gpreg1 |= GPREG1_RXCLKOFF; in jme_mac_rxclk_off()
155 jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1); in jme_mac_rxclk_off()
161 jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF; in jme_mac_rxclk_on()
162 jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1); in jme_mac_rxclk_on()
168 jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC); in jme_mac_txclk_off()
169 jwrite32f(jme, JME_GHC, jme->reg_ghc); in jme_mac_txclk_off()
175 u32 speed = jme->reg_ghc & GHC_SPEED; in jme_mac_txclk_on()
177 jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY; in jme_mac_txclk_on()
179 jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE; in jme_mac_txclk_on()
180 jwrite32f(jme, JME_GHC, jme->reg_ghc); in jme_mac_txclk_on()
186 jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX); in jme_reset_ghc_speed()
187 jwrite32f(jme, JME_GHC, jme->reg_ghc); in jme_reset_ghc_speed()
193 jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH | in jme_reset_250A2_workaround()
195 jwrite32(jme, JME_GPREG1, jme->reg_gpreg1); in jme_reset_250A2_workaround()
201 jme->reg_ghc |= GHC_SWRST; in jme_assert_ghc_reset()
202 jwrite32f(jme, JME_GHC, jme->reg_ghc); in jme_assert_ghc_reset()
208 jme->reg_ghc &= ~GHC_SWRST; in jme_clear_ghc_reset()
209 jwrite32f(jme, JME_GHC, jme->reg_ghc); in jme_clear_ghc_reset()
252 if (jme->fpgaver) in jme_reset_mac_processor()
262 jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs); in jme_clear_pm_enable_wol()
286 for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) { in jme_reload_eeprom()
294 return -EIO; in jme_reload_eeprom()
308 spin_lock_bh(&jme->macaddr_lock); in jme_load_macaddr()
318 spin_unlock_bh(&jme->macaddr_lock); in jme_load_macaddr()
350 if (!(test_bit(JME_FLAG_POLL, &jme->flags))) in jme_set_rx_pcc()
351 netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p); in jme_set_rx_pcc()
357 register struct dynpcc_info *dpi = &(jme->dpi); in jme_start_irq()
360 dpi->cur = PCC_P1; in jme_start_irq()
361 dpi->attempt = PCC_P1; in jme_start_irq()
362 dpi->cnt = 0; in jme_start_irq()
390 phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17); in jme_linkstat_from_phy()
391 bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR); in jme_linkstat_from_phy()
401 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004); in jme_set_phyfifo_5level()
407 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000); in jme_set_phyfifo_8level()
420 if (jme->fpgaver) in jme_check_link()
433 bmcr = jme_mdio_read(jme->dev, in jme_check_link()
434 jme->mii_if.phy_id, in jme_check_link()
453 --cnt) { in jme_check_link()
457 if (jme->fpgaver) in jme_check_link()
468 if (jme->phylink == phylink) { in jme_check_link()
475 jme->phylink = phylink; in jme_check_link()
478 * The speed/duplex setting of jme->reg_ghc was already cleared in jme_check_link()
483 jme->reg_ghc |= GHC_SPEED_10M; in jme_check_link()
487 jme->reg_ghc |= GHC_SPEED_100M; in jme_check_link()
491 jme->reg_ghc |= GHC_SPEED_1000M; in jme_check_link()
501 jme->reg_ghc |= GHC_DPX; in jme_check_link()
510 jwrite32(jme, JME_GHC, jme->reg_ghc); in jme_check_link()
512 if (is_buggy250(jme->pdev->device, jme->chiprev)) { in jme_check_link()
513 jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH | in jme_check_link()
516 jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH; in jme_check_link()
520 jme->reg_gpreg1 |= GPREG1_RSSPATCH; in jme_check_link()
524 jme->reg_gpreg1 |= GPREG1_RSSPATCH; in jme_check_link()
533 jwrite32(jme, JME_GPREG1, jme->reg_gpreg1); in jme_check_link()
536 "Full-Duplex, " : in jme_check_link()
537 "Half-Duplex, "); in jme_check_link()
539 "MDI-X" : in jme_check_link()
541 netif_info(jme, link, jme->dev, "Link is up at %s\n", linkmsg); in jme_check_link()
547 netif_info(jme, link, jme->dev, "Link is down\n"); in jme_check_link()
548 jme->phylink = 0; in jme_check_link()
559 struct jme_ring *txring = &(jme->txring[0]); in jme_setup_tx_resources()
561 txring->alloc = dma_alloc_coherent(&(jme->pdev->dev), in jme_setup_tx_resources()
562 TX_RING_ALLOC_SIZE(jme->tx_ring_size), in jme_setup_tx_resources()
563 &(txring->dmaalloc), in jme_setup_tx_resources()
566 if (!txring->alloc) in jme_setup_tx_resources()
572 txring->desc = (void *)ALIGN((unsigned long)(txring->alloc), in jme_setup_tx_resources()
574 txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN); in jme_setup_tx_resources()
575 txring->next_to_use = 0; in jme_setup_tx_resources()
576 atomic_set(&txring->next_to_clean, 0); in jme_setup_tx_resources()
577 atomic_set(&txring->nr_free, jme->tx_ring_size); in jme_setup_tx_resources()
579 txring->bufinf = kcalloc(jme->tx_ring_size, in jme_setup_tx_resources()
582 if (unlikely(!(txring->bufinf))) in jme_setup_tx_resources()
588 dma_free_coherent(&(jme->pdev->dev), in jme_setup_tx_resources()
589 TX_RING_ALLOC_SIZE(jme->tx_ring_size), in jme_setup_tx_resources()
590 txring->alloc, in jme_setup_tx_resources()
591 txring->dmaalloc); in jme_setup_tx_resources()
594 txring->desc = NULL; in jme_setup_tx_resources()
595 txring->dmaalloc = 0; in jme_setup_tx_resources()
596 txring->dma = 0; in jme_setup_tx_resources()
597 txring->bufinf = NULL; in jme_setup_tx_resources()
599 return -ENOMEM; in jme_setup_tx_resources()
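/*
 * Editor's sketch (not from jme.c): jme_setup_tx_resources() above
 * over-allocates the descriptor ring, then aligns both the CPU pointer
 * and the DMA handle to RING_DESC_ALIGN, since the raw allocation is not
 * guaranteed to have the alignment the hardware wants. A minimal
 * userspace illustration of the same pointer math, assuming a 16-byte
 * descriptor and the kernel's ALIGN() semantics:
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define RING_DESC_ALIGN 16UL			/* assumed alignment */
#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	size_t n = 8, desc_size = 16;
	/* Over-allocate so an aligned ring of n descriptors always fits. */
	void *alloc = malloc(n * desc_size + RING_DESC_ALIGN);
	void *ring = (void *)ALIGN((uintptr_t)alloc, RING_DESC_ALIGN);

	printf("raw %p -> aligned %p\n", alloc, ring);
	free(alloc);
	return 0;
}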
606 struct jme_ring *txring = &(jme->txring[0]); in jme_free_tx_resources()
609 if (txring->alloc) { in jme_free_tx_resources()
610 if (txring->bufinf) { in jme_free_tx_resources()
611 for (i = 0 ; i < jme->tx_ring_size ; ++i) { in jme_free_tx_resources()
612 txbi = txring->bufinf + i; in jme_free_tx_resources()
613 if (txbi->skb) { in jme_free_tx_resources()
614 dev_kfree_skb(txbi->skb); in jme_free_tx_resources()
615 txbi->skb = NULL; in jme_free_tx_resources()
617 txbi->mapping = 0; in jme_free_tx_resources()
618 txbi->len = 0; in jme_free_tx_resources()
619 txbi->nr_desc = 0; in jme_free_tx_resources()
620 txbi->start_xmit = 0; in jme_free_tx_resources()
622 kfree(txring->bufinf); in jme_free_tx_resources()
625 dma_free_coherent(&(jme->pdev->dev), in jme_free_tx_resources()
626 TX_RING_ALLOC_SIZE(jme->tx_ring_size), in jme_free_tx_resources()
627 txring->alloc, in jme_free_tx_resources()
628 txring->dmaalloc); in jme_free_tx_resources()
630 txring->alloc = NULL; in jme_free_tx_resources()
631 txring->desc = NULL; in jme_free_tx_resources()
632 txring->dmaalloc = 0; in jme_free_tx_resources()
633 txring->dma = 0; in jme_free_tx_resources()
634 txring->bufinf = NULL; in jme_free_tx_resources()
636 txring->next_to_use = 0; in jme_free_tx_resources()
637 atomic_set(&txring->next_to_clean, 0); in jme_free_tx_resources()
638 atomic_set(&txring->nr_free, 0); in jme_free_tx_resources()
651 * Setup TX Queue 0 DMA Base Address in jme_enable_tx_engine()
653 jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL); in jme_enable_tx_engine()
654 jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32); in jme_enable_tx_engine()
655 jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL); in jme_enable_tx_engine()
658 * Setup TX Descriptor Count in jme_enable_tx_engine()
660 jwrite32(jme, JME_TXQDC, jme->tx_ring_size); in jme_enable_tx_engine()
663 * Enable TX Engine in jme_enable_tx_engine()
666 jwrite32f(jme, JME_TXCS, jme->reg_txcs | in jme_enable_tx_engine()
671 * Start clock for TX MAC Processor in jme_enable_tx_engine()
683 * Disable TX Engine in jme_disable_tx_engine()
685 jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0); in jme_disable_tx_engine()
689 for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) { in jme_disable_tx_engine()
696 pr_err("Disabling TX engine timed out\n"); in jme_disable_tx_engine()
699 * Stop clock for TX MAC Processor in jme_disable_tx_engine()
707 struct jme_ring *rxring = &(jme->rxring[0]); in jme_set_clean_rxdesc()
708 register struct rxdesc *rxdesc = rxring->desc; in jme_set_clean_rxdesc()
709 struct jme_buffer_info *rxbi = rxring->bufinf; in jme_set_clean_rxdesc()
713 rxdesc->dw[0] = 0; in jme_set_clean_rxdesc()
714 rxdesc->dw[1] = 0; in jme_set_clean_rxdesc()
715 rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32); in jme_set_clean_rxdesc()
716 rxdesc->desc1.bufaddrl = cpu_to_le32( in jme_set_clean_rxdesc()
717 (__u64)rxbi->mapping & 0xFFFFFFFFUL); in jme_set_clean_rxdesc()
718 rxdesc->desc1.datalen = cpu_to_le16(rxbi->len); in jme_set_clean_rxdesc()
719 if (jme->dev->features & NETIF_F_HIGHDMA) in jme_set_clean_rxdesc()
720 rxdesc->desc1.flags = RXFLAG_64BIT; in jme_set_clean_rxdesc()
722 rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT; in jme_set_clean_rxdesc()
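/*
 * Editor's sketch: jme_set_clean_rxdesc() above splits the 64-bit DMA
 * address of the buffer into the descriptor's hi/lo words (converted to
 * little-endian with cpu_to_le32 in the driver, omitted here). The
 * struct below is illustrative, not the real rxdesc layout.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_rxdesc {
	uint32_t bufaddrh;	/* upper 32 bits of the buffer address */
	uint32_t bufaddrl;	/* lower 32 bits of the buffer address */
	uint16_t datalen;	/* usable buffer length */
};

int main(void)
{
	uint64_t mapping = 0x0000123456789AB0ULL;	/* example address */
	struct fake_rxdesc d;

	d.bufaddrh = (uint32_t)(mapping >> 32);
	d.bufaddrl = (uint32_t)(mapping & 0xFFFFFFFFUL);
	d.datalen = 1536;
	printf("hi=0x%08x lo=0x%08x len=%u\n", d.bufaddrh, d.bufaddrl, d.datalen);
	return 0;
}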
728 struct jme_ring *rxring = &(jme->rxring[0]); in jme_make_new_rx_buf()
729 struct jme_buffer_info *rxbi = rxring->bufinf + i; in jme_make_new_rx_buf()
733 skb = netdev_alloc_skb(jme->dev, in jme_make_new_rx_buf()
734 jme->dev->mtu + RX_EXTRA_LEN); in jme_make_new_rx_buf()
736 return -ENOMEM; in jme_make_new_rx_buf()
738 mapping = dma_map_page(&jme->pdev->dev, virt_to_page(skb->data), in jme_make_new_rx_buf()
739 offset_in_page(skb->data), skb_tailroom(skb), in jme_make_new_rx_buf()
741 if (unlikely(dma_mapping_error(&jme->pdev->dev, mapping))) { in jme_make_new_rx_buf()
743 return -ENOMEM; in jme_make_new_rx_buf()
746 if (likely(rxbi->mapping)) in jme_make_new_rx_buf()
747 dma_unmap_page(&jme->pdev->dev, rxbi->mapping, rxbi->len, in jme_make_new_rx_buf()
750 rxbi->skb = skb; in jme_make_new_rx_buf()
751 rxbi->len = skb_tailroom(skb); in jme_make_new_rx_buf()
752 rxbi->mapping = mapping; in jme_make_new_rx_buf()
759 struct jme_ring *rxring = &(jme->rxring[0]); in jme_free_rx_buf()
760 struct jme_buffer_info *rxbi = rxring->bufinf; in jme_free_rx_buf()
763 if (rxbi->skb) { in jme_free_rx_buf()
764 dma_unmap_page(&jme->pdev->dev, rxbi->mapping, rxbi->len, in jme_free_rx_buf()
766 dev_kfree_skb(rxbi->skb); in jme_free_rx_buf()
767 rxbi->skb = NULL; in jme_free_rx_buf()
768 rxbi->mapping = 0; in jme_free_rx_buf()
769 rxbi->len = 0; in jme_free_rx_buf()
777 struct jme_ring *rxring = &(jme->rxring[0]); in jme_free_rx_resources()
779 if (rxring->alloc) { in jme_free_rx_resources()
780 if (rxring->bufinf) { in jme_free_rx_resources()
781 for (i = 0 ; i < jme->rx_ring_size ; ++i) in jme_free_rx_resources()
783 kfree(rxring->bufinf); in jme_free_rx_resources()
786 dma_free_coherent(&(jme->pdev->dev), in jme_free_rx_resources()
787 RX_RING_ALLOC_SIZE(jme->rx_ring_size), in jme_free_rx_resources()
788 rxring->alloc, in jme_free_rx_resources()
789 rxring->dmaalloc); in jme_free_rx_resources()
790 rxring->alloc = NULL; in jme_free_rx_resources()
791 rxring->desc = NULL; in jme_free_rx_resources()
792 rxring->dmaalloc = 0; in jme_free_rx_resources()
793 rxring->dma = 0; in jme_free_rx_resources()
794 rxring->bufinf = NULL; in jme_free_rx_resources()
796 rxring->next_to_use = 0; in jme_free_rx_resources()
797 atomic_set(&rxring->next_to_clean, 0); in jme_free_rx_resources()
804 struct jme_ring *rxring = &(jme->rxring[0]); in jme_setup_rx_resources()
806 rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev), in jme_setup_rx_resources()
807 RX_RING_ALLOC_SIZE(jme->rx_ring_size), in jme_setup_rx_resources()
808 &(rxring->dmaalloc), in jme_setup_rx_resources()
810 if (!rxring->alloc) in jme_setup_rx_resources()
816 rxring->desc = (void *)ALIGN((unsigned long)(rxring->alloc), in jme_setup_rx_resources()
818 rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN); in jme_setup_rx_resources()
819 rxring->next_to_use = 0; in jme_setup_rx_resources()
820 atomic_set(&rxring->next_to_clean, 0); in jme_setup_rx_resources()
822 rxring->bufinf = kcalloc(jme->rx_ring_size, in jme_setup_rx_resources()
825 if (unlikely(!(rxring->bufinf))) in jme_setup_rx_resources()
831 for (i = 0 ; i < jme->rx_ring_size ; ++i) { in jme_setup_rx_resources()
834 return -ENOMEM; in jme_setup_rx_resources()
843 dma_free_coherent(&(jme->pdev->dev), in jme_setup_rx_resources()
844 RX_RING_ALLOC_SIZE(jme->rx_ring_size), in jme_setup_rx_resources()
845 rxring->alloc, in jme_setup_rx_resources()
846 rxring->dmaalloc); in jme_setup_rx_resources()
848 rxring->desc = NULL; in jme_setup_rx_resources()
849 rxring->dmaalloc = 0; in jme_setup_rx_resources()
850 rxring->dma = 0; in jme_setup_rx_resources()
851 rxring->bufinf = NULL; in jme_setup_rx_resources()
853 return -ENOMEM; in jme_setup_rx_resources()
862 jwrite32(jme, JME_RXCS, jme->reg_rxcs | in jme_enable_rx_engine()
869 jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL); in jme_enable_rx_engine()
870 jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32); in jme_enable_rx_engine()
871 jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL); in jme_enable_rx_engine()
876 jwrite32(jme, JME_RXQDC, jme->rx_ring_size); in jme_enable_rx_engine()
881 jme_set_unicastaddr(jme->dev); in jme_enable_rx_engine()
882 jme_set_multi(jme->dev); in jme_enable_rx_engine()
888 jwrite32f(jme, JME_RXCS, jme->reg_rxcs | in jme_enable_rx_engine()
905 jwrite32(jme, JME_RXCS, jme->reg_rxcs | in jme_restart_rx_engine()
920 jwrite32(jme, JME_RXCS, jme->reg_rxcs); in jme_disable_rx_engine()
924 for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) { in jme_disable_rx_engine()
942 u16 csum = 0xFFFFu; in jme_udpsum() local
944 if (skb->len < (ETH_HLEN + sizeof(struct iphdr))) in jme_udpsum()
945 return csum; in jme_udpsum()
946 if (skb->protocol != htons(ETH_P_IP)) in jme_udpsum()
947 return csum; in jme_udpsum()
950 if (ip_hdr(skb)->protocol != IPPROTO_UDP || in jme_udpsum()
951 skb->len < (ETH_HLEN + ip_hdrlen(skb) + sizeof(struct udphdr))) { in jme_udpsum()
953 return csum; in jme_udpsum()
956 csum = udp_hdr(skb)->check; in jme_udpsum()
960 return csum; in jme_udpsum()
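/*
 * Editor's sketch: a guess at why jme_udpsum() exists, based on the
 * checks above. A UDP checksum of 0 on the wire means "no checksum was
 * computed", so the hardware's UDP-checksum-error flag cannot be trusted
 * for such frames. Minimal userspace parse of an IPv4/UDP frame to read
 * the checksum field; raw buffer offsets, no kernel helpers:
 */
#include <stdint.h>
#include <stdio.h>

/* Return the raw UDP checksum field, or 0xFFFF if not IPv4/UDP. */
static uint16_t udp_csum_field(const uint8_t *frame, size_t len)
{
	if (len < 14 + 20)
		return 0xFFFF;
	if (frame[12] != 0x08 || frame[13] != 0x00)	/* not ETH_P_IP */
		return 0xFFFF;
	size_t ihl = (frame[14] & 0x0F) * 4;		/* IP header length */
	if (frame[14 + 9] != 17 || len < 14 + ihl + 8)	/* not IPPROTO_UDP */
		return 0xFFFF;
	return (uint16_t)(frame[14 + ihl + 6] << 8) | frame[14 + ihl + 7];
}

int main(void)
{
	uint8_t frame[64] = {0};
	frame[12] = 0x08;	/* IPv4 ethertype */
	frame[14] = 0x45;	/* version 4, IHL 5 */
	frame[23] = 17;		/* protocol = UDP */
	/* checksum field left 0: the sender did not compute one */
	printf("udp check = 0x%04x\n", udp_csum_field(frame, sizeof(frame)));
	return 0;
}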
972 netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n"); in jme_rxsum_ok()
979 netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n"); in jme_rxsum_ok()
985 netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error\n"); in jme_rxsum_ok()
995 struct jme_ring *rxring = &(jme->rxring[0]); in jme_alloc_and_feed_skb()
996 struct rxdesc *rxdesc = rxring->desc; in jme_alloc_and_feed_skb()
997 struct jme_buffer_info *rxbi = rxring->bufinf; in jme_alloc_and_feed_skb()
1004 skb = rxbi->skb; in jme_alloc_and_feed_skb()
1005 dma_sync_single_for_cpu(&jme->pdev->dev, rxbi->mapping, rxbi->len, in jme_alloc_and_feed_skb()
1009 dma_sync_single_for_device(&jme->pdev->dev, rxbi->mapping, in jme_alloc_and_feed_skb()
1010 rxbi->len, DMA_FROM_DEVICE); in jme_alloc_and_feed_skb()
1014 framesize = le16_to_cpu(rxdesc->descwb.framesize) in jme_alloc_and_feed_skb()
1015 - RX_PREPAD_SIZE; in jme_alloc_and_feed_skb()
1019 skb->protocol = eth_type_trans(skb, jme->dev); in jme_alloc_and_feed_skb()
1021 if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb)) in jme_alloc_and_feed_skb()
1022 skb->ip_summed = CHECKSUM_UNNECESSARY; in jme_alloc_and_feed_skb()
1026 if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) { in jme_alloc_and_feed_skb()
1027 u16 vid = le16_to_cpu(rxdesc->descwb.vlan); in jme_alloc_and_feed_skb()
1032 jme->jme_rx(skb); in jme_alloc_and_feed_skb()
1034 if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) == in jme_alloc_and_feed_skb()
1047 jme_process_receive(struct jme_adapter *jme, int limit) in jme_process_receive() argument
1049 struct jme_ring *rxring = &(jme->rxring[0]); in jme_process_receive()
1051 int i, j, ccnt, desccnt, mask = jme->rx_ring_mask; in jme_process_receive()
1053 if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning))) in jme_process_receive()
1056 if (unlikely(atomic_read(&jme->link_changing) != 1)) in jme_process_receive()
1059 if (unlikely(!netif_carrier_ok(jme->dev))) in jme_process_receive()
1062 i = atomic_read(&rxring->next_to_clean); in jme_process_receive()
1063 while (limit > 0) { in jme_process_receive()
1064 rxdesc = rxring->desc; in jme_process_receive()
1067 if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) || in jme_process_receive()
1068 !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL)) in jme_process_receive()
1070 --limit; in jme_process_receive()
1073 desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT; in jme_process_receive()
1076 rxdesc->descwb.errstat & RXWBERR_ALLERR)) { in jme_process_receive()
1078 if (rxdesc->descwb.errstat & RXWBERR_CRCERR) in jme_process_receive()
1080 else if (rxdesc->descwb.errstat & RXWBERR_OVERUN) in jme_process_receive()
1086 limit -= desccnt - 1; in jme_process_receive()
1088 for (j = i, ccnt = desccnt ; ccnt-- ; ) { in jme_process_receive()
1101 atomic_set(&rxring->next_to_clean, i); in jme_process_receive()
1104 atomic_inc(&jme->rx_cleaning); in jme_process_receive()
1106 return limit > 0 ? limit : 0; in jme_process_receive()
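/*
 * Editor's sketch: the shape of the budget-limited RX loop above. A
 * consumer walks a power-of-two ring, stops when the budget runs out or
 * a descriptor is still owned by the producer, and reports the unused
 * budget. Ownership is modeled with a plain flag, not RXWBFLAG_OWN.
 */
#include <stdio.h>

#define RING_SIZE 8			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

struct slot { int owned_by_hw; int data; };

static int process_ring(struct slot *ring, int *next, int limit)
{
	int i = *next;

	while (limit > 0) {
		struct slot *s = &ring[i];
		if (s->owned_by_hw)		/* producer not done yet */
			break;
		--limit;
		printf("consumed slot %d: %d\n", i, s->data);
		s->owned_by_hw = 1;		/* hand slot back to producer */
		i = (i + 1) & RING_MASK;
	}
	*next = i;
	return limit > 0 ? limit : 0;		/* unused budget */
}

int main(void)
{
	struct slot ring[RING_SIZE] = { {0, 10}, {0, 11}, {1, 0} };
	int next = 0;
	printf("budget left: %d\n", process_ring(ring, &next, 4));
	return 0;
}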
1113 if (likely(atmp == dpi->cur)) { in jme_attempt_pcc()
1114 dpi->cnt = 0; in jme_attempt_pcc()
1118 if (dpi->attempt == atmp) { in jme_attempt_pcc()
1119 ++(dpi->cnt); in jme_attempt_pcc()
1121 dpi->attempt = atmp; in jme_attempt_pcc()
1122 dpi->cnt = 0; in jme_attempt_pcc()
1130 register struct dynpcc_info *dpi = &(jme->dpi); in jme_dynamic_pcc()
1132 if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD) in jme_dynamic_pcc()
1134 else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD || in jme_dynamic_pcc()
1135 dpi->intr_cnt > PCC_INTR_THRESHOLD) in jme_dynamic_pcc()
1140 if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) { in jme_dynamic_pcc()
1141 if (dpi->attempt < dpi->cur) in jme_dynamic_pcc()
1142 tasklet_schedule(&jme->rxclean_task); in jme_dynamic_pcc()
1143 jme_set_rx_pcc(jme, dpi->attempt); in jme_dynamic_pcc()
1144 dpi->cur = dpi->attempt; in jme_dynamic_pcc()
1145 dpi->cnt = 0; in jme_dynamic_pcc()
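/*
 * Editor's sketch of the adaptive-coalescing policy above: pick a PCC
 * level from recent byte/packet rates, and only commit after the same
 * level has been attempted several samples in a row (hysteresis), so the
 * hardware is not reprogrammed on every tick. The thresholds here are
 * made up, not the driver's PCC_P2_THRESHOLD/PCC_P3_THRESHOLD values.
 */
#include <stdio.h>

enum pcc { PCC_P1, PCC_P2, PCC_P3 };

struct dyn { enum pcc cur, attempt; int cnt; };

static void sample(struct dyn *d, long bytes, long pkts)
{
	enum pcc want = PCC_P1;

	if (bytes > 100000)		/* hypothetical byte-rate threshold */
		want = PCC_P3;
	else if (pkts > 1000)		/* hypothetical packet-rate threshold */
		want = PCC_P2;

	if (want == d->cur) { d->cnt = 0; return; }
	if (want == d->attempt) ++d->cnt; else { d->attempt = want; d->cnt = 0; }

	if (d->attempt != d->cur && d->cnt > 5) {	/* commit after 6 hits */
		d->cur = d->attempt;
		d->cnt = 0;
		printf("switch to PCC_P%d\n", d->cur + 1);
	}
}

int main(void)
{
	struct dyn d = { PCC_P1, PCC_P1, 0 };
	for (int i = 0; i < 8; i++)
		sample(&d, 200000, 50);		/* sustained high byte rate */
	return 0;
}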
1152 struct dynpcc_info *dpi = &(jme->dpi); in jme_start_pcc_timer()
1153 dpi->last_bytes = NET_STAT(jme).rx_bytes; in jme_start_pcc_timer()
1154 dpi->last_pkts = NET_STAT(jme).rx_packets; in jme_start_pcc_timer()
1155 dpi->intr_cnt = 0; in jme_start_pcc_timer()
1157 TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT)); in jme_start_pcc_timer()
1186 struct net_device *netdev = jme->dev; in jme_pcc_tasklet()
1188 if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) { in jme_pcc_tasklet()
1194 (atomic_read(&jme->link_changing) != 1) in jme_pcc_tasklet()
1200 if (!(test_bit(JME_FLAG_POLL, &jme->flags))) in jme_pcc_tasklet()
1240 set_bit(JME_FLAG_SHUTDOWN, &jme->flags); in jme_start_shutdown_timer()
1242 TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT)); in jme_start_shutdown_timer()
1252 clear_bit(JME_FLAG_SHUTDOWN, &jme->flags); in jme_stop_shutdown_timer()
1264 struct net_device *netdev = jme->dev; in jme_link_change_work()
1267 while (!atomic_dec_and_test(&jme->link_changing)) { in jme_link_change_work()
1268 atomic_inc(&jme->link_changing); in jme_link_change_work()
1269 netif_info(jme, intr, jme->dev, "Failed to get link change lock\n"); in jme_link_change_work()
1270 while (atomic_read(&jme->link_changing) != 1) in jme_link_change_work()
1271 netif_info(jme, intr, jme->dev, "Waiting for link change lock\n"); in jme_link_change_work()
1274 if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu) in jme_link_change_work()
1277 jme->old_mtu = netdev->mtu; in jme_link_change_work()
1283 tasklet_disable(&jme->txclean_task); in jme_link_change_work()
1284 tasklet_disable(&jme->rxclean_task); in jme_link_change_work()
1285 tasklet_disable(&jme->rxempty_task); in jme_link_change_work()
1294 if (test_bit(JME_FLAG_POLL, &jme->flags)) in jme_link_change_work()
1310 pr_err("Failed to allocate TX resources, device stopped!\n"); in jme_link_change_work()
1319 if (test_bit(JME_FLAG_POLL, &jme->flags)) in jme_link_change_work()
1332 tasklet_enable(&jme->txclean_task); in jme_link_change_work()
1333 tasklet_enable(&jme->rxclean_task); in jme_link_change_work()
1334 tasklet_enable(&jme->rxempty_task); in jme_link_change_work()
1336 atomic_inc(&jme->link_changing); in jme_link_change_work()
1343 struct dynpcc_info *dpi = &(jme->dpi); in jme_rx_clean_tasklet()
1345 jme_process_receive(jme, jme->rx_ring_size); in jme_rx_clean_tasklet()
1346 ++(dpi->intr_cnt); in jme_rx_clean_tasklet()
1358 while (atomic_read(&jme->rx_empty) > 0) { in jme_poll()
1359 atomic_dec(&jme->rx_empty); in jme_poll()
1363 atomic_inc(&jme->rx_empty); in jme_poll()
1371 return JME_NAPI_WEIGHT_VAL(budget) - rest; in jme_poll()
1379 if (unlikely(atomic_read(&jme->link_changing) != 1)) in jme_rx_empty_tasklet()
1382 if (unlikely(!netif_carrier_ok(jme->dev))) in jme_rx_empty_tasklet()
1385 netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n"); in jme_rx_empty_tasklet()
1387 jme_rx_clean_tasklet(&jme->rxclean_task); in jme_rx_empty_tasklet()
1389 while (atomic_read(&jme->rx_empty) > 0) { in jme_rx_empty_tasklet()
1390 atomic_dec(&jme->rx_empty); in jme_rx_empty_tasklet()
1394 atomic_inc(&jme->rx_empty); in jme_rx_empty_tasklet()
1400 struct jme_ring *txring = &(jme->txring[0]); in jme_wake_queue_if_stopped()
1403 if (unlikely(netif_queue_stopped(jme->dev) && in jme_wake_queue_if_stopped()
1404 atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) { in jme_wake_queue_if_stopped()
1405 netif_info(jme, tx_done, jme->dev, "TX Queue Woken\n"); in jme_wake_queue_if_stopped()
1406 netif_wake_queue(jme->dev); in jme_wake_queue_if_stopped()
1414 struct jme_ring *txring = &(jme->txring[0]); in jme_tx_clean_tasklet()
1415 struct txdesc *txdesc = txring->desc; in jme_tx_clean_tasklet()
1416 struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi; in jme_tx_clean_tasklet()
1421 if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning))) in jme_tx_clean_tasklet()
1424 if (unlikely(atomic_read(&jme->link_changing) != 1)) in jme_tx_clean_tasklet()
1427 if (unlikely(!netif_carrier_ok(jme->dev))) in jme_tx_clean_tasklet()
1430 max = jme->tx_ring_size - atomic_read(&txring->nr_free); in jme_tx_clean_tasklet()
1431 mask = jme->tx_ring_mask; in jme_tx_clean_tasklet()
1433 for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) { in jme_tx_clean_tasklet()
1437 if (likely(ctxbi->skb && in jme_tx_clean_tasklet()
1441 i, ctxbi->nr_desc, jiffies); in jme_tx_clean_tasklet()
1445 for (j = 1 ; j < ctxbi->nr_desc ; ++j) { in jme_tx_clean_tasklet()
1449 dma_unmap_page(&jme->pdev->dev, in jme_tx_clean_tasklet()
1450 ttxbi->mapping, ttxbi->len, in jme_tx_clean_tasklet()
1453 ttxbi->mapping = 0; in jme_tx_clean_tasklet()
1454 ttxbi->len = 0; in jme_tx_clean_tasklet()
1457 dev_kfree_skb(ctxbi->skb); in jme_tx_clean_tasklet()
1459 cnt += ctxbi->nr_desc; in jme_tx_clean_tasklet()
1465 NET_STAT(jme).tx_bytes += ctxbi->len; in jme_tx_clean_tasklet()
1468 ctxbi->skb = NULL; in jme_tx_clean_tasklet()
1469 ctxbi->len = 0; in jme_tx_clean_tasklet()
1470 ctxbi->start_xmit = 0; in jme_tx_clean_tasklet()
1476 i = (i + ctxbi->nr_desc) & mask; in jme_tx_clean_tasklet()
1478 ctxbi->nr_desc = 0; in jme_tx_clean_tasklet()
1482 atomic_set(&txring->next_to_clean, i); in jme_tx_clean_tasklet()
1483 atomic_add(cnt, &txring->nr_free); in jme_tx_clean_tasklet()
1488 atomic_inc(&jme->tx_cleaning); in jme_tx_clean_tasklet()
1505 schedule_work(&jme->linkch_task); in jme_intr_msi()
1511 tasklet_schedule(&jme->pcc_task); in jme_intr_msi()
1516 tasklet_schedule(&jme->txclean_task); in jme_intr_msi()
1526 if (test_bit(JME_FLAG_POLL, &jme->flags)) { in jme_intr_msi()
1528 atomic_inc(&jme->rx_empty); in jme_intr_msi()
1538 atomic_inc(&jme->rx_empty); in jme_intr_msi()
1539 tasklet_hi_schedule(&jme->rxempty_task); in jme_intr_msi()
1541 tasklet_hi_schedule(&jme->rxclean_task); in jme_intr_msi()
1547 * Re-enable interrupt in jme_intr_msi()
1603 spin_lock_bh(&jme->phy_lock); in jme_restart_an()
1604 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); in jme_restart_an()
1606 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); in jme_restart_an()
1607 spin_unlock_bh(&jme->phy_lock); in jme_restart_an()
1614 struct net_device *netdev = jme->dev; in jme_request_irq()
1618 if (!pci_enable_msi(jme->pdev)) { in jme_request_irq()
1619 set_bit(JME_FLAG_MSI, &jme->flags); in jme_request_irq()
1624 rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name, in jme_request_irq()
1629 test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx", in jme_request_irq()
1632 if (test_bit(JME_FLAG_MSI, &jme->flags)) { in jme_request_irq()
1633 pci_disable_msi(jme->pdev); in jme_request_irq()
1634 clear_bit(JME_FLAG_MSI, &jme->flags); in jme_request_irq()
1637 netdev->irq = jme->pdev->irq; in jme_request_irq()
1646 free_irq(jme->pdev->irq, jme->dev); in jme_free_irq()
1647 if (test_bit(JME_FLAG_MSI, &jme->flags)) { in jme_free_irq()
1648 pci_disable_msi(jme->pdev); in jme_free_irq()
1649 clear_bit(JME_FLAG_MSI, &jme->flags); in jme_free_irq()
1650 jme->dev->irq = jme->pdev->irq; in jme_free_irq()
1664 pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg); in jme_new_phy_on()
1667 pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg); in jme_new_phy_on()
1680 pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg); in jme_new_phy_off()
1683 pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg); in jme_new_phy_off()
1691 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); in jme_phy_on()
1693 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); in jme_phy_on()
1695 if (new_phy_power_ctrl(jme->chip_main_rev)) in jme_phy_on()
1704 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); in jme_phy_off()
1706 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); in jme_phy_off()
1708 if (new_phy_power_ctrl(jme->chip_main_rev)) in jme_phy_off()
1718 jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG, in jme_phy_specreg_read()
1720 return jme_mdio_read(jme->dev, jme->mii_if.phy_id, in jme_phy_specreg_read()
1730 jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG, in jme_phy_specreg_write()
1732 jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG, in jme_phy_specreg_write()
1744 ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000); in jme_phy_calibration()
1747 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000); in jme_phy_calibration()
1762 ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000); in jme_phy_calibration()
1764 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000); in jme_phy_calibration()
1774 pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl); in jme_phy_setEA()
1778 switch (jme->pdev->device) { in jme_phy_setEA()
1780 if (((jme->chip_main_rev == 5) && in jme_phy_setEA()
1781 ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) || in jme_phy_setEA()
1782 (jme->chip_sub_rev == 3))) || in jme_phy_setEA()
1783 (jme->chip_main_rev >= 6)) { in jme_phy_setEA()
1787 if ((jme->chip_main_rev == 3) && in jme_phy_setEA()
1788 ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2))) in jme_phy_setEA()
1792 if (((jme->chip_main_rev == 5) && in jme_phy_setEA()
1793 ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) || in jme_phy_setEA()
1794 (jme->chip_sub_rev == 3))) || in jme_phy_setEA()
1795 (jme->chip_main_rev >= 6)) { in jme_phy_setEA()
1799 if ((jme->chip_main_rev == 3) && in jme_phy_setEA()
1800 ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2))) in jme_phy_setEA()
1802 if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0)) in jme_phy_setEA()
1804 if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2)) in jme_phy_setEA()
1808 return -ENODEV; in jme_phy_setEA()
1827 tasklet_setup(&jme->txclean_task, jme_tx_clean_tasklet); in jme_open()
1828 tasklet_setup(&jme->rxclean_task, jme_rx_clean_tasklet); in jme_open()
1829 tasklet_setup(&jme->rxempty_task, jme_rx_empty_tasklet); in jme_open()
1838 if (test_bit(JME_FLAG_SSET, &jme->flags)) in jme_open()
1839 jme_set_link_ksettings(netdev, &jme->old_cmd); in jme_open()
1860 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); in jme_set_100m_half()
1866 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp); in jme_set_100m_half()
1868 if (jme->fpgaver) in jme_set_100m_half()
1882 while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) { in jme_wait_link()
1891 if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) { in jme_powersave_phy()
1893 if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) in jme_powersave_phy()
1914 cancel_work_sync(&jme->linkch_task); in jme_close()
1915 tasklet_kill(&jme->txclean_task); in jme_close()
1916 tasklet_kill(&jme->rxclean_task); in jme_close()
1917 tasklet_kill(&jme->rxempty_task); in jme_close()
1924 jme->phylink = 0; in jme_close()
1934 struct jme_ring *txring = &(jme->txring[0]); in jme_alloc_txdesc()
1935 int idx, nr_alloc, mask = jme->tx_ring_mask; in jme_alloc_txdesc()
1937 idx = txring->next_to_use; in jme_alloc_txdesc()
1938 nr_alloc = skb_shinfo(skb)->nr_frags + 2; in jme_alloc_txdesc()
1940 if (unlikely(atomic_read(&txring->nr_free) < nr_alloc)) in jme_alloc_txdesc()
1941 return -1; in jme_alloc_txdesc()
1943 atomic_sub(nr_alloc, &txring->nr_free); in jme_alloc_txdesc()
1945 txring->next_to_use = (txring->next_to_use + nr_alloc) & mask; in jme_alloc_txdesc()
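/*
 * Editor's sketch: TX slot reservation as in jme_alloc_txdesc() above.
 * Each skb takes one header descriptor, one for the linear part, and one
 * per page fragment (hence nr_frags + 2); the producer index wraps with
 * a power-of-two mask. Plain ints stand in for the driver's atomics.
 */
#include <stdio.h>

#define RING_SIZE 16
#define RING_MASK (RING_SIZE - 1)

struct ring { int next_to_use; int nr_free; };

static int alloc_txdesc(struct ring *r, int nr_frags)
{
	int idx = r->next_to_use;
	int nr_alloc = nr_frags + 2;

	if (r->nr_free < nr_alloc)
		return -1;			/* ring full, caller stops queue */
	r->nr_free -= nr_alloc;
	r->next_to_use = (r->next_to_use + nr_alloc) & RING_MASK;
	return idx;
}

int main(void)
{
	struct ring r = { 0, RING_SIZE };
	printf("skb with 3 frags starts at %d\n", alloc_txdesc(&r, 3));
	printf("next skb (0 frags) starts at %d\n", alloc_txdesc(&r, 0));
	return 0;
}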
1961 dmaaddr = dma_map_page(&pdev->dev, page, page_offset, len, in jme_fill_tx_map()
1964 if (unlikely(dma_mapping_error(&pdev->dev, dmaaddr))) in jme_fill_tx_map()
1965 return -EINVAL; in jme_fill_tx_map()
1967 dma_sync_single_for_device(&pdev->dev, dmaaddr, len, DMA_TO_DEVICE); in jme_fill_tx_map()
1969 txdesc->dw[0] = 0; in jme_fill_tx_map()
1970 txdesc->dw[1] = 0; in jme_fill_tx_map()
1971 txdesc->desc2.flags = TXFLAG_OWN; in jme_fill_tx_map()
1972 txdesc->desc2.flags |= (hidma) ? TXFLAG_64BIT : 0; in jme_fill_tx_map()
1973 txdesc->desc2.datalen = cpu_to_le16(len); in jme_fill_tx_map()
1974 txdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32); in jme_fill_tx_map()
1975 txdesc->desc2.bufaddrl = cpu_to_le32( in jme_fill_tx_map()
1978 txbi->mapping = dmaaddr; in jme_fill_tx_map()
1979 txbi->len = len; in jme_fill_tx_map()
1985 struct jme_ring *txring = &(jme->txring[0]); in jme_drop_tx_map()
1986 struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; in jme_drop_tx_map()
1987 int mask = jme->tx_ring_mask; in jme_drop_tx_map()
1992 dma_unmap_page(&jme->pdev->dev, ctxbi->mapping, ctxbi->len, in jme_drop_tx_map()
1995 ctxbi->mapping = 0; in jme_drop_tx_map()
1996 ctxbi->len = 0; in jme_drop_tx_map()
2003 struct jme_ring *txring = &(jme->txring[0]); in jme_map_tx_skb()
2004 struct txdesc *txdesc = txring->desc, *ctxdesc; in jme_map_tx_skb()
2005 struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; in jme_map_tx_skb()
2006 bool hidma = jme->dev->features & NETIF_F_HIGHDMA; in jme_map_tx_skb()
2007 int i, nr_frags = skb_shinfo(skb)->nr_frags; in jme_map_tx_skb()
2008 int mask = jme->tx_ring_mask; in jme_map_tx_skb()
2013 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in jme_map_tx_skb()
2018 ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, in jme_map_tx_skb()
2027 len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; in jme_map_tx_skb()
2030 ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data), in jme_map_tx_skb()
2031 offset_in_page(skb->data), len, hidma); in jme_map_tx_skb()
2044 *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT); in jme_tx_tso()
2048 if (skb->protocol == htons(ETH_P_IP)) { in jme_tx_tso()
2051 iph->check = 0; in jme_tx_tso()
2052 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, in jme_tx_tso()
2053 iph->daddr, 0, in jme_tx_tso()
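/*
 * Editor's sketch: the TSO pseudo-header seed computed above. For TSO
 * the driver stores ~checksum(saddr, daddr, proto, len = 0) in
 * tcp->check so the NIC can fold in each segment's payload sum and
 * length. One's-complement arithmetic in plain C; the addresses are
 * given as host-order u32 for brevity, whereas the kernel's
 * csum_tcpudp_magic() takes network-order values.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t pseudo_seed(uint32_t saddr, uint32_t daddr, uint8_t proto)
{
	uint64_t sum = (saddr >> 16) + (saddr & 0xFFFF)
		     + (daddr >> 16) + (daddr & 0xFFFF)
		     + proto;			/* length term is 0 for TSO */

	while (sum >> 16)			/* fold carries back in */
		sum = (sum & 0xFFFF) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* 192.0.2.1 -> 192.0.2.2, TCP (protocol 6) */
	printf("seed = 0x%04x\n", pseudo_seed(0xC0000201, 0xC0000202, 6));
	return 0;
}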
2069 if (skb->ip_summed == CHECKSUM_PARTIAL) { in jme_tx_csum()
2072 switch (skb->protocol) { in jme_tx_csum()
2074 ip_proto = ip_hdr(skb)->protocol; in jme_tx_csum()
2077 ip_proto = ipv6_hdr(skb)->nexthdr; in jme_tx_csum()
2092 netif_err(jme, tx_err, jme->dev, "Unsupported upper layer protocol\n"); in jme_tx_csum()
2110 struct jme_ring *txring = &(jme->txring[0]); in jme_fill_tx_desc()
2116 txdesc = (struct txdesc *)txring->desc + idx; in jme_fill_tx_desc()
2117 txbi = txring->bufinf + idx; in jme_fill_tx_desc()
2119 txdesc->dw[0] = 0; in jme_fill_tx_desc()
2120 txdesc->dw[1] = 0; in jme_fill_tx_desc()
2121 txdesc->dw[2] = 0; in jme_fill_tx_desc()
2122 txdesc->dw[3] = 0; in jme_fill_tx_desc()
2123 txdesc->desc1.pktsize = cpu_to_le16(skb->len); in jme_fill_tx_desc()
2128 * it to start sending this TX queue. in jme_fill_tx_desc()
2136 if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags)) in jme_fill_tx_desc()
2138 jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags); in jme_fill_tx_desc()
2143 txdesc->desc1.flags = flags; in jme_fill_tx_desc()
2145 * Set tx buffer info after telling NIC to send in jme_fill_tx_desc()
2149 txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2; in jme_fill_tx_desc()
2150 txbi->skb = skb; in jme_fill_tx_desc()
2151 txbi->len = skb->len; in jme_fill_tx_desc()
2152 txbi->start_xmit = jiffies; in jme_fill_tx_desc()
2153 if (!txbi->start_xmit) in jme_fill_tx_desc()
2154 txbi->start_xmit = (0UL-1); in jme_fill_tx_desc()
2162 struct jme_ring *txring = &(jme->txring[0]); in jme_stop_queue_if_full()
2163 struct jme_buffer_info *txbi = txring->bufinf; in jme_stop_queue_if_full()
2164 int idx = atomic_read(&txring->next_to_clean); in jme_stop_queue_if_full()
2169 if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) { in jme_stop_queue_if_full()
2170 netif_stop_queue(jme->dev); in jme_stop_queue_if_full()
2171 netif_info(jme, tx_queued, jme->dev, "TX Queue Paused\n"); in jme_stop_queue_if_full()
2173 if (atomic_read(&txring->nr_free) in jme_stop_queue_if_full()
2174 >= (jme->tx_wake_threshold)) { in jme_stop_queue_if_full()
2175 netif_wake_queue(jme->dev); in jme_stop_queue_if_full()
2176 netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Woken\n"); in jme_stop_queue_if_full()
2180 if (unlikely(txbi->start_xmit && in jme_stop_queue_if_full()
2181 time_is_before_eq_jiffies(txbi->start_xmit + TX_TIMEOUT) && in jme_stop_queue_if_full()
2182 txbi->skb)) { in jme_stop_queue_if_full()
2183 netif_stop_queue(jme->dev); in jme_stop_queue_if_full()
2184 netif_info(jme, tx_queued, jme->dev, in jme_stop_queue_if_full()
2185 "TX Queue Stopped %d@%lu\n", idx, jiffies); in jme_stop_queue_if_full()
2209 netif_err(jme, tx_err, jme->dev, in jme_start_xmit()
2210 "BUG! Tx ring full when queue awake!\n"); in jme_start_xmit()
2218 jwrite32(jme, JME_TXCS, jme->reg_txcs | in jme_start_xmit()
2224 idx, skb_shinfo(skb)->nr_frags + 2, jiffies); in jme_start_xmit()
2236 val = (netdev->dev_addr[3] & 0xff) << 24 | in jme_set_unicastaddr()
2237 (netdev->dev_addr[2] & 0xff) << 16 | in jme_set_unicastaddr()
2238 (netdev->dev_addr[1] & 0xff) << 8 | in jme_set_unicastaddr()
2239 (netdev->dev_addr[0] & 0xff); in jme_set_unicastaddr()
2241 val = (netdev->dev_addr[5] & 0xff) << 8 | in jme_set_unicastaddr()
2242 (netdev->dev_addr[4] & 0xff); in jme_set_unicastaddr()
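/*
 * Editor's sketch: packing a 6-byte MAC address into the two station-
 * address register values as jme_set_unicastaddr() does above: bytes
 * 0-3 into the low word, bytes 4-5 into the high word.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1B, 0x21, 0xAA, 0xBB, 0xCC };
	uint32_t lo = (uint32_t)mac[3] << 24 | (uint32_t)mac[2] << 16 |
		      (uint32_t)mac[1] << 8  |  mac[0];
	uint32_t hi = (uint32_t)mac[5] << 8 | mac[4];

	printf("UMA lo=0x%08x hi=0x%08x\n", lo, hi);
	return 0;
}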
2253 return -EBUSY; in jme_set_macaddr()
2255 spin_lock_bh(&jme->macaddr_lock); in jme_set_macaddr()
2256 eth_hw_addr_set(netdev, addr->sa_data); in jme_set_macaddr()
2258 spin_unlock_bh(&jme->macaddr_lock); in jme_set_macaddr()
2269 spin_lock_bh(&jme->rxmcs_lock); in jme_set_multi()
2271 jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME; in jme_set_multi()
2273 if (netdev->flags & IFF_PROMISC) { in jme_set_multi()
2274 jme->reg_rxmcs |= RXMCS_ALLFRAME; in jme_set_multi()
2275 } else if (netdev->flags & IFF_ALLMULTI) { in jme_set_multi()
2276 jme->reg_rxmcs |= RXMCS_ALLMULFRAME; in jme_set_multi()
2277 } else if (netdev->flags & IFF_MULTICAST) { in jme_set_multi()
2281 jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED; in jme_set_multi()
2283 bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F; in jme_set_multi()
2292 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); in jme_set_multi()
2294 spin_unlock_bh(&jme->rxmcs_lock); in jme_set_multi()
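/*
 * Editor's sketch: the 64-bin multicast hash filter used above. A
 * self-contained equivalent of the kernel's ether_crc() (MSB-first
 * CRC-32, polynomial 0x04C11DB7, data bits fed LSB-first) selects bit
 * 0-63 from the low six bits, which indexes two 32-bit hash registers.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t ether_crc(int len, const uint8_t *data)
{
	uint32_t crc = 0xFFFFFFFFu;

	while (--len >= 0) {
		uint8_t b = *data++;
		for (int bit = 0; bit < 8; bit++, b >>= 1)
			crc = (crc << 1) ^
			      ((((crc >> 31) ^ (b & 1)) & 1) ? 0x04C11DB7u : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t mcast[6] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0x01 };
	uint32_t mc_hash[2] = { 0, 0 };
	unsigned bit_nr = ether_crc(6, mcast) & 0x3F;

	mc_hash[bit_nr >> 5] |= 1U << (bit_nr & 0x1F);
	printf("bit %u -> hash[%u]=0x%08x\n",
	       bit_nr, bit_nr >> 5, mc_hash[bit_nr >> 5]);
	return 0;
}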
2302 WRITE_ONCE(netdev->mtu, new_mtu); in jme_change_mtu()
2316 jme->phylink = 0; in jme_tx_timeout()
2318 if (test_bit(JME_FLAG_SSET, &jme->flags)) in jme_tx_timeout()
2319 jme_set_link_ksettings(netdev, &jme->old_cmd); in jme_tx_timeout()
2333 strscpy(info->driver, DRV_NAME, sizeof(info->driver)); in jme_get_drvinfo()
2334 strscpy(info->version, DRV_VERSION, sizeof(info->version)); in jme_get_drvinfo()
2335 strscpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info)); in jme_get_drvinfo()
2360 p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i); in mdio_memcpy()
2371 regs->version = 1; in jme_get_regs()
2394 ecmd->tx_coalesce_usecs = PCC_TX_TO; in jme_get_coalesce()
2395 ecmd->tx_max_coalesced_frames = PCC_TX_CNT; in jme_get_coalesce()
2397 if (test_bit(JME_FLAG_POLL, &jme->flags)) { in jme_get_coalesce()
2398 ecmd->use_adaptive_rx_coalesce = false; in jme_get_coalesce()
2399 ecmd->rx_coalesce_usecs = 0; in jme_get_coalesce()
2400 ecmd->rx_max_coalesced_frames = 0; in jme_get_coalesce()
2404 ecmd->use_adaptive_rx_coalesce = true; in jme_get_coalesce()
2406 switch (jme->dpi.cur) { in jme_get_coalesce()
2408 ecmd->rx_coalesce_usecs = PCC_P1_TO; in jme_get_coalesce()
2409 ecmd->rx_max_coalesced_frames = PCC_P1_CNT; in jme_get_coalesce()
2412 ecmd->rx_coalesce_usecs = PCC_P2_TO; in jme_get_coalesce()
2413 ecmd->rx_max_coalesced_frames = PCC_P2_CNT; in jme_get_coalesce()
2416 ecmd->rx_coalesce_usecs = PCC_P3_TO; in jme_get_coalesce()
2417 ecmd->rx_max_coalesced_frames = PCC_P3_CNT; in jme_get_coalesce()
2432 struct dynpcc_info *dpi = &(jme->dpi); in jme_set_coalesce()
2435 return -EBUSY; in jme_set_coalesce()
2437 if (ecmd->use_adaptive_rx_coalesce && in jme_set_coalesce()
2438 test_bit(JME_FLAG_POLL, &jme->flags)) { in jme_set_coalesce()
2439 clear_bit(JME_FLAG_POLL, &jme->flags); in jme_set_coalesce()
2440 jme->jme_rx = netif_rx; in jme_set_coalesce()
2441 dpi->cur = PCC_P1; in jme_set_coalesce()
2442 dpi->attempt = PCC_P1; in jme_set_coalesce()
2443 dpi->cnt = 0; in jme_set_coalesce()
2446 } else if (!(ecmd->use_adaptive_rx_coalesce) && in jme_set_coalesce()
2447 !(test_bit(JME_FLAG_POLL, &jme->flags))) { in jme_set_coalesce()
2448 set_bit(JME_FLAG_POLL, &jme->flags); in jme_set_coalesce()
2449 jme->jme_rx = netif_receive_skb; in jme_set_coalesce()
2463 ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0; in jme_get_pauseparam()
2464 ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0; in jme_get_pauseparam()
2466 spin_lock_bh(&jme->phy_lock); in jme_get_pauseparam()
2467 val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE); in jme_get_pauseparam()
2468 spin_unlock_bh(&jme->phy_lock); in jme_get_pauseparam()
2470 ecmd->autoneg = in jme_get_pauseparam()
2481 if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^ in jme_set_pauseparam()
2482 (ecmd->tx_pause != 0)) { in jme_set_pauseparam()
2484 if (ecmd->tx_pause) in jme_set_pauseparam()
2485 jme->reg_txpfc |= TXPFC_PF_EN; in jme_set_pauseparam()
2487 jme->reg_txpfc &= ~TXPFC_PF_EN; in jme_set_pauseparam()
2489 jwrite32(jme, JME_TXPFC, jme->reg_txpfc); in jme_set_pauseparam()
2492 spin_lock_bh(&jme->rxmcs_lock); in jme_set_pauseparam()
2493 if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^ in jme_set_pauseparam()
2494 (ecmd->rx_pause != 0)) { in jme_set_pauseparam()
2496 if (ecmd->rx_pause) in jme_set_pauseparam()
2497 jme->reg_rxmcs |= RXMCS_FLOWCTRL; in jme_set_pauseparam()
2499 jme->reg_rxmcs &= ~RXMCS_FLOWCTRL; in jme_set_pauseparam()
2501 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); in jme_set_pauseparam()
2503 spin_unlock_bh(&jme->rxmcs_lock); in jme_set_pauseparam()
2505 spin_lock_bh(&jme->phy_lock); in jme_set_pauseparam()
2506 val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE); in jme_set_pauseparam()
2508 (ecmd->autoneg != 0)) { in jme_set_pauseparam()
2510 if (ecmd->autoneg) in jme_set_pauseparam()
2515 jme_mdio_write(jme->dev, jme->mii_if.phy_id, in jme_set_pauseparam()
2518 spin_unlock_bh(&jme->phy_lock); in jme_set_pauseparam()
2529 wol->supported = WAKE_MAGIC | WAKE_PHY; in jme_get_wol()
2531 wol->wolopts = 0; in jme_get_wol()
2533 if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) in jme_get_wol()
2534 wol->wolopts |= WAKE_PHY; in jme_get_wol()
2536 if (jme->reg_pmcs & PMCS_MFEN) in jme_get_wol()
2537 wol->wolopts |= WAKE_MAGIC; in jme_get_wol()
2547 if (wol->wolopts & (WAKE_MAGICSECURE | in jme_set_wol()
2552 return -EOPNOTSUPP; in jme_set_wol()
2554 jme->reg_pmcs = 0; in jme_set_wol()
2556 if (wol->wolopts & WAKE_PHY) in jme_set_wol()
2557 jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN; in jme_set_wol()
2559 if (wol->wolopts & WAKE_MAGIC) in jme_set_wol()
2560 jme->reg_pmcs |= PMCS_MFEN; in jme_set_wol()
2571 spin_lock_bh(&jme->phy_lock); in jme_get_link_ksettings()
2572 mii_ethtool_get_link_ksettings(&jme->mii_if, cmd); in jme_get_link_ksettings()
2573 spin_unlock_bh(&jme->phy_lock); in jme_get_link_ksettings()
2584 if (cmd->base.speed == SPEED_1000 && in jme_set_link_ksettings()
2585 cmd->base.autoneg != AUTONEG_ENABLE) in jme_set_link_ksettings()
2586 return -EINVAL; in jme_set_link_ksettings()
2592 if (jme->mii_if.force_media && in jme_set_link_ksettings()
2593 cmd->base.autoneg != AUTONEG_ENABLE && in jme_set_link_ksettings()
2594 (jme->mii_if.full_duplex != cmd->base.duplex)) in jme_set_link_ksettings()
2597 spin_lock_bh(&jme->phy_lock); in jme_set_link_ksettings()
2598 rc = mii_ethtool_set_link_ksettings(&jme->mii_if, cmd); in jme_set_link_ksettings()
2599 spin_unlock_bh(&jme->phy_lock); in jme_set_link_ksettings()
2604 jme->old_cmd = *cmd; in jme_set_link_ksettings()
2605 set_bit(JME_FLAG_SSET, &jme->flags); in jme_set_link_ksettings()
2620 u16 val = mii_data->val_in; in jme_ioctl()
2623 return -EINVAL; in jme_ioctl()
2626 spin_lock_bh(&jme->phy_lock); in jme_ioctl()
2627 rc = generic_mii_ioctl(&jme->mii_if, mii_data, cmd, &duplex_chg); in jme_ioctl()
2628 spin_unlock_bh(&jme->phy_lock); in jme_ioctl()
2633 jme_get_link_ksettings(netdev, &jme->old_cmd); in jme_ioctl()
2634 set_bit(JME_FLAG_SSET, &jme->flags); in jme_ioctl()
2651 return jme->msg_enable; in jme_get_msglevel()
2658 jme->msg_enable = value; in jme_set_msglevel()
2664 if (netdev->mtu > 1900) in jme_fix_features()
2674 spin_lock_bh(&jme->rxmcs_lock); in jme_set_features()
2676 jme->reg_rxmcs |= RXMCS_CHECKSUM; in jme_set_features()
2678 jme->reg_rxmcs &= ~RXMCS_CHECKSUM; in jme_set_features()
2679 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); in jme_set_features()
2680 spin_unlock_bh(&jme->rxmcs_lock); in jme_set_features()
2691 jme_intr(dev->irq, dev); in jme_netpoll()
2712 while ((val & SMBCSR_BUSY) && --to) { in jme_smb_read()
2717 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n"); in jme_smb_read()
2728 while ((val & SMBINTF_HWCMD) && --to) { in jme_smb_read()
2733 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n"); in jme_smb_read()
2748 while ((val & SMBCSR_BUSY) && --to) { in jme_smb_write()
2753 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n"); in jme_smb_write()
2765 while ((val & SMBINTF_HWCMD) && --to) { in jme_smb_write()
2770 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n"); in jme_smb_write()
2791 int i, offset = eeprom->offset, len = eeprom->len; in jme_get_eeprom()
2796 eeprom->magic = JME_EEPROM_MAGIC; in jme_get_eeprom()
2808 int i, offset = eeprom->offset, len = eeprom->len; in jme_set_eeprom()
2810 if (eeprom->magic != JME_EEPROM_MAGIC) in jme_set_eeprom()
2811 return -EINVAL; in jme_set_eeprom()
2849 if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && in jme_pci_dma64()
2850 !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) in jme_pci_dma64()
2853 if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && in jme_pci_dma64()
2854 !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40))) in jme_pci_dma64()
2857 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) in jme_pci_dma64()
2860 return -1; in jme_pci_dma64()
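/*
 * Editor's sketch: the DMA-mask fallback above tries 64-bit, then
 * 40-bit, then 32-bit addressing, with the wider masks reserved for the
 * JMC250. A stubbed capability check stands in for the real
 * dma_set_mask_and_coherent() call:
 */
#include <stdbool.h>
#include <stdio.h>

static bool set_mask(int dev_max_bits, int want_bits)
{
	return want_bits <= dev_max_bits;	/* stub for the DMA API call */
}

static int pick_dma_width(int dev_max_bits, bool is_jmc250)
{
	static const int widths[] = { 64, 40, 32 };

	for (int i = 0; i < 3; i++) {
		if (widths[i] > 32 && !is_jmc250)
			continue;	/* only the JMC250 tries > 32 bit */
		if (set_mask(dev_max_bits, widths[i]))
			return widths[i];
	}
	return -1;
}

int main(void)
{
	printf("JMC250 on 48-bit platform: %d-bit DMA\n", pick_dma_width(48, true));
	printf("JMC260: %d-bit DMA\n", pick_dma_width(48, false));
	return 0;
}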
2868 reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26); in jme_phy_init()
2869 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000); in jme_phy_init()
2879 jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT; in jme_check_hw_ver()
2880 jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT; in jme_check_hw_ver()
2881 jme->chip_main_rev = jme->chiprev & 0xF; in jme_check_hw_ver()
2882 jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF; in jme_check_hw_ver()
2927 rc = -EIO; in jme_init_one()
2933 rc = -ENOMEM; in jme_init_one()
2950 rc = -ENOMEM; in jme_init_one()
2953 netdev->netdev_ops = &jme_netdev_ops; in jme_init_one()
2954 netdev->ethtool_ops = &jme_ethtool_ops; in jme_init_one()
2955 netdev->watchdog_timeo = TX_TIMEOUT; in jme_init_one()
2956 netdev->hw_features = NETIF_F_IP_CSUM | in jme_init_one()
2962 netdev->features = NETIF_F_IP_CSUM | in jme_init_one()
2970 netdev->features |= NETIF_F_HIGHDMA; in jme_init_one()
2972 /* MTU range: 1280 - 9202 */ in jme_init_one()
2973 netdev->min_mtu = IPV6_MIN_MTU; in jme_init_one()
2974 netdev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE - ETH_HLEN; in jme_init_one()
2976 SET_NETDEV_DEV(netdev, &pdev->dev); in jme_init_one()
2983 jme->pdev = pdev; in jme_init_one()
2984 jme->dev = netdev; in jme_init_one()
2985 jme->jme_rx = netif_rx; in jme_init_one()
2986 jme->old_mtu = netdev->mtu = 1500; in jme_init_one()
2987 jme->phylink = 0; in jme_init_one()
2988 jme->tx_ring_size = 1 << 10; in jme_init_one()
2989 jme->tx_ring_mask = jme->tx_ring_size - 1; in jme_init_one()
2990 jme->tx_wake_threshold = 1 << 9; in jme_init_one()
2991 jme->rx_ring_size = 1 << 9; in jme_init_one()
2992 jme->rx_ring_mask = jme->rx_ring_size - 1; in jme_init_one()
2993 jme->msg_enable = JME_DEF_MSG_ENABLE; in jme_init_one()
2994 jme->regs = ioremap(pci_resource_start(pdev, 0), in jme_init_one()
2996 if (!(jme->regs)) { in jme_init_one()
2998 rc = -ENOMEM; in jme_init_one()
3010 netif_napi_add(netdev, &jme->napi, jme_poll); in jme_init_one()
3012 spin_lock_init(&jme->phy_lock); in jme_init_one()
3013 spin_lock_init(&jme->macaddr_lock); in jme_init_one()
3014 spin_lock_init(&jme->rxmcs_lock); in jme_init_one()
3016 atomic_set(&jme->link_changing, 1); in jme_init_one()
3017 atomic_set(&jme->rx_cleaning, 1); in jme_init_one()
3018 atomic_set(&jme->tx_cleaning, 1); in jme_init_one()
3019 atomic_set(&jme->rx_empty, 1); in jme_init_one()
3021 tasklet_setup(&jme->pcc_task, jme_pcc_tasklet); in jme_init_one()
3022 INIT_WORK(&jme->linkch_task, jme_link_change_work); in jme_init_one()
3023 jme->dpi.cur = PCC_P1; in jme_init_one()
3025 jme->reg_ghc = 0; in jme_init_one()
3026 jme->reg_rxcs = RXCS_DEFAULT; in jme_init_one()
3027 jme->reg_rxmcs = RXMCS_DEFAULT; in jme_init_one()
3028 jme->reg_txpfc = 0; in jme_init_one()
3029 jme->reg_pmcs = PMCS_MFEN; in jme_init_one()
3030 jme->reg_gpreg1 = GPREG1_DEFAULT; in jme_init_one()
3032 if (jme->reg_rxmcs & RXMCS_CHECKSUM) in jme_init_one()
3033 netdev->features |= NETIF_F_RXCSUM; in jme_init_one()
3038 pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs); in jme_init_one()
3039 jme->mrrs &= PCI_DCSR_MRRS_MASK; in jme_init_one()
3040 switch (jme->mrrs) { in jme_init_one()
3042 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B; in jme_init_one()
3045 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B; in jme_init_one()
3048 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B; in jme_init_one()
3056 jme->mii_if.dev = netdev; in jme_init_one()
3057 if (jme->fpgaver) { in jme_init_one()
3058 jme->mii_if.phy_id = 0; in jme_init_one()
3063 jme->mii_if.phy_id = i; in jme_init_one()
3068 if (!jme->mii_if.phy_id) { in jme_init_one()
3069 rc = -EIO; in jme_init_one()
3074 jme->reg_ghc |= GHC_LINK_POLL; in jme_init_one()
3076 jme->mii_if.phy_id = 1; in jme_init_one()
3078 if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) in jme_init_one()
3079 jme->mii_if.supports_gmii = true; in jme_init_one()
3081 jme->mii_if.supports_gmii = false; in jme_init_one()
3082 jme->mii_if.phy_id_mask = 0x1F; in jme_init_one()
3083 jme->mii_if.reg_num_mask = 0x1F; in jme_init_one()
3084 jme->mii_if.mdio_read = jme_mdio_read; in jme_init_one()
3085 jme->mii_if.mdio_write = jme_mdio_write; in jme_init_one()
3088 device_init_wakeup(&pdev->dev, true); in jme_init_one()
3091 jme->pcirev = pdev->revision; in jme_init_one()
3092 if (!jme->fpgaver) in jme_init_one()
3118 netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n", in jme_init_one()
3119 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ? in jme_init_one()
3121 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ? in jme_init_one()
3123 (jme->fpgaver != 0) ? " (FPGA)" : "", in jme_init_one()
3124 (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev, in jme_init_one()
3125 jme->pcirev, netdev->dev_addr); in jme_init_one()
3130 iounmap(jme->regs); in jme_init_one()
3148 iounmap(jme->regs); in jme_remove_one()
3175 atomic_dec(&jme->link_changing); in jme_suspend()
3181 tasklet_disable(&jme->txclean_task); in jme_suspend()
3182 tasklet_disable(&jme->rxclean_task); in jme_suspend()
3183 tasklet_disable(&jme->rxempty_task); in jme_suspend()
3186 if (test_bit(JME_FLAG_POLL, &jme->flags)) in jme_suspend()
3196 jme->phylink = 0; in jme_suspend()
3199 tasklet_enable(&jme->txclean_task); in jme_suspend()
3200 tasklet_enable(&jme->rxclean_task); in jme_suspend()
3201 tasklet_enable(&jme->rxempty_task); in jme_suspend()
3219 if (test_bit(JME_FLAG_SSET, &jme->flags)) in jme_resume()
3220 jme_set_link_ksettings(netdev, &jme->old_cmd); in jme_resume()
3227 atomic_inc(&jme->link_changing); in jme_resume()
3275 MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");