Lines Matching +full:reg +full:- +full:spacing

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2006 Intel Corporation. */
26 * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
27 * configurations. 8255x supports a 32-bit linear addressing
32 * Memory-mapped mode is used exclusively to access the device's
33 * shared-memory structure, the Control/Status Registers (CSR). All
39 * 8255x is highly MII-compliant and all accesses to the PHY go
41 * driver leverages the mii.c library shared with other MII-compliant
44 * Big- and Little-Endian byte order as well as 32- and 64-bit
45 * archs are supported. Weak-ordered memory and non-cache-coherent
51 * together in a fixed-size ring (CBL) thus forming the flexible mode
52 * memory structure. A TCB marked with the suspend-bit indicates
58 * Non-Tx commands (config, multicast setup, etc.) are linked
60 * used for both Tx and non-Tx commands is the Command Block (CB).
79 * protocol headers are u32-aligned. Since the RFD is part of the
87 * packet as end-of-list (EL). After updating the link, we remove EL
89 * previous-to-end RFD.
93 * replacement RFDs cannot be allocated, or the RU goes non-active,
95 * and Rx indication and re-allocation happen in the same context,
96 * therefore no locking is required. A software-generated interrupt
98 * scenario where all Rx resources have been indicated and none re-
104 * supported, but the driver will accommodate the extra 4-byte VLAN tag
115 * o several entry points race with dev->close
116 * o check for tx-no-resources/stop Q races with tx clean/wake Q
119 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
120 * - Stratus87247: protect MDI control register manipulations
121 * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
122 * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
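The theory-of-operation notes above describe how command blocks are chained in a fixed ring and handed to the CU by setting the suspend bit on the new tail and only then clearing it on the previous tail. A minimal userspace sketch of that hand-off, with invented names and a toy 4-entry ring (the 0x4000 flag mirrors the driver's cb_s bit), follows; it is an illustration, not driver code.

/*
 * Toy model of the suspend-bit hand-off on the command block ring.
 * Append with S set, then clear S on the previous block, so the CU
 * never runs past the newest command.
 */
#include <stdint.h>
#include <stdio.h>

#define RING_LEN 4
#define CB_S     0x4000u   /* suspend bit, mirrors cb_s in the driver */

struct toy_cb {
	uint16_t command;
	int prev;          /* index of the previous CB in the ring */
};

static struct toy_cb ring[RING_LEN];
static int to_use;

static void toy_exec_cb(uint16_t cmd)
{
	struct toy_cb *cb = &ring[to_use];

	cb->command = (uint16_t)(cmd | CB_S);          /* new tail: suspend here */
	ring[cb->prev].command &= (uint16_t)~CB_S;     /* old tail: let the CU continue */
	to_use = (to_use + 1) % RING_LEN;
}

int main(void)
{
	int i;

	for (i = 0; i < RING_LEN; i++)
		ring[i].prev = (i + RING_LEN - 1) % RING_LEN;

	toy_exec_cb(0x0004);   /* e.g. a transmit-style command */
	toy_exec_cb(0x0002);   /* e.g. a configure-style command */

	for (i = 0; i < RING_LEN; i++)
		printf("cb[%d] command=0x%04X%s\n", i, (unsigned)ring[i].command,
		       (ring[i].command & CB_S) ? " (S)" : "");
	return 0;
}

After the two calls only the most recently queued block still carries the suspend bit, which is exactly the invariant the comment in e100_exec_cb() relies on.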
138 #include <linux/dma-mapping.h>
154 #define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
284 RU_UNINITIALIZED = -1,
387 * cb_command - Command Block flags
471 /* Important: keep total struct u32-aligned */
541 u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);
609 (void)ioread8(&nic->csr->scb.status); in e100_write_flush()
616 spin_lock_irqsave(&nic->cmd_lock, flags); in e100_enable_irq()
617 iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi); in e100_enable_irq()
619 spin_unlock_irqrestore(&nic->cmd_lock, flags); in e100_enable_irq()
626 spin_lock_irqsave(&nic->cmd_lock, flags); in e100_disable_irq()
627 iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi); in e100_disable_irq()
629 spin_unlock_irqrestore(&nic->cmd_lock, flags); in e100_disable_irq()
636 iowrite32(selective_reset, &nic->csr->port); in e100_hw_reset()
640 iowrite32(software_reset, &nic->csr->port); in e100_hw_reset()
643 /* Mask off our interrupt line - it's unmasked after reset */ in e100_hw_reset()
649 u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest); in e100_self_test()
651 /* Passing the self-test is a pretty good indication in e100_self_test()
654 nic->mem->selftest.signature = 0; in e100_self_test()
655 nic->mem->selftest.result = 0xFFFFFFFF; in e100_self_test()
657 iowrite32(selftest | dma_addr, &nic->csr->port); in e100_self_test()
659 /* Wait 10 msec for self-test to complete */ in e100_self_test()
662 /* Interrupts are enabled after self-test */ in e100_self_test()
665 /* Check results of self-test */ in e100_self_test()
666 if (nic->mem->selftest.result != 0) { in e100_self_test()
667 netif_err(nic, hw, nic->netdev, in e100_self_test()
668 "Self-test failed: result=0x%08X\n", in e100_self_test()
669 nic->mem->selftest.result); in e100_self_test()
670 return -ETIMEDOUT; in e100_self_test()
672 if (nic->mem->selftest.signature == 0) { in e100_self_test()
673 netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n"); in e100_self_test()
674 return -ETIMEDOUT; in e100_self_test()
687 cmd_addr_data[0] = op_ewen << (addr_len - 2); in e100_eeprom_write()
690 cmd_addr_data[2] = op_ewds << (addr_len - 2); in e100_eeprom_write()
692 /* Bit-bang cmds to write word to eeprom */ in e100_eeprom_write()
696 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_write()
699 for (i = 31; i >= 0; i--) { in e100_eeprom_write()
702 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_write()
705 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_write()
712 iowrite8(0, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_write()
717 /* General technique stolen from the eepro100 driver - very clever */
728 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_read()
731 /* Bit-bang to read word from eeprom */ in e100_eeprom_read()
732 for (i = 31; i >= 0; i--) { in e100_eeprom_read()
734 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_read()
737 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_read()
742 ctrl = ioread8(&nic->csr->eeprom_ctrl_lo); in e100_eeprom_read()
744 *addr_len -= (i - 16); in e100_eeprom_read()
752 iowrite8(0, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_read()
763 /* Try reading with an 8-bit addr len to discover actual addr len */ in e100_eeprom_load()
765 nic->eeprom_wc = 1 << addr_len; in e100_eeprom_load()
767 for (addr = 0; addr < nic->eeprom_wc; addr++) { in e100_eeprom_load()
768 nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr); in e100_eeprom_load()
769 if (addr < nic->eeprom_wc - 1) in e100_eeprom_load()
770 checksum += le16_to_cpu(nic->eeprom[addr]); in e100_eeprom_load()
775 if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) { in e100_eeprom_load()
776 netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n"); in e100_eeprom_load()
778 return -EAGAIN; in e100_eeprom_load()
789 /* Try reading with an 8-bit addr len to discover actual addr len */ in e100_eeprom_save()
791 nic->eeprom_wc = 1 << addr_len; in e100_eeprom_save()
793 if (start + count >= nic->eeprom_wc) in e100_eeprom_save()
794 return -EINVAL; in e100_eeprom_save()
797 e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]); in e100_eeprom_save()
801 for (addr = 0; addr < nic->eeprom_wc - 1; addr++) in e100_eeprom_save()
802 checksum += le16_to_cpu(nic->eeprom[addr]); in e100_eeprom_save()
803 nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum); in e100_eeprom_save()
804 e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1, in e100_eeprom_save()
805 nic->eeprom[nic->eeprom_wc - 1]); in e100_eeprom_save()
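e100_eeprom_load() and e100_eeprom_save() above use a simple checksum rule: the last EEPROM word is chosen so that the sum of all words equals 0xBABA modulo 2^16. A standalone sketch of that arithmetic, with made-up word values, is shown below.

/* Verify/produce the 0xBABA EEPROM checksum used by the driver. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t eeprom[8] = { 0x2400, 0x1EE1, 0x02A8, 0x0103,
			       0x0000, 0x0000, 0x0000, 0x0000 };
	size_t wc = sizeof(eeprom) / sizeof(eeprom[0]);
	uint16_t sum = 0;
	size_t i;

	for (i = 0; i < wc - 1; i++)
		sum += eeprom[i];
	eeprom[wc - 1] = (uint16_t)(0xBABA - sum);   /* what e100_eeprom_save() stores */

	sum = 0;
	for (i = 0; i < wc; i++)                     /* what e100_eeprom_load() recomputes */
		sum += eeprom[i];
	printf("total = 0x%04X (%s)\n", (unsigned)sum,
	       sum == 0xBABA ? "valid" : "corrupted");
	return 0;
}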
818 spin_lock_irqsave(&nic->cmd_lock, flags); in e100_exec_cmd()
822 if (likely(!ioread8(&nic->csr->scb.cmd_lo))) in e100_exec_cmd()
829 err = -EAGAIN; in e100_exec_cmd()
834 iowrite32(dma_addr, &nic->csr->scb.gen_ptr); in e100_exec_cmd()
835 iowrite8(cmd, &nic->csr->scb.cmd_lo); in e100_exec_cmd()
838 spin_unlock_irqrestore(&nic->cmd_lock, flags); in e100_exec_cmd()
850 spin_lock_irqsave(&nic->cb_lock, flags); in e100_exec_cb()
852 if (unlikely(!nic->cbs_avail)) { in e100_exec_cb()
853 err = -ENOMEM; in e100_exec_cb()
857 cb = nic->cb_to_use; in e100_exec_cb()
858 nic->cb_to_use = cb->next; in e100_exec_cb()
859 nic->cbs_avail--; in e100_exec_cb()
860 cb->skb = skb; in e100_exec_cb()
866 if (unlikely(!nic->cbs_avail)) in e100_exec_cb()
867 err = -ENOSPC; in e100_exec_cb()
871 * set S-bit in current first, then clear S-bit in previous. */ in e100_exec_cb()
872 cb->command |= cpu_to_le16(cb_s); in e100_exec_cb()
874 cb->prev->command &= cpu_to_le16(~cb_s); in e100_exec_cb()
876 while (nic->cb_to_send != nic->cb_to_use) { in e100_exec_cb()
877 if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd, in e100_exec_cb()
878 nic->cb_to_send->dma_addr))) { in e100_exec_cb()
884 if (err == -ENOSPC) { in e100_exec_cb()
886 schedule_work(&nic->tx_timeout_task); in e100_exec_cb()
890 nic->cuc_cmd = cuc_resume; in e100_exec_cb()
891 nic->cb_to_send = nic->cb_to_send->next; in e100_exec_cb()
896 spin_unlock_irqrestore(&nic->cb_lock, flags); in e100_exec_cb()
901 static int mdio_read(struct net_device *netdev, int addr, int reg) in mdio_read() argument
904 return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0); in mdio_read()
907 static void mdio_write(struct net_device *netdev, int addr, int reg, int data) in mdio_write() argument
911 nic->mdio_ctrl(nic, addr, mdi_write, reg, data); in mdio_write()
914 /* the standard mdio_ctrl() function for usual MII-compliant hardware */
915 static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) in mdio_ctrl_hw() argument
925 * manipulation of the MDI control registers is a multi-step in mdio_ctrl_hw()
928 spin_lock_irqsave(&nic->mdio_lock, flags); in mdio_ctrl_hw()
929 for (i = 100; i; --i) { in mdio_ctrl_hw()
930 if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready) in mdio_ctrl_hw()
935 netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n"); in mdio_ctrl_hw()
936 spin_unlock_irqrestore(&nic->mdio_lock, flags); in mdio_ctrl_hw()
939 iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl); in mdio_ctrl_hw()
943 if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready) in mdio_ctrl_hw()
946 spin_unlock_irqrestore(&nic->mdio_lock, flags); in mdio_ctrl_hw()
947 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, in mdio_ctrl_hw()
948 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", in mdio_ctrl_hw()
950 addr, reg, data, data_out); in mdio_ctrl_hw()
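The iowrite32() in mdio_ctrl_hw() packs one 32-bit MDI control word: data in bits 0-15, PHY register in bits 16-20, PHY address in bits 21-25, with the opcode/ready flags (mdi_write, mdi_read, mdi_ready) in the high bits. The sketch below packs and unpacks such a word; the constants mirror that layout and should be read as illustrative.

#include <stdint.h>
#include <stdio.h>

#define MDI_WRITE 0x04000000u
#define MDI_READ  0x08000000u
#define MDI_READY 0x10000000u

static uint32_t mdi_pack(uint32_t addr, uint32_t dir, uint32_t reg, uint16_t data)
{
	/* same expression as the iowrite32() in mdio_ctrl_hw() above */
	return (reg << 16) | (addr << 21) | dir | data;
}

int main(void)
{
	uint32_t word = mdi_pack(1 /* PHY addr */, MDI_WRITE, 0 /* BMCR */, 0x3100);

	printf("mdi_ctrl = 0x%08X\n", (unsigned)word);
	printf("data=0x%04X reg=%u addr=%u write=%d\n",
	       (unsigned)(word & 0xFFFF), (unsigned)((word >> 16) & 0x1F),
	       (unsigned)((word >> 21) & 0x1F), (word & MDI_WRITE) != 0);
	return 0;
}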
958 u32 reg, in mdio_ctrl_phy_82552_v() argument
961 if ((reg == MII_BMCR) && (dir == mdi_write)) { in mdio_ctrl_phy_82552_v()
963 u16 advert = mdio_read(nic->netdev, nic->mii.phy_id, in mdio_ctrl_phy_82552_v()
976 return mdio_ctrl_hw(nic, addr, dir, reg, data); in mdio_ctrl_phy_82552_v()
979 /* Fully software-emulated mdio_ctrl() function for cards without
980 * MII-compliant PHYs.
988 u32 reg, in mdio_ctrl_phy_mii_emulated() argument
996 switch (reg) { in mdio_ctrl_phy_mii_emulated()
998 /* Auto-negotiation, right? */ in mdio_ctrl_phy_mii_emulated()
1010 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, in mdio_ctrl_phy_mii_emulated()
1011 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", in mdio_ctrl_phy_mii_emulated()
1013 addr, reg, data); in mdio_ctrl_phy_mii_emulated()
1017 switch (reg) { in mdio_ctrl_phy_mii_emulated()
1019 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, in mdio_ctrl_phy_mii_emulated()
1020 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", in mdio_ctrl_phy_mii_emulated()
1022 addr, reg, data); in mdio_ctrl_phy_mii_emulated()
1032 return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated); in e100_phy_supports_mii()
1041 nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision; in e100_get_defaults()
1042 if (nic->mac == mac_unknown) in e100_get_defaults()
1043 nic->mac = mac_82557_D100_A; in e100_get_defaults()
1045 nic->params.rfds = rfds; in e100_get_defaults()
1046 nic->params.cbs = cbs; in e100_get_defaults()
1049 nic->tx_threshold = 0xE0; in e100_get_defaults()
1052 nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | in e100_get_defaults()
1053 ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i)); in e100_get_defaults()
1056 nic->blank_rfd.command = 0; in e100_get_defaults()
1057 nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF); in e100_get_defaults()
1058 nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); in e100_get_defaults()
1061 nic->mii.phy_id_mask = 0x1F; in e100_get_defaults()
1062 nic->mii.reg_num_mask = 0x1F; in e100_get_defaults()
1063 nic->mii.dev = nic->netdev; in e100_get_defaults()
1064 nic->mii.mdio_read = mdio_read; in e100_get_defaults()
1065 nic->mii.mdio_write = mdio_write; in e100_get_defaults()
1070 struct config *config = &cb->u.config; in e100_configure()
1072 struct net_device *netdev = nic->netdev; in e100_configure()
1074 cb->command = cpu_to_le16(cb_config); in e100_configure()
1078 config->byte_count = 0x16; /* bytes in this struct */ in e100_configure()
1079 config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */ in e100_configure()
1080 config->direct_rx_dma = 0x1; /* reserved */ in e100_configure()
1081 config->standard_tcb = 0x1; /* 1=standard, 0=extended */ in e100_configure()
1082 config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */ in e100_configure()
1083 config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */ in e100_configure()
1084 config->tx_underrun_retry = 0x3; /* # of underrun retries */ in e100_configure()
1086 config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */ in e100_configure()
1087 config->pad10 = 0x6; in e100_configure()
1088 config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */ in e100_configure()
1089 config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */ in e100_configure()
1090 config->ifs = 0x6; /* x16 = inter frame spacing */ in e100_configure()
1091 config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */ in e100_configure()
1092 config->pad15_1 = 0x1; in e100_configure()
1093 config->pad15_2 = 0x1; in e100_configure()
1094 config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */ in e100_configure()
1095 config->fc_delay_hi = 0x40; /* time delay for fc frame */ in e100_configure()
1096 config->tx_padding = 0x1; /* 1=pad short frames */ in e100_configure()
1097 config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */ in e100_configure()
1098 config->pad18 = 0x1; in e100_configure()
1099 config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */ in e100_configure()
1100 config->pad20_1 = 0x1F; in e100_configure()
1101 config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */ in e100_configure()
1102 config->pad21_1 = 0x5; in e100_configure()
1104 config->adaptive_ifs = nic->adaptive_ifs; in e100_configure()
1105 config->loopback = nic->loopback; in e100_configure()
1107 if (nic->mii.force_media && nic->mii.full_duplex) in e100_configure()
1108 config->full_duplex_force = 0x1; /* 1=force, 0=auto */ in e100_configure()
1110 if (nic->flags & promiscuous || nic->loopback) { in e100_configure()
1111 config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ in e100_configure()
1112 config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ in e100_configure()
1113 config->promiscuous_mode = 0x1; /* 1=on, 0=off */ in e100_configure()
1116 if (unlikely(netdev->features & NETIF_F_RXFCS)) in e100_configure()
1117 config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */ in e100_configure()
1119 if (nic->flags & multicast_all) in e100_configure()
1120 config->multicast_all = 0x1; /* 1=accept, 0=no */ in e100_configure()
1123 if (netif_running(nic->netdev) || !(nic->flags & wol_magic)) in e100_configure()
1124 config->magic_packet_disable = 0x1; /* 1=off, 0=on */ in e100_configure()
1126 if (nic->mac >= mac_82558_D101_A4) { in e100_configure()
1127 config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */ in e100_configure()
1128 config->mwi_enable = 0x1; /* 1=enable, 0=disable */ in e100_configure()
1129 config->standard_tcb = 0x0; /* 1=standard, 0=extended */ in e100_configure()
1130 config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */ in e100_configure()
1131 if (nic->mac >= mac_82559_D101M) { in e100_configure()
1132 config->tno_intr = 0x1; /* TCO stats enable */ in e100_configure()
1134 if (nic->mac >= mac_82551_10) { in e100_configure()
1135 config->byte_count = 0x20; /* extended bytes */ in e100_configure()
1136 config->rx_d102_mode = 0x1; /* GMRC for TCO */ in e100_configure()
1139 config->standard_stat_counter = 0x0; in e100_configure()
1143 if (netdev->features & NETIF_F_RXALL) { in e100_configure()
1144 config->rx_save_overruns = 0x1; /* 1=save, 0=discard */ in e100_configure()
1145 config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ in e100_configure()
1146 config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ in e100_configure()
1149 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n", in e100_configure()
1151 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n", in e100_configure()
1153 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n", in e100_configure()
1161 * All CPUSaver parameters are 16-bit literals that are part of a
1166 * INTDELAY - This loads the dead-man timer with its initial value.
1171 * the value should probably stay within the 0x200 - 0x1000 range.
1173 * BUNDLEMAX -
1183 * BUNDLESMALL -
1184 * This contains a bit-mask describing the minimum size frame that
1221 const struct firmware *fw = nic->fw; in e100_request_firmware()
1226 /* do not load u-code for ICH devices */ in e100_request_firmware()
1227 if (nic->flags & ich) in e100_request_firmware()
1235 * "fixes for bugs in the B-step hardware (specifically, bugs in e100_request_firmware()
1243 if (nic->mac == mac_82559_D101M) { in e100_request_firmware()
1245 } else if (nic->mac == mac_82559_D101S) { in e100_request_firmware()
1247 } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) { in e100_request_firmware()
1260 err = request_firmware(&fw, fw_name, &nic->pdev->dev); in e100_request_firmware()
1264 netif_err(nic, probe, nic->netdev, in e100_request_firmware()
1269 netif_info(nic, probe, nic->netdev, in e100_request_firmware()
1278 if (fw->size != UCODE_SIZE * 4 + 3) { in e100_request_firmware()
1279 netif_err(nic, probe, nic->netdev, in e100_request_firmware()
1281 fw_name, fw->size); in e100_request_firmware()
1283 return ERR_PTR(-EINVAL); in e100_request_firmware()
1287 timer = fw->data[UCODE_SIZE * 4]; in e100_request_firmware()
1288 bundle = fw->data[UCODE_SIZE * 4 + 1]; in e100_request_firmware()
1289 min_size = fw->data[UCODE_SIZE * 4 + 2]; in e100_request_firmware()
1293 netif_err(nic, probe, nic->netdev, in e100_request_firmware()
1297 return ERR_PTR(-EINVAL); in e100_request_firmware()
1302 nic->fw = fw; in e100_request_firmware()
1314 cb->skb = NULL; in e100_setup_ucode()
1317 memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4); in e100_setup_ucode()
1320 timer = fw->data[UCODE_SIZE * 4]; in e100_setup_ucode()
1321 bundle = fw->data[UCODE_SIZE * 4 + 1]; in e100_setup_ucode()
1322 min_size = fw->data[UCODE_SIZE * 4 + 2]; in e100_setup_ucode()
1324 /* Insert user-tunable settings in cb->u.ucode */ in e100_setup_ucode()
1325 cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000); in e100_setup_ucode()
1326 cb->u.ucode[timer] |= cpu_to_le32(INTDELAY); in e100_setup_ucode()
1327 cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000); in e100_setup_ucode()
1328 cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX); in e100_setup_ucode()
1329 cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000); in e100_setup_ucode()
1330 cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); in e100_setup_ucode()
1332 cb->command = cpu_to_le16(cb_ucode | cb_el); in e100_setup_ucode()
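Each CPUSaver tunable (INTDELAY, BUNDLEMAX, BUNDLESMALL) occupies the low 16 bits of one 32-bit microcode word, so e100_setup_ucode() masks off the low half and ORs the new value in, as the lines above show. A userspace sketch of that patching follows; the word indices and values are made up, and the sketch ignores the cpu_to_le32() byte-order handling the driver does.

#include <stdint.h>
#include <stdio.h>

static void patch_low16(uint32_t *ucode, unsigned int idx, uint16_t value)
{
	ucode[idx] &= 0xFFFF0000u;   /* keep the upper (opcode) half */
	ucode[idx] |= value;         /* insert the tunable */
}

int main(void)
{
	uint32_t ucode[4] = { 0x12340000, 0xABCD0000, 0x55550000, 0x00000000 };

	patch_low16(ucode, 0, 0x0600);   /* e.g. an INTDELAY-style timer value */
	patch_low16(ucode, 1, 0x0006);   /* e.g. a BUNDLEMAX-style frame count */

	printf("ucode[0] = 0x%08X\n", (unsigned)ucode[0]);
	printf("ucode[1] = 0x%08X\n", (unsigned)ucode[1]);
	return 0;
}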
1340 struct cb *cb = nic->cb_to_clean; in e100_load_ucode_wait()
1348 netif_err(nic, probe, nic->netdev, in e100_load_ucode_wait()
1352 nic->cuc_cmd = cuc_start; in e100_load_ucode_wait()
1359 while (!(cb->status & cpu_to_le16(cb_complete))) { in e100_load_ucode_wait()
1361 if (!--counter) break; in e100_load_ucode_wait()
1365 iowrite8(~0, &nic->csr->scb.stat_ack); in e100_load_ucode_wait()
1368 if (!counter || !(cb->status & cpu_to_le16(cb_ok))) { in e100_load_ucode_wait()
1369 netif_err(nic, probe, nic->netdev, "ucode load failed\n"); in e100_load_ucode_wait()
1370 err = -EPERM; in e100_load_ucode_wait()
1379 cb->command = cpu_to_le16(cb_iaaddr); in e100_setup_iaaddr()
1380 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); in e100_setup_iaaddr()
1386 cb->command = cpu_to_le16(cb_dump); in e100_dump()
1387 cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + in e100_dump()
1397 phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f; in e100_phy_check_without_mii()
1400 case NoSuchPhy: /* Non-MII PHY; UNTESTED! */ in e100_phy_check_without_mii()
1401 case I82503: /* Non-MII PHY; UNTESTED! */ in e100_phy_check_without_mii()
1402 case S80C24: /* Non-MII PHY; tested and working */ in e100_phy_check_without_mii()
1409 netif_info(nic, probe, nic->netdev, in e100_phy_check_without_mii()
1410 "found MII-less i82503 or 80c24 or other PHY\n"); in e100_phy_check_without_mii()
1412 nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated; in e100_phy_check_without_mii()
1413 nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */ in e100_phy_check_without_mii()
1415 /* these might be needed for certain MII-less cards... in e100_phy_check_without_mii()
1416 * nic->flags |= ich; in e100_phy_check_without_mii()
1417 * nic->flags |= ich_10h_workaround; */ in e100_phy_check_without_mii()
1434 struct net_device *netdev = nic->netdev; in e100_phy_init()
1440 nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr; in e100_phy_init()
1441 bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); in e100_phy_init()
1442 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); in e100_phy_init()
1443 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); in e100_phy_init()
1456 netif_err(nic, hw, nic->netdev, in e100_phy_init()
1458 return -EAGAIN; in e100_phy_init()
1461 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, in e100_phy_init()
1462 "phy_addr = %d\n", nic->mii.phy_id); in e100_phy_init()
1465 id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); in e100_phy_init()
1466 id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); in e100_phy_init()
1467 nic->phy = (u32)id_hi << 16 | (u32)id_lo; in e100_phy_init()
1468 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, in e100_phy_init()
1469 "phy ID = 0x%08X\n", nic->phy); in e100_phy_init()
1473 if (addr != nic->mii.phy_id) { in e100_phy_init()
1475 } else if (nic->phy != phy_82552_v) { in e100_phy_init()
1486 if (nic->phy == phy_82552_v) in e100_phy_init()
1487 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, in e100_phy_init()
1492 if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) { in e100_phy_init()
1494 cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG); in e100_phy_init()
1497 mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong); in e100_phy_init()
1500 if (nic->phy == phy_82552_v) { in e100_phy_init()
1501 u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE); in e100_phy_init()
1504 nic->mdio_ctrl = mdio_ctrl_phy_82552_v; in e100_phy_init()
1506 /* Workaround Si not advertising flow-control during autoneg */ in e100_phy_init()
1508 mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert); in e100_phy_init()
1511 bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); in e100_phy_init()
1513 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); in e100_phy_init()
1514 } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && in e100_phy_init()
1515 (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && in e100_phy_init()
1516 (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) { in e100_phy_init()
1517 /* enable/disable MDI/MDI-X auto-switching. */ in e100_phy_init()
1518 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, in e100_phy_init()
1519 nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); in e100_phy_init()
1531 netif_err(nic, hw, nic->netdev, "e100_hw_init\n"); in e100_hw_init()
1548 nic->dma_addr + offsetof(struct mem, stats)))) in e100_hw_init()
1560 struct net_device *netdev = nic->netdev; in e100_multi()
1564 cb->command = cpu_to_le16(cb_multi); in e100_multi()
1565 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); in e100_multi()
1570 memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, in e100_multi()
1580 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, in e100_set_multicast_list()
1582 netdev_mc_count(netdev), netdev->flags); in e100_set_multicast_list()
1584 if (netdev->flags & IFF_PROMISC) in e100_set_multicast_list()
1585 nic->flags |= promiscuous; in e100_set_multicast_list()
1587 nic->flags &= ~promiscuous; in e100_set_multicast_list()
1589 if (netdev->flags & IFF_ALLMULTI || in e100_set_multicast_list()
1591 nic->flags |= multicast_all; in e100_set_multicast_list()
1593 nic->flags &= ~multicast_all; in e100_set_multicast_list()
1601 struct net_device *dev = nic->netdev; in e100_update_stats()
1602 struct net_device_stats *ns = &dev->stats; in e100_update_stats()
1603 struct stats *s = &nic->mem->stats; in e100_update_stats()
1604 __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : in e100_update_stats()
1605 (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames : in e100_update_stats()
1606 &s->complete; in e100_update_stats()
1614 nic->tx_frames = le32_to_cpu(s->tx_good_frames); in e100_update_stats()
1615 nic->tx_collisions = le32_to_cpu(s->tx_total_collisions); in e100_update_stats()
1616 ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions); in e100_update_stats()
1617 ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions); in e100_update_stats()
1618 ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs); in e100_update_stats()
1619 ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns); in e100_update_stats()
1620 ns->collisions += nic->tx_collisions; in e100_update_stats()
1621 ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + in e100_update_stats()
1622 le32_to_cpu(s->tx_lost_crs); in e100_update_stats()
1623 nic->rx_short_frame_errors += in e100_update_stats()
1624 le32_to_cpu(s->rx_short_frame_errors); in e100_update_stats()
1625 ns->rx_length_errors = nic->rx_short_frame_errors + in e100_update_stats()
1626 nic->rx_over_length_errors; in e100_update_stats()
1627 ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors); in e100_update_stats()
1628 ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors); in e100_update_stats()
1629 ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors); in e100_update_stats()
1630 ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors); in e100_update_stats()
1631 ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors); in e100_update_stats()
1632 ns->rx_errors += le32_to_cpu(s->rx_crc_errors) + in e100_update_stats()
1633 le32_to_cpu(s->rx_alignment_errors) + in e100_update_stats()
1634 le32_to_cpu(s->rx_short_frame_errors) + in e100_update_stats()
1635 le32_to_cpu(s->rx_cdt_errors); in e100_update_stats()
1636 nic->tx_deferred += le32_to_cpu(s->tx_deferred); in e100_update_stats()
1637 nic->tx_single_collisions += in e100_update_stats()
1638 le32_to_cpu(s->tx_single_collisions); in e100_update_stats()
1639 nic->tx_multiple_collisions += in e100_update_stats()
1640 le32_to_cpu(s->tx_multiple_collisions); in e100_update_stats()
1641 if (nic->mac >= mac_82558_D101_A4) { in e100_update_stats()
1642 nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause); in e100_update_stats()
1643 nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause); in e100_update_stats()
1644 nic->rx_fc_unsupported += in e100_update_stats()
1645 le32_to_cpu(s->fc_rcv_unsupported); in e100_update_stats()
1646 if (nic->mac >= mac_82559_D101M) { in e100_update_stats()
1647 nic->tx_tco_frames += in e100_update_stats()
1648 le16_to_cpu(s->xmt_tco_frames); in e100_update_stats()
1649 nic->rx_tco_frames += in e100_update_stats()
1650 le16_to_cpu(s->rcv_tco_frames); in e100_update_stats()
1657 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, in e100_update_stats()
1663 /* Adjust inter-frame-spacing (IFS) between two transmits if in e100_adjust_adaptive_ifs()
1664 * we're getting collisions on a half-duplex connection. */ in e100_adjust_adaptive_ifs()
1667 u32 prev = nic->adaptive_ifs; in e100_adjust_adaptive_ifs()
1670 if ((nic->tx_frames / 32 < nic->tx_collisions) && in e100_adjust_adaptive_ifs()
1671 (nic->tx_frames > min_frames)) { in e100_adjust_adaptive_ifs()
1672 if (nic->adaptive_ifs < 60) in e100_adjust_adaptive_ifs()
1673 nic->adaptive_ifs += 5; in e100_adjust_adaptive_ifs()
1674 } else if (nic->tx_frames < min_frames) { in e100_adjust_adaptive_ifs()
1675 if (nic->adaptive_ifs >= 5) in e100_adjust_adaptive_ifs()
1676 nic->adaptive_ifs -= 5; in e100_adjust_adaptive_ifs()
1678 if (nic->adaptive_ifs != prev) in e100_adjust_adaptive_ifs()
1689 netif_printk(nic, timer, KERN_DEBUG, nic->netdev, in e100_watchdog()
1694 mii_ethtool_gset(&nic->mii, &cmd); in e100_watchdog()
1697 if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { in e100_watchdog()
1698 netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n", in e100_watchdog()
1701 } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { in e100_watchdog()
1702 netdev_info(nic->netdev, "NIC Link is Down\n"); in e100_watchdog()
1705 mii_check_link(&nic->mii); in e100_watchdog()
1709 * Unfortunately have to use a spinlock to not re-enable interrupts in e100_watchdog()
1712 spin_lock_irq(&nic->cmd_lock); in e100_watchdog()
1713 iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen, &nic->csr->scb.cmd_hi); in e100_watchdog()
1715 spin_unlock_irq(&nic->cmd_lock); in e100_watchdog()
1720 if (nic->mac <= mac_82557_D100_C) in e100_watchdog()
1722 e100_set_multicast_list(nic->netdev); in e100_watchdog()
1724 if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF) in e100_watchdog()
1726 nic->flags |= ich_10h_workaround; in e100_watchdog()
1728 nic->flags &= ~ich_10h_workaround; in e100_watchdog()
1730 mod_timer(&nic->watchdog, in e100_watchdog()
1738 cb->command = nic->tx_command; in e100_xmit_prepare()
1740 dma_addr = dma_map_single(&nic->pdev->dev, skb->data, skb->len, in e100_xmit_prepare()
1743 if (dma_mapping_error(&nic->pdev->dev, dma_addr)) in e100_xmit_prepare()
1744 return -ENOMEM; in e100_xmit_prepare()
1750 if (unlikely(skb->no_fcs)) in e100_xmit_prepare()
1751 cb->command |= cpu_to_le16(cb_tx_nc); in e100_xmit_prepare()
1753 cb->command &= ~cpu_to_le16(cb_tx_nc); in e100_xmit_prepare()
1756 if ((nic->cbs_avail & ~15) == nic->cbs_avail) in e100_xmit_prepare()
1757 cb->command |= cpu_to_le16(cb_i); in e100_xmit_prepare()
1758 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); in e100_xmit_prepare()
1759 cb->u.tcb.tcb_byte_count = 0; in e100_xmit_prepare()
1760 cb->u.tcb.threshold = nic->tx_threshold; in e100_xmit_prepare()
1761 cb->u.tcb.tbd_count = 1; in e100_xmit_prepare()
1762 cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr); in e100_xmit_prepare()
1763 cb->u.tcb.tbd.size = cpu_to_le16(skb->len); in e100_xmit_prepare()
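The interrupt-batching test in e100_xmit_prepare() above, (nic->cbs_avail & ~15) == nic->cbs_avail, is true exactly when the low four bits of cbs_avail are zero, so the cb_i completion interrupt is requested only on roughly every 16th transmit rather than on every frame. A tiny sketch of that predicate:

#include <stdio.h>

int main(void)
{
	unsigned int cbs_avail;

	/* only multiples of 16 in this range satisfy the mask test */
	for (cbs_avail = 60; cbs_avail > 40; cbs_avail--)
		if ((cbs_avail & ~15u) == cbs_avail)
			printf("cbs_avail=%u -> request cb_i interrupt\n",
			       cbs_avail);
	return 0;
}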
1774 if (nic->flags & ich_10h_workaround) { in e100_xmit_frame()
1779 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, in e100_xmit_frame()
1787 case -ENOSPC: in e100_xmit_frame()
1789 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, in e100_xmit_frame()
1793 case -ENOMEM: in e100_xmit_frame()
1794 /* This is a hard error - log it. */ in e100_xmit_frame()
1795 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, in e100_xmit_frame()
1806 struct net_device *dev = nic->netdev; in e100_tx_clean()
1810 spin_lock(&nic->cb_lock); in e100_tx_clean()
1813 for (cb = nic->cb_to_clean; in e100_tx_clean()
1814 cb->status & cpu_to_le16(cb_complete); in e100_tx_clean()
1815 cb = nic->cb_to_clean = cb->next) { in e100_tx_clean()
1817 netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, in e100_tx_clean()
1818 "cb[%d]->status = 0x%04X\n", in e100_tx_clean()
1819 (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), in e100_tx_clean()
1820 cb->status); in e100_tx_clean()
1822 if (likely(cb->skb != NULL)) { in e100_tx_clean()
1823 dev->stats.tx_packets++; in e100_tx_clean()
1824 dev->stats.tx_bytes += cb->skb->len; in e100_tx_clean()
1826 dma_unmap_single(&nic->pdev->dev, in e100_tx_clean()
1827 le32_to_cpu(cb->u.tcb.tbd.buf_addr), in e100_tx_clean()
1828 le16_to_cpu(cb->u.tcb.tbd.size), in e100_tx_clean()
1830 dev_kfree_skb_any(cb->skb); in e100_tx_clean()
1831 cb->skb = NULL; in e100_tx_clean()
1834 cb->status = 0; in e100_tx_clean()
1835 nic->cbs_avail++; in e100_tx_clean()
1838 spin_unlock(&nic->cb_lock); in e100_tx_clean()
1841 if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev))) in e100_tx_clean()
1842 netif_wake_queue(nic->netdev); in e100_tx_clean()
1849 if (nic->cbs) { in e100_clean_cbs()
1850 while (nic->cbs_avail != nic->params.cbs.count) { in e100_clean_cbs()
1851 struct cb *cb = nic->cb_to_clean; in e100_clean_cbs()
1852 if (cb->skb) { in e100_clean_cbs()
1853 dma_unmap_single(&nic->pdev->dev, in e100_clean_cbs()
1854 le32_to_cpu(cb->u.tcb.tbd.buf_addr), in e100_clean_cbs()
1855 le16_to_cpu(cb->u.tcb.tbd.size), in e100_clean_cbs()
1857 dev_kfree_skb(cb->skb); in e100_clean_cbs()
1859 nic->cb_to_clean = nic->cb_to_clean->next; in e100_clean_cbs()
1860 nic->cbs_avail++; in e100_clean_cbs()
1862 dma_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr); in e100_clean_cbs()
1863 nic->cbs = NULL; in e100_clean_cbs()
1864 nic->cbs_avail = 0; in e100_clean_cbs()
1866 nic->cuc_cmd = cuc_start; in e100_clean_cbs()
1867 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = in e100_clean_cbs()
1868 nic->cbs; in e100_clean_cbs()
1874 unsigned int i, count = nic->params.cbs.count; in e100_alloc_cbs()
1876 nic->cuc_cmd = cuc_start; in e100_alloc_cbs()
1877 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; in e100_alloc_cbs()
1878 nic->cbs_avail = 0; in e100_alloc_cbs()
1880 nic->cbs = dma_pool_zalloc(nic->cbs_pool, GFP_KERNEL, in e100_alloc_cbs()
1881 &nic->cbs_dma_addr); in e100_alloc_cbs()
1882 if (!nic->cbs) in e100_alloc_cbs()
1883 return -ENOMEM; in e100_alloc_cbs()
1885 for (cb = nic->cbs, i = 0; i < count; cb++, i++) { in e100_alloc_cbs()
1886 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs; in e100_alloc_cbs()
1887 cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1; in e100_alloc_cbs()
1889 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb); in e100_alloc_cbs()
1890 cb->link = cpu_to_le32(nic->cbs_dma_addr + in e100_alloc_cbs()
1894 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs; in e100_alloc_cbs()
1895 nic->cbs_avail = count; in e100_alloc_cbs()
1902 if (!nic->rxs) return; in e100_start_receiver()
1903 if (RU_SUSPENDED != nic->ru_running) return; in e100_start_receiver()
1906 if (!rx) rx = nic->rxs; in e100_start_receiver()
1908 /* (Re)start RU if suspended or idle and RFA is non-NULL */ in e100_start_receiver()
1909 if (rx->skb) { in e100_start_receiver()
1910 e100_exec_cmd(nic, ruc_start, rx->dma_addr); in e100_start_receiver()
1911 nic->ru_running = RU_RUNNING; in e100_start_receiver()
1918 if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN))) in e100_rx_alloc_skb()
1919 return -ENOMEM; in e100_rx_alloc_skb()
1922 skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); in e100_rx_alloc_skb()
1923 rx->dma_addr = dma_map_single(&nic->pdev->dev, rx->skb->data, in e100_rx_alloc_skb()
1926 if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) { in e100_rx_alloc_skb()
1927 dev_kfree_skb_any(rx->skb); in e100_rx_alloc_skb()
1928 rx->skb = NULL; in e100_rx_alloc_skb()
1929 rx->dma_addr = 0; in e100_rx_alloc_skb()
1930 return -ENOMEM; in e100_rx_alloc_skb()
1936 if (rx->prev->skb) { in e100_rx_alloc_skb()
1937 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; in e100_rx_alloc_skb()
1938 put_unaligned_le32(rx->dma_addr, &prev_rfd->link); in e100_rx_alloc_skb()
1939 dma_sync_single_for_device(&nic->pdev->dev, in e100_rx_alloc_skb()
1940 rx->prev->dma_addr, in e100_rx_alloc_skb()
1951 struct net_device *dev = nic->netdev; in e100_rx_indicate()
1952 struct sk_buff *skb = rx->skb; in e100_rx_indicate()
1953 struct rfd *rfd = (struct rfd *)skb->data; in e100_rx_indicate()
1958 return -EAGAIN; in e100_rx_indicate()
1961 dma_sync_single_for_cpu(&nic->pdev->dev, rx->dma_addr, in e100_rx_indicate()
1963 rfd_status = le16_to_cpu(rfd->status); in e100_rx_indicate()
1965 netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, in e100_rx_indicate()
1974 * This allows for a fast restart without re-enabling in e100_rx_indicate()
1976 if ((le16_to_cpu(rfd->command) & cb_el) && in e100_rx_indicate()
1977 (RU_RUNNING == nic->ru_running)) in e100_rx_indicate()
1979 if (ioread8(&nic->csr->scb.status) & rus_no_res) in e100_rx_indicate()
1980 nic->ru_running = RU_SUSPENDED; in e100_rx_indicate()
1981 dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, in e100_rx_indicate()
1984 return -ENODATA; in e100_rx_indicate()
1988 if (unlikely(dev->features & NETIF_F_RXFCS)) in e100_rx_indicate()
1990 actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF; in e100_rx_indicate()
1991 if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd))) in e100_rx_indicate()
1992 actual_size = RFD_BUF_LEN - sizeof(struct rfd); in e100_rx_indicate()
1995 dma_unmap_single(&nic->pdev->dev, rx->dma_addr, RFD_BUF_LEN, in e100_rx_indicate()
2001 * This allows for a fast restart without re-enabling interrupts. in e100_rx_indicate()
2004 if ((le16_to_cpu(rfd->command) & cb_el) && in e100_rx_indicate()
2005 (RU_RUNNING == nic->ru_running)) { in e100_rx_indicate()
2007 if (ioread8(&nic->csr->scb.status) & rus_no_res) in e100_rx_indicate()
2008 nic->ru_running = RU_SUSPENDED; in e100_rx_indicate()
2014 skb->protocol = eth_type_trans(skb, nic->netdev); in e100_rx_indicate()
2019 if (unlikely(dev->features & NETIF_F_RXALL)) { in e100_rx_indicate()
2022 nic->rx_over_length_errors++; in e100_rx_indicate()
2031 nic->rx_over_length_errors++; in e100_rx_indicate()
2035 dev->stats.rx_packets++; in e100_rx_indicate()
2036 dev->stats.rx_bytes += (actual_size - fcs_pad); in e100_rx_indicate()
2042 rx->skb = NULL; in e100_rx_indicate()
2056 for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { in e100_rx_clean()
2059 if (-EAGAIN == err || -ENODATA == err) in e100_rx_clean()
2070 if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running) in e100_rx_clean()
2073 old_before_last_rx = nic->rx_to_use->prev->prev; in e100_rx_clean()
2074 old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data; in e100_rx_clean()
2077 for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { in e100_rx_clean()
2082 new_before_last_rx = nic->rx_to_use->prev->prev; in e100_rx_clean()
2084 /* Set the el-bit on the buffer that is before the last buffer. in e100_rx_clean()
2089 * When the hardware hits the before last buffer with el-bit in e100_rx_clean()
2094 (struct rfd *)new_before_last_rx->skb->data; in e100_rx_clean()
2095 new_before_last_rfd->size = 0; in e100_rx_clean()
2096 new_before_last_rfd->command |= cpu_to_le16(cb_el); in e100_rx_clean()
2097 dma_sync_single_for_device(&nic->pdev->dev, in e100_rx_clean()
2098 new_before_last_rx->dma_addr, in e100_rx_clean()
2105 old_before_last_rfd->command &= ~cpu_to_le16(cb_el); in e100_rx_clean()
2106 dma_sync_single_for_device(&nic->pdev->dev, in e100_rx_clean()
2107 old_before_last_rx->dma_addr, in e100_rx_clean()
2110 old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN in e100_rx_clean()
2112 dma_sync_single_for_device(&nic->pdev->dev, in e100_rx_clean()
2113 old_before_last_rx->dma_addr, in e100_rx_clean()
2120 iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack); in e100_rx_clean()
2121 e100_start_receiver(nic, nic->rx_to_clean); in e100_rx_clean()
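The comments in e100_rx_clean() above describe how the receive unit is throttled: a zero-sized RFD with the EL bit set sits one entry before the last allocated buffer, and after refilling, that marker is moved forward (set EL/size=0 on the new before-last entry first, then clear EL and restore the size on the old one). A toy model of that bookkeeping on a small array-based ring follows; the structure and names are invented for the sketch.

#include <stdint.h>
#include <stdio.h>

#define N        6
#define RFD_EL   0x8000u    /* end-of-list bit, mirrors cb_el in the driver */
#define BUF_SIZE 1518u

struct toy_rfd { uint16_t command, size; };

static void move_el_marker(struct toy_rfd *r, int old_bl, int new_bl)
{
	r[new_bl].command |= RFD_EL;                 /* new stop point for the RU first */
	r[new_bl].size = 0;
	r[old_bl].command &= (uint16_t)~RFD_EL;      /* then make the old one usable again */
	r[old_bl].size = BUF_SIZE;
}

int main(void)
{
	struct toy_rfd ring[N];
	int i;

	for (i = 0; i < N; i++)
		ring[i] = (struct toy_rfd){ .command = 0, .size = BUF_SIZE };
	ring[N - 2].command |= RFD_EL;               /* initial before-last marker */
	ring[N - 2].size = 0;

	move_el_marker(ring, N - 2, (N - 2 + 2) % N);   /* two buffers were refilled */

	for (i = 0; i < N; i++)
		printf("rfd[%d] cmd=0x%04X size=%u\n", i,
		       (unsigned)ring[i].command, (unsigned)ring[i].size);
	return 0;
}

Setting the new marker before clearing the old one preserves the invariant that the hardware always has a stop point, which is the ordering the driver's comments insist on.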
2130 unsigned int i, count = nic->params.rfds.count; in e100_rx_clean_list()
2132 nic->ru_running = RU_UNINITIALIZED; in e100_rx_clean_list()
2134 if (nic->rxs) { in e100_rx_clean_list()
2135 for (rx = nic->rxs, i = 0; i < count; rx++, i++) { in e100_rx_clean_list()
2136 if (rx->skb) { in e100_rx_clean_list()
2137 dma_unmap_single(&nic->pdev->dev, in e100_rx_clean_list()
2138 rx->dma_addr, RFD_BUF_LEN, in e100_rx_clean_list()
2140 dev_kfree_skb(rx->skb); in e100_rx_clean_list()
2143 kfree(nic->rxs); in e100_rx_clean_list()
2144 nic->rxs = NULL; in e100_rx_clean_list()
2147 nic->rx_to_use = nic->rx_to_clean = NULL; in e100_rx_clean_list()
2153 unsigned int i, count = nic->params.rfds.count; in e100_rx_alloc_list()
2156 nic->rx_to_use = nic->rx_to_clean = NULL; in e100_rx_alloc_list()
2157 nic->ru_running = RU_UNINITIALIZED; in e100_rx_alloc_list()
2159 if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_KERNEL))) in e100_rx_alloc_list()
2160 return -ENOMEM; in e100_rx_alloc_list()
2162 for (rx = nic->rxs, i = 0; i < count; rx++, i++) { in e100_rx_alloc_list()
2163 rx->next = (i + 1 < count) ? rx + 1 : nic->rxs; in e100_rx_alloc_list()
2164 rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1; in e100_rx_alloc_list()
2167 return -ENOMEM; in e100_rx_alloc_list()
2170 /* Set the el-bit on the buffer that is before the last buffer. in e100_rx_alloc_list()
2174 * When the hardware hits the before last buffer with el-bit and size in e100_rx_alloc_list()
2177 rx = nic->rxs->prev->prev; in e100_rx_alloc_list()
2178 before_last = (struct rfd *)rx->skb->data; in e100_rx_alloc_list()
2179 before_last->command |= cpu_to_le16(cb_el); in e100_rx_alloc_list()
2180 before_last->size = 0; in e100_rx_alloc_list()
2181 dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, in e100_rx_alloc_list()
2184 nic->rx_to_use = nic->rx_to_clean = nic->rxs; in e100_rx_alloc_list()
2185 nic->ru_running = RU_SUSPENDED; in e100_rx_alloc_list()
2194 u8 stat_ack = ioread8(&nic->csr->scb.stat_ack); in e100_intr()
2196 netif_printk(nic, intr, KERN_DEBUG, nic->netdev, in e100_intr()
2204 iowrite8(stat_ack, &nic->csr->scb.stat_ack); in e100_intr()
2208 nic->ru_running = RU_SUSPENDED; in e100_intr()
2210 if (likely(napi_schedule_prep(&nic->napi))) { in e100_intr()
2212 __napi_schedule(&nic->napi); in e100_intr()
2230 /* only re-enable interrupt if stack agrees polling is really done */ in e100_poll()
2243 e100_intr(nic->pdev->irq, netdev); in e100_netpoll()
2254 if (!is_valid_ether_addr(addr->sa_data)) in e100_set_mac_address()
2255 return -EADDRNOTAVAIL; in e100_set_mac_address()
2257 eth_hw_addr_set(netdev, addr->sa_data); in e100_set_mac_address()
2266 return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) && in e100_asf()
2267 (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) && in e100_asf()
2268 !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) && in e100_asf()
2269 ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE); in e100_asf()
2282 e100_set_multicast_list(nic->netdev); in e100_up()
2284 mod_timer(&nic->watchdog, jiffies); in e100_up()
2285 if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED, in e100_up()
2286 nic->netdev->name, nic->netdev))) in e100_up()
2288 netif_wake_queue(nic->netdev); in e100_up()
2289 napi_enable(&nic->napi); in e100_up()
2296 del_timer_sync(&nic->watchdog); in e100_up()
2307 napi_disable(&nic->napi); in e100_down()
2308 netif_stop_queue(nic->netdev); in e100_down()
2310 free_irq(nic->pdev->irq, nic->netdev); in e100_down()
2311 del_timer_sync(&nic->watchdog); in e100_down()
2312 netif_carrier_off(nic->netdev); in e100_down()
2323 schedule_work(&nic->tx_timeout_task); in e100_tx_timeout()
2329 struct net_device *netdev = nic->netdev; in e100_tx_timeout_task()
2331 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, in e100_tx_timeout_task()
2332 "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status)); in e100_tx_timeout_task()
2350 * packet compares byte-for-byte to the transmitted packet. */ in e100_loopback_test()
2358 if (nic->flags & ich && loopback_mode == lb_phy) in e100_loopback_test()
2361 nic->loopback = loopback_mode; in e100_loopback_test()
2366 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, in e100_loopback_test()
2371 if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) { in e100_loopback_test()
2372 err = -ENOMEM; in e100_loopback_test()
2376 memset(skb->data, 0xFF, ETH_DATA_LEN); in e100_loopback_test()
2377 e100_xmit_frame(skb, nic->netdev); in e100_loopback_test()
2381 dma_sync_single_for_cpu(&nic->pdev->dev, nic->rx_to_clean->dma_addr, in e100_loopback_test()
2384 if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), in e100_loopback_test()
2385 skb->data, ETH_DATA_LEN)) in e100_loopback_test()
2386 err = -EAGAIN; in e100_loopback_test()
2389 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0); in e100_loopback_test()
2390 nic->loopback = lb_none; in e100_loopback_test()
2408 mii_ethtool_get_link_ksettings(&nic->mii, cmd); in e100_get_link_ksettings()
2419 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET); in e100_set_link_ksettings()
2420 err = mii_ethtool_set_link_ksettings(&nic->mii, cmd); in e100_set_link_ksettings()
2430 strscpy(info->driver, DRV_NAME, sizeof(info->driver)); in e100_get_drvinfo()
2431 strscpy(info->bus_info, pci_name(nic->pdev), in e100_get_drvinfo()
2432 sizeof(info->bus_info)); in e100_get_drvinfo()
2443 return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf); in e100_get_regs_len()
2453 regs->version = (1 << 24) | nic->pdev->revision; in e100_get_regs()
2454 buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 | in e100_get_regs()
2455 ioread8(&nic->csr->scb.cmd_lo) << 16 | in e100_get_regs()
2456 ioread16(&nic->csr->scb.status); in e100_get_regs()
2462 buff[1 + i] = mdio_read(netdev, nic->mii.phy_id, in e100_get_regs()
2463 E100_PHY_REGS - 1 - i); in e100_get_regs()
2464 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf)); in e100_get_regs()
2467 memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf, in e100_get_regs()
2468 sizeof(nic->mem->dump_buf)); in e100_get_regs()
2474 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0; in e100_get_wol()
2475 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0; in e100_get_wol()
2482 if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) || in e100_set_wol()
2483 !device_can_wakeup(&nic->pdev->dev)) in e100_set_wol()
2484 return -EOPNOTSUPP; in e100_set_wol()
2486 if (wol->wolopts) in e100_set_wol()
2487 nic->flags |= wol_magic; in e100_set_wol()
2489 nic->flags &= ~wol_magic; in e100_set_wol()
2491 device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts); in e100_set_wol()
2501 return nic->msg_enable; in e100_get_msglevel()
2507 nic->msg_enable = value; in e100_set_msglevel()
2513 return mii_nway_restart(&nic->mii); in e100_nway_reset()
2519 return mii_link_ok(&nic->mii); in e100_get_link()
2525 return nic->eeprom_wc << 1; in e100_get_eeprom_len()
2534 eeprom->magic = E100_EEPROM_MAGIC; in e100_get_eeprom()
2535 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len); in e100_get_eeprom()
2545 if (eeprom->magic != E100_EEPROM_MAGIC) in e100_set_eeprom()
2546 return -EINVAL; in e100_set_eeprom()
2548 memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len); in e100_set_eeprom()
2550 return e100_eeprom_save(nic, eeprom->offset >> 1, in e100_set_eeprom()
2551 (eeprom->len >> 1) + 1); in e100_set_eeprom()
2560 struct param_range *rfds = &nic->params.rfds; in e100_get_ringparam()
2561 struct param_range *cbs = &nic->params.cbs; in e100_get_ringparam()
2563 ring->rx_max_pending = rfds->max; in e100_get_ringparam()
2564 ring->tx_max_pending = cbs->max; in e100_get_ringparam()
2565 ring->rx_pending = rfds->count; in e100_get_ringparam()
2566 ring->tx_pending = cbs->count; in e100_get_ringparam()
2575 struct param_range *rfds = &nic->params.rfds; in e100_set_ringparam()
2576 struct param_range *cbs = &nic->params.cbs; in e100_set_ringparam()
2578 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) in e100_set_ringparam()
2579 return -EINVAL; in e100_set_ringparam()
2583 rfds->count = max(ring->rx_pending, rfds->min); in e100_set_ringparam()
2584 rfds->count = min(rfds->count, rfds->max); in e100_set_ringparam()
2585 cbs->count = max(ring->tx_pending, cbs->min); in e100_set_ringparam()
2586 cbs->count = min(cbs->count, cbs->max); in e100_set_ringparam()
2587 netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n", in e100_set_ringparam()
2588 rfds->count, cbs->count); in e100_set_ringparam()
2612 data[0] = !mii_link_ok(&nic->mii); in e100_diag_test()
2614 if (test->flags & ETH_TEST_FL_OFFLINE) { in e100_diag_test()
2617 mii_ethtool_gset(&nic->mii, &cmd); in e100_diag_test()
2626 mii_ethtool_sset(&nic->mii, &cmd); in e100_diag_test()
2632 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; in e100_diag_test()
2647 u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE : in e100_set_phys_id()
2656 leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON : in e100_set_phys_id()
2657 (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559; in e100_set_phys_id()
2661 leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off; in e100_set_phys_id()
2668 mdio_write(netdev, nic->mii.phy_id, led_reg, leds); in e100_set_phys_id()
2679 /* device-specific stats */
2696 return -EOPNOTSUPP; in e100_get_sset_count()
2707 data[i] = ((unsigned long *)&netdev->stats)[i]; in e100_get_ethtool_stats()
2709 data[i++] = nic->tx_deferred; in e100_get_ethtool_stats()
2710 data[i++] = nic->tx_single_collisions; in e100_get_ethtool_stats()
2711 data[i++] = nic->tx_multiple_collisions; in e100_get_ethtool_stats()
2712 data[i++] = nic->tx_fc_pause; in e100_get_ethtool_stats()
2713 data[i++] = nic->rx_fc_pause; in e100_get_ethtool_stats()
2714 data[i++] = nic->rx_fc_unsupported; in e100_get_ethtool_stats()
2715 data[i++] = nic->tx_tco_frames; in e100_get_ethtool_stats()
2716 data[i++] = nic->rx_tco_frames; in e100_get_ethtool_stats()
2717 data[i++] = nic->rx_short_frame_errors; in e100_get_ethtool_stats()
2718 data[i++] = nic->rx_over_length_errors; in e100_get_ethtool_stats()
2762 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL); in e100_do_ioctl()
2767 nic->mem = dma_alloc_coherent(&nic->pdev->dev, sizeof(struct mem), in e100_alloc()
2768 &nic->dma_addr, GFP_KERNEL); in e100_alloc()
2769 return nic->mem ? 0 : -ENOMEM; in e100_alloc()
2774 if (nic->mem) { in e100_free()
2775 dma_free_coherent(&nic->pdev->dev, sizeof(struct mem), in e100_free()
2776 nic->mem, nic->dma_addr); in e100_free()
2777 nic->mem = NULL; in e100_free()
2788 netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n"); in e100_open()
2802 netdev_features_t changed = features ^ netdev->features; in e100_set_features()
2807 netdev->features = features; in e100_set_features()
2834 return -ENOMEM; in e100_probe()
2836 netdev->hw_features |= NETIF_F_RXFCS; in e100_probe()
2837 netdev->priv_flags |= IFF_SUPP_NOFCS; in e100_probe()
2838 netdev->hw_features |= NETIF_F_RXALL; in e100_probe()
2840 netdev->netdev_ops = &e100_netdev_ops; in e100_probe()
2841 netdev->ethtool_ops = &e100_ethtool_ops; in e100_probe()
2842 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; in e100_probe()
2843 strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); in e100_probe()
2846 netif_napi_add_weight(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); in e100_probe()
2847 nic->netdev = netdev; in e100_probe()
2848 nic->pdev = pdev; in e100_probe()
2849 nic->msg_enable = (1 << debug) - 1; in e100_probe()
2850 nic->mdio_ctrl = mdio_ctrl_hw; in e100_probe()
2854 netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n"); in e100_probe()
2859 netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n"); in e100_probe()
2860 err = -ENODEV; in e100_probe()
2865 netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n"); in e100_probe()
2869 if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) { in e100_probe()
2870 netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n"); in e100_probe()
2874 SET_NETDEV_DEV(netdev, &pdev->dev); in e100_probe()
2877 netif_info(nic, probe, nic->netdev, "using i/o access mode\n"); in e100_probe()
2879 nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr)); in e100_probe()
2880 if (!nic->csr) { in e100_probe()
2881 netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n"); in e100_probe()
2882 err = -ENOMEM; in e100_probe()
2886 if (ent->driver_data) in e100_probe()
2887 nic->flags |= ich; in e100_probe()
2889 nic->flags &= ~ich; in e100_probe()
2894 if (nic->mac < mac_82558_D101_A4) in e100_probe()
2895 netdev->features |= NETIF_F_VLAN_CHALLENGED; in e100_probe()
2898 spin_lock_init(&nic->cb_lock); in e100_probe()
2899 spin_lock_init(&nic->cmd_lock); in e100_probe()
2900 spin_lock_init(&nic->mdio_lock); in e100_probe()
2903 * funky state and has an interrupt pending - hint: we don't have the in e100_probe()
2909 timer_setup(&nic->watchdog, e100_watchdog, 0); in e100_probe()
2911 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); in e100_probe()
2914 netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n"); in e100_probe()
2923 eth_hw_addr_set(netdev, (u8 *)nic->eeprom); in e100_probe()
2924 if (!is_valid_ether_addr(netdev->dev_addr)) { in e100_probe()
2926 netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n"); in e100_probe()
2927 err = -EAGAIN; in e100_probe()
2930 netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n"); in e100_probe()
2935 if ((nic->mac >= mac_82558_D101_A4) && in e100_probe()
2936 (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) { in e100_probe()
2937 nic->flags |= wol_magic; in e100_probe()
2938 device_set_wakeup_enable(&pdev->dev, true); in e100_probe()
2944 strcpy(netdev->name, "eth%d"); in e100_probe()
2946 netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n"); in e100_probe()
2949 nic->cbs_pool = dma_pool_create(netdev->name, in e100_probe()
2950 &nic->pdev->dev, in e100_probe()
2951 nic->params.cbs.max * sizeof(struct cb), in e100_probe()
2954 if (!nic->cbs_pool) { in e100_probe()
2955 netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n"); in e100_probe()
2956 err = -ENOMEM; in e100_probe()
2959 netif_info(nic, probe, nic->netdev, in e100_probe()
2962 pdev->irq, netdev->dev_addr); in e100_probe()
2971 pci_iounmap(pdev, nic->csr); in e100_probe()
2989 pci_iounmap(pdev, nic->csr); in e100_remove()
2990 dma_pool_destroy(nic->cbs_pool); in e100_remove()
2998 #define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */
2999 #define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */
3010 if ((nic->flags & wol_magic) | e100_asf(nic)) { in __e100_shutdown()
3011 /* enable reverse auto-negotiation */ in __e100_shutdown()
3012 if (nic->phy == phy_82552_v) { in __e100_shutdown()
3013 u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, in __e100_shutdown()
3016 mdio_write(netdev, nic->mii.phy_id, in __e100_shutdown()
3061 /* disable reverse auto-negotiation */ in e100_resume()
3062 if (nic->phy == phy_82552_v) { in e100_resume()
3063 u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, in e100_resume()
3066 mdio_write(netdev, nic->mii.phy_id, in e100_resume()
3087 /* ------------------ PCI Error Recovery infrastructure -------------- */
3089 * e100_io_error_detected - called when PCI error is detected.
3112 * e100_io_slot_reset - called after the pci bus has been reset.
3123 pr_err("Cannot re-enable PCI device after reset\n"); in e100_io_slot_reset()
3129 if (0 != PCI_FUNC(pdev->devfn)) in e100_io_slot_reset()
3138 * e100_io_resume - resume normal operations
3155 mod_timer(&nic->watchdog, jiffies); in e100_io_resume()
3182 if (((1 << debug) - 1) & NETIF_MSG_DRV) { in e100_init_module()