/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "e1000.h"
#include <net/ip6_checksum.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>

/* Intel Media SOC GbE MDIO physical base address */
static unsigned long ce4100_gbe_mdio_base_phy;
/* Intel Media SOC GbE MDIO virtual base address */
void __iomem *ce4100_gbe_mdio_base_virt;

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev, u32 features);
static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif

#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static struct pci_driver e1000_driver = {
	.name = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe = e1000_probe,
	.remove = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = e1000_suspend,
	.resume = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * e1000_get_hw_dev - return device
 * used by hardware layer to print debugging information
 *
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init e1000_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);

	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for "
				"packets <= %u bytes\n", copybreak);
	}
	return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
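 * Unregistering the PCI driver detaches any devices still bound to it,
 * which invokes e1000_remove() for each of them.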
269 **/ 270 271 static void __exit e1000_exit_module(void) 272 { 273 pci_unregister_driver(&e1000_driver); 274 } 275 276 module_exit(e1000_exit_module); 277 278 static int e1000_request_irq(struct e1000_adapter *adapter) 279 { 280 struct net_device *netdev = adapter->netdev; 281 irq_handler_t handler = e1000_intr; 282 int irq_flags = IRQF_SHARED; 283 int err; 284 285 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, 286 netdev); 287 if (err) { 288 e_err(probe, "Unable to allocate interrupt Error: %d\n", err); 289 } 290 291 return err; 292 } 293 294 static void e1000_free_irq(struct e1000_adapter *adapter) 295 { 296 struct net_device *netdev = adapter->netdev; 297 298 free_irq(adapter->pdev->irq, netdev); 299 } 300 301 /** 302 * e1000_irq_disable - Mask off interrupt generation on the NIC 303 * @adapter: board private structure 304 **/ 305 306 static void e1000_irq_disable(struct e1000_adapter *adapter) 307 { 308 struct e1000_hw *hw = &adapter->hw; 309 310 ew32(IMC, ~0); 311 E1000_WRITE_FLUSH(); 312 synchronize_irq(adapter->pdev->irq); 313 } 314 315 /** 316 * e1000_irq_enable - Enable default interrupt generation settings 317 * @adapter: board private structure 318 **/ 319 320 static void e1000_irq_enable(struct e1000_adapter *adapter) 321 { 322 struct e1000_hw *hw = &adapter->hw; 323 324 ew32(IMS, IMS_ENABLE_MASK); 325 E1000_WRITE_FLUSH(); 326 } 327 328 static void e1000_update_mng_vlan(struct e1000_adapter *adapter) 329 { 330 struct e1000_hw *hw = &adapter->hw; 331 struct net_device *netdev = adapter->netdev; 332 u16 vid = hw->mng_cookie.vlan_id; 333 u16 old_vid = adapter->mng_vlan_id; 334 335 if (!e1000_vlan_used(adapter)) 336 return; 337 338 if (!test_bit(vid, adapter->active_vlans)) { 339 if (hw->mng_cookie.status & 340 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { 341 e1000_vlan_rx_add_vid(netdev, vid); 342 adapter->mng_vlan_id = vid; 343 } else { 344 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 345 } 346 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && 347 (vid != old_vid) && 348 !test_bit(old_vid, adapter->active_vlans)) 349 e1000_vlan_rx_kill_vid(netdev, old_vid); 350 } else { 351 adapter->mng_vlan_id = vid; 352 } 353 } 354 355 static void e1000_init_manageability(struct e1000_adapter *adapter) 356 { 357 struct e1000_hw *hw = &adapter->hw; 358 359 if (adapter->en_mng_pt) { 360 u32 manc = er32(MANC); 361 362 /* disable hardware interception of ARP */ 363 manc &= ~(E1000_MANC_ARP_EN); 364 365 ew32(MANC, manc); 366 } 367 } 368 369 static void e1000_release_manageability(struct e1000_adapter *adapter) 370 { 371 struct e1000_hw *hw = &adapter->hw; 372 373 if (adapter->en_mng_pt) { 374 u32 manc = er32(MANC); 375 376 /* re-enable hardware interception of ARP */ 377 manc |= E1000_MANC_ARP_EN; 378 379 ew32(MANC, manc); 380 } 381 } 382 383 /** 384 * e1000_configure - configure the hardware for RX and TX 385 * @adapter = private board structure 386 **/ 387 static void e1000_configure(struct e1000_adapter *adapter) 388 { 389 struct net_device *netdev = adapter->netdev; 390 int i; 391 392 e1000_set_rx_mode(netdev); 393 394 e1000_restore_vlan(adapter); 395 e1000_init_manageability(adapter); 396 397 e1000_configure_tx(adapter); 398 e1000_setup_rctl(adapter); 399 e1000_configure_rx(adapter); 400 /* call E1000_DESC_UNUSED which always leaves 401 * at least 1 descriptor unused to make sure 402 * next_to_use != next_to_clean */ 403 for (i = 0; i < adapter->num_rx_queues; i++) { 404 struct e1000_rx_ring *ring = &adapter->rx_ring[i]; 405 adapter->alloc_rx_buf(adapter, ring, 406 
E1000_DESC_UNUSED(ring)); 407 } 408 } 409 410 int e1000_up(struct e1000_adapter *adapter) 411 { 412 struct e1000_hw *hw = &adapter->hw; 413 414 /* hardware has been reset, we need to reload some things */ 415 e1000_configure(adapter); 416 417 clear_bit(__E1000_DOWN, &adapter->flags); 418 419 napi_enable(&adapter->napi); 420 421 e1000_irq_enable(adapter); 422 423 netif_wake_queue(adapter->netdev); 424 425 /* fire a link change interrupt to start the watchdog */ 426 ew32(ICS, E1000_ICS_LSC); 427 return 0; 428 } 429 430 /** 431 * e1000_power_up_phy - restore link in case the phy was powered down 432 * @adapter: address of board private structure 433 * 434 * The phy may be powered down to save power and turn off link when the 435 * driver is unloaded and wake on lan is not enabled (among others) 436 * *** this routine MUST be followed by a call to e1000_reset *** 437 * 438 **/ 439 440 void e1000_power_up_phy(struct e1000_adapter *adapter) 441 { 442 struct e1000_hw *hw = &adapter->hw; 443 u16 mii_reg = 0; 444 445 /* Just clear the power down bit to wake the phy back up */ 446 if (hw->media_type == e1000_media_type_copper) { 447 /* according to the manual, the phy will retain its 448 * settings across a power-down/up cycle */ 449 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); 450 mii_reg &= ~MII_CR_POWER_DOWN; 451 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); 452 } 453 } 454 455 static void e1000_power_down_phy(struct e1000_adapter *adapter) 456 { 457 struct e1000_hw *hw = &adapter->hw; 458 459 /* Power down the PHY so no link is implied when interface is down * 460 * The PHY cannot be powered down if any of the following is true * 461 * (a) WoL is enabled 462 * (b) AMT is active 463 * (c) SoL/IDER session is active */ 464 if (!adapter->wol && hw->mac_type >= e1000_82540 && 465 hw->media_type == e1000_media_type_copper) { 466 u16 mii_reg = 0; 467 468 switch (hw->mac_type) { 469 case e1000_82540: 470 case e1000_82545: 471 case e1000_82545_rev_3: 472 case e1000_82546: 473 case e1000_ce4100: 474 case e1000_82546_rev_3: 475 case e1000_82541: 476 case e1000_82541_rev_2: 477 case e1000_82547: 478 case e1000_82547_rev_2: 479 if (er32(MANC) & E1000_MANC_SMBUS_EN) 480 goto out; 481 break; 482 default: 483 goto out; 484 } 485 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); 486 mii_reg |= MII_CR_POWER_DOWN; 487 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); 488 msleep(1); 489 } 490 out: 491 return; 492 } 493 494 static void e1000_down_and_stop(struct e1000_adapter *adapter) 495 { 496 set_bit(__E1000_DOWN, &adapter->flags); 497 cancel_work_sync(&adapter->reset_task); 498 cancel_delayed_work_sync(&adapter->watchdog_task); 499 cancel_delayed_work_sync(&adapter->phy_info_task); 500 cancel_delayed_work_sync(&adapter->fifo_stall_task); 501 } 502 503 void e1000_down(struct e1000_adapter *adapter) 504 { 505 struct e1000_hw *hw = &adapter->hw; 506 struct net_device *netdev = adapter->netdev; 507 u32 rctl, tctl; 508 509 510 /* disable receives in the hardware */ 511 rctl = er32(RCTL); 512 ew32(RCTL, rctl & ~E1000_RCTL_EN); 513 /* flush and sleep below */ 514 515 netif_tx_disable(netdev); 516 517 /* disable transmits in the hardware */ 518 tctl = er32(TCTL); 519 tctl &= ~E1000_TCTL_EN; 520 ew32(TCTL, tctl); 521 /* flush both disables and wait for them to finish */ 522 E1000_WRITE_FLUSH(); 523 msleep(10); 524 525 napi_disable(&adapter->napi); 526 527 e1000_irq_disable(adapter); 528 529 /* 530 * Setting DOWN must be after irq_disable to prevent 531 * a screaming interrupt. Setting DOWN also prevents 532 * tasks from rescheduling. 
533 */ 534 e1000_down_and_stop(adapter); 535 536 adapter->link_speed = 0; 537 adapter->link_duplex = 0; 538 netif_carrier_off(netdev); 539 540 e1000_reset(adapter); 541 e1000_clean_all_tx_rings(adapter); 542 e1000_clean_all_rx_rings(adapter); 543 } 544 545 static void e1000_reinit_safe(struct e1000_adapter *adapter) 546 { 547 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 548 msleep(1); 549 mutex_lock(&adapter->mutex); 550 e1000_down(adapter); 551 e1000_up(adapter); 552 mutex_unlock(&adapter->mutex); 553 clear_bit(__E1000_RESETTING, &adapter->flags); 554 } 555 556 void e1000_reinit_locked(struct e1000_adapter *adapter) 557 { 558 /* if rtnl_lock is not held the call path is bogus */ 559 ASSERT_RTNL(); 560 WARN_ON(in_interrupt()); 561 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 562 msleep(1); 563 e1000_down(adapter); 564 e1000_up(adapter); 565 clear_bit(__E1000_RESETTING, &adapter->flags); 566 } 567 568 void e1000_reset(struct e1000_adapter *adapter) 569 { 570 struct e1000_hw *hw = &adapter->hw; 571 u32 pba = 0, tx_space, min_tx_space, min_rx_space; 572 bool legacy_pba_adjust = false; 573 u16 hwm; 574 575 /* Repartition Pba for greater than 9k mtu 576 * To take effect CTRL.RST is required. 577 */ 578 579 switch (hw->mac_type) { 580 case e1000_82542_rev2_0: 581 case e1000_82542_rev2_1: 582 case e1000_82543: 583 case e1000_82544: 584 case e1000_82540: 585 case e1000_82541: 586 case e1000_82541_rev_2: 587 legacy_pba_adjust = true; 588 pba = E1000_PBA_48K; 589 break; 590 case e1000_82545: 591 case e1000_82545_rev_3: 592 case e1000_82546: 593 case e1000_ce4100: 594 case e1000_82546_rev_3: 595 pba = E1000_PBA_48K; 596 break; 597 case e1000_82547: 598 case e1000_82547_rev_2: 599 legacy_pba_adjust = true; 600 pba = E1000_PBA_30K; 601 break; 602 case e1000_undefined: 603 case e1000_num_macs: 604 break; 605 } 606 607 if (legacy_pba_adjust) { 608 if (hw->max_frame_size > E1000_RXBUFFER_8192) 609 pba -= 8; /* allocate more FIFO for Tx */ 610 611 if (hw->mac_type == e1000_82547) { 612 adapter->tx_fifo_head = 0; 613 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; 614 adapter->tx_fifo_size = 615 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; 616 atomic_set(&adapter->tx_fifo_stall, 0); 617 } 618 } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { 619 /* adjust PBA for jumbo frames */ 620 ew32(PBA, pba); 621 622 /* To maintain wire speed transmits, the Tx FIFO should be 623 * large enough to accommodate two full transmit packets, 624 * rounded up to the next 1KB and expressed in KB. Likewise, 625 * the Rx FIFO should be large enough to accommodate at least 626 * one full receive packet and is similarly rounded up and 627 * expressed in KB. 
		 */
		pba = er32(PBA);
		/* upper 16 bits have the Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits have the Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/*
		 * the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on rx space, rx wins and must trump tx
			 * adjustment or use Early Receive if available */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/*
	 * flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}

/**
 * e1000_dump_eeprom - dump the EEPROM for users having checksum issues
 **/
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data) {
		pr_err("Unable to allocate memory to dump EEPROM data\n");
		return;
	}

	ops->get_eeprom(netdev, &eeprom, data);

	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;

	pr_err("/*********************/\n");
	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
	pr_err("Calculated              : 0x%04x\n", csum_new);

	pr_err("Offset    Values\n");
	pr_err("========  ======\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	pr_err("Include this output when contacting your support provider.\n");
	pr_err("This is not a software error! Something bad happened to\n");
	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
	pr_err("result in further problems, possibly loss of data,\n");
	pr_err("corruption or system hangs!\n");
	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
	pr_err("which is invalid and requires you to set the proper MAC\n");
	pr_err("address manually before continuing to enable this network\n");
	pr_err("device. Please inspect the EEPROM dump and report the\n");
	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
	pr_err("/*********************/\n");

	kfree(data);
}

/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
static int e1000_is_need_ioport(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case E1000_DEV_ID_82540EM:
	case E1000_DEV_ID_82540EM_LOM:
	case E1000_DEV_ID_82540EP:
	case E1000_DEV_ID_82540EP_LOM:
	case E1000_DEV_ID_82540EP_LP:
	case E1000_DEV_ID_82541EI:
	case E1000_DEV_ID_82541EI_MOBILE:
	case E1000_DEV_ID_82541ER:
	case E1000_DEV_ID_82541ER_LOM:
	case E1000_DEV_ID_82541GI:
	case E1000_DEV_ID_82541GI_LF:
	case E1000_DEV_ID_82541GI_MOBILE:
	case E1000_DEV_ID_82544EI_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82544GC_COPPER:
	case E1000_DEV_ID_82544GC_LOM:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82546EB_COPPER:
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
		return true;
	default:
		return false;
	}
}

static u32 e1000_fix_features(struct net_device *netdev, u32 features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable, make sure tx flag is always in same state as rx.
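	 * (The hardware has a single VLAN enable, CTRL.VME, covering both
	 * transmit tagging and receive stripping, so the two flags must
	 * stay in lockstep.)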
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

static int e1000_set_features(struct net_device *netdev, u32 features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	u32 changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_RX)
		e1000_vlan_mode(netdev, features);

	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	else
		e1000_reset(adapter);

	return 0;
}

static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats		= e1000_get_stats,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_fix_features	= e1000_fix_features,
	.ndo_set_features	= e1000_set_features,
};

/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function
 * that can be called very early at init (just after struct allocation).
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 * Returns negative error codes if MAC type setup fails.
 */
static int e1000_init_hw_struct(struct e1000_adapter *adapter,
				struct e1000_hw *hw)
{
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	hw->max_frame_size = adapter->netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */
	if (e1000_set_mac_type(hw)) {
		e_err(probe, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);
	e1000_get_bus_info(hw);

	hw->wait_autoneg_complete = false;
	hw->tbi_compatibility_en = true;
	hw->adaptive_ifs = true;

	/* Copper options */

	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = false;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	return 0;
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;

	static int cards_found = 0;
	static int global_quad_port_a = 0; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = (1 << debug) - 1;
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	if (adapter->need_ioport) {
		for (i = BAR_1; i <= BAR_5; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/*
	 * there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware.  There are some
	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		/*
		 * according to DMA-API-HOWTO, coherent calls will always
		 * succeed if the set call did
		 */
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	}

	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* setup the private structure */

	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;
	if (hw->mac_type == e1000_ce4100) {
		ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1);
		ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy,
						    pci_resource_len(pdev, BAR_1));

		if (!ce4100_gbe_mdio_base_virt)
			goto err_mdio_ioremap;
	}

	if (hw->mac_type >= e1000_82543) {
		netdev->hw_features = NETIF_F_SG |
				      NETIF_F_HW_CSUM |
				      NETIF_F_HW_VLAN_RX;
		netdev->features = NETIF_F_HW_VLAN_TX |
				   NETIF_F_HW_VLAN_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	    (hw->mac_type != e1000_82547))
		netdev->hw_features |= NETIF_F_TSO;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= NETIF_F_RXCSUM;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_HW_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state */

	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/*
		 * set MAC address to all zeroes to invalidate and temporarily
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip link set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initialization here due to bad MAC address */
	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr))
		e_err(probe, "Invalid MAC Address\n");

	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = 1;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* Auto detect PHY address */
	if (hw->mac_type == e1000_ce4100) {
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
			if (tmp == 0 || tmp == 0xFF) {
				if (i == 31)
					goto err_eeprom;
				continue;
			} else
				break;
		}
	}

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_vlan_mode(netdev, netdev->features);

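	/* Registration succeeded: report the bus type/speed/width and MAC
	 * address below, and keep the carrier off until the watchdog task
	 * detects link.
	 */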
	/* print bus type/speed/width info */
	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
	       netdev->dev_addr);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

err_register:
err_eeprom:
	e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_dma:
err_sw_init:
err_mdio_ioremap:
	iounmap(ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
	pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	e1000_down_and_stop(adapter);
	e1000_release_manageability(adapter);

	unregister_netdev(netdev);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * e1000_init_hw_struct MUST be called before this function
 **/

static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		e_err(probe, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);
	mutex_init(&adapter->mutex);

	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}

/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/

static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
{
	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/

static int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}

/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
1440 **/ 1441 1442 static int e1000_close(struct net_device *netdev) 1443 { 1444 struct e1000_adapter *adapter = netdev_priv(netdev); 1445 struct e1000_hw *hw = &adapter->hw; 1446 1447 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); 1448 e1000_down(adapter); 1449 e1000_power_down_phy(adapter); 1450 e1000_free_irq(adapter); 1451 1452 e1000_free_all_tx_resources(adapter); 1453 e1000_free_all_rx_resources(adapter); 1454 1455 /* kill manageability vlan ID if supported, but not if a vlan with 1456 * the same ID is registered on the host OS (let 8021q kill it) */ 1457 if ((hw->mng_cookie.status & 1458 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 1459 !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { 1460 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 1461 } 1462 1463 return 0; 1464 } 1465 1466 /** 1467 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary 1468 * @adapter: address of board private structure 1469 * @start: address of beginning of memory 1470 * @len: length of memory 1471 **/ 1472 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, 1473 unsigned long len) 1474 { 1475 struct e1000_hw *hw = &adapter->hw; 1476 unsigned long begin = (unsigned long)start; 1477 unsigned long end = begin + len; 1478 1479 /* First rev 82545 and 82546 need to not allow any memory 1480 * write location to cross 64k boundary due to errata 23 */ 1481 if (hw->mac_type == e1000_82545 || 1482 hw->mac_type == e1000_ce4100 || 1483 hw->mac_type == e1000_82546) { 1484 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true; 1485 } 1486 1487 return true; 1488 } 1489 1490 /** 1491 * e1000_setup_tx_resources - allocate Tx resources (Descriptors) 1492 * @adapter: board private structure 1493 * @txdr: tx descriptor ring (for a specific queue) to setup 1494 * 1495 * Return 0 on success, negative on failure 1496 **/ 1497 1498 static int e1000_setup_tx_resources(struct e1000_adapter *adapter, 1499 struct e1000_tx_ring *txdr) 1500 { 1501 struct pci_dev *pdev = adapter->pdev; 1502 int size; 1503 1504 size = sizeof(struct e1000_buffer) * txdr->count; 1505 txdr->buffer_info = vzalloc(size); 1506 if (!txdr->buffer_info) { 1507 e_err(probe, "Unable to allocate memory for the Tx descriptor " 1508 "ring\n"); 1509 return -ENOMEM; 1510 } 1511 1512 /* round up to nearest 4K */ 1513 1514 txdr->size = txdr->count * sizeof(struct e1000_tx_desc); 1515 txdr->size = ALIGN(txdr->size, 4096); 1516 1517 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, 1518 GFP_KERNEL); 1519 if (!txdr->desc) { 1520 setup_tx_desc_die: 1521 vfree(txdr->buffer_info); 1522 e_err(probe, "Unable to allocate memory for the Tx descriptor " 1523 "ring\n"); 1524 return -ENOMEM; 1525 } 1526 1527 /* Fix for errata 23, can't cross 64kB boundary */ 1528 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { 1529 void *olddesc = txdr->desc; 1530 dma_addr_t olddma = txdr->dma; 1531 e_err(tx_err, "txdr align check failed: %u bytes at %p\n", 1532 txdr->size, txdr->desc); 1533 /* Try again, without freeing the previous */ 1534 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, 1535 &txdr->dma, GFP_KERNEL); 1536 /* Failed allocation, critical failure */ 1537 if (!txdr->desc) { 1538 dma_free_coherent(&pdev->dev, txdr->size, olddesc, 1539 olddma); 1540 goto setup_tx_desc_die; 1541 } 1542 1543 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { 1544 /* give up */ 1545 dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, 1546 txdr->dma); 1547 dma_free_coherent(&pdev->dev, 
txdr->size, olddesc, 1548 olddma); 1549 e_err(probe, "Unable to allocate aligned memory " 1550 "for the transmit descriptor ring\n"); 1551 vfree(txdr->buffer_info); 1552 return -ENOMEM; 1553 } else { 1554 /* Free old allocation, new allocation was successful */ 1555 dma_free_coherent(&pdev->dev, txdr->size, olddesc, 1556 olddma); 1557 } 1558 } 1559 memset(txdr->desc, 0, txdr->size); 1560 1561 txdr->next_to_use = 0; 1562 txdr->next_to_clean = 0; 1563 1564 return 0; 1565 } 1566 1567 /** 1568 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources 1569 * (Descriptors) for all queues 1570 * @adapter: board private structure 1571 * 1572 * Return 0 on success, negative on failure 1573 **/ 1574 1575 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) 1576 { 1577 int i, err = 0; 1578 1579 for (i = 0; i < adapter->num_tx_queues; i++) { 1580 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); 1581 if (err) { 1582 e_err(probe, "Allocation for Tx Queue %u failed\n", i); 1583 for (i-- ; i >= 0; i--) 1584 e1000_free_tx_resources(adapter, 1585 &adapter->tx_ring[i]); 1586 break; 1587 } 1588 } 1589 1590 return err; 1591 } 1592 1593 /** 1594 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset 1595 * @adapter: board private structure 1596 * 1597 * Configure the Tx unit of the MAC after a reset. 1598 **/ 1599 1600 static void e1000_configure_tx(struct e1000_adapter *adapter) 1601 { 1602 u64 tdba; 1603 struct e1000_hw *hw = &adapter->hw; 1604 u32 tdlen, tctl, tipg; 1605 u32 ipgr1, ipgr2; 1606 1607 /* Setup the HW Tx Head and Tail descriptor pointers */ 1608 1609 switch (adapter->num_tx_queues) { 1610 case 1: 1611 default: 1612 tdba = adapter->tx_ring[0].dma; 1613 tdlen = adapter->tx_ring[0].count * 1614 sizeof(struct e1000_tx_desc); 1615 ew32(TDLEN, tdlen); 1616 ew32(TDBAH, (tdba >> 32)); 1617 ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); 1618 ew32(TDT, 0); 1619 ew32(TDH, 0); 1620 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH); 1621 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? 
E1000_TDT : E1000_82542_TDT); 1622 break; 1623 } 1624 1625 /* Set the default values for the Tx Inter Packet Gap timer */ 1626 if ((hw->media_type == e1000_media_type_fiber || 1627 hw->media_type == e1000_media_type_internal_serdes)) 1628 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 1629 else 1630 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; 1631 1632 switch (hw->mac_type) { 1633 case e1000_82542_rev2_0: 1634 case e1000_82542_rev2_1: 1635 tipg = DEFAULT_82542_TIPG_IPGT; 1636 ipgr1 = DEFAULT_82542_TIPG_IPGR1; 1637 ipgr2 = DEFAULT_82542_TIPG_IPGR2; 1638 break; 1639 default: 1640 ipgr1 = DEFAULT_82543_TIPG_IPGR1; 1641 ipgr2 = DEFAULT_82543_TIPG_IPGR2; 1642 break; 1643 } 1644 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; 1645 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; 1646 ew32(TIPG, tipg); 1647 1648 /* Set the Tx Interrupt Delay register */ 1649 1650 ew32(TIDV, adapter->tx_int_delay); 1651 if (hw->mac_type >= e1000_82540) 1652 ew32(TADV, adapter->tx_abs_int_delay); 1653 1654 /* Program the Transmit Control Register */ 1655 1656 tctl = er32(TCTL); 1657 tctl &= ~E1000_TCTL_CT; 1658 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | 1659 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 1660 1661 e1000_config_collision_dist(hw); 1662 1663 /* Setup Transmit Descriptor Settings for eop descriptor */ 1664 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; 1665 1666 /* only set IDE if we are delaying interrupts using the timers */ 1667 if (adapter->tx_int_delay) 1668 adapter->txd_cmd |= E1000_TXD_CMD_IDE; 1669 1670 if (hw->mac_type < e1000_82543) 1671 adapter->txd_cmd |= E1000_TXD_CMD_RPS; 1672 else 1673 adapter->txd_cmd |= E1000_TXD_CMD_RS; 1674 1675 /* Cache if we're 82544 running in PCI-X because we'll 1676 * need this to apply a workaround later in the send path. */ 1677 if (hw->mac_type == e1000_82544 && 1678 hw->bus_type == e1000_bus_type_pcix) 1679 adapter->pcix_82544 = 1; 1680 1681 ew32(TCTL, tctl); 1682 1683 } 1684 1685 /** 1686 * e1000_setup_rx_resources - allocate Rx resources (Descriptors) 1687 * @adapter: board private structure 1688 * @rxdr: rx descriptor ring (for a specific queue) to setup 1689 * 1690 * Returns 0 on success, negative on failure 1691 **/ 1692 1693 static int e1000_setup_rx_resources(struct e1000_adapter *adapter, 1694 struct e1000_rx_ring *rxdr) 1695 { 1696 struct pci_dev *pdev = adapter->pdev; 1697 int size, desc_len; 1698 1699 size = sizeof(struct e1000_buffer) * rxdr->count; 1700 rxdr->buffer_info = vzalloc(size); 1701 if (!rxdr->buffer_info) { 1702 e_err(probe, "Unable to allocate memory for the Rx descriptor " 1703 "ring\n"); 1704 return -ENOMEM; 1705 } 1706 1707 desc_len = sizeof(struct e1000_rx_desc); 1708 1709 /* Round up to nearest 4K */ 1710 1711 rxdr->size = rxdr->count * desc_len; 1712 rxdr->size = ALIGN(rxdr->size, 4096); 1713 1714 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, 1715 GFP_KERNEL); 1716 1717 if (!rxdr->desc) { 1718 e_err(probe, "Unable to allocate memory for the Rx descriptor " 1719 "ring\n"); 1720 setup_rx_desc_die: 1721 vfree(rxdr->buffer_info); 1722 return -ENOMEM; 1723 } 1724 1725 /* Fix for errata 23, can't cross 64kB boundary */ 1726 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { 1727 void *olddesc = rxdr->desc; 1728 dma_addr_t olddma = rxdr->dma; 1729 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n", 1730 rxdr->size, rxdr->desc); 1731 /* Try again, without freeing the previous */ 1732 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, 1733 &rxdr->dma, GFP_KERNEL); 1734 /* Failed allocation, critical failure 
*/ 1735 if (!rxdr->desc) { 1736 dma_free_coherent(&pdev->dev, rxdr->size, olddesc, 1737 olddma); 1738 e_err(probe, "Unable to allocate memory for the Rx " 1739 "descriptor ring\n"); 1740 goto setup_rx_desc_die; 1741 } 1742 1743 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { 1744 /* give up */ 1745 dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, 1746 rxdr->dma); 1747 dma_free_coherent(&pdev->dev, rxdr->size, olddesc, 1748 olddma); 1749 e_err(probe, "Unable to allocate aligned memory for " 1750 "the Rx descriptor ring\n"); 1751 goto setup_rx_desc_die; 1752 } else { 1753 /* Free old allocation, new allocation was successful */ 1754 dma_free_coherent(&pdev->dev, rxdr->size, olddesc, 1755 olddma); 1756 } 1757 } 1758 memset(rxdr->desc, 0, rxdr->size); 1759 1760 rxdr->next_to_clean = 0; 1761 rxdr->next_to_use = 0; 1762 rxdr->rx_skb_top = NULL; 1763 1764 return 0; 1765 } 1766 1767 /** 1768 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources 1769 * (Descriptors) for all queues 1770 * @adapter: board private structure 1771 * 1772 * Return 0 on success, negative on failure 1773 **/ 1774 1775 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) 1776 { 1777 int i, err = 0; 1778 1779 for (i = 0; i < adapter->num_rx_queues; i++) { 1780 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); 1781 if (err) { 1782 e_err(probe, "Allocation for Rx Queue %u failed\n", i); 1783 for (i-- ; i >= 0; i--) 1784 e1000_free_rx_resources(adapter, 1785 &adapter->rx_ring[i]); 1786 break; 1787 } 1788 } 1789 1790 return err; 1791 } 1792 1793 /** 1794 * e1000_setup_rctl - configure the receive control registers 1795 * @adapter: Board private structure 1796 **/ 1797 static void e1000_setup_rctl(struct e1000_adapter *adapter) 1798 { 1799 struct e1000_hw *hw = &adapter->hw; 1800 u32 rctl; 1801 1802 rctl = er32(RCTL); 1803 1804 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 1805 1806 rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 1807 E1000_RCTL_RDMTS_HALF | 1808 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); 1809 1810 if (hw->tbi_compatibility_on == 1) 1811 rctl |= E1000_RCTL_SBP; 1812 else 1813 rctl &= ~E1000_RCTL_SBP; 1814 1815 if (adapter->netdev->mtu <= ETH_DATA_LEN) 1816 rctl &= ~E1000_RCTL_LPE; 1817 else 1818 rctl |= E1000_RCTL_LPE; 1819 1820 /* Setup buffer sizes */ 1821 rctl &= ~E1000_RCTL_SZ_4096; 1822 rctl |= E1000_RCTL_BSEX; 1823 switch (adapter->rx_buffer_len) { 1824 case E1000_RXBUFFER_2048: 1825 default: 1826 rctl |= E1000_RCTL_SZ_2048; 1827 rctl &= ~E1000_RCTL_BSEX; 1828 break; 1829 case E1000_RXBUFFER_4096: 1830 rctl |= E1000_RCTL_SZ_4096; 1831 break; 1832 case E1000_RXBUFFER_8192: 1833 rctl |= E1000_RCTL_SZ_8192; 1834 break; 1835 case E1000_RXBUFFER_16384: 1836 rctl |= E1000_RCTL_SZ_16384; 1837 break; 1838 } 1839 1840 ew32(RCTL, rctl); 1841 } 1842 1843 /** 1844 * e1000_configure_rx - Configure 8254x Receive Unit after Reset 1845 * @adapter: board private structure 1846 * 1847 * Configure the Rx unit of the MAC after a reset. 
1848 **/ 1849 1850 static void e1000_configure_rx(struct e1000_adapter *adapter) 1851 { 1852 u64 rdba; 1853 struct e1000_hw *hw = &adapter->hw; 1854 u32 rdlen, rctl, rxcsum; 1855 1856 if (adapter->netdev->mtu > ETH_DATA_LEN) { 1857 rdlen = adapter->rx_ring[0].count * 1858 sizeof(struct e1000_rx_desc); 1859 adapter->clean_rx = e1000_clean_jumbo_rx_irq; 1860 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; 1861 } else { 1862 rdlen = adapter->rx_ring[0].count * 1863 sizeof(struct e1000_rx_desc); 1864 adapter->clean_rx = e1000_clean_rx_irq; 1865 adapter->alloc_rx_buf = e1000_alloc_rx_buffers; 1866 } 1867 1868 /* disable receives while setting up the descriptors */ 1869 rctl = er32(RCTL); 1870 ew32(RCTL, rctl & ~E1000_RCTL_EN); 1871 1872 /* set the Receive Delay Timer Register */ 1873 ew32(RDTR, adapter->rx_int_delay); 1874 1875 if (hw->mac_type >= e1000_82540) { 1876 ew32(RADV, adapter->rx_abs_int_delay); 1877 if (adapter->itr_setting != 0) 1878 ew32(ITR, 1000000000 / (adapter->itr * 256)); 1879 } 1880 1881 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1882 * the Base and Length of the Rx Descriptor Ring */ 1883 switch (adapter->num_rx_queues) { 1884 case 1: 1885 default: 1886 rdba = adapter->rx_ring[0].dma; 1887 ew32(RDLEN, rdlen); 1888 ew32(RDBAH, (rdba >> 32)); 1889 ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); 1890 ew32(RDT, 0); 1891 ew32(RDH, 0); 1892 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH); 1893 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT); 1894 break; 1895 } 1896 1897 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 1898 if (hw->mac_type >= e1000_82543) { 1899 rxcsum = er32(RXCSUM); 1900 if (adapter->rx_csum) 1901 rxcsum |= E1000_RXCSUM_TUOFL; 1902 else 1903 /* don't need to clear IPPCSE as it defaults to 0 */ 1904 rxcsum &= ~E1000_RXCSUM_TUOFL; 1905 ew32(RXCSUM, rxcsum); 1906 } 1907 1908 /* Enable Receives */ 1909 ew32(RCTL, rctl | E1000_RCTL_EN); 1910 } 1911 1912 /** 1913 * e1000_free_tx_resources - Free Tx Resources per Queue 1914 * @adapter: board private structure 1915 * @tx_ring: Tx descriptor ring for a specific queue 1916 * 1917 * Free all transmit software resources 1918 **/ 1919 1920 static void e1000_free_tx_resources(struct e1000_adapter *adapter, 1921 struct e1000_tx_ring *tx_ring) 1922 { 1923 struct pci_dev *pdev = adapter->pdev; 1924 1925 e1000_clean_tx_ring(adapter, tx_ring); 1926 1927 vfree(tx_ring->buffer_info); 1928 tx_ring->buffer_info = NULL; 1929 1930 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 1931 tx_ring->dma); 1932 1933 tx_ring->desc = NULL; 1934 } 1935 1936 /** 1937 * e1000_free_all_tx_resources - Free Tx Resources for All Queues 1938 * @adapter: board private structure 1939 * 1940 * Free all transmit software resources 1941 **/ 1942 1943 void e1000_free_all_tx_resources(struct e1000_adapter *adapter) 1944 { 1945 int i; 1946 1947 for (i = 0; i < adapter->num_tx_queues; i++) 1948 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); 1949 } 1950 1951 static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, 1952 struct e1000_buffer *buffer_info) 1953 { 1954 if (buffer_info->dma) { 1955 if (buffer_info->mapped_as_page) 1956 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, 1957 buffer_info->length, DMA_TO_DEVICE); 1958 else 1959 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, 1960 buffer_info->length, 1961 DMA_TO_DEVICE); 1962 buffer_info->dma = 0; 1963 } 1964 if (buffer_info->skb) { 1965 
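/* dev_kfree_skb_any() picks the right free path for the current context;
 * this helper runs from NAPI Tx cleanup, ring teardown and the Tx DMA
 * error unwind, so it may be called in both softirq and process context.
 */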
dev_kfree_skb_any(buffer_info->skb); 1966 buffer_info->skb = NULL; 1967 } 1968 buffer_info->time_stamp = 0; 1969 /* buffer_info must be completely set up in the transmit path */ 1970 } 1971 1972 /** 1973 * e1000_clean_tx_ring - Free Tx Buffers 1974 * @adapter: board private structure 1975 * @tx_ring: ring to be cleaned 1976 **/ 1977 1978 static void e1000_clean_tx_ring(struct e1000_adapter *adapter, 1979 struct e1000_tx_ring *tx_ring) 1980 { 1981 struct e1000_hw *hw = &adapter->hw; 1982 struct e1000_buffer *buffer_info; 1983 unsigned long size; 1984 unsigned int i; 1985 1986 /* Free all the Tx ring sk_buffs */ 1987 1988 for (i = 0; i < tx_ring->count; i++) { 1989 buffer_info = &tx_ring->buffer_info[i]; 1990 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 1991 } 1992 1993 size = sizeof(struct e1000_buffer) * tx_ring->count; 1994 memset(tx_ring->buffer_info, 0, size); 1995 1996 /* Zero out the descriptor ring */ 1997 1998 memset(tx_ring->desc, 0, tx_ring->size); 1999 2000 tx_ring->next_to_use = 0; 2001 tx_ring->next_to_clean = 0; 2002 tx_ring->last_tx_tso = 0; 2003 2004 writel(0, hw->hw_addr + tx_ring->tdh); 2005 writel(0, hw->hw_addr + tx_ring->tdt); 2006 } 2007 2008 /** 2009 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues 2010 * @adapter: board private structure 2011 **/ 2012 2013 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) 2014 { 2015 int i; 2016 2017 for (i = 0; i < adapter->num_tx_queues; i++) 2018 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); 2019 } 2020 2021 /** 2022 * e1000_free_rx_resources - Free Rx Resources 2023 * @adapter: board private structure 2024 * @rx_ring: ring to clean the resources from 2025 * 2026 * Free all receive software resources 2027 **/ 2028 2029 static void e1000_free_rx_resources(struct e1000_adapter *adapter, 2030 struct e1000_rx_ring *rx_ring) 2031 { 2032 struct pci_dev *pdev = adapter->pdev; 2033 2034 e1000_clean_rx_ring(adapter, rx_ring); 2035 2036 vfree(rx_ring->buffer_info); 2037 rx_ring->buffer_info = NULL; 2038 2039 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2040 rx_ring->dma); 2041 2042 rx_ring->desc = NULL; 2043 } 2044 2045 /** 2046 * e1000_free_all_rx_resources - Free Rx Resources for All Queues 2047 * @adapter: board private structure 2048 * 2049 * Free all receive software resources 2050 **/ 2051 2052 void e1000_free_all_rx_resources(struct e1000_adapter *adapter) 2053 { 2054 int i; 2055 2056 for (i = 0; i < adapter->num_rx_queues; i++) 2057 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); 2058 } 2059 2060 /** 2061 * e1000_clean_rx_ring - Free Rx Buffers per Queue 2062 * @adapter: board private structure 2063 * @rx_ring: ring to free buffers from 2064 **/ 2065 2066 static void e1000_clean_rx_ring(struct e1000_adapter *adapter, 2067 struct e1000_rx_ring *rx_ring) 2068 { 2069 struct e1000_hw *hw = &adapter->hw; 2070 struct e1000_buffer *buffer_info; 2071 struct pci_dev *pdev = adapter->pdev; 2072 unsigned long size; 2073 unsigned int i; 2074 2075 /* Free all the Rx ring sk_buffs */ 2076 for (i = 0; i < rx_ring->count; i++) { 2077 buffer_info = &rx_ring->buffer_info[i]; 2078 if (buffer_info->dma && 2079 adapter->clean_rx == e1000_clean_rx_irq) { 2080 dma_unmap_single(&pdev->dev, buffer_info->dma, 2081 buffer_info->length, 2082 DMA_FROM_DEVICE); 2083 } else if (buffer_info->dma && 2084 adapter->clean_rx == e1000_clean_jumbo_rx_irq) { 2085 dma_unmap_page(&pdev->dev, buffer_info->dma, 2086 buffer_info->length, 2087 DMA_FROM_DEVICE); 2088 } 2089 2090 buffer_info->dma = 0; 2091 if 
(buffer_info->page) { 2092 put_page(buffer_info->page); 2093 buffer_info->page = NULL; 2094 } 2095 if (buffer_info->skb) { 2096 dev_kfree_skb(buffer_info->skb); 2097 buffer_info->skb = NULL; 2098 } 2099 } 2100 2101 /* there also may be some cached data from a chained receive */ 2102 if (rx_ring->rx_skb_top) { 2103 dev_kfree_skb(rx_ring->rx_skb_top); 2104 rx_ring->rx_skb_top = NULL; 2105 } 2106 2107 size = sizeof(struct e1000_buffer) * rx_ring->count; 2108 memset(rx_ring->buffer_info, 0, size); 2109 2110 /* Zero out the descriptor ring */ 2111 memset(rx_ring->desc, 0, rx_ring->size); 2112 2113 rx_ring->next_to_clean = 0; 2114 rx_ring->next_to_use = 0; 2115 2116 writel(0, hw->hw_addr + rx_ring->rdh); 2117 writel(0, hw->hw_addr + rx_ring->rdt); 2118 } 2119 2120 /** 2121 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues 2122 * @adapter: board private structure 2123 **/ 2124 2125 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) 2126 { 2127 int i; 2128 2129 for (i = 0; i < adapter->num_rx_queues; i++) 2130 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); 2131 } 2132 2133 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset 2134 * and memory write and invalidate disabled for certain operations 2135 */ 2136 static void e1000_enter_82542_rst(struct e1000_adapter *adapter) 2137 { 2138 struct e1000_hw *hw = &adapter->hw; 2139 struct net_device *netdev = adapter->netdev; 2140 u32 rctl; 2141 2142 e1000_pci_clear_mwi(hw); 2143 2144 rctl = er32(RCTL); 2145 rctl |= E1000_RCTL_RST; 2146 ew32(RCTL, rctl); 2147 E1000_WRITE_FLUSH(); 2148 mdelay(5); 2149 2150 if (netif_running(netdev)) 2151 e1000_clean_all_rx_rings(adapter); 2152 } 2153 2154 static void e1000_leave_82542_rst(struct e1000_adapter *adapter) 2155 { 2156 struct e1000_hw *hw = &adapter->hw; 2157 struct net_device *netdev = adapter->netdev; 2158 u32 rctl; 2159 2160 rctl = er32(RCTL); 2161 rctl &= ~E1000_RCTL_RST; 2162 ew32(RCTL, rctl); 2163 E1000_WRITE_FLUSH(); 2164 mdelay(5); 2165 2166 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) 2167 e1000_pci_set_mwi(hw); 2168 2169 if (netif_running(netdev)) { 2170 /* No need to loop, because 82542 supports only 1 queue */ 2171 struct e1000_rx_ring *ring = &adapter->rx_ring[0]; 2172 e1000_configure_rx(adapter); 2173 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); 2174 } 2175 } 2176 2177 /** 2178 * e1000_set_mac - Change the Ethernet Address of the NIC 2179 * @netdev: network interface device structure 2180 * @p: pointer to an address structure 2181 * 2182 * Returns 0 on success, negative on failure 2183 **/ 2184 2185 static int e1000_set_mac(struct net_device *netdev, void *p) 2186 { 2187 struct e1000_adapter *adapter = netdev_priv(netdev); 2188 struct e1000_hw *hw = &adapter->hw; 2189 struct sockaddr *addr = p; 2190 2191 if (!is_valid_ether_addr(addr->sa_data)) 2192 return -EADDRNOTAVAIL; 2193 2194 /* 82542 2.0 needs to be in reset to write receive address registers */ 2195 2196 if (hw->mac_type == e1000_82542_rev2_0) 2197 e1000_enter_82542_rst(adapter); 2198 2199 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2200 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); 2201 2202 e1000_rar_set(hw, hw->mac_addr, 0); 2203 2204 if (hw->mac_type == e1000_82542_rev2_0) 2205 e1000_leave_82542_rst(adapter); 2206 2207 return 0; 2208 } 2209 2210 /** 2211 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 2212 * @netdev: network interface device structure 2213 * 2214 * The set_rx_mode entry point is called whenever the 
unicast or multicast 2215 * address lists or the network interface flags are updated. This routine is 2216 * responsible for configuring the hardware for proper unicast, multicast, 2217 * promiscuous mode, and all-multi behavior. 2218 **/ 2219 2220 static void e1000_set_rx_mode(struct net_device *netdev) 2221 { 2222 struct e1000_adapter *adapter = netdev_priv(netdev); 2223 struct e1000_hw *hw = &adapter->hw; 2224 struct netdev_hw_addr *ha; 2225 bool use_uc = false; 2226 u32 rctl; 2227 u32 hash_value; 2228 int i, rar_entries = E1000_RAR_ENTRIES; 2229 int mta_reg_count = E1000_NUM_MTA_REGISTERS; 2230 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); 2231 2232 if (!mcarray) { 2233 e_err(probe, "memory allocation failed\n"); 2234 return; 2235 } 2236 2237 /* Check for Promiscuous and All Multicast modes */ 2238 2239 rctl = er32(RCTL); 2240 2241 if (netdev->flags & IFF_PROMISC) { 2242 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2243 rctl &= ~E1000_RCTL_VFE; 2244 } else { 2245 if (netdev->flags & IFF_ALLMULTI) 2246 rctl |= E1000_RCTL_MPE; 2247 else 2248 rctl &= ~E1000_RCTL_MPE; 2249 /* Enable VLAN filter if there is a VLAN */ 2250 if (e1000_vlan_used(adapter)) 2251 rctl |= E1000_RCTL_VFE; 2252 } 2253 2254 if (netdev_uc_count(netdev) > rar_entries - 1) { 2255 rctl |= E1000_RCTL_UPE; 2256 } else if (!(netdev->flags & IFF_PROMISC)) { 2257 rctl &= ~E1000_RCTL_UPE; 2258 use_uc = true; 2259 } 2260 2261 ew32(RCTL, rctl); 2262 2263 /* 82542 2.0 needs to be in reset to write receive address registers */ 2264 2265 if (hw->mac_type == e1000_82542_rev2_0) 2266 e1000_enter_82542_rst(adapter); 2267 2268 /* load the first 14 addresses into the exact filters 1-14. Unicast 2269 * addresses take precedence to avoid disabling unicast filtering 2270 * when possible. 
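 * Multicast addresses that do not fit in the remaining RAR slots are
 * folded into the multicast hash table (MTA) further down.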
2271 * 2272 * RAR 0 is used for the station MAC address 2273 * if there are not 14 addresses, go ahead and clear the filters 2274 */ 2275 i = 1; 2276 if (use_uc) 2277 netdev_for_each_uc_addr(ha, netdev) { 2278 if (i == rar_entries) 2279 break; 2280 e1000_rar_set(hw, ha->addr, i++); 2281 } 2282 2283 netdev_for_each_mc_addr(ha, netdev) { 2284 if (i == rar_entries) { 2285 /* load any remaining addresses into the hash table */ 2286 u32 hash_reg, hash_bit, mta; 2287 hash_value = e1000_hash_mc_addr(hw, ha->addr); 2288 hash_reg = (hash_value >> 5) & 0x7F; 2289 hash_bit = hash_value & 0x1F; 2290 mta = (1 << hash_bit); 2291 mcarray[hash_reg] |= mta; 2292 } else { 2293 e1000_rar_set(hw, ha->addr, i++); 2294 } 2295 } 2296 2297 for (; i < rar_entries; i++) { 2298 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); 2299 E1000_WRITE_FLUSH(); 2300 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); 2301 E1000_WRITE_FLUSH(); 2302 } 2303 2304 /* write the hash table completely; write from the bottom up to work 2305 * around write-combining chipsets and to avoid flushing each write */ 2306 for (i = mta_reg_count - 1; i >= 0 ; i--) { 2307 /* 2308 * The 82544 has an erratum where writing odd MTA offsets can 2309 * overwrite the previous even offset; writing backwards over the 2310 * range avoids the problem because the odd offset is always 2311 * written first 2312 */ 2313 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]); 2314 } 2315 E1000_WRITE_FLUSH(); 2316 2317 if (hw->mac_type == e1000_82542_rev2_0) 2318 e1000_leave_82542_rst(adapter); 2319 2320 kfree(mcarray); 2321 } 2322 2323 /** 2324 * e1000_update_phy_info_task - get phy info 2325 * @work: work struct contained inside adapter struct 2326 * 2327 * Need to wait a few seconds after link up to get diagnostic information from 2328 * the phy 2329 */ 2330 static void e1000_update_phy_info_task(struct work_struct *work) 2331 { 2332 struct e1000_adapter *adapter = container_of(work, 2333 struct e1000_adapter, 2334 phy_info_task.work); 2335 if (test_bit(__E1000_DOWN, &adapter->flags)) 2336 return; 2337 mutex_lock(&adapter->mutex); 2338 e1000_phy_get_info(&adapter->hw, &adapter->phy_info); 2339 mutex_unlock(&adapter->mutex); 2340 } 2341 2342 /** 2343 * e1000_82547_tx_fifo_stall_task - reset the Tx FIFO after a stall 2344 * @work: work struct contained inside adapter struct 2345 **/ 2346 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) 2347 { 2348 struct e1000_adapter *adapter = container_of(work, 2349 struct e1000_adapter, 2350 fifo_stall_task.work); 2351 struct e1000_hw *hw = &adapter->hw; 2352 struct net_device *netdev = adapter->netdev; 2353 u32 tctl; 2354 2355 if (test_bit(__E1000_DOWN, &adapter->flags)) 2356 return; 2357 mutex_lock(&adapter->mutex); 2358 if (atomic_read(&adapter->tx_fifo_stall)) { 2359 if ((er32(TDT) == er32(TDH)) && 2360 (er32(TDFT) == er32(TDFH)) && 2361 (er32(TDFTS) == er32(TDFHS))) { 2362 tctl = er32(TCTL); 2363 ew32(TCTL, tctl & ~E1000_TCTL_EN); 2364 ew32(TDFT, adapter->tx_head_addr); 2365 ew32(TDFH, adapter->tx_head_addr); 2366 ew32(TDFTS, adapter->tx_head_addr); 2367 ew32(TDFHS, adapter->tx_head_addr); 2368 ew32(TCTL, tctl); 2369 E1000_WRITE_FLUSH(); 2370 2371 adapter->tx_fifo_head = 0; 2372 atomic_set(&adapter->tx_fifo_stall, 0); 2373 netif_wake_queue(netdev); 2374 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) { 2375 schedule_delayed_work(&adapter->fifo_stall_task, 1); 2376 } 2377 } 2378 mutex_unlock(&adapter->mutex); 2379 } 2380 2381 bool e1000_has_link(struct e1000_adapter *adapter) 2382 { 2383 struct e1000_hw *hw = &adapter->hw; 2384 bool
link_active = false; 2385 2386 /* get_link_status is set on LSC (link status) interrupt or rx 2387 * sequence error interrupt (except on intel ce4100). 2388 * get_link_status will stay false until the 2389 * e1000_check_for_link establishes link for copper adapters 2390 * ONLY 2391 */ 2392 switch (hw->media_type) { 2393 case e1000_media_type_copper: 2394 if (hw->mac_type == e1000_ce4100) 2395 hw->get_link_status = 1; 2396 if (hw->get_link_status) { 2397 e1000_check_for_link(hw); 2398 link_active = !hw->get_link_status; 2399 } else { 2400 link_active = true; 2401 } 2402 break; 2403 case e1000_media_type_fiber: 2404 e1000_check_for_link(hw); 2405 link_active = !!(er32(STATUS) & E1000_STATUS_LU); 2406 break; 2407 case e1000_media_type_internal_serdes: 2408 e1000_check_for_link(hw); 2409 link_active = hw->serdes_has_link; 2410 break; 2411 default: 2412 break; 2413 } 2414 2415 return link_active; 2416 } 2417 2418 /** 2419 * e1000_watchdog - work function 2420 * @work: work struct contained inside adapter struct 2421 **/ 2422 static void e1000_watchdog(struct work_struct *work) 2423 { 2424 struct e1000_adapter *adapter = container_of(work, 2425 struct e1000_adapter, 2426 watchdog_task.work); 2427 struct e1000_hw *hw = &adapter->hw; 2428 struct net_device *netdev = adapter->netdev; 2429 struct e1000_tx_ring *txdr = adapter->tx_ring; 2430 u32 link, tctl; 2431 2432 if (test_bit(__E1000_DOWN, &adapter->flags)) 2433 return; 2434 2435 mutex_lock(&adapter->mutex); 2436 link = e1000_has_link(adapter); 2437 if ((netif_carrier_ok(netdev)) && link) 2438 goto link_up; 2439 2440 if (link) { 2441 if (!netif_carrier_ok(netdev)) { 2442 u32 ctrl; 2443 bool txb2b = true; 2444 /* update snapshot of PHY registers on LSC */ 2445 e1000_get_speed_and_duplex(hw, 2446 &adapter->link_speed, 2447 &adapter->link_duplex); 2448 2449 ctrl = er32(CTRL); 2450 pr_info("%s NIC Link is Up %d Mbps %s, " 2451 "Flow Control: %s\n", 2452 netdev->name, 2453 adapter->link_speed, 2454 adapter->link_duplex == FULL_DUPLEX ? 2455 "Full Duplex" : "Half Duplex", 2456 ((ctrl & E1000_CTRL_TFCE) && (ctrl & 2457 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & 2458 E1000_CTRL_RFCE) ? "RX" : ((ctrl & 2459 E1000_CTRL_TFCE) ? "TX" : "None"))); 2460 2461 /* adjust timeout factor according to speed/duplex */ 2462 adapter->tx_timeout_factor = 1; 2463 switch (adapter->link_speed) { 2464 case SPEED_10: 2465 txb2b = false; 2466 adapter->tx_timeout_factor = 16; 2467 break; 2468 case SPEED_100: 2469 txb2b = false; 2470 /* maybe add some timeout factor ? 
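 * (SPEED_10 above bumps the factor to 16; 100 Mb/s currently keeps the
 * default factor of 1.)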
*/ 2471 break; 2472 } 2473 2474 /* enable transmits in the hardware */ 2475 tctl = er32(TCTL); 2476 tctl |= E1000_TCTL_EN; 2477 ew32(TCTL, tctl); 2478 2479 netif_carrier_on(netdev); 2480 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2481 schedule_delayed_work(&adapter->phy_info_task, 2482 2 * HZ); 2483 adapter->smartspeed = 0; 2484 } 2485 } else { 2486 if (netif_carrier_ok(netdev)) { 2487 adapter->link_speed = 0; 2488 adapter->link_duplex = 0; 2489 pr_info("%s NIC Link is Down\n", 2490 netdev->name); 2491 netif_carrier_off(netdev); 2492 2493 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2494 schedule_delayed_work(&adapter->phy_info_task, 2495 2 * HZ); 2496 } 2497 2498 e1000_smartspeed(adapter); 2499 } 2500 2501 link_up: 2502 e1000_update_stats(adapter); 2503 2504 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 2505 adapter->tpt_old = adapter->stats.tpt; 2506 hw->collision_delta = adapter->stats.colc - adapter->colc_old; 2507 adapter->colc_old = adapter->stats.colc; 2508 2509 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; 2510 adapter->gorcl_old = adapter->stats.gorcl; 2511 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; 2512 adapter->gotcl_old = adapter->stats.gotcl; 2513 2514 e1000_update_adaptive(hw); 2515 2516 if (!netif_carrier_ok(netdev)) { 2517 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { 2518 /* We've lost link, so the controller stops DMA, 2519 * but we've got queued Tx work that's never going 2520 * to get done, so reset controller to flush Tx. 2521 * (Do the reset outside of interrupt context). */ 2522 adapter->tx_timeout_count++; 2523 schedule_work(&adapter->reset_task); 2524 /* exit immediately since reset is imminent */ 2525 goto unlock; 2526 } 2527 } 2528 2529 /* Simple mode for Interrupt Throttle Rate (ITR) */ 2530 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { 2531 /* 2532 * Symmetric Tx/Rx gets a reduced ITR=2000; 2533 * Total asymmetrical Tx or Rx gets ITR=8000; 2534 * everyone else is between 2000-8000. 2535 */ 2536 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; 2537 u32 dif = (adapter->gotcl > adapter->gorcl ? 2538 adapter->gotcl - adapter->gorcl : 2539 adapter->gorcl - adapter->gotcl) / 10000; 2540 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 2541 2542 ew32(ITR, 1000000000 / (itr * 256)); 2543 } 2544 2545 /* Cause software interrupt to ensure rx ring is cleaned */ 2546 ew32(ICS, E1000_ICS_RXDMT0); 2547 2548 /* Force detection of hung controller every watchdog period */ 2549 adapter->detect_tx_hung = true; 2550 2551 /* Reschedule the task */ 2552 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2553 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); 2554 2555 unlock: 2556 mutex_unlock(&adapter->mutex); 2557 } 2558 2559 enum latency_range { 2560 lowest_latency = 0, 2561 low_latency = 1, 2562 bulk_latency = 2, 2563 latency_invalid = 255 2564 }; 2565 2566 /** 2567 * e1000_update_itr - update the dynamic ITR value based on statistics 2568 * @adapter: pointer to adapter 2569 * @itr_setting: current adapter->itr 2570 * @packets: the number of packets during this measurement interval 2571 * @bytes: the number of bytes during this measurement interval 2572 * 2573 * Stores a new ITR value based on packets and byte 2574 * counts during the last interrupt. The advantage of per interrupt 2575 * computation is faster updates and more accurate ITR for the current 2576 * traffic pattern. 
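 * For example, starting from low_latency, an interval with 40 packets
 * totalling 40,000 bytes moves to lowest_latency, while 5 jumbo frames
 * totalling 45,000 bytes moves to bulk_latency.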
Constants in this function were computed 2577 * based on theoretical maximum wire speed and thresholds were set based 2578 * on testing data as well as attempting to minimize response time 2579 * while increasing bulk throughput. 2580 * this functionality is controlled by the InterruptThrottleRate module 2581 * parameter (see e1000_param.c) 2582 **/ 2583 static unsigned int e1000_update_itr(struct e1000_adapter *adapter, 2584 u16 itr_setting, int packets, int bytes) 2585 { 2586 unsigned int retval = itr_setting; 2587 struct e1000_hw *hw = &adapter->hw; 2588 2589 if (unlikely(hw->mac_type < e1000_82540)) 2590 goto update_itr_done; 2591 2592 if (packets == 0) 2593 goto update_itr_done; 2594 2595 switch (itr_setting) { 2596 case lowest_latency: 2597 /* jumbo frames get bulk treatment*/ 2598 if (bytes/packets > 8000) 2599 retval = bulk_latency; 2600 else if ((packets < 5) && (bytes > 512)) 2601 retval = low_latency; 2602 break; 2603 case low_latency: /* 50 usec aka 20000 ints/s */ 2604 if (bytes > 10000) { 2605 /* jumbo frames need bulk latency setting */ 2606 if (bytes/packets > 8000) 2607 retval = bulk_latency; 2608 else if ((packets < 10) || ((bytes/packets) > 1200)) 2609 retval = bulk_latency; 2610 else if ((packets > 35)) 2611 retval = lowest_latency; 2612 } else if (bytes/packets > 2000) 2613 retval = bulk_latency; 2614 else if (packets <= 2 && bytes < 512) 2615 retval = lowest_latency; 2616 break; 2617 case bulk_latency: /* 250 usec aka 4000 ints/s */ 2618 if (bytes > 25000) { 2619 if (packets > 35) 2620 retval = low_latency; 2621 } else if (bytes < 6000) { 2622 retval = low_latency; 2623 } 2624 break; 2625 } 2626 2627 update_itr_done: 2628 return retval; 2629 } 2630 2631 static void e1000_set_itr(struct e1000_adapter *adapter) 2632 { 2633 struct e1000_hw *hw = &adapter->hw; 2634 u16 current_itr; 2635 u32 new_itr = adapter->itr; 2636 2637 if (unlikely(hw->mac_type < e1000_82540)) 2638 return; 2639 2640 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 2641 if (unlikely(adapter->link_speed != SPEED_1000)) { 2642 current_itr = 0; 2643 new_itr = 4000; 2644 goto set_itr_now; 2645 } 2646 2647 adapter->tx_itr = e1000_update_itr(adapter, 2648 adapter->tx_itr, 2649 adapter->total_tx_packets, 2650 adapter->total_tx_bytes); 2651 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2652 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) 2653 adapter->tx_itr = low_latency; 2654 2655 adapter->rx_itr = e1000_update_itr(adapter, 2656 adapter->rx_itr, 2657 adapter->total_rx_packets, 2658 adapter->total_rx_bytes); 2659 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2660 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) 2661 adapter->rx_itr = low_latency; 2662 2663 current_itr = max(adapter->rx_itr, adapter->tx_itr); 2664 2665 switch (current_itr) { 2666 /* counts and packets in update_itr are dependent on these numbers */ 2667 case lowest_latency: 2668 new_itr = 70000; 2669 break; 2670 case low_latency: 2671 new_itr = 20000; /* aka hwitr = ~200 */ 2672 break; 2673 case bulk_latency: 2674 new_itr = 4000; 2675 break; 2676 default: 2677 break; 2678 } 2679 2680 set_itr_now: 2681 if (new_itr != adapter->itr) { 2682 /* this attempts to bias the interrupt rate towards Bulk 2683 * by adding intermediate steps when interrupt rate is 2684 * increasing */ 2685 new_itr = new_itr > adapter->itr ? 
2686 min(adapter->itr + (new_itr >> 2), new_itr) : 2687 new_itr; 2688 adapter->itr = new_itr; 2689 ew32(ITR, 1000000000 / (new_itr * 256)); 2690 } 2691 } 2692 2693 #define E1000_TX_FLAGS_CSUM 0x00000001 2694 #define E1000_TX_FLAGS_VLAN 0x00000002 2695 #define E1000_TX_FLAGS_TSO 0x00000004 2696 #define E1000_TX_FLAGS_IPV4 0x00000008 2697 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 2698 #define E1000_TX_FLAGS_VLAN_SHIFT 16 2699 2700 static int e1000_tso(struct e1000_adapter *adapter, 2701 struct e1000_tx_ring *tx_ring, struct sk_buff *skb) 2702 { 2703 struct e1000_context_desc *context_desc; 2704 struct e1000_buffer *buffer_info; 2705 unsigned int i; 2706 u32 cmd_length = 0; 2707 u16 ipcse = 0, tucse, mss; 2708 u8 ipcss, ipcso, tucss, tucso, hdr_len; 2709 int err; 2710 2711 if (skb_is_gso(skb)) { 2712 if (skb_header_cloned(skb)) { 2713 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2714 if (err) 2715 return err; 2716 } 2717 2718 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2719 mss = skb_shinfo(skb)->gso_size; 2720 if (skb->protocol == htons(ETH_P_IP)) { 2721 struct iphdr *iph = ip_hdr(skb); 2722 iph->tot_len = 0; 2723 iph->check = 0; 2724 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 2725 iph->daddr, 0, 2726 IPPROTO_TCP, 2727 0); 2728 cmd_length = E1000_TXD_CMD_IP; 2729 ipcse = skb_transport_offset(skb) - 1; 2730 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2731 ipv6_hdr(skb)->payload_len = 0; 2732 tcp_hdr(skb)->check = 2733 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 2734 &ipv6_hdr(skb)->daddr, 2735 0, IPPROTO_TCP, 0); 2736 ipcse = 0; 2737 } 2738 ipcss = skb_network_offset(skb); 2739 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; 2740 tucss = skb_transport_offset(skb); 2741 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; 2742 tucse = 0; 2743 2744 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 2745 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 2746 2747 i = tx_ring->next_to_use; 2748 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2749 buffer_info = &tx_ring->buffer_info[i]; 2750 2751 context_desc->lower_setup.ip_fields.ipcss = ipcss; 2752 context_desc->lower_setup.ip_fields.ipcso = ipcso; 2753 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); 2754 context_desc->upper_setup.tcp_fields.tucss = tucss; 2755 context_desc->upper_setup.tcp_fields.tucso = tucso; 2756 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); 2757 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); 2758 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 2759 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 2760 2761 buffer_info->time_stamp = jiffies; 2762 buffer_info->next_to_watch = i; 2763 2764 if (++i == tx_ring->count) i = 0; 2765 tx_ring->next_to_use = i; 2766 2767 return true; 2768 } 2769 return false; 2770 } 2771 2772 static bool e1000_tx_csum(struct e1000_adapter *adapter, 2773 struct e1000_tx_ring *tx_ring, struct sk_buff *skb) 2774 { 2775 struct e1000_context_desc *context_desc; 2776 struct e1000_buffer *buffer_info; 2777 unsigned int i; 2778 u8 css; 2779 u32 cmd_len = E1000_TXD_CMD_DEXT; 2780 2781 if (skb->ip_summed != CHECKSUM_PARTIAL) 2782 return false; 2783 2784 switch (skb->protocol) { 2785 case cpu_to_be16(ETH_P_IP): 2786 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2787 cmd_len |= E1000_TXD_CMD_TCP; 2788 break; 2789 case cpu_to_be16(ETH_P_IPV6): 2790 /* XXX not handling all IPV6 headers */ 2791 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2792 cmd_len |= E1000_TXD_CMD_TCP; 2793 break; 2794 default: 2795 if 
(unlikely(net_ratelimit())) 2796 e_warn(drv, "checksum_partial proto=%x!\n", 2797 skb->protocol); 2798 break; 2799 } 2800 2801 css = skb_checksum_start_offset(skb); 2802 2803 i = tx_ring->next_to_use; 2804 buffer_info = &tx_ring->buffer_info[i]; 2805 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2806 2807 context_desc->lower_setup.ip_config = 0; 2808 context_desc->upper_setup.tcp_fields.tucss = css; 2809 context_desc->upper_setup.tcp_fields.tucso = 2810 css + skb->csum_offset; 2811 context_desc->upper_setup.tcp_fields.tucse = 0; 2812 context_desc->tcp_seg_setup.data = 0; 2813 context_desc->cmd_and_length = cpu_to_le32(cmd_len); 2814 2815 buffer_info->time_stamp = jiffies; 2816 buffer_info->next_to_watch = i; 2817 2818 if (unlikely(++i == tx_ring->count)) i = 0; 2819 tx_ring->next_to_use = i; 2820 2821 return true; 2822 } 2823 2824 #define E1000_MAX_TXD_PWR 12 2825 #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR) 2826 2827 static int e1000_tx_map(struct e1000_adapter *adapter, 2828 struct e1000_tx_ring *tx_ring, 2829 struct sk_buff *skb, unsigned int first, 2830 unsigned int max_per_txd, unsigned int nr_frags, 2831 unsigned int mss) 2832 { 2833 struct e1000_hw *hw = &adapter->hw; 2834 struct pci_dev *pdev = adapter->pdev; 2835 struct e1000_buffer *buffer_info; 2836 unsigned int len = skb_headlen(skb); 2837 unsigned int offset = 0, size, count = 0, i; 2838 unsigned int f, bytecount, segs; 2839 2840 i = tx_ring->next_to_use; 2841 2842 while (len) { 2843 buffer_info = &tx_ring->buffer_info[i]; 2844 size = min(len, max_per_txd); 2845 /* Workaround for Controller erratum -- 2846 * descriptor for non-tso packet in a linear SKB that follows a 2847 * tso gets written back prematurely before the data is fully 2848 * DMA'd to the controller */ 2849 if (!skb->data_len && tx_ring->last_tx_tso && 2850 !skb_is_gso(skb)) { 2851 tx_ring->last_tx_tso = 0; 2852 size -= 4; 2853 } 2854 2855 /* Workaround for premature desc write-backs 2856 * in TSO mode. Append 4-byte sentinel desc */ 2857 if (unlikely(mss && !nr_frags && size == len && size > 8)) 2858 size -= 4; 2859 /* work-around for errata 10 and it applies 2860 * to all controllers in PCI-X mode 2861 * The fix is to make sure that the first descriptor of a 2862 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes 2863 */ 2864 if (unlikely((hw->bus_type == e1000_bus_type_pcix) && 2865 (size > 2015) && count == 0)) 2866 size = 2015; 2867 2868 /* Workaround for potential 82544 hang in PCI-X. Avoid 2869 * terminating buffers within evenly-aligned dwords. 
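 * (If the buffer's final byte would land on such an address, trim 4 bytes
 * from this descriptor; the while loop picks the remainder up in the next
 * descriptor.)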
*/ 2870 if (unlikely(adapter->pcix_82544 && 2871 !((unsigned long)(skb->data + offset + size - 1) & 4) && 2872 size > 4)) 2873 size -= 4; 2874 2875 buffer_info->length = size; 2876 /* set time_stamp *before* dma to help avoid a possible race */ 2877 buffer_info->time_stamp = jiffies; 2878 buffer_info->mapped_as_page = false; 2879 buffer_info->dma = dma_map_single(&pdev->dev, 2880 skb->data + offset, 2881 size, DMA_TO_DEVICE); 2882 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2883 goto dma_error; 2884 buffer_info->next_to_watch = i; 2885 2886 len -= size; 2887 offset += size; 2888 count++; 2889 if (len) { 2890 i++; 2891 if (unlikely(i == tx_ring->count)) 2892 i = 0; 2893 } 2894 } 2895 2896 for (f = 0; f < nr_frags; f++) { 2897 const struct skb_frag_struct *frag; 2898 2899 frag = &skb_shinfo(skb)->frags[f]; 2900 len = skb_frag_size(frag); 2901 offset = 0; 2902 2903 while (len) { 2904 unsigned long bufend; 2905 i++; 2906 if (unlikely(i == tx_ring->count)) 2907 i = 0; 2908 2909 buffer_info = &tx_ring->buffer_info[i]; 2910 size = min(len, max_per_txd); 2911 /* Workaround for premature desc write-backs 2912 * in TSO mode. Append 4-byte sentinel desc */ 2913 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) 2914 size -= 4; 2915 /* Workaround for potential 82544 hang in PCI-X. 2916 * Avoid terminating buffers within evenly-aligned 2917 * dwords. */ 2918 bufend = (unsigned long) 2919 page_to_phys(skb_frag_page(frag)); 2920 bufend += offset + size - 1; 2921 if (unlikely(adapter->pcix_82544 && 2922 !(bufend & 4) && 2923 size > 4)) 2924 size -= 4; 2925 2926 buffer_info->length = size; 2927 buffer_info->time_stamp = jiffies; 2928 buffer_info->mapped_as_page = true; 2929 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 2930 offset, size, DMA_TO_DEVICE); 2931 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2932 goto dma_error; 2933 buffer_info->next_to_watch = i; 2934 2935 len -= size; 2936 offset += size; 2937 count++; 2938 } 2939 } 2940 2941 segs = skb_shinfo(skb)->gso_segs ?: 1; 2942 /* multiply data chunks by size of headers */ 2943 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; 2944 2945 tx_ring->buffer_info[i].skb = skb; 2946 tx_ring->buffer_info[i].segs = segs; 2947 tx_ring->buffer_info[i].bytecount = bytecount; 2948 tx_ring->buffer_info[first].next_to_watch = i; 2949 2950 return count; 2951 2952 dma_error: 2953 dev_err(&pdev->dev, "TX DMA map failed\n"); 2954 buffer_info->dma = 0; 2955 if (count) 2956 count--; 2957 2958 while (count--) { 2959 if (i==0) 2960 i += tx_ring->count; 2961 i--; 2962 buffer_info = &tx_ring->buffer_info[i]; 2963 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 2964 } 2965 2966 return 0; 2967 } 2968 2969 static void e1000_tx_queue(struct e1000_adapter *adapter, 2970 struct e1000_tx_ring *tx_ring, int tx_flags, 2971 int count) 2972 { 2973 struct e1000_hw *hw = &adapter->hw; 2974 struct e1000_tx_desc *tx_desc = NULL; 2975 struct e1000_buffer *buffer_info; 2976 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 2977 unsigned int i; 2978 2979 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { 2980 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 2981 E1000_TXD_CMD_TSE; 2982 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2983 2984 if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) 2985 txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2986 } 2987 2988 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { 2989 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 2990 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2991 } 2992 2993 if (unlikely(tx_flags & 
E1000_TX_FLAGS_VLAN)) { 2994 txd_lower |= E1000_TXD_CMD_VLE; 2995 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); 2996 } 2997 2998 i = tx_ring->next_to_use; 2999 3000 while (count--) { 3001 buffer_info = &tx_ring->buffer_info[i]; 3002 tx_desc = E1000_TX_DESC(*tx_ring, i); 3003 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 3004 tx_desc->lower.data = 3005 cpu_to_le32(txd_lower | buffer_info->length); 3006 tx_desc->upper.data = cpu_to_le32(txd_upper); 3007 if (unlikely(++i == tx_ring->count)) i = 0; 3008 } 3009 3010 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 3011 3012 /* Force memory writes to complete before letting h/w 3013 * know there are new descriptors to fetch. (Only 3014 * applicable for weak-ordered memory model archs, 3015 * such as IA-64). */ 3016 wmb(); 3017 3018 tx_ring->next_to_use = i; 3019 writel(i, hw->hw_addr + tx_ring->tdt); 3020 /* we need this if more than one processor can write to our tail 3021 * at a time, it syncronizes IO on IA64/Altix systems */ 3022 mmiowb(); 3023 } 3024 3025 /** 3026 * 82547 workaround to avoid controller hang in half-duplex environment. 3027 * The workaround is to avoid queuing a large packet that would span 3028 * the internal Tx FIFO ring boundary by notifying the stack to resend 3029 * the packet at a later time. This gives the Tx FIFO an opportunity to 3030 * flush all packets. When that occurs, we reset the Tx FIFO pointers 3031 * to the beginning of the Tx FIFO. 3032 **/ 3033 3034 #define E1000_FIFO_HDR 0x10 3035 #define E1000_82547_PAD_LEN 0x3E0 3036 3037 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, 3038 struct sk_buff *skb) 3039 { 3040 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; 3041 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; 3042 3043 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); 3044 3045 if (adapter->link_duplex != HALF_DUPLEX) 3046 goto no_fifo_stall_required; 3047 3048 if (atomic_read(&adapter->tx_fifo_stall)) 3049 return 1; 3050 3051 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { 3052 atomic_set(&adapter->tx_fifo_stall, 1); 3053 return 1; 3054 } 3055 3056 no_fifo_stall_required: 3057 adapter->tx_fifo_head += skb_fifo_len; 3058 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) 3059 adapter->tx_fifo_head -= adapter->tx_fifo_size; 3060 return 0; 3061 } 3062 3063 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) 3064 { 3065 struct e1000_adapter *adapter = netdev_priv(netdev); 3066 struct e1000_tx_ring *tx_ring = adapter->tx_ring; 3067 3068 netif_stop_queue(netdev); 3069 /* Herbert's original patch had: 3070 * smp_mb__after_netif_stop_queue(); 3071 * but since that doesn't exist yet, just open code it. */ 3072 smp_mb(); 3073 3074 /* We need to check again in a case another CPU has just 3075 * made room available. */ 3076 if (likely(E1000_DESC_UNUSED(tx_ring) < size)) 3077 return -EBUSY; 3078 3079 /* A reprieve! 
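 * -- the cleanup path freed enough descriptors between the caller's check
 * and the recheck above, so restart the queue instead of returning -EBUSY.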
*/ 3080 netif_start_queue(netdev); 3081 ++adapter->restart_queue; 3082 return 0; 3083 } 3084 3085 static int e1000_maybe_stop_tx(struct net_device *netdev, 3086 struct e1000_tx_ring *tx_ring, int size) 3087 { 3088 if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) 3089 return 0; 3090 return __e1000_maybe_stop_tx(netdev, size); 3091 } 3092 3093 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) 3094 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 3095 struct net_device *netdev) 3096 { 3097 struct e1000_adapter *adapter = netdev_priv(netdev); 3098 struct e1000_hw *hw = &adapter->hw; 3099 struct e1000_tx_ring *tx_ring; 3100 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; 3101 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 3102 unsigned int tx_flags = 0; 3103 unsigned int len = skb_headlen(skb); 3104 unsigned int nr_frags; 3105 unsigned int mss; 3106 int count = 0; 3107 int tso; 3108 unsigned int f; 3109 3110 /* This goes back to the question of how to logically map a tx queue 3111 * to a flow. Right now, performance is impacted slightly negatively 3112 * if using multiple tx queues. If the stack breaks away from a 3113 * single qdisc implementation, we can look at this again. */ 3114 tx_ring = adapter->tx_ring; 3115 3116 if (unlikely(skb->len <= 0)) { 3117 dev_kfree_skb_any(skb); 3118 return NETDEV_TX_OK; 3119 } 3120 3121 mss = skb_shinfo(skb)->gso_size; 3122 /* The controller does a simple calculation to 3123 * make sure there is enough room in the FIFO before 3124 * initiating the DMA for each buffer. The calc is: 3125 * 4 = ceil(buffer len/mss). To make sure we don't 3126 * overrun the FIFO, adjust the max buffer len if mss 3127 * drops. */ 3128 if (mss) { 3129 u8 hdr_len; 3130 max_per_txd = min(mss << 2, max_per_txd); 3131 max_txd_pwr = fls(max_per_txd) - 1; 3132 3133 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 3134 if (skb->data_len && hdr_len == len) { 3135 switch (hw->mac_type) { 3136 unsigned int pull_size; 3137 case e1000_82544: 3138 /* Make sure we have room to chop off 4 bytes, 3139 * and that the end alignment will work out to 3140 * this hardware's requirements 3141 * NOTE: this is a TSO only workaround 3142 * if end byte alignment not correct move us 3143 * into the next dword */ 3144 if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4) 3145 break; 3146 /* fall through */ 3147 pull_size = min((unsigned int)4, skb->data_len); 3148 if (!__pskb_pull_tail(skb, pull_size)) { 3149 e_err(drv, "__pskb_pull_tail " 3150 "failed.\n"); 3151 dev_kfree_skb_any(skb); 3152 return NETDEV_TX_OK; 3153 } 3154 len = skb_headlen(skb); 3155 break; 3156 default: 3157 /* do nothing */ 3158 break; 3159 } 3160 } 3161 } 3162 3163 /* reserve a descriptor for the offload context */ 3164 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) 3165 count++; 3166 count++; 3167 3168 /* Controller Erratum workaround */ 3169 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) 3170 count++; 3171 3172 count += TXD_USE_COUNT(len, max_txd_pwr); 3173 3174 if (adapter->pcix_82544) 3175 count++; 3176 3177 /* work-around for errata 10 and it applies to all controllers 3178 * in PCI-X mode, so add one more descriptor to the count 3179 */ 3180 if (unlikely((hw->bus_type == e1000_bus_type_pcix) && 3181 (len > 2015))) 3182 count++; 3183 3184 nr_frags = skb_shinfo(skb)->nr_frags; 3185 for (f = 0; f < nr_frags; f++) 3186 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), 3187 max_txd_pwr); 3188 if (adapter->pcix_82544) 3189 count += nr_frags; 3190 3191 /* need: count + 2 desc gap to 
keep tail from touching 3192 * head, otherwise try next time */ 3193 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) 3194 return NETDEV_TX_BUSY; 3195 3196 if (unlikely((hw->mac_type == e1000_82547) && 3197 (e1000_82547_fifo_workaround(adapter, skb)))) { 3198 netif_stop_queue(netdev); 3199 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3200 schedule_delayed_work(&adapter->fifo_stall_task, 1); 3201 return NETDEV_TX_BUSY; 3202 } 3203 3204 if (vlan_tx_tag_present(skb)) { 3205 tx_flags |= E1000_TX_FLAGS_VLAN; 3206 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 3207 } 3208 3209 first = tx_ring->next_to_use; 3210 3211 tso = e1000_tso(adapter, tx_ring, skb); 3212 if (tso < 0) { 3213 dev_kfree_skb_any(skb); 3214 return NETDEV_TX_OK; 3215 } 3216 3217 if (likely(tso)) { 3218 if (likely(hw->mac_type != e1000_82544)) 3219 tx_ring->last_tx_tso = 1; 3220 tx_flags |= E1000_TX_FLAGS_TSO; 3221 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb))) 3222 tx_flags |= E1000_TX_FLAGS_CSUM; 3223 3224 if (likely(skb->protocol == htons(ETH_P_IP))) 3225 tx_flags |= E1000_TX_FLAGS_IPV4; 3226 3227 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, 3228 nr_frags, mss); 3229 3230 if (count) { 3231 e1000_tx_queue(adapter, tx_ring, tx_flags, count); 3232 /* Make sure there is space in the ring for the next send. */ 3233 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); 3234 3235 } else { 3236 dev_kfree_skb_any(skb); 3237 tx_ring->buffer_info[first].time_stamp = 0; 3238 tx_ring->next_to_use = first; 3239 } 3240 3241 return NETDEV_TX_OK; 3242 } 3243 3244 /** 3245 * e1000_tx_timeout - Respond to a Tx Hang 3246 * @netdev: network interface device structure 3247 **/ 3248 3249 static void e1000_tx_timeout(struct net_device *netdev) 3250 { 3251 struct e1000_adapter *adapter = netdev_priv(netdev); 3252 3253 /* Do the reset outside of interrupt context */ 3254 adapter->tx_timeout_count++; 3255 schedule_work(&adapter->reset_task); 3256 } 3257 3258 static void e1000_reset_task(struct work_struct *work) 3259 { 3260 struct e1000_adapter *adapter = 3261 container_of(work, struct e1000_adapter, reset_task); 3262 3263 if (test_bit(__E1000_DOWN, &adapter->flags)) 3264 return; 3265 e1000_reinit_safe(adapter); 3266 } 3267 3268 /** 3269 * e1000_get_stats - Get System Network Statistics 3270 * @netdev: network interface device structure 3271 * 3272 * Returns the address of the device statistics structure. 3273 * The statistics are actually updated from the watchdog. 3274 **/ 3275 3276 static struct net_device_stats *e1000_get_stats(struct net_device *netdev) 3277 { 3278 /* only return the current stats */ 3279 return &netdev->stats; 3280 } 3281 3282 /** 3283 * e1000_change_mtu - Change the Maximum Transfer Unit 3284 * @netdev: network interface device structure 3285 * @new_mtu: new value for maximum frame size 3286 * 3287 * Returns 0 on success, negative on failure 3288 **/ 3289 3290 static int e1000_change_mtu(struct net_device *netdev, int new_mtu) 3291 { 3292 struct e1000_adapter *adapter = netdev_priv(netdev); 3293 struct e1000_hw *hw = &adapter->hw; 3294 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 3295 3296 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 3297 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3298 e_err(probe, "Invalid MTU setting\n"); 3299 return -EINVAL; 3300 } 3301 3302 /* Adapter-specific max frame size limits. */ 3303 switch (hw->mac_type) { 3304 case e1000_undefined ... 
e1000_82542_rev2_1: 3305 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { 3306 e_err(probe, "Jumbo Frames not supported.\n"); 3307 return -EINVAL; 3308 } 3309 break; 3310 default: 3311 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ 3312 break; 3313 } 3314 3315 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 3316 msleep(1); 3317 /* e1000_down has a dependency on max_frame_size */ 3318 hw->max_frame_size = max_frame; 3319 if (netif_running(netdev)) 3320 e1000_down(adapter); 3321 3322 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3323 * means we reserve 2 more, this pushes us to allocate from the next 3324 * larger slab size. 3325 * i.e. RXBUFFER_2048 --> size-4096 slab 3326 * however with the new *_jumbo_rx* routines, jumbo receives will use 3327 * fragmented skbs */ 3328 3329 if (max_frame <= E1000_RXBUFFER_2048) 3330 adapter->rx_buffer_len = E1000_RXBUFFER_2048; 3331 else 3332 #if (PAGE_SIZE >= E1000_RXBUFFER_16384) 3333 adapter->rx_buffer_len = E1000_RXBUFFER_16384; 3334 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096) 3335 adapter->rx_buffer_len = PAGE_SIZE; 3336 #endif 3337 3338 /* adjust allocation if LPE protects us, and we aren't using SBP */ 3339 if (!hw->tbi_compatibility_on && 3340 ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || 3341 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) 3342 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 3343 3344 pr_info("%s changing MTU from %d to %d\n", 3345 netdev->name, netdev->mtu, new_mtu); 3346 netdev->mtu = new_mtu; 3347 3348 if (netif_running(netdev)) 3349 e1000_up(adapter); 3350 else 3351 e1000_reset(adapter); 3352 3353 clear_bit(__E1000_RESETTING, &adapter->flags); 3354 3355 return 0; 3356 } 3357 3358 /** 3359 * e1000_update_stats - Update the board statistics counters 3360 * @adapter: board private structure 3361 **/ 3362 3363 void e1000_update_stats(struct e1000_adapter *adapter) 3364 { 3365 struct net_device *netdev = adapter->netdev; 3366 struct e1000_hw *hw = &adapter->hw; 3367 struct pci_dev *pdev = adapter->pdev; 3368 unsigned long flags; 3369 u16 phy_tmp; 3370 3371 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF 3372 3373 /* 3374 * Prevent stats update while adapter is being reset, or if the pci 3375 * connection is down. 
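 * (adapter->link_speed is zeroed when the link drops and while the
 * interface is being brought down, so the first check below also covers a
 * reset in progress.)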
3376 */ 3377 if (adapter->link_speed == 0) 3378 return; 3379 if (pci_channel_offline(pdev)) 3380 return; 3381 3382 spin_lock_irqsave(&adapter->stats_lock, flags); 3383 3384 /* these counters are modified from e1000_tbi_adjust_stats, 3385 * called from the interrupt context, so they must only 3386 * be written while holding adapter->stats_lock 3387 */ 3388 3389 adapter->stats.crcerrs += er32(CRCERRS); 3390 adapter->stats.gprc += er32(GPRC); 3391 adapter->stats.gorcl += er32(GORCL); 3392 adapter->stats.gorch += er32(GORCH); 3393 adapter->stats.bprc += er32(BPRC); 3394 adapter->stats.mprc += er32(MPRC); 3395 adapter->stats.roc += er32(ROC); 3396 3397 adapter->stats.prc64 += er32(PRC64); 3398 adapter->stats.prc127 += er32(PRC127); 3399 adapter->stats.prc255 += er32(PRC255); 3400 adapter->stats.prc511 += er32(PRC511); 3401 adapter->stats.prc1023 += er32(PRC1023); 3402 adapter->stats.prc1522 += er32(PRC1522); 3403 3404 adapter->stats.symerrs += er32(SYMERRS); 3405 adapter->stats.mpc += er32(MPC); 3406 adapter->stats.scc += er32(SCC); 3407 adapter->stats.ecol += er32(ECOL); 3408 adapter->stats.mcc += er32(MCC); 3409 adapter->stats.latecol += er32(LATECOL); 3410 adapter->stats.dc += er32(DC); 3411 adapter->stats.sec += er32(SEC); 3412 adapter->stats.rlec += er32(RLEC); 3413 adapter->stats.xonrxc += er32(XONRXC); 3414 adapter->stats.xontxc += er32(XONTXC); 3415 adapter->stats.xoffrxc += er32(XOFFRXC); 3416 adapter->stats.xofftxc += er32(XOFFTXC); 3417 adapter->stats.fcruc += er32(FCRUC); 3418 adapter->stats.gptc += er32(GPTC); 3419 adapter->stats.gotcl += er32(GOTCL); 3420 adapter->stats.gotch += er32(GOTCH); 3421 adapter->stats.rnbc += er32(RNBC); 3422 adapter->stats.ruc += er32(RUC); 3423 adapter->stats.rfc += er32(RFC); 3424 adapter->stats.rjc += er32(RJC); 3425 adapter->stats.torl += er32(TORL); 3426 adapter->stats.torh += er32(TORH); 3427 adapter->stats.totl += er32(TOTL); 3428 adapter->stats.toth += er32(TOTH); 3429 adapter->stats.tpr += er32(TPR); 3430 3431 adapter->stats.ptc64 += er32(PTC64); 3432 adapter->stats.ptc127 += er32(PTC127); 3433 adapter->stats.ptc255 += er32(PTC255); 3434 adapter->stats.ptc511 += er32(PTC511); 3435 adapter->stats.ptc1023 += er32(PTC1023); 3436 adapter->stats.ptc1522 += er32(PTC1522); 3437 3438 adapter->stats.mptc += er32(MPTC); 3439 adapter->stats.bptc += er32(BPTC); 3440 3441 /* used for adaptive IFS */ 3442 3443 hw->tx_packet_delta = er32(TPT); 3444 adapter->stats.tpt += hw->tx_packet_delta; 3445 hw->collision_delta = er32(COLC); 3446 adapter->stats.colc += hw->collision_delta; 3447 3448 if (hw->mac_type >= e1000_82543) { 3449 adapter->stats.algnerrc += er32(ALGNERRC); 3450 adapter->stats.rxerrc += er32(RXERRC); 3451 adapter->stats.tncrs += er32(TNCRS); 3452 adapter->stats.cexterr += er32(CEXTERR); 3453 adapter->stats.tsctc += er32(TSCTC); 3454 adapter->stats.tsctfc += er32(TSCTFC); 3455 } 3456 3457 /* Fill out the OS statistics structure */ 3458 netdev->stats.multicast = adapter->stats.mprc; 3459 netdev->stats.collisions = adapter->stats.colc; 3460 3461 /* Rx Errors */ 3462 3463 /* RLEC on some newer hardware can be incorrect so build 3464 * our own version based on RUC and ROC */ 3465 netdev->stats.rx_errors = adapter->stats.rxerrc + 3466 adapter->stats.crcerrs + adapter->stats.algnerrc + 3467 adapter->stats.ruc + adapter->stats.roc + 3468 adapter->stats.cexterr; 3469 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; 3470 netdev->stats.rx_length_errors = adapter->stats.rlerrc; 3471 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 3472 
netdev->stats.rx_frame_errors = adapter->stats.algnerrc; 3473 netdev->stats.rx_missed_errors = adapter->stats.mpc; 3474 3475 /* Tx Errors */ 3476 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; 3477 netdev->stats.tx_errors = adapter->stats.txerrc; 3478 netdev->stats.tx_aborted_errors = adapter->stats.ecol; 3479 netdev->stats.tx_window_errors = adapter->stats.latecol; 3480 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; 3481 if (hw->bad_tx_carr_stats_fd && 3482 adapter->link_duplex == FULL_DUPLEX) { 3483 netdev->stats.tx_carrier_errors = 0; 3484 adapter->stats.tncrs = 0; 3485 } 3486 3487 /* Tx Dropped needs to be maintained elsewhere */ 3488 3489 /* Phy Stats */ 3490 if (hw->media_type == e1000_media_type_copper) { 3491 if ((adapter->link_speed == SPEED_1000) && 3492 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { 3493 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; 3494 adapter->phy_stats.idle_errors += phy_tmp; 3495 } 3496 3497 if ((hw->mac_type <= e1000_82546) && 3498 (hw->phy_type == e1000_phy_m88) && 3499 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) 3500 adapter->phy_stats.receive_errors += phy_tmp; 3501 } 3502 3503 /* Management Stats */ 3504 if (hw->has_smbus) { 3505 adapter->stats.mgptc += er32(MGTPTC); 3506 adapter->stats.mgprc += er32(MGTPRC); 3507 adapter->stats.mgpdc += er32(MGTPDC); 3508 } 3509 3510 spin_unlock_irqrestore(&adapter->stats_lock, flags); 3511 } 3512 3513 /** 3514 * e1000_intr - Interrupt Handler 3515 * @irq: interrupt number 3516 * @data: pointer to a network interface device structure 3517 **/ 3518 3519 static irqreturn_t e1000_intr(int irq, void *data) 3520 { 3521 struct net_device *netdev = data; 3522 struct e1000_adapter *adapter = netdev_priv(netdev); 3523 struct e1000_hw *hw = &adapter->hw; 3524 u32 icr = er32(ICR); 3525 3526 if (unlikely((!icr))) 3527 return IRQ_NONE; /* Not our interrupt */ 3528 3529 /* 3530 * we might have caused the interrupt, but the above 3531 * read cleared it, and just in case the driver is 3532 * down there is nothing to do so return handled 3533 */ 3534 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) 3535 return IRQ_HANDLED; 3536 3537 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { 3538 hw->get_link_status = 1; 3539 /* guard against interrupt when we're going down */ 3540 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3541 schedule_delayed_work(&adapter->watchdog_task, 1); 3542 } 3543 3544 /* disable interrupts, without the synchronize_irq bit */ 3545 ew32(IMC, ~0); 3546 E1000_WRITE_FLUSH(); 3547 3548 if (likely(napi_schedule_prep(&adapter->napi))) { 3549 adapter->total_tx_bytes = 0; 3550 adapter->total_tx_packets = 0; 3551 adapter->total_rx_bytes = 0; 3552 adapter->total_rx_packets = 0; 3553 __napi_schedule(&adapter->napi); 3554 } else { 3555 /* this really should not happen! 
if it does it is basically a 3556 * bug, but not a hard error, so enable ints and continue */ 3557 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3558 e1000_irq_enable(adapter); 3559 } 3560 3561 return IRQ_HANDLED; 3562 } 3563 3564 /** 3565 * e1000_clean - NAPI Rx polling callback 3566 * @adapter: board private structure 3567 **/ 3568 static int e1000_clean(struct napi_struct *napi, int budget) 3569 { 3570 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); 3571 int tx_clean_complete = 0, work_done = 0; 3572 3573 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); 3574 3575 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); 3576 3577 if (!tx_clean_complete) 3578 work_done = budget; 3579 3580 /* If budget not fully consumed, exit the polling mode */ 3581 if (work_done < budget) { 3582 if (likely(adapter->itr_setting & 3)) 3583 e1000_set_itr(adapter); 3584 napi_complete(napi); 3585 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3586 e1000_irq_enable(adapter); 3587 } 3588 3589 return work_done; 3590 } 3591 3592 /** 3593 * e1000_clean_tx_irq - Reclaim resources after transmit completes 3594 * @adapter: board private structure 3595 **/ 3596 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, 3597 struct e1000_tx_ring *tx_ring) 3598 { 3599 struct e1000_hw *hw = &adapter->hw; 3600 struct net_device *netdev = adapter->netdev; 3601 struct e1000_tx_desc *tx_desc, *eop_desc; 3602 struct e1000_buffer *buffer_info; 3603 unsigned int i, eop; 3604 unsigned int count = 0; 3605 unsigned int total_tx_bytes=0, total_tx_packets=0; 3606 3607 i = tx_ring->next_to_clean; 3608 eop = tx_ring->buffer_info[i].next_to_watch; 3609 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3610 3611 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 3612 (count < tx_ring->count)) { 3613 bool cleaned = false; 3614 rmb(); /* read buffer_info after eop_desc */ 3615 for ( ; !cleaned; count++) { 3616 tx_desc = E1000_TX_DESC(*tx_ring, i); 3617 buffer_info = &tx_ring->buffer_info[i]; 3618 cleaned = (i == eop); 3619 3620 if (cleaned) { 3621 total_tx_packets += buffer_info->segs; 3622 total_tx_bytes += buffer_info->bytecount; 3623 } 3624 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 3625 tx_desc->upper.data = 0; 3626 3627 if (unlikely(++i == tx_ring->count)) i = 0; 3628 } 3629 3630 eop = tx_ring->buffer_info[i].next_to_watch; 3631 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3632 } 3633 3634 tx_ring->next_to_clean = i; 3635 3636 #define TX_WAKE_THRESHOLD 32 3637 if (unlikely(count && netif_carrier_ok(netdev) && 3638 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { 3639 /* Make sure that anybody stopping the queue after this 3640 * sees the new next_to_clean. 
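 * This pairs with the smp_mb() in __e1000_maybe_stop_tx() so the queue
 * stop/wake decision and the ring indices are observed in a consistent
 * order.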
3641 */ 3642 smp_mb(); 3643 3644 if (netif_queue_stopped(netdev) && 3645 !(test_bit(__E1000_DOWN, &adapter->flags))) { 3646 netif_wake_queue(netdev); 3647 ++adapter->restart_queue; 3648 } 3649 } 3650 3651 if (adapter->detect_tx_hung) { 3652 /* Detect a transmit hang in hardware, this serializes the 3653 * check with the clearing of time_stamp and movement of i */ 3654 adapter->detect_tx_hung = false; 3655 if (tx_ring->buffer_info[eop].time_stamp && 3656 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + 3657 (adapter->tx_timeout_factor * HZ)) && 3658 !(er32(STATUS) & E1000_STATUS_TXOFF)) { 3659 3660 /* detected Tx unit hang */ 3661 e_err(drv, "Detected Tx Unit Hang\n" 3662 " Tx Queue <%lu>\n" 3663 " TDH <%x>\n" 3664 " TDT <%x>\n" 3665 " next_to_use <%x>\n" 3666 " next_to_clean <%x>\n" 3667 "buffer_info[next_to_clean]\n" 3668 " time_stamp <%lx>\n" 3669 " next_to_watch <%x>\n" 3670 " jiffies <%lx>\n" 3671 " next_to_watch.status <%x>\n", 3672 (unsigned long)((tx_ring - adapter->tx_ring) / 3673 sizeof(struct e1000_tx_ring)), 3674 readl(hw->hw_addr + tx_ring->tdh), 3675 readl(hw->hw_addr + tx_ring->tdt), 3676 tx_ring->next_to_use, 3677 tx_ring->next_to_clean, 3678 tx_ring->buffer_info[eop].time_stamp, 3679 eop, 3680 jiffies, 3681 eop_desc->upper.fields.status); 3682 netif_stop_queue(netdev); 3683 } 3684 } 3685 adapter->total_tx_bytes += total_tx_bytes; 3686 adapter->total_tx_packets += total_tx_packets; 3687 netdev->stats.tx_bytes += total_tx_bytes; 3688 netdev->stats.tx_packets += total_tx_packets; 3689 return count < tx_ring->count; 3690 } 3691 3692 /** 3693 * e1000_rx_checksum - Receive Checksum Offload for 82543 3694 * @adapter: board private structure 3695 * @status_err: receive descriptor status and error fields 3696 * @csum: receive descriptor csum field 3697 * @sk_buff: socket buffer with received data 3698 **/ 3699 3700 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, 3701 u32 csum, struct sk_buff *skb) 3702 { 3703 struct e1000_hw *hw = &adapter->hw; 3704 u16 status = (u16)status_err; 3705 u8 errors = (u8)(status_err >> 24); 3706 3707 skb_checksum_none_assert(skb); 3708 3709 /* 82543 or newer only */ 3710 if (unlikely(hw->mac_type < e1000_82543)) return; 3711 /* Ignore Checksum bit is set */ 3712 if (unlikely(status & E1000_RXD_STAT_IXSM)) return; 3713 /* TCP/UDP checksum error bit is set */ 3714 if (unlikely(errors & E1000_RXD_ERR_TCPE)) { 3715 /* let the stack verify checksum errors */ 3716 adapter->hw_csum_err++; 3717 return; 3718 } 3719 /* TCP/UDP Checksum has not been calculated */ 3720 if (!(status & E1000_RXD_STAT_TCPCS)) 3721 return; 3722 3723 /* It must be a TCP or UDP packet with a valid checksum */ 3724 if (likely(status & E1000_RXD_STAT_TCPCS)) { 3725 /* TCP checksum is good */ 3726 skb->ip_summed = CHECKSUM_UNNECESSARY; 3727 } 3728 adapter->hw_csum_good++; 3729 } 3730 3731 /** 3732 * e1000_consume_page - helper function 3733 **/ 3734 static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, 3735 u16 length) 3736 { 3737 bi->page = NULL; 3738 skb->len += length; 3739 skb->data_len += length; 3740 skb->truesize += PAGE_SIZE; 3741 } 3742 3743 /** 3744 * e1000_receive_skb - helper function to handle rx indications 3745 * @adapter: board private structure 3746 * @status: descriptor status field as written by hardware 3747 * @vlan: descriptor vlan field as written by hardware (no le/be conversion) 3748 * @skb: pointer to sk_buff to be indicated to stack 3749 */ 3750 static void e1000_receive_skb(struct e1000_adapter *adapter, 
u8 status, 3751 __le16 vlan, struct sk_buff *skb) 3752 { 3753 skb->protocol = eth_type_trans(skb, adapter->netdev); 3754 3755 if (status & E1000_RXD_STAT_VP) { 3756 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; 3757 3758 __vlan_hwaccel_put_tag(skb, vid); 3759 } 3760 napi_gro_receive(&adapter->napi, skb); 3761 } 3762 3763 /** 3764 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy 3765 * @adapter: board private structure 3766 * @rx_ring: ring to clean 3767 * @work_done: amount of napi work completed this call 3768 * @work_to_do: max amount of work allowed for this call to do 3769 * 3770 * the return value indicates whether actual cleaning was done, there 3771 * is no guarantee that everything was cleaned 3772 */ 3773 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, 3774 struct e1000_rx_ring *rx_ring, 3775 int *work_done, int work_to_do) 3776 { 3777 struct e1000_hw *hw = &adapter->hw; 3778 struct net_device *netdev = adapter->netdev; 3779 struct pci_dev *pdev = adapter->pdev; 3780 struct e1000_rx_desc *rx_desc, *next_rxd; 3781 struct e1000_buffer *buffer_info, *next_buffer; 3782 unsigned long irq_flags; 3783 u32 length; 3784 unsigned int i; 3785 int cleaned_count = 0; 3786 bool cleaned = false; 3787 unsigned int total_rx_bytes=0, total_rx_packets=0; 3788 3789 i = rx_ring->next_to_clean; 3790 rx_desc = E1000_RX_DESC(*rx_ring, i); 3791 buffer_info = &rx_ring->buffer_info[i]; 3792 3793 while (rx_desc->status & E1000_RXD_STAT_DD) { 3794 struct sk_buff *skb; 3795 u8 status; 3796 3797 if (*work_done >= work_to_do) 3798 break; 3799 (*work_done)++; 3800 rmb(); /* read descriptor and rx_buffer_info after status DD */ 3801 3802 status = rx_desc->status; 3803 skb = buffer_info->skb; 3804 buffer_info->skb = NULL; 3805 3806 if (++i == rx_ring->count) i = 0; 3807 next_rxd = E1000_RX_DESC(*rx_ring, i); 3808 prefetch(next_rxd); 3809 3810 next_buffer = &rx_ring->buffer_info[i]; 3811 3812 cleaned = true; 3813 cleaned_count++; 3814 dma_unmap_page(&pdev->dev, buffer_info->dma, 3815 buffer_info->length, DMA_FROM_DEVICE); 3816 buffer_info->dma = 0; 3817 3818 length = le16_to_cpu(rx_desc->length); 3819 3820 /* errors is only valid for DD + EOP descriptors */ 3821 if (unlikely((status & E1000_RXD_STAT_EOP) && 3822 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { 3823 u8 last_byte = *(skb->data + length - 1); 3824 if (TBI_ACCEPT(hw, status, rx_desc->errors, length, 3825 last_byte)) { 3826 spin_lock_irqsave(&adapter->stats_lock, 3827 irq_flags); 3828 e1000_tbi_adjust_stats(hw, &adapter->stats, 3829 length, skb->data); 3830 spin_unlock_irqrestore(&adapter->stats_lock, 3831 irq_flags); 3832 length--; 3833 } else { 3834 /* recycle both page and skb */ 3835 buffer_info->skb = skb; 3836 /* an error means any chain goes out the window 3837 * too */ 3838 if (rx_ring->rx_skb_top) 3839 dev_kfree_skb(rx_ring->rx_skb_top); 3840 rx_ring->rx_skb_top = NULL; 3841 goto next_desc; 3842 } 3843 } 3844 3845 #define rxtop rx_ring->rx_skb_top 3846 if (!(status & E1000_RXD_STAT_EOP)) { 3847 /* this descriptor is only the beginning (or middle) */ 3848 if (!rxtop) { 3849 /* this is the beginning of a chain */ 3850 rxtop = skb; 3851 skb_fill_page_desc(rxtop, 0, buffer_info->page, 3852 0, length); 3853 } else { 3854 /* this is the middle of a chain */ 3855 skb_fill_page_desc(rxtop, 3856 skb_shinfo(rxtop)->nr_frags, 3857 buffer_info->page, 0, length); 3858 /* re-use the skb, only consumed the page */ 3859 buffer_info->skb = skb; 3860 } 3861 e1000_consume_page(buffer_info, rxtop, 
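/* A short note on the accounting e1000_consume_page() performs for this
 * chained-jumbo path: the whole page is donated to the chain even when
 * only "length" bytes of it were filled, so skb->truesize grows by
 * PAGE_SIZE while skb->len and skb->data_len grow only by length.  The
 * per-buffer skb itself is kept in buffer_info->skb so it can be reused
 * for the next descriptor.
 */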
length); 3862 goto next_desc; 3863 } else { 3864 if (rxtop) { 3865 /* end of the chain */ 3866 skb_fill_page_desc(rxtop, 3867 skb_shinfo(rxtop)->nr_frags, 3868 buffer_info->page, 0, length); 3869 /* re-use the current skb, we only consumed the 3870 * page */ 3871 buffer_info->skb = skb; 3872 skb = rxtop; 3873 rxtop = NULL; 3874 e1000_consume_page(buffer_info, skb, length); 3875 } else { 3876 /* no chain, got EOP, this buf is the packet 3877 * copybreak to save the put_page/alloc_page */ 3878 if (length <= copybreak && 3879 skb_tailroom(skb) >= length) { 3880 u8 *vaddr; 3881 vaddr = kmap_atomic(buffer_info->page, 3882 KM_SKB_DATA_SOFTIRQ); 3883 memcpy(skb_tail_pointer(skb), vaddr, length); 3884 kunmap_atomic(vaddr, 3885 KM_SKB_DATA_SOFTIRQ); 3886 /* re-use the page, so don't erase 3887 * buffer_info->page */ 3888 skb_put(skb, length); 3889 } else { 3890 skb_fill_page_desc(skb, 0, 3891 buffer_info->page, 0, 3892 length); 3893 e1000_consume_page(buffer_info, skb, 3894 length); 3895 } 3896 } 3897 } 3898 3899 /* Receive Checksum Offload XXX recompute due to CRC strip? */ 3900 e1000_rx_checksum(adapter, 3901 (u32)(status) | 3902 ((u32)(rx_desc->errors) << 24), 3903 le16_to_cpu(rx_desc->csum), skb); 3904 3905 pskb_trim(skb, skb->len - 4); 3906 3907 /* probably a little skewed due to removing CRC */ 3908 total_rx_bytes += skb->len; 3909 total_rx_packets++; 3910 3911 /* eth type trans needs skb->data to point to something */ 3912 if (!pskb_may_pull(skb, ETH_HLEN)) { 3913 e_err(drv, "pskb_may_pull failed.\n"); 3914 dev_kfree_skb(skb); 3915 goto next_desc; 3916 } 3917 3918 e1000_receive_skb(adapter, status, rx_desc->special, skb); 3919 3920 next_desc: 3921 rx_desc->status = 0; 3922 3923 /* return some buffers to hardware, one at a time is too slow */ 3924 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { 3925 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 3926 cleaned_count = 0; 3927 } 3928 3929 /* use prefetched values */ 3930 rx_desc = next_rxd; 3931 buffer_info = next_buffer; 3932 } 3933 rx_ring->next_to_clean = i; 3934 3935 cleaned_count = E1000_DESC_UNUSED(rx_ring); 3936 if (cleaned_count) 3937 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 3938 3939 adapter->total_rx_packets += total_rx_packets; 3940 adapter->total_rx_bytes += total_rx_bytes; 3941 netdev->stats.rx_bytes += total_rx_bytes; 3942 netdev->stats.rx_packets += total_rx_packets; 3943 return cleaned; 3944 } 3945 3946 /* 3947 * this should improve performance for small packets with large amounts 3948 * of reassembly being done in the stack 3949 */ 3950 static void e1000_check_copybreak(struct net_device *netdev, 3951 struct e1000_buffer *buffer_info, 3952 u32 length, struct sk_buff **skb) 3953 { 3954 struct sk_buff *new_skb; 3955 3956 if (length > copybreak) 3957 return; 3958 3959 new_skb = netdev_alloc_skb_ip_align(netdev, length); 3960 if (!new_skb) 3961 return; 3962 3963 skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN, 3964 (*skb)->data - NET_IP_ALIGN, 3965 length + NET_IP_ALIGN); 3966 /* save the skb in buffer_info as good */ 3967 buffer_info->skb = *skb; 3968 *skb = new_skb; 3969 } 3970 3971 /** 3972 * e1000_clean_rx_irq - Send received data up the network stack; legacy 3973 * @adapter: board private structure 3974 * @rx_ring: ring to clean 3975 * @work_done: amount of napi work completed this call 3976 * @work_to_do: max amount of work allowed for this call to do 3977 */ 3978 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, 3979 struct e1000_rx_ring *rx_ring, 3980 int *work_done, int 
work_to_do) 3981 { 3982 struct e1000_hw *hw = &adapter->hw; 3983 struct net_device *netdev = adapter->netdev; 3984 struct pci_dev *pdev = adapter->pdev; 3985 struct e1000_rx_desc *rx_desc, *next_rxd; 3986 struct e1000_buffer *buffer_info, *next_buffer; 3987 unsigned long flags; 3988 u32 length; 3989 unsigned int i; 3990 int cleaned_count = 0; 3991 bool cleaned = false; 3992 unsigned int total_rx_bytes=0, total_rx_packets=0; 3993 3994 i = rx_ring->next_to_clean; 3995 rx_desc = E1000_RX_DESC(*rx_ring, i); 3996 buffer_info = &rx_ring->buffer_info[i]; 3997 3998 while (rx_desc->status & E1000_RXD_STAT_DD) { 3999 struct sk_buff *skb; 4000 u8 status; 4001 4002 if (*work_done >= work_to_do) 4003 break; 4004 (*work_done)++; 4005 rmb(); /* read descriptor and rx_buffer_info after status DD */ 4006 4007 status = rx_desc->status; 4008 skb = buffer_info->skb; 4009 buffer_info->skb = NULL; 4010 4011 prefetch(skb->data - NET_IP_ALIGN); 4012 4013 if (++i == rx_ring->count) i = 0; 4014 next_rxd = E1000_RX_DESC(*rx_ring, i); 4015 prefetch(next_rxd); 4016 4017 next_buffer = &rx_ring->buffer_info[i]; 4018 4019 cleaned = true; 4020 cleaned_count++; 4021 dma_unmap_single(&pdev->dev, buffer_info->dma, 4022 buffer_info->length, DMA_FROM_DEVICE); 4023 buffer_info->dma = 0; 4024 4025 length = le16_to_cpu(rx_desc->length); 4026 /* !EOP means multiple descriptors were used to store a single 4027 * packet, if thats the case we need to toss it. In fact, we 4028 * to toss every packet with the EOP bit clear and the next 4029 * frame that _does_ have the EOP bit set, as it is by 4030 * definition only a frame fragment 4031 */ 4032 if (unlikely(!(status & E1000_RXD_STAT_EOP))) 4033 adapter->discarding = true; 4034 4035 if (adapter->discarding) { 4036 /* All receives must fit into a single buffer */ 4037 e_dbg("Receive packet consumed multiple buffers\n"); 4038 /* recycle */ 4039 buffer_info->skb = skb; 4040 if (status & E1000_RXD_STAT_EOP) 4041 adapter->discarding = false; 4042 goto next_desc; 4043 } 4044 4045 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { 4046 u8 last_byte = *(skb->data + length - 1); 4047 if (TBI_ACCEPT(hw, status, rx_desc->errors, length, 4048 last_byte)) { 4049 spin_lock_irqsave(&adapter->stats_lock, flags); 4050 e1000_tbi_adjust_stats(hw, &adapter->stats, 4051 length, skb->data); 4052 spin_unlock_irqrestore(&adapter->stats_lock, 4053 flags); 4054 length--; 4055 } else { 4056 /* recycle */ 4057 buffer_info->skb = skb; 4058 goto next_desc; 4059 } 4060 } 4061 4062 /* adjust length to remove Ethernet CRC, this must be 4063 * done after the TBI_ACCEPT workaround above */ 4064 length -= 4; 4065 4066 /* probably a little skewed due to removing CRC */ 4067 total_rx_bytes += length; 4068 total_rx_packets++; 4069 4070 e1000_check_copybreak(netdev, buffer_info, length, &skb); 4071 4072 skb_put(skb, length); 4073 4074 /* Receive Checksum Offload */ 4075 e1000_rx_checksum(adapter, 4076 (u32)(status) | 4077 ((u32)(rx_desc->errors) << 24), 4078 le16_to_cpu(rx_desc->csum), skb); 4079 4080 e1000_receive_skb(adapter, status, rx_desc->special, skb); 4081 4082 next_desc: 4083 rx_desc->status = 0; 4084 4085 /* return some buffers to hardware, one at a time is too slow */ 4086 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { 4087 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4088 cleaned_count = 0; 4089 } 4090 4091 /* use prefetched values */ 4092 rx_desc = next_rxd; 4093 buffer_info = next_buffer; 4094 } 4095 rx_ring->next_to_clean = i; 4096 4097 cleaned_count = 
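/* E1000_DESC_UNUSED() just below reports how many descriptors software
 * currently owns (roughly next_to_clean minus next_to_use minus one,
 * wrapped around the ring size), so everything the hardware has handed
 * back gets re-armed in one batch after the clean loop.
 */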
E1000_DESC_UNUSED(rx_ring); 4098 if (cleaned_count) 4099 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4100 4101 adapter->total_rx_packets += total_rx_packets; 4102 adapter->total_rx_bytes += total_rx_bytes; 4103 netdev->stats.rx_bytes += total_rx_bytes; 4104 netdev->stats.rx_packets += total_rx_packets; 4105 return cleaned; 4106 } 4107 4108 /** 4109 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers 4110 * @adapter: address of board private structure 4111 * @rx_ring: pointer to receive ring structure 4112 * @cleaned_count: number of buffers to allocate this pass 4113 **/ 4114 4115 static void 4116 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, 4117 struct e1000_rx_ring *rx_ring, int cleaned_count) 4118 { 4119 struct net_device *netdev = adapter->netdev; 4120 struct pci_dev *pdev = adapter->pdev; 4121 struct e1000_rx_desc *rx_desc; 4122 struct e1000_buffer *buffer_info; 4123 struct sk_buff *skb; 4124 unsigned int i; 4125 unsigned int bufsz = 256 - 16 /*for skb_reserve */ ; 4126 4127 i = rx_ring->next_to_use; 4128 buffer_info = &rx_ring->buffer_info[i]; 4129 4130 while (cleaned_count--) { 4131 skb = buffer_info->skb; 4132 if (skb) { 4133 skb_trim(skb, 0); 4134 goto check_page; 4135 } 4136 4137 skb = netdev_alloc_skb_ip_align(netdev, bufsz); 4138 if (unlikely(!skb)) { 4139 /* Better luck next round */ 4140 adapter->alloc_rx_buff_failed++; 4141 break; 4142 } 4143 4144 /* Fix for errata 23, can't cross 64kB boundary */ 4145 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 4146 struct sk_buff *oldskb = skb; 4147 e_err(rx_err, "skb align check failed: %u bytes at " 4148 "%p\n", bufsz, skb->data); 4149 /* Try again, without freeing the previous */ 4150 skb = netdev_alloc_skb_ip_align(netdev, bufsz); 4151 /* Failed allocation, critical failure */ 4152 if (!skb) { 4153 dev_kfree_skb(oldskb); 4154 adapter->alloc_rx_buff_failed++; 4155 break; 4156 } 4157 4158 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 4159 /* give up */ 4160 dev_kfree_skb(skb); 4161 dev_kfree_skb(oldskb); 4162 break; /* while (cleaned_count--) */ 4163 } 4164 4165 /* Use new allocation */ 4166 dev_kfree_skb(oldskb); 4167 } 4168 buffer_info->skb = skb; 4169 buffer_info->length = adapter->rx_buffer_len; 4170 check_page: 4171 /* allocate a new page if necessary */ 4172 if (!buffer_info->page) { 4173 buffer_info->page = alloc_page(GFP_ATOMIC); 4174 if (unlikely(!buffer_info->page)) { 4175 adapter->alloc_rx_buff_failed++; 4176 break; 4177 } 4178 } 4179 4180 if (!buffer_info->dma) { 4181 buffer_info->dma = dma_map_page(&pdev->dev, 4182 buffer_info->page, 0, 4183 buffer_info->length, 4184 DMA_FROM_DEVICE); 4185 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 4186 put_page(buffer_info->page); 4187 dev_kfree_skb(skb); 4188 buffer_info->page = NULL; 4189 buffer_info->skb = NULL; 4190 buffer_info->dma = 0; 4191 adapter->alloc_rx_buff_failed++; 4192 break; /* while !buffer_info->skb */ 4193 } 4194 } 4195 4196 rx_desc = E1000_RX_DESC(*rx_ring, i); 4197 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4198 4199 if (unlikely(++i == rx_ring->count)) 4200 i = 0; 4201 buffer_info = &rx_ring->buffer_info[i]; 4202 } 4203 4204 if (likely(rx_ring->next_to_use != i)) { 4205 rx_ring->next_to_use = i; 4206 if (unlikely(i-- == 0)) 4207 i = (rx_ring->count - 1); 4208 4209 /* Force memory writes to complete before letting h/w 4210 * know there are new descriptors to fetch. (Only 4211 * applicable for weak-ordered memory model archs, 4212 * such as IA-64). 
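 * In other words, the descriptor writes above must be globally visible
 * before the posted write to the tail register below, otherwise the NIC
 * could fetch a descriptor whose buffer address it still sees as stale.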
*/ 4213 wmb(); 4214 writel(i, adapter->hw.hw_addr + rx_ring->rdt); 4215 } 4216 } 4217 4218 /** 4219 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended 4220 * @adapter: address of board private structure 4221 **/ 4222 4223 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, 4224 struct e1000_rx_ring *rx_ring, 4225 int cleaned_count) 4226 { 4227 struct e1000_hw *hw = &adapter->hw; 4228 struct net_device *netdev = adapter->netdev; 4229 struct pci_dev *pdev = adapter->pdev; 4230 struct e1000_rx_desc *rx_desc; 4231 struct e1000_buffer *buffer_info; 4232 struct sk_buff *skb; 4233 unsigned int i; 4234 unsigned int bufsz = adapter->rx_buffer_len; 4235 4236 i = rx_ring->next_to_use; 4237 buffer_info = &rx_ring->buffer_info[i]; 4238 4239 while (cleaned_count--) { 4240 skb = buffer_info->skb; 4241 if (skb) { 4242 skb_trim(skb, 0); 4243 goto map_skb; 4244 } 4245 4246 skb = netdev_alloc_skb_ip_align(netdev, bufsz); 4247 if (unlikely(!skb)) { 4248 /* Better luck next round */ 4249 adapter->alloc_rx_buff_failed++; 4250 break; 4251 } 4252 4253 /* Fix for errata 23, can't cross 64kB boundary */ 4254 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 4255 struct sk_buff *oldskb = skb; 4256 e_err(rx_err, "skb align check failed: %u bytes at " 4257 "%p\n", bufsz, skb->data); 4258 /* Try again, without freeing the previous */ 4259 skb = netdev_alloc_skb_ip_align(netdev, bufsz); 4260 /* Failed allocation, critical failure */ 4261 if (!skb) { 4262 dev_kfree_skb(oldskb); 4263 adapter->alloc_rx_buff_failed++; 4264 break; 4265 } 4266 4267 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 4268 /* give up */ 4269 dev_kfree_skb(skb); 4270 dev_kfree_skb(oldskb); 4271 adapter->alloc_rx_buff_failed++; 4272 break; /* while !buffer_info->skb */ 4273 } 4274 4275 /* Use new allocation */ 4276 dev_kfree_skb(oldskb); 4277 } 4278 buffer_info->skb = skb; 4279 buffer_info->length = adapter->rx_buffer_len; 4280 map_skb: 4281 buffer_info->dma = dma_map_single(&pdev->dev, 4282 skb->data, 4283 buffer_info->length, 4284 DMA_FROM_DEVICE); 4285 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 4286 dev_kfree_skb(skb); 4287 buffer_info->skb = NULL; 4288 buffer_info->dma = 0; 4289 adapter->alloc_rx_buff_failed++; 4290 break; /* while !buffer_info->skb */ 4291 } 4292 4293 /* 4294 * XXX if it was allocated cleanly it will never map to a 4295 * boundary crossing 4296 */ 4297 4298 /* Fix for errata 23, can't cross 64kB boundary */ 4299 if (!e1000_check_64k_bound(adapter, 4300 (void *)(unsigned long)buffer_info->dma, 4301 adapter->rx_buffer_len)) { 4302 e_err(rx_err, "dma align check failed: %u bytes at " 4303 "%p\n", adapter->rx_buffer_len, 4304 (void *)(unsigned long)buffer_info->dma); 4305 dev_kfree_skb(skb); 4306 buffer_info->skb = NULL; 4307 4308 dma_unmap_single(&pdev->dev, buffer_info->dma, 4309 adapter->rx_buffer_len, 4310 DMA_FROM_DEVICE); 4311 buffer_info->dma = 0; 4312 4313 adapter->alloc_rx_buff_failed++; 4314 break; /* while !buffer_info->skb */ 4315 } 4316 rx_desc = E1000_RX_DESC(*rx_ring, i); 4317 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4318 4319 if (unlikely(++i == rx_ring->count)) 4320 i = 0; 4321 buffer_info = &rx_ring->buffer_info[i]; 4322 } 4323 4324 if (likely(rx_ring->next_to_use != i)) { 4325 rx_ring->next_to_use = i; 4326 if (unlikely(i-- == 0)) 4327 i = (rx_ring->count - 1); 4328 4329 /* Force memory writes to complete before letting h/w 4330 * know there are new descriptors to fetch. 
(Only 4331 * applicable for weak-ordered memory model archs, 4332 * such as IA-64). */ 4333 wmb(); 4334 writel(i, hw->hw_addr + rx_ring->rdt); 4335 } 4336 } 4337 4338 /** 4339 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. 4340 * @adapter: 4341 **/ 4342 4343 static void e1000_smartspeed(struct e1000_adapter *adapter) 4344 { 4345 struct e1000_hw *hw = &adapter->hw; 4346 u16 phy_status; 4347 u16 phy_ctrl; 4348 4349 if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || 4350 !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) 4351 return; 4352 4353 if (adapter->smartspeed == 0) { 4354 /* If Master/Slave config fault is asserted twice, 4355 * we assume back-to-back */ 4356 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4357 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; 4358 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4359 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; 4360 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4361 if (phy_ctrl & CR_1000T_MS_ENABLE) { 4362 phy_ctrl &= ~CR_1000T_MS_ENABLE; 4363 e1000_write_phy_reg(hw, PHY_1000T_CTRL, 4364 phy_ctrl); 4365 adapter->smartspeed++; 4366 if (!e1000_phy_setup_autoneg(hw) && 4367 !e1000_read_phy_reg(hw, PHY_CTRL, 4368 &phy_ctrl)) { 4369 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4370 MII_CR_RESTART_AUTO_NEG); 4371 e1000_write_phy_reg(hw, PHY_CTRL, 4372 phy_ctrl); 4373 } 4374 } 4375 return; 4376 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { 4377 /* If still no link, perhaps using 2/3 pair cable */ 4378 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4379 phy_ctrl |= CR_1000T_MS_ENABLE; 4380 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); 4381 if (!e1000_phy_setup_autoneg(hw) && 4382 !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { 4383 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4384 MII_CR_RESTART_AUTO_NEG); 4385 e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); 4386 } 4387 } 4388 /* Restart process after E1000_SMARTSPEED_MAX iterations */ 4389 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) 4390 adapter->smartspeed = 0; 4391 } 4392 4393 /** 4394 * e1000_ioctl - 4395 * @netdev: 4396 * @ifreq: 4397 * @cmd: 4398 **/ 4399 4400 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 4401 { 4402 switch (cmd) { 4403 case SIOCGMIIPHY: 4404 case SIOCGMIIREG: 4405 case SIOCSMIIREG: 4406 return e1000_mii_ioctl(netdev, ifr, cmd); 4407 default: 4408 return -EOPNOTSUPP; 4409 } 4410 } 4411 4412 /** 4413 * e1000_mii_ioctl - 4414 * @netdev: 4415 * @ifreq: 4416 * @cmd: 4417 **/ 4418 4419 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 4420 int cmd) 4421 { 4422 struct e1000_adapter *adapter = netdev_priv(netdev); 4423 struct e1000_hw *hw = &adapter->hw; 4424 struct mii_ioctl_data *data = if_mii(ifr); 4425 int retval; 4426 u16 mii_reg; 4427 unsigned long flags; 4428 4429 if (hw->media_type != e1000_media_type_copper) 4430 return -EOPNOTSUPP; 4431 4432 switch (cmd) { 4433 case SIOCGMIIPHY: 4434 data->phy_id = hw->phy_addr; 4435 break; 4436 case SIOCGMIIREG: 4437 spin_lock_irqsave(&adapter->stats_lock, flags); 4438 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, 4439 &data->val_out)) { 4440 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4441 return -EIO; 4442 } 4443 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4444 break; 4445 case SIOCSMIIREG: 4446 if (data->reg_num & ~(0x1F)) 4447 return -EFAULT; 4448 mii_reg = data->val_in; 4449 spin_lock_irqsave(&adapter->stats_lock, flags); 4450 if (e1000_write_phy_reg(hw, data->reg_num, 4451 
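/* For context, a hedged sketch (not code from this driver) of how user
 * space typically reaches e1000_mii_ioctl(), in the style of mii-tool;
 * the interface name and register number are only examples:
 *
 *      struct ifreq ifr;
 *      struct mii_ioctl_data *mii =
 *              (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *      ioctl(sock, SIOCGMIIPHY, &ifr);         phy_id is filled in
 *      mii->reg_num = 1;                       MII BMSR
 *      ioctl(sock, SIOCGMIIREG, &ifr);         value lands in val_out
 *
 * SIOCSMIIREG takes the same shape via val_in and requires
 * CAP_NET_ADMIN, which the core checks before this handler runs.
 */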
mii_reg)) { 4452 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4453 return -EIO; 4454 } 4455 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4456 if (hw->media_type == e1000_media_type_copper) { 4457 switch (data->reg_num) { 4458 case PHY_CTRL: 4459 if (mii_reg & MII_CR_POWER_DOWN) 4460 break; 4461 if (mii_reg & MII_CR_AUTO_NEG_EN) { 4462 hw->autoneg = 1; 4463 hw->autoneg_advertised = 0x2F; 4464 } else { 4465 u32 speed; 4466 if (mii_reg & 0x40) 4467 speed = SPEED_1000; 4468 else if (mii_reg & 0x2000) 4469 speed = SPEED_100; 4470 else 4471 speed = SPEED_10; 4472 retval = e1000_set_spd_dplx( 4473 adapter, speed, 4474 ((mii_reg & 0x100) 4475 ? DUPLEX_FULL : 4476 DUPLEX_HALF)); 4477 if (retval) 4478 return retval; 4479 } 4480 if (netif_running(adapter->netdev)) 4481 e1000_reinit_locked(adapter); 4482 else 4483 e1000_reset(adapter); 4484 break; 4485 case M88E1000_PHY_SPEC_CTRL: 4486 case M88E1000_EXT_PHY_SPEC_CTRL: 4487 if (e1000_phy_reset(hw)) 4488 return -EIO; 4489 break; 4490 } 4491 } else { 4492 switch (data->reg_num) { 4493 case PHY_CTRL: 4494 if (mii_reg & MII_CR_POWER_DOWN) 4495 break; 4496 if (netif_running(adapter->netdev)) 4497 e1000_reinit_locked(adapter); 4498 else 4499 e1000_reset(adapter); 4500 break; 4501 } 4502 } 4503 break; 4504 default: 4505 return -EOPNOTSUPP; 4506 } 4507 return E1000_SUCCESS; 4508 } 4509 4510 void e1000_pci_set_mwi(struct e1000_hw *hw) 4511 { 4512 struct e1000_adapter *adapter = hw->back; 4513 int ret_val = pci_set_mwi(adapter->pdev); 4514 4515 if (ret_val) 4516 e_err(probe, "Error in setting MWI\n"); 4517 } 4518 4519 void e1000_pci_clear_mwi(struct e1000_hw *hw) 4520 { 4521 struct e1000_adapter *adapter = hw->back; 4522 4523 pci_clear_mwi(adapter->pdev); 4524 } 4525 4526 int e1000_pcix_get_mmrbc(struct e1000_hw *hw) 4527 { 4528 struct e1000_adapter *adapter = hw->back; 4529 return pcix_get_mmrbc(adapter->pdev); 4530 } 4531 4532 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) 4533 { 4534 struct e1000_adapter *adapter = hw->back; 4535 pcix_set_mmrbc(adapter->pdev, mmrbc); 4536 } 4537 4538 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) 4539 { 4540 outl(value, port); 4541 } 4542 4543 static bool e1000_vlan_used(struct e1000_adapter *adapter) 4544 { 4545 u16 vid; 4546 4547 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 4548 return true; 4549 return false; 4550 } 4551 4552 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, 4553 bool filter_on) 4554 { 4555 struct e1000_hw *hw = &adapter->hw; 4556 u32 rctl; 4557 4558 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4559 e1000_irq_disable(adapter); 4560 4561 if (filter_on) { 4562 /* enable VLAN receive filtering */ 4563 rctl = er32(RCTL); 4564 rctl &= ~E1000_RCTL_CFIEN; 4565 if (!(adapter->netdev->flags & IFF_PROMISC)) 4566 rctl |= E1000_RCTL_VFE; 4567 ew32(RCTL, rctl); 4568 e1000_update_mng_vlan(adapter); 4569 } else { 4570 /* disable VLAN receive filtering */ 4571 rctl = er32(RCTL); 4572 rctl &= ~E1000_RCTL_VFE; 4573 ew32(RCTL, rctl); 4574 } 4575 4576 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4577 e1000_irq_enable(adapter); 4578 } 4579 4580 static void e1000_vlan_mode(struct net_device *netdev, u32 features) 4581 { 4582 struct e1000_adapter *adapter = netdev_priv(netdev); 4583 struct e1000_hw *hw = &adapter->hw; 4584 u32 ctrl; 4585 4586 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4587 e1000_irq_disable(adapter); 4588 4589 ctrl = er32(CTRL); 4590 if (features & NETIF_F_HW_VLAN_RX) { 4591 /* enable VLAN tag insert/strip */ 4592 ctrl |= 
E1000_CTRL_VME; 4593 } else { 4594 /* disable VLAN tag insert/strip */ 4595 ctrl &= ~E1000_CTRL_VME; 4596 } 4597 ew32(CTRL, ctrl); 4598 4599 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4600 e1000_irq_enable(adapter); 4601 } 4602 4603 static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 4604 { 4605 struct e1000_adapter *adapter = netdev_priv(netdev); 4606 struct e1000_hw *hw = &adapter->hw; 4607 u32 vfta, index; 4608 4609 if ((hw->mng_cookie.status & 4610 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 4611 (vid == adapter->mng_vlan_id)) 4612 return; 4613 4614 if (!e1000_vlan_used(adapter)) 4615 e1000_vlan_filter_on_off(adapter, true); 4616 4617 /* add VID to filter table */ 4618 index = (vid >> 5) & 0x7F; 4619 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); 4620 vfta |= (1 << (vid & 0x1F)); 4621 e1000_write_vfta(hw, index, vfta); 4622 4623 set_bit(vid, adapter->active_vlans); 4624 } 4625 4626 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 4627 { 4628 struct e1000_adapter *adapter = netdev_priv(netdev); 4629 struct e1000_hw *hw = &adapter->hw; 4630 u32 vfta, index; 4631 4632 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4633 e1000_irq_disable(adapter); 4634 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4635 e1000_irq_enable(adapter); 4636 4637 /* remove VID from filter table */ 4638 index = (vid >> 5) & 0x7F; 4639 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); 4640 vfta &= ~(1 << (vid & 0x1F)); 4641 e1000_write_vfta(hw, index, vfta); 4642 4643 clear_bit(vid, adapter->active_vlans); 4644 4645 if (!e1000_vlan_used(adapter)) 4646 e1000_vlan_filter_on_off(adapter, false); 4647 } 4648 4649 static void e1000_restore_vlan(struct e1000_adapter *adapter) 4650 { 4651 u16 vid; 4652 4653 if (!e1000_vlan_used(adapter)) 4654 return; 4655 4656 e1000_vlan_filter_on_off(adapter, true); 4657 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 4658 e1000_vlan_rx_add_vid(adapter->netdev, vid); 4659 } 4660 4661 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) 4662 { 4663 struct e1000_hw *hw = &adapter->hw; 4664 4665 hw->autoneg = 0; 4666 4667 /* Make sure dplx is at most 1 bit and lsb of speed is not set 4668 * for the switch() below to work */ 4669 if ((spd & 1) || (dplx & ~1)) 4670 goto err_inval; 4671 4672 /* Fiber NICs only allow 1000 gbps Full duplex */ 4673 if ((hw->media_type == e1000_media_type_fiber) && 4674 spd != SPEED_1000 && 4675 dplx != DUPLEX_FULL) 4676 goto err_inval; 4677 4678 switch (spd + dplx) { 4679 case SPEED_10 + DUPLEX_HALF: 4680 hw->forced_speed_duplex = e1000_10_half; 4681 break; 4682 case SPEED_10 + DUPLEX_FULL: 4683 hw->forced_speed_duplex = e1000_10_full; 4684 break; 4685 case SPEED_100 + DUPLEX_HALF: 4686 hw->forced_speed_duplex = e1000_100_half; 4687 break; 4688 case SPEED_100 + DUPLEX_FULL: 4689 hw->forced_speed_duplex = e1000_100_full; 4690 break; 4691 case SPEED_1000 + DUPLEX_FULL: 4692 hw->autoneg = 1; 4693 hw->autoneg_advertised = ADVERTISE_1000_FULL; 4694 break; 4695 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 4696 default: 4697 goto err_inval; 4698 } 4699 return 0; 4700 4701 err_inval: 4702 e_err(probe, "Unsupported Speed/Duplex configuration\n"); 4703 return -EINVAL; 4704 } 4705 4706 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) 4707 { 4708 struct net_device *netdev = pci_get_drvdata(pdev); 4709 struct e1000_adapter *adapter = netdev_priv(netdev); 4710 struct e1000_hw *hw = &adapter->hw; 4711 u32 ctrl, ctrl_ext, rctl, status; 4712 u32 wufc = adapter->wol; 4713 #ifdef CONFIG_PM 
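/* The config-space save below is compiled in only when the kernel can
 * suspend and later resume us (CONFIG_PM); e1000_resume() restores the
 * saved state with pci_restore_state().  A plain shutdown or power-off
 * never comes back, so nothing needs to be preserved in that case.
 */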
4714 int retval = 0; 4715 #endif 4716 4717 netif_device_detach(netdev); 4718 4719 mutex_lock(&adapter->mutex); 4720 4721 if (netif_running(netdev)) { 4722 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); 4723 e1000_down(adapter); 4724 } 4725 4726 #ifdef CONFIG_PM 4727 retval = pci_save_state(pdev); 4728 if (retval) { 4729 mutex_unlock(&adapter->mutex); 4730 return retval; 4731 } 4732 #endif 4733 4734 status = er32(STATUS); 4735 if (status & E1000_STATUS_LU) 4736 wufc &= ~E1000_WUFC_LNKC; 4737 4738 if (wufc) { 4739 e1000_setup_rctl(adapter); 4740 e1000_set_rx_mode(netdev); 4741 4742 /* turn on all-multi mode if wake on multicast is enabled */ 4743 if (wufc & E1000_WUFC_MC) { 4744 rctl = er32(RCTL); 4745 rctl |= E1000_RCTL_MPE; 4746 ew32(RCTL, rctl); 4747 } 4748 4749 if (hw->mac_type >= e1000_82540) { 4750 ctrl = er32(CTRL); 4751 /* advertise wake from D3Cold */ 4752 #define E1000_CTRL_ADVD3WUC 0x00100000 4753 /* phy power management enable */ 4754 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 4755 ctrl |= E1000_CTRL_ADVD3WUC | 4756 E1000_CTRL_EN_PHY_PWR_MGMT; 4757 ew32(CTRL, ctrl); 4758 } 4759 4760 if (hw->media_type == e1000_media_type_fiber || 4761 hw->media_type == e1000_media_type_internal_serdes) { 4762 /* keep the laser running in D3 */ 4763 ctrl_ext = er32(CTRL_EXT); 4764 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; 4765 ew32(CTRL_EXT, ctrl_ext); 4766 } 4767 4768 ew32(WUC, E1000_WUC_PME_EN); 4769 ew32(WUFC, wufc); 4770 } else { 4771 ew32(WUC, 0); 4772 ew32(WUFC, 0); 4773 } 4774 4775 e1000_release_manageability(adapter); 4776 4777 *enable_wake = !!wufc; 4778 4779 /* make sure adapter isn't asleep if manageability is enabled */ 4780 if (adapter->en_mng_pt) 4781 *enable_wake = true; 4782 4783 if (netif_running(netdev)) 4784 e1000_free_irq(adapter); 4785 4786 mutex_unlock(&adapter->mutex); 4787 4788 pci_disable_device(pdev); 4789 4790 return 0; 4791 } 4792 4793 #ifdef CONFIG_PM 4794 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) 4795 { 4796 int retval; 4797 bool wake; 4798 4799 retval = __e1000_shutdown(pdev, &wake); 4800 if (retval) 4801 return retval; 4802 4803 if (wake) { 4804 pci_prepare_to_sleep(pdev); 4805 } else { 4806 pci_wake_from_d3(pdev, false); 4807 pci_set_power_state(pdev, PCI_D3hot); 4808 } 4809 4810 return 0; 4811 } 4812 4813 static int e1000_resume(struct pci_dev *pdev) 4814 { 4815 struct net_device *netdev = pci_get_drvdata(pdev); 4816 struct e1000_adapter *adapter = netdev_priv(netdev); 4817 struct e1000_hw *hw = &adapter->hw; 4818 u32 err; 4819 4820 pci_set_power_state(pdev, PCI_D0); 4821 pci_restore_state(pdev); 4822 pci_save_state(pdev); 4823 4824 if (adapter->need_ioport) 4825 err = pci_enable_device(pdev); 4826 else 4827 err = pci_enable_device_mem(pdev); 4828 if (err) { 4829 pr_err("Cannot enable PCI device from suspend\n"); 4830 return err; 4831 } 4832 pci_set_master(pdev); 4833 4834 pci_enable_wake(pdev, PCI_D3hot, 0); 4835 pci_enable_wake(pdev, PCI_D3cold, 0); 4836 4837 if (netif_running(netdev)) { 4838 err = e1000_request_irq(adapter); 4839 if (err) 4840 return err; 4841 } 4842 4843 e1000_power_up_phy(adapter); 4844 e1000_reset(adapter); 4845 ew32(WUS, ~0); 4846 4847 e1000_init_manageability(adapter); 4848 4849 if (netif_running(netdev)) 4850 e1000_up(adapter); 4851 4852 netif_device_attach(netdev); 4853 4854 return 0; 4855 } 4856 #endif 4857 4858 static void e1000_shutdown(struct pci_dev *pdev) 4859 { 4860 bool wake; 4861 4862 __e1000_shutdown(pdev, &wake); 4863 4864 if (system_state == SYSTEM_POWER_OFF) { 4865 pci_wake_from_d3(pdev, 
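/* wake was computed by __e1000_shutdown() above: it is true when any
 * Wake-on-LAN filter (WUFC) was armed or when manageability is enabled,
 * so in that case the part is left able to wake the system from D3.
 */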
wake); 4866 pci_set_power_state(pdev, PCI_D3hot); 4867 } 4868 } 4869 4870 #ifdef CONFIG_NET_POLL_CONTROLLER 4871 /* 4872 * Polling 'interrupt' - used by things like netconsole to send skbs 4873 * without having to re-enable interrupts. It's not called while 4874 * the interrupt routine is executing. 4875 */ 4876 static void e1000_netpoll(struct net_device *netdev) 4877 { 4878 struct e1000_adapter *adapter = netdev_priv(netdev); 4879 4880 disable_irq(adapter->pdev->irq); 4881 e1000_intr(adapter->pdev->irq, netdev); 4882 enable_irq(adapter->pdev->irq); 4883 } 4884 #endif 4885 4886 /** 4887 * e1000_io_error_detected - called when PCI error is detected 4888 * @pdev: Pointer to PCI device 4889 * @state: The current pci connection state 4890 * 4891 * This function is called after a PCI bus error affecting 4892 * this device has been detected. 4893 */ 4894 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, 4895 pci_channel_state_t state) 4896 { 4897 struct net_device *netdev = pci_get_drvdata(pdev); 4898 struct e1000_adapter *adapter = netdev_priv(netdev); 4899 4900 netif_device_detach(netdev); 4901 4902 if (state == pci_channel_io_perm_failure) 4903 return PCI_ERS_RESULT_DISCONNECT; 4904 4905 if (netif_running(netdev)) 4906 e1000_down(adapter); 4907 pci_disable_device(pdev); 4908 4909 /* Request a slot slot reset. */ 4910 return PCI_ERS_RESULT_NEED_RESET; 4911 } 4912 4913 /** 4914 * e1000_io_slot_reset - called after the pci bus has been reset. 4915 * @pdev: Pointer to PCI device 4916 * 4917 * Restart the card from scratch, as if from a cold-boot. Implementation 4918 * resembles the first-half of the e1000_resume routine. 4919 */ 4920 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) 4921 { 4922 struct net_device *netdev = pci_get_drvdata(pdev); 4923 struct e1000_adapter *adapter = netdev_priv(netdev); 4924 struct e1000_hw *hw = &adapter->hw; 4925 int err; 4926 4927 if (adapter->need_ioport) 4928 err = pci_enable_device(pdev); 4929 else 4930 err = pci_enable_device_mem(pdev); 4931 if (err) { 4932 pr_err("Cannot re-enable PCI device after reset.\n"); 4933 return PCI_ERS_RESULT_DISCONNECT; 4934 } 4935 pci_set_master(pdev); 4936 4937 pci_enable_wake(pdev, PCI_D3hot, 0); 4938 pci_enable_wake(pdev, PCI_D3cold, 0); 4939 4940 e1000_reset(adapter); 4941 ew32(WUS, ~0); 4942 4943 return PCI_ERS_RESULT_RECOVERED; 4944 } 4945 4946 /** 4947 * e1000_io_resume - called when traffic can start flowing again. 4948 * @pdev: Pointer to PCI device 4949 * 4950 * This callback is called when the error recovery driver tells us that 4951 * its OK to resume normal operation. Implementation resembles the 4952 * second-half of the e1000_resume routine. 4953 */ 4954 static void e1000_io_resume(struct pci_dev *pdev) 4955 { 4956 struct net_device *netdev = pci_get_drvdata(pdev); 4957 struct e1000_adapter *adapter = netdev_priv(netdev); 4958 4959 e1000_init_manageability(adapter); 4960 4961 if (netif_running(netdev)) { 4962 if (e1000_up(adapter)) { 4963 pr_info("can't bring device back up after reset\n"); 4964 return; 4965 } 4966 } 4967 4968 netif_device_attach(netdev); 4969 } 4970 4971 /* e1000_main.c */ 4972
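/* Summary of the PCI error-recovery callbacks above, with a sketch of
 * how they are normally wired together; the struct below is only an
 * illustration, the real registration lives in the driver's pci_driver
 * definition:
 *
 *      static struct pci_error_handlers e1000_err_handler_sketch = {
 *              .error_detected = e1000_io_error_detected,
 *              .slot_reset     = e1000_io_slot_reset,
 *              .resume         = e1000_io_resume,
 *      };
 *
 * error_detected detaches the interface and requests a slot reset,
 * slot_reset re-enables the device and restores sane hardware state, and
 * resume restarts traffic once the PCI core reports recovery succeeded.
 */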