/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "e1000.h"
#include <net/ip6_checksum.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll (struct net_device *netdev);
#endif

#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");
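/* Note on copybreak semantics (descriptive, not from the original header):
 * a received packet of copybreak bytes or less is expected to be copied
 * into a new, minimally sized skb in the receive path so the original
 * full-sized receive buffer can be recycled; a value of 0 disables the
 * copy altogether.
 */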
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static struct pci_driver e1000_driver = {
	.name = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe = e1000_probe,
	.remove = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = e1000_suspend,
	.resume = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * e1000_get_hw_dev - return device
 * used by hardware layer to print debugging information
 *
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init e1000_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);

	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for "
				"packets <= %u bytes\n", copybreak);
	}
	return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irq_handler_t handler = e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
			  netdev);
	if (err) {
		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
	}

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}

static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			e1000_vlan_rx_add_vid(netdev, vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}

static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		ew32(MANC, manc);
	}
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		ew32(MANC, manc);
	}
}

/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter = private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
}

int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 *
 **/

void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}

static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down *
	 * The PHY cannot be powered down if any of the following is true *
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		msleep(1);
	}
out:
	return;
}

static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);
	cancel_work_sync(&adapter->reset_task);
	cancel_delayed_work_sync(&adapter->watchdog_task);
	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);
}

void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;


	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/*
	 * Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt.  Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}

static void e1000_reinit_safe(struct e1000_adapter *adapter)
{
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	mutex_lock(&adapter->mutex);
	e1000_down(adapter);
	e1000_up(adapter);
	mutex_unlock(&adapter->mutex);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	/* if rtnl_lock is not held the call path is bogus */
	ASSERT_RTNL();
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/*
		 * the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on rx space, rx wins and must trump tx
			 * adjustment or use Early Receive if available */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/*
	 * flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
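	/* Worked example of the calculation below (values assumed for
	 * illustration only): with a 48 KB Rx packet buffer (pba = 48) and a
	 * standard 1518-byte max frame, pba << 10 = 49152 bytes, so
	 *   hwm = min(49152 * 9 / 10, 49152 - 1518) = min(44236, 47634) = 44236,
	 * stored as 44232 after masking to 8-byte granularity, with the low
	 * water mark 8 bytes below that at 44224.
	 */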
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}

/**
 * Dump the eeprom for users having checksum issues
 **/
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data) {
		pr_err("Unable to allocate memory to dump EEPROM data\n");
		return;
	}

	ops->get_eeprom(netdev, &eeprom, data);
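	/* A valid EEPROM image is laid out so that the words up to and
	 * including the checksum word at offset EEPROM_CHECKSUM_REG sum to
	 * EEPROM_SUM.  Below, the stored checksum word is read out and
	 * compared against EEPROM_SUM minus the 16-bit sum of the preceding
	 * words (byte pairs reassembled little-endian).
	 */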
	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;

	pr_err("/*********************/\n");
	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
	pr_err("Calculated              : 0x%04x\n", csum_new);

	pr_err("Offset    Values\n");
	pr_err("========  ======\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	pr_err("Include this output when contacting your support provider.\n");
	pr_err("This is not a software error! Something bad happened to\n");
	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
	pr_err("result in further problems, possibly loss of data,\n");
	pr_err("corruption or system hangs!\n");
	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
	pr_err("which is invalid and requires you to set the proper MAC\n");
	pr_err("address manually before continuing to enable this network\n");
	pr_err("device. Please inspect the EEPROM dump and report the\n");
	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
	pr_err("/*********************/\n");

	kfree(data);
}

/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
static int e1000_is_need_ioport(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case E1000_DEV_ID_82540EM:
	case E1000_DEV_ID_82540EM_LOM:
	case E1000_DEV_ID_82540EP:
	case E1000_DEV_ID_82540EP_LOM:
	case E1000_DEV_ID_82540EP_LP:
	case E1000_DEV_ID_82541EI:
	case E1000_DEV_ID_82541EI_MOBILE:
	case E1000_DEV_ID_82541ER:
	case E1000_DEV_ID_82541ER_LOM:
	case E1000_DEV_ID_82541GI:
	case E1000_DEV_ID_82541GI_LF:
	case E1000_DEV_ID_82541GI_MOBILE:
	case E1000_DEV_ID_82544EI_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82544GC_COPPER:
	case E1000_DEV_ID_82544GC_LOM:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82546EB_COPPER:
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
		return true;
	default:
		return false;
	}
}

static netdev_features_t e1000_fix_features(struct net_device *netdev,
	netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

static int e1000_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_RX)
		e1000_vlan_mode(netdev, features);

	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	else
		e1000_reset(adapter);

	return 0;
}

static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats		= e1000_get_stats,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_fix_features	= e1000_fix_features,
	.ndo_set_features	= e1000_set_features,
};

/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function
 * that can be called very early at init (just after struct allocation).
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 * Returns negative error codes if MAC type setup fails.
 */
static int e1000_init_hw_struct(struct e1000_adapter *adapter,
				struct e1000_hw *hw)
{
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	hw->max_frame_size = adapter->netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */
	if (e1000_set_mac_type(hw)) {
		e_err(probe, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);
	e1000_get_bus_info(hw);

	hw->wait_autoneg_complete = false;
	hw->tbi_compatibility_en = true;
	hw->adaptive_ifs = true;

	/* Copper options */

	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = false;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	return 0;
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;

	static int cards_found = 0;
	static int global_quad_port_a = 0; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = (1 << debug) - 1;
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	if (adapter->need_ioport) {
		for (i = BAR_1; i <= BAR_5; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/*
	 * there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware.
	 * There are some 32-bit adapters that Tx hang when given
	 * 64-bit DMA addresses
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		/*
		 * according to DMA-API-HOWTO, coherent calls will always
		 * succeed if the set call did
		 */
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	}

	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* setup the private structure */

	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;
	if (hw->mac_type == e1000_ce4100) {
		hw->ce4100_gbe_mdio_base_virt =
			ioremap(pci_resource_start(pdev, BAR_1),
				pci_resource_len(pdev, BAR_1));

		if (!hw->ce4100_gbe_mdio_base_virt)
			goto err_mdio_ioremap;
	}

	if (hw->mac_type >= e1000_82543) {
		netdev->hw_features = NETIF_F_SG |
				      NETIF_F_HW_CSUM |
				      NETIF_F_HW_VLAN_RX;
		netdev->features = NETIF_F_HW_VLAN_TX |
				   NETIF_F_HW_VLAN_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	    (hw->mac_type != e1000_82547))
		netdev->hw_features |= NETIF_F_TSO;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= NETIF_F_RXCSUM;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_HW_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state */

	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/*
		 * set MAC address to all zeroes to invalidate and temporarily
		 * disable this device for the user.
		 * This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initialization here due to bad MAC address */
	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr))
		e_err(probe, "Invalid MAC Address\n");


	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1){
			e1000_read_eeprom(hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = true;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* Auto detect PHY address */
	if (hw->mac_type == e1000_ce4100) {
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
			if (tmp == 0 || tmp == 0xFF) {
				if (i == 31)
					goto err_eeprom;
				continue;
			} else
				break;
		}
	}

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_vlan_mode(netdev, netdev->features);

	/* print bus type/speed/width info */
	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
	       netdev->dev_addr);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

err_register:
err_eeprom:
	e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_dma:
err_sw_init:
err_mdio_ioremap:
	iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
	pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	e1000_down_and_stop(adapter);
	e1000_release_manageability(adapter);

	unregister_netdev(netdev);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	if (hw->mac_type == e1000_ce4100)
		iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * e1000_init_hw_struct MUST be called before this function
 **/

static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		e_err(probe, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);
	mutex_init(&adapter->mutex);

	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}

/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/

static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
{
	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/

static int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}

/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it) */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
	}

	return 0;
}

/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
				  unsigned long len)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long begin = (unsigned long)start;
	unsigned long end = begin + len;

	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23 */
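	/* For example (illustrative addresses only): a 16 KB descriptor ring
	 * starting at 0xfffff000 ends at 0x100002fff; begin ^ (end - 1) then
	 * has bits set above bit 15, so the check below returns false and
	 * the caller retries with a replacement allocation.
	 */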
	if (hw->mac_type == e1000_82545 ||
	    hw->mac_type == e1000_ce4100 ||
	    hw->mac_type == e1000_82546) {
		return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
	}

	return true;
}

/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/

static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info) {
		e_err(probe, "Unable to allocate memory for the Tx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		e_err(probe, "Unable to allocate memory for the Tx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
		      txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
					  txdr->dma);
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory "
			      "for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}

/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 * 				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
			for (i-- ; i >= 0; i--)
				e1000_free_tx_resources(adapter,
							&adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tdlen, tctl, tipg;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */

	switch (adapter->num_tx_queues) {
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		ew32(TDLEN, tdlen);
		ew32(TDBAH, (tdba >> 32));
		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
		ew32(TDT, 0);
		ew32(TDH, 0);
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
		break;
	}
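
	/* The three inter-packet gap values chosen below share a single
	 * register: IPGT sits in the low bits and IPGR1/IPGR2 are merged in
	 * via E1000_TIPG_IPGR1_SHIFT and E1000_TIPG_IPGR2_SHIFT, so the one
	 * ew32(TIPG, ...) write programs all three fields at once.
	 */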
	/* Set the default values for the Tx Inter Packet Gap timer */
	if ((hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);

	/* Set the Tx Interrupt Delay register */

	ew32(TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		ew32(TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */

	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path. */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = true;

	ew32(TCTL, tctl);

}

/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/

static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info) {
		e_err(probe, "Unable to allocate memory for the Rx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	desc_len = sizeof(struct e1000_rx_desc);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);

	if (!rxdr->desc) {
		e_err(probe, "Unable to allocate memory for the Rx descriptor "
		      "ring\n");
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
		      rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate memory for the Rx "
			      "descriptor ring\n");
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for "
			      "the Rx descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->rx_skb_top = NULL;

	return 0;
}

/**
 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
 * 				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
			for (i-- ; i >= 0; i--)
				e1000_free_rx_resources(adapter,
							&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = er32(RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);

	if (hw->tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
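	/* The receive buffer size is encoded in the RCTL BSIZE field, scaled
	 * by BSEX: with BSEX clear the field selects the 256..2048 byte
	 * sizes, and with BSEX set it selects the 4096..16384 byte sizes.
	 * BSEX is set by default above and cleared again only for the
	 * 2048-byte case below.
	 */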
1848 **/ 1849 1850 static void e1000_configure_rx(struct e1000_adapter *adapter) 1851 { 1852 u64 rdba; 1853 struct e1000_hw *hw = &adapter->hw; 1854 u32 rdlen, rctl, rxcsum; 1855 1856 if (adapter->netdev->mtu > ETH_DATA_LEN) { 1857 rdlen = adapter->rx_ring[0].count * 1858 sizeof(struct e1000_rx_desc); 1859 adapter->clean_rx = e1000_clean_jumbo_rx_irq; 1860 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; 1861 } else { 1862 rdlen = adapter->rx_ring[0].count * 1863 sizeof(struct e1000_rx_desc); 1864 adapter->clean_rx = e1000_clean_rx_irq; 1865 adapter->alloc_rx_buf = e1000_alloc_rx_buffers; 1866 } 1867 1868 /* disable receives while setting up the descriptors */ 1869 rctl = er32(RCTL); 1870 ew32(RCTL, rctl & ~E1000_RCTL_EN); 1871 1872 /* set the Receive Delay Timer Register */ 1873 ew32(RDTR, adapter->rx_int_delay); 1874 1875 if (hw->mac_type >= e1000_82540) { 1876 ew32(RADV, adapter->rx_abs_int_delay); 1877 if (adapter->itr_setting != 0) 1878 ew32(ITR, 1000000000 / (adapter->itr * 256)); 1879 } 1880 1881 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1882 * the Base and Length of the Rx Descriptor Ring */ 1883 switch (adapter->num_rx_queues) { 1884 case 1: 1885 default: 1886 rdba = adapter->rx_ring[0].dma; 1887 ew32(RDLEN, rdlen); 1888 ew32(RDBAH, (rdba >> 32)); 1889 ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); 1890 ew32(RDT, 0); 1891 ew32(RDH, 0); 1892 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH); 1893 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT); 1894 break; 1895 } 1896 1897 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 1898 if (hw->mac_type >= e1000_82543) { 1899 rxcsum = er32(RXCSUM); 1900 if (adapter->rx_csum) 1901 rxcsum |= E1000_RXCSUM_TUOFL; 1902 else 1903 /* don't need to clear IPPCSE as it defaults to 0 */ 1904 rxcsum &= ~E1000_RXCSUM_TUOFL; 1905 ew32(RXCSUM, rxcsum); 1906 } 1907 1908 /* Enable Receives */ 1909 ew32(RCTL, rctl | E1000_RCTL_EN); 1910 } 1911 1912 /** 1913 * e1000_free_tx_resources - Free Tx Resources per Queue 1914 * @adapter: board private structure 1915 * @tx_ring: Tx descriptor ring for a specific queue 1916 * 1917 * Free all transmit software resources 1918 **/ 1919 1920 static void e1000_free_tx_resources(struct e1000_adapter *adapter, 1921 struct e1000_tx_ring *tx_ring) 1922 { 1923 struct pci_dev *pdev = adapter->pdev; 1924 1925 e1000_clean_tx_ring(adapter, tx_ring); 1926 1927 vfree(tx_ring->buffer_info); 1928 tx_ring->buffer_info = NULL; 1929 1930 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 1931 tx_ring->dma); 1932 1933 tx_ring->desc = NULL; 1934 } 1935 1936 /** 1937 * e1000_free_all_tx_resources - Free Tx Resources for All Queues 1938 * @adapter: board private structure 1939 * 1940 * Free all transmit software resources 1941 **/ 1942 1943 void e1000_free_all_tx_resources(struct e1000_adapter *adapter) 1944 { 1945 int i; 1946 1947 for (i = 0; i < adapter->num_tx_queues; i++) 1948 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); 1949 } 1950 1951 static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, 1952 struct e1000_buffer *buffer_info) 1953 { 1954 if (buffer_info->dma) { 1955 if (buffer_info->mapped_as_page) 1956 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, 1957 buffer_info->length, DMA_TO_DEVICE); 1958 else 1959 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, 1960 buffer_info->length, 1961 DMA_TO_DEVICE); 1962 buffer_info->dma = 0; 1963 } 1964 if (buffer_info->skb) { 1965 
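/* skb was never handed up to the stack; dev_kfree_skb_any() is safe from both interrupt and process context */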
dev_kfree_skb_any(buffer_info->skb); 1966 buffer_info->skb = NULL; 1967 } 1968 buffer_info->time_stamp = 0; 1969 /* buffer_info must be completely set up in the transmit path */ 1970 } 1971 1972 /** 1973 * e1000_clean_tx_ring - Free Tx Buffers 1974 * @adapter: board private structure 1975 * @tx_ring: ring to be cleaned 1976 **/ 1977 1978 static void e1000_clean_tx_ring(struct e1000_adapter *adapter, 1979 struct e1000_tx_ring *tx_ring) 1980 { 1981 struct e1000_hw *hw = &adapter->hw; 1982 struct e1000_buffer *buffer_info; 1983 unsigned long size; 1984 unsigned int i; 1985 1986 /* Free all the Tx ring sk_buffs */ 1987 1988 for (i = 0; i < tx_ring->count; i++) { 1989 buffer_info = &tx_ring->buffer_info[i]; 1990 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 1991 } 1992 1993 size = sizeof(struct e1000_buffer) * tx_ring->count; 1994 memset(tx_ring->buffer_info, 0, size); 1995 1996 /* Zero out the descriptor ring */ 1997 1998 memset(tx_ring->desc, 0, tx_ring->size); 1999 2000 tx_ring->next_to_use = 0; 2001 tx_ring->next_to_clean = 0; 2002 tx_ring->last_tx_tso = false; 2003 2004 writel(0, hw->hw_addr + tx_ring->tdh); 2005 writel(0, hw->hw_addr + tx_ring->tdt); 2006 } 2007 2008 /** 2009 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues 2010 * @adapter: board private structure 2011 **/ 2012 2013 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) 2014 { 2015 int i; 2016 2017 for (i = 0; i < adapter->num_tx_queues; i++) 2018 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); 2019 } 2020 2021 /** 2022 * e1000_free_rx_resources - Free Rx Resources 2023 * @adapter: board private structure 2024 * @rx_ring: ring to clean the resources from 2025 * 2026 * Free all receive software resources 2027 **/ 2028 2029 static void e1000_free_rx_resources(struct e1000_adapter *adapter, 2030 struct e1000_rx_ring *rx_ring) 2031 { 2032 struct pci_dev *pdev = adapter->pdev; 2033 2034 e1000_clean_rx_ring(adapter, rx_ring); 2035 2036 vfree(rx_ring->buffer_info); 2037 rx_ring->buffer_info = NULL; 2038 2039 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2040 rx_ring->dma); 2041 2042 rx_ring->desc = NULL; 2043 } 2044 2045 /** 2046 * e1000_free_all_rx_resources - Free Rx Resources for All Queues 2047 * @adapter: board private structure 2048 * 2049 * Free all receive software resources 2050 **/ 2051 2052 void e1000_free_all_rx_resources(struct e1000_adapter *adapter) 2053 { 2054 int i; 2055 2056 for (i = 0; i < adapter->num_rx_queues; i++) 2057 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); 2058 } 2059 2060 /** 2061 * e1000_clean_rx_ring - Free Rx Buffers per Queue 2062 * @adapter: board private structure 2063 * @rx_ring: ring to free buffers from 2064 **/ 2065 2066 static void e1000_clean_rx_ring(struct e1000_adapter *adapter, 2067 struct e1000_rx_ring *rx_ring) 2068 { 2069 struct e1000_hw *hw = &adapter->hw; 2070 struct e1000_buffer *buffer_info; 2071 struct pci_dev *pdev = adapter->pdev; 2072 unsigned long size; 2073 unsigned int i; 2074 2075 /* Free all the Rx ring sk_buffs */ 2076 for (i = 0; i < rx_ring->count; i++) { 2077 buffer_info = &rx_ring->buffer_info[i]; 2078 if (buffer_info->dma && 2079 adapter->clean_rx == e1000_clean_rx_irq) { 2080 dma_unmap_single(&pdev->dev, buffer_info->dma, 2081 buffer_info->length, 2082 DMA_FROM_DEVICE); 2083 } else if (buffer_info->dma && 2084 adapter->clean_rx == e1000_clean_jumbo_rx_irq) { 2085 dma_unmap_page(&pdev->dev, buffer_info->dma, 2086 buffer_info->length, 2087 DMA_FROM_DEVICE); 2088 } 2089 2090 buffer_info->dma = 0; 2091 
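/* release whichever backing store this buffer used: a page for the jumbo receive path, or an skb for the legacy path */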
if (buffer_info->page) { 2092 put_page(buffer_info->page); 2093 buffer_info->page = NULL; 2094 } 2095 if (buffer_info->skb) { 2096 dev_kfree_skb(buffer_info->skb); 2097 buffer_info->skb = NULL; 2098 } 2099 } 2100 2101 /* there also may be some cached data from a chained receive */ 2102 if (rx_ring->rx_skb_top) { 2103 dev_kfree_skb(rx_ring->rx_skb_top); 2104 rx_ring->rx_skb_top = NULL; 2105 } 2106 2107 size = sizeof(struct e1000_buffer) * rx_ring->count; 2108 memset(rx_ring->buffer_info, 0, size); 2109 2110 /* Zero out the descriptor ring */ 2111 memset(rx_ring->desc, 0, rx_ring->size); 2112 2113 rx_ring->next_to_clean = 0; 2114 rx_ring->next_to_use = 0; 2115 2116 writel(0, hw->hw_addr + rx_ring->rdh); 2117 writel(0, hw->hw_addr + rx_ring->rdt); 2118 } 2119 2120 /** 2121 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues 2122 * @adapter: board private structure 2123 **/ 2124 2125 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) 2126 { 2127 int i; 2128 2129 for (i = 0; i < adapter->num_rx_queues; i++) 2130 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); 2131 } 2132 2133 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset 2134 * and memory write and invalidate disabled for certain operations 2135 */ 2136 static void e1000_enter_82542_rst(struct e1000_adapter *adapter) 2137 { 2138 struct e1000_hw *hw = &adapter->hw; 2139 struct net_device *netdev = adapter->netdev; 2140 u32 rctl; 2141 2142 e1000_pci_clear_mwi(hw); 2143 2144 rctl = er32(RCTL); 2145 rctl |= E1000_RCTL_RST; 2146 ew32(RCTL, rctl); 2147 E1000_WRITE_FLUSH(); 2148 mdelay(5); 2149 2150 if (netif_running(netdev)) 2151 e1000_clean_all_rx_rings(adapter); 2152 } 2153 2154 static void e1000_leave_82542_rst(struct e1000_adapter *adapter) 2155 { 2156 struct e1000_hw *hw = &adapter->hw; 2157 struct net_device *netdev = adapter->netdev; 2158 u32 rctl; 2159 2160 rctl = er32(RCTL); 2161 rctl &= ~E1000_RCTL_RST; 2162 ew32(RCTL, rctl); 2163 E1000_WRITE_FLUSH(); 2164 mdelay(5); 2165 2166 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) 2167 e1000_pci_set_mwi(hw); 2168 2169 if (netif_running(netdev)) { 2170 /* No need to loop, because 82542 supports only 1 queue */ 2171 struct e1000_rx_ring *ring = &adapter->rx_ring[0]; 2172 e1000_configure_rx(adapter); 2173 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); 2174 } 2175 } 2176 2177 /** 2178 * e1000_set_mac - Change the Ethernet Address of the NIC 2179 * @netdev: network interface device structure 2180 * @p: pointer to an address structure 2181 * 2182 * Returns 0 on success, negative on failure 2183 **/ 2184 2185 static int e1000_set_mac(struct net_device *netdev, void *p) 2186 { 2187 struct e1000_adapter *adapter = netdev_priv(netdev); 2188 struct e1000_hw *hw = &adapter->hw; 2189 struct sockaddr *addr = p; 2190 2191 if (!is_valid_ether_addr(addr->sa_data)) 2192 return -EADDRNOTAVAIL; 2193 2194 /* 82542 2.0 needs to be in reset to write receive address registers */ 2195 2196 if (hw->mac_type == e1000_82542_rev2_0) 2197 e1000_enter_82542_rst(adapter); 2198 2199 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2200 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); 2201 2202 e1000_rar_set(hw, hw->mac_addr, 0); 2203 2204 if (hw->mac_type == e1000_82542_rev2_0) 2205 e1000_leave_82542_rst(adapter); 2206 2207 return 0; 2208 } 2209 2210 /** 2211 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 2212 * @netdev: network interface device structure 2213 * 2214 * The set_rx_mode entry point is called whenever the 
unicast or multicast 2215 * address lists or the network interface flags are updated. This routine is 2216 * responsible for configuring the hardware for proper unicast, multicast, 2217 * promiscuous mode, and all-multi behavior. 2218 **/ 2219 2220 static void e1000_set_rx_mode(struct net_device *netdev) 2221 { 2222 struct e1000_adapter *adapter = netdev_priv(netdev); 2223 struct e1000_hw *hw = &adapter->hw; 2224 struct netdev_hw_addr *ha; 2225 bool use_uc = false; 2226 u32 rctl; 2227 u32 hash_value; 2228 int i, rar_entries = E1000_RAR_ENTRIES; 2229 int mta_reg_count = E1000_NUM_MTA_REGISTERS; 2230 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); 2231 2232 if (!mcarray) { 2233 e_err(probe, "memory allocation failed\n"); 2234 return; 2235 } 2236 2237 /* Check for Promiscuous and All Multicast modes */ 2238 2239 rctl = er32(RCTL); 2240 2241 if (netdev->flags & IFF_PROMISC) { 2242 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2243 rctl &= ~E1000_RCTL_VFE; 2244 } else { 2245 if (netdev->flags & IFF_ALLMULTI) 2246 rctl |= E1000_RCTL_MPE; 2247 else 2248 rctl &= ~E1000_RCTL_MPE; 2249 /* Enable VLAN filter if there is a VLAN */ 2250 if (e1000_vlan_used(adapter)) 2251 rctl |= E1000_RCTL_VFE; 2252 } 2253 2254 if (netdev_uc_count(netdev) > rar_entries - 1) { 2255 rctl |= E1000_RCTL_UPE; 2256 } else if (!(netdev->flags & IFF_PROMISC)) { 2257 rctl &= ~E1000_RCTL_UPE; 2258 use_uc = true; 2259 } 2260 2261 ew32(RCTL, rctl); 2262 2263 /* 82542 2.0 needs to be in reset to write receive address registers */ 2264 2265 if (hw->mac_type == e1000_82542_rev2_0) 2266 e1000_enter_82542_rst(adapter); 2267 2268 /* load the first 14 addresses into the exact filters 1-14. Unicast 2269 * addresses take precedence to avoid disabling unicast filtering 2270 * when possible. 
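 * Multicast addresses that do not fit in the remaining RAR entries fall back to the hash table below; unicast overflow was already handled above by enabling unicast promiscuous mode.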
2271 * 2272 * RAR 0 is used for the station MAC address 2273 * if there are not 14 addresses, go ahead and clear the filters 2274 */ 2275 i = 1; 2276 if (use_uc) 2277 netdev_for_each_uc_addr(ha, netdev) { 2278 if (i == rar_entries) 2279 break; 2280 e1000_rar_set(hw, ha->addr, i++); 2281 } 2282 2283 netdev_for_each_mc_addr(ha, netdev) { 2284 if (i == rar_entries) { 2285 /* load any remaining addresses into the hash table */ 2286 u32 hash_reg, hash_bit, mta; 2287 hash_value = e1000_hash_mc_addr(hw, ha->addr); 2288 hash_reg = (hash_value >> 5) & 0x7F; 2289 hash_bit = hash_value & 0x1F; 2290 mta = (1 << hash_bit); 2291 mcarray[hash_reg] |= mta; 2292 } else { 2293 e1000_rar_set(hw, ha->addr, i++); 2294 } 2295 } 2296 2297 for (; i < rar_entries; i++) { 2298 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); 2299 E1000_WRITE_FLUSH(); 2300 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); 2301 E1000_WRITE_FLUSH(); 2302 } 2303 2304 /* write the hash table completely; write from bottom to top to avoid 2305 * both stupid write-combining chipsets and flushing each write */ 2306 for (i = mta_reg_count - 1; i >= 0 ; i--) { 2307 /* 2308 * The 82544 has an erratum where writing odd 2309 * offsets overwrites the previous even offset, but writing 2310 * backwards over the range solves the issue by always 2311 * writing the odd offset first 2312 */ 2313 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]); 2314 } 2315 E1000_WRITE_FLUSH(); 2316 2317 if (hw->mac_type == e1000_82542_rev2_0) 2318 e1000_leave_82542_rst(adapter); 2319 2320 kfree(mcarray); 2321 } 2322 2323 /** 2324 * e1000_update_phy_info_task - get phy info 2325 * @work: work struct contained inside adapter struct 2326 * 2327 * Need to wait a few seconds after link up to get diagnostic information from 2328 * the phy 2329 */ 2330 static void e1000_update_phy_info_task(struct work_struct *work) 2331 { 2332 struct e1000_adapter *adapter = container_of(work, 2333 struct e1000_adapter, 2334 phy_info_task.work); 2335 if (test_bit(__E1000_DOWN, &adapter->flags)) 2336 return; 2337 mutex_lock(&adapter->mutex); 2338 e1000_phy_get_info(&adapter->hw, &adapter->phy_info); 2339 mutex_unlock(&adapter->mutex); 2340 } 2341 2342 /** 2343 * e1000_82547_tx_fifo_stall_task - restart transmits once the 82547 Tx FIFO has drained 2344 * @work: work struct contained inside adapter struct 2345 **/ 2346 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) 2347 { 2348 struct e1000_adapter *adapter = container_of(work, 2349 struct e1000_adapter, 2350 fifo_stall_task.work); 2351 struct e1000_hw *hw = &adapter->hw; 2352 struct net_device *netdev = adapter->netdev; 2353 u32 tctl; 2354 2355 if (test_bit(__E1000_DOWN, &adapter->flags)) 2356 return; 2357 mutex_lock(&adapter->mutex); 2358 if (atomic_read(&adapter->tx_fifo_stall)) { 2359 if ((er32(TDT) == er32(TDH)) && 2360 (er32(TDFT) == er32(TDFH)) && 2361 (er32(TDFTS) == er32(TDFHS))) { 2362 tctl = er32(TCTL); 2363 ew32(TCTL, tctl & ~E1000_TCTL_EN); 2364 ew32(TDFT, adapter->tx_head_addr); 2365 ew32(TDFH, adapter->tx_head_addr); 2366 ew32(TDFTS, adapter->tx_head_addr); 2367 ew32(TDFHS, adapter->tx_head_addr); 2368 ew32(TCTL, tctl); 2369 E1000_WRITE_FLUSH(); 2370 2371 adapter->tx_fifo_head = 0; 2372 atomic_set(&adapter->tx_fifo_stall, 0); 2373 netif_wake_queue(netdev); 2374 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) { 2375 schedule_delayed_work(&adapter->fifo_stall_task, 1); 2376 } 2377 } 2378 mutex_unlock(&adapter->mutex); 2379 } 2380 2381 bool e1000_has_link(struct e1000_adapter *adapter) 2382 { 2383 struct e1000_hw *hw = &adapter->hw; 2384 bool
link_active = false; 2385 2386 /* get_link_status is set on LSC (link status) interrupt or rx 2387 * sequence error interrupt (except on intel ce4100). 2388 * get_link_status will stay false until the 2389 * e1000_check_for_link establishes link for copper adapters 2390 * ONLY 2391 */ 2392 switch (hw->media_type) { 2393 case e1000_media_type_copper: 2394 if (hw->mac_type == e1000_ce4100) 2395 hw->get_link_status = 1; 2396 if (hw->get_link_status) { 2397 e1000_check_for_link(hw); 2398 link_active = !hw->get_link_status; 2399 } else { 2400 link_active = true; 2401 } 2402 break; 2403 case e1000_media_type_fiber: 2404 e1000_check_for_link(hw); 2405 link_active = !!(er32(STATUS) & E1000_STATUS_LU); 2406 break; 2407 case e1000_media_type_internal_serdes: 2408 e1000_check_for_link(hw); 2409 link_active = hw->serdes_has_link; 2410 break; 2411 default: 2412 break; 2413 } 2414 2415 return link_active; 2416 } 2417 2418 /** 2419 * e1000_watchdog - work function 2420 * @work: work struct contained inside adapter struct 2421 **/ 2422 static void e1000_watchdog(struct work_struct *work) 2423 { 2424 struct e1000_adapter *adapter = container_of(work, 2425 struct e1000_adapter, 2426 watchdog_task.work); 2427 struct e1000_hw *hw = &adapter->hw; 2428 struct net_device *netdev = adapter->netdev; 2429 struct e1000_tx_ring *txdr = adapter->tx_ring; 2430 u32 link, tctl; 2431 2432 if (test_bit(__E1000_DOWN, &adapter->flags)) 2433 return; 2434 2435 mutex_lock(&adapter->mutex); 2436 link = e1000_has_link(adapter); 2437 if ((netif_carrier_ok(netdev)) && link) 2438 goto link_up; 2439 2440 if (link) { 2441 if (!netif_carrier_ok(netdev)) { 2442 u32 ctrl; 2443 bool txb2b = true; 2444 /* update snapshot of PHY registers on LSC */ 2445 e1000_get_speed_and_duplex(hw, 2446 &adapter->link_speed, 2447 &adapter->link_duplex); 2448 2449 ctrl = er32(CTRL); 2450 pr_info("%s NIC Link is Up %d Mbps %s, " 2451 "Flow Control: %s\n", 2452 netdev->name, 2453 adapter->link_speed, 2454 adapter->link_duplex == FULL_DUPLEX ? 2455 "Full Duplex" : "Half Duplex", 2456 ((ctrl & E1000_CTRL_TFCE) && (ctrl & 2457 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & 2458 E1000_CTRL_RFCE) ? "RX" : ((ctrl & 2459 E1000_CTRL_TFCE) ? "TX" : "None"))); 2460 2461 /* adjust timeout factor according to speed/duplex */ 2462 adapter->tx_timeout_factor = 1; 2463 switch (adapter->link_speed) { 2464 case SPEED_10: 2465 txb2b = false; 2466 adapter->tx_timeout_factor = 16; 2467 break; 2468 case SPEED_100: 2469 txb2b = false; 2470 /* maybe add some timeout factor ? 
*/ 2471 break; 2472 } 2473 2474 /* enable transmits in the hardware */ 2475 tctl = er32(TCTL); 2476 tctl |= E1000_TCTL_EN; 2477 ew32(TCTL, tctl); 2478 2479 netif_carrier_on(netdev); 2480 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2481 schedule_delayed_work(&adapter->phy_info_task, 2482 2 * HZ); 2483 adapter->smartspeed = 0; 2484 } 2485 } else { 2486 if (netif_carrier_ok(netdev)) { 2487 adapter->link_speed = 0; 2488 adapter->link_duplex = 0; 2489 pr_info("%s NIC Link is Down\n", 2490 netdev->name); 2491 netif_carrier_off(netdev); 2492 2493 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2494 schedule_delayed_work(&adapter->phy_info_task, 2495 2 * HZ); 2496 } 2497 2498 e1000_smartspeed(adapter); 2499 } 2500 2501 link_up: 2502 e1000_update_stats(adapter); 2503 2504 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 2505 adapter->tpt_old = adapter->stats.tpt; 2506 hw->collision_delta = adapter->stats.colc - adapter->colc_old; 2507 adapter->colc_old = adapter->stats.colc; 2508 2509 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; 2510 adapter->gorcl_old = adapter->stats.gorcl; 2511 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; 2512 adapter->gotcl_old = adapter->stats.gotcl; 2513 2514 e1000_update_adaptive(hw); 2515 2516 if (!netif_carrier_ok(netdev)) { 2517 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { 2518 /* We've lost link, so the controller stops DMA, 2519 * but we've got queued Tx work that's never going 2520 * to get done, so reset controller to flush Tx. 2521 * (Do the reset outside of interrupt context). */ 2522 adapter->tx_timeout_count++; 2523 schedule_work(&adapter->reset_task); 2524 /* exit immediately since reset is imminent */ 2525 goto unlock; 2526 } 2527 } 2528 2529 /* Simple mode for Interrupt Throttle Rate (ITR) */ 2530 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { 2531 /* 2532 * Symmetric Tx/Rx gets a reduced ITR=2000; 2533 * Total asymmetrical Tx or Rx gets ITR=8000; 2534 * everyone else is between 2000-8000. 2535 */ 2536 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; 2537 u32 dif = (adapter->gotcl > adapter->gorcl ? 2538 adapter->gotcl - adapter->gorcl : 2539 adapter->gorcl - adapter->gotcl) / 10000; 2540 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 2541 2542 ew32(ITR, 1000000000 / (itr * 256)); 2543 } 2544 2545 /* Cause software interrupt to ensure rx ring is cleaned */ 2546 ew32(ICS, E1000_ICS_RXDMT0); 2547 2548 /* Force detection of hung controller every watchdog period */ 2549 adapter->detect_tx_hung = true; 2550 2551 /* Reschedule the task */ 2552 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2553 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); 2554 2555 unlock: 2556 mutex_unlock(&adapter->mutex); 2557 } 2558 2559 enum latency_range { 2560 lowest_latency = 0, 2561 low_latency = 1, 2562 bulk_latency = 2, 2563 latency_invalid = 255 2564 }; 2565 2566 /** 2567 * e1000_update_itr - update the dynamic ITR value based on statistics 2568 * @adapter: pointer to adapter 2569 * @itr_setting: current adapter->itr 2570 * @packets: the number of packets during this measurement interval 2571 * @bytes: the number of bytes during this measurement interval 2572 * 2573 * Stores a new ITR value based on packets and byte 2574 * counts during the last interrupt. The advantage of per interrupt 2575 * computation is faster updates and more accurate ITR for the current 2576 * traffic pattern. 
Constants in this function were computed 2577 * based on theoretical maximum wire speed and thresholds were set based 2578 * on testing data as well as attempting to minimize response time 2579 * while increasing bulk throughput. 2580 * this functionality is controlled by the InterruptThrottleRate module 2581 * parameter (see e1000_param.c) 2582 **/ 2583 static unsigned int e1000_update_itr(struct e1000_adapter *adapter, 2584 u16 itr_setting, int packets, int bytes) 2585 { 2586 unsigned int retval = itr_setting; 2587 struct e1000_hw *hw = &adapter->hw; 2588 2589 if (unlikely(hw->mac_type < e1000_82540)) 2590 goto update_itr_done; 2591 2592 if (packets == 0) 2593 goto update_itr_done; 2594 2595 switch (itr_setting) { 2596 case lowest_latency: 2597 /* jumbo frames get bulk treatment*/ 2598 if (bytes/packets > 8000) 2599 retval = bulk_latency; 2600 else if ((packets < 5) && (bytes > 512)) 2601 retval = low_latency; 2602 break; 2603 case low_latency: /* 50 usec aka 20000 ints/s */ 2604 if (bytes > 10000) { 2605 /* jumbo frames need bulk latency setting */ 2606 if (bytes/packets > 8000) 2607 retval = bulk_latency; 2608 else if ((packets < 10) || ((bytes/packets) > 1200)) 2609 retval = bulk_latency; 2610 else if ((packets > 35)) 2611 retval = lowest_latency; 2612 } else if (bytes/packets > 2000) 2613 retval = bulk_latency; 2614 else if (packets <= 2 && bytes < 512) 2615 retval = lowest_latency; 2616 break; 2617 case bulk_latency: /* 250 usec aka 4000 ints/s */ 2618 if (bytes > 25000) { 2619 if (packets > 35) 2620 retval = low_latency; 2621 } else if (bytes < 6000) { 2622 retval = low_latency; 2623 } 2624 break; 2625 } 2626 2627 update_itr_done: 2628 return retval; 2629 } 2630 2631 static void e1000_set_itr(struct e1000_adapter *adapter) 2632 { 2633 struct e1000_hw *hw = &adapter->hw; 2634 u16 current_itr; 2635 u32 new_itr = adapter->itr; 2636 2637 if (unlikely(hw->mac_type < e1000_82540)) 2638 return; 2639 2640 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 2641 if (unlikely(adapter->link_speed != SPEED_1000)) { 2642 current_itr = 0; 2643 new_itr = 4000; 2644 goto set_itr_now; 2645 } 2646 2647 adapter->tx_itr = e1000_update_itr(adapter, 2648 adapter->tx_itr, 2649 adapter->total_tx_packets, 2650 adapter->total_tx_bytes); 2651 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2652 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) 2653 adapter->tx_itr = low_latency; 2654 2655 adapter->rx_itr = e1000_update_itr(adapter, 2656 adapter->rx_itr, 2657 adapter->total_rx_packets, 2658 adapter->total_rx_bytes); 2659 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2660 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) 2661 adapter->rx_itr = low_latency; 2662 2663 current_itr = max(adapter->rx_itr, adapter->tx_itr); 2664 2665 switch (current_itr) { 2666 /* counts and packets in update_itr are dependent on these numbers */ 2667 case lowest_latency: 2668 new_itr = 70000; 2669 break; 2670 case low_latency: 2671 new_itr = 20000; /* aka hwitr = ~200 */ 2672 break; 2673 case bulk_latency: 2674 new_itr = 4000; 2675 break; 2676 default: 2677 break; 2678 } 2679 2680 set_itr_now: 2681 if (new_itr != adapter->itr) { 2682 /* this attempts to bias the interrupt rate towards Bulk 2683 * by adding intermediate steps when interrupt rate is 2684 * increasing */ 2685 new_itr = new_itr > adapter->itr ? 
2686 min(adapter->itr + (new_itr >> 2), new_itr) : 2687 new_itr; 2688 adapter->itr = new_itr; 2689 ew32(ITR, 1000000000 / (new_itr * 256)); 2690 } 2691 } 2692 2693 #define E1000_TX_FLAGS_CSUM 0x00000001 2694 #define E1000_TX_FLAGS_VLAN 0x00000002 2695 #define E1000_TX_FLAGS_TSO 0x00000004 2696 #define E1000_TX_FLAGS_IPV4 0x00000008 2697 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 2698 #define E1000_TX_FLAGS_VLAN_SHIFT 16 2699 2700 static int e1000_tso(struct e1000_adapter *adapter, 2701 struct e1000_tx_ring *tx_ring, struct sk_buff *skb) 2702 { 2703 struct e1000_context_desc *context_desc; 2704 struct e1000_buffer *buffer_info; 2705 unsigned int i; 2706 u32 cmd_length = 0; 2707 u16 ipcse = 0, tucse, mss; 2708 u8 ipcss, ipcso, tucss, tucso, hdr_len; 2709 int err; 2710 2711 if (skb_is_gso(skb)) { 2712 if (skb_header_cloned(skb)) { 2713 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2714 if (err) 2715 return err; 2716 } 2717 2718 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2719 mss = skb_shinfo(skb)->gso_size; 2720 if (skb->protocol == htons(ETH_P_IP)) { 2721 struct iphdr *iph = ip_hdr(skb); 2722 iph->tot_len = 0; 2723 iph->check = 0; 2724 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 2725 iph->daddr, 0, 2726 IPPROTO_TCP, 2727 0); 2728 cmd_length = E1000_TXD_CMD_IP; 2729 ipcse = skb_transport_offset(skb) - 1; 2730 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2731 ipv6_hdr(skb)->payload_len = 0; 2732 tcp_hdr(skb)->check = 2733 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 2734 &ipv6_hdr(skb)->daddr, 2735 0, IPPROTO_TCP, 0); 2736 ipcse = 0; 2737 } 2738 ipcss = skb_network_offset(skb); 2739 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; 2740 tucss = skb_transport_offset(skb); 2741 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; 2742 tucse = 0; 2743 2744 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 2745 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 2746 2747 i = tx_ring->next_to_use; 2748 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2749 buffer_info = &tx_ring->buffer_info[i]; 2750 2751 context_desc->lower_setup.ip_fields.ipcss = ipcss; 2752 context_desc->lower_setup.ip_fields.ipcso = ipcso; 2753 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); 2754 context_desc->upper_setup.tcp_fields.tucss = tucss; 2755 context_desc->upper_setup.tcp_fields.tucso = tucso; 2756 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); 2757 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); 2758 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 2759 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 2760 2761 buffer_info->time_stamp = jiffies; 2762 buffer_info->next_to_watch = i; 2763 2764 if (++i == tx_ring->count) i = 0; 2765 tx_ring->next_to_use = i; 2766 2767 return true; 2768 } 2769 return false; 2770 } 2771 2772 static bool e1000_tx_csum(struct e1000_adapter *adapter, 2773 struct e1000_tx_ring *tx_ring, struct sk_buff *skb) 2774 { 2775 struct e1000_context_desc *context_desc; 2776 struct e1000_buffer *buffer_info; 2777 unsigned int i; 2778 u8 css; 2779 u32 cmd_len = E1000_TXD_CMD_DEXT; 2780 2781 if (skb->ip_summed != CHECKSUM_PARTIAL) 2782 return false; 2783 2784 switch (skb->protocol) { 2785 case cpu_to_be16(ETH_P_IP): 2786 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2787 cmd_len |= E1000_TXD_CMD_TCP; 2788 break; 2789 case cpu_to_be16(ETH_P_IPV6): 2790 /* XXX not handling all IPV6 headers */ 2791 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2792 cmd_len |= E1000_TXD_CMD_TCP; 2793 break; 2794 default: 2795 if 
(unlikely(net_ratelimit())) 2796 e_warn(drv, "checksum_partial proto=%x!\n", 2797 skb->protocol); 2798 break; 2799 } 2800 2801 css = skb_checksum_start_offset(skb); 2802 2803 i = tx_ring->next_to_use; 2804 buffer_info = &tx_ring->buffer_info[i]; 2805 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2806 2807 context_desc->lower_setup.ip_config = 0; 2808 context_desc->upper_setup.tcp_fields.tucss = css; 2809 context_desc->upper_setup.tcp_fields.tucso = 2810 css + skb->csum_offset; 2811 context_desc->upper_setup.tcp_fields.tucse = 0; 2812 context_desc->tcp_seg_setup.data = 0; 2813 context_desc->cmd_and_length = cpu_to_le32(cmd_len); 2814 2815 buffer_info->time_stamp = jiffies; 2816 buffer_info->next_to_watch = i; 2817 2818 if (unlikely(++i == tx_ring->count)) i = 0; 2819 tx_ring->next_to_use = i; 2820 2821 return true; 2822 } 2823 2824 #define E1000_MAX_TXD_PWR 12 2825 #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR) 2826 2827 static int e1000_tx_map(struct e1000_adapter *adapter, 2828 struct e1000_tx_ring *tx_ring, 2829 struct sk_buff *skb, unsigned int first, 2830 unsigned int max_per_txd, unsigned int nr_frags, 2831 unsigned int mss) 2832 { 2833 struct e1000_hw *hw = &adapter->hw; 2834 struct pci_dev *pdev = adapter->pdev; 2835 struct e1000_buffer *buffer_info; 2836 unsigned int len = skb_headlen(skb); 2837 unsigned int offset = 0, size, count = 0, i; 2838 unsigned int f, bytecount, segs; 2839 2840 i = tx_ring->next_to_use; 2841 2842 while (len) { 2843 buffer_info = &tx_ring->buffer_info[i]; 2844 size = min(len, max_per_txd); 2845 /* Workaround for Controller erratum -- 2846 * descriptor for non-tso packet in a linear SKB that follows a 2847 * tso gets written back prematurely before the data is fully 2848 * DMA'd to the controller */ 2849 if (!skb->data_len && tx_ring->last_tx_tso && 2850 !skb_is_gso(skb)) { 2851 tx_ring->last_tx_tso = false; 2852 size -= 4; 2853 } 2854 2855 /* Workaround for premature desc write-backs 2856 * in TSO mode. Append 4-byte sentinel desc */ 2857 if (unlikely(mss && !nr_frags && size == len && size > 8)) 2858 size -= 4; 2859 /* work-around for errata 10 and it applies 2860 * to all controllers in PCI-X mode 2861 * The fix is to make sure that the first descriptor of a 2862 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes 2863 */ 2864 if (unlikely((hw->bus_type == e1000_bus_type_pcix) && 2865 (size > 2015) && count == 0)) 2866 size = 2015; 2867 2868 /* Workaround for potential 82544 hang in PCI-X. Avoid 2869 * terminating buffers within evenly-aligned dwords. 
*/ 2870 if (unlikely(adapter->pcix_82544 && 2871 !((unsigned long)(skb->data + offset + size - 1) & 4) && 2872 size > 4)) 2873 size -= 4; 2874 2875 buffer_info->length = size; 2876 /* set time_stamp *before* dma to help avoid a possible race */ 2877 buffer_info->time_stamp = jiffies; 2878 buffer_info->mapped_as_page = false; 2879 buffer_info->dma = dma_map_single(&pdev->dev, 2880 skb->data + offset, 2881 size, DMA_TO_DEVICE); 2882 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2883 goto dma_error; 2884 buffer_info->next_to_watch = i; 2885 2886 len -= size; 2887 offset += size; 2888 count++; 2889 if (len) { 2890 i++; 2891 if (unlikely(i == tx_ring->count)) 2892 i = 0; 2893 } 2894 } 2895 2896 for (f = 0; f < nr_frags; f++) { 2897 const struct skb_frag_struct *frag; 2898 2899 frag = &skb_shinfo(skb)->frags[f]; 2900 len = skb_frag_size(frag); 2901 offset = 0; 2902 2903 while (len) { 2904 unsigned long bufend; 2905 i++; 2906 if (unlikely(i == tx_ring->count)) 2907 i = 0; 2908 2909 buffer_info = &tx_ring->buffer_info[i]; 2910 size = min(len, max_per_txd); 2911 /* Workaround for premature desc write-backs 2912 * in TSO mode. Append 4-byte sentinel desc */ 2913 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) 2914 size -= 4; 2915 /* Workaround for potential 82544 hang in PCI-X. 2916 * Avoid terminating buffers within evenly-aligned 2917 * dwords. */ 2918 bufend = (unsigned long) 2919 page_to_phys(skb_frag_page(frag)); 2920 bufend += offset + size - 1; 2921 if (unlikely(adapter->pcix_82544 && 2922 !(bufend & 4) && 2923 size > 4)) 2924 size -= 4; 2925 2926 buffer_info->length = size; 2927 buffer_info->time_stamp = jiffies; 2928 buffer_info->mapped_as_page = true; 2929 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 2930 offset, size, DMA_TO_DEVICE); 2931 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2932 goto dma_error; 2933 buffer_info->next_to_watch = i; 2934 2935 len -= size; 2936 offset += size; 2937 count++; 2938 } 2939 } 2940 2941 segs = skb_shinfo(skb)->gso_segs ?: 1; 2942 /* multiply data chunks by size of headers */ 2943 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; 2944 2945 tx_ring->buffer_info[i].skb = skb; 2946 tx_ring->buffer_info[i].segs = segs; 2947 tx_ring->buffer_info[i].bytecount = bytecount; 2948 tx_ring->buffer_info[first].next_to_watch = i; 2949 2950 return count; 2951 2952 dma_error: 2953 dev_err(&pdev->dev, "TX DMA map failed\n"); 2954 buffer_info->dma = 0; 2955 if (count) 2956 count--; 2957 2958 while (count--) { 2959 if (i==0) 2960 i += tx_ring->count; 2961 i--; 2962 buffer_info = &tx_ring->buffer_info[i]; 2963 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 2964 } 2965 2966 return 0; 2967 } 2968 2969 static void e1000_tx_queue(struct e1000_adapter *adapter, 2970 struct e1000_tx_ring *tx_ring, int tx_flags, 2971 int count) 2972 { 2973 struct e1000_hw *hw = &adapter->hw; 2974 struct e1000_tx_desc *tx_desc = NULL; 2975 struct e1000_buffer *buffer_info; 2976 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 2977 unsigned int i; 2978 2979 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { 2980 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 2981 E1000_TXD_CMD_TSE; 2982 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2983 2984 if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) 2985 txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2986 } 2987 2988 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { 2989 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 2990 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2991 } 2992 2993 if (unlikely(tx_flags & 
E1000_TX_FLAGS_VLAN)) { 2994 txd_lower |= E1000_TXD_CMD_VLE; 2995 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); 2996 } 2997 2998 i = tx_ring->next_to_use; 2999 3000 while (count--) { 3001 buffer_info = &tx_ring->buffer_info[i]; 3002 tx_desc = E1000_TX_DESC(*tx_ring, i); 3003 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 3004 tx_desc->lower.data = 3005 cpu_to_le32(txd_lower | buffer_info->length); 3006 tx_desc->upper.data = cpu_to_le32(txd_upper); 3007 if (unlikely(++i == tx_ring->count)) i = 0; 3008 } 3009 3010 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 3011 3012 /* Force memory writes to complete before letting h/w 3013 * know there are new descriptors to fetch. (Only 3014 * applicable for weak-ordered memory model archs, 3015 * such as IA-64). */ 3016 wmb(); 3017 3018 tx_ring->next_to_use = i; 3019 writel(i, hw->hw_addr + tx_ring->tdt); 3020 /* we need this if more than one processor can write to our tail 3021 * at a time, it synchronizes IO on IA64/Altix systems */ 3022 mmiowb(); 3023 } 3024 3025 /** 3026 * 82547 workaround to avoid controller hang in half-duplex environment. 3027 * The workaround is to avoid queuing a large packet that would span 3028 * the internal Tx FIFO ring boundary by notifying the stack to resend 3029 * the packet at a later time. This gives the Tx FIFO an opportunity to 3030 * flush all packets. When that occurs, we reset the Tx FIFO pointers 3031 * to the beginning of the Tx FIFO. 3032 **/ 3033 3034 #define E1000_FIFO_HDR 0x10 3035 #define E1000_82547_PAD_LEN 0x3E0 3036 3037 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, 3038 struct sk_buff *skb) 3039 { 3040 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; 3041 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; 3042 3043 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); 3044 3045 if (adapter->link_duplex != HALF_DUPLEX) 3046 goto no_fifo_stall_required; 3047 3048 if (atomic_read(&adapter->tx_fifo_stall)) 3049 return 1; 3050 3051 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { 3052 atomic_set(&adapter->tx_fifo_stall, 1); 3053 return 1; 3054 } 3055 3056 no_fifo_stall_required: 3057 adapter->tx_fifo_head += skb_fifo_len; 3058 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) 3059 adapter->tx_fifo_head -= adapter->tx_fifo_size; 3060 return 0; 3061 } 3062 3063 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) 3064 { 3065 struct e1000_adapter *adapter = netdev_priv(netdev); 3066 struct e1000_tx_ring *tx_ring = adapter->tx_ring; 3067 3068 netif_stop_queue(netdev); 3069 /* Herbert's original patch had: 3070 * smp_mb__after_netif_stop_queue(); 3071 * but since that doesn't exist yet, just open code it. */ 3072 smp_mb(); 3073 3074 /* We need to check again in case another CPU has just 3075 * made room available. */ 3076 if (likely(E1000_DESC_UNUSED(tx_ring) < size)) 3077 return -EBUSY; 3078 3079 /* A reprieve!
*/ 3080 netif_start_queue(netdev); 3081 ++adapter->restart_queue; 3082 return 0; 3083 } 3084 3085 static int e1000_maybe_stop_tx(struct net_device *netdev, 3086 struct e1000_tx_ring *tx_ring, int size) 3087 { 3088 if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) 3089 return 0; 3090 return __e1000_maybe_stop_tx(netdev, size); 3091 } 3092 3093 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) 3094 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 3095 struct net_device *netdev) 3096 { 3097 struct e1000_adapter *adapter = netdev_priv(netdev); 3098 struct e1000_hw *hw = &adapter->hw; 3099 struct e1000_tx_ring *tx_ring; 3100 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; 3101 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 3102 unsigned int tx_flags = 0; 3103 unsigned int len = skb_headlen(skb); 3104 unsigned int nr_frags; 3105 unsigned int mss; 3106 int count = 0; 3107 int tso; 3108 unsigned int f; 3109 3110 /* This goes back to the question of how to logically map a tx queue 3111 * to a flow. Right now, performance is impacted slightly negatively 3112 * if using multiple tx queues. If the stack breaks away from a 3113 * single qdisc implementation, we can look at this again. */ 3114 tx_ring = adapter->tx_ring; 3115 3116 if (unlikely(skb->len <= 0)) { 3117 dev_kfree_skb_any(skb); 3118 return NETDEV_TX_OK; 3119 } 3120 3121 mss = skb_shinfo(skb)->gso_size; 3122 /* The controller does a simple calculation to 3123 * make sure there is enough room in the FIFO before 3124 * initiating the DMA for each buffer. The calc is: 3125 * 4 = ceil(buffer len/mss). To make sure we don't 3126 * overrun the FIFO, adjust the max buffer len if mss 3127 * drops. */ 3128 if (mss) { 3129 u8 hdr_len; 3130 max_per_txd = min(mss << 2, max_per_txd); 3131 max_txd_pwr = fls(max_per_txd) - 1; 3132 3133 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 3134 if (skb->data_len && hdr_len == len) { 3135 switch (hw->mac_type) { 3136 unsigned int pull_size; 3137 case e1000_82544: 3138 /* Make sure we have room to chop off 4 bytes, 3139 * and that the end alignment will work out to 3140 * this hardware's requirements 3141 * NOTE: this is a TSO only workaround 3142 * if end byte alignment not correct move us 3143 * into the next dword */ 3144 if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4) 3145 break; 3146 /* fall through */ 3147 pull_size = min((unsigned int)4, skb->data_len); 3148 if (!__pskb_pull_tail(skb, pull_size)) { 3149 e_err(drv, "__pskb_pull_tail " 3150 "failed.\n"); 3151 dev_kfree_skb_any(skb); 3152 return NETDEV_TX_OK; 3153 } 3154 len = skb_headlen(skb); 3155 break; 3156 default: 3157 /* do nothing */ 3158 break; 3159 } 3160 } 3161 } 3162 3163 /* reserve a descriptor for the offload context */ 3164 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) 3165 count++; 3166 count++; 3167 3168 /* Controller Erratum workaround */ 3169 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) 3170 count++; 3171 3172 count += TXD_USE_COUNT(len, max_txd_pwr); 3173 3174 if (adapter->pcix_82544) 3175 count++; 3176 3177 /* work-around for errata 10 and it applies to all controllers 3178 * in PCI-X mode, so add one more descriptor to the count 3179 */ 3180 if (unlikely((hw->bus_type == e1000_bus_type_pcix) && 3181 (len > 2015))) 3182 count++; 3183 3184 nr_frags = skb_shinfo(skb)->nr_frags; 3185 for (f = 0; f < nr_frags; f++) 3186 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), 3187 max_txd_pwr); 3188 if (adapter->pcix_82544) 3189 count += nr_frags; 3190 3191 /* need: count + 2 desc gap to 
keep tail from touching 3192 * head, otherwise try next time */ 3193 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) 3194 return NETDEV_TX_BUSY; 3195 3196 if (unlikely((hw->mac_type == e1000_82547) && 3197 (e1000_82547_fifo_workaround(adapter, skb)))) { 3198 netif_stop_queue(netdev); 3199 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3200 schedule_delayed_work(&adapter->fifo_stall_task, 1); 3201 return NETDEV_TX_BUSY; 3202 } 3203 3204 if (vlan_tx_tag_present(skb)) { 3205 tx_flags |= E1000_TX_FLAGS_VLAN; 3206 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 3207 } 3208 3209 first = tx_ring->next_to_use; 3210 3211 tso = e1000_tso(adapter, tx_ring, skb); 3212 if (tso < 0) { 3213 dev_kfree_skb_any(skb); 3214 return NETDEV_TX_OK; 3215 } 3216 3217 if (likely(tso)) { 3218 if (likely(hw->mac_type != e1000_82544)) 3219 tx_ring->last_tx_tso = true; 3220 tx_flags |= E1000_TX_FLAGS_TSO; 3221 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb))) 3222 tx_flags |= E1000_TX_FLAGS_CSUM; 3223 3224 if (likely(skb->protocol == htons(ETH_P_IP))) 3225 tx_flags |= E1000_TX_FLAGS_IPV4; 3226 3227 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, 3228 nr_frags, mss); 3229 3230 if (count) { 3231 e1000_tx_queue(adapter, tx_ring, tx_flags, count); 3232 /* Make sure there is space in the ring for the next send. */ 3233 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); 3234 3235 } else { 3236 dev_kfree_skb_any(skb); 3237 tx_ring->buffer_info[first].time_stamp = 0; 3238 tx_ring->next_to_use = first; 3239 } 3240 3241 return NETDEV_TX_OK; 3242 } 3243 3244 /** 3245 * e1000_tx_timeout - Respond to a Tx Hang 3246 * @netdev: network interface device structure 3247 **/ 3248 3249 static void e1000_tx_timeout(struct net_device *netdev) 3250 { 3251 struct e1000_adapter *adapter = netdev_priv(netdev); 3252 3253 /* Do the reset outside of interrupt context */ 3254 adapter->tx_timeout_count++; 3255 schedule_work(&adapter->reset_task); 3256 } 3257 3258 static void e1000_reset_task(struct work_struct *work) 3259 { 3260 struct e1000_adapter *adapter = 3261 container_of(work, struct e1000_adapter, reset_task); 3262 3263 if (test_bit(__E1000_DOWN, &adapter->flags)) 3264 return; 3265 e1000_reinit_safe(adapter); 3266 } 3267 3268 /** 3269 * e1000_get_stats - Get System Network Statistics 3270 * @netdev: network interface device structure 3271 * 3272 * Returns the address of the device statistics structure. 3273 * The statistics are actually updated from the watchdog. 3274 **/ 3275 3276 static struct net_device_stats *e1000_get_stats(struct net_device *netdev) 3277 { 3278 /* only return the current stats */ 3279 return &netdev->stats; 3280 } 3281 3282 /** 3283 * e1000_change_mtu - Change the Maximum Transfer Unit 3284 * @netdev: network interface device structure 3285 * @new_mtu: new value for maximum frame size 3286 * 3287 * Returns 0 on success, negative on failure 3288 **/ 3289 3290 static int e1000_change_mtu(struct net_device *netdev, int new_mtu) 3291 { 3292 struct e1000_adapter *adapter = netdev_priv(netdev); 3293 struct e1000_hw *hw = &adapter->hw; 3294 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 3295 3296 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 3297 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3298 e_err(probe, "Invalid MTU setting\n"); 3299 return -EINVAL; 3300 } 3301 3302 /* Adapter-specific max frame size limits. */ 3303 switch (hw->mac_type) { 3304 case e1000_undefined ... 
e1000_82542_rev2_1: 3305 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { 3306 e_err(probe, "Jumbo Frames not supported.\n"); 3307 return -EINVAL; 3308 } 3309 break; 3310 default: 3311 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ 3312 break; 3313 } 3314 3315 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 3316 msleep(1); 3317 /* e1000_down has a dependency on max_frame_size */ 3318 hw->max_frame_size = max_frame; 3319 if (netif_running(netdev)) 3320 e1000_down(adapter); 3321 3322 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3323 * means we reserve 2 more, this pushes us to allocate from the next 3324 * larger slab size. 3325 * i.e. RXBUFFER_2048 --> size-4096 slab 3326 * however with the new *_jumbo_rx* routines, jumbo receives will use 3327 * fragmented skbs */ 3328 3329 if (max_frame <= E1000_RXBUFFER_2048) 3330 adapter->rx_buffer_len = E1000_RXBUFFER_2048; 3331 else 3332 #if (PAGE_SIZE >= E1000_RXBUFFER_16384) 3333 adapter->rx_buffer_len = E1000_RXBUFFER_16384; 3334 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096) 3335 adapter->rx_buffer_len = PAGE_SIZE; 3336 #endif 3337 3338 /* adjust allocation if LPE protects us, and we aren't using SBP */ 3339 if (!hw->tbi_compatibility_on && 3340 ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || 3341 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) 3342 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 3343 3344 pr_info("%s changing MTU from %d to %d\n", 3345 netdev->name, netdev->mtu, new_mtu); 3346 netdev->mtu = new_mtu; 3347 3348 if (netif_running(netdev)) 3349 e1000_up(adapter); 3350 else 3351 e1000_reset(adapter); 3352 3353 clear_bit(__E1000_RESETTING, &adapter->flags); 3354 3355 return 0; 3356 } 3357 3358 /** 3359 * e1000_update_stats - Update the board statistics counters 3360 * @adapter: board private structure 3361 **/ 3362 3363 void e1000_update_stats(struct e1000_adapter *adapter) 3364 { 3365 struct net_device *netdev = adapter->netdev; 3366 struct e1000_hw *hw = &adapter->hw; 3367 struct pci_dev *pdev = adapter->pdev; 3368 unsigned long flags; 3369 u16 phy_tmp; 3370 3371 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF 3372 3373 /* 3374 * Prevent stats update while adapter is being reset, or if the pci 3375 * connection is down. 
3376 */ 3377 if (adapter->link_speed == 0) 3378 return; 3379 if (pci_channel_offline(pdev)) 3380 return; 3381 3382 spin_lock_irqsave(&adapter->stats_lock, flags); 3383 3384 /* these counters are modified from e1000_tbi_adjust_stats, 3385 * called from the interrupt context, so they must only 3386 * be written while holding adapter->stats_lock 3387 */ 3388 3389 adapter->stats.crcerrs += er32(CRCERRS); 3390 adapter->stats.gprc += er32(GPRC); 3391 adapter->stats.gorcl += er32(GORCL); 3392 adapter->stats.gorch += er32(GORCH); 3393 adapter->stats.bprc += er32(BPRC); 3394 adapter->stats.mprc += er32(MPRC); 3395 adapter->stats.roc += er32(ROC); 3396 3397 adapter->stats.prc64 += er32(PRC64); 3398 adapter->stats.prc127 += er32(PRC127); 3399 adapter->stats.prc255 += er32(PRC255); 3400 adapter->stats.prc511 += er32(PRC511); 3401 adapter->stats.prc1023 += er32(PRC1023); 3402 adapter->stats.prc1522 += er32(PRC1522); 3403 3404 adapter->stats.symerrs += er32(SYMERRS); 3405 adapter->stats.mpc += er32(MPC); 3406 adapter->stats.scc += er32(SCC); 3407 adapter->stats.ecol += er32(ECOL); 3408 adapter->stats.mcc += er32(MCC); 3409 adapter->stats.latecol += er32(LATECOL); 3410 adapter->stats.dc += er32(DC); 3411 adapter->stats.sec += er32(SEC); 3412 adapter->stats.rlec += er32(RLEC); 3413 adapter->stats.xonrxc += er32(XONRXC); 3414 adapter->stats.xontxc += er32(XONTXC); 3415 adapter->stats.xoffrxc += er32(XOFFRXC); 3416 adapter->stats.xofftxc += er32(XOFFTXC); 3417 adapter->stats.fcruc += er32(FCRUC); 3418 adapter->stats.gptc += er32(GPTC); 3419 adapter->stats.gotcl += er32(GOTCL); 3420 adapter->stats.gotch += er32(GOTCH); 3421 adapter->stats.rnbc += er32(RNBC); 3422 adapter->stats.ruc += er32(RUC); 3423 adapter->stats.rfc += er32(RFC); 3424 adapter->stats.rjc += er32(RJC); 3425 adapter->stats.torl += er32(TORL); 3426 adapter->stats.torh += er32(TORH); 3427 adapter->stats.totl += er32(TOTL); 3428 adapter->stats.toth += er32(TOTH); 3429 adapter->stats.tpr += er32(TPR); 3430 3431 adapter->stats.ptc64 += er32(PTC64); 3432 adapter->stats.ptc127 += er32(PTC127); 3433 adapter->stats.ptc255 += er32(PTC255); 3434 adapter->stats.ptc511 += er32(PTC511); 3435 adapter->stats.ptc1023 += er32(PTC1023); 3436 adapter->stats.ptc1522 += er32(PTC1522); 3437 3438 adapter->stats.mptc += er32(MPTC); 3439 adapter->stats.bptc += er32(BPTC); 3440 3441 /* used for adaptive IFS */ 3442 3443 hw->tx_packet_delta = er32(TPT); 3444 adapter->stats.tpt += hw->tx_packet_delta; 3445 hw->collision_delta = er32(COLC); 3446 adapter->stats.colc += hw->collision_delta; 3447 3448 if (hw->mac_type >= e1000_82543) { 3449 adapter->stats.algnerrc += er32(ALGNERRC); 3450 adapter->stats.rxerrc += er32(RXERRC); 3451 adapter->stats.tncrs += er32(TNCRS); 3452 adapter->stats.cexterr += er32(CEXTERR); 3453 adapter->stats.tsctc += er32(TSCTC); 3454 adapter->stats.tsctfc += er32(TSCTFC); 3455 } 3456 3457 /* Fill out the OS statistics structure */ 3458 netdev->stats.multicast = adapter->stats.mprc; 3459 netdev->stats.collisions = adapter->stats.colc; 3460 3461 /* Rx Errors */ 3462 3463 /* RLEC on some newer hardware can be incorrect so build 3464 * our own version based on RUC and ROC */ 3465 netdev->stats.rx_errors = adapter->stats.rxerrc + 3466 adapter->stats.crcerrs + adapter->stats.algnerrc + 3467 adapter->stats.ruc + adapter->stats.roc + 3468 adapter->stats.cexterr; 3469 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; 3470 netdev->stats.rx_length_errors = adapter->stats.rlerrc; 3471 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 3472 
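/* alignment errors are reported to the stack as frame errors */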
netdev->stats.rx_frame_errors = adapter->stats.algnerrc; 3473 netdev->stats.rx_missed_errors = adapter->stats.mpc; 3474 3475 /* Tx Errors */ 3476 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; 3477 netdev->stats.tx_errors = adapter->stats.txerrc; 3478 netdev->stats.tx_aborted_errors = adapter->stats.ecol; 3479 netdev->stats.tx_window_errors = adapter->stats.latecol; 3480 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; 3481 if (hw->bad_tx_carr_stats_fd && 3482 adapter->link_duplex == FULL_DUPLEX) { 3483 netdev->stats.tx_carrier_errors = 0; 3484 adapter->stats.tncrs = 0; 3485 } 3486 3487 /* Tx Dropped needs to be maintained elsewhere */ 3488 3489 /* Phy Stats */ 3490 if (hw->media_type == e1000_media_type_copper) { 3491 if ((adapter->link_speed == SPEED_1000) && 3492 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { 3493 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; 3494 adapter->phy_stats.idle_errors += phy_tmp; 3495 } 3496 3497 if ((hw->mac_type <= e1000_82546) && 3498 (hw->phy_type == e1000_phy_m88) && 3499 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) 3500 adapter->phy_stats.receive_errors += phy_tmp; 3501 } 3502 3503 /* Management Stats */ 3504 if (hw->has_smbus) { 3505 adapter->stats.mgptc += er32(MGTPTC); 3506 adapter->stats.mgprc += er32(MGTPRC); 3507 adapter->stats.mgpdc += er32(MGTPDC); 3508 } 3509 3510 spin_unlock_irqrestore(&adapter->stats_lock, flags); 3511 } 3512 3513 /** 3514 * e1000_intr - Interrupt Handler 3515 * @irq: interrupt number 3516 * @data: pointer to a network interface device structure 3517 **/ 3518 3519 static irqreturn_t e1000_intr(int irq, void *data) 3520 { 3521 struct net_device *netdev = data; 3522 struct e1000_adapter *adapter = netdev_priv(netdev); 3523 struct e1000_hw *hw = &adapter->hw; 3524 u32 icr = er32(ICR); 3525 3526 if (unlikely((!icr))) 3527 return IRQ_NONE; /* Not our interrupt */ 3528 3529 /* 3530 * we might have caused the interrupt, but the above 3531 * read cleared it, and just in case the driver is 3532 * down there is nothing to do so return handled 3533 */ 3534 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) 3535 return IRQ_HANDLED; 3536 3537 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { 3538 hw->get_link_status = 1; 3539 /* guard against interrupt when we're going down */ 3540 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3541 schedule_delayed_work(&adapter->watchdog_task, 1); 3542 } 3543 3544 /* disable interrupts, without the synchronize_irq bit */ 3545 ew32(IMC, ~0); 3546 E1000_WRITE_FLUSH(); 3547 3548 if (likely(napi_schedule_prep(&adapter->napi))) { 3549 adapter->total_tx_bytes = 0; 3550 adapter->total_tx_packets = 0; 3551 adapter->total_rx_bytes = 0; 3552 adapter->total_rx_packets = 0; 3553 __napi_schedule(&adapter->napi); 3554 } else { 3555 /* this really should not happen! 
if it does it is basically a 3556 * bug, but not a hard error, so enable ints and continue */ 3557 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3558 e1000_irq_enable(adapter); 3559 } 3560 3561 return IRQ_HANDLED; 3562 } 3563 3564 /** 3565 * e1000_clean - NAPI Rx polling callback 3566 * @adapter: board private structure 3567 **/ 3568 static int e1000_clean(struct napi_struct *napi, int budget) 3569 { 3570 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); 3571 int tx_clean_complete = 0, work_done = 0; 3572 3573 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); 3574 3575 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); 3576 3577 if (!tx_clean_complete) 3578 work_done = budget; 3579 3580 /* If budget not fully consumed, exit the polling mode */ 3581 if (work_done < budget) { 3582 if (likely(adapter->itr_setting & 3)) 3583 e1000_set_itr(adapter); 3584 napi_complete(napi); 3585 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3586 e1000_irq_enable(adapter); 3587 } 3588 3589 return work_done; 3590 } 3591 3592 /** 3593 * e1000_clean_tx_irq - Reclaim resources after transmit completes 3594 * @adapter: board private structure 3595 **/ 3596 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, 3597 struct e1000_tx_ring *tx_ring) 3598 { 3599 struct e1000_hw *hw = &adapter->hw; 3600 struct net_device *netdev = adapter->netdev; 3601 struct e1000_tx_desc *tx_desc, *eop_desc; 3602 struct e1000_buffer *buffer_info; 3603 unsigned int i, eop; 3604 unsigned int count = 0; 3605 unsigned int total_tx_bytes=0, total_tx_packets=0; 3606 3607 i = tx_ring->next_to_clean; 3608 eop = tx_ring->buffer_info[i].next_to_watch; 3609 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3610 3611 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 3612 (count < tx_ring->count)) { 3613 bool cleaned = false; 3614 rmb(); /* read buffer_info after eop_desc */ 3615 for ( ; !cleaned; count++) { 3616 tx_desc = E1000_TX_DESC(*tx_ring, i); 3617 buffer_info = &tx_ring->buffer_info[i]; 3618 cleaned = (i == eop); 3619 3620 if (cleaned) { 3621 total_tx_packets += buffer_info->segs; 3622 total_tx_bytes += buffer_info->bytecount; 3623 } 3624 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 3625 tx_desc->upper.data = 0; 3626 3627 if (unlikely(++i == tx_ring->count)) i = 0; 3628 } 3629 3630 eop = tx_ring->buffer_info[i].next_to_watch; 3631 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3632 } 3633 3634 tx_ring->next_to_clean = i; 3635 3636 #define TX_WAKE_THRESHOLD 32 3637 if (unlikely(count && netif_carrier_ok(netdev) && 3638 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { 3639 /* Make sure that anybody stopping the queue after this 3640 * sees the new next_to_clean. 
3641 */ 3642 smp_mb(); 3643 3644 if (netif_queue_stopped(netdev) && 3645 !(test_bit(__E1000_DOWN, &adapter->flags))) { 3646 netif_wake_queue(netdev); 3647 ++adapter->restart_queue; 3648 } 3649 } 3650 3651 if (adapter->detect_tx_hung) { 3652 /* Detect a transmit hang in hardware; this serializes the 3653 * check with the clearing of time_stamp and movement of i */ 3654 adapter->detect_tx_hung = false; 3655 if (tx_ring->buffer_info[eop].time_stamp && 3656 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + 3657 (adapter->tx_timeout_factor * HZ)) && 3658 !(er32(STATUS) & E1000_STATUS_TXOFF)) { 3659 3660 /* detected Tx unit hang */ 3661 e_err(drv, "Detected Tx Unit Hang\n" 3662 " Tx Queue <%lu>\n" 3663 " TDH <%x>\n" 3664 " TDT <%x>\n" 3665 " next_to_use <%x>\n" 3666 " next_to_clean <%x>\n" 3667 "buffer_info[next_to_clean]\n" 3668 " time_stamp <%lx>\n" 3669 " next_to_watch <%x>\n" 3670 " jiffies <%lx>\n" 3671 " next_to_watch.status <%x>\n", 3672 (unsigned long)(tx_ring - adapter->tx_ring), 3674 readl(hw->hw_addr + tx_ring->tdh), 3675 readl(hw->hw_addr + tx_ring->tdt), 3676 tx_ring->next_to_use, 3677 tx_ring->next_to_clean, 3678 tx_ring->buffer_info[eop].time_stamp, 3679 eop, 3680 jiffies, 3681 eop_desc->upper.fields.status); 3682 netif_stop_queue(netdev); 3683 } 3684 } 3685 adapter->total_tx_bytes += total_tx_bytes; 3686 adapter->total_tx_packets += total_tx_packets; 3687 netdev->stats.tx_bytes += total_tx_bytes; 3688 netdev->stats.tx_packets += total_tx_packets; 3689 return count < tx_ring->count; 3690 } 3691 3692 /** 3693 * e1000_rx_checksum - Receive Checksum Offload for 82543 3694 * @adapter: board private structure 3695 * @status_err: receive descriptor status and error fields 3696 * @csum: receive descriptor csum field 3697 * @skb: socket buffer with received data 3698 **/ 3699 3700 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, 3701 u32 csum, struct sk_buff *skb) 3702 { 3703 struct e1000_hw *hw = &adapter->hw; 3704 u16 status = (u16)status_err; 3705 u8 errors = (u8)(status_err >> 24); 3706 3707 skb_checksum_none_assert(skb); 3708 3709 /* 82543 or newer only */ 3710 if (unlikely(hw->mac_type < e1000_82543)) return; 3711 /* Ignore Checksum bit is set */ 3712 if (unlikely(status & E1000_RXD_STAT_IXSM)) return; 3713 /* TCP/UDP checksum error bit is set */ 3714 if (unlikely(errors & E1000_RXD_ERR_TCPE)) { 3715 /* let the stack verify checksum errors */ 3716 adapter->hw_csum_err++; 3717 return; 3718 } 3719 /* TCP/UDP Checksum has not been calculated */ 3720 if (!(status & E1000_RXD_STAT_TCPCS)) 3721 return; 3722 3723 /* It must be a TCP or UDP packet with a valid checksum */ 3724 if (likely(status & E1000_RXD_STAT_TCPCS)) { 3725 /* TCP checksum is good */ 3726 skb->ip_summed = CHECKSUM_UNNECESSARY; 3727 } 3728 adapter->hw_csum_good++; 3729 } 3730 3731 /** 3732 * e1000_consume_page - helper function 3733 **/ 3734 static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, 3735 u16 length) 3736 { 3737 bi->page = NULL; 3738 skb->len += length; 3739 skb->data_len += length; 3740 skb->truesize += PAGE_SIZE; 3741 } 3742 3743 /** 3744 * e1000_receive_skb - helper function to handle rx indications 3745 * @adapter: board private structure 3746 * @status: descriptor status field as written by hardware 3747 * @vlan: descriptor vlan field as written by hardware (no le/be conversion) 3748 * @skb: pointer to sk_buff to be indicated to stack 3749 */ 3750 static void e1000_receive_skb(struct e1000_adapter *adapter,
u8 status, 3751 __le16 vlan, struct sk_buff *skb) 3752 { 3753 skb->protocol = eth_type_trans(skb, adapter->netdev); 3754 3755 if (status & E1000_RXD_STAT_VP) { 3756 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; 3757 3758 __vlan_hwaccel_put_tag(skb, vid); 3759 } 3760 napi_gro_receive(&adapter->napi, skb); 3761 } 3762 3763 /** 3764 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy 3765 * @adapter: board private structure 3766 * @rx_ring: ring to clean 3767 * @work_done: amount of napi work completed this call 3768 * @work_to_do: max amount of work allowed for this call to do 3769 * 3770 * the return value indicates whether actual cleaning was done, there 3771 * is no guarantee that everything was cleaned 3772 */ 3773 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, 3774 struct e1000_rx_ring *rx_ring, 3775 int *work_done, int work_to_do) 3776 { 3777 struct e1000_hw *hw = &adapter->hw; 3778 struct net_device *netdev = adapter->netdev; 3779 struct pci_dev *pdev = adapter->pdev; 3780 struct e1000_rx_desc *rx_desc, *next_rxd; 3781 struct e1000_buffer *buffer_info, *next_buffer; 3782 unsigned long irq_flags; 3783 u32 length; 3784 unsigned int i; 3785 int cleaned_count = 0; 3786 bool cleaned = false; 3787 unsigned int total_rx_bytes=0, total_rx_packets=0; 3788 3789 i = rx_ring->next_to_clean; 3790 rx_desc = E1000_RX_DESC(*rx_ring, i); 3791 buffer_info = &rx_ring->buffer_info[i]; 3792 3793 while (rx_desc->status & E1000_RXD_STAT_DD) { 3794 struct sk_buff *skb; 3795 u8 status; 3796 3797 if (*work_done >= work_to_do) 3798 break; 3799 (*work_done)++; 3800 rmb(); /* read descriptor and rx_buffer_info after status DD */ 3801 3802 status = rx_desc->status; 3803 skb = buffer_info->skb; 3804 buffer_info->skb = NULL; 3805 3806 if (++i == rx_ring->count) i = 0; 3807 next_rxd = E1000_RX_DESC(*rx_ring, i); 3808 prefetch(next_rxd); 3809 3810 next_buffer = &rx_ring->buffer_info[i]; 3811 3812 cleaned = true; 3813 cleaned_count++; 3814 dma_unmap_page(&pdev->dev, buffer_info->dma, 3815 buffer_info->length, DMA_FROM_DEVICE); 3816 buffer_info->dma = 0; 3817 3818 length = le16_to_cpu(rx_desc->length); 3819 3820 /* errors is only valid for DD + EOP descriptors */ 3821 if (unlikely((status & E1000_RXD_STAT_EOP) && 3822 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { 3823 u8 last_byte = *(skb->data + length - 1); 3824 if (TBI_ACCEPT(hw, status, rx_desc->errors, length, 3825 last_byte)) { 3826 spin_lock_irqsave(&adapter->stats_lock, 3827 irq_flags); 3828 e1000_tbi_adjust_stats(hw, &adapter->stats, 3829 length, skb->data); 3830 spin_unlock_irqrestore(&adapter->stats_lock, 3831 irq_flags); 3832 length--; 3833 } else { 3834 /* recycle both page and skb */ 3835 buffer_info->skb = skb; 3836 /* an error means any chain goes out the window 3837 * too */ 3838 if (rx_ring->rx_skb_top) 3839 dev_kfree_skb(rx_ring->rx_skb_top); 3840 rx_ring->rx_skb_top = NULL; 3841 goto next_desc; 3842 } 3843 } 3844 3845 #define rxtop rx_ring->rx_skb_top 3846 if (!(status & E1000_RXD_STAT_EOP)) { 3847 /* this descriptor is only the beginning (or middle) */ 3848 if (!rxtop) { 3849 /* this is the beginning of a chain */ 3850 rxtop = skb; 3851 skb_fill_page_desc(rxtop, 0, buffer_info->page, 3852 0, length); 3853 } else { 3854 /* this is the middle of a chain */ 3855 skb_fill_page_desc(rxtop, 3856 skb_shinfo(rxtop)->nr_frags, 3857 buffer_info->page, 0, length); 3858 /* re-use the skb, only consumed the page */ 3859 buffer_info->skb = skb; 3860 } 3861 e1000_consume_page(buffer_info, rxtop, 
length); 3862 goto next_desc; 3863 } else { 3864 if (rxtop) { 3865 /* end of the chain */ 3866 skb_fill_page_desc(rxtop, 3867 skb_shinfo(rxtop)->nr_frags, 3868 buffer_info->page, 0, length); 3869 /* re-use the current skb, we only consumed the 3870 * page */ 3871 buffer_info->skb = skb; 3872 skb = rxtop; 3873 rxtop = NULL; 3874 e1000_consume_page(buffer_info, skb, length); 3875 } else { 3876 /* no chain, got EOP, this buf is the packet 3877 * copybreak to save the put_page/alloc_page */ 3878 if (length <= copybreak && 3879 skb_tailroom(skb) >= length) { 3880 u8 *vaddr; 3881 vaddr = kmap_atomic(buffer_info->page, 3882 KM_SKB_DATA_SOFTIRQ); 3883 memcpy(skb_tail_pointer(skb), vaddr, length); 3884 kunmap_atomic(vaddr, 3885 KM_SKB_DATA_SOFTIRQ); 3886 /* re-use the page, so don't erase 3887 * buffer_info->page */ 3888 skb_put(skb, length); 3889 } else { 3890 skb_fill_page_desc(skb, 0, 3891 buffer_info->page, 0, 3892 length); 3893 e1000_consume_page(buffer_info, skb, 3894 length); 3895 } 3896 } 3897 } 3898 3899 /* Receive Checksum Offload XXX recompute due to CRC strip? */ 3900 e1000_rx_checksum(adapter, 3901 (u32)(status) | 3902 ((u32)(rx_desc->errors) << 24), 3903 le16_to_cpu(rx_desc->csum), skb); 3904 3905 pskb_trim(skb, skb->len - 4); 3906 3907 /* probably a little skewed due to removing CRC */ 3908 total_rx_bytes += skb->len; 3909 total_rx_packets++; 3910 3911 /* eth type trans needs skb->data to point to something */ 3912 if (!pskb_may_pull(skb, ETH_HLEN)) { 3913 e_err(drv, "pskb_may_pull failed.\n"); 3914 dev_kfree_skb(skb); 3915 goto next_desc; 3916 } 3917 3918 e1000_receive_skb(adapter, status, rx_desc->special, skb); 3919 3920 next_desc: 3921 rx_desc->status = 0; 3922 3923 /* return some buffers to hardware, one at a time is too slow */ 3924 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { 3925 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 3926 cleaned_count = 0; 3927 } 3928 3929 /* use prefetched values */ 3930 rx_desc = next_rxd; 3931 buffer_info = next_buffer; 3932 } 3933 rx_ring->next_to_clean = i; 3934 3935 cleaned_count = E1000_DESC_UNUSED(rx_ring); 3936 if (cleaned_count) 3937 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 3938 3939 adapter->total_rx_packets += total_rx_packets; 3940 adapter->total_rx_bytes += total_rx_bytes; 3941 netdev->stats.rx_bytes += total_rx_bytes; 3942 netdev->stats.rx_packets += total_rx_packets; 3943 return cleaned; 3944 } 3945 3946 /* 3947 * this should improve performance for small packets with large amounts 3948 * of reassembly being done in the stack 3949 */ 3950 static void e1000_check_copybreak(struct net_device *netdev, 3951 struct e1000_buffer *buffer_info, 3952 u32 length, struct sk_buff **skb) 3953 { 3954 struct sk_buff *new_skb; 3955 3956 if (length > copybreak) 3957 return; 3958 3959 new_skb = netdev_alloc_skb_ip_align(netdev, length); 3960 if (!new_skb) 3961 return; 3962 3963 skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN, 3964 (*skb)->data - NET_IP_ALIGN, 3965 length + NET_IP_ALIGN); 3966 /* save the skb in buffer_info as good */ 3967 buffer_info->skb = *skb; 3968 *skb = new_skb; 3969 } 3970 3971 /** 3972 * e1000_clean_rx_irq - Send received data up the network stack; legacy 3973 * @adapter: board private structure 3974 * @rx_ring: ring to clean 3975 * @work_done: amount of napi work completed this call 3976 * @work_to_do: max amount of work allowed for this call to do 3977 */ 3978 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, 3979 struct e1000_rx_ring *rx_ring, 3980 int *work_done, int 
			       work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	unsigned long flags;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		if (++i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);
		/* !EOP means multiple descriptors were used to store a single
		 * packet, if that's the case we need to toss it.  In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->discarding = true;

		if (adapter->discarding) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (status & E1000_RXD_STAT_EOP)
				adapter->discarding = false;
			goto next_desc;
		}

		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			u8 last_byte = *(skb->data + length - 1);
			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
				       last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock, flags);
				e1000_tbi_adjust_stats(hw, &adapter->stats,
						       length, skb->data);
				spin_unlock_irqrestore(&adapter->stats_lock,
						       flags);
				length--;
			} else {
				/* recycle */
				buffer_info->skb = skb;
				goto next_desc;
			}
		}

		/* adjust length to remove Ethernet CRC, this must be
		 * done after the TBI_ACCEPT workaround above
		 */
		length -= 4;

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += length;
		total_rx_packets++;

		e1000_check_copybreak(netdev, buffer_info, length, &skb);

		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, status, rx_desc->special, skb);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count =
E1000_DESC_UNUSED(rx_ring); 4098 if (cleaned_count) 4099 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4100 4101 adapter->total_rx_packets += total_rx_packets; 4102 adapter->total_rx_bytes += total_rx_bytes; 4103 netdev->stats.rx_bytes += total_rx_bytes; 4104 netdev->stats.rx_packets += total_rx_packets; 4105 return cleaned; 4106 } 4107 4108 /** 4109 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers 4110 * @adapter: address of board private structure 4111 * @rx_ring: pointer to receive ring structure 4112 * @cleaned_count: number of buffers to allocate this pass 4113 **/ 4114 4115 static void 4116 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, 4117 struct e1000_rx_ring *rx_ring, int cleaned_count) 4118 { 4119 struct net_device *netdev = adapter->netdev; 4120 struct pci_dev *pdev = adapter->pdev; 4121 struct e1000_rx_desc *rx_desc; 4122 struct e1000_buffer *buffer_info; 4123 struct sk_buff *skb; 4124 unsigned int i; 4125 unsigned int bufsz = 256 - 16 /*for skb_reserve */ ; 4126 4127 i = rx_ring->next_to_use; 4128 buffer_info = &rx_ring->buffer_info[i]; 4129 4130 while (cleaned_count--) { 4131 skb = buffer_info->skb; 4132 if (skb) { 4133 skb_trim(skb, 0); 4134 goto check_page; 4135 } 4136 4137 skb = netdev_alloc_skb_ip_align(netdev, bufsz); 4138 if (unlikely(!skb)) { 4139 /* Better luck next round */ 4140 adapter->alloc_rx_buff_failed++; 4141 break; 4142 } 4143 4144 /* Fix for errata 23, can't cross 64kB boundary */ 4145 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 4146 struct sk_buff *oldskb = skb; 4147 e_err(rx_err, "skb align check failed: %u bytes at " 4148 "%p\n", bufsz, skb->data); 4149 /* Try again, without freeing the previous */ 4150 skb = netdev_alloc_skb_ip_align(netdev, bufsz); 4151 /* Failed allocation, critical failure */ 4152 if (!skb) { 4153 dev_kfree_skb(oldskb); 4154 adapter->alloc_rx_buff_failed++; 4155 break; 4156 } 4157 4158 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 4159 /* give up */ 4160 dev_kfree_skb(skb); 4161 dev_kfree_skb(oldskb); 4162 break; /* while (cleaned_count--) */ 4163 } 4164 4165 /* Use new allocation */ 4166 dev_kfree_skb(oldskb); 4167 } 4168 buffer_info->skb = skb; 4169 buffer_info->length = adapter->rx_buffer_len; 4170 check_page: 4171 /* allocate a new page if necessary */ 4172 if (!buffer_info->page) { 4173 buffer_info->page = alloc_page(GFP_ATOMIC); 4174 if (unlikely(!buffer_info->page)) { 4175 adapter->alloc_rx_buff_failed++; 4176 break; 4177 } 4178 } 4179 4180 if (!buffer_info->dma) { 4181 buffer_info->dma = dma_map_page(&pdev->dev, 4182 buffer_info->page, 0, 4183 buffer_info->length, 4184 DMA_FROM_DEVICE); 4185 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 4186 put_page(buffer_info->page); 4187 dev_kfree_skb(skb); 4188 buffer_info->page = NULL; 4189 buffer_info->skb = NULL; 4190 buffer_info->dma = 0; 4191 adapter->alloc_rx_buff_failed++; 4192 break; /* while !buffer_info->skb */ 4193 } 4194 } 4195 4196 rx_desc = E1000_RX_DESC(*rx_ring, i); 4197 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4198 4199 if (unlikely(++i == rx_ring->count)) 4200 i = 0; 4201 buffer_info = &rx_ring->buffer_info[i]; 4202 } 4203 4204 if (likely(rx_ring->next_to_use != i)) { 4205 rx_ring->next_to_use = i; 4206 if (unlikely(i-- == 0)) 4207 i = (rx_ring->count - 1); 4208 4209 /* Force memory writes to complete before letting h/w 4210 * know there are new descriptors to fetch. (Only 4211 * applicable for weak-ordered memory model archs, 4212 * such as IA-64). 
*/ 4213 wmb(); 4214 writel(i, adapter->hw.hw_addr + rx_ring->rdt); 4215 } 4216 } 4217 4218 /** 4219 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended 4220 * @adapter: address of board private structure 4221 **/ 4222 4223 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, 4224 struct e1000_rx_ring *rx_ring, 4225 int cleaned_count) 4226 { 4227 struct e1000_hw *hw = &adapter->hw; 4228 struct net_device *netdev = adapter->netdev; 4229 struct pci_dev *pdev = adapter->pdev; 4230 struct e1000_rx_desc *rx_desc; 4231 struct e1000_buffer *buffer_info; 4232 struct sk_buff *skb; 4233 unsigned int i; 4234 unsigned int bufsz = adapter->rx_buffer_len; 4235 4236 i = rx_ring->next_to_use; 4237 buffer_info = &rx_ring->buffer_info[i]; 4238 4239 while (cleaned_count--) { 4240 skb = buffer_info->skb; 4241 if (skb) { 4242 skb_trim(skb, 0); 4243 goto map_skb; 4244 } 4245 4246 skb = netdev_alloc_skb_ip_align(netdev, bufsz); 4247 if (unlikely(!skb)) { 4248 /* Better luck next round */ 4249 adapter->alloc_rx_buff_failed++; 4250 break; 4251 } 4252 4253 /* Fix for errata 23, can't cross 64kB boundary */ 4254 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 4255 struct sk_buff *oldskb = skb; 4256 e_err(rx_err, "skb align check failed: %u bytes at " 4257 "%p\n", bufsz, skb->data); 4258 /* Try again, without freeing the previous */ 4259 skb = netdev_alloc_skb_ip_align(netdev, bufsz); 4260 /* Failed allocation, critical failure */ 4261 if (!skb) { 4262 dev_kfree_skb(oldskb); 4263 adapter->alloc_rx_buff_failed++; 4264 break; 4265 } 4266 4267 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 4268 /* give up */ 4269 dev_kfree_skb(skb); 4270 dev_kfree_skb(oldskb); 4271 adapter->alloc_rx_buff_failed++; 4272 break; /* while !buffer_info->skb */ 4273 } 4274 4275 /* Use new allocation */ 4276 dev_kfree_skb(oldskb); 4277 } 4278 buffer_info->skb = skb; 4279 buffer_info->length = adapter->rx_buffer_len; 4280 map_skb: 4281 buffer_info->dma = dma_map_single(&pdev->dev, 4282 skb->data, 4283 buffer_info->length, 4284 DMA_FROM_DEVICE); 4285 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 4286 dev_kfree_skb(skb); 4287 buffer_info->skb = NULL; 4288 buffer_info->dma = 0; 4289 adapter->alloc_rx_buff_failed++; 4290 break; /* while !buffer_info->skb */ 4291 } 4292 4293 /* 4294 * XXX if it was allocated cleanly it will never map to a 4295 * boundary crossing 4296 */ 4297 4298 /* Fix for errata 23, can't cross 64kB boundary */ 4299 if (!e1000_check_64k_bound(adapter, 4300 (void *)(unsigned long)buffer_info->dma, 4301 adapter->rx_buffer_len)) { 4302 e_err(rx_err, "dma align check failed: %u bytes at " 4303 "%p\n", adapter->rx_buffer_len, 4304 (void *)(unsigned long)buffer_info->dma); 4305 dev_kfree_skb(skb); 4306 buffer_info->skb = NULL; 4307 4308 dma_unmap_single(&pdev->dev, buffer_info->dma, 4309 adapter->rx_buffer_len, 4310 DMA_FROM_DEVICE); 4311 buffer_info->dma = 0; 4312 4313 adapter->alloc_rx_buff_failed++; 4314 break; /* while !buffer_info->skb */ 4315 } 4316 rx_desc = E1000_RX_DESC(*rx_ring, i); 4317 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4318 4319 if (unlikely(++i == rx_ring->count)) 4320 i = 0; 4321 buffer_info = &rx_ring->buffer_info[i]; 4322 } 4323 4324 if (likely(rx_ring->next_to_use != i)) { 4325 rx_ring->next_to_use = i; 4326 if (unlikely(i-- == 0)) 4327 i = (rx_ring->count - 1); 4328 4329 /* Force memory writes to complete before letting h/w 4330 * know there are new descriptors to fetch. 
(Only 4331 * applicable for weak-ordered memory model archs, 4332 * such as IA-64). */ 4333 wmb(); 4334 writel(i, hw->hw_addr + rx_ring->rdt); 4335 } 4336 } 4337 4338 /** 4339 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. 4340 * @adapter: 4341 **/ 4342 4343 static void e1000_smartspeed(struct e1000_adapter *adapter) 4344 { 4345 struct e1000_hw *hw = &adapter->hw; 4346 u16 phy_status; 4347 u16 phy_ctrl; 4348 4349 if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || 4350 !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) 4351 return; 4352 4353 if (adapter->smartspeed == 0) { 4354 /* If Master/Slave config fault is asserted twice, 4355 * we assume back-to-back */ 4356 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4357 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; 4358 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4359 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; 4360 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4361 if (phy_ctrl & CR_1000T_MS_ENABLE) { 4362 phy_ctrl &= ~CR_1000T_MS_ENABLE; 4363 e1000_write_phy_reg(hw, PHY_1000T_CTRL, 4364 phy_ctrl); 4365 adapter->smartspeed++; 4366 if (!e1000_phy_setup_autoneg(hw) && 4367 !e1000_read_phy_reg(hw, PHY_CTRL, 4368 &phy_ctrl)) { 4369 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4370 MII_CR_RESTART_AUTO_NEG); 4371 e1000_write_phy_reg(hw, PHY_CTRL, 4372 phy_ctrl); 4373 } 4374 } 4375 return; 4376 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { 4377 /* If still no link, perhaps using 2/3 pair cable */ 4378 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4379 phy_ctrl |= CR_1000T_MS_ENABLE; 4380 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); 4381 if (!e1000_phy_setup_autoneg(hw) && 4382 !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { 4383 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4384 MII_CR_RESTART_AUTO_NEG); 4385 e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); 4386 } 4387 } 4388 /* Restart process after E1000_SMARTSPEED_MAX iterations */ 4389 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) 4390 adapter->smartspeed = 0; 4391 } 4392 4393 /** 4394 * e1000_ioctl - 4395 * @netdev: 4396 * @ifreq: 4397 * @cmd: 4398 **/ 4399 4400 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 4401 { 4402 switch (cmd) { 4403 case SIOCGMIIPHY: 4404 case SIOCGMIIREG: 4405 case SIOCSMIIREG: 4406 return e1000_mii_ioctl(netdev, ifr, cmd); 4407 default: 4408 return -EOPNOTSUPP; 4409 } 4410 } 4411 4412 /** 4413 * e1000_mii_ioctl - 4414 * @netdev: 4415 * @ifreq: 4416 * @cmd: 4417 **/ 4418 4419 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 4420 int cmd) 4421 { 4422 struct e1000_adapter *adapter = netdev_priv(netdev); 4423 struct e1000_hw *hw = &adapter->hw; 4424 struct mii_ioctl_data *data = if_mii(ifr); 4425 int retval; 4426 u16 mii_reg; 4427 unsigned long flags; 4428 4429 if (hw->media_type != e1000_media_type_copper) 4430 return -EOPNOTSUPP; 4431 4432 switch (cmd) { 4433 case SIOCGMIIPHY: 4434 data->phy_id = hw->phy_addr; 4435 break; 4436 case SIOCGMIIREG: 4437 spin_lock_irqsave(&adapter->stats_lock, flags); 4438 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, 4439 &data->val_out)) { 4440 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4441 return -EIO; 4442 } 4443 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4444 break; 4445 case SIOCSMIIREG: 4446 if (data->reg_num & ~(0x1F)) 4447 return -EFAULT; 4448 mii_reg = data->val_in; 4449 spin_lock_irqsave(&adapter->stats_lock, flags); 4450 if (e1000_write_phy_reg(hw, data->reg_num, 4451 
mii_reg)) { 4452 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4453 return -EIO; 4454 } 4455 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4456 if (hw->media_type == e1000_media_type_copper) { 4457 switch (data->reg_num) { 4458 case PHY_CTRL: 4459 if (mii_reg & MII_CR_POWER_DOWN) 4460 break; 4461 if (mii_reg & MII_CR_AUTO_NEG_EN) { 4462 hw->autoneg = 1; 4463 hw->autoneg_advertised = 0x2F; 4464 } else { 4465 u32 speed; 4466 if (mii_reg & 0x40) 4467 speed = SPEED_1000; 4468 else if (mii_reg & 0x2000) 4469 speed = SPEED_100; 4470 else 4471 speed = SPEED_10; 4472 retval = e1000_set_spd_dplx( 4473 adapter, speed, 4474 ((mii_reg & 0x100) 4475 ? DUPLEX_FULL : 4476 DUPLEX_HALF)); 4477 if (retval) 4478 return retval; 4479 } 4480 if (netif_running(adapter->netdev)) 4481 e1000_reinit_locked(adapter); 4482 else 4483 e1000_reset(adapter); 4484 break; 4485 case M88E1000_PHY_SPEC_CTRL: 4486 case M88E1000_EXT_PHY_SPEC_CTRL: 4487 if (e1000_phy_reset(hw)) 4488 return -EIO; 4489 break; 4490 } 4491 } else { 4492 switch (data->reg_num) { 4493 case PHY_CTRL: 4494 if (mii_reg & MII_CR_POWER_DOWN) 4495 break; 4496 if (netif_running(adapter->netdev)) 4497 e1000_reinit_locked(adapter); 4498 else 4499 e1000_reset(adapter); 4500 break; 4501 } 4502 } 4503 break; 4504 default: 4505 return -EOPNOTSUPP; 4506 } 4507 return E1000_SUCCESS; 4508 } 4509 4510 void e1000_pci_set_mwi(struct e1000_hw *hw) 4511 { 4512 struct e1000_adapter *adapter = hw->back; 4513 int ret_val = pci_set_mwi(adapter->pdev); 4514 4515 if (ret_val) 4516 e_err(probe, "Error in setting MWI\n"); 4517 } 4518 4519 void e1000_pci_clear_mwi(struct e1000_hw *hw) 4520 { 4521 struct e1000_adapter *adapter = hw->back; 4522 4523 pci_clear_mwi(adapter->pdev); 4524 } 4525 4526 int e1000_pcix_get_mmrbc(struct e1000_hw *hw) 4527 { 4528 struct e1000_adapter *adapter = hw->back; 4529 return pcix_get_mmrbc(adapter->pdev); 4530 } 4531 4532 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) 4533 { 4534 struct e1000_adapter *adapter = hw->back; 4535 pcix_set_mmrbc(adapter->pdev, mmrbc); 4536 } 4537 4538 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) 4539 { 4540 outl(value, port); 4541 } 4542 4543 static bool e1000_vlan_used(struct e1000_adapter *adapter) 4544 { 4545 u16 vid; 4546 4547 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 4548 return true; 4549 return false; 4550 } 4551 4552 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, 4553 bool filter_on) 4554 { 4555 struct e1000_hw *hw = &adapter->hw; 4556 u32 rctl; 4557 4558 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4559 e1000_irq_disable(adapter); 4560 4561 if (filter_on) { 4562 /* enable VLAN receive filtering */ 4563 rctl = er32(RCTL); 4564 rctl &= ~E1000_RCTL_CFIEN; 4565 if (!(adapter->netdev->flags & IFF_PROMISC)) 4566 rctl |= E1000_RCTL_VFE; 4567 ew32(RCTL, rctl); 4568 e1000_update_mng_vlan(adapter); 4569 } else { 4570 /* disable VLAN receive filtering */ 4571 rctl = er32(RCTL); 4572 rctl &= ~E1000_RCTL_VFE; 4573 ew32(RCTL, rctl); 4574 } 4575 4576 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4577 e1000_irq_enable(adapter); 4578 } 4579 4580 static void e1000_vlan_mode(struct net_device *netdev, 4581 netdev_features_t features) 4582 { 4583 struct e1000_adapter *adapter = netdev_priv(netdev); 4584 struct e1000_hw *hw = &adapter->hw; 4585 u32 ctrl; 4586 4587 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4588 e1000_irq_disable(adapter); 4589 4590 ctrl = er32(CTRL); 4591 if (features & NETIF_F_HW_VLAN_RX) { 4592 /* enable VLAN tag insert/strip 
		   */
		ctrl |= E1000_CTRL_VME;
	} else {
		/* disable VLAN tag insert/strip */
		ctrl &= ~E1000_CTRL_VME;
	}
	ew32(CTRL, ctrl);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}

static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return 0;

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, true);

	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	set_bit(vid, adapter->active_vlans);

	return 0;
}

static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	clear_bit(vid, adapter->active_vlans);

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, false);

	return 0;
}

static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	if (!e1000_vlan_used(adapter))
		return;

	e1000_vlan_filter_on_off(adapter, true);
	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		e1000_vlan_rx_add_vid(adapter->netdev, vid);
}

int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
{
	struct e1000_hw *hw = &adapter->hw;

	hw->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work
	 */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 Mbps Full duplex */
	if ((hw->media_type == e1000_media_type_fiber) &&
	    spd != SPEED_1000 &&
	    dplx != DUPLEX_FULL)
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		hw->autoneg = 1;
		hw->autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}
	return 0;

err_inval:
	e_err(probe, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}

static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl,
status; 4717 u32 wufc = adapter->wol; 4718 #ifdef CONFIG_PM 4719 int retval = 0; 4720 #endif 4721 4722 netif_device_detach(netdev); 4723 4724 if (netif_running(netdev)) { 4725 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); 4726 e1000_down(adapter); 4727 } 4728 4729 #ifdef CONFIG_PM 4730 retval = pci_save_state(pdev); 4731 if (retval) 4732 return retval; 4733 #endif 4734 4735 status = er32(STATUS); 4736 if (status & E1000_STATUS_LU) 4737 wufc &= ~E1000_WUFC_LNKC; 4738 4739 if (wufc) { 4740 e1000_setup_rctl(adapter); 4741 e1000_set_rx_mode(netdev); 4742 4743 /* turn on all-multi mode if wake on multicast is enabled */ 4744 if (wufc & E1000_WUFC_MC) { 4745 rctl = er32(RCTL); 4746 rctl |= E1000_RCTL_MPE; 4747 ew32(RCTL, rctl); 4748 } 4749 4750 if (hw->mac_type >= e1000_82540) { 4751 ctrl = er32(CTRL); 4752 /* advertise wake from D3Cold */ 4753 #define E1000_CTRL_ADVD3WUC 0x00100000 4754 /* phy power management enable */ 4755 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 4756 ctrl |= E1000_CTRL_ADVD3WUC | 4757 E1000_CTRL_EN_PHY_PWR_MGMT; 4758 ew32(CTRL, ctrl); 4759 } 4760 4761 if (hw->media_type == e1000_media_type_fiber || 4762 hw->media_type == e1000_media_type_internal_serdes) { 4763 /* keep the laser running in D3 */ 4764 ctrl_ext = er32(CTRL_EXT); 4765 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; 4766 ew32(CTRL_EXT, ctrl_ext); 4767 } 4768 4769 ew32(WUC, E1000_WUC_PME_EN); 4770 ew32(WUFC, wufc); 4771 } else { 4772 ew32(WUC, 0); 4773 ew32(WUFC, 0); 4774 } 4775 4776 e1000_release_manageability(adapter); 4777 4778 *enable_wake = !!wufc; 4779 4780 /* make sure adapter isn't asleep if manageability is enabled */ 4781 if (adapter->en_mng_pt) 4782 *enable_wake = true; 4783 4784 if (netif_running(netdev)) 4785 e1000_free_irq(adapter); 4786 4787 pci_disable_device(pdev); 4788 4789 return 0; 4790 } 4791 4792 #ifdef CONFIG_PM 4793 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) 4794 { 4795 int retval; 4796 bool wake; 4797 4798 retval = __e1000_shutdown(pdev, &wake); 4799 if (retval) 4800 return retval; 4801 4802 if (wake) { 4803 pci_prepare_to_sleep(pdev); 4804 } else { 4805 pci_wake_from_d3(pdev, false); 4806 pci_set_power_state(pdev, PCI_D3hot); 4807 } 4808 4809 return 0; 4810 } 4811 4812 static int e1000_resume(struct pci_dev *pdev) 4813 { 4814 struct net_device *netdev = pci_get_drvdata(pdev); 4815 struct e1000_adapter *adapter = netdev_priv(netdev); 4816 struct e1000_hw *hw = &adapter->hw; 4817 u32 err; 4818 4819 pci_set_power_state(pdev, PCI_D0); 4820 pci_restore_state(pdev); 4821 pci_save_state(pdev); 4822 4823 if (adapter->need_ioport) 4824 err = pci_enable_device(pdev); 4825 else 4826 err = pci_enable_device_mem(pdev); 4827 if (err) { 4828 pr_err("Cannot enable PCI device from suspend\n"); 4829 return err; 4830 } 4831 pci_set_master(pdev); 4832 4833 pci_enable_wake(pdev, PCI_D3hot, 0); 4834 pci_enable_wake(pdev, PCI_D3cold, 0); 4835 4836 if (netif_running(netdev)) { 4837 err = e1000_request_irq(adapter); 4838 if (err) 4839 return err; 4840 } 4841 4842 e1000_power_up_phy(adapter); 4843 e1000_reset(adapter); 4844 ew32(WUS, ~0); 4845 4846 e1000_init_manageability(adapter); 4847 4848 if (netif_running(netdev)) 4849 e1000_up(adapter); 4850 4851 netif_device_attach(netdev); 4852 4853 return 0; 4854 } 4855 #endif 4856 4857 static void e1000_shutdown(struct pci_dev *pdev) 4858 { 4859 bool wake; 4860 4861 __e1000_shutdown(pdev, &wake); 4862 4863 if (system_state == SYSTEM_POWER_OFF) { 4864 pci_wake_from_d3(pdev, wake); 4865 pci_set_power_state(pdev, PCI_D3hot); 4866 } 4867 } 4868 
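/* Illustrative sketch (not part of the driver): both the suspend and the
 * shutdown paths above consume the 'wake' flag produced by
 * __e1000_shutdown().  The helper below is hypothetical and only shows the
 * usual pairing of pci_prepare_to_sleep()/pci_wake_from_d3() with that flag,
 * using the same PCI power-management API called elsewhere in this file.
 */
#if 0
static int e1000_apply_wake_example(struct pci_dev *pdev, bool wake)
{
	if (wake)
		/* arm PME and pick the deepest wake-capable sleep state */
		return pci_prepare_to_sleep(pdev);

	/* no wake source requested: disarm PME and go to D3hot */
	pci_wake_from_d3(pdev, false);
	return pci_set_power_state(pdev, PCI_D3hot);
}
#endif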
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	ew32(WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000_up(adapter)) {
			pr_info("can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

/* e1000_main.c */
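/* Illustrative sketch (not part of the driver): the three AER callbacks
 * above are normally published to the PCI core through a
 * struct pci_error_handlers hung off the driver's struct pci_driver.  The
 * driver's real tables are defined elsewhere in this file; the names
 * e1000_err_handler_example and e1000_driver_example below are hypothetical
 * and only show how such callbacks are typically wired up.
 */
#if 0
static const struct pci_error_handlers e1000_err_handler_example = {
	.error_detected	= e1000_io_error_detected,	/* bus error observed */
	.slot_reset	= e1000_io_slot_reset,		/* after slot/link reset */
	.resume		= e1000_io_resume,		/* traffic may restart */
};

static struct pci_driver e1000_driver_example = {
	.name		= e1000_driver_name,
	.id_table	= e1000_pci_tbl,
	.err_handler	= &e1000_err_handler_example,
};
#endif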