1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 1999 - 2006 Intel Corporation. */ 3 4 #include "e1000.h" 5 #include <net/ip6_checksum.h> 6 #include <linux/io.h> 7 #include <linux/prefetch.h> 8 #include <linux/bitops.h> 9 #include <linux/if_vlan.h> 10 11 char e1000_driver_name[] = "e1000"; 12 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; 13 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 14 15 /* e1000_pci_tbl - PCI Device ID Table 16 * 17 * Last entry must be all 0s 18 * 19 * Macro expands to... 20 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} 21 */ 22 static const struct pci_device_id e1000_pci_tbl[] = { 23 INTEL_E1000_ETHERNET_DEVICE(0x1000), 24 INTEL_E1000_ETHERNET_DEVICE(0x1001), 25 INTEL_E1000_ETHERNET_DEVICE(0x1004), 26 INTEL_E1000_ETHERNET_DEVICE(0x1008), 27 INTEL_E1000_ETHERNET_DEVICE(0x1009), 28 INTEL_E1000_ETHERNET_DEVICE(0x100C), 29 INTEL_E1000_ETHERNET_DEVICE(0x100D), 30 INTEL_E1000_ETHERNET_DEVICE(0x100E), 31 INTEL_E1000_ETHERNET_DEVICE(0x100F), 32 INTEL_E1000_ETHERNET_DEVICE(0x1010), 33 INTEL_E1000_ETHERNET_DEVICE(0x1011), 34 INTEL_E1000_ETHERNET_DEVICE(0x1012), 35 INTEL_E1000_ETHERNET_DEVICE(0x1013), 36 INTEL_E1000_ETHERNET_DEVICE(0x1014), 37 INTEL_E1000_ETHERNET_DEVICE(0x1015), 38 INTEL_E1000_ETHERNET_DEVICE(0x1016), 39 INTEL_E1000_ETHERNET_DEVICE(0x1017), 40 INTEL_E1000_ETHERNET_DEVICE(0x1018), 41 INTEL_E1000_ETHERNET_DEVICE(0x1019), 42 INTEL_E1000_ETHERNET_DEVICE(0x101A), 43 INTEL_E1000_ETHERNET_DEVICE(0x101D), 44 INTEL_E1000_ETHERNET_DEVICE(0x101E), 45 INTEL_E1000_ETHERNET_DEVICE(0x1026), 46 INTEL_E1000_ETHERNET_DEVICE(0x1027), 47 INTEL_E1000_ETHERNET_DEVICE(0x1028), 48 INTEL_E1000_ETHERNET_DEVICE(0x1075), 49 INTEL_E1000_ETHERNET_DEVICE(0x1076), 50 INTEL_E1000_ETHERNET_DEVICE(0x1077), 51 INTEL_E1000_ETHERNET_DEVICE(0x1078), 52 INTEL_E1000_ETHERNET_DEVICE(0x1079), 53 INTEL_E1000_ETHERNET_DEVICE(0x107A), 54 INTEL_E1000_ETHERNET_DEVICE(0x107B), 55 INTEL_E1000_ETHERNET_DEVICE(0x107C), 56 INTEL_E1000_ETHERNET_DEVICE(0x108A), 57 INTEL_E1000_ETHERNET_DEVICE(0x1099), 58 INTEL_E1000_ETHERNET_DEVICE(0x10B5), 59 INTEL_E1000_ETHERNET_DEVICE(0x2E6E), 60 /* required last entry */ 61 {0,} 62 }; 63 64 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); 65 66 int e1000_up(struct e1000_adapter *adapter); 67 void e1000_down(struct e1000_adapter *adapter); 68 void e1000_reinit_locked(struct e1000_adapter *adapter); 69 void e1000_reset(struct e1000_adapter *adapter); 70 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); 71 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); 72 void e1000_free_all_tx_resources(struct e1000_adapter *adapter); 73 void e1000_free_all_rx_resources(struct e1000_adapter *adapter); 74 static int e1000_setup_tx_resources(struct e1000_adapter *adapter, 75 struct e1000_tx_ring *txdr); 76 static int e1000_setup_rx_resources(struct e1000_adapter *adapter, 77 struct e1000_rx_ring *rxdr); 78 static void e1000_free_tx_resources(struct e1000_adapter *adapter, 79 struct e1000_tx_ring *tx_ring); 80 static void e1000_free_rx_resources(struct e1000_adapter *adapter, 81 struct e1000_rx_ring *rx_ring); 82 void e1000_update_stats(struct e1000_adapter *adapter); 83 84 static int e1000_init_module(void); 85 static void e1000_exit_module(void); 86 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 87 static void e1000_remove(struct pci_dev *pdev); 88 static int e1000_alloc_queues(struct e1000_adapter *adapter); 89 static int e1000_sw_init(struct e1000_adapter *adapter); 90 int e1000_open(struct net_device *netdev); 91 int e1000_close(struct net_device *netdev); 92 static void e1000_configure_tx(struct e1000_adapter *adapter); 93 static void e1000_configure_rx(struct e1000_adapter *adapter); 94 static void e1000_setup_rctl(struct e1000_adapter *adapter); 95 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter); 96 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter); 97 static void e1000_clean_tx_ring(struct e1000_adapter *adapter, 98 struct e1000_tx_ring *tx_ring); 99 static void e1000_clean_rx_ring(struct e1000_adapter *adapter, 100 struct e1000_rx_ring *rx_ring); 101 static void e1000_set_rx_mode(struct net_device *netdev); 102 static void e1000_update_phy_info_task(struct work_struct *work); 103 static void e1000_watchdog(struct work_struct *work); 104 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work); 105 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 106 struct net_device *netdev); 107 static int e1000_change_mtu(struct net_device *netdev, int new_mtu); 108 static int e1000_set_mac(struct net_device *netdev, void *p); 109 static irqreturn_t e1000_intr(int irq, void *data); 110 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, 111 struct e1000_tx_ring *tx_ring); 112 static int e1000_clean(struct napi_struct *napi, int budget); 113 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, 114 struct e1000_rx_ring *rx_ring, 115 int *work_done, int work_to_do); 116 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, 117 struct e1000_rx_ring *rx_ring, 118 int *work_done, int work_to_do); 119 static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter, 120 struct e1000_rx_ring *rx_ring, 121 int cleaned_count) 122 { 123 } 124 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, 125 struct e1000_rx_ring *rx_ring, 126 int cleaned_count); 127 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, 128 struct e1000_rx_ring *rx_ring, 129 int cleaned_count); 130 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); 131 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 132 int cmd); 133 static void e1000_enter_82542_rst(struct e1000_adapter *adapter); 134 static void e1000_leave_82542_rst(struct e1000_adapter *adapter); 135 static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue); 136 static void e1000_reset_task(struct work_struct *work); 137 static void e1000_smartspeed(struct e1000_adapter *adapter); 138 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, 139 struct sk_buff *skb); 140 141 static bool e1000_vlan_used(struct e1000_adapter *adapter); 142 static void e1000_vlan_mode(struct net_device *netdev, 143 netdev_features_t features); 144 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, 145 bool filter_on); 146 static int e1000_vlan_rx_add_vid(struct net_device *netdev, 147 __be16 proto, u16 vid); 148 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, 149 __be16 proto, u16 vid); 150 static void e1000_restore_vlan(struct e1000_adapter *adapter); 151 152 static int e1000_suspend(struct device *dev); 153 static int e1000_resume(struct device *dev); 154 static void e1000_shutdown(struct pci_dev *pdev); 155 156 #ifdef CONFIG_NET_POLL_CONTROLLER 157 /* for netdump / net console */ 158 static void e1000_netpoll (struct net_device *netdev); 159 #endif 160 161 #define COPYBREAK_DEFAULT 256 162 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT; 163 module_param(copybreak, uint, 0644); 164 MODULE_PARM_DESC(copybreak, 165 "Maximum size of packet that is copied to a new buffer on receive"); 166 167 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, 168 pci_channel_state_t state); 169 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev); 170 static void e1000_io_resume(struct pci_dev *pdev); 171 172 static const struct pci_error_handlers e1000_err_handler = { 173 .error_detected = e1000_io_error_detected, 174 .slot_reset = e1000_io_slot_reset, 175 .resume = e1000_io_resume, 176 }; 177 178 static DEFINE_SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume); 179 180 static struct pci_driver e1000_driver = { 181 .name = e1000_driver_name, 182 .id_table = e1000_pci_tbl, 183 .probe = e1000_probe, 184 .remove = e1000_remove, 185 .driver.pm = pm_sleep_ptr(&e1000_pm_ops), 186 .shutdown = e1000_shutdown, 187 .err_handler = &e1000_err_handler 188 }; 189 190 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); 191 MODULE_LICENSE("GPL v2"); 192 193 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) 194 static int debug = -1; 195 module_param(debug, int, 0); 196 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 197 198 /** 199 * e1000_get_hw_dev - helper function for getting netdev 200 * @hw: pointer to HW struct 201 * 202 * return device used by hardware layer to print debugging information 203 * 204 **/ 205 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw) 206 { 207 struct e1000_adapter *adapter = hw->back; 208 return adapter->netdev; 209 } 210 211 /** 212 * e1000_init_module - Driver Registration Routine 213 * 214 * e1000_init_module is the first routine called when the driver is 215 * loaded. All it does is register with the PCI subsystem. 216 **/ 217 static int __init e1000_init_module(void) 218 { 219 int ret; 220 pr_info("%s\n", e1000_driver_string); 221 222 pr_info("%s\n", e1000_copyright); 223 224 ret = pci_register_driver(&e1000_driver); 225 if (copybreak != COPYBREAK_DEFAULT) { 226 if (copybreak == 0) 227 pr_info("copybreak disabled\n"); 228 else 229 pr_info("copybreak enabled for " 230 "packets <= %u bytes\n", copybreak); 231 } 232 return ret; 233 } 234 235 module_init(e1000_init_module); 236 237 /** 238 * e1000_exit_module - Driver Exit Cleanup Routine 239 * 240 * e1000_exit_module is called just before the driver is removed 241 * from memory. 242 **/ 243 static void __exit e1000_exit_module(void) 244 { 245 pci_unregister_driver(&e1000_driver); 246 } 247 248 module_exit(e1000_exit_module); 249 250 static int e1000_request_irq(struct e1000_adapter *adapter) 251 { 252 struct net_device *netdev = adapter->netdev; 253 irq_handler_t handler = e1000_intr; 254 int irq_flags = IRQF_SHARED; 255 int err; 256 257 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, 258 netdev); 259 if (err) { 260 e_err(probe, "Unable to allocate interrupt Error: %d\n", err); 261 } 262 263 return err; 264 } 265 266 static void e1000_free_irq(struct e1000_adapter *adapter) 267 { 268 struct net_device *netdev = adapter->netdev; 269 270 free_irq(adapter->pdev->irq, netdev); 271 } 272 273 /** 274 * e1000_irq_disable - Mask off interrupt generation on the NIC 275 * @adapter: board private structure 276 **/ 277 static void e1000_irq_disable(struct e1000_adapter *adapter) 278 { 279 struct e1000_hw *hw = &adapter->hw; 280 281 ew32(IMC, ~0); 282 E1000_WRITE_FLUSH(); 283 synchronize_irq(adapter->pdev->irq); 284 } 285 286 /** 287 * e1000_irq_enable - Enable default interrupt generation settings 288 * @adapter: board private structure 289 **/ 290 static void e1000_irq_enable(struct e1000_adapter *adapter) 291 { 292 struct e1000_hw *hw = &adapter->hw; 293 294 ew32(IMS, IMS_ENABLE_MASK); 295 E1000_WRITE_FLUSH(); 296 } 297 298 static void e1000_update_mng_vlan(struct e1000_adapter *adapter) 299 { 300 struct e1000_hw *hw = &adapter->hw; 301 struct net_device *netdev = adapter->netdev; 302 u16 vid = hw->mng_cookie.vlan_id; 303 u16 old_vid = adapter->mng_vlan_id; 304 305 if (!e1000_vlan_used(adapter)) 306 return; 307 308 if (!test_bit(vid, adapter->active_vlans)) { 309 if (hw->mng_cookie.status & 310 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { 311 e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); 312 adapter->mng_vlan_id = vid; 313 } else { 314 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 315 } 316 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && 317 (vid != old_vid) && 318 !test_bit(old_vid, adapter->active_vlans)) 319 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), 320 old_vid); 321 } else { 322 adapter->mng_vlan_id = vid; 323 } 324 } 325 326 static void e1000_init_manageability(struct e1000_adapter *adapter) 327 { 328 struct e1000_hw *hw = &adapter->hw; 329 330 if (adapter->en_mng_pt) { 331 u32 manc = er32(MANC); 332 333 /* disable hardware interception of ARP */ 334 manc &= ~(E1000_MANC_ARP_EN); 335 336 ew32(MANC, manc); 337 } 338 } 339 340 static void e1000_release_manageability(struct e1000_adapter *adapter) 341 { 342 struct e1000_hw *hw = &adapter->hw; 343 344 if (adapter->en_mng_pt) { 345 u32 manc = er32(MANC); 346 347 /* re-enable hardware interception of ARP */ 348 manc |= E1000_MANC_ARP_EN; 349 350 ew32(MANC, manc); 351 } 352 } 353 354 /** 355 * e1000_configure - configure the hardware for RX and TX 356 * @adapter: private board structure 357 **/ 358 static void e1000_configure(struct e1000_adapter *adapter) 359 { 360 struct net_device *netdev = adapter->netdev; 361 int i; 362 363 e1000_set_rx_mode(netdev); 364 365 e1000_restore_vlan(adapter); 366 e1000_init_manageability(adapter); 367 368 e1000_configure_tx(adapter); 369 e1000_setup_rctl(adapter); 370 e1000_configure_rx(adapter); 371 /* call E1000_DESC_UNUSED which always leaves 372 * at least 1 descriptor unused to make sure 373 * next_to_use != next_to_clean 374 */ 375 for (i = 0; i < adapter->num_rx_queues; i++) { 376 struct e1000_rx_ring *ring = &adapter->rx_ring[i]; 377 adapter->alloc_rx_buf(adapter, ring, 378 E1000_DESC_UNUSED(ring)); 379 } 380 } 381 382 int e1000_up(struct e1000_adapter *adapter) 383 { 384 struct e1000_hw *hw = &adapter->hw; 385 386 /* hardware has been reset, we need to reload some things */ 387 e1000_configure(adapter); 388 389 clear_bit(__E1000_DOWN, &adapter->flags); 390 391 napi_enable(&adapter->napi); 392 393 e1000_irq_enable(adapter); 394 395 netif_wake_queue(adapter->netdev); 396 397 /* fire a link change interrupt to start the watchdog */ 398 ew32(ICS, E1000_ICS_LSC); 399 return 0; 400 } 401 402 /** 403 * e1000_power_up_phy - restore link in case the phy was powered down 404 * @adapter: address of board private structure 405 * 406 * The phy may be powered down to save power and turn off link when the 407 * driver is unloaded and wake on lan is not enabled (among others) 408 * *** this routine MUST be followed by a call to e1000_reset *** 409 **/ 410 void e1000_power_up_phy(struct e1000_adapter *adapter) 411 { 412 struct e1000_hw *hw = &adapter->hw; 413 u16 mii_reg = 0; 414 415 /* Just clear the power down bit to wake the phy back up */ 416 if (hw->media_type == e1000_media_type_copper) { 417 /* according to the manual, the phy will retain its 418 * settings across a power-down/up cycle 419 */ 420 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); 421 mii_reg &= ~MII_CR_POWER_DOWN; 422 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); 423 } 424 } 425 426 static void e1000_power_down_phy(struct e1000_adapter *adapter) 427 { 428 struct e1000_hw *hw = &adapter->hw; 429 430 /* Power down the PHY so no link is implied when interface is down * 431 * The PHY cannot be powered down if any of the following is true * 432 * (a) WoL is enabled 433 * (b) AMT is active 434 * (c) SoL/IDER session is active 435 */ 436 if (!adapter->wol && hw->mac_type >= e1000_82540 && 437 hw->media_type == e1000_media_type_copper) { 438 u16 mii_reg = 0; 439 440 switch (hw->mac_type) { 441 case e1000_82540: 442 case e1000_82545: 443 case e1000_82545_rev_3: 444 case e1000_82546: 445 case e1000_ce4100: 446 case e1000_82546_rev_3: 447 case e1000_82541: 448 case e1000_82541_rev_2: 449 case e1000_82547: 450 case e1000_82547_rev_2: 451 if (er32(MANC) & E1000_MANC_SMBUS_EN) 452 goto out; 453 break; 454 default: 455 goto out; 456 } 457 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); 458 mii_reg |= MII_CR_POWER_DOWN; 459 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); 460 msleep(1); 461 } 462 out: 463 return; 464 } 465 466 static void e1000_down_and_stop(struct e1000_adapter *adapter) 467 { 468 set_bit(__E1000_DOWN, &adapter->flags); 469 470 cancel_delayed_work_sync(&adapter->watchdog_task); 471 472 /* 473 * Since the watchdog task can reschedule other tasks, we should cancel 474 * it first, otherwise we can run into the situation when a work is 475 * still running after the adapter has been turned down. 476 */ 477 478 cancel_delayed_work_sync(&adapter->phy_info_task); 479 cancel_delayed_work_sync(&adapter->fifo_stall_task); 480 481 /* Only kill reset task if adapter is not resetting */ 482 if (!test_bit(__E1000_RESETTING, &adapter->flags)) 483 cancel_work_sync(&adapter->reset_task); 484 } 485 486 void e1000_down(struct e1000_adapter *adapter) 487 { 488 struct e1000_hw *hw = &adapter->hw; 489 struct net_device *netdev = adapter->netdev; 490 u32 rctl, tctl; 491 492 /* disable receives in the hardware */ 493 rctl = er32(RCTL); 494 ew32(RCTL, rctl & ~E1000_RCTL_EN); 495 /* flush and sleep below */ 496 497 netif_tx_disable(netdev); 498 499 /* disable transmits in the hardware */ 500 tctl = er32(TCTL); 501 tctl &= ~E1000_TCTL_EN; 502 ew32(TCTL, tctl); 503 /* flush both disables and wait for them to finish */ 504 E1000_WRITE_FLUSH(); 505 msleep(10); 506 507 /* Set the carrier off after transmits have been disabled in the 508 * hardware, to avoid race conditions with e1000_watchdog() (which 509 * may be running concurrently to us, checking for the carrier 510 * bit to decide whether it should enable transmits again). Such 511 * a race condition would result into transmission being disabled 512 * in the hardware until the next IFF_DOWN+IFF_UP cycle. 513 */ 514 netif_carrier_off(netdev); 515 516 netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, NULL); 517 netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, NULL); 518 napi_disable(&adapter->napi); 519 520 e1000_irq_disable(adapter); 521 522 /* Setting DOWN must be after irq_disable to prevent 523 * a screaming interrupt. Setting DOWN also prevents 524 * tasks from rescheduling. 525 */ 526 e1000_down_and_stop(adapter); 527 528 adapter->link_speed = 0; 529 adapter->link_duplex = 0; 530 531 e1000_reset(adapter); 532 e1000_clean_all_tx_rings(adapter); 533 e1000_clean_all_rx_rings(adapter); 534 } 535 536 void e1000_reinit_locked(struct e1000_adapter *adapter) 537 { 538 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 539 msleep(1); 540 541 /* only run the task if not already down */ 542 if (!test_bit(__E1000_DOWN, &adapter->flags)) { 543 e1000_down(adapter); 544 e1000_up(adapter); 545 } 546 547 clear_bit(__E1000_RESETTING, &adapter->flags); 548 } 549 550 void e1000_reset(struct e1000_adapter *adapter) 551 { 552 struct e1000_hw *hw = &adapter->hw; 553 u32 pba = 0, tx_space, min_tx_space, min_rx_space; 554 bool legacy_pba_adjust = false; 555 u16 hwm; 556 557 /* Repartition Pba for greater than 9k mtu 558 * To take effect CTRL.RST is required. 559 */ 560 561 switch (hw->mac_type) { 562 case e1000_82542_rev2_0: 563 case e1000_82542_rev2_1: 564 case e1000_82543: 565 case e1000_82544: 566 case e1000_82540: 567 case e1000_82541: 568 case e1000_82541_rev_2: 569 legacy_pba_adjust = true; 570 pba = E1000_PBA_48K; 571 break; 572 case e1000_82545: 573 case e1000_82545_rev_3: 574 case e1000_82546: 575 case e1000_ce4100: 576 case e1000_82546_rev_3: 577 pba = E1000_PBA_48K; 578 break; 579 case e1000_82547: 580 case e1000_82547_rev_2: 581 legacy_pba_adjust = true; 582 pba = E1000_PBA_30K; 583 break; 584 case e1000_undefined: 585 case e1000_num_macs: 586 break; 587 } 588 589 if (legacy_pba_adjust) { 590 if (hw->max_frame_size > E1000_RXBUFFER_8192) 591 pba -= 8; /* allocate more FIFO for Tx */ 592 593 if (hw->mac_type == e1000_82547) { 594 adapter->tx_fifo_head = 0; 595 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; 596 adapter->tx_fifo_size = 597 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; 598 atomic_set(&adapter->tx_fifo_stall, 0); 599 } 600 } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { 601 /* adjust PBA for jumbo frames */ 602 ew32(PBA, pba); 603 604 /* To maintain wire speed transmits, the Tx FIFO should be 605 * large enough to accommodate two full transmit packets, 606 * rounded up to the next 1KB and expressed in KB. Likewise, 607 * the Rx FIFO should be large enough to accommodate at least 608 * one full receive packet and is similarly rounded up and 609 * expressed in KB. 610 */ 611 pba = er32(PBA); 612 /* upper 16 bits has Tx packet buffer allocation size in KB */ 613 tx_space = pba >> 16; 614 /* lower 16 bits has Rx packet buffer allocation size in KB */ 615 pba &= 0xffff; 616 /* the Tx fifo also stores 16 bytes of information about the Tx 617 * but don't include ethernet FCS because hardware appends it 618 */ 619 min_tx_space = (hw->max_frame_size + 620 sizeof(struct e1000_tx_desc) - 621 ETH_FCS_LEN) * 2; 622 min_tx_space = ALIGN(min_tx_space, 1024); 623 min_tx_space >>= 10; 624 /* software strips receive CRC, so leave room for it */ 625 min_rx_space = hw->max_frame_size; 626 min_rx_space = ALIGN(min_rx_space, 1024); 627 min_rx_space >>= 10; 628 629 /* If current Tx allocation is less than the min Tx FIFO size, 630 * and the min Tx FIFO size is less than the current Rx FIFO 631 * allocation, take space away from current Rx allocation 632 */ 633 if (tx_space < min_tx_space && 634 ((min_tx_space - tx_space) < pba)) { 635 pba = pba - (min_tx_space - tx_space); 636 637 /* PCI/PCIx hardware has PBA alignment constraints */ 638 switch (hw->mac_type) { 639 case e1000_82545 ... e1000_82546_rev_3: 640 pba &= ~(E1000_PBA_8K - 1); 641 break; 642 default: 643 break; 644 } 645 646 /* if short on Rx space, Rx wins and must trump Tx 647 * adjustment or use Early Receive if available 648 */ 649 if (pba < min_rx_space) 650 pba = min_rx_space; 651 } 652 } 653 654 ew32(PBA, pba); 655 656 /* flow control settings: 657 * The high water mark must be low enough to fit one full frame 658 * (or the size used for early receive) above it in the Rx FIFO. 659 * Set it to the lower of: 660 * - 90% of the Rx FIFO size, and 661 * - the full Rx FIFO size minus the early receive size (for parts 662 * with ERT support assuming ERT set to E1000_ERT_2048), or 663 * - the full Rx FIFO size minus one full frame 664 */ 665 hwm = min(((pba << 10) * 9 / 10), 666 ((pba << 10) - hw->max_frame_size)); 667 668 hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */ 669 hw->fc_low_water = hw->fc_high_water - 8; 670 hw->fc_pause_time = E1000_FC_PAUSE_TIME; 671 hw->fc_send_xon = 1; 672 hw->fc = hw->original_fc; 673 674 /* Allow time for pending master requests to run */ 675 e1000_reset_hw(hw); 676 if (hw->mac_type >= e1000_82544) 677 ew32(WUC, 0); 678 679 if (e1000_init_hw(hw)) 680 e_dev_err("Hardware Error\n"); 681 e1000_update_mng_vlan(adapter); 682 683 /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ 684 if (hw->mac_type >= e1000_82544 && 685 hw->autoneg == 1 && 686 hw->autoneg_advertised == ADVERTISE_1000_FULL) { 687 u32 ctrl = er32(CTRL); 688 /* clear phy power management bit if we are in gig only mode, 689 * which if enabled will attempt negotiation to 100Mb, which 690 * can cause a loss of link at power off or driver unload 691 */ 692 ctrl &= ~E1000_CTRL_SWDPIN3; 693 ew32(CTRL, ctrl); 694 } 695 696 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 697 ew32(VET, ETHERNET_IEEE_VLAN_TYPE); 698 699 e1000_reset_adaptive(hw); 700 e1000_phy_get_info(hw, &adapter->phy_info); 701 702 e1000_release_manageability(adapter); 703 } 704 705 /* Dump the eeprom for users having checksum issues */ 706 static void e1000_dump_eeprom(struct e1000_adapter *adapter) 707 { 708 struct net_device *netdev = adapter->netdev; 709 struct ethtool_eeprom eeprom; 710 const struct ethtool_ops *ops = netdev->ethtool_ops; 711 u8 *data; 712 int i; 713 u16 csum_old, csum_new = 0; 714 715 eeprom.len = ops->get_eeprom_len(netdev); 716 eeprom.offset = 0; 717 718 data = kmalloc(eeprom.len, GFP_KERNEL); 719 if (!data) 720 return; 721 722 ops->get_eeprom(netdev, &eeprom, data); 723 724 csum_old = (data[EEPROM_CHECKSUM_REG * 2]) + 725 (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8); 726 for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2) 727 csum_new += data[i] + (data[i + 1] << 8); 728 csum_new = EEPROM_SUM - csum_new; 729 730 pr_err("/*********************/\n"); 731 pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old); 732 pr_err("Calculated : 0x%04x\n", csum_new); 733 734 pr_err("Offset Values\n"); 735 pr_err("======== ======\n"); 736 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0); 737 738 pr_err("Include this output when contacting your support provider.\n"); 739 pr_err("This is not a software error! Something bad happened to\n"); 740 pr_err("your hardware or EEPROM image. Ignoring this problem could\n"); 741 pr_err("result in further problems, possibly loss of data,\n"); 742 pr_err("corruption or system hangs!\n"); 743 pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n"); 744 pr_err("which is invalid and requires you to set the proper MAC\n"); 745 pr_err("address manually before continuing to enable this network\n"); 746 pr_err("device. Please inspect the EEPROM dump and report the\n"); 747 pr_err("issue to your hardware vendor or Intel Customer Support.\n"); 748 pr_err("/*********************/\n"); 749 750 kfree(data); 751 } 752 753 /** 754 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not 755 * @pdev: PCI device information struct 756 * 757 * Return true if an adapter needs ioport resources 758 **/ 759 static int e1000_is_need_ioport(struct pci_dev *pdev) 760 { 761 switch (pdev->device) { 762 case E1000_DEV_ID_82540EM: 763 case E1000_DEV_ID_82540EM_LOM: 764 case E1000_DEV_ID_82540EP: 765 case E1000_DEV_ID_82540EP_LOM: 766 case E1000_DEV_ID_82540EP_LP: 767 case E1000_DEV_ID_82541EI: 768 case E1000_DEV_ID_82541EI_MOBILE: 769 case E1000_DEV_ID_82541ER: 770 case E1000_DEV_ID_82541ER_LOM: 771 case E1000_DEV_ID_82541GI: 772 case E1000_DEV_ID_82541GI_LF: 773 case E1000_DEV_ID_82541GI_MOBILE: 774 case E1000_DEV_ID_82544EI_COPPER: 775 case E1000_DEV_ID_82544EI_FIBER: 776 case E1000_DEV_ID_82544GC_COPPER: 777 case E1000_DEV_ID_82544GC_LOM: 778 case E1000_DEV_ID_82545EM_COPPER: 779 case E1000_DEV_ID_82545EM_FIBER: 780 case E1000_DEV_ID_82546EB_COPPER: 781 case E1000_DEV_ID_82546EB_FIBER: 782 case E1000_DEV_ID_82546EB_QUAD_COPPER: 783 return true; 784 default: 785 return false; 786 } 787 } 788 789 static netdev_features_t e1000_fix_features(struct net_device *netdev, 790 netdev_features_t features) 791 { 792 /* Since there is no support for separate Rx/Tx vlan accel 793 * enable/disable make sure Tx flag is always in same state as Rx. 794 */ 795 if (features & NETIF_F_HW_VLAN_CTAG_RX) 796 features |= NETIF_F_HW_VLAN_CTAG_TX; 797 else 798 features &= ~NETIF_F_HW_VLAN_CTAG_TX; 799 800 return features; 801 } 802 803 static int e1000_set_features(struct net_device *netdev, 804 netdev_features_t features) 805 { 806 struct e1000_adapter *adapter = netdev_priv(netdev); 807 netdev_features_t changed = features ^ netdev->features; 808 809 if (changed & NETIF_F_HW_VLAN_CTAG_RX) 810 e1000_vlan_mode(netdev, features); 811 812 if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL))) 813 return 0; 814 815 netdev->features = features; 816 adapter->rx_csum = !!(features & NETIF_F_RXCSUM); 817 818 if (netif_running(netdev)) 819 e1000_reinit_locked(adapter); 820 else 821 e1000_reset(adapter); 822 823 return 1; 824 } 825 826 static const struct net_device_ops e1000_netdev_ops = { 827 .ndo_open = e1000_open, 828 .ndo_stop = e1000_close, 829 .ndo_start_xmit = e1000_xmit_frame, 830 .ndo_set_rx_mode = e1000_set_rx_mode, 831 .ndo_set_mac_address = e1000_set_mac, 832 .ndo_tx_timeout = e1000_tx_timeout, 833 .ndo_change_mtu = e1000_change_mtu, 834 .ndo_eth_ioctl = e1000_ioctl, 835 .ndo_validate_addr = eth_validate_addr, 836 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, 837 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, 838 #ifdef CONFIG_NET_POLL_CONTROLLER 839 .ndo_poll_controller = e1000_netpoll, 840 #endif 841 .ndo_fix_features = e1000_fix_features, 842 .ndo_set_features = e1000_set_features, 843 }; 844 845 /** 846 * e1000_init_hw_struct - initialize members of hw struct 847 * @adapter: board private struct 848 * @hw: structure used by e1000_hw.c 849 * 850 * Factors out initialization of the e1000_hw struct to its own function 851 * that can be called very early at init (just after struct allocation). 852 * Fields are initialized based on PCI device information and 853 * OS network device settings (MTU size). 854 * Returns negative error codes if MAC type setup fails. 855 */ 856 static int e1000_init_hw_struct(struct e1000_adapter *adapter, 857 struct e1000_hw *hw) 858 { 859 struct pci_dev *pdev = adapter->pdev; 860 861 /* PCI config space info */ 862 hw->vendor_id = pdev->vendor; 863 hw->device_id = pdev->device; 864 hw->subsystem_vendor_id = pdev->subsystem_vendor; 865 hw->subsystem_id = pdev->subsystem_device; 866 hw->revision_id = pdev->revision; 867 868 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); 869 870 hw->max_frame_size = adapter->netdev->mtu + 871 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 872 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; 873 874 /* identify the MAC */ 875 if (e1000_set_mac_type(hw)) { 876 e_err(probe, "Unknown MAC Type\n"); 877 return -EIO; 878 } 879 880 switch (hw->mac_type) { 881 default: 882 break; 883 case e1000_82541: 884 case e1000_82547: 885 case e1000_82541_rev_2: 886 case e1000_82547_rev_2: 887 hw->phy_init_script = 1; 888 break; 889 } 890 891 e1000_set_media_type(hw); 892 e1000_get_bus_info(hw); 893 894 hw->wait_autoneg_complete = false; 895 hw->tbi_compatibility_en = true; 896 hw->adaptive_ifs = true; 897 898 /* Copper options */ 899 900 if (hw->media_type == e1000_media_type_copper) { 901 hw->mdix = AUTO_ALL_MODES; 902 hw->disable_polarity_correction = false; 903 hw->master_slave = E1000_MASTER_SLAVE; 904 } 905 906 return 0; 907 } 908 909 /** 910 * e1000_probe - Device Initialization Routine 911 * @pdev: PCI device information struct 912 * @ent: entry in e1000_pci_tbl 913 * 914 * Returns 0 on success, negative on failure 915 * 916 * e1000_probe initializes an adapter identified by a pci_dev structure. 917 * The OS initialization, configuring of the adapter private structure, 918 * and a hardware reset occur. 919 **/ 920 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 921 { 922 struct net_device *netdev; 923 struct e1000_adapter *adapter = NULL; 924 struct e1000_hw *hw; 925 926 static int cards_found; 927 static int global_quad_port_a; /* global ksp3 port a indication */ 928 int i, err, pci_using_dac; 929 u16 eeprom_data = 0; 930 u16 tmp = 0; 931 u16 eeprom_apme_mask = E1000_EEPROM_APME; 932 int bars, need_ioport; 933 bool disable_dev = false; 934 935 /* do not allocate ioport bars when not needed */ 936 need_ioport = e1000_is_need_ioport(pdev); 937 if (need_ioport) { 938 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); 939 err = pci_enable_device(pdev); 940 } else { 941 bars = pci_select_bars(pdev, IORESOURCE_MEM); 942 err = pci_enable_device_mem(pdev); 943 } 944 if (err) 945 return err; 946 947 err = pci_request_selected_regions(pdev, bars, e1000_driver_name); 948 if (err) 949 goto err_pci_reg; 950 951 pci_set_master(pdev); 952 err = pci_save_state(pdev); 953 if (err) 954 goto err_alloc_etherdev; 955 956 err = -ENOMEM; 957 netdev = alloc_etherdev(sizeof(struct e1000_adapter)); 958 if (!netdev) 959 goto err_alloc_etherdev; 960 961 SET_NETDEV_DEV(netdev, &pdev->dev); 962 963 pci_set_drvdata(pdev, netdev); 964 adapter = netdev_priv(netdev); 965 adapter->netdev = netdev; 966 adapter->pdev = pdev; 967 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 968 adapter->bars = bars; 969 adapter->need_ioport = need_ioport; 970 971 hw = &adapter->hw; 972 hw->back = adapter; 973 974 err = -EIO; 975 hw->hw_addr = pci_ioremap_bar(pdev, BAR_0); 976 if (!hw->hw_addr) 977 goto err_ioremap; 978 979 if (adapter->need_ioport) { 980 for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) { 981 if (pci_resource_len(pdev, i) == 0) 982 continue; 983 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { 984 hw->io_base = pci_resource_start(pdev, i); 985 break; 986 } 987 } 988 } 989 990 /* make ready for any if (hw->...) below */ 991 err = e1000_init_hw_struct(adapter, hw); 992 if (err) 993 goto err_sw_init; 994 995 /* there is a workaround being applied below that limits 996 * 64-bit DMA addresses to 64-bit hardware. There are some 997 * 32-bit adapters that Tx hang when given 64-bit DMA addresses 998 */ 999 pci_using_dac = 0; 1000 if ((hw->bus_type == e1000_bus_type_pcix) && 1001 !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { 1002 pci_using_dac = 1; 1003 } else { 1004 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 1005 if (err) { 1006 pr_err("No usable DMA config, aborting\n"); 1007 goto err_dma; 1008 } 1009 } 1010 1011 netdev->netdev_ops = &e1000_netdev_ops; 1012 e1000_set_ethtool_ops(netdev); 1013 netdev->watchdog_timeo = 5 * HZ; 1014 netif_napi_add(netdev, &adapter->napi, e1000_clean); 1015 1016 strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); 1017 1018 adapter->bd_number = cards_found; 1019 1020 /* setup the private structure */ 1021 1022 err = e1000_sw_init(adapter); 1023 if (err) 1024 goto err_sw_init; 1025 1026 err = -EIO; 1027 if (hw->mac_type == e1000_ce4100) { 1028 hw->ce4100_gbe_mdio_base_virt = 1029 ioremap(pci_resource_start(pdev, BAR_1), 1030 pci_resource_len(pdev, BAR_1)); 1031 1032 if (!hw->ce4100_gbe_mdio_base_virt) 1033 goto err_mdio_ioremap; 1034 } 1035 1036 if (hw->mac_type >= e1000_82543) { 1037 netdev->hw_features = NETIF_F_SG | 1038 NETIF_F_HW_CSUM | 1039 NETIF_F_HW_VLAN_CTAG_RX; 1040 netdev->features = NETIF_F_HW_VLAN_CTAG_TX | 1041 NETIF_F_HW_VLAN_CTAG_FILTER; 1042 } 1043 1044 if ((hw->mac_type >= e1000_82544) && 1045 (hw->mac_type != e1000_82547)) 1046 netdev->hw_features |= NETIF_F_TSO; 1047 1048 netdev->priv_flags |= IFF_SUPP_NOFCS; 1049 1050 netdev->features |= netdev->hw_features; 1051 netdev->hw_features |= (NETIF_F_RXCSUM | 1052 NETIF_F_RXALL | 1053 NETIF_F_RXFCS); 1054 1055 if (pci_using_dac) { 1056 netdev->features |= NETIF_F_HIGHDMA; 1057 netdev->vlan_features |= NETIF_F_HIGHDMA; 1058 } 1059 1060 netdev->vlan_features |= (NETIF_F_TSO | 1061 NETIF_F_HW_CSUM | 1062 NETIF_F_SG); 1063 1064 /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */ 1065 if (hw->device_id != E1000_DEV_ID_82545EM_COPPER || 1066 hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE) 1067 netdev->priv_flags |= IFF_UNICAST_FLT; 1068 1069 /* MTU range: 46 - 16110 */ 1070 netdev->min_mtu = ETH_ZLEN - ETH_HLEN; 1071 netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); 1072 1073 adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); 1074 1075 /* initialize eeprom parameters */ 1076 if (e1000_init_eeprom_params(hw)) { 1077 e_err(probe, "EEPROM initialization failed\n"); 1078 goto err_eeprom; 1079 } 1080 1081 /* before reading the EEPROM, reset the controller to 1082 * put the device in a known good starting state 1083 */ 1084 1085 e1000_reset_hw(hw); 1086 1087 /* make sure the EEPROM is good */ 1088 if (e1000_validate_eeprom_checksum(hw) < 0) { 1089 e_err(probe, "The EEPROM Checksum Is Not Valid\n"); 1090 e1000_dump_eeprom(adapter); 1091 /* set MAC address to all zeroes to invalidate and temporary 1092 * disable this device for the user. This blocks regular 1093 * traffic while still permitting ethtool ioctls from reaching 1094 * the hardware as well as allowing the user to run the 1095 * interface after manually setting a hw addr using 1096 * `ip set address` 1097 */ 1098 memset(hw->mac_addr, 0, netdev->addr_len); 1099 } else { 1100 /* copy the MAC address out of the EEPROM */ 1101 if (e1000_read_mac_addr(hw)) 1102 e_err(probe, "EEPROM Read Error\n"); 1103 } 1104 /* don't block initialization here due to bad MAC address */ 1105 eth_hw_addr_set(netdev, hw->mac_addr); 1106 1107 if (!is_valid_ether_addr(netdev->dev_addr)) 1108 e_err(probe, "Invalid MAC Address\n"); 1109 1110 1111 INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog); 1112 INIT_DELAYED_WORK(&adapter->fifo_stall_task, 1113 e1000_82547_tx_fifo_stall_task); 1114 INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task); 1115 INIT_WORK(&adapter->reset_task, e1000_reset_task); 1116 1117 e1000_check_options(adapter); 1118 1119 /* Initial Wake on LAN setting 1120 * If APM wake is enabled in the EEPROM, 1121 * enable the ACPI Magic Packet filter 1122 */ 1123 1124 switch (hw->mac_type) { 1125 case e1000_82542_rev2_0: 1126 case e1000_82542_rev2_1: 1127 case e1000_82543: 1128 break; 1129 case e1000_82544: 1130 e1000_read_eeprom(hw, 1131 EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); 1132 eeprom_apme_mask = E1000_EEPROM_82544_APM; 1133 break; 1134 case e1000_82546: 1135 case e1000_82546_rev_3: 1136 if (er32(STATUS) & E1000_STATUS_FUNC_1) { 1137 e1000_read_eeprom(hw, 1138 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 1139 break; 1140 } 1141 fallthrough; 1142 default: 1143 e1000_read_eeprom(hw, 1144 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 1145 break; 1146 } 1147 if (eeprom_data & eeprom_apme_mask) 1148 adapter->eeprom_wol |= E1000_WUFC_MAG; 1149 1150 /* now that we have the eeprom settings, apply the special cases 1151 * where the eeprom may be wrong or the board simply won't support 1152 * wake on lan on a particular port 1153 */ 1154 switch (pdev->device) { 1155 case E1000_DEV_ID_82546GB_PCIE: 1156 adapter->eeprom_wol = 0; 1157 break; 1158 case E1000_DEV_ID_82546EB_FIBER: 1159 case E1000_DEV_ID_82546GB_FIBER: 1160 /* Wake events only supported on port A for dual fiber 1161 * regardless of eeprom setting 1162 */ 1163 if (er32(STATUS) & E1000_STATUS_FUNC_1) 1164 adapter->eeprom_wol = 0; 1165 break; 1166 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: 1167 /* if quad port adapter, disable WoL on all but port A */ 1168 if (global_quad_port_a != 0) 1169 adapter->eeprom_wol = 0; 1170 else 1171 adapter->quad_port_a = true; 1172 /* Reset for multiple quad port adapters */ 1173 if (++global_quad_port_a == 4) 1174 global_quad_port_a = 0; 1175 break; 1176 } 1177 1178 /* initialize the wol settings based on the eeprom settings */ 1179 adapter->wol = adapter->eeprom_wol; 1180 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 1181 1182 /* Auto detect PHY address */ 1183 if (hw->mac_type == e1000_ce4100) { 1184 for (i = 0; i < 32; i++) { 1185 hw->phy_addr = i; 1186 e1000_read_phy_reg(hw, PHY_ID2, &tmp); 1187 1188 if (tmp != 0 && tmp != 0xFF) 1189 break; 1190 } 1191 1192 if (i >= 32) 1193 goto err_eeprom; 1194 } 1195 1196 /* reset the hardware with the new settings */ 1197 e1000_reset(adapter); 1198 1199 strcpy(netdev->name, "eth%d"); 1200 err = register_netdev(netdev); 1201 if (err) 1202 goto err_register; 1203 1204 e1000_vlan_filter_on_off(adapter, false); 1205 1206 /* print bus type/speed/width info */ 1207 e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n", 1208 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""), 1209 ((hw->bus_speed == e1000_bus_speed_133) ? 133 : 1210 (hw->bus_speed == e1000_bus_speed_120) ? 120 : 1211 (hw->bus_speed == e1000_bus_speed_100) ? 100 : 1212 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33), 1213 ((hw->bus_width == e1000_bus_width_64) ? 64 : 32), 1214 netdev->dev_addr); 1215 1216 /* carrier off reporting is important to ethtool even BEFORE open */ 1217 netif_carrier_off(netdev); 1218 1219 e_info(probe, "Intel(R) PRO/1000 Network Connection\n"); 1220 1221 cards_found++; 1222 return 0; 1223 1224 err_register: 1225 err_eeprom: 1226 e1000_phy_hw_reset(hw); 1227 1228 if (hw->flash_address) 1229 iounmap(hw->flash_address); 1230 kfree(adapter->tx_ring); 1231 kfree(adapter->rx_ring); 1232 err_dma: 1233 err_sw_init: 1234 err_mdio_ioremap: 1235 iounmap(hw->ce4100_gbe_mdio_base_virt); 1236 iounmap(hw->hw_addr); 1237 err_ioremap: 1238 disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); 1239 free_netdev(netdev); 1240 err_alloc_etherdev: 1241 pci_release_selected_regions(pdev, bars); 1242 err_pci_reg: 1243 if (!adapter || disable_dev) 1244 pci_disable_device(pdev); 1245 return err; 1246 } 1247 1248 /** 1249 * e1000_remove - Device Removal Routine 1250 * @pdev: PCI device information struct 1251 * 1252 * e1000_remove is called by the PCI subsystem to alert the driver 1253 * that it should release a PCI device. That could be caused by a 1254 * Hot-Plug event, or because the driver is going to be removed from 1255 * memory. 1256 **/ 1257 static void e1000_remove(struct pci_dev *pdev) 1258 { 1259 struct net_device *netdev = pci_get_drvdata(pdev); 1260 struct e1000_adapter *adapter = netdev_priv(netdev); 1261 struct e1000_hw *hw = &adapter->hw; 1262 bool disable_dev; 1263 1264 e1000_down_and_stop(adapter); 1265 e1000_release_manageability(adapter); 1266 1267 unregister_netdev(netdev); 1268 1269 e1000_phy_hw_reset(hw); 1270 1271 kfree(adapter->tx_ring); 1272 kfree(adapter->rx_ring); 1273 1274 if (hw->mac_type == e1000_ce4100) 1275 iounmap(hw->ce4100_gbe_mdio_base_virt); 1276 iounmap(hw->hw_addr); 1277 if (hw->flash_address) 1278 iounmap(hw->flash_address); 1279 pci_release_selected_regions(pdev, adapter->bars); 1280 1281 disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); 1282 free_netdev(netdev); 1283 1284 if (disable_dev) 1285 pci_disable_device(pdev); 1286 } 1287 1288 /** 1289 * e1000_sw_init - Initialize general software structures (struct e1000_adapter) 1290 * @adapter: board private structure to initialize 1291 * 1292 * e1000_sw_init initializes the Adapter private data structure. 1293 * e1000_init_hw_struct MUST be called before this function 1294 **/ 1295 static int e1000_sw_init(struct e1000_adapter *adapter) 1296 { 1297 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1298 1299 adapter->num_tx_queues = 1; 1300 adapter->num_rx_queues = 1; 1301 1302 if (e1000_alloc_queues(adapter)) { 1303 e_err(probe, "Unable to allocate memory for queues\n"); 1304 return -ENOMEM; 1305 } 1306 1307 /* Explicitly disable IRQ since the NIC can be in any state. */ 1308 e1000_irq_disable(adapter); 1309 1310 spin_lock_init(&adapter->stats_lock); 1311 1312 set_bit(__E1000_DOWN, &adapter->flags); 1313 1314 return 0; 1315 } 1316 1317 /** 1318 * e1000_alloc_queues - Allocate memory for all rings 1319 * @adapter: board private structure to initialize 1320 * 1321 * We allocate one ring per queue at run-time since we don't know the 1322 * number of queues at compile-time. 1323 **/ 1324 static int e1000_alloc_queues(struct e1000_adapter *adapter) 1325 { 1326 adapter->tx_ring = kcalloc(adapter->num_tx_queues, 1327 sizeof(struct e1000_tx_ring), GFP_KERNEL); 1328 if (!adapter->tx_ring) 1329 return -ENOMEM; 1330 1331 adapter->rx_ring = kcalloc(adapter->num_rx_queues, 1332 sizeof(struct e1000_rx_ring), GFP_KERNEL); 1333 if (!adapter->rx_ring) { 1334 kfree(adapter->tx_ring); 1335 return -ENOMEM; 1336 } 1337 1338 return E1000_SUCCESS; 1339 } 1340 1341 /** 1342 * e1000_open - Called when a network interface is made active 1343 * @netdev: network interface device structure 1344 * 1345 * Returns 0 on success, negative value on failure 1346 * 1347 * The open entry point is called when a network interface is made 1348 * active by the system (IFF_UP). At this point all resources needed 1349 * for transmit and receive operations are allocated, the interrupt 1350 * handler is registered with the OS, the watchdog task is started, 1351 * and the stack is notified that the interface is ready. 1352 **/ 1353 int e1000_open(struct net_device *netdev) 1354 { 1355 struct e1000_adapter *adapter = netdev_priv(netdev); 1356 struct e1000_hw *hw = &adapter->hw; 1357 int err; 1358 1359 /* disallow open during test */ 1360 if (test_bit(__E1000_TESTING, &adapter->flags)) 1361 return -EBUSY; 1362 1363 netif_carrier_off(netdev); 1364 1365 /* allocate transmit descriptors */ 1366 err = e1000_setup_all_tx_resources(adapter); 1367 if (err) 1368 goto err_setup_tx; 1369 1370 /* allocate receive descriptors */ 1371 err = e1000_setup_all_rx_resources(adapter); 1372 if (err) 1373 goto err_setup_rx; 1374 1375 e1000_power_up_phy(adapter); 1376 1377 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 1378 if ((hw->mng_cookie.status & 1379 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { 1380 e1000_update_mng_vlan(adapter); 1381 } 1382 1383 /* before we allocate an interrupt, we must be ready to handle it. 1384 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 1385 * as soon as we call pci_request_irq, so we have to setup our 1386 * clean_rx handler before we do so. 1387 */ 1388 e1000_configure(adapter); 1389 1390 err = e1000_request_irq(adapter); 1391 if (err) 1392 goto err_req_irq; 1393 1394 /* From here on the code is the same as e1000_up() */ 1395 clear_bit(__E1000_DOWN, &adapter->flags); 1396 1397 netif_napi_set_irq(&adapter->napi, adapter->pdev->irq); 1398 napi_enable(&adapter->napi); 1399 netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, &adapter->napi); 1400 netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, &adapter->napi); 1401 1402 e1000_irq_enable(adapter); 1403 1404 netif_start_queue(netdev); 1405 1406 /* fire a link status change interrupt to start the watchdog */ 1407 ew32(ICS, E1000_ICS_LSC); 1408 1409 return E1000_SUCCESS; 1410 1411 err_req_irq: 1412 e1000_power_down_phy(adapter); 1413 e1000_free_all_rx_resources(adapter); 1414 err_setup_rx: 1415 e1000_free_all_tx_resources(adapter); 1416 err_setup_tx: 1417 e1000_reset(adapter); 1418 1419 return err; 1420 } 1421 1422 /** 1423 * e1000_close - Disables a network interface 1424 * @netdev: network interface device structure 1425 * 1426 * Returns 0, this is not allowed to fail 1427 * 1428 * The close entry point is called when an interface is de-activated 1429 * by the OS. The hardware is still under the drivers control, but 1430 * needs to be disabled. A global MAC reset is issued to stop the 1431 * hardware, and all transmit and receive resources are freed. 1432 **/ 1433 int e1000_close(struct net_device *netdev) 1434 { 1435 struct e1000_adapter *adapter = netdev_priv(netdev); 1436 struct e1000_hw *hw = &adapter->hw; 1437 int count = E1000_CHECK_RESET_COUNT; 1438 1439 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--) 1440 usleep_range(10000, 20000); 1441 1442 WARN_ON(count < 0); 1443 1444 /* signal that we're down so that the reset task will no longer run */ 1445 set_bit(__E1000_DOWN, &adapter->flags); 1446 clear_bit(__E1000_RESETTING, &adapter->flags); 1447 1448 e1000_down(adapter); 1449 e1000_power_down_phy(adapter); 1450 e1000_free_irq(adapter); 1451 1452 e1000_free_all_tx_resources(adapter); 1453 e1000_free_all_rx_resources(adapter); 1454 1455 /* kill manageability vlan ID if supported, but not if a vlan with 1456 * the same ID is registered on the host OS (let 8021q kill it) 1457 */ 1458 if ((hw->mng_cookie.status & 1459 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 1460 !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { 1461 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), 1462 adapter->mng_vlan_id); 1463 } 1464 1465 return 0; 1466 } 1467 1468 /** 1469 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary 1470 * @adapter: address of board private structure 1471 * @start: address of beginning of memory 1472 * @len: length of memory 1473 **/ 1474 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, 1475 unsigned long len) 1476 { 1477 struct e1000_hw *hw = &adapter->hw; 1478 unsigned long begin = (unsigned long)start; 1479 unsigned long end = begin + len; 1480 1481 /* First rev 82545 and 82546 need to not allow any memory 1482 * write location to cross 64k boundary due to errata 23 1483 */ 1484 if (hw->mac_type == e1000_82545 || 1485 hw->mac_type == e1000_ce4100 || 1486 hw->mac_type == e1000_82546) { 1487 return ((begin ^ (end - 1)) >> 16) == 0; 1488 } 1489 1490 return true; 1491 } 1492 1493 /** 1494 * e1000_setup_tx_resources - allocate Tx resources (Descriptors) 1495 * @adapter: board private structure 1496 * @txdr: tx descriptor ring (for a specific queue) to setup 1497 * 1498 * Return 0 on success, negative on failure 1499 **/ 1500 static int e1000_setup_tx_resources(struct e1000_adapter *adapter, 1501 struct e1000_tx_ring *txdr) 1502 { 1503 struct pci_dev *pdev = adapter->pdev; 1504 int size; 1505 1506 size = sizeof(struct e1000_tx_buffer) * txdr->count; 1507 txdr->buffer_info = vzalloc(size); 1508 if (!txdr->buffer_info) 1509 return -ENOMEM; 1510 1511 /* round up to nearest 4K */ 1512 1513 txdr->size = txdr->count * sizeof(struct e1000_tx_desc); 1514 txdr->size = ALIGN(txdr->size, 4096); 1515 1516 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, 1517 GFP_KERNEL); 1518 if (!txdr->desc) { 1519 setup_tx_desc_die: 1520 vfree(txdr->buffer_info); 1521 return -ENOMEM; 1522 } 1523 1524 /* Fix for errata 23, can't cross 64kB boundary */ 1525 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { 1526 void *olddesc = txdr->desc; 1527 dma_addr_t olddma = txdr->dma; 1528 e_err(tx_err, "txdr align check failed: %u bytes at %p\n", 1529 txdr->size, txdr->desc); 1530 /* Try again, without freeing the previous */ 1531 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, 1532 &txdr->dma, GFP_KERNEL); 1533 /* Failed allocation, critical failure */ 1534 if (!txdr->desc) { 1535 dma_free_coherent(&pdev->dev, txdr->size, olddesc, 1536 olddma); 1537 goto setup_tx_desc_die; 1538 } 1539 1540 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { 1541 /* give up */ 1542 dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, 1543 txdr->dma); 1544 dma_free_coherent(&pdev->dev, txdr->size, olddesc, 1545 olddma); 1546 e_err(probe, "Unable to allocate aligned memory " 1547 "for the transmit descriptor ring\n"); 1548 vfree(txdr->buffer_info); 1549 return -ENOMEM; 1550 } else { 1551 /* Free old allocation, new allocation was successful */ 1552 dma_free_coherent(&pdev->dev, txdr->size, olddesc, 1553 olddma); 1554 } 1555 } 1556 memset(txdr->desc, 0, txdr->size); 1557 1558 txdr->next_to_use = 0; 1559 txdr->next_to_clean = 0; 1560 1561 return 0; 1562 } 1563 1564 /** 1565 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources 1566 * (Descriptors) for all queues 1567 * @adapter: board private structure 1568 * 1569 * Return 0 on success, negative on failure 1570 **/ 1571 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) 1572 { 1573 int i, err = 0; 1574 1575 for (i = 0; i < adapter->num_tx_queues; i++) { 1576 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); 1577 if (err) { 1578 e_err(probe, "Allocation for Tx Queue %u failed\n", i); 1579 for (i-- ; i >= 0; i--) 1580 e1000_free_tx_resources(adapter, 1581 &adapter->tx_ring[i]); 1582 break; 1583 } 1584 } 1585 1586 return err; 1587 } 1588 1589 /** 1590 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset 1591 * @adapter: board private structure 1592 * 1593 * Configure the Tx unit of the MAC after a reset. 1594 **/ 1595 static void e1000_configure_tx(struct e1000_adapter *adapter) 1596 { 1597 u64 tdba; 1598 struct e1000_hw *hw = &adapter->hw; 1599 u32 tdlen, tctl, tipg; 1600 u32 ipgr1, ipgr2; 1601 1602 /* Setup the HW Tx Head and Tail descriptor pointers */ 1603 1604 switch (adapter->num_tx_queues) { 1605 case 1: 1606 default: 1607 tdba = adapter->tx_ring[0].dma; 1608 tdlen = adapter->tx_ring[0].count * 1609 sizeof(struct e1000_tx_desc); 1610 ew32(TDLEN, tdlen); 1611 ew32(TDBAH, (tdba >> 32)); 1612 ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); 1613 ew32(TDT, 0); 1614 ew32(TDH, 0); 1615 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? 1616 E1000_TDH : E1000_82542_TDH); 1617 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? 1618 E1000_TDT : E1000_82542_TDT); 1619 break; 1620 } 1621 1622 /* Set the default values for the Tx Inter Packet Gap timer */ 1623 if ((hw->media_type == e1000_media_type_fiber || 1624 hw->media_type == e1000_media_type_internal_serdes)) 1625 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 1626 else 1627 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; 1628 1629 switch (hw->mac_type) { 1630 case e1000_82542_rev2_0: 1631 case e1000_82542_rev2_1: 1632 tipg = DEFAULT_82542_TIPG_IPGT; 1633 ipgr1 = DEFAULT_82542_TIPG_IPGR1; 1634 ipgr2 = DEFAULT_82542_TIPG_IPGR2; 1635 break; 1636 default: 1637 ipgr1 = DEFAULT_82543_TIPG_IPGR1; 1638 ipgr2 = DEFAULT_82543_TIPG_IPGR2; 1639 break; 1640 } 1641 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; 1642 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; 1643 ew32(TIPG, tipg); 1644 1645 /* Set the Tx Interrupt Delay register */ 1646 1647 ew32(TIDV, adapter->tx_int_delay); 1648 if (hw->mac_type >= e1000_82540) 1649 ew32(TADV, adapter->tx_abs_int_delay); 1650 1651 /* Program the Transmit Control Register */ 1652 1653 tctl = er32(TCTL); 1654 tctl &= ~E1000_TCTL_CT; 1655 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | 1656 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 1657 1658 e1000_config_collision_dist(hw); 1659 1660 /* Setup Transmit Descriptor Settings for eop descriptor */ 1661 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; 1662 1663 /* only set IDE if we are delaying interrupts using the timers */ 1664 if (adapter->tx_int_delay) 1665 adapter->txd_cmd |= E1000_TXD_CMD_IDE; 1666 1667 if (hw->mac_type < e1000_82543) 1668 adapter->txd_cmd |= E1000_TXD_CMD_RPS; 1669 else 1670 adapter->txd_cmd |= E1000_TXD_CMD_RS; 1671 1672 /* Cache if we're 82544 running in PCI-X because we'll 1673 * need this to apply a workaround later in the send path. 1674 */ 1675 if (hw->mac_type == e1000_82544 && 1676 hw->bus_type == e1000_bus_type_pcix) 1677 adapter->pcix_82544 = true; 1678 1679 ew32(TCTL, tctl); 1680 1681 } 1682 1683 /** 1684 * e1000_setup_rx_resources - allocate Rx resources (Descriptors) 1685 * @adapter: board private structure 1686 * @rxdr: rx descriptor ring (for a specific queue) to setup 1687 * 1688 * Returns 0 on success, negative on failure 1689 **/ 1690 static int e1000_setup_rx_resources(struct e1000_adapter *adapter, 1691 struct e1000_rx_ring *rxdr) 1692 { 1693 struct pci_dev *pdev = adapter->pdev; 1694 int size, desc_len; 1695 1696 size = sizeof(struct e1000_rx_buffer) * rxdr->count; 1697 rxdr->buffer_info = vzalloc(size); 1698 if (!rxdr->buffer_info) 1699 return -ENOMEM; 1700 1701 desc_len = sizeof(struct e1000_rx_desc); 1702 1703 /* Round up to nearest 4K */ 1704 1705 rxdr->size = rxdr->count * desc_len; 1706 rxdr->size = ALIGN(rxdr->size, 4096); 1707 1708 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, 1709 GFP_KERNEL); 1710 if (!rxdr->desc) { 1711 setup_rx_desc_die: 1712 vfree(rxdr->buffer_info); 1713 return -ENOMEM; 1714 } 1715 1716 /* Fix for errata 23, can't cross 64kB boundary */ 1717 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { 1718 void *olddesc = rxdr->desc; 1719 dma_addr_t olddma = rxdr->dma; 1720 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n", 1721 rxdr->size, rxdr->desc); 1722 /* Try again, without freeing the previous */ 1723 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, 1724 &rxdr->dma, GFP_KERNEL); 1725 /* Failed allocation, critical failure */ 1726 if (!rxdr->desc) { 1727 dma_free_coherent(&pdev->dev, rxdr->size, olddesc, 1728 olddma); 1729 goto setup_rx_desc_die; 1730 } 1731 1732 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { 1733 /* give up */ 1734 dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, 1735 rxdr->dma); 1736 dma_free_coherent(&pdev->dev, rxdr->size, olddesc, 1737 olddma); 1738 e_err(probe, "Unable to allocate aligned memory for " 1739 "the Rx descriptor ring\n"); 1740 goto setup_rx_desc_die; 1741 } else { 1742 /* Free old allocation, new allocation was successful */ 1743 dma_free_coherent(&pdev->dev, rxdr->size, olddesc, 1744 olddma); 1745 } 1746 } 1747 memset(rxdr->desc, 0, rxdr->size); 1748 1749 rxdr->next_to_clean = 0; 1750 rxdr->next_to_use = 0; 1751 rxdr->rx_skb_top = NULL; 1752 1753 return 0; 1754 } 1755 1756 /** 1757 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources 1758 * (Descriptors) for all queues 1759 * @adapter: board private structure 1760 * 1761 * Return 0 on success, negative on failure 1762 **/ 1763 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) 1764 { 1765 int i, err = 0; 1766 1767 for (i = 0; i < adapter->num_rx_queues; i++) { 1768 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); 1769 if (err) { 1770 e_err(probe, "Allocation for Rx Queue %u failed\n", i); 1771 for (i-- ; i >= 0; i--) 1772 e1000_free_rx_resources(adapter, 1773 &adapter->rx_ring[i]); 1774 break; 1775 } 1776 } 1777 1778 return err; 1779 } 1780 1781 /** 1782 * e1000_setup_rctl - configure the receive control registers 1783 * @adapter: Board private structure 1784 **/ 1785 static void e1000_setup_rctl(struct e1000_adapter *adapter) 1786 { 1787 struct e1000_hw *hw = &adapter->hw; 1788 u32 rctl; 1789 1790 rctl = er32(RCTL); 1791 1792 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 1793 1794 rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 1795 E1000_RCTL_RDMTS_HALF | 1796 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); 1797 1798 if (hw->tbi_compatibility_on == 1) 1799 rctl |= E1000_RCTL_SBP; 1800 else 1801 rctl &= ~E1000_RCTL_SBP; 1802 1803 if (adapter->netdev->mtu <= ETH_DATA_LEN) 1804 rctl &= ~E1000_RCTL_LPE; 1805 else 1806 rctl |= E1000_RCTL_LPE; 1807 1808 /* Setup buffer sizes */ 1809 rctl &= ~E1000_RCTL_SZ_4096; 1810 rctl |= E1000_RCTL_BSEX; 1811 switch (adapter->rx_buffer_len) { 1812 case E1000_RXBUFFER_2048: 1813 default: 1814 rctl |= E1000_RCTL_SZ_2048; 1815 rctl &= ~E1000_RCTL_BSEX; 1816 break; 1817 case E1000_RXBUFFER_4096: 1818 rctl |= E1000_RCTL_SZ_4096; 1819 break; 1820 case E1000_RXBUFFER_8192: 1821 rctl |= E1000_RCTL_SZ_8192; 1822 break; 1823 case E1000_RXBUFFER_16384: 1824 rctl |= E1000_RCTL_SZ_16384; 1825 break; 1826 } 1827 1828 /* This is useful for sniffing bad packets. */ 1829 if (adapter->netdev->features & NETIF_F_RXALL) { 1830 /* UPE and MPE will be handled by normal PROMISC logic 1831 * in e1000e_set_rx_mode 1832 */ 1833 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ 1834 E1000_RCTL_BAM | /* RX All Bcast Pkts */ 1835 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ 1836 1837 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ 1838 E1000_RCTL_DPF | /* Allow filtered pause */ 1839 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ 1840 /* Do not mess with E1000_CTRL_VME, it affects transmit as well, 1841 * and that breaks VLANs. 1842 */ 1843 } 1844 1845 ew32(RCTL, rctl); 1846 } 1847 1848 /** 1849 * e1000_configure_rx - Configure 8254x Receive Unit after Reset 1850 * @adapter: board private structure 1851 * 1852 * Configure the Rx unit of the MAC after a reset. 1853 **/ 1854 static void e1000_configure_rx(struct e1000_adapter *adapter) 1855 { 1856 u64 rdba; 1857 struct e1000_hw *hw = &adapter->hw; 1858 u32 rdlen, rctl, rxcsum; 1859 1860 if (adapter->netdev->mtu > ETH_DATA_LEN) { 1861 rdlen = adapter->rx_ring[0].count * 1862 sizeof(struct e1000_rx_desc); 1863 adapter->clean_rx = e1000_clean_jumbo_rx_irq; 1864 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; 1865 } else { 1866 rdlen = adapter->rx_ring[0].count * 1867 sizeof(struct e1000_rx_desc); 1868 adapter->clean_rx = e1000_clean_rx_irq; 1869 adapter->alloc_rx_buf = e1000_alloc_rx_buffers; 1870 } 1871 1872 /* disable receives while setting up the descriptors */ 1873 rctl = er32(RCTL); 1874 ew32(RCTL, rctl & ~E1000_RCTL_EN); 1875 1876 /* set the Receive Delay Timer Register */ 1877 ew32(RDTR, adapter->rx_int_delay); 1878 1879 if (hw->mac_type >= e1000_82540) { 1880 ew32(RADV, adapter->rx_abs_int_delay); 1881 if (adapter->itr_setting != 0) 1882 ew32(ITR, 1000000000 / (adapter->itr * 256)); 1883 } 1884 1885 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1886 * the Base and Length of the Rx Descriptor Ring 1887 */ 1888 switch (adapter->num_rx_queues) { 1889 case 1: 1890 default: 1891 rdba = adapter->rx_ring[0].dma; 1892 ew32(RDLEN, rdlen); 1893 ew32(RDBAH, (rdba >> 32)); 1894 ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); 1895 ew32(RDT, 0); 1896 ew32(RDH, 0); 1897 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? 1898 E1000_RDH : E1000_82542_RDH); 1899 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? 1900 E1000_RDT : E1000_82542_RDT); 1901 break; 1902 } 1903 1904 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 1905 if (hw->mac_type >= e1000_82543) { 1906 rxcsum = er32(RXCSUM); 1907 if (adapter->rx_csum) 1908 rxcsum |= E1000_RXCSUM_TUOFL; 1909 else 1910 /* don't need to clear IPPCSE as it defaults to 0 */ 1911 rxcsum &= ~E1000_RXCSUM_TUOFL; 1912 ew32(RXCSUM, rxcsum); 1913 } 1914 1915 /* Enable Receives */ 1916 ew32(RCTL, rctl | E1000_RCTL_EN); 1917 } 1918 1919 /** 1920 * e1000_free_tx_resources - Free Tx Resources per Queue 1921 * @adapter: board private structure 1922 * @tx_ring: Tx descriptor ring for a specific queue 1923 * 1924 * Free all transmit software resources 1925 **/ 1926 static void e1000_free_tx_resources(struct e1000_adapter *adapter, 1927 struct e1000_tx_ring *tx_ring) 1928 { 1929 struct pci_dev *pdev = adapter->pdev; 1930 1931 e1000_clean_tx_ring(adapter, tx_ring); 1932 1933 vfree(tx_ring->buffer_info); 1934 tx_ring->buffer_info = NULL; 1935 1936 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 1937 tx_ring->dma); 1938 1939 tx_ring->desc = NULL; 1940 } 1941 1942 /** 1943 * e1000_free_all_tx_resources - Free Tx Resources for All Queues 1944 * @adapter: board private structure 1945 * 1946 * Free all transmit software resources 1947 **/ 1948 void e1000_free_all_tx_resources(struct e1000_adapter *adapter) 1949 { 1950 int i; 1951 1952 for (i = 0; i < adapter->num_tx_queues; i++) 1953 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); 1954 } 1955 1956 static void 1957 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, 1958 struct e1000_tx_buffer *buffer_info, 1959 int budget) 1960 { 1961 if (buffer_info->dma) { 1962 if (buffer_info->mapped_as_page) 1963 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, 1964 buffer_info->length, DMA_TO_DEVICE); 1965 else 1966 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, 1967 buffer_info->length, 1968 DMA_TO_DEVICE); 1969 buffer_info->dma = 0; 1970 } 1971 if (buffer_info->skb) { 1972 napi_consume_skb(buffer_info->skb, budget); 1973 buffer_info->skb = NULL; 1974 } 1975 buffer_info->time_stamp = 0; 1976 /* buffer_info must be completely set up in the transmit path */ 1977 } 1978 1979 /** 1980 * e1000_clean_tx_ring - Free Tx Buffers 1981 * @adapter: board private structure 1982 * @tx_ring: ring to be cleaned 1983 **/ 1984 static void e1000_clean_tx_ring(struct e1000_adapter *adapter, 1985 struct e1000_tx_ring *tx_ring) 1986 { 1987 struct e1000_hw *hw = &adapter->hw; 1988 struct e1000_tx_buffer *buffer_info; 1989 unsigned long size; 1990 unsigned int i; 1991 1992 /* Free all the Tx ring sk_buffs */ 1993 1994 for (i = 0; i < tx_ring->count; i++) { 1995 buffer_info = &tx_ring->buffer_info[i]; 1996 e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0); 1997 } 1998 1999 netdev_reset_queue(adapter->netdev); 2000 size = sizeof(struct e1000_tx_buffer) * tx_ring->count; 2001 memset(tx_ring->buffer_info, 0, size); 2002 2003 /* Zero out the descriptor ring */ 2004 2005 memset(tx_ring->desc, 0, tx_ring->size); 2006 2007 tx_ring->next_to_use = 0; 2008 tx_ring->next_to_clean = 0; 2009 tx_ring->last_tx_tso = false; 2010 2011 writel(0, hw->hw_addr + tx_ring->tdh); 2012 writel(0, hw->hw_addr + tx_ring->tdt); 2013 } 2014 2015 /** 2016 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues 2017 * @adapter: board private structure 2018 **/ 2019 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) 2020 { 2021 int i; 2022 2023 for (i = 0; i < adapter->num_tx_queues; i++) 2024 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); 2025 } 2026 2027 /** 2028 * e1000_free_rx_resources - Free Rx Resources 2029 * @adapter: board private structure 2030 * @rx_ring: ring to clean the resources from 2031 * 2032 * Free all receive software resources 2033 **/ 2034 static void e1000_free_rx_resources(struct e1000_adapter *adapter, 2035 struct e1000_rx_ring *rx_ring) 2036 { 2037 struct pci_dev *pdev = adapter->pdev; 2038 2039 e1000_clean_rx_ring(adapter, rx_ring); 2040 2041 vfree(rx_ring->buffer_info); 2042 rx_ring->buffer_info = NULL; 2043 2044 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2045 rx_ring->dma); 2046 2047 rx_ring->desc = NULL; 2048 } 2049 2050 /** 2051 * e1000_free_all_rx_resources - Free Rx Resources for All Queues 2052 * @adapter: board private structure 2053 * 2054 * Free all receive software resources 2055 **/ 2056 void e1000_free_all_rx_resources(struct e1000_adapter *adapter) 2057 { 2058 int i; 2059 2060 for (i = 0; i < adapter->num_rx_queues; i++) 2061 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); 2062 } 2063 2064 #define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN) 2065 static unsigned int e1000_frag_len(const struct e1000_adapter *a) 2066 { 2067 return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) + 2068 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2069 } 2070 2071 static void *e1000_alloc_frag(const struct e1000_adapter *a) 2072 { 2073 unsigned int len = e1000_frag_len(a); 2074 u8 *data = netdev_alloc_frag(len); 2075 2076 if (likely(data)) 2077 data += E1000_HEADROOM; 2078 return data; 2079 } 2080 2081 /** 2082 * e1000_clean_rx_ring - Free Rx Buffers per Queue 2083 * @adapter: board private structure 2084 * @rx_ring: ring to free buffers from 2085 **/ 2086 static void e1000_clean_rx_ring(struct e1000_adapter *adapter, 2087 struct e1000_rx_ring *rx_ring) 2088 { 2089 struct e1000_hw *hw = &adapter->hw; 2090 struct e1000_rx_buffer *buffer_info; 2091 struct pci_dev *pdev = adapter->pdev; 2092 unsigned long size; 2093 unsigned int i; 2094 2095 /* Free all the Rx netfrags */ 2096 for (i = 0; i < rx_ring->count; i++) { 2097 buffer_info = &rx_ring->buffer_info[i]; 2098 if (adapter->clean_rx == e1000_clean_rx_irq) { 2099 if (buffer_info->dma) 2100 dma_unmap_single(&pdev->dev, buffer_info->dma, 2101 adapter->rx_buffer_len, 2102 DMA_FROM_DEVICE); 2103 if (buffer_info->rxbuf.data) { 2104 skb_free_frag(buffer_info->rxbuf.data); 2105 buffer_info->rxbuf.data = NULL; 2106 } 2107 } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) { 2108 if (buffer_info->dma) 2109 dma_unmap_page(&pdev->dev, buffer_info->dma, 2110 adapter->rx_buffer_len, 2111 DMA_FROM_DEVICE); 2112 if (buffer_info->rxbuf.page) { 2113 put_page(buffer_info->rxbuf.page); 2114 buffer_info->rxbuf.page = NULL; 2115 } 2116 } 2117 2118 buffer_info->dma = 0; 2119 } 2120 2121 /* there also may be some cached data from a chained receive */ 2122 napi_free_frags(&adapter->napi); 2123 rx_ring->rx_skb_top = NULL; 2124 2125 size = sizeof(struct e1000_rx_buffer) * rx_ring->count; 2126 memset(rx_ring->buffer_info, 0, size); 2127 2128 /* Zero out the descriptor ring */ 2129 memset(rx_ring->desc, 0, rx_ring->size); 2130 2131 rx_ring->next_to_clean = 0; 2132 rx_ring->next_to_use = 0; 2133 2134 writel(0, hw->hw_addr + rx_ring->rdh); 2135 writel(0, hw->hw_addr + rx_ring->rdt); 2136 } 2137 2138 /** 2139 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues 2140 * @adapter: board private structure 2141 **/ 2142 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) 2143 { 2144 int i; 2145 2146 for (i = 0; i < adapter->num_rx_queues; i++) 2147 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); 2148 } 2149 2150 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset 2151 * and memory write and invalidate disabled for certain operations 2152 */ 2153 static void e1000_enter_82542_rst(struct e1000_adapter *adapter) 2154 { 2155 struct e1000_hw *hw = &adapter->hw; 2156 struct net_device *netdev = adapter->netdev; 2157 u32 rctl; 2158 2159 e1000_pci_clear_mwi(hw); 2160 2161 rctl = er32(RCTL); 2162 rctl |= E1000_RCTL_RST; 2163 ew32(RCTL, rctl); 2164 E1000_WRITE_FLUSH(); 2165 mdelay(5); 2166 2167 if (netif_running(netdev)) 2168 e1000_clean_all_rx_rings(adapter); 2169 } 2170 2171 static void e1000_leave_82542_rst(struct e1000_adapter *adapter) 2172 { 2173 struct e1000_hw *hw = &adapter->hw; 2174 struct net_device *netdev = adapter->netdev; 2175 u32 rctl; 2176 2177 rctl = er32(RCTL); 2178 rctl &= ~E1000_RCTL_RST; 2179 ew32(RCTL, rctl); 2180 E1000_WRITE_FLUSH(); 2181 mdelay(5); 2182 2183 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) 2184 e1000_pci_set_mwi(hw); 2185 2186 if (netif_running(netdev)) { 2187 /* No need to loop, because 82542 supports only 1 queue */ 2188 struct e1000_rx_ring *ring = &adapter->rx_ring[0]; 2189 e1000_configure_rx(adapter); 2190 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); 2191 } 2192 } 2193 2194 /** 2195 * e1000_set_mac - Change the Ethernet Address of the NIC 2196 * @netdev: network interface device structure 2197 * @p: pointer to an address structure 2198 * 2199 * Returns 0 on success, negative on failure 2200 **/ 2201 static int e1000_set_mac(struct net_device *netdev, void *p) 2202 { 2203 struct e1000_adapter *adapter = netdev_priv(netdev); 2204 struct e1000_hw *hw = &adapter->hw; 2205 struct sockaddr *addr = p; 2206 2207 if (!is_valid_ether_addr(addr->sa_data)) 2208 return -EADDRNOTAVAIL; 2209 2210 /* 82542 2.0 needs to be in reset to write receive address registers */ 2211 2212 if (hw->mac_type == e1000_82542_rev2_0) 2213 e1000_enter_82542_rst(adapter); 2214 2215 eth_hw_addr_set(netdev, addr->sa_data); 2216 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); 2217 2218 e1000_rar_set(hw, hw->mac_addr, 0); 2219 2220 if (hw->mac_type == e1000_82542_rev2_0) 2221 e1000_leave_82542_rst(adapter); 2222 2223 return 0; 2224 } 2225 2226 /** 2227 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 2228 * @netdev: network interface device structure 2229 * 2230 * The set_rx_mode entry point is called whenever the unicast or multicast 2231 * address lists or the network interface flags are updated. This routine is 2232 * responsible for configuring the hardware for proper unicast, multicast, 2233 * promiscuous mode, and all-multi behavior. 2234 **/ 2235 static void e1000_set_rx_mode(struct net_device *netdev) 2236 { 2237 struct e1000_adapter *adapter = netdev_priv(netdev); 2238 struct e1000_hw *hw = &adapter->hw; 2239 struct netdev_hw_addr *ha; 2240 bool use_uc = false; 2241 u32 rctl; 2242 u32 hash_value; 2243 int i, rar_entries = E1000_RAR_ENTRIES; 2244 int mta_reg_count = E1000_NUM_MTA_REGISTERS; 2245 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); 2246 2247 if (!mcarray) 2248 return; 2249 2250 /* Check for Promiscuous and All Multicast modes */ 2251 2252 rctl = er32(RCTL); 2253 2254 if (netdev->flags & IFF_PROMISC) { 2255 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2256 rctl &= ~E1000_RCTL_VFE; 2257 } else { 2258 if (netdev->flags & IFF_ALLMULTI) 2259 rctl |= E1000_RCTL_MPE; 2260 else 2261 rctl &= ~E1000_RCTL_MPE; 2262 /* Enable VLAN filter if there is a VLAN */ 2263 if (e1000_vlan_used(adapter)) 2264 rctl |= E1000_RCTL_VFE; 2265 } 2266 2267 if (netdev_uc_count(netdev) > rar_entries - 1) { 2268 rctl |= E1000_RCTL_UPE; 2269 } else if (!(netdev->flags & IFF_PROMISC)) { 2270 rctl &= ~E1000_RCTL_UPE; 2271 use_uc = true; 2272 } 2273 2274 ew32(RCTL, rctl); 2275 2276 /* 82542 2.0 needs to be in reset to write receive address registers */ 2277 2278 if (hw->mac_type == e1000_82542_rev2_0) 2279 e1000_enter_82542_rst(adapter); 2280 2281 /* load the first 14 addresses into the exact filters 1-14. Unicast 2282 * addresses take precedence to avoid disabling unicast filtering 2283 * when possible. 2284 * 2285 * RAR 0 is used for the station MAC address 2286 * if there are not 14 addresses, go ahead and clear the filters 2287 */ 2288 i = 1; 2289 if (use_uc) 2290 netdev_for_each_uc_addr(ha, netdev) { 2291 if (i == rar_entries) 2292 break; 2293 e1000_rar_set(hw, ha->addr, i++); 2294 } 2295 2296 netdev_for_each_mc_addr(ha, netdev) { 2297 if (i == rar_entries) { 2298 /* load any remaining addresses into the hash table */ 2299 u32 hash_reg, hash_bit, mta; 2300 hash_value = e1000_hash_mc_addr(hw, ha->addr); 2301 hash_reg = (hash_value >> 5) & 0x7F; 2302 hash_bit = hash_value & 0x1F; 2303 mta = (1 << hash_bit); 2304 mcarray[hash_reg] |= mta; 2305 } else { 2306 e1000_rar_set(hw, ha->addr, i++); 2307 } 2308 } 2309 2310 for (; i < rar_entries; i++) { 2311 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); 2312 E1000_WRITE_FLUSH(); 2313 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); 2314 E1000_WRITE_FLUSH(); 2315 } 2316 2317 /* write the hash table completely, write from bottom to avoid 2318 * both stupid write combining chipsets, and flushing each write 2319 */ 2320 for (i = mta_reg_count - 1; i >= 0 ; i--) { 2321 /* If we are on an 82544 has an errata where writing odd 2322 * offsets overwrites the previous even offset, but writing 2323 * backwards over the range solves the issue by always 2324 * writing the odd offset first 2325 */ 2326 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]); 2327 } 2328 E1000_WRITE_FLUSH(); 2329 2330 if (hw->mac_type == e1000_82542_rev2_0) 2331 e1000_leave_82542_rst(adapter); 2332 2333 kfree(mcarray); 2334 } 2335 2336 /** 2337 * e1000_update_phy_info_task - get phy info 2338 * @work: work struct contained inside adapter struct 2339 * 2340 * Need to wait a few seconds after link up to get diagnostic information from 2341 * the phy 2342 */ 2343 static void e1000_update_phy_info_task(struct work_struct *work) 2344 { 2345 struct e1000_adapter *adapter = container_of(work, 2346 struct e1000_adapter, 2347 phy_info_task.work); 2348 2349 e1000_phy_get_info(&adapter->hw, &adapter->phy_info); 2350 } 2351 2352 /** 2353 * e1000_82547_tx_fifo_stall_task - task to complete work 2354 * @work: work struct contained inside adapter struct 2355 **/ 2356 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) 2357 { 2358 struct e1000_adapter *adapter = container_of(work, 2359 struct e1000_adapter, 2360 fifo_stall_task.work); 2361 struct e1000_hw *hw = &adapter->hw; 2362 struct net_device *netdev = adapter->netdev; 2363 u32 tctl; 2364 2365 if (atomic_read(&adapter->tx_fifo_stall)) { 2366 if ((er32(TDT) == er32(TDH)) && 2367 (er32(TDFT) == er32(TDFH)) && 2368 (er32(TDFTS) == er32(TDFHS))) { 2369 tctl = er32(TCTL); 2370 ew32(TCTL, tctl & ~E1000_TCTL_EN); 2371 ew32(TDFT, adapter->tx_head_addr); 2372 ew32(TDFH, adapter->tx_head_addr); 2373 ew32(TDFTS, adapter->tx_head_addr); 2374 ew32(TDFHS, adapter->tx_head_addr); 2375 ew32(TCTL, tctl); 2376 E1000_WRITE_FLUSH(); 2377 2378 adapter->tx_fifo_head = 0; 2379 atomic_set(&adapter->tx_fifo_stall, 0); 2380 netif_wake_queue(netdev); 2381 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) { 2382 schedule_delayed_work(&adapter->fifo_stall_task, 1); 2383 } 2384 } 2385 } 2386 2387 bool e1000_has_link(struct e1000_adapter *adapter) 2388 { 2389 struct e1000_hw *hw = &adapter->hw; 2390 bool link_active = false; 2391 2392 /* get_link_status is set on LSC (link status) interrupt or rx 2393 * sequence error interrupt (except on intel ce4100). 2394 * get_link_status will stay false until the 2395 * e1000_check_for_link establishes link for copper adapters 2396 * ONLY 2397 */ 2398 switch (hw->media_type) { 2399 case e1000_media_type_copper: 2400 if (hw->mac_type == e1000_ce4100) 2401 hw->get_link_status = 1; 2402 if (hw->get_link_status) { 2403 e1000_check_for_link(hw); 2404 link_active = !hw->get_link_status; 2405 } else { 2406 link_active = true; 2407 } 2408 break; 2409 case e1000_media_type_fiber: 2410 e1000_check_for_link(hw); 2411 link_active = !!(er32(STATUS) & E1000_STATUS_LU); 2412 break; 2413 case e1000_media_type_internal_serdes: 2414 e1000_check_for_link(hw); 2415 link_active = hw->serdes_has_link; 2416 break; 2417 default: 2418 break; 2419 } 2420 2421 return link_active; 2422 } 2423 2424 /** 2425 * e1000_watchdog - work function 2426 * @work: work struct contained inside adapter struct 2427 **/ 2428 static void e1000_watchdog(struct work_struct *work) 2429 { 2430 struct e1000_adapter *adapter = container_of(work, 2431 struct e1000_adapter, 2432 watchdog_task.work); 2433 struct e1000_hw *hw = &adapter->hw; 2434 struct net_device *netdev = adapter->netdev; 2435 struct e1000_tx_ring *txdr = adapter->tx_ring; 2436 u32 link, tctl; 2437 2438 link = e1000_has_link(adapter); 2439 if ((netif_carrier_ok(netdev)) && link) 2440 goto link_up; 2441 2442 if (link) { 2443 if (!netif_carrier_ok(netdev)) { 2444 u32 ctrl; 2445 /* update snapshot of PHY registers on LSC */ 2446 e1000_get_speed_and_duplex(hw, 2447 &adapter->link_speed, 2448 &adapter->link_duplex); 2449 2450 ctrl = er32(CTRL); 2451 pr_info("%s NIC Link is Up %d Mbps %s, " 2452 "Flow Control: %s\n", 2453 netdev->name, 2454 adapter->link_speed, 2455 adapter->link_duplex == FULL_DUPLEX ? 2456 "Full Duplex" : "Half Duplex", 2457 ((ctrl & E1000_CTRL_TFCE) && (ctrl & 2458 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & 2459 E1000_CTRL_RFCE) ? "RX" : ((ctrl & 2460 E1000_CTRL_TFCE) ? "TX" : "None"))); 2461 2462 /* adjust timeout factor according to speed/duplex */ 2463 adapter->tx_timeout_factor = 1; 2464 switch (adapter->link_speed) { 2465 case SPEED_10: 2466 adapter->tx_timeout_factor = 16; 2467 break; 2468 case SPEED_100: 2469 /* maybe add some timeout factor ? */ 2470 break; 2471 } 2472 2473 /* enable transmits in the hardware */ 2474 tctl = er32(TCTL); 2475 tctl |= E1000_TCTL_EN; 2476 ew32(TCTL, tctl); 2477 2478 netif_carrier_on(netdev); 2479 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2480 schedule_delayed_work(&adapter->phy_info_task, 2481 2 * HZ); 2482 adapter->smartspeed = 0; 2483 } 2484 } else { 2485 if (netif_carrier_ok(netdev)) { 2486 adapter->link_speed = 0; 2487 adapter->link_duplex = 0; 2488 pr_info("%s NIC Link is Down\n", 2489 netdev->name); 2490 netif_carrier_off(netdev); 2491 2492 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2493 schedule_delayed_work(&adapter->phy_info_task, 2494 2 * HZ); 2495 } 2496 2497 e1000_smartspeed(adapter); 2498 } 2499 2500 link_up: 2501 e1000_update_stats(adapter); 2502 2503 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 2504 adapter->tpt_old = adapter->stats.tpt; 2505 hw->collision_delta = adapter->stats.colc - adapter->colc_old; 2506 adapter->colc_old = adapter->stats.colc; 2507 2508 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; 2509 adapter->gorcl_old = adapter->stats.gorcl; 2510 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; 2511 adapter->gotcl_old = adapter->stats.gotcl; 2512 2513 e1000_update_adaptive(hw); 2514 2515 if (!netif_carrier_ok(netdev)) { 2516 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { 2517 /* We've lost link, so the controller stops DMA, 2518 * but we've got queued Tx work that's never going 2519 * to get done, so reset controller to flush Tx. 2520 * (Do the reset outside of interrupt context). 2521 */ 2522 adapter->tx_timeout_count++; 2523 schedule_work(&adapter->reset_task); 2524 /* exit immediately since reset is imminent */ 2525 return; 2526 } 2527 } 2528 2529 /* Simple mode for Interrupt Throttle Rate (ITR) */ 2530 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { 2531 /* Symmetric Tx/Rx gets a reduced ITR=2000; 2532 * Total asymmetrical Tx or Rx gets ITR=8000; 2533 * everyone else is between 2000-8000. 2534 */ 2535 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; 2536 u32 dif = (adapter->gotcl > adapter->gorcl ? 2537 adapter->gotcl - adapter->gorcl : 2538 adapter->gorcl - adapter->gotcl) / 10000; 2539 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 2540 2541 ew32(ITR, 1000000000 / (itr * 256)); 2542 } 2543 2544 /* Cause software interrupt to ensure rx ring is cleaned */ 2545 ew32(ICS, E1000_ICS_RXDMT0); 2546 2547 /* Force detection of hung controller every watchdog period */ 2548 adapter->detect_tx_hung = true; 2549 2550 /* Reschedule the task */ 2551 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2552 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); 2553 } 2554 2555 enum latency_range { 2556 lowest_latency = 0, 2557 low_latency = 1, 2558 bulk_latency = 2, 2559 latency_invalid = 255 2560 }; 2561 2562 /** 2563 * e1000_update_itr - update the dynamic ITR value based on statistics 2564 * @adapter: pointer to adapter 2565 * @itr_setting: current adapter->itr 2566 * @packets: the number of packets during this measurement interval 2567 * @bytes: the number of bytes during this measurement interval 2568 * 2569 * Stores a new ITR value based on packets and byte 2570 * counts during the last interrupt. The advantage of per interrupt 2571 * computation is faster updates and more accurate ITR for the current 2572 * traffic pattern. Constants in this function were computed 2573 * based on theoretical maximum wire speed and thresholds were set based 2574 * on testing data as well as attempting to minimize response time 2575 * while increasing bulk throughput. 2576 * this functionality is controlled by the InterruptThrottleRate module 2577 * parameter (see e1000_param.c) 2578 **/ 2579 static unsigned int e1000_update_itr(struct e1000_adapter *adapter, 2580 u16 itr_setting, int packets, int bytes) 2581 { 2582 unsigned int retval = itr_setting; 2583 struct e1000_hw *hw = &adapter->hw; 2584 2585 if (unlikely(hw->mac_type < e1000_82540)) 2586 goto update_itr_done; 2587 2588 if (packets == 0) 2589 goto update_itr_done; 2590 2591 switch (itr_setting) { 2592 case lowest_latency: 2593 /* jumbo frames get bulk treatment*/ 2594 if (bytes/packets > 8000) 2595 retval = bulk_latency; 2596 else if ((packets < 5) && (bytes > 512)) 2597 retval = low_latency; 2598 break; 2599 case low_latency: /* 50 usec aka 20000 ints/s */ 2600 if (bytes > 10000) { 2601 /* jumbo frames need bulk latency setting */ 2602 if (bytes/packets > 8000) 2603 retval = bulk_latency; 2604 else if ((packets < 10) || ((bytes/packets) > 1200)) 2605 retval = bulk_latency; 2606 else if ((packets > 35)) 2607 retval = lowest_latency; 2608 } else if (bytes/packets > 2000) 2609 retval = bulk_latency; 2610 else if (packets <= 2 && bytes < 512) 2611 retval = lowest_latency; 2612 break; 2613 case bulk_latency: /* 250 usec aka 4000 ints/s */ 2614 if (bytes > 25000) { 2615 if (packets > 35) 2616 retval = low_latency; 2617 } else if (bytes < 6000) { 2618 retval = low_latency; 2619 } 2620 break; 2621 } 2622 2623 update_itr_done: 2624 return retval; 2625 } 2626 2627 static void e1000_set_itr(struct e1000_adapter *adapter) 2628 { 2629 struct e1000_hw *hw = &adapter->hw; 2630 u16 current_itr; 2631 u32 new_itr = adapter->itr; 2632 2633 if (unlikely(hw->mac_type < e1000_82540)) 2634 return; 2635 2636 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 2637 if (unlikely(adapter->link_speed != SPEED_1000)) { 2638 new_itr = 4000; 2639 goto set_itr_now; 2640 } 2641 2642 adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr, 2643 adapter->total_tx_packets, 2644 adapter->total_tx_bytes); 2645 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2646 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) 2647 adapter->tx_itr = low_latency; 2648 2649 adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr, 2650 adapter->total_rx_packets, 2651 adapter->total_rx_bytes); 2652 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2653 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) 2654 adapter->rx_itr = low_latency; 2655 2656 current_itr = max(adapter->rx_itr, adapter->tx_itr); 2657 2658 switch (current_itr) { 2659 /* counts and packets in update_itr are dependent on these numbers */ 2660 case lowest_latency: 2661 new_itr = 70000; 2662 break; 2663 case low_latency: 2664 new_itr = 20000; /* aka hwitr = ~200 */ 2665 break; 2666 case bulk_latency: 2667 new_itr = 4000; 2668 break; 2669 default: 2670 break; 2671 } 2672 2673 set_itr_now: 2674 if (new_itr != adapter->itr) { 2675 /* this attempts to bias the interrupt rate towards Bulk 2676 * by adding intermediate steps when interrupt rate is 2677 * increasing 2678 */ 2679 new_itr = new_itr > adapter->itr ? 2680 min(adapter->itr + (new_itr >> 2), new_itr) : 2681 new_itr; 2682 adapter->itr = new_itr; 2683 ew32(ITR, 1000000000 / (new_itr * 256)); 2684 } 2685 } 2686 2687 #define E1000_TX_FLAGS_CSUM 0x00000001 2688 #define E1000_TX_FLAGS_VLAN 0x00000002 2689 #define E1000_TX_FLAGS_TSO 0x00000004 2690 #define E1000_TX_FLAGS_IPV4 0x00000008 2691 #define E1000_TX_FLAGS_NO_FCS 0x00000010 2692 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 2693 #define E1000_TX_FLAGS_VLAN_SHIFT 16 2694 2695 static int e1000_tso(struct e1000_adapter *adapter, 2696 struct e1000_tx_ring *tx_ring, struct sk_buff *skb, 2697 __be16 protocol) 2698 { 2699 struct e1000_context_desc *context_desc; 2700 struct e1000_tx_buffer *buffer_info; 2701 unsigned int i; 2702 u32 cmd_length = 0; 2703 u16 ipcse = 0, tucse, mss; 2704 u8 ipcss, ipcso, tucss, tucso, hdr_len; 2705 2706 if (skb_is_gso(skb)) { 2707 int err; 2708 2709 err = skb_cow_head(skb, 0); 2710 if (err < 0) 2711 return err; 2712 2713 hdr_len = skb_tcp_all_headers(skb); 2714 mss = skb_shinfo(skb)->gso_size; 2715 if (protocol == htons(ETH_P_IP)) { 2716 struct iphdr *iph = ip_hdr(skb); 2717 iph->tot_len = 0; 2718 iph->check = 0; 2719 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 2720 iph->daddr, 0, 2721 IPPROTO_TCP, 2722 0); 2723 cmd_length = E1000_TXD_CMD_IP; 2724 ipcse = skb_transport_offset(skb) - 1; 2725 } else if (skb_is_gso_v6(skb)) { 2726 tcp_v6_gso_csum_prep(skb); 2727 ipcse = 0; 2728 } 2729 ipcss = skb_network_offset(skb); 2730 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; 2731 tucss = skb_transport_offset(skb); 2732 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; 2733 tucse = 0; 2734 2735 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 2736 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 2737 2738 i = tx_ring->next_to_use; 2739 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2740 buffer_info = &tx_ring->buffer_info[i]; 2741 2742 context_desc->lower_setup.ip_fields.ipcss = ipcss; 2743 context_desc->lower_setup.ip_fields.ipcso = ipcso; 2744 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); 2745 context_desc->upper_setup.tcp_fields.tucss = tucss; 2746 context_desc->upper_setup.tcp_fields.tucso = tucso; 2747 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); 2748 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); 2749 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 2750 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 2751 2752 buffer_info->time_stamp = jiffies; 2753 buffer_info->next_to_watch = i; 2754 2755 if (++i == tx_ring->count) 2756 i = 0; 2757 2758 tx_ring->next_to_use = i; 2759 2760 return true; 2761 } 2762 return false; 2763 } 2764 2765 static bool e1000_tx_csum(struct e1000_adapter *adapter, 2766 struct e1000_tx_ring *tx_ring, struct sk_buff *skb, 2767 __be16 protocol) 2768 { 2769 struct e1000_context_desc *context_desc; 2770 struct e1000_tx_buffer *buffer_info; 2771 unsigned int i; 2772 u8 css; 2773 u32 cmd_len = E1000_TXD_CMD_DEXT; 2774 2775 if (skb->ip_summed != CHECKSUM_PARTIAL) 2776 return false; 2777 2778 switch (protocol) { 2779 case cpu_to_be16(ETH_P_IP): 2780 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2781 cmd_len |= E1000_TXD_CMD_TCP; 2782 break; 2783 case cpu_to_be16(ETH_P_IPV6): 2784 /* XXX not handling all IPV6 headers */ 2785 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2786 cmd_len |= E1000_TXD_CMD_TCP; 2787 break; 2788 default: 2789 if (unlikely(net_ratelimit())) 2790 e_warn(drv, "checksum_partial proto=%x!\n", 2791 skb->protocol); 2792 break; 2793 } 2794 2795 css = skb_checksum_start_offset(skb); 2796 2797 i = tx_ring->next_to_use; 2798 buffer_info = &tx_ring->buffer_info[i]; 2799 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2800 2801 context_desc->lower_setup.ip_config = 0; 2802 context_desc->upper_setup.tcp_fields.tucss = css; 2803 context_desc->upper_setup.tcp_fields.tucso = 2804 css + skb->csum_offset; 2805 context_desc->upper_setup.tcp_fields.tucse = 0; 2806 context_desc->tcp_seg_setup.data = 0; 2807 context_desc->cmd_and_length = cpu_to_le32(cmd_len); 2808 2809 buffer_info->time_stamp = jiffies; 2810 buffer_info->next_to_watch = i; 2811 2812 if (unlikely(++i == tx_ring->count)) 2813 i = 0; 2814 2815 tx_ring->next_to_use = i; 2816 2817 return true; 2818 } 2819 2820 #define E1000_MAX_TXD_PWR 12 2821 #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR) 2822 2823 static int e1000_tx_map(struct e1000_adapter *adapter, 2824 struct e1000_tx_ring *tx_ring, 2825 struct sk_buff *skb, unsigned int first, 2826 unsigned int max_per_txd, unsigned int nr_frags, 2827 unsigned int mss) 2828 { 2829 struct e1000_hw *hw = &adapter->hw; 2830 struct pci_dev *pdev = adapter->pdev; 2831 struct e1000_tx_buffer *buffer_info; 2832 unsigned int len = skb_headlen(skb); 2833 unsigned int offset = 0, size, count = 0, i; 2834 unsigned int f, bytecount, segs; 2835 2836 i = tx_ring->next_to_use; 2837 2838 while (len) { 2839 buffer_info = &tx_ring->buffer_info[i]; 2840 size = min(len, max_per_txd); 2841 /* Workaround for Controller erratum -- 2842 * descriptor for non-tso packet in a linear SKB that follows a 2843 * tso gets written back prematurely before the data is fully 2844 * DMA'd to the controller 2845 */ 2846 if (!skb->data_len && tx_ring->last_tx_tso && 2847 !skb_is_gso(skb)) { 2848 tx_ring->last_tx_tso = false; 2849 size -= 4; 2850 } 2851 2852 /* Workaround for premature desc write-backs 2853 * in TSO mode. Append 4-byte sentinel desc 2854 */ 2855 if (unlikely(mss && !nr_frags && size == len && size > 8)) 2856 size -= 4; 2857 /* work-around for errata 10 and it applies 2858 * to all controllers in PCI-X mode 2859 * The fix is to make sure that the first descriptor of a 2860 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes 2861 */ 2862 if (unlikely((hw->bus_type == e1000_bus_type_pcix) && 2863 (size > 2015) && count == 0)) 2864 size = 2015; 2865 2866 /* Workaround for potential 82544 hang in PCI-X. Avoid 2867 * terminating buffers within evenly-aligned dwords. 2868 */ 2869 if (unlikely(adapter->pcix_82544 && 2870 !((unsigned long)(skb->data + offset + size - 1) & 4) && 2871 size > 4)) 2872 size -= 4; 2873 2874 buffer_info->length = size; 2875 /* set time_stamp *before* dma to help avoid a possible race */ 2876 buffer_info->time_stamp = jiffies; 2877 buffer_info->mapped_as_page = false; 2878 buffer_info->dma = dma_map_single(&pdev->dev, 2879 skb->data + offset, 2880 size, DMA_TO_DEVICE); 2881 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2882 goto dma_error; 2883 buffer_info->next_to_watch = i; 2884 2885 len -= size; 2886 offset += size; 2887 count++; 2888 if (len) { 2889 i++; 2890 if (unlikely(i == tx_ring->count)) 2891 i = 0; 2892 } 2893 } 2894 2895 for (f = 0; f < nr_frags; f++) { 2896 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; 2897 2898 len = skb_frag_size(frag); 2899 offset = 0; 2900 2901 while (len) { 2902 unsigned long bufend; 2903 i++; 2904 if (unlikely(i == tx_ring->count)) 2905 i = 0; 2906 2907 buffer_info = &tx_ring->buffer_info[i]; 2908 size = min(len, max_per_txd); 2909 /* Workaround for premature desc write-backs 2910 * in TSO mode. Append 4-byte sentinel desc 2911 */ 2912 if (unlikely(mss && f == (nr_frags-1) && 2913 size == len && size > 8)) 2914 size -= 4; 2915 /* Workaround for potential 82544 hang in PCI-X. 2916 * Avoid terminating buffers within evenly-aligned 2917 * dwords. 2918 */ 2919 bufend = (unsigned long) 2920 page_to_phys(skb_frag_page(frag)); 2921 bufend += offset + size - 1; 2922 if (unlikely(adapter->pcix_82544 && 2923 !(bufend & 4) && 2924 size > 4)) 2925 size -= 4; 2926 2927 buffer_info->length = size; 2928 buffer_info->time_stamp = jiffies; 2929 buffer_info->mapped_as_page = true; 2930 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 2931 offset, size, DMA_TO_DEVICE); 2932 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2933 goto dma_error; 2934 buffer_info->next_to_watch = i; 2935 2936 len -= size; 2937 offset += size; 2938 count++; 2939 } 2940 } 2941 2942 segs = skb_shinfo(skb)->gso_segs ?: 1; 2943 /* multiply data chunks by size of headers */ 2944 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; 2945 2946 tx_ring->buffer_info[i].skb = skb; 2947 tx_ring->buffer_info[i].segs = segs; 2948 tx_ring->buffer_info[i].bytecount = bytecount; 2949 tx_ring->buffer_info[first].next_to_watch = i; 2950 2951 return count; 2952 2953 dma_error: 2954 dev_err(&pdev->dev, "TX DMA map failed\n"); 2955 buffer_info->dma = 0; 2956 if (count) 2957 count--; 2958 2959 while (count--) { 2960 if (i == 0) 2961 i += tx_ring->count; 2962 i--; 2963 buffer_info = &tx_ring->buffer_info[i]; 2964 e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0); 2965 } 2966 2967 return 0; 2968 } 2969 2970 static void e1000_tx_queue(struct e1000_adapter *adapter, 2971 struct e1000_tx_ring *tx_ring, int tx_flags, 2972 int count) 2973 { 2974 struct e1000_tx_desc *tx_desc = NULL; 2975 struct e1000_tx_buffer *buffer_info; 2976 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 2977 unsigned int i; 2978 2979 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { 2980 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 2981 E1000_TXD_CMD_TSE; 2982 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2983 2984 if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) 2985 txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2986 } 2987 2988 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { 2989 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 2990 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2991 } 2992 2993 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { 2994 txd_lower |= E1000_TXD_CMD_VLE; 2995 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); 2996 } 2997 2998 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) 2999 txd_lower &= ~(E1000_TXD_CMD_IFCS); 3000 3001 i = tx_ring->next_to_use; 3002 3003 while (count--) { 3004 buffer_info = &tx_ring->buffer_info[i]; 3005 tx_desc = E1000_TX_DESC(*tx_ring, i); 3006 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 3007 tx_desc->lower.data = 3008 cpu_to_le32(txd_lower | buffer_info->length); 3009 tx_desc->upper.data = cpu_to_le32(txd_upper); 3010 if (unlikely(++i == tx_ring->count)) 3011 i = 0; 3012 } 3013 3014 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 3015 3016 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ 3017 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) 3018 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); 3019 3020 /* Force memory writes to complete before letting h/w 3021 * know there are new descriptors to fetch. (Only 3022 * applicable for weak-ordered memory model archs, 3023 * such as IA-64). 3024 */ 3025 dma_wmb(); 3026 3027 tx_ring->next_to_use = i; 3028 } 3029 3030 /* 82547 workaround to avoid controller hang in half-duplex environment. 3031 * The workaround is to avoid queuing a large packet that would span 3032 * the internal Tx FIFO ring boundary by notifying the stack to resend 3033 * the packet at a later time. This gives the Tx FIFO an opportunity to 3034 * flush all packets. When that occurs, we reset the Tx FIFO pointers 3035 * to the beginning of the Tx FIFO. 3036 */ 3037 3038 #define E1000_FIFO_HDR 0x10 3039 #define E1000_82547_PAD_LEN 0x3E0 3040 3041 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, 3042 struct sk_buff *skb) 3043 { 3044 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; 3045 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; 3046 3047 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); 3048 3049 if (adapter->link_duplex != HALF_DUPLEX) 3050 goto no_fifo_stall_required; 3051 3052 if (atomic_read(&adapter->tx_fifo_stall)) 3053 return 1; 3054 3055 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { 3056 atomic_set(&adapter->tx_fifo_stall, 1); 3057 return 1; 3058 } 3059 3060 no_fifo_stall_required: 3061 adapter->tx_fifo_head += skb_fifo_len; 3062 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) 3063 adapter->tx_fifo_head -= adapter->tx_fifo_size; 3064 return 0; 3065 } 3066 3067 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) 3068 { 3069 struct e1000_adapter *adapter = netdev_priv(netdev); 3070 struct e1000_tx_ring *tx_ring = adapter->tx_ring; 3071 3072 netif_stop_queue(netdev); 3073 /* Herbert's original patch had: 3074 * smp_mb__after_netif_stop_queue(); 3075 * but since that doesn't exist yet, just open code it. 3076 */ 3077 smp_mb(); 3078 3079 /* We need to check again in a case another CPU has just 3080 * made room available. 3081 */ 3082 if (likely(E1000_DESC_UNUSED(tx_ring) < size)) 3083 return -EBUSY; 3084 3085 /* A reprieve! */ 3086 netif_start_queue(netdev); 3087 ++adapter->restart_queue; 3088 return 0; 3089 } 3090 3091 static int e1000_maybe_stop_tx(struct net_device *netdev, 3092 struct e1000_tx_ring *tx_ring, int size) 3093 { 3094 if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) 3095 return 0; 3096 return __e1000_maybe_stop_tx(netdev, size); 3097 } 3098 3099 #define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X)) 3100 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 3101 struct net_device *netdev) 3102 { 3103 struct e1000_adapter *adapter = netdev_priv(netdev); 3104 struct e1000_hw *hw = &adapter->hw; 3105 struct e1000_tx_ring *tx_ring; 3106 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; 3107 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 3108 unsigned int tx_flags = 0; 3109 unsigned int len = skb_headlen(skb); 3110 unsigned int nr_frags; 3111 unsigned int mss; 3112 int count = 0; 3113 int tso; 3114 unsigned int f; 3115 __be16 protocol = vlan_get_protocol(skb); 3116 3117 /* This goes back to the question of how to logically map a Tx queue 3118 * to a flow. Right now, performance is impacted slightly negatively 3119 * if using multiple Tx queues. If the stack breaks away from a 3120 * single qdisc implementation, we can look at this again. 3121 */ 3122 tx_ring = adapter->tx_ring; 3123 3124 /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN, 3125 * packets may get corrupted during padding by HW. 3126 * To WA this issue, pad all small packets manually. 3127 */ 3128 if (eth_skb_pad(skb)) 3129 return NETDEV_TX_OK; 3130 3131 mss = skb_shinfo(skb)->gso_size; 3132 /* The controller does a simple calculation to 3133 * make sure there is enough room in the FIFO before 3134 * initiating the DMA for each buffer. The calc is: 3135 * 4 = ceil(buffer len/mss). To make sure we don't 3136 * overrun the FIFO, adjust the max buffer len if mss 3137 * drops. 3138 */ 3139 if (mss) { 3140 u8 hdr_len; 3141 max_per_txd = min(mss << 2, max_per_txd); 3142 max_txd_pwr = fls(max_per_txd) - 1; 3143 3144 hdr_len = skb_tcp_all_headers(skb); 3145 if (skb->data_len && hdr_len == len) { 3146 switch (hw->mac_type) { 3147 case e1000_82544: { 3148 unsigned int pull_size; 3149 3150 /* Make sure we have room to chop off 4 bytes, 3151 * and that the end alignment will work out to 3152 * this hardware's requirements 3153 * NOTE: this is a TSO only workaround 3154 * if end byte alignment not correct move us 3155 * into the next dword 3156 */ 3157 if ((unsigned long)(skb_tail_pointer(skb) - 1) 3158 & 4) 3159 break; 3160 pull_size = min((unsigned int)4, skb->data_len); 3161 if (!__pskb_pull_tail(skb, pull_size)) { 3162 e_err(drv, "__pskb_pull_tail " 3163 "failed.\n"); 3164 dev_kfree_skb_any(skb); 3165 return NETDEV_TX_OK; 3166 } 3167 len = skb_headlen(skb); 3168 break; 3169 } 3170 default: 3171 /* do nothing */ 3172 break; 3173 } 3174 } 3175 } 3176 3177 /* reserve a descriptor for the offload context */ 3178 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) 3179 count++; 3180 count++; 3181 3182 /* Controller Erratum workaround */ 3183 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) 3184 count++; 3185 3186 count += TXD_USE_COUNT(len, max_txd_pwr); 3187 3188 if (adapter->pcix_82544) 3189 count++; 3190 3191 /* work-around for errata 10 and it applies to all controllers 3192 * in PCI-X mode, so add one more descriptor to the count 3193 */ 3194 if (unlikely((hw->bus_type == e1000_bus_type_pcix) && 3195 (len > 2015))) 3196 count++; 3197 3198 nr_frags = skb_shinfo(skb)->nr_frags; 3199 for (f = 0; f < nr_frags; f++) 3200 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), 3201 max_txd_pwr); 3202 if (adapter->pcix_82544) 3203 count += nr_frags; 3204 3205 /* need: count + 2 desc gap to keep tail from touching 3206 * head, otherwise try next time 3207 */ 3208 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) 3209 return NETDEV_TX_BUSY; 3210 3211 if (unlikely((hw->mac_type == e1000_82547) && 3212 (e1000_82547_fifo_workaround(adapter, skb)))) { 3213 netif_stop_queue(netdev); 3214 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3215 schedule_delayed_work(&adapter->fifo_stall_task, 1); 3216 return NETDEV_TX_BUSY; 3217 } 3218 3219 if (skb_vlan_tag_present(skb)) { 3220 tx_flags |= E1000_TX_FLAGS_VLAN; 3221 tx_flags |= (skb_vlan_tag_get(skb) << 3222 E1000_TX_FLAGS_VLAN_SHIFT); 3223 } 3224 3225 first = tx_ring->next_to_use; 3226 3227 tso = e1000_tso(adapter, tx_ring, skb, protocol); 3228 if (tso < 0) { 3229 dev_kfree_skb_any(skb); 3230 return NETDEV_TX_OK; 3231 } 3232 3233 if (likely(tso)) { 3234 if (likely(hw->mac_type != e1000_82544)) 3235 tx_ring->last_tx_tso = true; 3236 tx_flags |= E1000_TX_FLAGS_TSO; 3237 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol))) 3238 tx_flags |= E1000_TX_FLAGS_CSUM; 3239 3240 if (protocol == htons(ETH_P_IP)) 3241 tx_flags |= E1000_TX_FLAGS_IPV4; 3242 3243 if (unlikely(skb->no_fcs)) 3244 tx_flags |= E1000_TX_FLAGS_NO_FCS; 3245 3246 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, 3247 nr_frags, mss); 3248 3249 if (count) { 3250 /* The descriptors needed is higher than other Intel drivers 3251 * due to a number of workarounds. The breakdown is below: 3252 * Data descriptors: MAX_SKB_FRAGS + 1 3253 * Context Descriptor: 1 3254 * Keep head from touching tail: 2 3255 * Workarounds: 3 3256 */ 3257 int desc_needed = MAX_SKB_FRAGS + 7; 3258 3259 netdev_sent_queue(netdev, skb->len); 3260 skb_tx_timestamp(skb); 3261 3262 e1000_tx_queue(adapter, tx_ring, tx_flags, count); 3263 3264 /* 82544 potentially requires twice as many data descriptors 3265 * in order to guarantee buffers don't end on evenly-aligned 3266 * dwords 3267 */ 3268 if (adapter->pcix_82544) 3269 desc_needed += MAX_SKB_FRAGS + 1; 3270 3271 /* Make sure there is space in the ring for the next send. */ 3272 e1000_maybe_stop_tx(netdev, tx_ring, desc_needed); 3273 3274 if (!netdev_xmit_more() || 3275 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { 3276 writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt); 3277 } 3278 } else { 3279 dev_kfree_skb_any(skb); 3280 tx_ring->buffer_info[first].time_stamp = 0; 3281 tx_ring->next_to_use = first; 3282 } 3283 3284 return NETDEV_TX_OK; 3285 } 3286 3287 #define NUM_REGS 38 /* 1 based count */ 3288 static void e1000_regdump(struct e1000_adapter *adapter) 3289 { 3290 struct e1000_hw *hw = &adapter->hw; 3291 u32 regs[NUM_REGS]; 3292 u32 *regs_buff = regs; 3293 int i = 0; 3294 3295 static const char * const reg_name[] = { 3296 "CTRL", "STATUS", 3297 "RCTL", "RDLEN", "RDH", "RDT", "RDTR", 3298 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT", 3299 "TIDV", "TXDCTL", "TADV", "TARC0", 3300 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1", 3301 "TXDCTL1", "TARC1", 3302 "CTRL_EXT", "ERT", "RDBAL", "RDBAH", 3303 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC", 3304 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC" 3305 }; 3306 3307 regs_buff[0] = er32(CTRL); 3308 regs_buff[1] = er32(STATUS); 3309 3310 regs_buff[2] = er32(RCTL); 3311 regs_buff[3] = er32(RDLEN); 3312 regs_buff[4] = er32(RDH); 3313 regs_buff[5] = er32(RDT); 3314 regs_buff[6] = er32(RDTR); 3315 3316 regs_buff[7] = er32(TCTL); 3317 regs_buff[8] = er32(TDBAL); 3318 regs_buff[9] = er32(TDBAH); 3319 regs_buff[10] = er32(TDLEN); 3320 regs_buff[11] = er32(TDH); 3321 regs_buff[12] = er32(TDT); 3322 regs_buff[13] = er32(TIDV); 3323 regs_buff[14] = er32(TXDCTL); 3324 regs_buff[15] = er32(TADV); 3325 regs_buff[16] = er32(TARC0); 3326 3327 regs_buff[17] = er32(TDBAL1); 3328 regs_buff[18] = er32(TDBAH1); 3329 regs_buff[19] = er32(TDLEN1); 3330 regs_buff[20] = er32(TDH1); 3331 regs_buff[21] = er32(TDT1); 3332 regs_buff[22] = er32(TXDCTL1); 3333 regs_buff[23] = er32(TARC1); 3334 regs_buff[24] = er32(CTRL_EXT); 3335 regs_buff[25] = er32(ERT); 3336 regs_buff[26] = er32(RDBAL0); 3337 regs_buff[27] = er32(RDBAH0); 3338 regs_buff[28] = er32(TDFH); 3339 regs_buff[29] = er32(TDFT); 3340 regs_buff[30] = er32(TDFHS); 3341 regs_buff[31] = er32(TDFTS); 3342 regs_buff[32] = er32(TDFPC); 3343 regs_buff[33] = er32(RDFH); 3344 regs_buff[34] = er32(RDFT); 3345 regs_buff[35] = er32(RDFHS); 3346 regs_buff[36] = er32(RDFTS); 3347 regs_buff[37] = er32(RDFPC); 3348 3349 pr_info("Register dump\n"); 3350 for (i = 0; i < NUM_REGS; i++) 3351 pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]); 3352 } 3353 3354 /* 3355 * e1000_dump: Print registers, tx ring and rx ring 3356 */ 3357 static void e1000_dump(struct e1000_adapter *adapter) 3358 { 3359 /* this code doesn't handle multiple rings */ 3360 struct e1000_tx_ring *tx_ring = adapter->tx_ring; 3361 struct e1000_rx_ring *rx_ring = adapter->rx_ring; 3362 int i; 3363 3364 if (!netif_msg_hw(adapter)) 3365 return; 3366 3367 /* Print Registers */ 3368 e1000_regdump(adapter); 3369 3370 /* transmit dump */ 3371 pr_info("TX Desc ring0 dump\n"); 3372 3373 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) 3374 * 3375 * Legacy Transmit Descriptor 3376 * +--------------------------------------------------------------+ 3377 * 0 | Buffer Address [63:0] (Reserved on Write Back) | 3378 * +--------------------------------------------------------------+ 3379 * 8 | Special | CSS | Status | CMD | CSO | Length | 3380 * +--------------------------------------------------------------+ 3381 * 63 48 47 36 35 32 31 24 23 16 15 0 3382 * 3383 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload 3384 * 63 48 47 40 39 32 31 16 15 8 7 0 3385 * +----------------------------------------------------------------+ 3386 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | 3387 * +----------------------------------------------------------------+ 3388 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | 3389 * +----------------------------------------------------------------+ 3390 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 3391 * 3392 * Extended Data Descriptor (DTYP=0x1) 3393 * +----------------------------------------------------------------+ 3394 * 0 | Buffer Address [63:0] | 3395 * +----------------------------------------------------------------+ 3396 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | 3397 * +----------------------------------------------------------------+ 3398 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 3399 */ 3400 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n"); 3401 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n"); 3402 3403 if (!netif_msg_tx_done(adapter)) 3404 goto rx_ring_summary; 3405 3406 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { 3407 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); 3408 struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i]; 3409 struct my_u { __le64 a; __le64 b; }; 3410 struct my_u *u = (struct my_u *)tx_desc; 3411 const char *type; 3412 3413 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) 3414 type = "NTC/U"; 3415 else if (i == tx_ring->next_to_use) 3416 type = "NTU"; 3417 else if (i == tx_ring->next_to_clean) 3418 type = "NTC"; 3419 else 3420 type = ""; 3421 3422 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n", 3423 ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i, 3424 le64_to_cpu(u->a), le64_to_cpu(u->b), 3425 (u64)buffer_info->dma, buffer_info->length, 3426 buffer_info->next_to_watch, 3427 (u64)buffer_info->time_stamp, buffer_info->skb, type); 3428 } 3429 3430 rx_ring_summary: 3431 /* receive dump */ 3432 pr_info("\nRX Desc ring dump\n"); 3433 3434 /* Legacy Receive Descriptor Format 3435 * 3436 * +-----------------------------------------------------+ 3437 * | Buffer Address [63:0] | 3438 * +-----------------------------------------------------+ 3439 * | VLAN Tag | Errors | Status 0 | Packet csum | Length | 3440 * +-----------------------------------------------------+ 3441 * 63 48 47 40 39 32 31 16 15 0 3442 */ 3443 pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n"); 3444 3445 if (!netif_msg_rx_status(adapter)) 3446 goto exit; 3447 3448 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { 3449 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); 3450 struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i]; 3451 struct my_u { __le64 a; __le64 b; }; 3452 struct my_u *u = (struct my_u *)rx_desc; 3453 const char *type; 3454 3455 if (i == rx_ring->next_to_use) 3456 type = "NTU"; 3457 else if (i == rx_ring->next_to_clean) 3458 type = "NTC"; 3459 else 3460 type = ""; 3461 3462 pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n", 3463 i, le64_to_cpu(u->a), le64_to_cpu(u->b), 3464 (u64)buffer_info->dma, buffer_info->rxbuf.data, type); 3465 } /* for */ 3466 3467 /* dump the descriptor caches */ 3468 /* rx */ 3469 pr_info("Rx descriptor cache in 64bit format\n"); 3470 for (i = 0x6000; i <= 0x63FF ; i += 0x10) { 3471 pr_info("R%04X: %08X|%08X %08X|%08X\n", 3472 i, 3473 readl(adapter->hw.hw_addr + i+4), 3474 readl(adapter->hw.hw_addr + i), 3475 readl(adapter->hw.hw_addr + i+12), 3476 readl(adapter->hw.hw_addr + i+8)); 3477 } 3478 /* tx */ 3479 pr_info("Tx descriptor cache in 64bit format\n"); 3480 for (i = 0x7000; i <= 0x73FF ; i += 0x10) { 3481 pr_info("T%04X: %08X|%08X %08X|%08X\n", 3482 i, 3483 readl(adapter->hw.hw_addr + i+4), 3484 readl(adapter->hw.hw_addr + i), 3485 readl(adapter->hw.hw_addr + i+12), 3486 readl(adapter->hw.hw_addr + i+8)); 3487 } 3488 exit: 3489 return; 3490 } 3491 3492 /** 3493 * e1000_tx_timeout - Respond to a Tx Hang 3494 * @netdev: network interface device structure 3495 * @txqueue: number of the Tx queue that hung (unused) 3496 **/ 3497 static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) 3498 { 3499 struct e1000_adapter *adapter = netdev_priv(netdev); 3500 3501 /* Do the reset outside of interrupt context */ 3502 adapter->tx_timeout_count++; 3503 schedule_work(&adapter->reset_task); 3504 } 3505 3506 static void e1000_reset_task(struct work_struct *work) 3507 { 3508 struct e1000_adapter *adapter = 3509 container_of(work, struct e1000_adapter, reset_task); 3510 3511 e_err(drv, "Reset adapter\n"); 3512 e1000_reinit_locked(adapter); 3513 } 3514 3515 /** 3516 * e1000_change_mtu - Change the Maximum Transfer Unit 3517 * @netdev: network interface device structure 3518 * @new_mtu: new value for maximum frame size 3519 * 3520 * Returns 0 on success, negative on failure 3521 **/ 3522 static int e1000_change_mtu(struct net_device *netdev, int new_mtu) 3523 { 3524 struct e1000_adapter *adapter = netdev_priv(netdev); 3525 struct e1000_hw *hw = &adapter->hw; 3526 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3527 3528 /* Adapter-specific max frame size limits. */ 3529 switch (hw->mac_type) { 3530 case e1000_undefined ... e1000_82542_rev2_1: 3531 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { 3532 e_err(probe, "Jumbo Frames not supported.\n"); 3533 return -EINVAL; 3534 } 3535 break; 3536 default: 3537 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ 3538 break; 3539 } 3540 3541 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 3542 msleep(1); 3543 /* e1000_down has a dependency on max_frame_size */ 3544 hw->max_frame_size = max_frame; 3545 if (netif_running(netdev)) { 3546 /* prevent buffers from being reallocated */ 3547 adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers; 3548 e1000_down(adapter); 3549 } 3550 3551 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3552 * means we reserve 2 more, this pushes us to allocate from the next 3553 * larger slab size. 3554 * i.e. RXBUFFER_2048 --> size-4096 slab 3555 * however with the new *_jumbo_rx* routines, jumbo receives will use 3556 * fragmented skbs 3557 */ 3558 3559 if (max_frame <= E1000_RXBUFFER_2048) 3560 adapter->rx_buffer_len = E1000_RXBUFFER_2048; 3561 else 3562 #if (PAGE_SIZE >= E1000_RXBUFFER_16384) 3563 adapter->rx_buffer_len = E1000_RXBUFFER_16384; 3564 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096) 3565 adapter->rx_buffer_len = PAGE_SIZE; 3566 #endif 3567 3568 /* adjust allocation if LPE protects us, and we aren't using SBP */ 3569 if (!hw->tbi_compatibility_on && 3570 ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || 3571 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) 3572 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 3573 3574 netdev_dbg(netdev, "changing MTU from %d to %d\n", 3575 netdev->mtu, new_mtu); 3576 WRITE_ONCE(netdev->mtu, new_mtu); 3577 3578 if (netif_running(netdev)) 3579 e1000_up(adapter); 3580 else 3581 e1000_reset(adapter); 3582 3583 clear_bit(__E1000_RESETTING, &adapter->flags); 3584 3585 return 0; 3586 } 3587 3588 /** 3589 * e1000_update_stats - Update the board statistics counters 3590 * @adapter: board private structure 3591 **/ 3592 void e1000_update_stats(struct e1000_adapter *adapter) 3593 { 3594 struct net_device *netdev = adapter->netdev; 3595 struct e1000_hw *hw = &adapter->hw; 3596 struct pci_dev *pdev = adapter->pdev; 3597 unsigned long flags; 3598 u16 phy_tmp; 3599 3600 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF 3601 3602 /* Prevent stats update while adapter is being reset, or if the pci 3603 * connection is down. 3604 */ 3605 if (adapter->link_speed == 0) 3606 return; 3607 if (pci_channel_offline(pdev)) 3608 return; 3609 3610 spin_lock_irqsave(&adapter->stats_lock, flags); 3611 3612 /* these counters are modified from e1000_tbi_adjust_stats, 3613 * called from the interrupt context, so they must only 3614 * be written while holding adapter->stats_lock 3615 */ 3616 3617 adapter->stats.crcerrs += er32(CRCERRS); 3618 adapter->stats.gprc += er32(GPRC); 3619 adapter->stats.gorcl += er32(GORCL); 3620 adapter->stats.gorch += er32(GORCH); 3621 adapter->stats.bprc += er32(BPRC); 3622 adapter->stats.mprc += er32(MPRC); 3623 adapter->stats.roc += er32(ROC); 3624 3625 adapter->stats.prc64 += er32(PRC64); 3626 adapter->stats.prc127 += er32(PRC127); 3627 adapter->stats.prc255 += er32(PRC255); 3628 adapter->stats.prc511 += er32(PRC511); 3629 adapter->stats.prc1023 += er32(PRC1023); 3630 adapter->stats.prc1522 += er32(PRC1522); 3631 3632 adapter->stats.symerrs += er32(SYMERRS); 3633 adapter->stats.mpc += er32(MPC); 3634 adapter->stats.scc += er32(SCC); 3635 adapter->stats.ecol += er32(ECOL); 3636 adapter->stats.mcc += er32(MCC); 3637 adapter->stats.latecol += er32(LATECOL); 3638 adapter->stats.dc += er32(DC); 3639 adapter->stats.sec += er32(SEC); 3640 adapter->stats.rlec += er32(RLEC); 3641 adapter->stats.xonrxc += er32(XONRXC); 3642 adapter->stats.xontxc += er32(XONTXC); 3643 adapter->stats.xoffrxc += er32(XOFFRXC); 3644 adapter->stats.xofftxc += er32(XOFFTXC); 3645 adapter->stats.fcruc += er32(FCRUC); 3646 adapter->stats.gptc += er32(GPTC); 3647 adapter->stats.gotcl += er32(GOTCL); 3648 adapter->stats.gotch += er32(GOTCH); 3649 adapter->stats.rnbc += er32(RNBC); 3650 adapter->stats.ruc += er32(RUC); 3651 adapter->stats.rfc += er32(RFC); 3652 adapter->stats.rjc += er32(RJC); 3653 adapter->stats.torl += er32(TORL); 3654 adapter->stats.torh += er32(TORH); 3655 adapter->stats.totl += er32(TOTL); 3656 adapter->stats.toth += er32(TOTH); 3657 adapter->stats.tpr += er32(TPR); 3658 3659 adapter->stats.ptc64 += er32(PTC64); 3660 adapter->stats.ptc127 += er32(PTC127); 3661 adapter->stats.ptc255 += er32(PTC255); 3662 adapter->stats.ptc511 += er32(PTC511); 3663 adapter->stats.ptc1023 += er32(PTC1023); 3664 adapter->stats.ptc1522 += er32(PTC1522); 3665 3666 adapter->stats.mptc += er32(MPTC); 3667 adapter->stats.bptc += er32(BPTC); 3668 3669 /* used for adaptive IFS */ 3670 3671 hw->tx_packet_delta = er32(TPT); 3672 adapter->stats.tpt += hw->tx_packet_delta; 3673 hw->collision_delta = er32(COLC); 3674 adapter->stats.colc += hw->collision_delta; 3675 3676 if (hw->mac_type >= e1000_82543) { 3677 adapter->stats.algnerrc += er32(ALGNERRC); 3678 adapter->stats.rxerrc += er32(RXERRC); 3679 adapter->stats.tncrs += er32(TNCRS); 3680 adapter->stats.cexterr += er32(CEXTERR); 3681 adapter->stats.tsctc += er32(TSCTC); 3682 adapter->stats.tsctfc += er32(TSCTFC); 3683 } 3684 3685 /* Fill out the OS statistics structure */ 3686 netdev->stats.multicast = adapter->stats.mprc; 3687 netdev->stats.collisions = adapter->stats.colc; 3688 3689 /* Rx Errors */ 3690 3691 /* RLEC on some newer hardware can be incorrect so build 3692 * our own version based on RUC and ROC 3693 */ 3694 netdev->stats.rx_errors = adapter->stats.rxerrc + 3695 adapter->stats.crcerrs + adapter->stats.algnerrc + 3696 adapter->stats.ruc + adapter->stats.roc + 3697 adapter->stats.cexterr; 3698 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; 3699 netdev->stats.rx_length_errors = adapter->stats.rlerrc; 3700 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 3701 netdev->stats.rx_frame_errors = adapter->stats.algnerrc; 3702 netdev->stats.rx_missed_errors = adapter->stats.mpc; 3703 3704 /* Tx Errors */ 3705 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; 3706 netdev->stats.tx_errors = adapter->stats.txerrc; 3707 netdev->stats.tx_aborted_errors = adapter->stats.ecol; 3708 netdev->stats.tx_window_errors = adapter->stats.latecol; 3709 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; 3710 if (hw->bad_tx_carr_stats_fd && 3711 adapter->link_duplex == FULL_DUPLEX) { 3712 netdev->stats.tx_carrier_errors = 0; 3713 adapter->stats.tncrs = 0; 3714 } 3715 3716 /* Tx Dropped needs to be maintained elsewhere */ 3717 3718 /* Phy Stats */ 3719 if (hw->media_type == e1000_media_type_copper) { 3720 if ((adapter->link_speed == SPEED_1000) && 3721 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { 3722 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; 3723 adapter->phy_stats.idle_errors += phy_tmp; 3724 } 3725 3726 if ((hw->mac_type <= e1000_82546) && 3727 (hw->phy_type == e1000_phy_m88) && 3728 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) 3729 adapter->phy_stats.receive_errors += phy_tmp; 3730 } 3731 3732 /* Management Stats */ 3733 if (hw->has_smbus) { 3734 adapter->stats.mgptc += er32(MGTPTC); 3735 adapter->stats.mgprc += er32(MGTPRC); 3736 adapter->stats.mgpdc += er32(MGTPDC); 3737 } 3738 3739 spin_unlock_irqrestore(&adapter->stats_lock, flags); 3740 } 3741 3742 /** 3743 * e1000_intr - Interrupt Handler 3744 * @irq: interrupt number 3745 * @data: pointer to a network interface device structure 3746 **/ 3747 static irqreturn_t e1000_intr(int irq, void *data) 3748 { 3749 struct net_device *netdev = data; 3750 struct e1000_adapter *adapter = netdev_priv(netdev); 3751 struct e1000_hw *hw = &adapter->hw; 3752 u32 icr = er32(ICR); 3753 3754 if (unlikely((!icr))) 3755 return IRQ_NONE; /* Not our interrupt */ 3756 3757 /* we might have caused the interrupt, but the above 3758 * read cleared it, and just in case the driver is 3759 * down there is nothing to do so return handled 3760 */ 3761 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) 3762 return IRQ_HANDLED; 3763 3764 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { 3765 hw->get_link_status = 1; 3766 /* guard against interrupt when we're going down */ 3767 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3768 schedule_delayed_work(&adapter->watchdog_task, 1); 3769 } 3770 3771 /* disable interrupts, without the synchronize_irq bit */ 3772 ew32(IMC, ~0); 3773 E1000_WRITE_FLUSH(); 3774 3775 if (likely(napi_schedule_prep(&adapter->napi))) { 3776 adapter->total_tx_bytes = 0; 3777 adapter->total_tx_packets = 0; 3778 adapter->total_rx_bytes = 0; 3779 adapter->total_rx_packets = 0; 3780 __napi_schedule(&adapter->napi); 3781 } else { 3782 /* this really should not happen! if it does it is basically a 3783 * bug, but not a hard error, so enable ints and continue 3784 */ 3785 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3786 e1000_irq_enable(adapter); 3787 } 3788 3789 return IRQ_HANDLED; 3790 } 3791 3792 /** 3793 * e1000_clean - NAPI Rx polling callback 3794 * @napi: napi struct containing references to driver info 3795 * @budget: budget given to driver for receive packets 3796 **/ 3797 static int e1000_clean(struct napi_struct *napi, int budget) 3798 { 3799 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, 3800 napi); 3801 int tx_clean_complete = 0, work_done = 0; 3802 3803 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); 3804 3805 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); 3806 3807 if (!tx_clean_complete || work_done == budget) 3808 return budget; 3809 3810 /* Exit the polling mode, but don't re-enable interrupts if stack might 3811 * poll us due to busy-polling 3812 */ 3813 if (likely(napi_complete_done(napi, work_done))) { 3814 if (likely(adapter->itr_setting & 3)) 3815 e1000_set_itr(adapter); 3816 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3817 e1000_irq_enable(adapter); 3818 } 3819 3820 return work_done; 3821 } 3822 3823 /** 3824 * e1000_clean_tx_irq - Reclaim resources after transmit completes 3825 * @adapter: board private structure 3826 * @tx_ring: ring to clean 3827 **/ 3828 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, 3829 struct e1000_tx_ring *tx_ring) 3830 { 3831 struct e1000_hw *hw = &adapter->hw; 3832 struct net_device *netdev = adapter->netdev; 3833 struct e1000_tx_desc *tx_desc, *eop_desc; 3834 struct e1000_tx_buffer *buffer_info; 3835 unsigned int i, eop; 3836 unsigned int count = 0; 3837 unsigned int total_tx_bytes = 0, total_tx_packets = 0; 3838 unsigned int bytes_compl = 0, pkts_compl = 0; 3839 3840 i = tx_ring->next_to_clean; 3841 eop = tx_ring->buffer_info[i].next_to_watch; 3842 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3843 3844 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 3845 (count < tx_ring->count)) { 3846 bool cleaned = false; 3847 dma_rmb(); /* read buffer_info after eop_desc */ 3848 for ( ; !cleaned; count++) { 3849 tx_desc = E1000_TX_DESC(*tx_ring, i); 3850 buffer_info = &tx_ring->buffer_info[i]; 3851 cleaned = (i == eop); 3852 3853 if (cleaned) { 3854 total_tx_packets += buffer_info->segs; 3855 total_tx_bytes += buffer_info->bytecount; 3856 if (buffer_info->skb) { 3857 bytes_compl += buffer_info->skb->len; 3858 pkts_compl++; 3859 } 3860 3861 } 3862 e1000_unmap_and_free_tx_resource(adapter, buffer_info, 3863 64); 3864 tx_desc->upper.data = 0; 3865 3866 if (unlikely(++i == tx_ring->count)) 3867 i = 0; 3868 } 3869 3870 eop = tx_ring->buffer_info[i].next_to_watch; 3871 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3872 } 3873 3874 /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame, 3875 * which will reuse the cleaned buffers. 3876 */ 3877 smp_store_release(&tx_ring->next_to_clean, i); 3878 3879 netdev_completed_queue(netdev, pkts_compl, bytes_compl); 3880 3881 #define TX_WAKE_THRESHOLD 32 3882 if (unlikely(count && netif_carrier_ok(netdev) && 3883 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { 3884 /* Make sure that anybody stopping the queue after this 3885 * sees the new next_to_clean. 3886 */ 3887 smp_mb(); 3888 3889 if (netif_queue_stopped(netdev) && 3890 !(test_bit(__E1000_DOWN, &adapter->flags))) { 3891 netif_wake_queue(netdev); 3892 ++adapter->restart_queue; 3893 } 3894 } 3895 3896 if (adapter->detect_tx_hung) { 3897 /* Detect a transmit hang in hardware, this serializes the 3898 * check with the clearing of time_stamp and movement of i 3899 */ 3900 adapter->detect_tx_hung = false; 3901 if (tx_ring->buffer_info[eop].time_stamp && 3902 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + 3903 (adapter->tx_timeout_factor * HZ)) && 3904 !(er32(STATUS) & E1000_STATUS_TXOFF)) { 3905 3906 /* detected Tx unit hang */ 3907 e_err(drv, "Detected Tx Unit Hang\n" 3908 " Tx Queue <%lu>\n" 3909 " TDH <%x>\n" 3910 " TDT <%x>\n" 3911 " next_to_use <%x>\n" 3912 " next_to_clean <%x>\n" 3913 "buffer_info[next_to_clean]\n" 3914 " time_stamp <%lx>\n" 3915 " next_to_watch <%x>\n" 3916 " jiffies <%lx>\n" 3917 " next_to_watch.status <%x>\n", 3918 (unsigned long)(tx_ring - adapter->tx_ring), 3919 readl(hw->hw_addr + tx_ring->tdh), 3920 readl(hw->hw_addr + tx_ring->tdt), 3921 tx_ring->next_to_use, 3922 tx_ring->next_to_clean, 3923 tx_ring->buffer_info[eop].time_stamp, 3924 eop, 3925 jiffies, 3926 eop_desc->upper.fields.status); 3927 e1000_dump(adapter); 3928 netif_stop_queue(netdev); 3929 } 3930 } 3931 adapter->total_tx_bytes += total_tx_bytes; 3932 adapter->total_tx_packets += total_tx_packets; 3933 netdev->stats.tx_bytes += total_tx_bytes; 3934 netdev->stats.tx_packets += total_tx_packets; 3935 return count < tx_ring->count; 3936 } 3937 3938 /** 3939 * e1000_rx_checksum - Receive Checksum Offload for 82543 3940 * @adapter: board private structure 3941 * @status_err: receive descriptor status and error fields 3942 * @csum: receive descriptor csum field 3943 * @skb: socket buffer with received data 3944 **/ 3945 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, 3946 u32 csum, struct sk_buff *skb) 3947 { 3948 struct e1000_hw *hw = &adapter->hw; 3949 u16 status = (u16)status_err; 3950 u8 errors = (u8)(status_err >> 24); 3951 3952 skb_checksum_none_assert(skb); 3953 3954 /* 82543 or newer only */ 3955 if (unlikely(hw->mac_type < e1000_82543)) 3956 return; 3957 /* Ignore Checksum bit is set */ 3958 if (unlikely(status & E1000_RXD_STAT_IXSM)) 3959 return; 3960 /* TCP/UDP checksum error bit is set */ 3961 if (unlikely(errors & E1000_RXD_ERR_TCPE)) { 3962 /* let the stack verify checksum errors */ 3963 adapter->hw_csum_err++; 3964 return; 3965 } 3966 /* TCP/UDP Checksum has not been calculated */ 3967 if (!(status & E1000_RXD_STAT_TCPCS)) 3968 return; 3969 3970 /* It must be a TCP or UDP packet with a valid checksum */ 3971 if (likely(status & E1000_RXD_STAT_TCPCS)) { 3972 /* TCP checksum is good */ 3973 skb->ip_summed = CHECKSUM_UNNECESSARY; 3974 } 3975 adapter->hw_csum_good++; 3976 } 3977 3978 /** 3979 * e1000_consume_page - helper function for jumbo Rx path 3980 * @bi: software descriptor shadow data 3981 * @skb: skb being modified 3982 * @length: length of data being added 3983 **/ 3984 static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb, 3985 u16 length) 3986 { 3987 bi->rxbuf.page = NULL; 3988 skb->len += length; 3989 skb->data_len += length; 3990 skb->truesize += PAGE_SIZE; 3991 } 3992 3993 /** 3994 * e1000_receive_skb - helper function to handle rx indications 3995 * @adapter: board private structure 3996 * @status: descriptor status field as written by hardware 3997 * @vlan: descriptor vlan field as written by hardware (no le/be conversion) 3998 * @skb: pointer to sk_buff to be indicated to stack 3999 */ 4000 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status, 4001 __le16 vlan, struct sk_buff *skb) 4002 { 4003 skb->protocol = eth_type_trans(skb, adapter->netdev); 4004 4005 if (status & E1000_RXD_STAT_VP) { 4006 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; 4007 4008 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 4009 } 4010 napi_gro_receive(&adapter->napi, skb); 4011 } 4012 4013 /** 4014 * e1000_tbi_adjust_stats 4015 * @hw: Struct containing variables accessed by shared code 4016 * @stats: point to stats struct 4017 * @frame_len: The length of the frame in question 4018 * @mac_addr: The Ethernet destination address of the frame in question 4019 * 4020 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT 4021 */ 4022 static void e1000_tbi_adjust_stats(struct e1000_hw *hw, 4023 struct e1000_hw_stats *stats, 4024 u32 frame_len, const u8 *mac_addr) 4025 { 4026 u64 carry_bit; 4027 4028 /* First adjust the frame length. */ 4029 frame_len--; 4030 /* We need to adjust the statistics counters, since the hardware 4031 * counters overcount this packet as a CRC error and undercount 4032 * the packet as a good packet 4033 */ 4034 /* This packet should not be counted as a CRC error. */ 4035 stats->crcerrs--; 4036 /* This packet does count as a Good Packet Received. */ 4037 stats->gprc++; 4038 4039 /* Adjust the Good Octets received counters */ 4040 carry_bit = 0x80000000 & stats->gorcl; 4041 stats->gorcl += frame_len; 4042 /* If the high bit of Gorcl (the low 32 bits of the Good Octets 4043 * Received Count) was one before the addition, 4044 * AND it is zero after, then we lost the carry out, 4045 * need to add one to Gorch (Good Octets Received Count High). 4046 * This could be simplified if all environments supported 4047 * 64-bit integers. 4048 */ 4049 if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) 4050 stats->gorch++; 4051 /* Is this a broadcast or multicast? Check broadcast first, 4052 * since the test for a multicast frame will test positive on 4053 * a broadcast frame. 4054 */ 4055 if (is_broadcast_ether_addr(mac_addr)) 4056 stats->bprc++; 4057 else if (is_multicast_ether_addr(mac_addr)) 4058 stats->mprc++; 4059 4060 if (frame_len == hw->max_frame_size) { 4061 /* In this case, the hardware has overcounted the number of 4062 * oversize frames. 4063 */ 4064 if (stats->roc > 0) 4065 stats->roc--; 4066 } 4067 4068 /* Adjust the bin counters when the extra byte put the frame in the 4069 * wrong bin. Remember that the frame_len was adjusted above. 4070 */ 4071 if (frame_len == 64) { 4072 stats->prc64++; 4073 stats->prc127--; 4074 } else if (frame_len == 127) { 4075 stats->prc127++; 4076 stats->prc255--; 4077 } else if (frame_len == 255) { 4078 stats->prc255++; 4079 stats->prc511--; 4080 } else if (frame_len == 511) { 4081 stats->prc511++; 4082 stats->prc1023--; 4083 } else if (frame_len == 1023) { 4084 stats->prc1023++; 4085 stats->prc1522--; 4086 } else if (frame_len == 1522) { 4087 stats->prc1522++; 4088 } 4089 } 4090 4091 static bool e1000_tbi_should_accept(struct e1000_adapter *adapter, 4092 u8 status, u8 errors, 4093 u32 length, const u8 *data) 4094 { 4095 struct e1000_hw *hw = &adapter->hw; 4096 u8 last_byte = *(data + length - 1); 4097 4098 if (TBI_ACCEPT(hw, status, errors, length, last_byte)) { 4099 unsigned long irq_flags; 4100 4101 spin_lock_irqsave(&adapter->stats_lock, irq_flags); 4102 e1000_tbi_adjust_stats(hw, &adapter->stats, length, data); 4103 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); 4104 4105 return true; 4106 } 4107 4108 return false; 4109 } 4110 4111 static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter, 4112 unsigned int bufsz) 4113 { 4114 struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz); 4115 4116 if (unlikely(!skb)) 4117 adapter->alloc_rx_buff_failed++; 4118 return skb; 4119 } 4120 4121 /** 4122 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy 4123 * @adapter: board private structure 4124 * @rx_ring: ring to clean 4125 * @work_done: amount of napi work completed this call 4126 * @work_to_do: max amount of work allowed for this call to do 4127 * 4128 * the return value indicates whether actual cleaning was done, there 4129 * is no guarantee that everything was cleaned 4130 */ 4131 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, 4132 struct e1000_rx_ring *rx_ring, 4133 int *work_done, int work_to_do) 4134 { 4135 struct net_device *netdev = adapter->netdev; 4136 struct pci_dev *pdev = adapter->pdev; 4137 struct e1000_rx_desc *rx_desc, *next_rxd; 4138 struct e1000_rx_buffer *buffer_info, *next_buffer; 4139 u32 length; 4140 unsigned int i; 4141 int cleaned_count = 0; 4142 bool cleaned = false; 4143 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 4144 4145 i = rx_ring->next_to_clean; 4146 rx_desc = E1000_RX_DESC(*rx_ring, i); 4147 buffer_info = &rx_ring->buffer_info[i]; 4148 4149 while (rx_desc->status & E1000_RXD_STAT_DD) { 4150 struct sk_buff *skb; 4151 u8 status; 4152 4153 if (*work_done >= work_to_do) 4154 break; 4155 (*work_done)++; 4156 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ 4157 4158 status = rx_desc->status; 4159 4160 if (++i == rx_ring->count) 4161 i = 0; 4162 4163 next_rxd = E1000_RX_DESC(*rx_ring, i); 4164 prefetch(next_rxd); 4165 4166 next_buffer = &rx_ring->buffer_info[i]; 4167 4168 cleaned = true; 4169 cleaned_count++; 4170 dma_unmap_page(&pdev->dev, buffer_info->dma, 4171 adapter->rx_buffer_len, DMA_FROM_DEVICE); 4172 buffer_info->dma = 0; 4173 4174 length = le16_to_cpu(rx_desc->length); 4175 4176 /* errors is only valid for DD + EOP descriptors */ 4177 if (unlikely((status & E1000_RXD_STAT_EOP) && 4178 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { 4179 u8 *mapped = page_address(buffer_info->rxbuf.page); 4180 4181 if (e1000_tbi_should_accept(adapter, status, 4182 rx_desc->errors, 4183 length, mapped)) { 4184 length--; 4185 } else if (netdev->features & NETIF_F_RXALL) { 4186 goto process_skb; 4187 } else { 4188 /* an error means any chain goes out the window 4189 * too 4190 */ 4191 dev_kfree_skb(rx_ring->rx_skb_top); 4192 rx_ring->rx_skb_top = NULL; 4193 goto next_desc; 4194 } 4195 } 4196 4197 #define rxtop rx_ring->rx_skb_top 4198 process_skb: 4199 if (!(status & E1000_RXD_STAT_EOP)) { 4200 /* this descriptor is only the beginning (or middle) */ 4201 if (!rxtop) { 4202 /* this is the beginning of a chain */ 4203 rxtop = napi_get_frags(&adapter->napi); 4204 if (!rxtop) 4205 break; 4206 4207 skb_fill_page_desc(rxtop, 0, 4208 buffer_info->rxbuf.page, 4209 0, length); 4210 } else { 4211 /* this is the middle of a chain */ 4212 skb_fill_page_desc(rxtop, 4213 skb_shinfo(rxtop)->nr_frags, 4214 buffer_info->rxbuf.page, 0, length); 4215 } 4216 e1000_consume_page(buffer_info, rxtop, length); 4217 goto next_desc; 4218 } else { 4219 if (rxtop) { 4220 /* end of the chain */ 4221 skb_fill_page_desc(rxtop, 4222 skb_shinfo(rxtop)->nr_frags, 4223 buffer_info->rxbuf.page, 0, length); 4224 skb = rxtop; 4225 rxtop = NULL; 4226 e1000_consume_page(buffer_info, skb, length); 4227 } else { 4228 struct page *p; 4229 /* no chain, got EOP, this buf is the packet 4230 * copybreak to save the put_page/alloc_page 4231 */ 4232 p = buffer_info->rxbuf.page; 4233 if (length <= copybreak) { 4234 if (likely(!(netdev->features & NETIF_F_RXFCS))) 4235 length -= 4; 4236 skb = e1000_alloc_rx_skb(adapter, 4237 length); 4238 if (!skb) 4239 break; 4240 4241 memcpy(skb_tail_pointer(skb), 4242 page_address(p), length); 4243 4244 /* re-use the page, so don't erase 4245 * buffer_info->rxbuf.page 4246 */ 4247 skb_put(skb, length); 4248 e1000_rx_checksum(adapter, 4249 status | rx_desc->errors << 24, 4250 le16_to_cpu(rx_desc->csum), skb); 4251 4252 total_rx_bytes += skb->len; 4253 total_rx_packets++; 4254 4255 e1000_receive_skb(adapter, status, 4256 rx_desc->special, skb); 4257 goto next_desc; 4258 } else { 4259 skb = napi_get_frags(&adapter->napi); 4260 if (!skb) { 4261 adapter->alloc_rx_buff_failed++; 4262 break; 4263 } 4264 skb_fill_page_desc(skb, 0, p, 0, 4265 length); 4266 e1000_consume_page(buffer_info, skb, 4267 length); 4268 } 4269 } 4270 } 4271 4272 /* Receive Checksum Offload XXX recompute due to CRC strip? */ 4273 e1000_rx_checksum(adapter, 4274 (u32)(status) | 4275 ((u32)(rx_desc->errors) << 24), 4276 le16_to_cpu(rx_desc->csum), skb); 4277 4278 total_rx_bytes += (skb->len - 4); /* don't count FCS */ 4279 if (likely(!(netdev->features & NETIF_F_RXFCS))) 4280 pskb_trim(skb, skb->len - 4); 4281 total_rx_packets++; 4282 4283 if (status & E1000_RXD_STAT_VP) { 4284 __le16 vlan = rx_desc->special; 4285 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; 4286 4287 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 4288 } 4289 4290 napi_gro_frags(&adapter->napi); 4291 4292 next_desc: 4293 rx_desc->status = 0; 4294 4295 /* return some buffers to hardware, one at a time is too slow */ 4296 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { 4297 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4298 cleaned_count = 0; 4299 } 4300 4301 /* use prefetched values */ 4302 rx_desc = next_rxd; 4303 buffer_info = next_buffer; 4304 } 4305 rx_ring->next_to_clean = i; 4306 4307 cleaned_count = E1000_DESC_UNUSED(rx_ring); 4308 if (cleaned_count) 4309 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4310 4311 adapter->total_rx_packets += total_rx_packets; 4312 adapter->total_rx_bytes += total_rx_bytes; 4313 netdev->stats.rx_bytes += total_rx_bytes; 4314 netdev->stats.rx_packets += total_rx_packets; 4315 return cleaned; 4316 } 4317 4318 /* this should improve performance for small packets with large amounts 4319 * of reassembly being done in the stack 4320 */ 4321 static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter, 4322 struct e1000_rx_buffer *buffer_info, 4323 u32 length, const void *data) 4324 { 4325 struct sk_buff *skb; 4326 4327 if (length > copybreak) 4328 return NULL; 4329 4330 skb = e1000_alloc_rx_skb(adapter, length); 4331 if (!skb) 4332 return NULL; 4333 4334 dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma, 4335 length, DMA_FROM_DEVICE); 4336 4337 skb_put_data(skb, data, length); 4338 4339 return skb; 4340 } 4341 4342 /** 4343 * e1000_clean_rx_irq - Send received data up the network stack; legacy 4344 * @adapter: board private structure 4345 * @rx_ring: ring to clean 4346 * @work_done: amount of napi work completed this call 4347 * @work_to_do: max amount of work allowed for this call to do 4348 */ 4349 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, 4350 struct e1000_rx_ring *rx_ring, 4351 int *work_done, int work_to_do) 4352 { 4353 struct net_device *netdev = adapter->netdev; 4354 struct pci_dev *pdev = adapter->pdev; 4355 struct e1000_rx_desc *rx_desc, *next_rxd; 4356 struct e1000_rx_buffer *buffer_info, *next_buffer; 4357 u32 length; 4358 unsigned int i; 4359 int cleaned_count = 0; 4360 bool cleaned = false; 4361 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 4362 4363 i = rx_ring->next_to_clean; 4364 rx_desc = E1000_RX_DESC(*rx_ring, i); 4365 buffer_info = &rx_ring->buffer_info[i]; 4366 4367 while (rx_desc->status & E1000_RXD_STAT_DD) { 4368 struct sk_buff *skb; 4369 u8 *data; 4370 u8 status; 4371 4372 if (*work_done >= work_to_do) 4373 break; 4374 (*work_done)++; 4375 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ 4376 4377 status = rx_desc->status; 4378 length = le16_to_cpu(rx_desc->length); 4379 4380 data = buffer_info->rxbuf.data; 4381 prefetch(data); 4382 skb = e1000_copybreak(adapter, buffer_info, length, data); 4383 if (!skb) { 4384 unsigned int frag_len = e1000_frag_len(adapter); 4385 4386 skb = napi_build_skb(data - E1000_HEADROOM, frag_len); 4387 if (!skb) { 4388 adapter->alloc_rx_buff_failed++; 4389 break; 4390 } 4391 4392 skb_reserve(skb, E1000_HEADROOM); 4393 dma_unmap_single(&pdev->dev, buffer_info->dma, 4394 adapter->rx_buffer_len, 4395 DMA_FROM_DEVICE); 4396 buffer_info->dma = 0; 4397 buffer_info->rxbuf.data = NULL; 4398 } 4399 4400 if (++i == rx_ring->count) 4401 i = 0; 4402 4403 next_rxd = E1000_RX_DESC(*rx_ring, i); 4404 prefetch(next_rxd); 4405 4406 next_buffer = &rx_ring->buffer_info[i]; 4407 4408 cleaned = true; 4409 cleaned_count++; 4410 4411 /* !EOP means multiple descriptors were used to store a single 4412 * packet, if thats the case we need to toss it. In fact, we 4413 * to toss every packet with the EOP bit clear and the next 4414 * frame that _does_ have the EOP bit set, as it is by 4415 * definition only a frame fragment 4416 */ 4417 if (unlikely(!(status & E1000_RXD_STAT_EOP))) 4418 adapter->discarding = true; 4419 4420 if (adapter->discarding) { 4421 /* All receives must fit into a single buffer */ 4422 netdev_dbg(netdev, "Receive packet consumed multiple buffers\n"); 4423 dev_kfree_skb(skb); 4424 if (status & E1000_RXD_STAT_EOP) 4425 adapter->discarding = false; 4426 goto next_desc; 4427 } 4428 4429 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { 4430 if (e1000_tbi_should_accept(adapter, status, 4431 rx_desc->errors, 4432 length, data)) { 4433 length--; 4434 } else if (netdev->features & NETIF_F_RXALL) { 4435 goto process_skb; 4436 } else { 4437 dev_kfree_skb(skb); 4438 goto next_desc; 4439 } 4440 } 4441 4442 process_skb: 4443 total_rx_bytes += (length - 4); /* don't count FCS */ 4444 total_rx_packets++; 4445 4446 if (likely(!(netdev->features & NETIF_F_RXFCS))) 4447 /* adjust length to remove Ethernet CRC, this must be 4448 * done after the TBI_ACCEPT workaround above 4449 */ 4450 length -= 4; 4451 4452 if (buffer_info->rxbuf.data == NULL) 4453 skb_put(skb, length); 4454 else /* copybreak skb */ 4455 skb_trim(skb, length); 4456 4457 /* Receive Checksum Offload */ 4458 e1000_rx_checksum(adapter, 4459 (u32)(status) | 4460 ((u32)(rx_desc->errors) << 24), 4461 le16_to_cpu(rx_desc->csum), skb); 4462 4463 e1000_receive_skb(adapter, status, rx_desc->special, skb); 4464 4465 next_desc: 4466 rx_desc->status = 0; 4467 4468 /* return some buffers to hardware, one at a time is too slow */ 4469 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { 4470 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4471 cleaned_count = 0; 4472 } 4473 4474 /* use prefetched values */ 4475 rx_desc = next_rxd; 4476 buffer_info = next_buffer; 4477 } 4478 rx_ring->next_to_clean = i; 4479 4480 cleaned_count = E1000_DESC_UNUSED(rx_ring); 4481 if (cleaned_count) 4482 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4483 4484 adapter->total_rx_packets += total_rx_packets; 4485 adapter->total_rx_bytes += total_rx_bytes; 4486 netdev->stats.rx_bytes += total_rx_bytes; 4487 netdev->stats.rx_packets += total_rx_packets; 4488 return cleaned; 4489 } 4490 4491 /** 4492 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers 4493 * @adapter: address of board private structure 4494 * @rx_ring: pointer to receive ring structure 4495 * @cleaned_count: number of buffers to allocate this pass 4496 **/ 4497 static void 4498 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, 4499 struct e1000_rx_ring *rx_ring, int cleaned_count) 4500 { 4501 struct pci_dev *pdev = adapter->pdev; 4502 struct e1000_rx_desc *rx_desc; 4503 struct e1000_rx_buffer *buffer_info; 4504 unsigned int i; 4505 4506 i = rx_ring->next_to_use; 4507 buffer_info = &rx_ring->buffer_info[i]; 4508 4509 while (cleaned_count--) { 4510 /* allocate a new page if necessary */ 4511 if (!buffer_info->rxbuf.page) { 4512 buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC); 4513 if (unlikely(!buffer_info->rxbuf.page)) { 4514 adapter->alloc_rx_buff_failed++; 4515 break; 4516 } 4517 } 4518 4519 if (!buffer_info->dma) { 4520 buffer_info->dma = dma_map_page(&pdev->dev, 4521 buffer_info->rxbuf.page, 0, 4522 adapter->rx_buffer_len, 4523 DMA_FROM_DEVICE); 4524 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 4525 put_page(buffer_info->rxbuf.page); 4526 buffer_info->rxbuf.page = NULL; 4527 buffer_info->dma = 0; 4528 adapter->alloc_rx_buff_failed++; 4529 break; 4530 } 4531 } 4532 4533 rx_desc = E1000_RX_DESC(*rx_ring, i); 4534 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4535 4536 if (unlikely(++i == rx_ring->count)) 4537 i = 0; 4538 buffer_info = &rx_ring->buffer_info[i]; 4539 } 4540 4541 if (likely(rx_ring->next_to_use != i)) { 4542 rx_ring->next_to_use = i; 4543 if (unlikely(i-- == 0)) 4544 i = (rx_ring->count - 1); 4545 4546 /* Force memory writes to complete before letting h/w 4547 * know there are new descriptors to fetch. (Only 4548 * applicable for weak-ordered memory model archs, 4549 * such as IA-64). 4550 */ 4551 dma_wmb(); 4552 writel(i, adapter->hw.hw_addr + rx_ring->rdt); 4553 } 4554 } 4555 4556 /** 4557 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended 4558 * @adapter: address of board private structure 4559 * @rx_ring: pointer to ring struct 4560 * @cleaned_count: number of new Rx buffers to try to allocate 4561 **/ 4562 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, 4563 struct e1000_rx_ring *rx_ring, 4564 int cleaned_count) 4565 { 4566 struct e1000_hw *hw = &adapter->hw; 4567 struct pci_dev *pdev = adapter->pdev; 4568 struct e1000_rx_desc *rx_desc; 4569 struct e1000_rx_buffer *buffer_info; 4570 unsigned int i; 4571 unsigned int bufsz = adapter->rx_buffer_len; 4572 4573 i = rx_ring->next_to_use; 4574 buffer_info = &rx_ring->buffer_info[i]; 4575 4576 while (cleaned_count--) { 4577 void *data; 4578 4579 if (buffer_info->rxbuf.data) 4580 goto skip; 4581 4582 data = e1000_alloc_frag(adapter); 4583 if (!data) { 4584 /* Better luck next round */ 4585 adapter->alloc_rx_buff_failed++; 4586 break; 4587 } 4588 4589 /* Fix for errata 23, can't cross 64kB boundary */ 4590 if (!e1000_check_64k_bound(adapter, data, bufsz)) { 4591 void *olddata = data; 4592 e_err(rx_err, "skb align check failed: %u bytes at " 4593 "%p\n", bufsz, data); 4594 /* Try again, without freeing the previous */ 4595 data = e1000_alloc_frag(adapter); 4596 /* Failed allocation, critical failure */ 4597 if (!data) { 4598 skb_free_frag(olddata); 4599 adapter->alloc_rx_buff_failed++; 4600 break; 4601 } 4602 4603 if (!e1000_check_64k_bound(adapter, data, bufsz)) { 4604 /* give up */ 4605 skb_free_frag(data); 4606 skb_free_frag(olddata); 4607 adapter->alloc_rx_buff_failed++; 4608 break; 4609 } 4610 4611 /* Use new allocation */ 4612 skb_free_frag(olddata); 4613 } 4614 buffer_info->dma = dma_map_single(&pdev->dev, 4615 data, 4616 adapter->rx_buffer_len, 4617 DMA_FROM_DEVICE); 4618 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 4619 skb_free_frag(data); 4620 buffer_info->dma = 0; 4621 adapter->alloc_rx_buff_failed++; 4622 break; 4623 } 4624 4625 /* XXX if it was allocated cleanly it will never map to a 4626 * boundary crossing 4627 */ 4628 4629 /* Fix for errata 23, can't cross 64kB boundary */ 4630 if (!e1000_check_64k_bound(adapter, 4631 (void *)(unsigned long)buffer_info->dma, 4632 adapter->rx_buffer_len)) { 4633 e_err(rx_err, "dma align check failed: %u bytes at " 4634 "%p\n", adapter->rx_buffer_len, 4635 (void *)(unsigned long)buffer_info->dma); 4636 4637 dma_unmap_single(&pdev->dev, buffer_info->dma, 4638 adapter->rx_buffer_len, 4639 DMA_FROM_DEVICE); 4640 4641 skb_free_frag(data); 4642 buffer_info->rxbuf.data = NULL; 4643 buffer_info->dma = 0; 4644 4645 adapter->alloc_rx_buff_failed++; 4646 break; 4647 } 4648 buffer_info->rxbuf.data = data; 4649 skip: 4650 rx_desc = E1000_RX_DESC(*rx_ring, i); 4651 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4652 4653 if (unlikely(++i == rx_ring->count)) 4654 i = 0; 4655 buffer_info = &rx_ring->buffer_info[i]; 4656 } 4657 4658 if (likely(rx_ring->next_to_use != i)) { 4659 rx_ring->next_to_use = i; 4660 if (unlikely(i-- == 0)) 4661 i = (rx_ring->count - 1); 4662 4663 /* Force memory writes to complete before letting h/w 4664 * know there are new descriptors to fetch. (Only 4665 * applicable for weak-ordered memory model archs, 4666 * such as IA-64). 4667 */ 4668 dma_wmb(); 4669 writel(i, hw->hw_addr + rx_ring->rdt); 4670 } 4671 } 4672 4673 /** 4674 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. 4675 * @adapter: address of board private structure 4676 **/ 4677 static void e1000_smartspeed(struct e1000_adapter *adapter) 4678 { 4679 struct e1000_hw *hw = &adapter->hw; 4680 u16 phy_status; 4681 u16 phy_ctrl; 4682 4683 if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || 4684 !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) 4685 return; 4686 4687 if (adapter->smartspeed == 0) { 4688 /* If Master/Slave config fault is asserted twice, 4689 * we assume back-to-back 4690 */ 4691 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4692 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 4693 return; 4694 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4695 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 4696 return; 4697 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4698 if (phy_ctrl & CR_1000T_MS_ENABLE) { 4699 phy_ctrl &= ~CR_1000T_MS_ENABLE; 4700 e1000_write_phy_reg(hw, PHY_1000T_CTRL, 4701 phy_ctrl); 4702 adapter->smartspeed++; 4703 if (!e1000_phy_setup_autoneg(hw) && 4704 !e1000_read_phy_reg(hw, PHY_CTRL, 4705 &phy_ctrl)) { 4706 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4707 MII_CR_RESTART_AUTO_NEG); 4708 e1000_write_phy_reg(hw, PHY_CTRL, 4709 phy_ctrl); 4710 } 4711 } 4712 return; 4713 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { 4714 /* If still no link, perhaps using 2/3 pair cable */ 4715 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4716 phy_ctrl |= CR_1000T_MS_ENABLE; 4717 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); 4718 if (!e1000_phy_setup_autoneg(hw) && 4719 !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { 4720 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4721 MII_CR_RESTART_AUTO_NEG); 4722 e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); 4723 } 4724 } 4725 /* Restart process after E1000_SMARTSPEED_MAX iterations */ 4726 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) 4727 adapter->smartspeed = 0; 4728 } 4729 4730 /** 4731 * e1000_ioctl - handle ioctl calls 4732 * @netdev: pointer to our netdev 4733 * @ifr: pointer to interface request structure 4734 * @cmd: ioctl data 4735 **/ 4736 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 4737 { 4738 switch (cmd) { 4739 case SIOCGMIIPHY: 4740 case SIOCGMIIREG: 4741 case SIOCSMIIREG: 4742 return e1000_mii_ioctl(netdev, ifr, cmd); 4743 default: 4744 return -EOPNOTSUPP; 4745 } 4746 } 4747 4748 /** 4749 * e1000_mii_ioctl - 4750 * @netdev: pointer to our netdev 4751 * @ifr: pointer to interface request structure 4752 * @cmd: ioctl data 4753 **/ 4754 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 4755 int cmd) 4756 { 4757 struct e1000_adapter *adapter = netdev_priv(netdev); 4758 struct e1000_hw *hw = &adapter->hw; 4759 struct mii_ioctl_data *data = if_mii(ifr); 4760 int retval; 4761 u16 mii_reg; 4762 unsigned long flags; 4763 4764 if (hw->media_type != e1000_media_type_copper) 4765 return -EOPNOTSUPP; 4766 4767 switch (cmd) { 4768 case SIOCGMIIPHY: 4769 data->phy_id = hw->phy_addr; 4770 break; 4771 case SIOCGMIIREG: 4772 spin_lock_irqsave(&adapter->stats_lock, flags); 4773 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, 4774 &data->val_out)) { 4775 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4776 return -EIO; 4777 } 4778 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4779 break; 4780 case SIOCSMIIREG: 4781 if (data->reg_num & ~(0x1F)) 4782 return -EFAULT; 4783 mii_reg = data->val_in; 4784 spin_lock_irqsave(&adapter->stats_lock, flags); 4785 if (e1000_write_phy_reg(hw, data->reg_num, 4786 mii_reg)) { 4787 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4788 return -EIO; 4789 } 4790 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4791 if (hw->media_type == e1000_media_type_copper) { 4792 switch (data->reg_num) { 4793 case PHY_CTRL: 4794 if (mii_reg & MII_CR_POWER_DOWN) 4795 break; 4796 if (mii_reg & MII_CR_AUTO_NEG_EN) { 4797 hw->autoneg = 1; 4798 hw->autoneg_advertised = 0x2F; 4799 } else { 4800 u32 speed; 4801 if (mii_reg & 0x40) 4802 speed = SPEED_1000; 4803 else if (mii_reg & 0x2000) 4804 speed = SPEED_100; 4805 else 4806 speed = SPEED_10; 4807 retval = e1000_set_spd_dplx( 4808 adapter, speed, 4809 ((mii_reg & 0x100) 4810 ? DUPLEX_FULL : 4811 DUPLEX_HALF)); 4812 if (retval) 4813 return retval; 4814 } 4815 if (netif_running(adapter->netdev)) 4816 e1000_reinit_locked(adapter); 4817 else 4818 e1000_reset(adapter); 4819 break; 4820 case M88E1000_PHY_SPEC_CTRL: 4821 case M88E1000_EXT_PHY_SPEC_CTRL: 4822 if (e1000_phy_reset(hw)) 4823 return -EIO; 4824 break; 4825 } 4826 } else { 4827 switch (data->reg_num) { 4828 case PHY_CTRL: 4829 if (mii_reg & MII_CR_POWER_DOWN) 4830 break; 4831 if (netif_running(adapter->netdev)) 4832 e1000_reinit_locked(adapter); 4833 else 4834 e1000_reset(adapter); 4835 break; 4836 } 4837 } 4838 break; 4839 default: 4840 return -EOPNOTSUPP; 4841 } 4842 return E1000_SUCCESS; 4843 } 4844 4845 void e1000_pci_set_mwi(struct e1000_hw *hw) 4846 { 4847 struct e1000_adapter *adapter = hw->back; 4848 int ret_val = pci_set_mwi(adapter->pdev); 4849 4850 if (ret_val) 4851 e_err(probe, "Error in setting MWI\n"); 4852 } 4853 4854 void e1000_pci_clear_mwi(struct e1000_hw *hw) 4855 { 4856 struct e1000_adapter *adapter = hw->back; 4857 4858 pci_clear_mwi(adapter->pdev); 4859 } 4860 4861 int e1000_pcix_get_mmrbc(struct e1000_hw *hw) 4862 { 4863 struct e1000_adapter *adapter = hw->back; 4864 return pcix_get_mmrbc(adapter->pdev); 4865 } 4866 4867 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) 4868 { 4869 struct e1000_adapter *adapter = hw->back; 4870 pcix_set_mmrbc(adapter->pdev, mmrbc); 4871 } 4872 4873 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) 4874 { 4875 outl(value, port); 4876 } 4877 4878 static bool e1000_vlan_used(struct e1000_adapter *adapter) 4879 { 4880 u16 vid; 4881 4882 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 4883 return true; 4884 return false; 4885 } 4886 4887 static void __e1000_vlan_mode(struct e1000_adapter *adapter, 4888 netdev_features_t features) 4889 { 4890 struct e1000_hw *hw = &adapter->hw; 4891 u32 ctrl; 4892 4893 ctrl = er32(CTRL); 4894 if (features & NETIF_F_HW_VLAN_CTAG_RX) { 4895 /* enable VLAN tag insert/strip */ 4896 ctrl |= E1000_CTRL_VME; 4897 } else { 4898 /* disable VLAN tag insert/strip */ 4899 ctrl &= ~E1000_CTRL_VME; 4900 } 4901 ew32(CTRL, ctrl); 4902 } 4903 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, 4904 bool filter_on) 4905 { 4906 struct e1000_hw *hw = &adapter->hw; 4907 u32 rctl; 4908 4909 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4910 e1000_irq_disable(adapter); 4911 4912 __e1000_vlan_mode(adapter, adapter->netdev->features); 4913 if (filter_on) { 4914 /* enable VLAN receive filtering */ 4915 rctl = er32(RCTL); 4916 rctl &= ~E1000_RCTL_CFIEN; 4917 if (!(adapter->netdev->flags & IFF_PROMISC)) 4918 rctl |= E1000_RCTL_VFE; 4919 ew32(RCTL, rctl); 4920 e1000_update_mng_vlan(adapter); 4921 } else { 4922 /* disable VLAN receive filtering */ 4923 rctl = er32(RCTL); 4924 rctl &= ~E1000_RCTL_VFE; 4925 ew32(RCTL, rctl); 4926 } 4927 4928 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4929 e1000_irq_enable(adapter); 4930 } 4931 4932 static void e1000_vlan_mode(struct net_device *netdev, 4933 netdev_features_t features) 4934 { 4935 struct e1000_adapter *adapter = netdev_priv(netdev); 4936 4937 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4938 e1000_irq_disable(adapter); 4939 4940 __e1000_vlan_mode(adapter, features); 4941 4942 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4943 e1000_irq_enable(adapter); 4944 } 4945 4946 static int e1000_vlan_rx_add_vid(struct net_device *netdev, 4947 __be16 proto, u16 vid) 4948 { 4949 struct e1000_adapter *adapter = netdev_priv(netdev); 4950 struct e1000_hw *hw = &adapter->hw; 4951 u32 vfta, index; 4952 4953 if ((hw->mng_cookie.status & 4954 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 4955 (vid == adapter->mng_vlan_id)) 4956 return 0; 4957 4958 if (!e1000_vlan_used(adapter)) 4959 e1000_vlan_filter_on_off(adapter, true); 4960 4961 /* add VID to filter table */ 4962 index = (vid >> 5) & 0x7F; 4963 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); 4964 vfta |= (1 << (vid & 0x1F)); 4965 e1000_write_vfta(hw, index, vfta); 4966 4967 set_bit(vid, adapter->active_vlans); 4968 4969 return 0; 4970 } 4971 4972 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, 4973 __be16 proto, u16 vid) 4974 { 4975 struct e1000_adapter *adapter = netdev_priv(netdev); 4976 struct e1000_hw *hw = &adapter->hw; 4977 u32 vfta, index; 4978 4979 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4980 e1000_irq_disable(adapter); 4981 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4982 e1000_irq_enable(adapter); 4983 4984 /* remove VID from filter table */ 4985 index = (vid >> 5) & 0x7F; 4986 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); 4987 vfta &= ~(1 << (vid & 0x1F)); 4988 e1000_write_vfta(hw, index, vfta); 4989 4990 clear_bit(vid, adapter->active_vlans); 4991 4992 if (!e1000_vlan_used(adapter)) 4993 e1000_vlan_filter_on_off(adapter, false); 4994 4995 return 0; 4996 } 4997 4998 static void e1000_restore_vlan(struct e1000_adapter *adapter) 4999 { 5000 u16 vid; 5001 5002 if (!e1000_vlan_used(adapter)) 5003 return; 5004 5005 e1000_vlan_filter_on_off(adapter, true); 5006 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 5007 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); 5008 } 5009 5010 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) 5011 { 5012 struct e1000_hw *hw = &adapter->hw; 5013 5014 hw->autoneg = 0; 5015 5016 /* Make sure dplx is at most 1 bit and lsb of speed is not set 5017 * for the switch() below to work 5018 */ 5019 if ((spd & 1) || (dplx & ~1)) 5020 goto err_inval; 5021 5022 /* Fiber NICs only allow 1000 gbps Full duplex */ 5023 if ((hw->media_type == e1000_media_type_fiber) && 5024 spd != SPEED_1000 && 5025 dplx != DUPLEX_FULL) 5026 goto err_inval; 5027 5028 switch (spd + dplx) { 5029 case SPEED_10 + DUPLEX_HALF: 5030 hw->forced_speed_duplex = e1000_10_half; 5031 break; 5032 case SPEED_10 + DUPLEX_FULL: 5033 hw->forced_speed_duplex = e1000_10_full; 5034 break; 5035 case SPEED_100 + DUPLEX_HALF: 5036 hw->forced_speed_duplex = e1000_100_half; 5037 break; 5038 case SPEED_100 + DUPLEX_FULL: 5039 hw->forced_speed_duplex = e1000_100_full; 5040 break; 5041 case SPEED_1000 + DUPLEX_FULL: 5042 hw->autoneg = 1; 5043 hw->autoneg_advertised = ADVERTISE_1000_FULL; 5044 break; 5045 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 5046 default: 5047 goto err_inval; 5048 } 5049 5050 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ 5051 hw->mdix = AUTO_ALL_MODES; 5052 5053 return 0; 5054 5055 err_inval: 5056 e_err(probe, "Unsupported Speed/Duplex configuration\n"); 5057 return -EINVAL; 5058 } 5059 5060 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) 5061 { 5062 struct net_device *netdev = pci_get_drvdata(pdev); 5063 struct e1000_adapter *adapter = netdev_priv(netdev); 5064 struct e1000_hw *hw = &adapter->hw; 5065 u32 ctrl, ctrl_ext, rctl, status; 5066 u32 wufc = adapter->wol; 5067 5068 netif_device_detach(netdev); 5069 5070 if (netif_running(netdev)) { 5071 int count = E1000_CHECK_RESET_COUNT; 5072 5073 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) 5074 usleep_range(10000, 20000); 5075 5076 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); 5077 e1000_down(adapter); 5078 } 5079 5080 status = er32(STATUS); 5081 if (status & E1000_STATUS_LU) 5082 wufc &= ~E1000_WUFC_LNKC; 5083 5084 if (wufc) { 5085 e1000_setup_rctl(adapter); 5086 e1000_set_rx_mode(netdev); 5087 5088 rctl = er32(RCTL); 5089 5090 /* turn on all-multi mode if wake on multicast is enabled */ 5091 if (wufc & E1000_WUFC_MC) 5092 rctl |= E1000_RCTL_MPE; 5093 5094 /* enable receives in the hardware */ 5095 ew32(RCTL, rctl | E1000_RCTL_EN); 5096 5097 if (hw->mac_type >= e1000_82540) { 5098 ctrl = er32(CTRL); 5099 /* advertise wake from D3Cold */ 5100 #define E1000_CTRL_ADVD3WUC 0x00100000 5101 /* phy power management enable */ 5102 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 5103 ctrl |= E1000_CTRL_ADVD3WUC | 5104 E1000_CTRL_EN_PHY_PWR_MGMT; 5105 ew32(CTRL, ctrl); 5106 } 5107 5108 if (hw->media_type == e1000_media_type_fiber || 5109 hw->media_type == e1000_media_type_internal_serdes) { 5110 /* keep the laser running in D3 */ 5111 ctrl_ext = er32(CTRL_EXT); 5112 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; 5113 ew32(CTRL_EXT, ctrl_ext); 5114 } 5115 5116 ew32(WUC, E1000_WUC_PME_EN); 5117 ew32(WUFC, wufc); 5118 } else { 5119 ew32(WUC, 0); 5120 ew32(WUFC, 0); 5121 } 5122 5123 e1000_release_manageability(adapter); 5124 5125 *enable_wake = !!wufc; 5126 5127 /* make sure adapter isn't asleep if manageability is enabled */ 5128 if (adapter->en_mng_pt) 5129 *enable_wake = true; 5130 5131 if (netif_running(netdev)) 5132 e1000_free_irq(adapter); 5133 5134 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) 5135 pci_disable_device(pdev); 5136 5137 return 0; 5138 } 5139 5140 static int e1000_suspend(struct device *dev) 5141 { 5142 int retval; 5143 struct pci_dev *pdev = to_pci_dev(dev); 5144 bool wake; 5145 5146 retval = __e1000_shutdown(pdev, &wake); 5147 device_set_wakeup_enable(dev, wake); 5148 5149 return retval; 5150 } 5151 5152 static int e1000_resume(struct device *dev) 5153 { 5154 struct pci_dev *pdev = to_pci_dev(dev); 5155 struct net_device *netdev = pci_get_drvdata(pdev); 5156 struct e1000_adapter *adapter = netdev_priv(netdev); 5157 struct e1000_hw *hw = &adapter->hw; 5158 u32 err; 5159 5160 if (adapter->need_ioport) 5161 err = pci_enable_device(pdev); 5162 else 5163 err = pci_enable_device_mem(pdev); 5164 if (err) { 5165 pr_err("Cannot enable PCI device from suspend\n"); 5166 return err; 5167 } 5168 5169 /* flush memory to make sure state is correct */ 5170 smp_mb__before_atomic(); 5171 clear_bit(__E1000_DISABLED, &adapter->flags); 5172 pci_set_master(pdev); 5173 5174 pci_enable_wake(pdev, PCI_D3hot, 0); 5175 pci_enable_wake(pdev, PCI_D3cold, 0); 5176 5177 if (netif_running(netdev)) { 5178 err = e1000_request_irq(adapter); 5179 if (err) 5180 return err; 5181 } 5182 5183 e1000_power_up_phy(adapter); 5184 e1000_reset(adapter); 5185 ew32(WUS, ~0); 5186 5187 e1000_init_manageability(adapter); 5188 5189 if (netif_running(netdev)) 5190 e1000_up(adapter); 5191 5192 netif_device_attach(netdev); 5193 5194 return 0; 5195 } 5196 5197 static void e1000_shutdown(struct pci_dev *pdev) 5198 { 5199 bool wake; 5200 5201 __e1000_shutdown(pdev, &wake); 5202 5203 if (system_state == SYSTEM_POWER_OFF) { 5204 pci_wake_from_d3(pdev, wake); 5205 pci_set_power_state(pdev, PCI_D3hot); 5206 } 5207 } 5208 5209 #ifdef CONFIG_NET_POLL_CONTROLLER 5210 /* Polling 'interrupt' - used by things like netconsole to send skbs 5211 * without having to re-enable interrupts. It's not called while 5212 * the interrupt routine is executing. 5213 */ 5214 static void e1000_netpoll(struct net_device *netdev) 5215 { 5216 struct e1000_adapter *adapter = netdev_priv(netdev); 5217 5218 if (disable_hardirq(adapter->pdev->irq)) 5219 e1000_intr(adapter->pdev->irq, netdev); 5220 enable_irq(adapter->pdev->irq); 5221 } 5222 #endif 5223 5224 /** 5225 * e1000_io_error_detected - called when PCI error is detected 5226 * @pdev: Pointer to PCI device 5227 * @state: The current pci connection state 5228 * 5229 * This function is called after a PCI bus error affecting 5230 * this device has been detected. 5231 */ 5232 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, 5233 pci_channel_state_t state) 5234 { 5235 struct net_device *netdev = pci_get_drvdata(pdev); 5236 struct e1000_adapter *adapter = netdev_priv(netdev); 5237 5238 netif_device_detach(netdev); 5239 5240 if (state == pci_channel_io_perm_failure) 5241 return PCI_ERS_RESULT_DISCONNECT; 5242 5243 if (netif_running(netdev)) 5244 e1000_down(adapter); 5245 5246 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) 5247 pci_disable_device(pdev); 5248 5249 /* Request a slot reset. */ 5250 return PCI_ERS_RESULT_NEED_RESET; 5251 } 5252 5253 /** 5254 * e1000_io_slot_reset - called after the pci bus has been reset. 5255 * @pdev: Pointer to PCI device 5256 * 5257 * Restart the card from scratch, as if from a cold-boot. Implementation 5258 * resembles the first-half of the e1000_resume routine. 5259 */ 5260 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) 5261 { 5262 struct net_device *netdev = pci_get_drvdata(pdev); 5263 struct e1000_adapter *adapter = netdev_priv(netdev); 5264 struct e1000_hw *hw = &adapter->hw; 5265 int err; 5266 5267 if (adapter->need_ioport) 5268 err = pci_enable_device(pdev); 5269 else 5270 err = pci_enable_device_mem(pdev); 5271 if (err) { 5272 pr_err("Cannot re-enable PCI device after reset.\n"); 5273 return PCI_ERS_RESULT_DISCONNECT; 5274 } 5275 5276 /* flush memory to make sure state is correct */ 5277 smp_mb__before_atomic(); 5278 clear_bit(__E1000_DISABLED, &adapter->flags); 5279 pci_set_master(pdev); 5280 5281 pci_enable_wake(pdev, PCI_D3hot, 0); 5282 pci_enable_wake(pdev, PCI_D3cold, 0); 5283 5284 e1000_reset(adapter); 5285 ew32(WUS, ~0); 5286 5287 return PCI_ERS_RESULT_RECOVERED; 5288 } 5289 5290 /** 5291 * e1000_io_resume - called when traffic can start flowing again. 5292 * @pdev: Pointer to PCI device 5293 * 5294 * This callback is called when the error recovery driver tells us that 5295 * its OK to resume normal operation. Implementation resembles the 5296 * second-half of the e1000_resume routine. 5297 */ 5298 static void e1000_io_resume(struct pci_dev *pdev) 5299 { 5300 struct net_device *netdev = pci_get_drvdata(pdev); 5301 struct e1000_adapter *adapter = netdev_priv(netdev); 5302 5303 e1000_init_manageability(adapter); 5304 5305 if (netif_running(netdev)) { 5306 if (e1000_up(adapter)) { 5307 pr_info("can't bring device back up after reset\n"); 5308 return; 5309 } 5310 } 5311 5312 netif_device_attach(netdev); 5313 } 5314 5315 /* e1000_main.c */ 5316