// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2006 Intel Corporation. */

#include "e1000.h"
#include <net/ip6_checksum.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static const struct pci_device_id e1000_pci_tbl[] = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
int e1000_open(struct net_device *netdev);
int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count)
{
}
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

static int e1000_suspend(struct device *dev);
static int e1000_resume(struct device *dev);
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif

#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");
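
/* Usage note (illustrative, not part of the original sources): since the
 * parameter above is declared with mode 0644, copybreak can be set either at
 * module load time or at runtime, e.g.
 *
 *   modprobe e1000 copybreak=128
 *   echo 0 > /sys/module/e1000/parameters/copybreak    # 0 disables copying
 *
 * Received packets no longer than copybreak bytes are copied into a small,
 * freshly allocated buffer so the original receive buffer can be reused.
 */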

static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static const struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static DEFINE_SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);

static struct pci_driver e1000_driver = {
	.name = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe = e1000_probe,
	.remove = e1000_remove,
	.driver.pm = pm_sleep_ptr(&e1000_pm_ops),
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL v2");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * e1000_get_hw_dev - helper function for getting netdev
 * @hw: pointer to HW struct
 *
 * return device used by hardware layer to print debugging information
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	return adapter->netdev;
}

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;

	pr_info("%s\n", e1000_driver_string);
	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for packets <= %u bytes\n",
				copybreak);
	}
	return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irq_handler_t handler = e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
			  netdev);
	if (err)
		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}

static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		if (old_vid != E1000_MNG_VLAN_NONE && vid != old_vid &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
					       old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}

static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		ew32(MANC, manc);
	}
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		ew32(MANC, manc);
	}
}

/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
}

int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others).
 * *** this routine MUST be followed by a call to e1000_reset ***
 **/
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle
		 */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}

static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when the interface is
	 * down. The PHY cannot be powered down if any of the following is
	 * true:
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active
	 */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		msleep(1);
	}
out:
	return;
}

static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	cancel_delayed_work_sync(&adapter->watchdog_task);

	/* Since the watchdog task can reschedule other tasks, we should cancel
	 * it first, otherwise we can run into a situation where a work item is
	 * still running after the adapter has been turned down.
	 */

	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);
}

void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	/* Set the carrier off after transmits have been disabled in the
	 * hardware, to avoid race conditions with e1000_watchdog() (which
	 * may be running concurrently to us, checking for the carrier
	 * bit to decide whether it should enable transmits again). Such
	 * a race condition would result in transmission being disabled
	 * in the hardware until the next IFF_DOWN+IFF_UP cycle.
	 */
	netif_carrier_off(netdev);

	netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
	netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/* Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt. Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}

void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);

	/* only run the task if not already down */
	if (!test_bit(__E1000_DOWN, &adapter->flags)) {
		e1000_down(adapter);
		e1000_up(adapter);
	}

	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition PBA for greater than 9k MTU; to take effect,
	 * CTRL.RST is required.
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the Tx fifo also stores 16 bytes of information about the Tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation
		 */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment or use Early Receive if available
			 */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/* flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);

		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload
		 */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}
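
/* Worked example (illustrative only, not from the original sources): assuming
 * E1000_PBA_48K equals 48 (KB) and a default 1500-byte MTU (max_frame_size of
 * 1518 bytes), e1000_reset() above computes
 *
 *   hwm = min(48 * 1024 * 9 / 10, 48 * 1024 - 1518)
 *       = min(44236, 47634) = 44236
 *
 * so fc_high_water becomes 44236 & 0xFFF8 = 44232 (0xACC8) and fc_low_water
 * 44224 (0xACC0), i.e. a PAUSE frame is requested once only about 5 KB of the
 * Rx FIFO remains free.
 */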

/* Dump the eeprom for users having checksum issues */
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data)
		return;

	ops->get_eeprom(netdev, &eeprom, data);

	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;

	pr_err("/*********************/\n");
	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
	pr_err("Calculated              : 0x%04x\n", csum_new);

	pr_err("Offset    Values\n");
	pr_err("========  ======\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	pr_err("Include this output when contacting your support provider.\n");
	pr_err("This is not a software error! Something bad happened to\n");
	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
	pr_err("result in further problems, possibly loss of data,\n");
	pr_err("corruption or system hangs!\n");
	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
	pr_err("which is invalid and requires you to set the proper MAC\n");
	pr_err("address manually before continuing to enable this network\n");
	pr_err("device. Please inspect the EEPROM dump and report the\n");
	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
	pr_err("/*********************/\n");

	kfree(data);
}

/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
static int e1000_is_need_ioport(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case E1000_DEV_ID_82540EM:
	case E1000_DEV_ID_82540EM_LOM:
	case E1000_DEV_ID_82540EP:
	case E1000_DEV_ID_82540EP_LOM:
	case E1000_DEV_ID_82540EP_LP:
	case E1000_DEV_ID_82541EI:
	case E1000_DEV_ID_82541EI_MOBILE:
	case E1000_DEV_ID_82541ER:
	case E1000_DEV_ID_82541ER_LOM:
	case E1000_DEV_ID_82541GI:
	case E1000_DEV_ID_82541GI_LF:
	case E1000_DEV_ID_82541GI_MOBILE:
	case E1000_DEV_ID_82544EI_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82544GC_COPPER:
	case E1000_DEV_ID_82544GC_LOM:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82546EB_COPPER:
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
		return true;
	default:
		return false;
	}
}

static netdev_features_t e1000_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable make sure Tx flag is always in same state as Rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int e1000_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		e1000_vlan_mode(netdev, features);

	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	else
		e1000_reset(adapter);

	return 1;
}

static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open = e1000_open,
	.ndo_stop = e1000_close,
	.ndo_start_xmit = e1000_xmit_frame,
	.ndo_set_rx_mode = e1000_set_rx_mode,
	.ndo_set_mac_address = e1000_set_mac,
	.ndo_tx_timeout = e1000_tx_timeout,
	.ndo_change_mtu = e1000_change_mtu,
	.ndo_eth_ioctl = e1000_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = e1000_netpoll,
#endif
	.ndo_fix_features = e1000_fix_features,
	.ndo_set_features = e1000_set_features,
};
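
/* Note (illustrative, not part of the original sources): because
 * e1000_fix_features() above forces the Tx VLAN-acceleration flag to track
 * the Rx flag, the two cannot be toggled independently from user space, e.g.
 *
 *   ethtool -K eth0 rxvlan off    # txvlan is switched off as well
 *
 * while an attempt to clear txvlan alone is reverted by the fixup. The
 * interface name "eth0" is only an example.
 */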

/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function
 * that can be called very early at init (just after struct allocation).
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 * Returns negative error codes if MAC type setup fails.
 */
static int e1000_init_hw_struct(struct e1000_adapter *adapter,
				struct e1000_hw *hw)
{
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	hw->max_frame_size = adapter->netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */
	if (e1000_set_mac_type(hw)) {
		e_err(probe, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);
	e1000_get_bus_info(hw);

	hw->wait_autoneg_complete = false;
	hw->tbi_compatibility_en = true;
	hw->adaptive_ifs = true;

	/* Copper options */
	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = false;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	return 0;
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter = NULL;
	struct e1000_hw *hw;

	static int cards_found;
	static int global_quad_port_a; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;
	bool disable_dev = false;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	if (adapter->need_ioport) {
		for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/* there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware. There are some
	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
	}

	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean);

	strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));

	adapter->bd_number = cards_found;

	/* setup the private structure */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;
	if (hw->mac_type == e1000_ce4100) {
		hw->ce4100_gbe_mdio_base_virt =
			ioremap(pci_resource_start(pdev, BAR_1),
				pci_resource_len(pdev, BAR_1));

		if (!hw->ce4100_gbe_mdio_base_virt)
			goto err_mdio_ioremap;
	}

	if (hw->mac_type >= e1000_82543) {
		netdev->hw_features = NETIF_F_SG |
				      NETIF_F_HW_CSUM |
				      NETIF_F_HW_VLAN_CTAG_RX;
		netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	    (hw->mac_type != e1000_82547))
		netdev->hw_features |= NETIF_F_TSO;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= (NETIF_F_RXCSUM |
				NETIF_F_RXALL |
				NETIF_F_RXFCS);

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	netdev->vlan_features |= (NETIF_F_TSO |
				  NETIF_F_HW_CSUM |
				  NETIF_F_SG);

	/* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
	if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
	    hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
		netdev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 46 - 16110 */
	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
	netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state
	 */
	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/* set MAC address to all zeroes to invalidate and temporarily
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip link set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initialization here due to bad MAC address */
	eth_hw_addr_set(netdev, hw->mac_addr);

	if (!is_valid_ether_addr(netdev->dev_addr))
		e_err(probe, "Invalid MAC Address\n");

	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */
	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
				  EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(hw,
					  EEPROM_INIT_CONTROL3_PORT_B, 1,
					  &eeprom_data);
			break;
		}
		fallthrough;
	default:
		e1000_read_eeprom(hw,
				  EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = true;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* Auto detect PHY address */
	if (hw->mac_type == e1000_ce4100) {
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);

			if (tmp != 0 && tmp != 0xFF)
				break;
		}

		if (i >= 32)
			goto err_eeprom;
	}

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_vlan_filter_on_off(adapter, false);

	/* print bus type/speed/width info */
	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
	       netdev->dev_addr);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

err_register:
err_eeprom:
	e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_dma:
err_sw_init:
err_mdio_ioremap:
	iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
err_ioremap:
	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
	if (!adapter || disable_dev)
		pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. That could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	bool disable_dev;

	e1000_down_and_stop(adapter);
	e1000_release_manageability(adapter);

	unregister_netdev(netdev);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	if (hw->mac_type == e1000_ce4100)
		iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
	free_netdev(netdev);

	if (disable_dev)
		pci_disable_device(pdev);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * e1000_init_hw_struct MUST be called before this function
 **/
static int e1000_sw_init(struct e1000_adapter *adapter)
{
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		e_err(probe, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}

/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int e1000_alloc_queues(struct e1000_adapter *adapter)
{
	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/
int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	netif_napi_set_irq(&adapter->napi, adapter->pdev->irq);
	napi_enable(&adapter->napi);
	netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, &adapter->napi);
	netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, &adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}

/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int count = E1000_CHECK_RESET_COUNT;

	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
		usleep_range(10000, 20000);

	WARN_ON(count < 0);

	/* signal that we're down so that the reset task will no longer run */
	set_bit(__E1000_DOWN, &adapter->flags);
	clear_bit(__E1000_RESETTING, &adapter->flags);

	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it)
	 */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
				       adapter->mng_vlan_id);
	}

	return 0;
}

/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
				  unsigned long len)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long begin = (unsigned long)start;
	unsigned long end = begin + len;

	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23
	 */
	if (hw->mac_type == e1000_82545 ||
	    hw->mac_type == e1000_ce4100 ||
	    hw->mac_type == e1000_82546)
		return ((begin ^ (end - 1)) >> 16) == 0;

	return true;
}
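
/* How the check above works (illustrative note, not from the original
 * sources): the buffer stays inside a single 64 KB page exactly when the
 * first and last byte addresses agree in every bit above bit 15, so XOR-ing
 * them and shifting right by 16 must yield zero. For example, begin = 0xFFF0
 * with len = 0x20 gives end - 1 = 0x1000F and (0xFFF0 ^ 0x1000F) >> 16 = 1,
 * i.e. the buffer crosses a 64 KB boundary and has to be reallocated.
 */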

/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_tx_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info)
		return -ENOMEM;

	/* round up to nearest 4K */
	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;

		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
		      txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
					  txdr->dma);
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		}

		/* Free old allocation, new allocation was successful */
		dma_free_coherent(&pdev->dev, txdr->size, olddesc, olddma);
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}

/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				e1000_free_tx_resources(adapter,
							&adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tdlen, tctl, tipg;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	switch (adapter->num_tx_queues) {
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		ew32(TDLEN, tdlen);
		ew32(TDBAH, (tdba >> 32));
		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
		ew32(TDT, 0);
		ew32(TDH, 0);
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDT : E1000_82542_TDT);
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if ((hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);

	/* Set the Tx Interrupt Delay register */
	ew32(TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		ew32(TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path.
	 */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = true;

	ew32(TCTL, tctl);
}

/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_rx_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info)
		return -ENOMEM;

	desc_len = sizeof(struct e1000_rx_desc);

	/* Round up to nearest 4K */
	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);
	if (!rxdr->desc) {
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;

		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
		      rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for the Rx descriptor ring\n");
			goto setup_rx_desc_die;
		}

		/* Free old allocation, new allocation was successful */
		dma_free_coherent(&pdev->dev, rxdr->size, olddesc, olddma);
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->rx_skb_top = NULL;

	return 0;
}

/**
 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				e1000_free_rx_resources(adapter,
							&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = er32(RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);

	if (hw->tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in e1000_set_rx_mode
		 */
		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
			  E1000_RCTL_DPF | /* Allow filtered pause */
			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
		/* Do not mess with E1000_CTRL_VME, it affects transmit as
		 * well, and that breaks VLANs.
		 */
	}

	ew32(RCTL, rctl);
}

/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void e1000_configure_rx(struct e1000_adapter *adapter)
{
	u64 rdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 rdlen, rctl, rxcsum;

	if (adapter->netdev->mtu > ETH_DATA_LEN) {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
	} else {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);

	/* set the Receive Delay Timer Register */
	ew32(RDTR, adapter->rx_int_delay);

	if (hw->mac_type >= e1000_82540) {
		ew32(RADV, adapter->rx_abs_int_delay);
		if (adapter->itr_setting != 0)
			ew32(ITR, 1000000000 / (adapter->itr * 256));
	}
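
	/* Worked example (illustrative, not from the original sources): the
	 * ITR register counts in units of 256 ns, so a hypothetical target of
	 * adapter->itr = 8000 interrupts/s programs
	 * 1000000000 / (8000 * 256) = 488, i.e. one interrupt at most roughly
	 * every 488 * 256 ns ~= 125 us.
	 */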
1899 E1000_RDT : E1000_82542_RDT); 1900 break; 1901 } 1902 1903 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 1904 if (hw->mac_type >= e1000_82543) { 1905 rxcsum = er32(RXCSUM); 1906 if (adapter->rx_csum) 1907 rxcsum |= E1000_RXCSUM_TUOFL; 1908 else 1909 /* don't need to clear IPPCSE as it defaults to 0 */ 1910 rxcsum &= ~E1000_RXCSUM_TUOFL; 1911 ew32(RXCSUM, rxcsum); 1912 } 1913 1914 /* Enable Receives */ 1915 ew32(RCTL, rctl | E1000_RCTL_EN); 1916 } 1917 1918 /** 1919 * e1000_free_tx_resources - Free Tx Resources per Queue 1920 * @adapter: board private structure 1921 * @tx_ring: Tx descriptor ring for a specific queue 1922 * 1923 * Free all transmit software resources 1924 **/ 1925 static void e1000_free_tx_resources(struct e1000_adapter *adapter, 1926 struct e1000_tx_ring *tx_ring) 1927 { 1928 struct pci_dev *pdev = adapter->pdev; 1929 1930 e1000_clean_tx_ring(adapter, tx_ring); 1931 1932 vfree(tx_ring->buffer_info); 1933 tx_ring->buffer_info = NULL; 1934 1935 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 1936 tx_ring->dma); 1937 1938 tx_ring->desc = NULL; 1939 } 1940 1941 /** 1942 * e1000_free_all_tx_resources - Free Tx Resources for All Queues 1943 * @adapter: board private structure 1944 * 1945 * Free all transmit software resources 1946 **/ 1947 void e1000_free_all_tx_resources(struct e1000_adapter *adapter) 1948 { 1949 int i; 1950 1951 for (i = 0; i < adapter->num_tx_queues; i++) 1952 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); 1953 } 1954 1955 static void 1956 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, 1957 struct e1000_tx_buffer *buffer_info, 1958 int budget) 1959 { 1960 if (buffer_info->dma) { 1961 if (buffer_info->mapped_as_page) 1962 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, 1963 buffer_info->length, DMA_TO_DEVICE); 1964 else 1965 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, 1966 buffer_info->length, 1967 DMA_TO_DEVICE); 1968 buffer_info->dma = 0; 1969 } 1970 if (buffer_info->skb) { 1971 napi_consume_skb(buffer_info->skb, budget); 1972 buffer_info->skb = NULL; 1973 } 1974 buffer_info->time_stamp = 0; 1975 /* buffer_info must be completely set up in the transmit path */ 1976 } 1977 1978 /** 1979 * e1000_clean_tx_ring - Free Tx Buffers 1980 * @adapter: board private structure 1981 * @tx_ring: ring to be cleaned 1982 **/ 1983 static void e1000_clean_tx_ring(struct e1000_adapter *adapter, 1984 struct e1000_tx_ring *tx_ring) 1985 { 1986 struct e1000_hw *hw = &adapter->hw; 1987 struct e1000_tx_buffer *buffer_info; 1988 unsigned long size; 1989 unsigned int i; 1990 1991 /* Free all the Tx ring sk_buffs */ 1992 1993 for (i = 0; i < tx_ring->count; i++) { 1994 buffer_info = &tx_ring->buffer_info[i]; 1995 e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0); 1996 } 1997 1998 netdev_reset_queue(adapter->netdev); 1999 size = sizeof(struct e1000_tx_buffer) * tx_ring->count; 2000 memset(tx_ring->buffer_info, 0, size); 2001 2002 /* Zero out the descriptor ring */ 2003 2004 memset(tx_ring->desc, 0, tx_ring->size); 2005 2006 tx_ring->next_to_use = 0; 2007 tx_ring->next_to_clean = 0; 2008 tx_ring->last_tx_tso = false; 2009 2010 writel(0, hw->hw_addr + tx_ring->tdh); 2011 writel(0, hw->hw_addr + tx_ring->tdt); 2012 } 2013 2014 /** 2015 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues 2016 * @adapter: board private structure 2017 **/ 2018 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) 2019 { 2020 int i; 2021 2022 for (i = 0; i < adapter->num_tx_queues; i++) 2023 
e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); 2024 } 2025 2026 /** 2027 * e1000_free_rx_resources - Free Rx Resources 2028 * @adapter: board private structure 2029 * @rx_ring: ring to clean the resources from 2030 * 2031 * Free all receive software resources 2032 **/ 2033 static void e1000_free_rx_resources(struct e1000_adapter *adapter, 2034 struct e1000_rx_ring *rx_ring) 2035 { 2036 struct pci_dev *pdev = adapter->pdev; 2037 2038 e1000_clean_rx_ring(adapter, rx_ring); 2039 2040 vfree(rx_ring->buffer_info); 2041 rx_ring->buffer_info = NULL; 2042 2043 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2044 rx_ring->dma); 2045 2046 rx_ring->desc = NULL; 2047 } 2048 2049 /** 2050 * e1000_free_all_rx_resources - Free Rx Resources for All Queues 2051 * @adapter: board private structure 2052 * 2053 * Free all receive software resources 2054 **/ 2055 void e1000_free_all_rx_resources(struct e1000_adapter *adapter) 2056 { 2057 int i; 2058 2059 for (i = 0; i < adapter->num_rx_queues; i++) 2060 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); 2061 } 2062 2063 #define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN) 2064 static unsigned int e1000_frag_len(const struct e1000_adapter *a) 2065 { 2066 return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) + 2067 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2068 } 2069 2070 static void *e1000_alloc_frag(const struct e1000_adapter *a) 2071 { 2072 unsigned int len = e1000_frag_len(a); 2073 u8 *data = netdev_alloc_frag(len); 2074 2075 if (likely(data)) 2076 data += E1000_HEADROOM; 2077 return data; 2078 } 2079 2080 /** 2081 * e1000_clean_rx_ring - Free Rx Buffers per Queue 2082 * @adapter: board private structure 2083 * @rx_ring: ring to free buffers from 2084 **/ 2085 static void e1000_clean_rx_ring(struct e1000_adapter *adapter, 2086 struct e1000_rx_ring *rx_ring) 2087 { 2088 struct e1000_hw *hw = &adapter->hw; 2089 struct e1000_rx_buffer *buffer_info; 2090 struct pci_dev *pdev = adapter->pdev; 2091 unsigned long size; 2092 unsigned int i; 2093 2094 /* Free all the Rx netfrags */ 2095 for (i = 0; i < rx_ring->count; i++) { 2096 buffer_info = &rx_ring->buffer_info[i]; 2097 if (adapter->clean_rx == e1000_clean_rx_irq) { 2098 if (buffer_info->dma) 2099 dma_unmap_single(&pdev->dev, buffer_info->dma, 2100 adapter->rx_buffer_len, 2101 DMA_FROM_DEVICE); 2102 if (buffer_info->rxbuf.data) { 2103 skb_free_frag(buffer_info->rxbuf.data); 2104 buffer_info->rxbuf.data = NULL; 2105 } 2106 } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) { 2107 if (buffer_info->dma) 2108 dma_unmap_page(&pdev->dev, buffer_info->dma, 2109 adapter->rx_buffer_len, 2110 DMA_FROM_DEVICE); 2111 if (buffer_info->rxbuf.page) { 2112 put_page(buffer_info->rxbuf.page); 2113 buffer_info->rxbuf.page = NULL; 2114 } 2115 } 2116 2117 buffer_info->dma = 0; 2118 } 2119 2120 /* there also may be some cached data from a chained receive */ 2121 napi_free_frags(&adapter->napi); 2122 rx_ring->rx_skb_top = NULL; 2123 2124 size = sizeof(struct e1000_rx_buffer) * rx_ring->count; 2125 memset(rx_ring->buffer_info, 0, size); 2126 2127 /* Zero out the descriptor ring */ 2128 memset(rx_ring->desc, 0, rx_ring->size); 2129 2130 rx_ring->next_to_clean = 0; 2131 rx_ring->next_to_use = 0; 2132 2133 writel(0, hw->hw_addr + rx_ring->rdh); 2134 writel(0, hw->hw_addr + rx_ring->rdt); 2135 } 2136 2137 /** 2138 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues 2139 * @adapter: board private structure 2140 **/ 2141 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) 2142 { 
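	/* In practice this driver only ever brings up a single Rx queue, but
	 * walking num_rx_queues keeps the cleanup symmetric with the Tx path.
	 */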
2143 int i; 2144 2145 for (i = 0; i < adapter->num_rx_queues; i++) 2146 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); 2147 } 2148 2149 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset 2150 * and memory write and invalidate disabled for certain operations 2151 */ 2152 static void e1000_enter_82542_rst(struct e1000_adapter *adapter) 2153 { 2154 struct e1000_hw *hw = &adapter->hw; 2155 struct net_device *netdev = adapter->netdev; 2156 u32 rctl; 2157 2158 e1000_pci_clear_mwi(hw); 2159 2160 rctl = er32(RCTL); 2161 rctl |= E1000_RCTL_RST; 2162 ew32(RCTL, rctl); 2163 E1000_WRITE_FLUSH(); 2164 mdelay(5); 2165 2166 if (netif_running(netdev)) 2167 e1000_clean_all_rx_rings(adapter); 2168 } 2169 2170 static void e1000_leave_82542_rst(struct e1000_adapter *adapter) 2171 { 2172 struct e1000_hw *hw = &adapter->hw; 2173 struct net_device *netdev = adapter->netdev; 2174 u32 rctl; 2175 2176 rctl = er32(RCTL); 2177 rctl &= ~E1000_RCTL_RST; 2178 ew32(RCTL, rctl); 2179 E1000_WRITE_FLUSH(); 2180 mdelay(5); 2181 2182 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) 2183 e1000_pci_set_mwi(hw); 2184 2185 if (netif_running(netdev)) { 2186 /* No need to loop, because 82542 supports only 1 queue */ 2187 struct e1000_rx_ring *ring = &adapter->rx_ring[0]; 2188 e1000_configure_rx(adapter); 2189 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); 2190 } 2191 } 2192 2193 /** 2194 * e1000_set_mac - Change the Ethernet Address of the NIC 2195 * @netdev: network interface device structure 2196 * @p: pointer to an address structure 2197 * 2198 * Returns 0 on success, negative on failure 2199 **/ 2200 static int e1000_set_mac(struct net_device *netdev, void *p) 2201 { 2202 struct e1000_adapter *adapter = netdev_priv(netdev); 2203 struct e1000_hw *hw = &adapter->hw; 2204 struct sockaddr *addr = p; 2205 2206 if (!is_valid_ether_addr(addr->sa_data)) 2207 return -EADDRNOTAVAIL; 2208 2209 /* 82542 2.0 needs to be in reset to write receive address registers */ 2210 2211 if (hw->mac_type == e1000_82542_rev2_0) 2212 e1000_enter_82542_rst(adapter); 2213 2214 eth_hw_addr_set(netdev, addr->sa_data); 2215 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); 2216 2217 e1000_rar_set(hw, hw->mac_addr, 0); 2218 2219 if (hw->mac_type == e1000_82542_rev2_0) 2220 e1000_leave_82542_rst(adapter); 2221 2222 return 0; 2223 } 2224 2225 /** 2226 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 2227 * @netdev: network interface device structure 2228 * 2229 * The set_rx_mode entry point is called whenever the unicast or multicast 2230 * address lists or the network interface flags are updated. This routine is 2231 * responsible for configuring the hardware for proper unicast, multicast, 2232 * promiscuous mode, and all-multi behavior. 
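 *
 * Exact matches are programmed into the receive address registers (RAR 0
 * always holds the station address); once those are exhausted, remaining
 * multicast addresses fall back to the multicast table array, a
 * hash-indexed bit vector spread across E1000_NUM_MTA_REGISTERS 32-bit
 * registers that is rewritten from back to front below.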
2233 **/ 2234 static void e1000_set_rx_mode(struct net_device *netdev) 2235 { 2236 struct e1000_adapter *adapter = netdev_priv(netdev); 2237 struct e1000_hw *hw = &adapter->hw; 2238 struct netdev_hw_addr *ha; 2239 bool use_uc = false; 2240 u32 rctl; 2241 u32 hash_value; 2242 int i, rar_entries = E1000_RAR_ENTRIES; 2243 int mta_reg_count = E1000_NUM_MTA_REGISTERS; 2244 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); 2245 2246 if (!mcarray) 2247 return; 2248 2249 /* Check for Promiscuous and All Multicast modes */ 2250 2251 rctl = er32(RCTL); 2252 2253 if (netdev->flags & IFF_PROMISC) { 2254 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2255 rctl &= ~E1000_RCTL_VFE; 2256 } else { 2257 if (netdev->flags & IFF_ALLMULTI) 2258 rctl |= E1000_RCTL_MPE; 2259 else 2260 rctl &= ~E1000_RCTL_MPE; 2261 /* Enable VLAN filter if there is a VLAN */ 2262 if (e1000_vlan_used(adapter)) 2263 rctl |= E1000_RCTL_VFE; 2264 } 2265 2266 if (netdev_uc_count(netdev) > rar_entries - 1) { 2267 rctl |= E1000_RCTL_UPE; 2268 } else if (!(netdev->flags & IFF_PROMISC)) { 2269 rctl &= ~E1000_RCTL_UPE; 2270 use_uc = true; 2271 } 2272 2273 ew32(RCTL, rctl); 2274 2275 /* 82542 2.0 needs to be in reset to write receive address registers */ 2276 2277 if (hw->mac_type == e1000_82542_rev2_0) 2278 e1000_enter_82542_rst(adapter); 2279 2280 /* load the first 14 addresses into the exact filters 1-14. Unicast 2281 * addresses take precedence to avoid disabling unicast filtering 2282 * when possible. 2283 * 2284 * RAR 0 is used for the station MAC address. 2285 * If there are fewer than 14 addresses, clear the remaining filters. 2286 */ 2287 i = 1; 2288 if (use_uc) 2289 netdev_for_each_uc_addr(ha, netdev) { 2290 if (i == rar_entries) 2291 break; 2292 e1000_rar_set(hw, ha->addr, i++); 2293 } 2294 2295 netdev_for_each_mc_addr(ha, netdev) { 2296 if (i == rar_entries) { 2297 /* load any remaining addresses into the hash table */ 2298 u32 hash_reg, hash_bit, mta; 2299 hash_value = e1000_hash_mc_addr(hw, ha->addr); 2300 hash_reg = (hash_value >> 5) & 0x7F; 2301 hash_bit = hash_value & 0x1F; 2302 mta = (1 << hash_bit); 2303 mcarray[hash_reg] |= mta; 2304 } else { 2305 e1000_rar_set(hw, ha->addr, i++); 2306 } 2307 } 2308 2309 for (; i < rar_entries; i++) { 2310 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); 2311 E1000_WRITE_FLUSH(); 2312 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); 2313 E1000_WRITE_FLUSH(); 2314 } 2315 2316 /* write the hash table completely, writing from the bottom to cope with 2317 * write-combining chipsets and to avoid flushing after each write 2318 */ 2319 for (i = mta_reg_count - 1; i >= 0 ; i--) { 2320 /* The 82544 has an erratum where writing odd 2321 * offsets overwrites the previous even offset; writing 2322 * backwards over the range avoids the issue by always 2323 * writing the odd offset first. 2324 */ 2325 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]); 2326 } 2327 E1000_WRITE_FLUSH(); 2328 2329 if (hw->mac_type == e1000_82542_rev2_0) 2330 e1000_leave_82542_rst(adapter); 2331 2332 kfree(mcarray); 2333 } 2334 2335 /** 2336 * e1000_update_phy_info_task - get phy info 2337 * @work: work struct contained inside adapter struct 2338 * 2339 * Need to wait a few seconds after link up to get diagnostic information from 2340 * the phy 2341 */ 2342 static void e1000_update_phy_info_task(struct work_struct *work) 2343 { 2344 struct e1000_adapter *adapter = container_of(work, 2345 struct e1000_adapter, 2346 phy_info_task.work); 2347 2348 e1000_phy_get_info(&adapter->hw, &adapter->phy_info); 2349 } 2350 2351 /** 2352 *
e1000_82547_tx_fifo_stall_task - task to complete work 2353 * @work: work struct contained inside adapter struct 2354 **/ 2355 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) 2356 { 2357 struct e1000_adapter *adapter = container_of(work, 2358 struct e1000_adapter, 2359 fifo_stall_task.work); 2360 struct e1000_hw *hw = &adapter->hw; 2361 struct net_device *netdev = adapter->netdev; 2362 u32 tctl; 2363 2364 if (atomic_read(&adapter->tx_fifo_stall)) { 2365 if ((er32(TDT) == er32(TDH)) && 2366 (er32(TDFT) == er32(TDFH)) && 2367 (er32(TDFTS) == er32(TDFHS))) { 2368 tctl = er32(TCTL); 2369 ew32(TCTL, tctl & ~E1000_TCTL_EN); 2370 ew32(TDFT, adapter->tx_head_addr); 2371 ew32(TDFH, adapter->tx_head_addr); 2372 ew32(TDFTS, adapter->tx_head_addr); 2373 ew32(TDFHS, adapter->tx_head_addr); 2374 ew32(TCTL, tctl); 2375 E1000_WRITE_FLUSH(); 2376 2377 adapter->tx_fifo_head = 0; 2378 atomic_set(&adapter->tx_fifo_stall, 0); 2379 netif_wake_queue(netdev); 2380 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) { 2381 schedule_delayed_work(&adapter->fifo_stall_task, 1); 2382 } 2383 } 2384 } 2385 2386 bool e1000_has_link(struct e1000_adapter *adapter) 2387 { 2388 struct e1000_hw *hw = &adapter->hw; 2389 bool link_active = false; 2390 2391 /* get_link_status is set on LSC (link status) interrupt or rx 2392 * sequence error interrupt (except on intel ce4100). 2393 * get_link_status will stay false until the 2394 * e1000_check_for_link establishes link for copper adapters 2395 * ONLY 2396 */ 2397 switch (hw->media_type) { 2398 case e1000_media_type_copper: 2399 if (hw->mac_type == e1000_ce4100) 2400 hw->get_link_status = 1; 2401 if (hw->get_link_status) { 2402 e1000_check_for_link(hw); 2403 link_active = !hw->get_link_status; 2404 } else { 2405 link_active = true; 2406 } 2407 break; 2408 case e1000_media_type_fiber: 2409 e1000_check_for_link(hw); 2410 link_active = !!(er32(STATUS) & E1000_STATUS_LU); 2411 break; 2412 case e1000_media_type_internal_serdes: 2413 e1000_check_for_link(hw); 2414 link_active = hw->serdes_has_link; 2415 break; 2416 default: 2417 break; 2418 } 2419 2420 return link_active; 2421 } 2422 2423 /** 2424 * e1000_watchdog - work function 2425 * @work: work struct contained inside adapter struct 2426 **/ 2427 static void e1000_watchdog(struct work_struct *work) 2428 { 2429 struct e1000_adapter *adapter = container_of(work, 2430 struct e1000_adapter, 2431 watchdog_task.work); 2432 struct e1000_hw *hw = &adapter->hw; 2433 struct net_device *netdev = adapter->netdev; 2434 struct e1000_tx_ring *txdr = adapter->tx_ring; 2435 u32 link, tctl; 2436 2437 link = e1000_has_link(adapter); 2438 if ((netif_carrier_ok(netdev)) && link) 2439 goto link_up; 2440 2441 if (link) { 2442 if (!netif_carrier_ok(netdev)) { 2443 u32 ctrl; 2444 /* update snapshot of PHY registers on LSC */ 2445 e1000_get_speed_and_duplex(hw, 2446 &adapter->link_speed, 2447 &adapter->link_duplex); 2448 2449 ctrl = er32(CTRL); 2450 pr_info("%s NIC Link is Up %d Mbps %s, " 2451 "Flow Control: %s\n", 2452 netdev->name, 2453 adapter->link_speed, 2454 adapter->link_duplex == FULL_DUPLEX ? 2455 "Full Duplex" : "Half Duplex", 2456 ((ctrl & E1000_CTRL_TFCE) && (ctrl & 2457 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & 2458 E1000_CTRL_RFCE) ? "RX" : ((ctrl & 2459 E1000_CTRL_TFCE) ? 
"TX" : "None"))); 2460 2461 /* adjust timeout factor according to speed/duplex */ 2462 adapter->tx_timeout_factor = 1; 2463 switch (adapter->link_speed) { 2464 case SPEED_10: 2465 adapter->tx_timeout_factor = 16; 2466 break; 2467 case SPEED_100: 2468 /* maybe add some timeout factor ? */ 2469 break; 2470 } 2471 2472 /* enable transmits in the hardware */ 2473 tctl = er32(TCTL); 2474 tctl |= E1000_TCTL_EN; 2475 ew32(TCTL, tctl); 2476 2477 netif_carrier_on(netdev); 2478 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2479 schedule_delayed_work(&adapter->phy_info_task, 2480 2 * HZ); 2481 adapter->smartspeed = 0; 2482 } 2483 } else { 2484 if (netif_carrier_ok(netdev)) { 2485 adapter->link_speed = 0; 2486 adapter->link_duplex = 0; 2487 pr_info("%s NIC Link is Down\n", 2488 netdev->name); 2489 netif_carrier_off(netdev); 2490 2491 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2492 schedule_delayed_work(&adapter->phy_info_task, 2493 2 * HZ); 2494 } 2495 2496 e1000_smartspeed(adapter); 2497 } 2498 2499 link_up: 2500 e1000_update_stats(adapter); 2501 2502 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 2503 adapter->tpt_old = adapter->stats.tpt; 2504 hw->collision_delta = adapter->stats.colc - adapter->colc_old; 2505 adapter->colc_old = adapter->stats.colc; 2506 2507 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; 2508 adapter->gorcl_old = adapter->stats.gorcl; 2509 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; 2510 adapter->gotcl_old = adapter->stats.gotcl; 2511 2512 e1000_update_adaptive(hw); 2513 2514 if (!netif_carrier_ok(netdev)) { 2515 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { 2516 /* We've lost link, so the controller stops DMA, 2517 * but we've got queued Tx work that's never going 2518 * to get done, so reset controller to flush Tx. 2519 * (Do the reset outside of interrupt context). 2520 */ 2521 adapter->tx_timeout_count++; 2522 schedule_work(&adapter->reset_task); 2523 /* exit immediately since reset is imminent */ 2524 return; 2525 } 2526 } 2527 2528 /* Simple mode for Interrupt Throttle Rate (ITR) */ 2529 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { 2530 /* Symmetric Tx/Rx gets a reduced ITR=2000; 2531 * Total asymmetrical Tx or Rx gets ITR=8000; 2532 * everyone else is between 2000-8000. 2533 */ 2534 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; 2535 u32 dif = (adapter->gotcl > adapter->gorcl ? 2536 adapter->gotcl - adapter->gorcl : 2537 adapter->gorcl - adapter->gotcl) / 10000; 2538 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 2539 2540 ew32(ITR, 1000000000 / (itr * 256)); 2541 } 2542 2543 /* Cause software interrupt to ensure rx ring is cleaned */ 2544 ew32(ICS, E1000_ICS_RXDMT0); 2545 2546 /* Force detection of hung controller every watchdog period */ 2547 adapter->detect_tx_hung = true; 2548 2549 /* Reschedule the task */ 2550 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2551 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); 2552 } 2553 2554 enum latency_range { 2555 lowest_latency = 0, 2556 low_latency = 1, 2557 bulk_latency = 2, 2558 latency_invalid = 255 2559 }; 2560 2561 /** 2562 * e1000_update_itr - update the dynamic ITR value based on statistics 2563 * @adapter: pointer to adapter 2564 * @itr_setting: current adapter->itr 2565 * @packets: the number of packets during this measurement interval 2566 * @bytes: the number of bytes during this measurement interval 2567 * 2568 * Stores a new ITR value based on packets and byte 2569 * counts during the last interrupt. 
The advantage of per interrupt 2570 * computation is faster updates and more accurate ITR for the current 2571 * traffic pattern. Constants in this function were computed 2572 * based on theoretical maximum wire speed and thresholds were set based 2573 * on testing data as well as attempting to minimize response time 2574 * while increasing bulk throughput. 2575 * this functionality is controlled by the InterruptThrottleRate module 2576 * parameter (see e1000_param.c) 2577 **/ 2578 static unsigned int e1000_update_itr(struct e1000_adapter *adapter, 2579 u16 itr_setting, int packets, int bytes) 2580 { 2581 unsigned int retval = itr_setting; 2582 struct e1000_hw *hw = &adapter->hw; 2583 2584 if (unlikely(hw->mac_type < e1000_82540)) 2585 goto update_itr_done; 2586 2587 if (packets == 0) 2588 goto update_itr_done; 2589 2590 switch (itr_setting) { 2591 case lowest_latency: 2592 /* jumbo frames get bulk treatment*/ 2593 if (bytes/packets > 8000) 2594 retval = bulk_latency; 2595 else if ((packets < 5) && (bytes > 512)) 2596 retval = low_latency; 2597 break; 2598 case low_latency: /* 50 usec aka 20000 ints/s */ 2599 if (bytes > 10000) { 2600 /* jumbo frames need bulk latency setting */ 2601 if (bytes/packets > 8000) 2602 retval = bulk_latency; 2603 else if ((packets < 10) || ((bytes/packets) > 1200)) 2604 retval = bulk_latency; 2605 else if ((packets > 35)) 2606 retval = lowest_latency; 2607 } else if (bytes/packets > 2000) 2608 retval = bulk_latency; 2609 else if (packets <= 2 && bytes < 512) 2610 retval = lowest_latency; 2611 break; 2612 case bulk_latency: /* 250 usec aka 4000 ints/s */ 2613 if (bytes > 25000) { 2614 if (packets > 35) 2615 retval = low_latency; 2616 } else if (bytes < 6000) { 2617 retval = low_latency; 2618 } 2619 break; 2620 } 2621 2622 update_itr_done: 2623 return retval; 2624 } 2625 2626 static void e1000_set_itr(struct e1000_adapter *adapter) 2627 { 2628 struct e1000_hw *hw = &adapter->hw; 2629 u16 current_itr; 2630 u32 new_itr = adapter->itr; 2631 2632 if (unlikely(hw->mac_type < e1000_82540)) 2633 return; 2634 2635 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 2636 if (unlikely(adapter->link_speed != SPEED_1000)) { 2637 new_itr = 4000; 2638 goto set_itr_now; 2639 } 2640 2641 adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr, 2642 adapter->total_tx_packets, 2643 adapter->total_tx_bytes); 2644 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2645 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) 2646 adapter->tx_itr = low_latency; 2647 2648 adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr, 2649 adapter->total_rx_packets, 2650 adapter->total_rx_bytes); 2651 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2652 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) 2653 adapter->rx_itr = low_latency; 2654 2655 current_itr = max(adapter->rx_itr, adapter->tx_itr); 2656 2657 switch (current_itr) { 2658 /* counts and packets in update_itr are dependent on these numbers */ 2659 case lowest_latency: 2660 new_itr = 70000; 2661 break; 2662 case low_latency: 2663 new_itr = 20000; /* aka hwitr = ~200 */ 2664 break; 2665 case bulk_latency: 2666 new_itr = 4000; 2667 break; 2668 default: 2669 break; 2670 } 2671 2672 set_itr_now: 2673 if (new_itr != adapter->itr) { 2674 /* this attempts to bias the interrupt rate towards Bulk 2675 * by adding intermediate steps when interrupt rate is 2676 * increasing 2677 */ 2678 new_itr = new_itr > adapter->itr ? 
2679 min(adapter->itr + (new_itr >> 2), new_itr) : 2680 new_itr; 2681 adapter->itr = new_itr; 2682 ew32(ITR, 1000000000 / (new_itr * 256)); 2683 } 2684 } 2685 2686 #define E1000_TX_FLAGS_CSUM 0x00000001 2687 #define E1000_TX_FLAGS_VLAN 0x00000002 2688 #define E1000_TX_FLAGS_TSO 0x00000004 2689 #define E1000_TX_FLAGS_IPV4 0x00000008 2690 #define E1000_TX_FLAGS_NO_FCS 0x00000010 2691 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 2692 #define E1000_TX_FLAGS_VLAN_SHIFT 16 2693 2694 static int e1000_tso(struct e1000_adapter *adapter, 2695 struct e1000_tx_ring *tx_ring, struct sk_buff *skb, 2696 __be16 protocol) 2697 { 2698 struct e1000_context_desc *context_desc; 2699 struct e1000_tx_buffer *buffer_info; 2700 unsigned int i; 2701 u32 cmd_length = 0; 2702 u16 ipcse = 0, tucse, mss; 2703 u8 ipcss, ipcso, tucss, tucso, hdr_len; 2704 2705 if (skb_is_gso(skb)) { 2706 int err; 2707 2708 err = skb_cow_head(skb, 0); 2709 if (err < 0) 2710 return err; 2711 2712 hdr_len = skb_tcp_all_headers(skb); 2713 mss = skb_shinfo(skb)->gso_size; 2714 if (protocol == htons(ETH_P_IP)) { 2715 struct iphdr *iph = ip_hdr(skb); 2716 iph->tot_len = 0; 2717 iph->check = 0; 2718 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 2719 iph->daddr, 0, 2720 IPPROTO_TCP, 2721 0); 2722 cmd_length = E1000_TXD_CMD_IP; 2723 ipcse = skb_transport_offset(skb) - 1; 2724 } else if (skb_is_gso_v6(skb)) { 2725 tcp_v6_gso_csum_prep(skb); 2726 ipcse = 0; 2727 } 2728 ipcss = skb_network_offset(skb); 2729 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; 2730 tucss = skb_transport_offset(skb); 2731 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; 2732 tucse = 0; 2733 2734 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 2735 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 2736 2737 i = tx_ring->next_to_use; 2738 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2739 buffer_info = &tx_ring->buffer_info[i]; 2740 2741 context_desc->lower_setup.ip_fields.ipcss = ipcss; 2742 context_desc->lower_setup.ip_fields.ipcso = ipcso; 2743 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); 2744 context_desc->upper_setup.tcp_fields.tucss = tucss; 2745 context_desc->upper_setup.tcp_fields.tucso = tucso; 2746 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); 2747 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); 2748 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 2749 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 2750 2751 buffer_info->time_stamp = jiffies; 2752 buffer_info->next_to_watch = i; 2753 2754 if (++i == tx_ring->count) 2755 i = 0; 2756 2757 tx_ring->next_to_use = i; 2758 2759 return true; 2760 } 2761 return false; 2762 } 2763 2764 static bool e1000_tx_csum(struct e1000_adapter *adapter, 2765 struct e1000_tx_ring *tx_ring, struct sk_buff *skb, 2766 __be16 protocol) 2767 { 2768 struct e1000_context_desc *context_desc; 2769 struct e1000_tx_buffer *buffer_info; 2770 unsigned int i; 2771 u8 css; 2772 u32 cmd_len = E1000_TXD_CMD_DEXT; 2773 2774 if (skb->ip_summed != CHECKSUM_PARTIAL) 2775 return false; 2776 2777 switch (protocol) { 2778 case cpu_to_be16(ETH_P_IP): 2779 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2780 cmd_len |= E1000_TXD_CMD_TCP; 2781 break; 2782 case cpu_to_be16(ETH_P_IPV6): 2783 /* XXX not handling all IPV6 headers */ 2784 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2785 cmd_len |= E1000_TXD_CMD_TCP; 2786 break; 2787 default: 2788 if (unlikely(net_ratelimit())) 2789 e_warn(drv, "checksum_partial proto=%x!\n", 2790 skb->protocol); 2791 break; 2792 } 2793 2794 css = 
skb_checksum_start_offset(skb); 2795 2796 i = tx_ring->next_to_use; 2797 buffer_info = &tx_ring->buffer_info[i]; 2798 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2799 2800 context_desc->lower_setup.ip_config = 0; 2801 context_desc->upper_setup.tcp_fields.tucss = css; 2802 context_desc->upper_setup.tcp_fields.tucso = 2803 css + skb->csum_offset; 2804 context_desc->upper_setup.tcp_fields.tucse = 0; 2805 context_desc->tcp_seg_setup.data = 0; 2806 context_desc->cmd_and_length = cpu_to_le32(cmd_len); 2807 2808 buffer_info->time_stamp = jiffies; 2809 buffer_info->next_to_watch = i; 2810 2811 if (unlikely(++i == tx_ring->count)) 2812 i = 0; 2813 2814 tx_ring->next_to_use = i; 2815 2816 return true; 2817 } 2818 2819 #define E1000_MAX_TXD_PWR 12 2820 #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR) 2821 2822 static int e1000_tx_map(struct e1000_adapter *adapter, 2823 struct e1000_tx_ring *tx_ring, 2824 struct sk_buff *skb, unsigned int first, 2825 unsigned int max_per_txd, unsigned int nr_frags, 2826 unsigned int mss) 2827 { 2828 struct e1000_hw *hw = &adapter->hw; 2829 struct pci_dev *pdev = adapter->pdev; 2830 struct e1000_tx_buffer *buffer_info; 2831 unsigned int len = skb_headlen(skb); 2832 unsigned int offset = 0, size, count = 0, i; 2833 unsigned int f, bytecount, segs; 2834 2835 i = tx_ring->next_to_use; 2836 2837 while (len) { 2838 buffer_info = &tx_ring->buffer_info[i]; 2839 size = min(len, max_per_txd); 2840 /* Workaround for Controller erratum -- 2841 * descriptor for non-tso packet in a linear SKB that follows a 2842 * tso gets written back prematurely before the data is fully 2843 * DMA'd to the controller 2844 */ 2845 if (!skb->data_len && tx_ring->last_tx_tso && 2846 !skb_is_gso(skb)) { 2847 tx_ring->last_tx_tso = false; 2848 size -= 4; 2849 } 2850 2851 /* Workaround for premature desc write-backs 2852 * in TSO mode. Append 4-byte sentinel desc 2853 */ 2854 if (unlikely(mss && !nr_frags && size == len && size > 8)) 2855 size -= 4; 2856 /* work-around for errata 10 and it applies 2857 * to all controllers in PCI-X mode 2858 * The fix is to make sure that the first descriptor of a 2859 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes 2860 */ 2861 if (unlikely((hw->bus_type == e1000_bus_type_pcix) && 2862 (size > 2015) && count == 0)) 2863 size = 2015; 2864 2865 /* Workaround for potential 82544 hang in PCI-X. Avoid 2866 * terminating buffers within evenly-aligned dwords. 
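 * For example (addresses purely illustrative): a buffer whose last byte
 * would land at 0x...8, i.e. with bit 2 of the end address clear, is
 * trimmed by four bytes so it ends in the preceding odd-aligned dword;
 * the trimmed bytes are simply picked up by the next descriptor in the
 * surrounding loop.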
2867 */ 2868 if (unlikely(adapter->pcix_82544 && 2869 !((unsigned long)(skb->data + offset + size - 1) & 4) && 2870 size > 4)) 2871 size -= 4; 2872 2873 buffer_info->length = size; 2874 /* set time_stamp *before* dma to help avoid a possible race */ 2875 buffer_info->time_stamp = jiffies; 2876 buffer_info->mapped_as_page = false; 2877 buffer_info->dma = dma_map_single(&pdev->dev, 2878 skb->data + offset, 2879 size, DMA_TO_DEVICE); 2880 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2881 goto dma_error; 2882 buffer_info->next_to_watch = i; 2883 2884 len -= size; 2885 offset += size; 2886 count++; 2887 if (len) { 2888 i++; 2889 if (unlikely(i == tx_ring->count)) 2890 i = 0; 2891 } 2892 } 2893 2894 for (f = 0; f < nr_frags; f++) { 2895 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; 2896 2897 len = skb_frag_size(frag); 2898 offset = 0; 2899 2900 while (len) { 2901 unsigned long bufend; 2902 i++; 2903 if (unlikely(i == tx_ring->count)) 2904 i = 0; 2905 2906 buffer_info = &tx_ring->buffer_info[i]; 2907 size = min(len, max_per_txd); 2908 /* Workaround for premature desc write-backs 2909 * in TSO mode. Append 4-byte sentinel desc 2910 */ 2911 if (unlikely(mss && f == (nr_frags-1) && 2912 size == len && size > 8)) 2913 size -= 4; 2914 /* Workaround for potential 82544 hang in PCI-X. 2915 * Avoid terminating buffers within evenly-aligned 2916 * dwords. 2917 */ 2918 bufend = (unsigned long) 2919 page_to_phys(skb_frag_page(frag)); 2920 bufend += offset + size - 1; 2921 if (unlikely(adapter->pcix_82544 && 2922 !(bufend & 4) && 2923 size > 4)) 2924 size -= 4; 2925 2926 buffer_info->length = size; 2927 buffer_info->time_stamp = jiffies; 2928 buffer_info->mapped_as_page = true; 2929 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 2930 offset, size, DMA_TO_DEVICE); 2931 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2932 goto dma_error; 2933 buffer_info->next_to_watch = i; 2934 2935 len -= size; 2936 offset += size; 2937 count++; 2938 } 2939 } 2940 2941 segs = skb_shinfo(skb)->gso_segs ?: 1; 2942 /* multiply data chunks by size of headers */ 2943 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; 2944 2945 tx_ring->buffer_info[i].skb = skb; 2946 tx_ring->buffer_info[i].segs = segs; 2947 tx_ring->buffer_info[i].bytecount = bytecount; 2948 tx_ring->buffer_info[first].next_to_watch = i; 2949 2950 return count; 2951 2952 dma_error: 2953 dev_err(&pdev->dev, "TX DMA map failed\n"); 2954 buffer_info->dma = 0; 2955 if (count) 2956 count--; 2957 2958 while (count--) { 2959 if (i == 0) 2960 i += tx_ring->count; 2961 i--; 2962 buffer_info = &tx_ring->buffer_info[i]; 2963 e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0); 2964 } 2965 2966 return 0; 2967 } 2968 2969 static void e1000_tx_queue(struct e1000_adapter *adapter, 2970 struct e1000_tx_ring *tx_ring, int tx_flags, 2971 int count) 2972 { 2973 struct e1000_tx_desc *tx_desc = NULL; 2974 struct e1000_tx_buffer *buffer_info; 2975 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 2976 unsigned int i; 2977 2978 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { 2979 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 2980 E1000_TXD_CMD_TSE; 2981 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2982 2983 if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) 2984 txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2985 } 2986 2987 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { 2988 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 2989 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2990 } 2991 2992 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { 2993 txd_lower |= 
E1000_TXD_CMD_VLE; 2994 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); 2995 } 2996 2997 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) 2998 txd_lower &= ~(E1000_TXD_CMD_IFCS); 2999 3000 i = tx_ring->next_to_use; 3001 3002 while (count--) { 3003 buffer_info = &tx_ring->buffer_info[i]; 3004 tx_desc = E1000_TX_DESC(*tx_ring, i); 3005 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 3006 tx_desc->lower.data = 3007 cpu_to_le32(txd_lower | buffer_info->length); 3008 tx_desc->upper.data = cpu_to_le32(txd_upper); 3009 if (unlikely(++i == tx_ring->count)) 3010 i = 0; 3011 } 3012 3013 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 3014 3015 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ 3016 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) 3017 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); 3018 3019 /* Force memory writes to complete before letting h/w 3020 * know there are new descriptors to fetch. (Only 3021 * applicable for weak-ordered memory model archs, 3022 * such as IA-64). 3023 */ 3024 dma_wmb(); 3025 3026 tx_ring->next_to_use = i; 3027 } 3028 3029 /* 82547 workaround to avoid controller hang in half-duplex environment. 3030 * The workaround is to avoid queuing a large packet that would span 3031 * the internal Tx FIFO ring boundary by notifying the stack to resend 3032 * the packet at a later time. This gives the Tx FIFO an opportunity to 3033 * flush all packets. When that occurs, we reset the Tx FIFO pointers 3034 * to the beginning of the Tx FIFO. 3035 */ 3036 3037 #define E1000_FIFO_HDR 0x10 3038 #define E1000_82547_PAD_LEN 0x3E0 3039 3040 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, 3041 struct sk_buff *skb) 3042 { 3043 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; 3044 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; 3045 3046 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); 3047 3048 if (adapter->link_duplex != HALF_DUPLEX) 3049 goto no_fifo_stall_required; 3050 3051 if (atomic_read(&adapter->tx_fifo_stall)) 3052 return 1; 3053 3054 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { 3055 atomic_set(&adapter->tx_fifo_stall, 1); 3056 return 1; 3057 } 3058 3059 no_fifo_stall_required: 3060 adapter->tx_fifo_head += skb_fifo_len; 3061 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) 3062 adapter->tx_fifo_head -= adapter->tx_fifo_size; 3063 return 0; 3064 } 3065 3066 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) 3067 { 3068 struct e1000_adapter *adapter = netdev_priv(netdev); 3069 struct e1000_tx_ring *tx_ring = adapter->tx_ring; 3070 3071 netif_stop_queue(netdev); 3072 /* Herbert's original patch had: 3073 * smp_mb__after_netif_stop_queue(); 3074 * but since that doesn't exist yet, just open code it. 3075 */ 3076 smp_mb(); 3077 3078 /* We need to check again in a case another CPU has just 3079 * made room available. 3080 */ 3081 if (likely(E1000_DESC_UNUSED(tx_ring) < size)) 3082 return -EBUSY; 3083 3084 /* A reprieve! 
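Another CPU freed descriptors between the check above and the queue stop,
 * so restart the queue and count the restart in restart_queue.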
*/ 3085 netif_start_queue(netdev); 3086 ++adapter->restart_queue; 3087 return 0; 3088 } 3089 3090 static int e1000_maybe_stop_tx(struct net_device *netdev, 3091 struct e1000_tx_ring *tx_ring, int size) 3092 { 3093 if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) 3094 return 0; 3095 return __e1000_maybe_stop_tx(netdev, size); 3096 } 3097 3098 #define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X)) 3099 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 3100 struct net_device *netdev) 3101 { 3102 struct e1000_adapter *adapter = netdev_priv(netdev); 3103 struct e1000_hw *hw = &adapter->hw; 3104 struct e1000_tx_ring *tx_ring; 3105 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; 3106 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 3107 unsigned int tx_flags = 0; 3108 unsigned int len = skb_headlen(skb); 3109 unsigned int nr_frags; 3110 unsigned int mss; 3111 int count = 0; 3112 int tso; 3113 unsigned int f; 3114 __be16 protocol = vlan_get_protocol(skb); 3115 3116 /* This goes back to the question of how to logically map a Tx queue 3117 * to a flow. Right now, performance is impacted slightly negatively 3118 * if using multiple Tx queues. If the stack breaks away from a 3119 * single qdisc implementation, we can look at this again. 3120 */ 3121 tx_ring = adapter->tx_ring; 3122 3123 /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN, 3124 * packets may get corrupted during padding by HW. 3125 * To WA this issue, pad all small packets manually. 3126 */ 3127 if (eth_skb_pad(skb)) 3128 return NETDEV_TX_OK; 3129 3130 mss = skb_shinfo(skb)->gso_size; 3131 /* The controller does a simple calculation to 3132 * make sure there is enough room in the FIFO before 3133 * initiating the DMA for each buffer. The calc is: 3134 * 4 = ceil(buffer len/mss). To make sure we don't 3135 * overrun the FIFO, adjust the max buffer len if mss 3136 * drops. 
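 *
 * A rough worked example (MSS values purely illustrative): with the
 * 4096-byte descriptor data limit, an MSS of 1460 leaves the cap at
 * min(1460 * 4, 4096) = 4096 bytes, while an MSS of 536 lowers it to
 * 536 * 4 = 2144 bytes per descriptor.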
3137 */ 3138 if (mss) { 3139 u8 hdr_len; 3140 max_per_txd = min(mss << 2, max_per_txd); 3141 max_txd_pwr = fls(max_per_txd) - 1; 3142 3143 hdr_len = skb_tcp_all_headers(skb); 3144 if (skb->data_len && hdr_len == len) { 3145 switch (hw->mac_type) { 3146 case e1000_82544: { 3147 unsigned int pull_size; 3148 3149 /* Make sure we have room to chop off 4 bytes, 3150 * and that the end alignment will work out to 3151 * this hardware's requirements 3152 * NOTE: this is a TSO only workaround 3153 * if end byte alignment not correct move us 3154 * into the next dword 3155 */ 3156 if ((unsigned long)(skb_tail_pointer(skb) - 1) 3157 & 4) 3158 break; 3159 pull_size = min((unsigned int)4, skb->data_len); 3160 if (!__pskb_pull_tail(skb, pull_size)) { 3161 e_err(drv, "__pskb_pull_tail " 3162 "failed.\n"); 3163 dev_kfree_skb_any(skb); 3164 return NETDEV_TX_OK; 3165 } 3166 len = skb_headlen(skb); 3167 break; 3168 } 3169 default: 3170 /* do nothing */ 3171 break; 3172 } 3173 } 3174 } 3175 3176 /* reserve a descriptor for the offload context */ 3177 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) 3178 count++; 3179 count++; 3180 3181 /* Controller Erratum workaround */ 3182 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) 3183 count++; 3184 3185 count += TXD_USE_COUNT(len, max_txd_pwr); 3186 3187 if (adapter->pcix_82544) 3188 count++; 3189 3190 /* work-around for errata 10 and it applies to all controllers 3191 * in PCI-X mode, so add one more descriptor to the count 3192 */ 3193 if (unlikely((hw->bus_type == e1000_bus_type_pcix) && 3194 (len > 2015))) 3195 count++; 3196 3197 nr_frags = skb_shinfo(skb)->nr_frags; 3198 for (f = 0; f < nr_frags; f++) 3199 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), 3200 max_txd_pwr); 3201 if (adapter->pcix_82544) 3202 count += nr_frags; 3203 3204 /* need: count + 2 desc gap to keep tail from touching 3205 * head, otherwise try next time 3206 */ 3207 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) 3208 return NETDEV_TX_BUSY; 3209 3210 if (unlikely((hw->mac_type == e1000_82547) && 3211 (e1000_82547_fifo_workaround(adapter, skb)))) { 3212 netif_stop_queue(netdev); 3213 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3214 schedule_delayed_work(&adapter->fifo_stall_task, 1); 3215 return NETDEV_TX_BUSY; 3216 } 3217 3218 if (skb_vlan_tag_present(skb)) { 3219 tx_flags |= E1000_TX_FLAGS_VLAN; 3220 tx_flags |= (skb_vlan_tag_get(skb) << 3221 E1000_TX_FLAGS_VLAN_SHIFT); 3222 } 3223 3224 first = tx_ring->next_to_use; 3225 3226 tso = e1000_tso(adapter, tx_ring, skb, protocol); 3227 if (tso < 0) { 3228 dev_kfree_skb_any(skb); 3229 return NETDEV_TX_OK; 3230 } 3231 3232 if (likely(tso)) { 3233 if (likely(hw->mac_type != e1000_82544)) 3234 tx_ring->last_tx_tso = true; 3235 tx_flags |= E1000_TX_FLAGS_TSO; 3236 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol))) 3237 tx_flags |= E1000_TX_FLAGS_CSUM; 3238 3239 if (protocol == htons(ETH_P_IP)) 3240 tx_flags |= E1000_TX_FLAGS_IPV4; 3241 3242 if (unlikely(skb->no_fcs)) 3243 tx_flags |= E1000_TX_FLAGS_NO_FCS; 3244 3245 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, 3246 nr_frags, mss); 3247 3248 if (count) { 3249 /* The descriptors needed is higher than other Intel drivers 3250 * due to a number of workarounds. 
The breakdown is below: 3251 * Data descriptors: MAX_SKB_FRAGS + 1 3252 * Context Descriptor: 1 3253 * Keep head from touching tail: 2 3254 * Workarounds: 3 3255 */ 3256 int desc_needed = MAX_SKB_FRAGS + 7; 3257 3258 netdev_sent_queue(netdev, skb->len); 3259 skb_tx_timestamp(skb); 3260 3261 e1000_tx_queue(adapter, tx_ring, tx_flags, count); 3262 3263 /* 82544 potentially requires twice as many data descriptors 3264 * in order to guarantee buffers don't end on evenly-aligned 3265 * dwords 3266 */ 3267 if (adapter->pcix_82544) 3268 desc_needed += MAX_SKB_FRAGS + 1; 3269 3270 /* Make sure there is space in the ring for the next send. */ 3271 e1000_maybe_stop_tx(netdev, tx_ring, desc_needed); 3272 3273 if (!netdev_xmit_more() || 3274 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { 3275 writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt); 3276 } 3277 } else { 3278 dev_kfree_skb_any(skb); 3279 tx_ring->buffer_info[first].time_stamp = 0; 3280 tx_ring->next_to_use = first; 3281 } 3282 3283 return NETDEV_TX_OK; 3284 } 3285 3286 #define NUM_REGS 38 /* 1 based count */ 3287 static void e1000_regdump(struct e1000_adapter *adapter) 3288 { 3289 struct e1000_hw *hw = &adapter->hw; 3290 u32 regs[NUM_REGS]; 3291 u32 *regs_buff = regs; 3292 int i = 0; 3293 3294 static const char * const reg_name[] = { 3295 "CTRL", "STATUS", 3296 "RCTL", "RDLEN", "RDH", "RDT", "RDTR", 3297 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT", 3298 "TIDV", "TXDCTL", "TADV", "TARC0", 3299 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1", 3300 "TXDCTL1", "TARC1", 3301 "CTRL_EXT", "ERT", "RDBAL", "RDBAH", 3302 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC", 3303 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC" 3304 }; 3305 3306 regs_buff[0] = er32(CTRL); 3307 regs_buff[1] = er32(STATUS); 3308 3309 regs_buff[2] = er32(RCTL); 3310 regs_buff[3] = er32(RDLEN); 3311 regs_buff[4] = er32(RDH); 3312 regs_buff[5] = er32(RDT); 3313 regs_buff[6] = er32(RDTR); 3314 3315 regs_buff[7] = er32(TCTL); 3316 regs_buff[8] = er32(TDBAL); 3317 regs_buff[9] = er32(TDBAH); 3318 regs_buff[10] = er32(TDLEN); 3319 regs_buff[11] = er32(TDH); 3320 regs_buff[12] = er32(TDT); 3321 regs_buff[13] = er32(TIDV); 3322 regs_buff[14] = er32(TXDCTL); 3323 regs_buff[15] = er32(TADV); 3324 regs_buff[16] = er32(TARC0); 3325 3326 regs_buff[17] = er32(TDBAL1); 3327 regs_buff[18] = er32(TDBAH1); 3328 regs_buff[19] = er32(TDLEN1); 3329 regs_buff[20] = er32(TDH1); 3330 regs_buff[21] = er32(TDT1); 3331 regs_buff[22] = er32(TXDCTL1); 3332 regs_buff[23] = er32(TARC1); 3333 regs_buff[24] = er32(CTRL_EXT); 3334 regs_buff[25] = er32(ERT); 3335 regs_buff[26] = er32(RDBAL0); 3336 regs_buff[27] = er32(RDBAH0); 3337 regs_buff[28] = er32(TDFH); 3338 regs_buff[29] = er32(TDFT); 3339 regs_buff[30] = er32(TDFHS); 3340 regs_buff[31] = er32(TDFTS); 3341 regs_buff[32] = er32(TDFPC); 3342 regs_buff[33] = er32(RDFH); 3343 regs_buff[34] = er32(RDFT); 3344 regs_buff[35] = er32(RDFHS); 3345 regs_buff[36] = er32(RDFTS); 3346 regs_buff[37] = er32(RDFPC); 3347 3348 pr_info("Register dump\n"); 3349 for (i = 0; i < NUM_REGS; i++) 3350 pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]); 3351 } 3352 3353 /* 3354 * e1000_dump: Print registers, tx ring and rx ring 3355 */ 3356 static void e1000_dump(struct e1000_adapter *adapter) 3357 { 3358 /* this code doesn't handle multiple rings */ 3359 struct e1000_tx_ring *tx_ring = adapter->tx_ring; 3360 struct e1000_rx_ring *rx_ring = adapter->rx_ring; 3361 int i; 3362 3363 if (!netif_msg_hw(adapter)) 3364 return; 3365 3366 /* Print Registers */ 3367 
e1000_regdump(adapter); 3368 3369 /* transmit dump */ 3370 pr_info("TX Desc ring0 dump\n"); 3371 3372 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) 3373 * 3374 * Legacy Transmit Descriptor 3375 * +--------------------------------------------------------------+ 3376 * 0 | Buffer Address [63:0] (Reserved on Write Back) | 3377 * +--------------------------------------------------------------+ 3378 * 8 | Special | CSS | Status | CMD | CSO | Length | 3379 * +--------------------------------------------------------------+ 3380 * 63 48 47 36 35 32 31 24 23 16 15 0 3381 * 3382 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload 3383 * 63 48 47 40 39 32 31 16 15 8 7 0 3384 * +----------------------------------------------------------------+ 3385 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | 3386 * +----------------------------------------------------------------+ 3387 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | 3388 * +----------------------------------------------------------------+ 3389 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 3390 * 3391 * Extended Data Descriptor (DTYP=0x1) 3392 * +----------------------------------------------------------------+ 3393 * 0 | Buffer Address [63:0] | 3394 * +----------------------------------------------------------------+ 3395 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | 3396 * +----------------------------------------------------------------+ 3397 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 3398 */ 3399 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n"); 3400 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n"); 3401 3402 if (!netif_msg_tx_done(adapter)) 3403 goto rx_ring_summary; 3404 3405 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { 3406 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); 3407 struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i]; 3408 struct my_u { __le64 a; __le64 b; }; 3409 struct my_u *u = (struct my_u *)tx_desc; 3410 const char *type; 3411 3412 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) 3413 type = "NTC/U"; 3414 else if (i == tx_ring->next_to_use) 3415 type = "NTU"; 3416 else if (i == tx_ring->next_to_clean) 3417 type = "NTC"; 3418 else 3419 type = ""; 3420 3421 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n", 3422 ((le64_to_cpu(u->b) & (1<<20)) ? 
'd' : 'c'), i, 3423 le64_to_cpu(u->a), le64_to_cpu(u->b), 3424 (u64)buffer_info->dma, buffer_info->length, 3425 buffer_info->next_to_watch, 3426 (u64)buffer_info->time_stamp, buffer_info->skb, type); 3427 } 3428 3429 rx_ring_summary: 3430 /* receive dump */ 3431 pr_info("\nRX Desc ring dump\n"); 3432 3433 /* Legacy Receive Descriptor Format 3434 * 3435 * +-----------------------------------------------------+ 3436 * | Buffer Address [63:0] | 3437 * +-----------------------------------------------------+ 3438 * | VLAN Tag | Errors | Status 0 | Packet csum | Length | 3439 * +-----------------------------------------------------+ 3440 * 63 48 47 40 39 32 31 16 15 0 3441 */ 3442 pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n"); 3443 3444 if (!netif_msg_rx_status(adapter)) 3445 goto exit; 3446 3447 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { 3448 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); 3449 struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i]; 3450 struct my_u { __le64 a; __le64 b; }; 3451 struct my_u *u = (struct my_u *)rx_desc; 3452 const char *type; 3453 3454 if (i == rx_ring->next_to_use) 3455 type = "NTU"; 3456 else if (i == rx_ring->next_to_clean) 3457 type = "NTC"; 3458 else 3459 type = ""; 3460 3461 pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n", 3462 i, le64_to_cpu(u->a), le64_to_cpu(u->b), 3463 (u64)buffer_info->dma, buffer_info->rxbuf.data, type); 3464 } /* for */ 3465 3466 /* dump the descriptor caches */ 3467 /* rx */ 3468 pr_info("Rx descriptor cache in 64bit format\n"); 3469 for (i = 0x6000; i <= 0x63FF ; i += 0x10) { 3470 pr_info("R%04X: %08X|%08X %08X|%08X\n", 3471 i, 3472 readl(adapter->hw.hw_addr + i+4), 3473 readl(adapter->hw.hw_addr + i), 3474 readl(adapter->hw.hw_addr + i+12), 3475 readl(adapter->hw.hw_addr + i+8)); 3476 } 3477 /* tx */ 3478 pr_info("Tx descriptor cache in 64bit format\n"); 3479 for (i = 0x7000; i <= 0x73FF ; i += 0x10) { 3480 pr_info("T%04X: %08X|%08X %08X|%08X\n", 3481 i, 3482 readl(adapter->hw.hw_addr + i+4), 3483 readl(adapter->hw.hw_addr + i), 3484 readl(adapter->hw.hw_addr + i+12), 3485 readl(adapter->hw.hw_addr + i+8)); 3486 } 3487 exit: 3488 return; 3489 } 3490 3491 /** 3492 * e1000_tx_timeout - Respond to a Tx Hang 3493 * @netdev: network interface device structure 3494 * @txqueue: number of the Tx queue that hung (unused) 3495 **/ 3496 static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) 3497 { 3498 struct e1000_adapter *adapter = netdev_priv(netdev); 3499 3500 /* Do the reset outside of interrupt context */ 3501 adapter->tx_timeout_count++; 3502 schedule_work(&adapter->reset_task); 3503 } 3504 3505 static void e1000_reset_task(struct work_struct *work) 3506 { 3507 struct e1000_adapter *adapter = 3508 container_of(work, struct e1000_adapter, reset_task); 3509 3510 e_err(drv, "Reset adapter\n"); 3511 rtnl_lock(); 3512 e1000_reinit_locked(adapter); 3513 rtnl_unlock(); 3514 } 3515 3516 /** 3517 * e1000_change_mtu - Change the Maximum Transfer Unit 3518 * @netdev: network interface device structure 3519 * @new_mtu: new value for maximum frame size 3520 * 3521 * Returns 0 on success, negative on failure 3522 **/ 3523 static int e1000_change_mtu(struct net_device *netdev, int new_mtu) 3524 { 3525 struct e1000_adapter *adapter = netdev_priv(netdev); 3526 struct e1000_hw *hw = &adapter->hw; 3527 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3528 3529 /* Adapter-specific max frame size limits. 
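The value checked is new_mtu + ETH_HLEN + ETH_FCS_LEN, so the standard
	 * 1500-byte MTU corresponds to a 1518-byte frame; 82542-class and older
	 * parts reject anything larger, while newer MACs accept frames up to
	 * MAX_JUMBO_FRAME_SIZE.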
*/ 3530 switch (hw->mac_type) { 3531 case e1000_undefined ... e1000_82542_rev2_1: 3532 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { 3533 e_err(probe, "Jumbo Frames not supported.\n"); 3534 return -EINVAL; 3535 } 3536 break; 3537 default: 3538 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ 3539 break; 3540 } 3541 3542 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 3543 msleep(1); 3544 /* e1000_down has a dependency on max_frame_size */ 3545 hw->max_frame_size = max_frame; 3546 if (netif_running(netdev)) { 3547 /* prevent buffers from being reallocated */ 3548 adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers; 3549 e1000_down(adapter); 3550 } 3551 3552 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3553 * means we reserve 2 more, this pushes us to allocate from the next 3554 * larger slab size. 3555 * i.e. RXBUFFER_2048 --> size-4096 slab 3556 * however with the new *_jumbo_rx* routines, jumbo receives will use 3557 * fragmented skbs 3558 */ 3559 3560 if (max_frame <= E1000_RXBUFFER_2048) 3561 adapter->rx_buffer_len = E1000_RXBUFFER_2048; 3562 else 3563 #if (PAGE_SIZE >= E1000_RXBUFFER_16384) 3564 adapter->rx_buffer_len = E1000_RXBUFFER_16384; 3565 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096) 3566 adapter->rx_buffer_len = PAGE_SIZE; 3567 #endif 3568 3569 /* adjust allocation if LPE protects us, and we aren't using SBP */ 3570 if (!hw->tbi_compatibility_on && 3571 ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || 3572 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) 3573 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 3574 3575 netdev_dbg(netdev, "changing MTU from %d to %d\n", 3576 netdev->mtu, new_mtu); 3577 WRITE_ONCE(netdev->mtu, new_mtu); 3578 3579 if (netif_running(netdev)) 3580 e1000_up(adapter); 3581 else 3582 e1000_reset(adapter); 3583 3584 clear_bit(__E1000_RESETTING, &adapter->flags); 3585 3586 return 0; 3587 } 3588 3589 /** 3590 * e1000_update_stats - Update the board statistics counters 3591 * @adapter: board private structure 3592 **/ 3593 void e1000_update_stats(struct e1000_adapter *adapter) 3594 { 3595 struct net_device *netdev = adapter->netdev; 3596 struct e1000_hw *hw = &adapter->hw; 3597 struct pci_dev *pdev = adapter->pdev; 3598 unsigned long flags; 3599 u16 phy_tmp; 3600 3601 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF 3602 3603 /* Prevent stats update while adapter is being reset, or if the pci 3604 * connection is down. 
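	 * (link_speed is zeroed both on link loss and when the interface is
	 * taken down, so the first check below also covers the reset path,
	 * while pci_channel_offline() guards against an error-frozen or
	 * surprise-removed device)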
3605 */ 3606 if (adapter->link_speed == 0) 3607 return; 3608 if (pci_channel_offline(pdev)) 3609 return; 3610 3611 spin_lock_irqsave(&adapter->stats_lock, flags); 3612 3613 /* these counters are modified from e1000_tbi_adjust_stats, 3614 * called from the interrupt context, so they must only 3615 * be written while holding adapter->stats_lock 3616 */ 3617 3618 adapter->stats.crcerrs += er32(CRCERRS); 3619 adapter->stats.gprc += er32(GPRC); 3620 adapter->stats.gorcl += er32(GORCL); 3621 adapter->stats.gorch += er32(GORCH); 3622 adapter->stats.bprc += er32(BPRC); 3623 adapter->stats.mprc += er32(MPRC); 3624 adapter->stats.roc += er32(ROC); 3625 3626 adapter->stats.prc64 += er32(PRC64); 3627 adapter->stats.prc127 += er32(PRC127); 3628 adapter->stats.prc255 += er32(PRC255); 3629 adapter->stats.prc511 += er32(PRC511); 3630 adapter->stats.prc1023 += er32(PRC1023); 3631 adapter->stats.prc1522 += er32(PRC1522); 3632 3633 adapter->stats.symerrs += er32(SYMERRS); 3634 adapter->stats.mpc += er32(MPC); 3635 adapter->stats.scc += er32(SCC); 3636 adapter->stats.ecol += er32(ECOL); 3637 adapter->stats.mcc += er32(MCC); 3638 adapter->stats.latecol += er32(LATECOL); 3639 adapter->stats.dc += er32(DC); 3640 adapter->stats.sec += er32(SEC); 3641 adapter->stats.rlec += er32(RLEC); 3642 adapter->stats.xonrxc += er32(XONRXC); 3643 adapter->stats.xontxc += er32(XONTXC); 3644 adapter->stats.xoffrxc += er32(XOFFRXC); 3645 adapter->stats.xofftxc += er32(XOFFTXC); 3646 adapter->stats.fcruc += er32(FCRUC); 3647 adapter->stats.gptc += er32(GPTC); 3648 adapter->stats.gotcl += er32(GOTCL); 3649 adapter->stats.gotch += er32(GOTCH); 3650 adapter->stats.rnbc += er32(RNBC); 3651 adapter->stats.ruc += er32(RUC); 3652 adapter->stats.rfc += er32(RFC); 3653 adapter->stats.rjc += er32(RJC); 3654 adapter->stats.torl += er32(TORL); 3655 adapter->stats.torh += er32(TORH); 3656 adapter->stats.totl += er32(TOTL); 3657 adapter->stats.toth += er32(TOTH); 3658 adapter->stats.tpr += er32(TPR); 3659 3660 adapter->stats.ptc64 += er32(PTC64); 3661 adapter->stats.ptc127 += er32(PTC127); 3662 adapter->stats.ptc255 += er32(PTC255); 3663 adapter->stats.ptc511 += er32(PTC511); 3664 adapter->stats.ptc1023 += er32(PTC1023); 3665 adapter->stats.ptc1522 += er32(PTC1522); 3666 3667 adapter->stats.mptc += er32(MPTC); 3668 adapter->stats.bptc += er32(BPTC); 3669 3670 /* used for adaptive IFS */ 3671 3672 hw->tx_packet_delta = er32(TPT); 3673 adapter->stats.tpt += hw->tx_packet_delta; 3674 hw->collision_delta = er32(COLC); 3675 adapter->stats.colc += hw->collision_delta; 3676 3677 if (hw->mac_type >= e1000_82543) { 3678 adapter->stats.algnerrc += er32(ALGNERRC); 3679 adapter->stats.rxerrc += er32(RXERRC); 3680 adapter->stats.tncrs += er32(TNCRS); 3681 adapter->stats.cexterr += er32(CEXTERR); 3682 adapter->stats.tsctc += er32(TSCTC); 3683 adapter->stats.tsctfc += er32(TSCTFC); 3684 } 3685 3686 /* Fill out the OS statistics structure */ 3687 netdev->stats.multicast = adapter->stats.mprc; 3688 netdev->stats.collisions = adapter->stats.colc; 3689 3690 /* Rx Errors */ 3691 3692 /* RLEC on some newer hardware can be incorrect so build 3693 * our own version based on RUC and ROC 3694 */ 3695 netdev->stats.rx_errors = adapter->stats.rxerrc + 3696 adapter->stats.crcerrs + adapter->stats.algnerrc + 3697 adapter->stats.ruc + adapter->stats.roc + 3698 adapter->stats.cexterr; 3699 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; 3700 netdev->stats.rx_length_errors = adapter->stats.rlerrc; 3701 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 
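	/* alignment errors are what the stack reports as frame errors, and
	 * missed packets (frames dropped because no Rx buffers were available)
	 * are reported as rx_missed_errors
	 */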
3702 netdev->stats.rx_frame_errors = adapter->stats.algnerrc; 3703 netdev->stats.rx_missed_errors = adapter->stats.mpc; 3704 3705 /* Tx Errors */ 3706 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; 3707 netdev->stats.tx_errors = adapter->stats.txerrc; 3708 netdev->stats.tx_aborted_errors = adapter->stats.ecol; 3709 netdev->stats.tx_window_errors = adapter->stats.latecol; 3710 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; 3711 if (hw->bad_tx_carr_stats_fd && 3712 adapter->link_duplex == FULL_DUPLEX) { 3713 netdev->stats.tx_carrier_errors = 0; 3714 adapter->stats.tncrs = 0; 3715 } 3716 3717 /* Tx Dropped needs to be maintained elsewhere */ 3718 3719 /* Phy Stats */ 3720 if (hw->media_type == e1000_media_type_copper) { 3721 if ((adapter->link_speed == SPEED_1000) && 3722 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { 3723 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; 3724 adapter->phy_stats.idle_errors += phy_tmp; 3725 } 3726 3727 if ((hw->mac_type <= e1000_82546) && 3728 (hw->phy_type == e1000_phy_m88) && 3729 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) 3730 adapter->phy_stats.receive_errors += phy_tmp; 3731 } 3732 3733 /* Management Stats */ 3734 if (hw->has_smbus) { 3735 adapter->stats.mgptc += er32(MGTPTC); 3736 adapter->stats.mgprc += er32(MGTPRC); 3737 adapter->stats.mgpdc += er32(MGTPDC); 3738 } 3739 3740 spin_unlock_irqrestore(&adapter->stats_lock, flags); 3741 } 3742 3743 /** 3744 * e1000_intr - Interrupt Handler 3745 * @irq: interrupt number 3746 * @data: pointer to a network interface device structure 3747 **/ 3748 static irqreturn_t e1000_intr(int irq, void *data) 3749 { 3750 struct net_device *netdev = data; 3751 struct e1000_adapter *adapter = netdev_priv(netdev); 3752 struct e1000_hw *hw = &adapter->hw; 3753 u32 icr = er32(ICR); 3754 3755 if (unlikely((!icr))) 3756 return IRQ_NONE; /* Not our interrupt */ 3757 3758 /* we might have caused the interrupt, but the above 3759 * read cleared it, and just in case the driver is 3760 * down there is nothing to do so return handled 3761 */ 3762 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) 3763 return IRQ_HANDLED; 3764 3765 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { 3766 hw->get_link_status = 1; 3767 /* guard against interrupt when we're going down */ 3768 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3769 schedule_delayed_work(&adapter->watchdog_task, 1); 3770 } 3771 3772 /* disable interrupts, without the synchronize_irq bit */ 3773 ew32(IMC, ~0); 3774 E1000_WRITE_FLUSH(); 3775 3776 if (likely(napi_schedule_prep(&adapter->napi))) { 3777 adapter->total_tx_bytes = 0; 3778 adapter->total_tx_packets = 0; 3779 adapter->total_rx_bytes = 0; 3780 adapter->total_rx_packets = 0; 3781 __napi_schedule(&adapter->napi); 3782 } else { 3783 /* this really should not happen! 
if it does it is basically a 3784 * bug, but not a hard error, so enable ints and continue 3785 */ 3786 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3787 e1000_irq_enable(adapter); 3788 } 3789 3790 return IRQ_HANDLED; 3791 } 3792 3793 /** 3794 * e1000_clean - NAPI Rx polling callback 3795 * @napi: napi struct containing references to driver info 3796 * @budget: budget given to driver for receive packets 3797 **/ 3798 static int e1000_clean(struct napi_struct *napi, int budget) 3799 { 3800 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, 3801 napi); 3802 int tx_clean_complete = 0, work_done = 0; 3803 3804 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); 3805 3806 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); 3807 3808 if (!tx_clean_complete || work_done == budget) 3809 return budget; 3810 3811 /* Exit the polling mode, but don't re-enable interrupts if stack might 3812 * poll us due to busy-polling 3813 */ 3814 if (likely(napi_complete_done(napi, work_done))) { 3815 if (likely(adapter->itr_setting & 3)) 3816 e1000_set_itr(adapter); 3817 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3818 e1000_irq_enable(adapter); 3819 } 3820 3821 return work_done; 3822 } 3823 3824 /** 3825 * e1000_clean_tx_irq - Reclaim resources after transmit completes 3826 * @adapter: board private structure 3827 * @tx_ring: ring to clean 3828 **/ 3829 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, 3830 struct e1000_tx_ring *tx_ring) 3831 { 3832 struct e1000_hw *hw = &adapter->hw; 3833 struct net_device *netdev = adapter->netdev; 3834 struct e1000_tx_desc *tx_desc, *eop_desc; 3835 struct e1000_tx_buffer *buffer_info; 3836 unsigned int i, eop; 3837 unsigned int count = 0; 3838 unsigned int total_tx_bytes = 0, total_tx_packets = 0; 3839 unsigned int bytes_compl = 0, pkts_compl = 0; 3840 3841 i = tx_ring->next_to_clean; 3842 eop = tx_ring->buffer_info[i].next_to_watch; 3843 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3844 3845 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 3846 (count < tx_ring->count)) { 3847 bool cleaned = false; 3848 dma_rmb(); /* read buffer_info after eop_desc */ 3849 for ( ; !cleaned; count++) { 3850 tx_desc = E1000_TX_DESC(*tx_ring, i); 3851 buffer_info = &tx_ring->buffer_info[i]; 3852 cleaned = (i == eop); 3853 3854 if (cleaned) { 3855 total_tx_packets += buffer_info->segs; 3856 total_tx_bytes += buffer_info->bytecount; 3857 if (buffer_info->skb) { 3858 bytes_compl += buffer_info->skb->len; 3859 pkts_compl++; 3860 } 3861 3862 } 3863 e1000_unmap_and_free_tx_resource(adapter, buffer_info, 3864 64); 3865 tx_desc->upper.data = 0; 3866 3867 if (unlikely(++i == tx_ring->count)) 3868 i = 0; 3869 } 3870 3871 eop = tx_ring->buffer_info[i].next_to_watch; 3872 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3873 } 3874 3875 /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame, 3876 * which will reuse the cleaned buffers. 3877 */ 3878 smp_store_release(&tx_ring->next_to_clean, i); 3879 3880 netdev_completed_queue(netdev, pkts_compl, bytes_compl); 3881 3882 #define TX_WAKE_THRESHOLD 32 3883 if (unlikely(count && netif_carrier_ok(netdev) && 3884 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { 3885 /* Make sure that anybody stopping the queue after this 3886 * sees the new next_to_clean. 
3887 */ 3888 smp_mb(); 3889 3890 if (netif_queue_stopped(netdev) && 3891 !(test_bit(__E1000_DOWN, &adapter->flags))) { 3892 netif_wake_queue(netdev); 3893 ++adapter->restart_queue; 3894 } 3895 } 3896 3897 if (adapter->detect_tx_hung) { 3898 /* Detect a transmit hang in hardware, this serializes the 3899 * check with the clearing of time_stamp and movement of i 3900 */ 3901 adapter->detect_tx_hung = false; 3902 if (tx_ring->buffer_info[eop].time_stamp && 3903 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + 3904 (adapter->tx_timeout_factor * HZ)) && 3905 !(er32(STATUS) & E1000_STATUS_TXOFF)) { 3906 3907 /* detected Tx unit hang */ 3908 e_err(drv, "Detected Tx Unit Hang\n" 3909 " Tx Queue <%lu>\n" 3910 " TDH <%x>\n" 3911 " TDT <%x>\n" 3912 " next_to_use <%x>\n" 3913 " next_to_clean <%x>\n" 3914 "buffer_info[next_to_clean]\n" 3915 " time_stamp <%lx>\n" 3916 " next_to_watch <%x>\n" 3917 " jiffies <%lx>\n" 3918 " next_to_watch.status <%x>\n", 3919 (unsigned long)(tx_ring - adapter->tx_ring), 3920 readl(hw->hw_addr + tx_ring->tdh), 3921 readl(hw->hw_addr + tx_ring->tdt), 3922 tx_ring->next_to_use, 3923 tx_ring->next_to_clean, 3924 tx_ring->buffer_info[eop].time_stamp, 3925 eop, 3926 jiffies, 3927 eop_desc->upper.fields.status); 3928 e1000_dump(adapter); 3929 netif_stop_queue(netdev); 3930 } 3931 } 3932 adapter->total_tx_bytes += total_tx_bytes; 3933 adapter->total_tx_packets += total_tx_packets; 3934 netdev->stats.tx_bytes += total_tx_bytes; 3935 netdev->stats.tx_packets += total_tx_packets; 3936 return count < tx_ring->count; 3937 } 3938 3939 /** 3940 * e1000_rx_checksum - Receive Checksum Offload for 82543 3941 * @adapter: board private structure 3942 * @status_err: receive descriptor status and error fields 3943 * @csum: receive descriptor csum field 3944 * @skb: socket buffer with received data 3945 **/ 3946 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, 3947 u32 csum, struct sk_buff *skb) 3948 { 3949 struct e1000_hw *hw = &adapter->hw; 3950 u16 status = (u16)status_err; 3951 u8 errors = (u8)(status_err >> 24); 3952 3953 skb_checksum_none_assert(skb); 3954 3955 /* 82543 or newer only */ 3956 if (unlikely(hw->mac_type < e1000_82543)) 3957 return; 3958 /* Ignore Checksum bit is set */ 3959 if (unlikely(status & E1000_RXD_STAT_IXSM)) 3960 return; 3961 /* TCP/UDP checksum error bit is set */ 3962 if (unlikely(errors & E1000_RXD_ERR_TCPE)) { 3963 /* let the stack verify checksum errors */ 3964 adapter->hw_csum_err++; 3965 return; 3966 } 3967 /* TCP/UDP Checksum has not been calculated */ 3968 if (!(status & E1000_RXD_STAT_TCPCS)) 3969 return; 3970 3971 /* It must be a TCP or UDP packet with a valid checksum */ 3972 if (likely(status & E1000_RXD_STAT_TCPCS)) { 3973 /* TCP checksum is good */ 3974 skb->ip_summed = CHECKSUM_UNNECESSARY; 3975 } 3976 adapter->hw_csum_good++; 3977 } 3978 3979 /** 3980 * e1000_consume_page - helper function for jumbo Rx path 3981 * @bi: software descriptor shadow data 3982 * @skb: skb being modified 3983 * @length: length of data being added 3984 **/ 3985 static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb, 3986 u16 length) 3987 { 3988 bi->rxbuf.page = NULL; 3989 skb->len += length; 3990 skb->data_len += length; 3991 skb->truesize += PAGE_SIZE; 3992 } 3993 3994 /** 3995 * e1000_receive_skb - helper function to handle rx indications 3996 * @adapter: board private structure 3997 * @status: descriptor status field as written by hardware 3998 * @vlan: descriptor vlan field as written by hardware (no le/be 
conversion) 3999 * @skb: pointer to sk_buff to be indicated to stack 4000 */ 4001 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status, 4002 __le16 vlan, struct sk_buff *skb) 4003 { 4004 skb->protocol = eth_type_trans(skb, adapter->netdev); 4005 4006 if (status & E1000_RXD_STAT_VP) { 4007 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; 4008 4009 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 4010 } 4011 napi_gro_receive(&adapter->napi, skb); 4012 } 4013 4014 /** 4015 * e1000_tbi_adjust_stats 4016 * @hw: Struct containing variables accessed by shared code 4017 * @stats: point to stats struct 4018 * @frame_len: The length of the frame in question 4019 * @mac_addr: The Ethernet destination address of the frame in question 4020 * 4021 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT 4022 */ 4023 static void e1000_tbi_adjust_stats(struct e1000_hw *hw, 4024 struct e1000_hw_stats *stats, 4025 u32 frame_len, const u8 *mac_addr) 4026 { 4027 u64 carry_bit; 4028 4029 /* First adjust the frame length. */ 4030 frame_len--; 4031 /* We need to adjust the statistics counters, since the hardware 4032 * counters overcount this packet as a CRC error and undercount 4033 * the packet as a good packet 4034 */ 4035 /* This packet should not be counted as a CRC error. */ 4036 stats->crcerrs--; 4037 /* This packet does count as a Good Packet Received. */ 4038 stats->gprc++; 4039 4040 /* Adjust the Good Octets received counters */ 4041 carry_bit = 0x80000000 & stats->gorcl; 4042 stats->gorcl += frame_len; 4043 /* If the high bit of Gorcl (the low 32 bits of the Good Octets 4044 * Received Count) was one before the addition, 4045 * AND it is zero after, then we lost the carry out, 4046 * need to add one to Gorch (Good Octets Received Count High). 4047 * This could be simplified if all environments supported 4048 * 64-bit integers. 4049 */ 4050 if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) 4051 stats->gorch++; 4052 /* Is this a broadcast or multicast? Check broadcast first, 4053 * since the test for a multicast frame will test positive on 4054 * a broadcast frame. 4055 */ 4056 if (is_broadcast_ether_addr(mac_addr)) 4057 stats->bprc++; 4058 else if (is_multicast_ether_addr(mac_addr)) 4059 stats->mprc++; 4060 4061 if (frame_len == hw->max_frame_size) { 4062 /* In this case, the hardware has overcounted the number of 4063 * oversize frames. 4064 */ 4065 if (stats->roc > 0) 4066 stats->roc--; 4067 } 4068 4069 /* Adjust the bin counters when the extra byte put the frame in the 4070 * wrong bin. Remember that the frame_len was adjusted above. 
4071 */ 4072 if (frame_len == 64) { 4073 stats->prc64++; 4074 stats->prc127--; 4075 } else if (frame_len == 127) { 4076 stats->prc127++; 4077 stats->prc255--; 4078 } else if (frame_len == 255) { 4079 stats->prc255++; 4080 stats->prc511--; 4081 } else if (frame_len == 511) { 4082 stats->prc511++; 4083 stats->prc1023--; 4084 } else if (frame_len == 1023) { 4085 stats->prc1023++; 4086 stats->prc1522--; 4087 } else if (frame_len == 1522) { 4088 stats->prc1522++; 4089 } 4090 } 4091 4092 static bool e1000_tbi_should_accept(struct e1000_adapter *adapter, 4093 u8 status, u8 errors, 4094 u32 length, const u8 *data) 4095 { 4096 struct e1000_hw *hw = &adapter->hw; 4097 u8 last_byte = *(data + length - 1); 4098 4099 if (TBI_ACCEPT(hw, status, errors, length, last_byte)) { 4100 unsigned long irq_flags; 4101 4102 spin_lock_irqsave(&adapter->stats_lock, irq_flags); 4103 e1000_tbi_adjust_stats(hw, &adapter->stats, length, data); 4104 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); 4105 4106 return true; 4107 } 4108 4109 return false; 4110 } 4111 4112 static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter, 4113 unsigned int bufsz) 4114 { 4115 struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz); 4116 4117 if (unlikely(!skb)) 4118 adapter->alloc_rx_buff_failed++; 4119 return skb; 4120 } 4121 4122 /** 4123 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy 4124 * @adapter: board private structure 4125 * @rx_ring: ring to clean 4126 * @work_done: amount of napi work completed this call 4127 * @work_to_do: max amount of work allowed for this call to do 4128 * 4129 * the return value indicates whether actual cleaning was done, there 4130 * is no guarantee that everything was cleaned 4131 */ 4132 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, 4133 struct e1000_rx_ring *rx_ring, 4134 int *work_done, int work_to_do) 4135 { 4136 struct net_device *netdev = adapter->netdev; 4137 struct pci_dev *pdev = adapter->pdev; 4138 struct e1000_rx_desc *rx_desc, *next_rxd; 4139 struct e1000_rx_buffer *buffer_info, *next_buffer; 4140 u32 length; 4141 unsigned int i; 4142 int cleaned_count = 0; 4143 bool cleaned = false; 4144 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 4145 4146 i = rx_ring->next_to_clean; 4147 rx_desc = E1000_RX_DESC(*rx_ring, i); 4148 buffer_info = &rx_ring->buffer_info[i]; 4149 4150 while (rx_desc->status & E1000_RXD_STAT_DD) { 4151 struct sk_buff *skb; 4152 u8 status; 4153 4154 if (*work_done >= work_to_do) 4155 break; 4156 (*work_done)++; 4157 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ 4158 4159 status = rx_desc->status; 4160 4161 if (++i == rx_ring->count) 4162 i = 0; 4163 4164 next_rxd = E1000_RX_DESC(*rx_ring, i); 4165 prefetch(next_rxd); 4166 4167 next_buffer = &rx_ring->buffer_info[i]; 4168 4169 cleaned = true; 4170 cleaned_count++; 4171 dma_unmap_page(&pdev->dev, buffer_info->dma, 4172 adapter->rx_buffer_len, DMA_FROM_DEVICE); 4173 buffer_info->dma = 0; 4174 4175 length = le16_to_cpu(rx_desc->length); 4176 4177 /* errors is only valid for DD + EOP descriptors */ 4178 if (unlikely((status & E1000_RXD_STAT_EOP) && 4179 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { 4180 u8 *mapped = page_address(buffer_info->rxbuf.page); 4181 4182 if (e1000_tbi_should_accept(adapter, status, 4183 rx_desc->errors, 4184 length, mapped)) { 4185 length--; 4186 } else if (netdev->features & NETIF_F_RXALL) { 4187 goto process_skb; 4188 } else { 4189 /* an error means any chain goes out the window 4190 * 
too 4191 */ 4192 dev_kfree_skb(rx_ring->rx_skb_top); 4193 rx_ring->rx_skb_top = NULL; 4194 goto next_desc; 4195 } 4196 } 4197 4198 #define rxtop rx_ring->rx_skb_top 4199 process_skb: 4200 if (!(status & E1000_RXD_STAT_EOP)) { 4201 /* this descriptor is only the beginning (or middle) */ 4202 if (!rxtop) { 4203 /* this is the beginning of a chain */ 4204 rxtop = napi_get_frags(&adapter->napi); 4205 if (!rxtop) 4206 break; 4207 4208 skb_fill_page_desc(rxtop, 0, 4209 buffer_info->rxbuf.page, 4210 0, length); 4211 } else { 4212 /* this is the middle of a chain */ 4213 skb_fill_page_desc(rxtop, 4214 skb_shinfo(rxtop)->nr_frags, 4215 buffer_info->rxbuf.page, 0, length); 4216 } 4217 e1000_consume_page(buffer_info, rxtop, length); 4218 goto next_desc; 4219 } else { 4220 if (rxtop) { 4221 /* end of the chain */ 4222 skb_fill_page_desc(rxtop, 4223 skb_shinfo(rxtop)->nr_frags, 4224 buffer_info->rxbuf.page, 0, length); 4225 skb = rxtop; 4226 rxtop = NULL; 4227 e1000_consume_page(buffer_info, skb, length); 4228 } else { 4229 struct page *p; 4230 /* no chain, got EOP, this buf is the packet 4231 * copybreak to save the put_page/alloc_page 4232 */ 4233 p = buffer_info->rxbuf.page; 4234 if (length <= copybreak) { 4235 if (likely(!(netdev->features & NETIF_F_RXFCS))) 4236 length -= 4; 4237 skb = e1000_alloc_rx_skb(adapter, 4238 length); 4239 if (!skb) 4240 break; 4241 4242 memcpy(skb_tail_pointer(skb), 4243 page_address(p), length); 4244 4245 /* re-use the page, so don't erase 4246 * buffer_info->rxbuf.page 4247 */ 4248 skb_put(skb, length); 4249 e1000_rx_checksum(adapter, 4250 status | rx_desc->errors << 24, 4251 le16_to_cpu(rx_desc->csum), skb); 4252 4253 total_rx_bytes += skb->len; 4254 total_rx_packets++; 4255 4256 e1000_receive_skb(adapter, status, 4257 rx_desc->special, skb); 4258 goto next_desc; 4259 } else { 4260 skb = napi_get_frags(&adapter->napi); 4261 if (!skb) { 4262 adapter->alloc_rx_buff_failed++; 4263 break; 4264 } 4265 skb_fill_page_desc(skb, 0, p, 0, 4266 length); 4267 e1000_consume_page(buffer_info, skb, 4268 length); 4269 } 4270 } 4271 } 4272 4273 /* Receive Checksum Offload XXX recompute due to CRC strip? 
*/ 4274 e1000_rx_checksum(adapter, 4275 (u32)(status) | 4276 ((u32)(rx_desc->errors) << 24), 4277 le16_to_cpu(rx_desc->csum), skb); 4278 4279 total_rx_bytes += (skb->len - 4); /* don't count FCS */ 4280 if (likely(!(netdev->features & NETIF_F_RXFCS))) 4281 pskb_trim(skb, skb->len - 4); 4282 total_rx_packets++; 4283 4284 if (status & E1000_RXD_STAT_VP) { 4285 __le16 vlan = rx_desc->special; 4286 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; 4287 4288 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 4289 } 4290 4291 napi_gro_frags(&adapter->napi); 4292 4293 next_desc: 4294 rx_desc->status = 0; 4295 4296 /* return some buffers to hardware, one at a time is too slow */ 4297 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { 4298 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4299 cleaned_count = 0; 4300 } 4301 4302 /* use prefetched values */ 4303 rx_desc = next_rxd; 4304 buffer_info = next_buffer; 4305 } 4306 rx_ring->next_to_clean = i; 4307 4308 cleaned_count = E1000_DESC_UNUSED(rx_ring); 4309 if (cleaned_count) 4310 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4311 4312 adapter->total_rx_packets += total_rx_packets; 4313 adapter->total_rx_bytes += total_rx_bytes; 4314 netdev->stats.rx_bytes += total_rx_bytes; 4315 netdev->stats.rx_packets += total_rx_packets; 4316 return cleaned; 4317 } 4318 4319 /* this should improve performance for small packets with large amounts 4320 * of reassembly being done in the stack 4321 */ 4322 static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter, 4323 struct e1000_rx_buffer *buffer_info, 4324 u32 length, const void *data) 4325 { 4326 struct sk_buff *skb; 4327 4328 if (length > copybreak) 4329 return NULL; 4330 4331 skb = e1000_alloc_rx_skb(adapter, length); 4332 if (!skb) 4333 return NULL; 4334 4335 dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma, 4336 length, DMA_FROM_DEVICE); 4337 4338 skb_put_data(skb, data, length); 4339 4340 return skb; 4341 } 4342 4343 /** 4344 * e1000_clean_rx_irq - Send received data up the network stack; legacy 4345 * @adapter: board private structure 4346 * @rx_ring: ring to clean 4347 * @work_done: amount of napi work completed this call 4348 * @work_to_do: max amount of work allowed for this call to do 4349 */ 4350 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, 4351 struct e1000_rx_ring *rx_ring, 4352 int *work_done, int work_to_do) 4353 { 4354 struct net_device *netdev = adapter->netdev; 4355 struct pci_dev *pdev = adapter->pdev; 4356 struct e1000_rx_desc *rx_desc, *next_rxd; 4357 struct e1000_rx_buffer *buffer_info, *next_buffer; 4358 u32 length; 4359 unsigned int i; 4360 int cleaned_count = 0; 4361 bool cleaned = false; 4362 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 4363 4364 i = rx_ring->next_to_clean; 4365 rx_desc = E1000_RX_DESC(*rx_ring, i); 4366 buffer_info = &rx_ring->buffer_info[i]; 4367 4368 while (rx_desc->status & E1000_RXD_STAT_DD) { 4369 struct sk_buff *skb; 4370 u8 *data; 4371 u8 status; 4372 4373 if (*work_done >= work_to_do) 4374 break; 4375 (*work_done)++; 4376 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ 4377 4378 status = rx_desc->status; 4379 length = le16_to_cpu(rx_desc->length); 4380 4381 data = buffer_info->rxbuf.data; 4382 prefetch(data); 4383 skb = e1000_copybreak(adapter, buffer_info, length, data); 4384 if (!skb) { 4385 unsigned int frag_len = e1000_frag_len(adapter); 4386 4387 skb = napi_build_skb(data - E1000_HEADROOM, frag_len); 4388 if (!skb) { 4389 adapter->alloc_rx_buff_failed++; 
4390 break;
4391 }
4392
4393 skb_reserve(skb, E1000_HEADROOM);
4394 dma_unmap_single(&pdev->dev, buffer_info->dma,
4395 adapter->rx_buffer_len,
4396 DMA_FROM_DEVICE);
4397 buffer_info->dma = 0;
4398 buffer_info->rxbuf.data = NULL;
4399 }
4400
4401 if (++i == rx_ring->count)
4402 i = 0;
4403
4404 next_rxd = E1000_RX_DESC(*rx_ring, i);
4405 prefetch(next_rxd);
4406
4407 next_buffer = &rx_ring->buffer_info[i];
4408
4409 cleaned = true;
4410 cleaned_count++;
4411
4412 /* !EOP means multiple descriptors were used to store a single
4413 * packet; if that's the case we need to toss it. In fact, we
4414 * need to toss every packet with the EOP bit clear and the next
4415 * frame that _does_ have the EOP bit set, as it is by
4416 * definition only a frame fragment
4417 */
4418 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4419 adapter->discarding = true;
4420
4421 if (adapter->discarding) {
4422 /* All receives must fit into a single buffer */
4423 netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4424 dev_kfree_skb(skb);
4425 if (status & E1000_RXD_STAT_EOP)
4426 adapter->discarding = false;
4427 goto next_desc;
4428 }
4429
4430 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4431 if (e1000_tbi_should_accept(adapter, status,
4432 rx_desc->errors,
4433 length, data)) {
4434 length--;
4435 } else if (netdev->features & NETIF_F_RXALL) {
4436 goto process_skb;
4437 } else {
4438 dev_kfree_skb(skb);
4439 goto next_desc;
4440 }
4441 }
4442
4443 process_skb:
4444 total_rx_bytes += (length - 4); /* don't count FCS */
4445 total_rx_packets++;
4446
4447 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4448 /* adjust length to remove Ethernet CRC; this must be
4449 * done after the TBI_ACCEPT workaround above
4450 */
4451 length -= 4;
4452
4453 if (buffer_info->rxbuf.data == NULL)
4454 skb_put(skb, length);
4455 else /* copybreak skb */
4456 skb_trim(skb, length);
4457
4458 /* Receive Checksum Offload */
4459 e1000_rx_checksum(adapter,
4460 (u32)(status) |
4461 ((u32)(rx_desc->errors) << 24),
4462 le16_to_cpu(rx_desc->csum), skb);
4463
4464 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4465
4466 next_desc:
4467 rx_desc->status = 0;
4468
4469 /* return some buffers to hardware, one at a time is too slow */
4470 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4471 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4472 cleaned_count = 0;
4473 }
4474
4475 /* use prefetched values */
4476 rx_desc = next_rxd;
4477 buffer_info = next_buffer;
4478 }
4479 rx_ring->next_to_clean = i;
4480
4481 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4482 if (cleaned_count)
4483 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4484
4485 adapter->total_rx_packets += total_rx_packets;
4486 adapter->total_rx_bytes += total_rx_bytes;
4487 netdev->stats.rx_bytes += total_rx_bytes;
4488 netdev->stats.rx_packets += total_rx_packets;
4489 return cleaned;
4490 }
4491
4492 /**
4493 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4494 * @adapter: address of board private structure
4495 * @rx_ring: pointer to receive ring structure
4496 * @cleaned_count: number of buffers to allocate this pass
4497 **/
4498 static void
4499 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4500 struct e1000_rx_ring *rx_ring, int cleaned_count)
4501 {
4502 struct pci_dev *pdev = adapter->pdev;
4503 struct e1000_rx_desc *rx_desc;
4504 struct e1000_rx_buffer *buffer_info;
4505 unsigned int i;
4506
4507 i = rx_ring->next_to_use;
4508 buffer_info = &rx_ring->buffer_info[i];
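/* Replenish loop: each pass below reuses whatever the ring slot still
 * holds. A fresh page is allocated only when the slot has no page, and
 * a DMA mapping is created only when the slot has no mapping; on either
 * failure the pass ends early and alloc_rx_buff_failed is bumped so a
 * later replenish pass can retry.
 */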
4509 4510 while (cleaned_count--) { 4511 /* allocate a new page if necessary */ 4512 if (!buffer_info->rxbuf.page) { 4513 buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC); 4514 if (unlikely(!buffer_info->rxbuf.page)) { 4515 adapter->alloc_rx_buff_failed++; 4516 break; 4517 } 4518 } 4519 4520 if (!buffer_info->dma) { 4521 buffer_info->dma = dma_map_page(&pdev->dev, 4522 buffer_info->rxbuf.page, 0, 4523 adapter->rx_buffer_len, 4524 DMA_FROM_DEVICE); 4525 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 4526 put_page(buffer_info->rxbuf.page); 4527 buffer_info->rxbuf.page = NULL; 4528 buffer_info->dma = 0; 4529 adapter->alloc_rx_buff_failed++; 4530 break; 4531 } 4532 } 4533 4534 rx_desc = E1000_RX_DESC(*rx_ring, i); 4535 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4536 4537 if (unlikely(++i == rx_ring->count)) 4538 i = 0; 4539 buffer_info = &rx_ring->buffer_info[i]; 4540 } 4541 4542 if (likely(rx_ring->next_to_use != i)) { 4543 rx_ring->next_to_use = i; 4544 if (unlikely(i-- == 0)) 4545 i = (rx_ring->count - 1); 4546 4547 /* Force memory writes to complete before letting h/w 4548 * know there are new descriptors to fetch. (Only 4549 * applicable for weak-ordered memory model archs, 4550 * such as IA-64). 4551 */ 4552 dma_wmb(); 4553 writel(i, adapter->hw.hw_addr + rx_ring->rdt); 4554 } 4555 } 4556 4557 /** 4558 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended 4559 * @adapter: address of board private structure 4560 * @rx_ring: pointer to ring struct 4561 * @cleaned_count: number of new Rx buffers to try to allocate 4562 **/ 4563 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, 4564 struct e1000_rx_ring *rx_ring, 4565 int cleaned_count) 4566 { 4567 struct e1000_hw *hw = &adapter->hw; 4568 struct pci_dev *pdev = adapter->pdev; 4569 struct e1000_rx_desc *rx_desc; 4570 struct e1000_rx_buffer *buffer_info; 4571 unsigned int i; 4572 unsigned int bufsz = adapter->rx_buffer_len; 4573 4574 i = rx_ring->next_to_use; 4575 buffer_info = &rx_ring->buffer_info[i]; 4576 4577 while (cleaned_count--) { 4578 void *data; 4579 4580 if (buffer_info->rxbuf.data) 4581 goto skip; 4582 4583 data = e1000_alloc_frag(adapter); 4584 if (!data) { 4585 /* Better luck next round */ 4586 adapter->alloc_rx_buff_failed++; 4587 break; 4588 } 4589 4590 /* Fix for errata 23, can't cross 64kB boundary */ 4591 if (!e1000_check_64k_bound(adapter, data, bufsz)) { 4592 void *olddata = data; 4593 e_err(rx_err, "skb align check failed: %u bytes at " 4594 "%p\n", bufsz, data); 4595 /* Try again, without freeing the previous */ 4596 data = e1000_alloc_frag(adapter); 4597 /* Failed allocation, critical failure */ 4598 if (!data) { 4599 skb_free_frag(olddata); 4600 adapter->alloc_rx_buff_failed++; 4601 break; 4602 } 4603 4604 if (!e1000_check_64k_bound(adapter, data, bufsz)) { 4605 /* give up */ 4606 skb_free_frag(data); 4607 skb_free_frag(olddata); 4608 adapter->alloc_rx_buff_failed++; 4609 break; 4610 } 4611 4612 /* Use new allocation */ 4613 skb_free_frag(olddata); 4614 } 4615 buffer_info->dma = dma_map_single(&pdev->dev, 4616 data, 4617 adapter->rx_buffer_len, 4618 DMA_FROM_DEVICE); 4619 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 4620 skb_free_frag(data); 4621 buffer_info->dma = 0; 4622 adapter->alloc_rx_buff_failed++; 4623 break; 4624 } 4625 4626 /* XXX if it was allocated cleanly it will never map to a 4627 * boundary crossing 4628 */ 4629 4630 /* Fix for errata 23, can't cross 64kB boundary */ 4631 if (!e1000_check_64k_bound(adapter, 4632 (void *)(unsigned 
long)buffer_info->dma, 4633 adapter->rx_buffer_len)) { 4634 e_err(rx_err, "dma align check failed: %u bytes at " 4635 "%p\n", adapter->rx_buffer_len, 4636 (void *)(unsigned long)buffer_info->dma); 4637 4638 dma_unmap_single(&pdev->dev, buffer_info->dma, 4639 adapter->rx_buffer_len, 4640 DMA_FROM_DEVICE); 4641 4642 skb_free_frag(data); 4643 buffer_info->rxbuf.data = NULL; 4644 buffer_info->dma = 0; 4645 4646 adapter->alloc_rx_buff_failed++; 4647 break; 4648 } 4649 buffer_info->rxbuf.data = data; 4650 skip: 4651 rx_desc = E1000_RX_DESC(*rx_ring, i); 4652 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4653 4654 if (unlikely(++i == rx_ring->count)) 4655 i = 0; 4656 buffer_info = &rx_ring->buffer_info[i]; 4657 } 4658 4659 if (likely(rx_ring->next_to_use != i)) { 4660 rx_ring->next_to_use = i; 4661 if (unlikely(i-- == 0)) 4662 i = (rx_ring->count - 1); 4663 4664 /* Force memory writes to complete before letting h/w 4665 * know there are new descriptors to fetch. (Only 4666 * applicable for weak-ordered memory model archs, 4667 * such as IA-64). 4668 */ 4669 dma_wmb(); 4670 writel(i, hw->hw_addr + rx_ring->rdt); 4671 } 4672 } 4673 4674 /** 4675 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. 4676 * @adapter: address of board private structure 4677 **/ 4678 static void e1000_smartspeed(struct e1000_adapter *adapter) 4679 { 4680 struct e1000_hw *hw = &adapter->hw; 4681 u16 phy_status; 4682 u16 phy_ctrl; 4683 4684 if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || 4685 !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) 4686 return; 4687 4688 if (adapter->smartspeed == 0) { 4689 /* If Master/Slave config fault is asserted twice, 4690 * we assume back-to-back 4691 */ 4692 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4693 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 4694 return; 4695 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4696 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 4697 return; 4698 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4699 if (phy_ctrl & CR_1000T_MS_ENABLE) { 4700 phy_ctrl &= ~CR_1000T_MS_ENABLE; 4701 e1000_write_phy_reg(hw, PHY_1000T_CTRL, 4702 phy_ctrl); 4703 adapter->smartspeed++; 4704 if (!e1000_phy_setup_autoneg(hw) && 4705 !e1000_read_phy_reg(hw, PHY_CTRL, 4706 &phy_ctrl)) { 4707 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4708 MII_CR_RESTART_AUTO_NEG); 4709 e1000_write_phy_reg(hw, PHY_CTRL, 4710 phy_ctrl); 4711 } 4712 } 4713 return; 4714 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { 4715 /* If still no link, perhaps using 2/3 pair cable */ 4716 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4717 phy_ctrl |= CR_1000T_MS_ENABLE; 4718 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); 4719 if (!e1000_phy_setup_autoneg(hw) && 4720 !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { 4721 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4722 MII_CR_RESTART_AUTO_NEG); 4723 e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); 4724 } 4725 } 4726 /* Restart process after E1000_SMARTSPEED_MAX iterations */ 4727 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) 4728 adapter->smartspeed = 0; 4729 } 4730 4731 /** 4732 * e1000_ioctl - handle ioctl calls 4733 * @netdev: pointer to our netdev 4734 * @ifr: pointer to interface request structure 4735 * @cmd: ioctl data 4736 **/ 4737 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 4738 { 4739 switch (cmd) { 4740 case SIOCGMIIPHY: 4741 case SIOCGMIIREG: 4742 case SIOCSMIIREG: 4743 return e1000_mii_ioctl(netdev, ifr, cmd); 4744 default: 4745 return -EOPNOTSUPP; 
4746 } 4747 } 4748 4749 /** 4750 * e1000_mii_ioctl - 4751 * @netdev: pointer to our netdev 4752 * @ifr: pointer to interface request structure 4753 * @cmd: ioctl data 4754 **/ 4755 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 4756 int cmd) 4757 { 4758 struct e1000_adapter *adapter = netdev_priv(netdev); 4759 struct e1000_hw *hw = &adapter->hw; 4760 struct mii_ioctl_data *data = if_mii(ifr); 4761 int retval; 4762 u16 mii_reg; 4763 unsigned long flags; 4764 4765 if (hw->media_type != e1000_media_type_copper) 4766 return -EOPNOTSUPP; 4767 4768 switch (cmd) { 4769 case SIOCGMIIPHY: 4770 data->phy_id = hw->phy_addr; 4771 break; 4772 case SIOCGMIIREG: 4773 spin_lock_irqsave(&adapter->stats_lock, flags); 4774 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, 4775 &data->val_out)) { 4776 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4777 return -EIO; 4778 } 4779 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4780 break; 4781 case SIOCSMIIREG: 4782 if (data->reg_num & ~(0x1F)) 4783 return -EFAULT; 4784 mii_reg = data->val_in; 4785 spin_lock_irqsave(&adapter->stats_lock, flags); 4786 if (e1000_write_phy_reg(hw, data->reg_num, 4787 mii_reg)) { 4788 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4789 return -EIO; 4790 } 4791 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4792 if (hw->media_type == e1000_media_type_copper) { 4793 switch (data->reg_num) { 4794 case PHY_CTRL: 4795 if (mii_reg & MII_CR_POWER_DOWN) 4796 break; 4797 if (mii_reg & MII_CR_AUTO_NEG_EN) { 4798 hw->autoneg = 1; 4799 hw->autoneg_advertised = 0x2F; 4800 } else { 4801 u32 speed; 4802 if (mii_reg & 0x40) 4803 speed = SPEED_1000; 4804 else if (mii_reg & 0x2000) 4805 speed = SPEED_100; 4806 else 4807 speed = SPEED_10; 4808 retval = e1000_set_spd_dplx( 4809 adapter, speed, 4810 ((mii_reg & 0x100) 4811 ? 
DUPLEX_FULL : 4812 DUPLEX_HALF)); 4813 if (retval) 4814 return retval; 4815 } 4816 if (netif_running(adapter->netdev)) 4817 e1000_reinit_locked(adapter); 4818 else 4819 e1000_reset(adapter); 4820 break; 4821 case M88E1000_PHY_SPEC_CTRL: 4822 case M88E1000_EXT_PHY_SPEC_CTRL: 4823 if (e1000_phy_reset(hw)) 4824 return -EIO; 4825 break; 4826 } 4827 } else { 4828 switch (data->reg_num) { 4829 case PHY_CTRL: 4830 if (mii_reg & MII_CR_POWER_DOWN) 4831 break; 4832 if (netif_running(adapter->netdev)) 4833 e1000_reinit_locked(adapter); 4834 else 4835 e1000_reset(adapter); 4836 break; 4837 } 4838 } 4839 break; 4840 default: 4841 return -EOPNOTSUPP; 4842 } 4843 return E1000_SUCCESS; 4844 } 4845 4846 void e1000_pci_set_mwi(struct e1000_hw *hw) 4847 { 4848 struct e1000_adapter *adapter = hw->back; 4849 int ret_val = pci_set_mwi(adapter->pdev); 4850 4851 if (ret_val) 4852 e_err(probe, "Error in setting MWI\n"); 4853 } 4854 4855 void e1000_pci_clear_mwi(struct e1000_hw *hw) 4856 { 4857 struct e1000_adapter *adapter = hw->back; 4858 4859 pci_clear_mwi(adapter->pdev); 4860 } 4861 4862 int e1000_pcix_get_mmrbc(struct e1000_hw *hw) 4863 { 4864 struct e1000_adapter *adapter = hw->back; 4865 return pcix_get_mmrbc(adapter->pdev); 4866 } 4867 4868 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) 4869 { 4870 struct e1000_adapter *adapter = hw->back; 4871 pcix_set_mmrbc(adapter->pdev, mmrbc); 4872 } 4873 4874 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) 4875 { 4876 outl(value, port); 4877 } 4878 4879 static bool e1000_vlan_used(struct e1000_adapter *adapter) 4880 { 4881 u16 vid; 4882 4883 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 4884 return true; 4885 return false; 4886 } 4887 4888 static void __e1000_vlan_mode(struct e1000_adapter *adapter, 4889 netdev_features_t features) 4890 { 4891 struct e1000_hw *hw = &adapter->hw; 4892 u32 ctrl; 4893 4894 ctrl = er32(CTRL); 4895 if (features & NETIF_F_HW_VLAN_CTAG_RX) { 4896 /* enable VLAN tag insert/strip */ 4897 ctrl |= E1000_CTRL_VME; 4898 } else { 4899 /* disable VLAN tag insert/strip */ 4900 ctrl &= ~E1000_CTRL_VME; 4901 } 4902 ew32(CTRL, ctrl); 4903 } 4904 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, 4905 bool filter_on) 4906 { 4907 struct e1000_hw *hw = &adapter->hw; 4908 u32 rctl; 4909 4910 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4911 e1000_irq_disable(adapter); 4912 4913 __e1000_vlan_mode(adapter, adapter->netdev->features); 4914 if (filter_on) { 4915 /* enable VLAN receive filtering */ 4916 rctl = er32(RCTL); 4917 rctl &= ~E1000_RCTL_CFIEN; 4918 if (!(adapter->netdev->flags & IFF_PROMISC)) 4919 rctl |= E1000_RCTL_VFE; 4920 ew32(RCTL, rctl); 4921 e1000_update_mng_vlan(adapter); 4922 } else { 4923 /* disable VLAN receive filtering */ 4924 rctl = er32(RCTL); 4925 rctl &= ~E1000_RCTL_VFE; 4926 ew32(RCTL, rctl); 4927 } 4928 4929 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4930 e1000_irq_enable(adapter); 4931 } 4932 4933 static void e1000_vlan_mode(struct net_device *netdev, 4934 netdev_features_t features) 4935 { 4936 struct e1000_adapter *adapter = netdev_priv(netdev); 4937 4938 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4939 e1000_irq_disable(adapter); 4940 4941 __e1000_vlan_mode(adapter, features); 4942 4943 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4944 e1000_irq_enable(adapter); 4945 } 4946 4947 static int e1000_vlan_rx_add_vid(struct net_device *netdev, 4948 __be16 proto, u16 vid) 4949 { 4950 struct e1000_adapter *adapter = netdev_priv(netdev); 4951 struct e1000_hw *hw = 
&adapter->hw; 4952 u32 vfta, index; 4953 4954 if ((hw->mng_cookie.status & 4955 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 4956 (vid == adapter->mng_vlan_id)) 4957 return 0; 4958 4959 if (!e1000_vlan_used(adapter)) 4960 e1000_vlan_filter_on_off(adapter, true); 4961 4962 /* add VID to filter table */ 4963 index = (vid >> 5) & 0x7F; 4964 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); 4965 vfta |= (1 << (vid & 0x1F)); 4966 e1000_write_vfta(hw, index, vfta); 4967 4968 set_bit(vid, adapter->active_vlans); 4969 4970 return 0; 4971 } 4972 4973 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, 4974 __be16 proto, u16 vid) 4975 { 4976 struct e1000_adapter *adapter = netdev_priv(netdev); 4977 struct e1000_hw *hw = &adapter->hw; 4978 u32 vfta, index; 4979 4980 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4981 e1000_irq_disable(adapter); 4982 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4983 e1000_irq_enable(adapter); 4984 4985 /* remove VID from filter table */ 4986 index = (vid >> 5) & 0x7F; 4987 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); 4988 vfta &= ~(1 << (vid & 0x1F)); 4989 e1000_write_vfta(hw, index, vfta); 4990 4991 clear_bit(vid, adapter->active_vlans); 4992 4993 if (!e1000_vlan_used(adapter)) 4994 e1000_vlan_filter_on_off(adapter, false); 4995 4996 return 0; 4997 } 4998 4999 static void e1000_restore_vlan(struct e1000_adapter *adapter) 5000 { 5001 u16 vid; 5002 5003 if (!e1000_vlan_used(adapter)) 5004 return; 5005 5006 e1000_vlan_filter_on_off(adapter, true); 5007 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 5008 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); 5009 } 5010 5011 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) 5012 { 5013 struct e1000_hw *hw = &adapter->hw; 5014 5015 hw->autoneg = 0; 5016 5017 /* Make sure dplx is at most 1 bit and lsb of speed is not set 5018 * for the switch() below to work 5019 */ 5020 if ((spd & 1) || (dplx & ~1)) 5021 goto err_inval; 5022 5023 /* Fiber NICs only allow 1000 gbps Full duplex */ 5024 if ((hw->media_type == e1000_media_type_fiber) && 5025 spd != SPEED_1000 && 5026 dplx != DUPLEX_FULL) 5027 goto err_inval; 5028 5029 switch (spd + dplx) { 5030 case SPEED_10 + DUPLEX_HALF: 5031 hw->forced_speed_duplex = e1000_10_half; 5032 break; 5033 case SPEED_10 + DUPLEX_FULL: 5034 hw->forced_speed_duplex = e1000_10_full; 5035 break; 5036 case SPEED_100 + DUPLEX_HALF: 5037 hw->forced_speed_duplex = e1000_100_half; 5038 break; 5039 case SPEED_100 + DUPLEX_FULL: 5040 hw->forced_speed_duplex = e1000_100_full; 5041 break; 5042 case SPEED_1000 + DUPLEX_FULL: 5043 hw->autoneg = 1; 5044 hw->autoneg_advertised = ADVERTISE_1000_FULL; 5045 break; 5046 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 5047 default: 5048 goto err_inval; 5049 } 5050 5051 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ 5052 hw->mdix = AUTO_ALL_MODES; 5053 5054 return 0; 5055 5056 err_inval: 5057 e_err(probe, "Unsupported Speed/Duplex configuration\n"); 5058 return -EINVAL; 5059 } 5060 5061 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) 5062 { 5063 struct net_device *netdev = pci_get_drvdata(pdev); 5064 struct e1000_adapter *adapter = netdev_priv(netdev); 5065 struct e1000_hw *hw = &adapter->hw; 5066 u32 ctrl, ctrl_ext, rctl, status; 5067 u32 wufc = adapter->wol; 5068 5069 netif_device_detach(netdev); 5070 5071 if (netif_running(netdev)) { 5072 int count = E1000_CHECK_RESET_COUNT; 5073 5074 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) 5075 usleep_range(10000, 
20000); 5076 5077 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); 5078 rtnl_lock(); 5079 e1000_down(adapter); 5080 rtnl_unlock(); 5081 } 5082 5083 status = er32(STATUS); 5084 if (status & E1000_STATUS_LU) 5085 wufc &= ~E1000_WUFC_LNKC; 5086 5087 if (wufc) { 5088 e1000_setup_rctl(adapter); 5089 e1000_set_rx_mode(netdev); 5090 5091 rctl = er32(RCTL); 5092 5093 /* turn on all-multi mode if wake on multicast is enabled */ 5094 if (wufc & E1000_WUFC_MC) 5095 rctl |= E1000_RCTL_MPE; 5096 5097 /* enable receives in the hardware */ 5098 ew32(RCTL, rctl | E1000_RCTL_EN); 5099 5100 if (hw->mac_type >= e1000_82540) { 5101 ctrl = er32(CTRL); 5102 /* advertise wake from D3Cold */ 5103 #define E1000_CTRL_ADVD3WUC 0x00100000 5104 /* phy power management enable */ 5105 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 5106 ctrl |= E1000_CTRL_ADVD3WUC | 5107 E1000_CTRL_EN_PHY_PWR_MGMT; 5108 ew32(CTRL, ctrl); 5109 } 5110 5111 if (hw->media_type == e1000_media_type_fiber || 5112 hw->media_type == e1000_media_type_internal_serdes) { 5113 /* keep the laser running in D3 */ 5114 ctrl_ext = er32(CTRL_EXT); 5115 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; 5116 ew32(CTRL_EXT, ctrl_ext); 5117 } 5118 5119 ew32(WUC, E1000_WUC_PME_EN); 5120 ew32(WUFC, wufc); 5121 } else { 5122 ew32(WUC, 0); 5123 ew32(WUFC, 0); 5124 } 5125 5126 e1000_release_manageability(adapter); 5127 5128 *enable_wake = !!wufc; 5129 5130 /* make sure adapter isn't asleep if manageability is enabled */ 5131 if (adapter->en_mng_pt) 5132 *enable_wake = true; 5133 5134 if (netif_running(netdev)) 5135 e1000_free_irq(adapter); 5136 5137 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) 5138 pci_disable_device(pdev); 5139 5140 return 0; 5141 } 5142 5143 static int e1000_suspend(struct device *dev) 5144 { 5145 int retval; 5146 struct pci_dev *pdev = to_pci_dev(dev); 5147 bool wake; 5148 5149 retval = __e1000_shutdown(pdev, &wake); 5150 device_set_wakeup_enable(dev, wake); 5151 5152 return retval; 5153 } 5154 5155 static int e1000_resume(struct device *dev) 5156 { 5157 struct pci_dev *pdev = to_pci_dev(dev); 5158 struct net_device *netdev = pci_get_drvdata(pdev); 5159 struct e1000_adapter *adapter = netdev_priv(netdev); 5160 struct e1000_hw *hw = &adapter->hw; 5161 u32 err; 5162 5163 if (adapter->need_ioport) 5164 err = pci_enable_device(pdev); 5165 else 5166 err = pci_enable_device_mem(pdev); 5167 if (err) { 5168 pr_err("Cannot enable PCI device from suspend\n"); 5169 return err; 5170 } 5171 5172 /* flush memory to make sure state is correct */ 5173 smp_mb__before_atomic(); 5174 clear_bit(__E1000_DISABLED, &adapter->flags); 5175 pci_set_master(pdev); 5176 5177 pci_enable_wake(pdev, PCI_D3hot, 0); 5178 pci_enable_wake(pdev, PCI_D3cold, 0); 5179 5180 if (netif_running(netdev)) { 5181 err = e1000_request_irq(adapter); 5182 if (err) 5183 return err; 5184 } 5185 5186 e1000_power_up_phy(adapter); 5187 e1000_reset(adapter); 5188 ew32(WUS, ~0); 5189 5190 e1000_init_manageability(adapter); 5191 5192 if (netif_running(netdev)) 5193 e1000_up(adapter); 5194 5195 netif_device_attach(netdev); 5196 5197 return 0; 5198 } 5199 5200 static void e1000_shutdown(struct pci_dev *pdev) 5201 { 5202 bool wake; 5203 5204 __e1000_shutdown(pdev, &wake); 5205 5206 if (system_state == SYSTEM_POWER_OFF) { 5207 pci_wake_from_d3(pdev, wake); 5208 pci_set_power_state(pdev, PCI_D3hot); 5209 } 5210 } 5211 5212 #ifdef CONFIG_NET_POLL_CONTROLLER 5213 /* Polling 'interrupt' - used by things like netconsole to send skbs 5214 * without having to re-enable interrupts. 
It's not called while 5215 * the interrupt routine is executing. 5216 */ 5217 static void e1000_netpoll(struct net_device *netdev) 5218 { 5219 struct e1000_adapter *adapter = netdev_priv(netdev); 5220 5221 if (disable_hardirq(adapter->pdev->irq)) 5222 e1000_intr(adapter->pdev->irq, netdev); 5223 enable_irq(adapter->pdev->irq); 5224 } 5225 #endif 5226 5227 /** 5228 * e1000_io_error_detected - called when PCI error is detected 5229 * @pdev: Pointer to PCI device 5230 * @state: The current pci connection state 5231 * 5232 * This function is called after a PCI bus error affecting 5233 * this device has been detected. 5234 */ 5235 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, 5236 pci_channel_state_t state) 5237 { 5238 struct net_device *netdev = pci_get_drvdata(pdev); 5239 struct e1000_adapter *adapter = netdev_priv(netdev); 5240 5241 rtnl_lock(); 5242 netif_device_detach(netdev); 5243 5244 if (state == pci_channel_io_perm_failure) { 5245 rtnl_unlock(); 5246 return PCI_ERS_RESULT_DISCONNECT; 5247 } 5248 5249 if (netif_running(netdev)) 5250 e1000_down(adapter); 5251 5252 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) 5253 pci_disable_device(pdev); 5254 rtnl_unlock(); 5255 5256 /* Request a slot reset. */ 5257 return PCI_ERS_RESULT_NEED_RESET; 5258 } 5259 5260 /** 5261 * e1000_io_slot_reset - called after the pci bus has been reset. 5262 * @pdev: Pointer to PCI device 5263 * 5264 * Restart the card from scratch, as if from a cold-boot. Implementation 5265 * resembles the first-half of the e1000_resume routine. 5266 */ 5267 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) 5268 { 5269 struct net_device *netdev = pci_get_drvdata(pdev); 5270 struct e1000_adapter *adapter = netdev_priv(netdev); 5271 struct e1000_hw *hw = &adapter->hw; 5272 int err; 5273 5274 if (adapter->need_ioport) 5275 err = pci_enable_device(pdev); 5276 else 5277 err = pci_enable_device_mem(pdev); 5278 if (err) { 5279 pr_err("Cannot re-enable PCI device after reset.\n"); 5280 return PCI_ERS_RESULT_DISCONNECT; 5281 } 5282 5283 /* flush memory to make sure state is correct */ 5284 smp_mb__before_atomic(); 5285 clear_bit(__E1000_DISABLED, &adapter->flags); 5286 pci_set_master(pdev); 5287 5288 pci_enable_wake(pdev, PCI_D3hot, 0); 5289 pci_enable_wake(pdev, PCI_D3cold, 0); 5290 5291 e1000_reset(adapter); 5292 ew32(WUS, ~0); 5293 5294 return PCI_ERS_RESULT_RECOVERED; 5295 } 5296 5297 /** 5298 * e1000_io_resume - called when traffic can start flowing again. 5299 * @pdev: Pointer to PCI device 5300 * 5301 * This callback is called when the error recovery driver tells us that 5302 * its OK to resume normal operation. Implementation resembles the 5303 * second-half of the e1000_resume routine. 5304 */ 5305 static void e1000_io_resume(struct pci_dev *pdev) 5306 { 5307 struct net_device *netdev = pci_get_drvdata(pdev); 5308 struct e1000_adapter *adapter = netdev_priv(netdev); 5309 5310 e1000_init_manageability(adapter); 5311 5312 if (netif_running(netdev)) { 5313 if (e1000_up(adapter)) { 5314 pr_info("can't bring device back up after reset\n"); 5315 return; 5316 } 5317 } 5318 5319 netif_device_attach(netdev); 5320 } 5321 5322 /* e1000_main.c */ 5323
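/* The error-recovery callbacks above (e1000_io_error_detected,
 * e1000_io_slot_reset and e1000_io_resume) are invoked by the PCI core
 * through a struct pci_error_handlers hooked into the driver's
 * struct pci_driver via its .err_handler field. A minimal sketch of
 * that wiring, assuming only the callback names defined above (the
 * driver's actual table is defined elsewhere in this file and may
 * differ in detail):
 *
 *	static const struct pci_error_handlers e1000_err_handler = {
 *		.error_detected	= e1000_io_error_detected,
 *		.slot_reset	= e1000_io_slot_reset,
 *		.resume		= e1000_io_resume,
 *	};
 */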