xref: /linux/drivers/net/ethernet/intel/e1000/e1000_main.c (revision 7bb377107c72a40ab7505341f8626c8eb79a0cb7)
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2006 Intel Corporation. */

#include "e1000.h"
#include <net/ip6_checksum.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static const struct pci_device_id e1000_pci_tbl[] = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
int e1000_open(struct net_device *netdev);
int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count)
{
}
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif
164 
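/* Received packets no larger than copybreak are copied into a freshly
 * allocated skb (see MODULE_PARM_DESC below), which lets the original,
 * larger receive buffer be recycled back to the hardware.
 */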
#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static const struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static struct pci_driver e1000_driver = {
	.name     = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = e1000_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = e1000_suspend,
	.resume   = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
205 
/**
 * e1000_get_hw_dev - return the net_device backing a hw struct
 * @hw: pointer to the HW struct
 *
 * Used by the hardware layer to print debugging information.
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	return adapter->netdev;
}
216 
/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);

	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for packets <= %u bytes\n",
				copybreak);
	}
	return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irq_handler_t handler = e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
			  netdev);
	if (err)
		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
286 
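	/* Writing all ones to the Interrupt Mask Clear register masks
	 * every interrupt cause; the flush forces the posted write to
	 * complete before we wait out any in-flight handler.
	 */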
	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}

static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
					       old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}

static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		ew32(MANC, manc);
	}
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		ew32(MANC, manc);
	}
}

/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: board private structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];

		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
}

int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 **/
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle
		 */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}

static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down.
	 * The PHY cannot be powered down if any of the following is true:
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active
	 */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		msleep(1);
	}
out:
	return;
}

static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	/* Since the watchdog task can reschedule other tasks, we should cancel
	 * it first, otherwise we can run into the situation when a work is
	 * still running after the adapter has been turned down.
	 */
	cancel_delayed_work_sync(&adapter->watchdog_task);

	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);
}

void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	/* Set the carrier off after transmits have been disabled in the
	 * hardware, to avoid race conditions with e1000_watchdog() (which
	 * may be running concurrently to us, checking for the carrier
	 * bit to decide whether it should enable transmits again). Such
	 * a race condition would result into transmission being disabled
	 * in the hardware until the next IFF_DOWN+IFF_UP cycle.
	 */
	netif_carrier_off(netdev);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/* Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt.  Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}

void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	WARN_ON(in_interrupt());
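	/* serialize resets: sleep until no other caller holds the
	 * __E1000_RESETTING bit
	 */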
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition PBA for greater than 9k MTU.
	 * To take effect CTRL.RST is required.
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (hw->mac_type == e1000_82547) {
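			/* 82547 splits a 40K packet buffer between Rx (pba)
			 * and the Tx FIFO; the leftover space and the head
			 * address feed the Tx FIFO stall workaround.
			 */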
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = er32(PBA);
		/* upper 16 bits have the Tx packet buffer allocation in KB */
		tx_space = pba >> 16;
		/* lower 16 bits have the Rx packet buffer allocation in KB */
		pba &= 0xffff;
		/* the Tx fifo also stores 16 bytes of information about the Tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation
		 */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment or use Early Receive if available
			 */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/* flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);

		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload
		 */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}

/* Dump the eeprom for users having checksum issues */
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data)
		return;

	ops->get_eeprom(netdev, &eeprom, data);

	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;

	pr_err("/*********************/\n");
	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
	pr_err("Calculated              : 0x%04x\n", csum_new);

	pr_err("Offset    Values\n");
	pr_err("========  ======\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	pr_err("Include this output when contacting your support provider.\n");
	pr_err("This is not a software error! Something bad happened to\n");
	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
	pr_err("result in further problems, possibly loss of data,\n");
	pr_err("corruption or system hangs!\n");
	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
	pr_err("which is invalid and requires you to set the proper MAC\n");
	pr_err("address manually before continuing to enable this network\n");
	pr_err("device. Please inspect the EEPROM dump and report the\n");
	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
	pr_err("/*********************/\n");

	kfree(data);
}

/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
static int e1000_is_need_ioport(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case E1000_DEV_ID_82540EM:
	case E1000_DEV_ID_82540EM_LOM:
	case E1000_DEV_ID_82540EP:
	case E1000_DEV_ID_82540EP_LOM:
	case E1000_DEV_ID_82540EP_LP:
	case E1000_DEV_ID_82541EI:
	case E1000_DEV_ID_82541EI_MOBILE:
	case E1000_DEV_ID_82541ER:
	case E1000_DEV_ID_82541ER_LOM:
	case E1000_DEV_ID_82541GI:
	case E1000_DEV_ID_82541GI_LF:
	case E1000_DEV_ID_82541GI_MOBILE:
	case E1000_DEV_ID_82544EI_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82544GC_COPPER:
	case E1000_DEV_ID_82544GC_LOM:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82546EB_COPPER:
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
		return true;
	default:
		return false;
	}
}

static netdev_features_t e1000_fix_features(struct net_device *netdev,
	netdev_features_t features)
{
	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable make sure Tx flag is always in same state as Rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int e1000_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		e1000_vlan_mode(netdev, features);

	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	else
		e1000_reset(adapter);

	return 1;
}

static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_fix_features	= e1000_fix_features,
	.ndo_set_features	= e1000_set_features,
};

/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function
 * that can be called very early at init (just after struct allocation).
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 * Returns negative error codes if MAC type setup fails.
 */
static int e1000_init_hw_struct(struct e1000_adapter *adapter,
				struct e1000_hw *hw)
{
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	hw->max_frame_size = adapter->netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */
	if (e1000_set_mac_type(hw)) {
		e_err(probe, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);
	e1000_get_bus_info(hw);

	hw->wait_autoneg_complete = false;
	hw->tbi_compatibility_en = true;
	hw->adaptive_ifs = true;

	/* Copper options */

	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = false;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	return 0;
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter = NULL;
	struct e1000_hw *hw;

	static int cards_found;
	static int global_quad_port_a; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;
	bool disable_dev = false;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	if (adapter->need_ioport) {
		for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/* there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware.  There are some
	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
	}

	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* setup the private structure */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;
	if (hw->mac_type == e1000_ce4100) {
		hw->ce4100_gbe_mdio_base_virt =
					ioremap(pci_resource_start(pdev, BAR_1),
						pci_resource_len(pdev, BAR_1));

		if (!hw->ce4100_gbe_mdio_base_virt)
			goto err_mdio_ioremap;
	}

	if (hw->mac_type >= e1000_82543) {
		netdev->hw_features = NETIF_F_SG |
				   NETIF_F_HW_CSUM |
				   NETIF_F_HW_VLAN_CTAG_RX;
		netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	    (hw->mac_type != e1000_82547))
		netdev->hw_features |= NETIF_F_TSO;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= (NETIF_F_RXCSUM |
				NETIF_F_RXALL |
				NETIF_F_RXFCS);

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	netdev->vlan_features |= (NETIF_F_TSO |
				  NETIF_F_HW_CSUM |
				  NETIF_F_SG);

	/* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
	if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
	    hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
		netdev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 46 - 16110 */
	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
	netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state
	 */
	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/* set the MAC address to all zeroes to invalidate it and
		 * temporarily disable this device for the user. This blocks
		 * regular traffic while still permitting ethtool ioctls from
		 * reaching the hardware, and allows the user to bring the
		 * interface up after manually setting a hw addr with
		 * `ip link set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initialization here due to bad MAC address */
	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr))
		e_err(probe, "Invalid MAC Address\n");

	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = true;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* Auto detect PHY address */
	if (hw->mac_type == e1000_ce4100) {
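		/* a PHY that decodes the MDIO address returns a valid ID in
		 * PHY_ID2; all-zeros or all-ones means nothing answered there
		 */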
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);

			if (tmp != 0 && tmp != 0xFF)
				break;
		}

		if (i >= 32)
			goto err_eeprom;
	}

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_vlan_filter_on_off(adapter, false);

	/* print bus type/speed/width info */
	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
	       netdev->dev_addr);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

err_register:
err_eeprom:
	e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_dma:
err_sw_init:
err_mdio_ioremap:
	iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
err_ioremap:
	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
	if (!adapter || disable_dev)
		pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. That could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	bool disable_dev;

	e1000_down_and_stop(adapter);
	e1000_release_manageability(adapter);

	unregister_netdev(netdev);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	if (hw->mac_type == e1000_ce4100)
		iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
	free_netdev(netdev);

	if (disable_dev)
		pci_disable_device(pdev);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * e1000_init_hw_struct MUST be called before this function
 **/
static int e1000_sw_init(struct e1000_adapter *adapter)
{
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		e_err(probe, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}

/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int e1000_alloc_queues(struct e1000_adapter *adapter)
{
	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/
int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT))
		e1000_update_mng_vlan(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}

/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int count = E1000_CHECK_RESET_COUNT;

	while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
		usleep_range(10000, 20000);

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it)
	 */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
				       adapter->mng_vlan_id);
	}

	return 0;
}

/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
				  unsigned long len)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long begin = (unsigned long)start;
	unsigned long end = begin + len;

	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23
	 */
	if (hw->mac_type == e1000_82545 ||
	    hw->mac_type == e1000_ce4100 ||
	    hw->mac_type == e1000_82546) {
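		/* begin and end - 1 fall in the same 64 kB region exactly
		 * when their addresses agree above bit 15, i.e. when the XOR
		 * of the two, shifted right by 16, is zero
		 */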
		return ((begin ^ (end - 1)) >> 16) == 0;
	}

	return true;
}

/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr:    tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_tx_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info)
		return -ENOMEM;

	/* round up to nearest 4K */
	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;

		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
		      txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
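		/* (holding on to the misaligned block keeps the allocator
		 * from handing back the same region on the retry)
		 */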
		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
					  txdr->dma);
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}

/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				e1000_free_tx_resources(adapter,
							&adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tdlen, tctl, tipg;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	switch (adapter->num_tx_queues) {
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		ew32(TDLEN, tdlen);
		ew32(TDBAH, (tdba >> 32));
		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
		ew32(TDT, 0);
		ew32(TDH, 0);
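		/* pre-82543 parts expose the descriptor head/tail registers
		 * at legacy offsets, so cache whichever pair applies
		 */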
1607 		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1608 					   E1000_TDH : E1000_82542_TDH);
1609 		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1610 					   E1000_TDT : E1000_82542_TDT);
1611 		break;
1612 	}
1613 
1614 	/* Set the default values for the Tx Inter Packet Gap timer */
1615 	if ((hw->media_type == e1000_media_type_fiber ||
1616 	     hw->media_type == e1000_media_type_internal_serdes))
1617 		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1618 	else
1619 		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1620 
1621 	switch (hw->mac_type) {
1622 	case e1000_82542_rev2_0:
1623 	case e1000_82542_rev2_1:
1624 		tipg = DEFAULT_82542_TIPG_IPGT;
1625 		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1626 		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1627 		break;
1628 	default:
1629 		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1630 		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1631 		break;
1632 	}
1633 	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1634 	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1635 	ew32(TIPG, tipg);
1636 
1637 	/* Set the Tx Interrupt Delay register */
1638 
1639 	ew32(TIDV, adapter->tx_int_delay);
1640 	if (hw->mac_type >= e1000_82540)
1641 		ew32(TADV, adapter->tx_abs_int_delay);
1642 
1643 	/* Program the Transmit Control Register */
1644 
1645 	tctl = er32(TCTL);
1646 	tctl &= ~E1000_TCTL_CT;
1647 	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1648 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1649 
1650 	e1000_config_collision_dist(hw);
1651 
1652 	/* Setup Transmit Descriptor Settings for eop descriptor */
1653 	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1654 
1655 	/* only set IDE if we are delaying interrupts using the timers */
1656 	if (adapter->tx_int_delay)
1657 		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1658 
1659 	if (hw->mac_type < e1000_82543)
1660 		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1661 	else
1662 		adapter->txd_cmd |= E1000_TXD_CMD_RS;
1663 
1664 	/* Cache if we're 82544 running in PCI-X because we'll
1665 	 * need this to apply a workaround later in the send path.
1666 	 */
1667 	if (hw->mac_type == e1000_82544 &&
1668 	    hw->bus_type == e1000_bus_type_pcix)
1669 		adapter->pcix_82544 = true;
1670 
1671 	ew32(TCTL, tctl);
1672 
1673 }
1674 
1675 /**
1676  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1677  * @adapter: board private structure
1678  * @rxdr:    rx descriptor ring (for a specific queue) to setup
1679  *
1680  * Returns 0 on success, negative on failure
1681  **/
1682 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1683 				    struct e1000_rx_ring *rxdr)
1684 {
1685 	struct pci_dev *pdev = adapter->pdev;
1686 	int size, desc_len;
1687 
1688 	size = sizeof(struct e1000_rx_buffer) * rxdr->count;
1689 	rxdr->buffer_info = vzalloc(size);
1690 	if (!rxdr->buffer_info)
1691 		return -ENOMEM;
1692 
1693 	desc_len = sizeof(struct e1000_rx_desc);
1694 
1695 	/* Round up to nearest 4K */
1696 
1697 	rxdr->size = rxdr->count * desc_len;
1698 	rxdr->size = ALIGN(rxdr->size, 4096);
1699 
1700 	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1701 					GFP_KERNEL);
1702 	if (!rxdr->desc) {
1703 setup_rx_desc_die:
1704 		vfree(rxdr->buffer_info);
1705 		return -ENOMEM;
1706 	}
1707 
1708 	/* Fix for errata 23, can't cross 64kB boundary */
1709 	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1710 		void *olddesc = rxdr->desc;
1711 		dma_addr_t olddma = rxdr->dma;
1712 		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1713 		      rxdr->size, rxdr->desc);
1714 		/* Try again, without freeing the previous */
1715 		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1716 						&rxdr->dma, GFP_KERNEL);
1717 		/* Failed allocation, critical failure */
1718 		if (!rxdr->desc) {
1719 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1720 					  olddma);
1721 			goto setup_rx_desc_die;
1722 		}
1723 
1724 		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1725 			/* give up */
1726 			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1727 					  rxdr->dma);
1728 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1729 					  olddma);
			e_err(probe, "Unable to allocate aligned memory for the Rx descriptor ring\n");
1732 			goto setup_rx_desc_die;
1733 		} else {
1734 			/* Free old allocation, new allocation was successful */
1735 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1736 					  olddma);
1737 		}
1738 	}
1739 	memset(rxdr->desc, 0, rxdr->size);
1740 
1741 	rxdr->next_to_clean = 0;
1742 	rxdr->next_to_use = 0;
1743 	rxdr->rx_skb_top = NULL;
1744 
1745 	return 0;
1746 }
1747 
1748 /**
1749  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1750  * 				  (Descriptors) for all queues
1751  * @adapter: board private structure
1752  *
1753  * Return 0 on success, negative on failure
1754  **/
1755 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1756 {
1757 	int i, err = 0;
1758 
1759 	for (i = 0; i < adapter->num_rx_queues; i++) {
1760 		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1761 		if (err) {
1762 			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1763 			for (i-- ; i >= 0; i--)
1764 				e1000_free_rx_resources(adapter,
1765 							&adapter->rx_ring[i]);
1766 			break;
1767 		}
1768 	}
1769 
1770 	return err;
1771 }
1772 
1773 /**
1774  * e1000_setup_rctl - configure the receive control registers
1775  * @adapter: Board private structure
1776  **/
1777 static void e1000_setup_rctl(struct e1000_adapter *adapter)
1778 {
1779 	struct e1000_hw *hw = &adapter->hw;
1780 	u32 rctl;
1781 
1782 	rctl = er32(RCTL);
1783 
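	/* Clear the two-bit multicast-offset field before merging in
	 * the filter type chosen at initialization.
	 */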
1784 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1785 
1786 	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1787 		E1000_RCTL_RDMTS_HALF |
1788 		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1789 
1790 	if (hw->tbi_compatibility_on == 1)
1791 		rctl |= E1000_RCTL_SBP;
1792 	else
1793 		rctl &= ~E1000_RCTL_SBP;
1794 
1795 	if (adapter->netdev->mtu <= ETH_DATA_LEN)
1796 		rctl &= ~E1000_RCTL_LPE;
1797 	else
1798 		rctl |= E1000_RCTL_LPE;
1799 
1800 	/* Setup buffer sizes */
1801 	rctl &= ~E1000_RCTL_SZ_4096;
1802 	rctl |= E1000_RCTL_BSEX;
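	/* With BSEX set, the SZ bits select the extended buffer sizes;
	 * the 2048 case below clears BSEX to use the base encoding.
	 */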
1803 	switch (adapter->rx_buffer_len) {
1804 	case E1000_RXBUFFER_2048:
1805 	default:
1806 		rctl |= E1000_RCTL_SZ_2048;
1807 		rctl &= ~E1000_RCTL_BSEX;
1808 		break;
1809 	case E1000_RXBUFFER_4096:
1810 		rctl |= E1000_RCTL_SZ_4096;
1811 		break;
1812 	case E1000_RXBUFFER_8192:
1813 		rctl |= E1000_RCTL_SZ_8192;
1814 		break;
1815 	case E1000_RXBUFFER_16384:
1816 		rctl |= E1000_RCTL_SZ_16384;
1817 		break;
1818 	}
1819 
1820 	/* This is useful for sniffing bad packets. */
1821 	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in e1000_set_rx_mode
		 */
1825 		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1826 			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
1827 			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1828 
1829 		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1830 			  E1000_RCTL_DPF | /* Allow filtered pause */
1831 			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1832 		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1833 		 * and that breaks VLANs.
1834 		 */
1835 	}
1836 
1837 	ew32(RCTL, rctl);
1838 }
1839 
1840 /**
1841  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1842  * @adapter: board private structure
1843  *
1844  * Configure the Rx unit of the MAC after a reset.
1845  **/
1846 static void e1000_configure_rx(struct e1000_adapter *adapter)
1847 {
1848 	u64 rdba;
1849 	struct e1000_hw *hw = &adapter->hw;
1850 	u32 rdlen, rctl, rxcsum;
1851 
1852 	if (adapter->netdev->mtu > ETH_DATA_LEN) {
1853 		rdlen = adapter->rx_ring[0].count *
1854 			sizeof(struct e1000_rx_desc);
1855 		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1856 		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1857 	} else {
1858 		rdlen = adapter->rx_ring[0].count *
1859 			sizeof(struct e1000_rx_desc);
1860 		adapter->clean_rx = e1000_clean_rx_irq;
1861 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1862 	}
1863 
1864 	/* disable receives while setting up the descriptors */
1865 	rctl = er32(RCTL);
1866 	ew32(RCTL, rctl & ~E1000_RCTL_EN);
1867 
1868 	/* set the Receive Delay Timer Register */
1869 	ew32(RDTR, adapter->rx_int_delay);
1870 
1871 	if (hw->mac_type >= e1000_82540) {
1872 		ew32(RADV, adapter->rx_abs_int_delay);
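		/* ITR counts in 256 ns increments, so convert the
		 * desired interrupts/sec into that interval.
		 */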
1873 		if (adapter->itr_setting != 0)
1874 			ew32(ITR, 1000000000 / (adapter->itr * 256));
1875 	}
1876 
1877 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
1878 	 * the Base and Length of the Rx Descriptor Ring
1879 	 */
1880 	switch (adapter->num_rx_queues) {
1881 	case 1:
1882 	default:
1883 		rdba = adapter->rx_ring[0].dma;
1884 		ew32(RDLEN, rdlen);
1885 		ew32(RDBAH, (rdba >> 32));
1886 		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1887 		ew32(RDT, 0);
1888 		ew32(RDH, 0);
1889 		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1890 					   E1000_RDH : E1000_82542_RDH);
1891 		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1892 					   E1000_RDT : E1000_82542_RDT);
1893 		break;
1894 	}
1895 
1896 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
1897 	if (hw->mac_type >= e1000_82543) {
1898 		rxcsum = er32(RXCSUM);
1899 		if (adapter->rx_csum)
1900 			rxcsum |= E1000_RXCSUM_TUOFL;
1901 		else
1902 			/* don't need to clear IPPCSE as it defaults to 0 */
1903 			rxcsum &= ~E1000_RXCSUM_TUOFL;
1904 		ew32(RXCSUM, rxcsum);
1905 	}
1906 
1907 	/* Enable Receives */
1908 	ew32(RCTL, rctl | E1000_RCTL_EN);
1909 }
1910 
1911 /**
1912  * e1000_free_tx_resources - Free Tx Resources per Queue
1913  * @adapter: board private structure
1914  * @tx_ring: Tx descriptor ring for a specific queue
1915  *
1916  * Free all transmit software resources
1917  **/
1918 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1919 				    struct e1000_tx_ring *tx_ring)
1920 {
1921 	struct pci_dev *pdev = adapter->pdev;
1922 
1923 	e1000_clean_tx_ring(adapter, tx_ring);
1924 
1925 	vfree(tx_ring->buffer_info);
1926 	tx_ring->buffer_info = NULL;
1927 
1928 	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1929 			  tx_ring->dma);
1930 
1931 	tx_ring->desc = NULL;
1932 }
1933 
1934 /**
1935  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1936  * @adapter: board private structure
1937  *
1938  * Free all transmit software resources
1939  **/
1940 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1941 {
1942 	int i;
1943 
1944 	for (i = 0; i < adapter->num_tx_queues; i++)
1945 		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1946 }
1947 
1948 static void
1949 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1950 				 struct e1000_tx_buffer *buffer_info)
1951 {
1952 	if (buffer_info->dma) {
1953 		if (buffer_info->mapped_as_page)
1954 			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1955 				       buffer_info->length, DMA_TO_DEVICE);
1956 		else
1957 			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1958 					 buffer_info->length,
1959 					 DMA_TO_DEVICE);
1960 		buffer_info->dma = 0;
1961 	}
1962 	if (buffer_info->skb) {
1963 		dev_kfree_skb_any(buffer_info->skb);
1964 		buffer_info->skb = NULL;
1965 	}
1966 	buffer_info->time_stamp = 0;
1967 	/* buffer_info must be completely set up in the transmit path */
1968 }
1969 
1970 /**
1971  * e1000_clean_tx_ring - Free Tx Buffers
1972  * @adapter: board private structure
1973  * @tx_ring: ring to be cleaned
1974  **/
1975 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1976 				struct e1000_tx_ring *tx_ring)
1977 {
1978 	struct e1000_hw *hw = &adapter->hw;
1979 	struct e1000_tx_buffer *buffer_info;
1980 	unsigned long size;
1981 	unsigned int i;
1982 
1983 	/* Free all the Tx ring sk_buffs */
1984 
1985 	for (i = 0; i < tx_ring->count; i++) {
1986 		buffer_info = &tx_ring->buffer_info[i];
1987 		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1988 	}
1989 
1990 	netdev_reset_queue(adapter->netdev);
1991 	size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
1992 	memset(tx_ring->buffer_info, 0, size);
1993 
1994 	/* Zero out the descriptor ring */
1995 
1996 	memset(tx_ring->desc, 0, tx_ring->size);
1997 
1998 	tx_ring->next_to_use = 0;
1999 	tx_ring->next_to_clean = 0;
2000 	tx_ring->last_tx_tso = false;
2001 
2002 	writel(0, hw->hw_addr + tx_ring->tdh);
2003 	writel(0, hw->hw_addr + tx_ring->tdt);
2004 }
2005 
2006 /**
2007  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2008  * @adapter: board private structure
2009  **/
2010 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2011 {
2012 	int i;
2013 
2014 	for (i = 0; i < adapter->num_tx_queues; i++)
2015 		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2016 }
2017 
2018 /**
2019  * e1000_free_rx_resources - Free Rx Resources
2020  * @adapter: board private structure
2021  * @rx_ring: ring to clean the resources from
2022  *
2023  * Free all receive software resources
2024  **/
2025 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2026 				    struct e1000_rx_ring *rx_ring)
2027 {
2028 	struct pci_dev *pdev = adapter->pdev;
2029 
2030 	e1000_clean_rx_ring(adapter, rx_ring);
2031 
2032 	vfree(rx_ring->buffer_info);
2033 	rx_ring->buffer_info = NULL;
2034 
2035 	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2036 			  rx_ring->dma);
2037 
2038 	rx_ring->desc = NULL;
2039 }
2040 
2041 /**
2042  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2043  * @adapter: board private structure
2044  *
2045  * Free all receive software resources
2046  **/
2047 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2048 {
2049 	int i;
2050 
2051 	for (i = 0; i < adapter->num_rx_queues; i++)
2052 		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2053 }
2054 
2055 #define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
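/* One receive fragment: headroom plus the receive buffer, aligned, plus
 * room for the skb_shared_info placed at the end when the buffer is
 * later turned into an skb.
 */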
2056 static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2057 {
2058 	return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2059 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2060 }
2061 
2062 static void *e1000_alloc_frag(const struct e1000_adapter *a)
2063 {
2064 	unsigned int len = e1000_frag_len(a);
2065 	u8 *data = netdev_alloc_frag(len);
2066 
2067 	if (likely(data))
2068 		data += E1000_HEADROOM;
2069 	return data;
2070 }
2071 
2072 /**
2073  * e1000_clean_rx_ring - Free Rx Buffers per Queue
2074  * @adapter: board private structure
2075  * @rx_ring: ring to free buffers from
2076  **/
2077 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2078 				struct e1000_rx_ring *rx_ring)
2079 {
2080 	struct e1000_hw *hw = &adapter->hw;
2081 	struct e1000_rx_buffer *buffer_info;
2082 	struct pci_dev *pdev = adapter->pdev;
2083 	unsigned long size;
2084 	unsigned int i;
2085 
2086 	/* Free all the Rx netfrags */
2087 	for (i = 0; i < rx_ring->count; i++) {
2088 		buffer_info = &rx_ring->buffer_info[i];
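		/* The normal Rx path uses page-frag buffers and the
		 * jumbo path whole pages, so each path frees its own
		 * kind of buffer.
		 */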
2089 		if (adapter->clean_rx == e1000_clean_rx_irq) {
2090 			if (buffer_info->dma)
2091 				dma_unmap_single(&pdev->dev, buffer_info->dma,
2092 						 adapter->rx_buffer_len,
2093 						 DMA_FROM_DEVICE);
2094 			if (buffer_info->rxbuf.data) {
2095 				skb_free_frag(buffer_info->rxbuf.data);
2096 				buffer_info->rxbuf.data = NULL;
2097 			}
2098 		} else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2099 			if (buffer_info->dma)
2100 				dma_unmap_page(&pdev->dev, buffer_info->dma,
2101 					       adapter->rx_buffer_len,
2102 					       DMA_FROM_DEVICE);
2103 			if (buffer_info->rxbuf.page) {
2104 				put_page(buffer_info->rxbuf.page);
2105 				buffer_info->rxbuf.page = NULL;
2106 			}
2107 		}
2108 
2109 		buffer_info->dma = 0;
2110 	}
2111 
2112 	/* there also may be some cached data from a chained receive */
2113 	napi_free_frags(&adapter->napi);
2114 	rx_ring->rx_skb_top = NULL;
2115 
2116 	size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
2117 	memset(rx_ring->buffer_info, 0, size);
2118 
2119 	/* Zero out the descriptor ring */
2120 	memset(rx_ring->desc, 0, rx_ring->size);
2121 
2122 	rx_ring->next_to_clean = 0;
2123 	rx_ring->next_to_use = 0;
2124 
2125 	writel(0, hw->hw_addr + rx_ring->rdh);
2126 	writel(0, hw->hw_addr + rx_ring->rdt);
2127 }
2128 
2129 /**
2130  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2131  * @adapter: board private structure
2132  **/
2133 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2134 {
2135 	int i;
2136 
2137 	for (i = 0; i < adapter->num_rx_queues; i++)
2138 		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2139 }
2140 
2141 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2142  * and memory write and invalidate disabled for certain operations
2143  */
2144 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2145 {
2146 	struct e1000_hw *hw = &adapter->hw;
2147 	struct net_device *netdev = adapter->netdev;
2148 	u32 rctl;
2149 
2150 	e1000_pci_clear_mwi(hw);
2151 
2152 	rctl = er32(RCTL);
2153 	rctl |= E1000_RCTL_RST;
2154 	ew32(RCTL, rctl);
2155 	E1000_WRITE_FLUSH();
2156 	mdelay(5);
2157 
2158 	if (netif_running(netdev))
2159 		e1000_clean_all_rx_rings(adapter);
2160 }
2161 
2162 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2163 {
2164 	struct e1000_hw *hw = &adapter->hw;
2165 	struct net_device *netdev = adapter->netdev;
2166 	u32 rctl;
2167 
2168 	rctl = er32(RCTL);
2169 	rctl &= ~E1000_RCTL_RST;
2170 	ew32(RCTL, rctl);
2171 	E1000_WRITE_FLUSH();
2172 	mdelay(5);
2173 
2174 	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2175 		e1000_pci_set_mwi(hw);
2176 
2177 	if (netif_running(netdev)) {
2178 		/* No need to loop, because 82542 supports only 1 queue */
2179 		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2180 		e1000_configure_rx(adapter);
2181 		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2182 	}
2183 }
2184 
2185 /**
2186  * e1000_set_mac - Change the Ethernet Address of the NIC
2187  * @netdev: network interface device structure
2188  * @p: pointer to an address structure
2189  *
2190  * Returns 0 on success, negative on failure
2191  **/
2192 static int e1000_set_mac(struct net_device *netdev, void *p)
2193 {
2194 	struct e1000_adapter *adapter = netdev_priv(netdev);
2195 	struct e1000_hw *hw = &adapter->hw;
2196 	struct sockaddr *addr = p;
2197 
2198 	if (!is_valid_ether_addr(addr->sa_data))
2199 		return -EADDRNOTAVAIL;
2200 
2201 	/* 82542 2.0 needs to be in reset to write receive address registers */
2202 
2203 	if (hw->mac_type == e1000_82542_rev2_0)
2204 		e1000_enter_82542_rst(adapter);
2205 
2206 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2207 	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2208 
2209 	e1000_rar_set(hw, hw->mac_addr, 0);
2210 
2211 	if (hw->mac_type == e1000_82542_rev2_0)
2212 		e1000_leave_82542_rst(adapter);
2213 
2214 	return 0;
2215 }
2216 
2217 /**
2218  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2219  * @netdev: network interface device structure
2220  *
2221  * The set_rx_mode entry point is called whenever the unicast or multicast
2222  * address lists or the network interface flags are updated. This routine is
2223  * responsible for configuring the hardware for proper unicast, multicast,
2224  * promiscuous mode, and all-multi behavior.
2225  **/
2226 static void e1000_set_rx_mode(struct net_device *netdev)
2227 {
2228 	struct e1000_adapter *adapter = netdev_priv(netdev);
2229 	struct e1000_hw *hw = &adapter->hw;
2230 	struct netdev_hw_addr *ha;
2231 	bool use_uc = false;
2232 	u32 rctl;
2233 	u32 hash_value;
2234 	int i, rar_entries = E1000_RAR_ENTRIES;
2235 	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2236 	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2237 
2238 	if (!mcarray)
2239 		return;
2240 
2241 	/* Check for Promiscuous and All Multicast modes */
2242 
2243 	rctl = er32(RCTL);
2244 
2245 	if (netdev->flags & IFF_PROMISC) {
2246 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2247 		rctl &= ~E1000_RCTL_VFE;
2248 	} else {
2249 		if (netdev->flags & IFF_ALLMULTI)
2250 			rctl |= E1000_RCTL_MPE;
2251 		else
2252 			rctl &= ~E1000_RCTL_MPE;
2253 		/* Enable VLAN filter if there is a VLAN */
2254 		if (e1000_vlan_used(adapter))
2255 			rctl |= E1000_RCTL_VFE;
2256 	}
2257 
2258 	if (netdev_uc_count(netdev) > rar_entries - 1) {
2259 		rctl |= E1000_RCTL_UPE;
2260 	} else if (!(netdev->flags & IFF_PROMISC)) {
2261 		rctl &= ~E1000_RCTL_UPE;
2262 		use_uc = true;
2263 	}
2264 
2265 	ew32(RCTL, rctl);
2266 
2267 	/* 82542 2.0 needs to be in reset to write receive address registers */
2268 
2269 	if (hw->mac_type == e1000_82542_rev2_0)
2270 		e1000_enter_82542_rst(adapter);
2271 
2272 	/* load the first 14 addresses into the exact filters 1-14. Unicast
2273 	 * addresses take precedence to avoid disabling unicast filtering
2274 	 * when possible.
2275 	 *
	 * RAR 0 is used for the station MAC address.  If there are not
	 * 14 addresses, go ahead and clear the remaining filters.
2278 	 */
2279 	i = 1;
2280 	if (use_uc)
2281 		netdev_for_each_uc_addr(ha, netdev) {
2282 			if (i == rar_entries)
2283 				break;
2284 			e1000_rar_set(hw, ha->addr, i++);
2285 		}
2286 
2287 	netdev_for_each_mc_addr(ha, netdev) {
2288 		if (i == rar_entries) {
2289 			/* load any remaining addresses into the hash table */
2290 			u32 hash_reg, hash_bit, mta;
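			/* The 4096-bit multicast table is 128 32-bit
			 * registers: hash bits [11:5] select the
			 * register, bits [4:0] the bit within it.
			 */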
2291 			hash_value = e1000_hash_mc_addr(hw, ha->addr);
2292 			hash_reg = (hash_value >> 5) & 0x7F;
2293 			hash_bit = hash_value & 0x1F;
2294 			mta = (1 << hash_bit);
2295 			mcarray[hash_reg] |= mta;
2296 		} else {
2297 			e1000_rar_set(hw, ha->addr, i++);
2298 		}
2299 	}
2300 
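	/* Clear any remaining exact filters; each RAR entry is a pair
	 * of 32-bit registers (address low dword, then high dword).
	 */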
2301 	for (; i < rar_entries; i++) {
2302 		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2303 		E1000_WRITE_FLUSH();
2304 		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2305 		E1000_WRITE_FLUSH();
2306 	}
2307 
2308 	/* write the hash table completely, write from bottom to avoid
2309 	 * both stupid write combining chipsets, and flushing each write
2310 	 */
	for (i = mta_reg_count - 1; i >= 0; i--) {
		/* The 82544 has an erratum where writing an odd offset
		 * can overwrite the previous even offset; writing
		 * backwards over the range solves the issue by always
		 * writing the odd offset first.
		 */
2317 		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2318 	}
2319 	E1000_WRITE_FLUSH();
2320 
2321 	if (hw->mac_type == e1000_82542_rev2_0)
2322 		e1000_leave_82542_rst(adapter);
2323 
2324 	kfree(mcarray);
2325 }
2326 
2327 /**
2328  * e1000_update_phy_info_task - get phy info
2329  * @work: work struct contained inside adapter struct
2330  *
2331  * Need to wait a few seconds after link up to get diagnostic information from
2332  * the phy
2333  */
2334 static void e1000_update_phy_info_task(struct work_struct *work)
2335 {
2336 	struct e1000_adapter *adapter = container_of(work,
2337 						     struct e1000_adapter,
2338 						     phy_info_task.work);
2339 
2340 	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2341 }
2342 
2343 /**
 * e1000_82547_tx_fifo_stall_task - reset the Tx FIFO once it has drained
2345  * @work: work struct contained inside adapter struct
2346  **/
2347 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2348 {
2349 	struct e1000_adapter *adapter = container_of(work,
2350 						     struct e1000_adapter,
2351 						     fifo_stall_task.work);
2352 	struct e1000_hw *hw = &adapter->hw;
2353 	struct net_device *netdev = adapter->netdev;
2354 	u32 tctl;
2355 
2356 	if (atomic_read(&adapter->tx_fifo_stall)) {
		if ((er32(TDT) == er32(TDH)) &&
		    (er32(TDFT) == er32(TDFH)) &&
		    (er32(TDFTS) == er32(TDFHS))) {
2360 			tctl = er32(TCTL);
2361 			ew32(TCTL, tctl & ~E1000_TCTL_EN);
2362 			ew32(TDFT, adapter->tx_head_addr);
2363 			ew32(TDFH, adapter->tx_head_addr);
2364 			ew32(TDFTS, adapter->tx_head_addr);
2365 			ew32(TDFHS, adapter->tx_head_addr);
2366 			ew32(TCTL, tctl);
2367 			E1000_WRITE_FLUSH();
2368 
2369 			adapter->tx_fifo_head = 0;
2370 			atomic_set(&adapter->tx_fifo_stall, 0);
2371 			netif_wake_queue(netdev);
2372 		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2373 			schedule_delayed_work(&adapter->fifo_stall_task, 1);
2374 		}
2375 	}
2376 }
2377 
2378 bool e1000_has_link(struct e1000_adapter *adapter)
2379 {
2380 	struct e1000_hw *hw = &adapter->hw;
2381 	bool link_active = false;
2382 
	/* get_link_status is set on LSC (link status) interrupt or rx
	 * sequence error interrupt (except on Intel CE4100).
	 * get_link_status will stay true until
	 * e1000_check_for_link establishes link, for copper adapters
	 * ONLY.
	 */
2389 	switch (hw->media_type) {
2390 	case e1000_media_type_copper:
2391 		if (hw->mac_type == e1000_ce4100)
2392 			hw->get_link_status = 1;
2393 		if (hw->get_link_status) {
2394 			e1000_check_for_link(hw);
2395 			link_active = !hw->get_link_status;
2396 		} else {
2397 			link_active = true;
2398 		}
2399 		break;
2400 	case e1000_media_type_fiber:
2401 		e1000_check_for_link(hw);
2402 		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2403 		break;
2404 	case e1000_media_type_internal_serdes:
2405 		e1000_check_for_link(hw);
2406 		link_active = hw->serdes_has_link;
2407 		break;
2408 	default:
2409 		break;
2410 	}
2411 
2412 	return link_active;
2413 }
2414 
2415 /**
2416  * e1000_watchdog - work function
2417  * @work: work struct contained inside adapter struct
2418  **/
2419 static void e1000_watchdog(struct work_struct *work)
2420 {
2421 	struct e1000_adapter *adapter = container_of(work,
2422 						     struct e1000_adapter,
2423 						     watchdog_task.work);
2424 	struct e1000_hw *hw = &adapter->hw;
2425 	struct net_device *netdev = adapter->netdev;
2426 	struct e1000_tx_ring *txdr = adapter->tx_ring;
2427 	u32 link, tctl;
2428 
2429 	link = e1000_has_link(adapter);
2430 	if ((netif_carrier_ok(netdev)) && link)
2431 		goto link_up;
2432 
2433 	if (link) {
2434 		if (!netif_carrier_ok(netdev)) {
2435 			u32 ctrl;
2436 			/* update snapshot of PHY registers on LSC */
2437 			e1000_get_speed_and_duplex(hw,
2438 						   &adapter->link_speed,
2439 						   &adapter->link_duplex);
2440 
2441 			ctrl = er32(CTRL);
2442 			pr_info("%s NIC Link is Up %d Mbps %s, "
2443 				"Flow Control: %s\n",
2444 				netdev->name,
2445 				adapter->link_speed,
2446 				adapter->link_duplex == FULL_DUPLEX ?
2447 				"Full Duplex" : "Half Duplex",
2448 				((ctrl & E1000_CTRL_TFCE) && (ctrl &
2449 				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2450 				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2451 				E1000_CTRL_TFCE) ? "TX" : "None")));
2452 
2453 			/* adjust timeout factor according to speed/duplex */
2454 			adapter->tx_timeout_factor = 1;
2455 			switch (adapter->link_speed) {
2456 			case SPEED_10:
2457 				adapter->tx_timeout_factor = 16;
2458 				break;
2459 			case SPEED_100:
				/* maybe add some timeout factor? */
2461 				break;
2462 			}
2463 
2464 			/* enable transmits in the hardware */
2465 			tctl = er32(TCTL);
2466 			tctl |= E1000_TCTL_EN;
2467 			ew32(TCTL, tctl);
2468 
2469 			netif_carrier_on(netdev);
2470 			if (!test_bit(__E1000_DOWN, &adapter->flags))
2471 				schedule_delayed_work(&adapter->phy_info_task,
2472 						      2 * HZ);
2473 			adapter->smartspeed = 0;
2474 		}
2475 	} else {
2476 		if (netif_carrier_ok(netdev)) {
2477 			adapter->link_speed = 0;
2478 			adapter->link_duplex = 0;
2479 			pr_info("%s NIC Link is Down\n",
2480 				netdev->name);
2481 			netif_carrier_off(netdev);
2482 
2483 			if (!test_bit(__E1000_DOWN, &adapter->flags))
2484 				schedule_delayed_work(&adapter->phy_info_task,
2485 						      2 * HZ);
2486 		}
2487 
2488 		e1000_smartspeed(adapter);
2489 	}
2490 
2491 link_up:
2492 	e1000_update_stats(adapter);
2493 
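	/* Refresh the per-interval deltas: the packet and collision
	 * deltas feed e1000_update_adaptive() below, while the
	 * good-octet deltas feed the simple ITR mode.
	 */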
2494 	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2495 	adapter->tpt_old = adapter->stats.tpt;
2496 	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2497 	adapter->colc_old = adapter->stats.colc;
2498 
2499 	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2500 	adapter->gorcl_old = adapter->stats.gorcl;
2501 	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2502 	adapter->gotcl_old = adapter->stats.gotcl;
2503 
2504 	e1000_update_adaptive(hw);
2505 
2506 	if (!netif_carrier_ok(netdev)) {
2507 		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2508 			/* We've lost link, so the controller stops DMA,
2509 			 * but we've got queued Tx work that's never going
2510 			 * to get done, so reset controller to flush Tx.
2511 			 * (Do the reset outside of interrupt context).
2512 			 */
2513 			adapter->tx_timeout_count++;
2514 			schedule_work(&adapter->reset_task);
2515 			/* exit immediately since reset is imminent */
2516 			return;
2517 		}
2518 	}
2519 
2520 	/* Simple mode for Interrupt Throttle Rate (ITR) */
2521 	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2522 		/* Symmetric Tx/Rx gets a reduced ITR=2000;
2523 		 * Total asymmetrical Tx or Rx gets ITR=8000;
2524 		 * everyone else is between 2000-8000.
2525 		 */
2526 		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2527 		u32 dif = (adapter->gotcl > adapter->gorcl ?
2528 			    adapter->gotcl - adapter->gorcl :
2529 			    adapter->gorcl - adapter->gotcl) / 10000;
2530 		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2531 
2532 		ew32(ITR, 1000000000 / (itr * 256));
2533 	}
2534 
2535 	/* Cause software interrupt to ensure rx ring is cleaned */
2536 	ew32(ICS, E1000_ICS_RXDMT0);
2537 
2538 	/* Force detection of hung controller every watchdog period */
2539 	adapter->detect_tx_hung = true;
2540 
2541 	/* Reschedule the task */
2542 	if (!test_bit(__E1000_DOWN, &adapter->flags))
2543 		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2544 }
2545 
2546 enum latency_range {
2547 	lowest_latency = 0,
2548 	low_latency = 1,
2549 	bulk_latency = 2,
2550 	latency_invalid = 255
2551 };
2552 
2553 /**
2554  * e1000_update_itr - update the dynamic ITR value based on statistics
2555  * @adapter: pointer to adapter
2556  * @itr_setting: current adapter->itr
2557  * @packets: the number of packets during this measurement interval
2558  * @bytes: the number of bytes during this measurement interval
2559  *
2560  *      Stores a new ITR value based on packets and byte
2561  *      counts during the last interrupt.  The advantage of per interrupt
2562  *      computation is faster updates and more accurate ITR for the current
2563  *      traffic pattern.  Constants in this function were computed
2564  *      based on theoretical maximum wire speed and thresholds were set based
2565  *      on testing data as well as attempting to minimize response time
2566  *      while increasing bulk throughput.
2567  *      this functionality is controlled by the InterruptThrottleRate module
2568  *      parameter (see e1000_param.c)
2569  **/
2570 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2571 				     u16 itr_setting, int packets, int bytes)
2572 {
2573 	unsigned int retval = itr_setting;
2574 	struct e1000_hw *hw = &adapter->hw;
2575 
2576 	if (unlikely(hw->mac_type < e1000_82540))
2577 		goto update_itr_done;
2578 
2579 	if (packets == 0)
2580 		goto update_itr_done;
2581 
2582 	switch (itr_setting) {
2583 	case lowest_latency:
		/* jumbo frames get bulk treatment */
2585 		if (bytes/packets > 8000)
2586 			retval = bulk_latency;
2587 		else if ((packets < 5) && (bytes > 512))
2588 			retval = low_latency;
2589 		break;
2590 	case low_latency:  /* 50 usec aka 20000 ints/s */
2591 		if (bytes > 10000) {
2592 			/* jumbo frames need bulk latency setting */
2593 			if (bytes/packets > 8000)
2594 				retval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				retval = bulk_latency;
			else if (packets > 35)
				retval = lowest_latency;
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
2604 	case bulk_latency: /* 250 usec aka 4000 ints/s */
2605 		if (bytes > 25000) {
2606 			if (packets > 35)
2607 				retval = low_latency;
2608 		} else if (bytes < 6000) {
2609 			retval = low_latency;
2610 		}
2611 		break;
2612 	}
2613 
2614 update_itr_done:
2615 	return retval;
2616 }
2617 
2618 static void e1000_set_itr(struct e1000_adapter *adapter)
2619 {
2620 	struct e1000_hw *hw = &adapter->hw;
2621 	u16 current_itr;
2622 	u32 new_itr = adapter->itr;
2623 
2624 	if (unlikely(hw->mac_type < e1000_82540))
2625 		return;
2626 
2627 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2628 	if (unlikely(adapter->link_speed != SPEED_1000)) {
2629 		current_itr = 0;
2630 		new_itr = 4000;
2631 		goto set_itr_now;
2632 	}
2633 
2634 	adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2635 					   adapter->total_tx_packets,
2636 					   adapter->total_tx_bytes);
2637 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2638 	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2639 		adapter->tx_itr = low_latency;
2640 
2641 	adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2642 					   adapter->total_rx_packets,
2643 					   adapter->total_rx_bytes);
2644 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2645 	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2646 		adapter->rx_itr = low_latency;
2647 
2648 	current_itr = max(adapter->rx_itr, adapter->tx_itr);
2649 
2650 	switch (current_itr) {
2651 	/* counts and packets in update_itr are dependent on these numbers */
2652 	case lowest_latency:
2653 		new_itr = 70000;
2654 		break;
2655 	case low_latency:
2656 		new_itr = 20000; /* aka hwitr = ~200 */
2657 		break;
2658 	case bulk_latency:
2659 		new_itr = 4000;
2660 		break;
2661 	default:
2662 		break;
2663 	}
2664 
2665 set_itr_now:
2666 	if (new_itr != adapter->itr) {
2667 		/* this attempts to bias the interrupt rate towards Bulk
2668 		 * by adding intermediate steps when interrupt rate is
2669 		 * increasing
2670 		 */
2671 		new_itr = new_itr > adapter->itr ?
2672 			  min(adapter->itr + (new_itr >> 2), new_itr) :
2673 			  new_itr;
2674 		adapter->itr = new_itr;
2675 		ew32(ITR, 1000000000 / (new_itr * 256));
2676 	}
2677 }
2678 
2679 #define E1000_TX_FLAGS_CSUM		0x00000001
2680 #define E1000_TX_FLAGS_VLAN		0x00000002
2681 #define E1000_TX_FLAGS_TSO		0x00000004
2682 #define E1000_TX_FLAGS_IPV4		0x00000008
2683 #define E1000_TX_FLAGS_NO_FCS		0x00000010
2684 #define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
2685 #define E1000_TX_FLAGS_VLAN_SHIFT	16
2686 
2687 static int e1000_tso(struct e1000_adapter *adapter,
2688 		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2689 		     __be16 protocol)
2690 {
2691 	struct e1000_context_desc *context_desc;
2692 	struct e1000_tx_buffer *buffer_info;
2693 	unsigned int i;
2694 	u32 cmd_length = 0;
2695 	u16 ipcse = 0, tucse, mss;
2696 	u8 ipcss, ipcso, tucss, tucso, hdr_len;
2697 
2698 	if (skb_is_gso(skb)) {
2699 		int err;
2700 
2701 		err = skb_cow_head(skb, 0);
2702 		if (err < 0)
2703 			return err;
2704 
2705 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2706 		mss = skb_shinfo(skb)->gso_size;
2707 		if (protocol == htons(ETH_P_IP)) {
2708 			struct iphdr *iph = ip_hdr(skb);
2709 			iph->tot_len = 0;
2710 			iph->check = 0;
2711 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2712 								 iph->daddr, 0,
2713 								 IPPROTO_TCP,
2714 								 0);
2715 			cmd_length = E1000_TXD_CMD_IP;
2716 			ipcse = skb_transport_offset(skb) - 1;
2717 		} else if (skb_is_gso_v6(skb)) {
2718 			tcp_v6_gso_csum_prep(skb);
2719 			ipcse = 0;
2720 		}
2721 		ipcss = skb_network_offset(skb);
2722 		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2723 		tucss = skb_transport_offset(skb);
2724 		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2725 		tucse = 0;
2726 
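		/* The low 20 bits of cmd_and_length carry PAYLEN, the
		 * TSO payload size: everything after the headers.
		 */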
2727 		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2728 			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2729 
2730 		i = tx_ring->next_to_use;
2731 		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2732 		buffer_info = &tx_ring->buffer_info[i];
2733 
2734 		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2735 		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2736 		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2737 		context_desc->upper_setup.tcp_fields.tucss = tucss;
2738 		context_desc->upper_setup.tcp_fields.tucso = tucso;
2739 		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2740 		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2741 		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2742 		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2743 
2744 		buffer_info->time_stamp = jiffies;
2745 		buffer_info->next_to_watch = i;
2746 
2747 		if (++i == tx_ring->count)
2748 			i = 0;
2749 
2750 		tx_ring->next_to_use = i;
2751 
2752 		return true;
2753 	}
2754 	return false;
2755 }
2756 
2757 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2758 			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2759 			  __be16 protocol)
2760 {
2761 	struct e1000_context_desc *context_desc;
2762 	struct e1000_tx_buffer *buffer_info;
2763 	unsigned int i;
2764 	u8 css;
2765 	u32 cmd_len = E1000_TXD_CMD_DEXT;
2766 
2767 	if (skb->ip_summed != CHECKSUM_PARTIAL)
2768 		return false;
2769 
2770 	switch (protocol) {
2771 	case cpu_to_be16(ETH_P_IP):
2772 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2773 			cmd_len |= E1000_TXD_CMD_TCP;
2774 		break;
2775 	case cpu_to_be16(ETH_P_IPV6):
2776 		/* XXX not handling all IPV6 headers */
2777 		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2778 			cmd_len |= E1000_TXD_CMD_TCP;
2779 		break;
2780 	default:
2781 		if (unlikely(net_ratelimit()))
2782 			e_warn(drv, "checksum_partial proto=%x!\n",
2783 			       skb->protocol);
2784 		break;
2785 	}
2786 
2787 	css = skb_checksum_start_offset(skb);
2788 
2789 	i = tx_ring->next_to_use;
2790 	buffer_info = &tx_ring->buffer_info[i];
2791 	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2792 
2793 	context_desc->lower_setup.ip_config = 0;
2794 	context_desc->upper_setup.tcp_fields.tucss = css;
2795 	context_desc->upper_setup.tcp_fields.tucso =
2796 		css + skb->csum_offset;
2797 	context_desc->upper_setup.tcp_fields.tucse = 0;
2798 	context_desc->tcp_seg_setup.data = 0;
2799 	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2800 
2801 	buffer_info->time_stamp = jiffies;
2802 	buffer_info->next_to_watch = i;
2803 
2804 	if (unlikely(++i == tx_ring->count))
2805 		i = 0;
2806 
2807 	tx_ring->next_to_use = i;
2808 
2809 	return true;
2810 }
2811 
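/* Largest chunk of data one Tx descriptor may carry: 2^12 = 4096 bytes */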
2812 #define E1000_MAX_TXD_PWR	12
2813 #define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
2814 
2815 static int e1000_tx_map(struct e1000_adapter *adapter,
2816 			struct e1000_tx_ring *tx_ring,
2817 			struct sk_buff *skb, unsigned int first,
2818 			unsigned int max_per_txd, unsigned int nr_frags,
2819 			unsigned int mss)
2820 {
2821 	struct e1000_hw *hw = &adapter->hw;
2822 	struct pci_dev *pdev = adapter->pdev;
2823 	struct e1000_tx_buffer *buffer_info;
2824 	unsigned int len = skb_headlen(skb);
2825 	unsigned int offset = 0, size, count = 0, i;
2826 	unsigned int f, bytecount, segs;
2827 
2828 	i = tx_ring->next_to_use;
2829 
2830 	while (len) {
2831 		buffer_info = &tx_ring->buffer_info[i];
2832 		size = min(len, max_per_txd);
2833 		/* Workaround for Controller erratum --
2834 		 * descriptor for non-tso packet in a linear SKB that follows a
2835 		 * tso gets written back prematurely before the data is fully
2836 		 * DMA'd to the controller
2837 		 */
2838 		if (!skb->data_len && tx_ring->last_tx_tso &&
2839 		    !skb_is_gso(skb)) {
2840 			tx_ring->last_tx_tso = false;
2841 			size -= 4;
2842 		}
2843 
2844 		/* Workaround for premature desc write-backs
2845 		 * in TSO mode.  Append 4-byte sentinel desc
2846 		 */
2847 		if (unlikely(mss && !nr_frags && size == len && size > 8))
2848 			size -= 4;
		/* Work-around for errata 10; it applies to all
		 * controllers in PCI-X mode.
		 * The fix is to make sure that the first descriptor of
		 * a packet is smaller than 2048 - 16 - 16 (or 2016)
		 * bytes.
		 */
2854 		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2855 			     (size > 2015) && count == 0))
2856 			size = 2015;
2857 
2858 		/* Workaround for potential 82544 hang in PCI-X.  Avoid
2859 		 * terminating buffers within evenly-aligned dwords.
2860 		 */
2861 		if (unlikely(adapter->pcix_82544 &&
2862 		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2863 		   size > 4))
2864 			size -= 4;
2865 
2866 		buffer_info->length = size;
2867 		/* set time_stamp *before* dma to help avoid a possible race */
2868 		buffer_info->time_stamp = jiffies;
2869 		buffer_info->mapped_as_page = false;
2870 		buffer_info->dma = dma_map_single(&pdev->dev,
2871 						  skb->data + offset,
2872 						  size, DMA_TO_DEVICE);
2873 		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2874 			goto dma_error;
2875 		buffer_info->next_to_watch = i;
2876 
2877 		len -= size;
2878 		offset += size;
2879 		count++;
2880 		if (len) {
2881 			i++;
2882 			if (unlikely(i == tx_ring->count))
2883 				i = 0;
2884 		}
2885 	}
2886 
2887 	for (f = 0; f < nr_frags; f++) {
2888 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2889 
2890 		len = skb_frag_size(frag);
2891 		offset = 0;
2892 
2893 		while (len) {
2894 			unsigned long bufend;
2895 			i++;
2896 			if (unlikely(i == tx_ring->count))
2897 				i = 0;
2898 
2899 			buffer_info = &tx_ring->buffer_info[i];
2900 			size = min(len, max_per_txd);
2901 			/* Workaround for premature desc write-backs
2902 			 * in TSO mode.  Append 4-byte sentinel desc
2903 			 */
2904 			if (unlikely(mss && f == (nr_frags-1) &&
2905 			    size == len && size > 8))
2906 				size -= 4;
2907 			/* Workaround for potential 82544 hang in PCI-X.
2908 			 * Avoid terminating buffers within evenly-aligned
2909 			 * dwords.
2910 			 */
2911 			bufend = (unsigned long)
2912 				page_to_phys(skb_frag_page(frag));
2913 			bufend += offset + size - 1;
2914 			if (unlikely(adapter->pcix_82544 &&
2915 				     !(bufend & 4) &&
2916 				     size > 4))
2917 				size -= 4;
2918 
2919 			buffer_info->length = size;
2920 			buffer_info->time_stamp = jiffies;
2921 			buffer_info->mapped_as_page = true;
2922 			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2923 						offset, size, DMA_TO_DEVICE);
2924 			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2925 				goto dma_error;
2926 			buffer_info->next_to_watch = i;
2927 
2928 			len -= size;
2929 			offset += size;
2930 			count++;
2931 		}
2932 	}
2933 
2934 	segs = skb_shinfo(skb)->gso_segs ?: 1;
2935 	/* multiply data chunks by size of headers */
2936 	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2937 
2938 	tx_ring->buffer_info[i].skb = skb;
2939 	tx_ring->buffer_info[i].segs = segs;
2940 	tx_ring->buffer_info[i].bytecount = bytecount;
2941 	tx_ring->buffer_info[first].next_to_watch = i;
2942 
2943 	return count;
2944 
2945 dma_error:
2946 	dev_err(&pdev->dev, "TX DMA map failed\n");
2947 	buffer_info->dma = 0;
2948 	if (count)
2949 		count--;
2950 
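	/* Unwind: walk back over the descriptors already mapped and
	 * release their DMA mappings (and any attached skb).
	 */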
2951 	while (count--) {
2952 		if (i == 0)
2953 			i += tx_ring->count;
2954 		i--;
2955 		buffer_info = &tx_ring->buffer_info[i];
2956 		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2957 	}
2958 
2959 	return 0;
2960 }
2961 
2962 static void e1000_tx_queue(struct e1000_adapter *adapter,
2963 			   struct e1000_tx_ring *tx_ring, int tx_flags,
2964 			   int count)
2965 {
2966 	struct e1000_tx_desc *tx_desc = NULL;
2967 	struct e1000_tx_buffer *buffer_info;
2968 	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2969 	unsigned int i;
2970 
2971 	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2972 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2973 			     E1000_TXD_CMD_TSE;
2974 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2975 
2976 		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2977 			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2978 	}
2979 
2980 	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2981 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2982 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2983 	}
2984 
2985 	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2986 		txd_lower |= E1000_TXD_CMD_VLE;
2987 		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2988 	}
2989 
2990 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
2991 		txd_lower &= ~(E1000_TXD_CMD_IFCS);
2992 
2993 	i = tx_ring->next_to_use;
2994 
2995 	while (count--) {
2996 		buffer_info = &tx_ring->buffer_info[i];
2997 		tx_desc = E1000_TX_DESC(*tx_ring, i);
2998 		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
2999 		tx_desc->lower.data =
3000 			cpu_to_le32(txd_lower | buffer_info->length);
3001 		tx_desc->upper.data = cpu_to_le32(txd_upper);
3002 		if (unlikely(++i == tx_ring->count))
3003 			i = 0;
3004 	}
3005 
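	/* Only the packet's last descriptor gets the EOP/report-status
	 * bits cached in txd_cmd, so write-back happens once per
	 * packet rather than once per buffer.
	 */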
3006 	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3007 
3008 	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3009 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3010 		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3011 
3012 	/* Force memory writes to complete before letting h/w
3013 	 * know there are new descriptors to fetch.  (Only
3014 	 * applicable for weak-ordered memory model archs,
3015 	 * such as IA-64).
3016 	 */
3017 	dma_wmb();
3018 
3019 	tx_ring->next_to_use = i;
3020 }
3021 
3022 /* 82547 workaround to avoid controller hang in half-duplex environment.
3023  * The workaround is to avoid queuing a large packet that would span
3024  * the internal Tx FIFO ring boundary by notifying the stack to resend
3025  * the packet at a later time.  This gives the Tx FIFO an opportunity to
3026  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3027  * to the beginning of the Tx FIFO.
3028  */
3029 
3030 #define E1000_FIFO_HDR			0x10
3031 #define E1000_82547_PAD_LEN		0x3E0
3032 
3033 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3034 				       struct sk_buff *skb)
3035 {
3036 	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3037 	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3038 
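	/* Account for the header the controller prepends to each
	 * packet in the Tx FIFO; FIFO usage is tracked in
	 * E1000_FIFO_HDR (16-byte) granules.
	 */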
3039 	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3040 
3041 	if (adapter->link_duplex != HALF_DUPLEX)
3042 		goto no_fifo_stall_required;
3043 
3044 	if (atomic_read(&adapter->tx_fifo_stall))
3045 		return 1;
3046 
3047 	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3048 		atomic_set(&adapter->tx_fifo_stall, 1);
3049 		return 1;
3050 	}
3051 
3052 no_fifo_stall_required:
3053 	adapter->tx_fifo_head += skb_fifo_len;
3054 	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3055 		adapter->tx_fifo_head -= adapter->tx_fifo_size;
3056 	return 0;
3057 }
3058 
3059 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3060 {
3061 	struct e1000_adapter *adapter = netdev_priv(netdev);
3062 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3063 
3064 	netif_stop_queue(netdev);
3065 	/* Herbert's original patch had:
3066 	 *  smp_mb__after_netif_stop_queue();
3067 	 * but since that doesn't exist yet, just open code it.
3068 	 */
3069 	smp_mb();
3070 
3071 	/* We need to check again in a case another CPU has just
3072 	 * made room available.
3073 	 */
3074 	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3075 		return -EBUSY;
3076 
3077 	/* A reprieve! */
3078 	netif_start_queue(netdev);
3079 	++adapter->restart_queue;
3080 	return 0;
3081 }
3082 
3083 static int e1000_maybe_stop_tx(struct net_device *netdev,
3084 			       struct e1000_tx_ring *tx_ring, int size)
3085 {
3086 	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3087 		return 0;
3088 	return __e1000_maybe_stop_tx(netdev, size);
3089 }
3090 
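/* Descriptors needed for S bytes at 2^X bytes per descriptor, i.e.
 * DIV_ROUND_UP(S, 1 << X)
 */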
3091 #define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
3092 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3093 				    struct net_device *netdev)
3094 {
3095 	struct e1000_adapter *adapter = netdev_priv(netdev);
3096 	struct e1000_hw *hw = &adapter->hw;
3097 	struct e1000_tx_ring *tx_ring;
3098 	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3099 	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3100 	unsigned int tx_flags = 0;
3101 	unsigned int len = skb_headlen(skb);
3102 	unsigned int nr_frags;
3103 	unsigned int mss;
3104 	int count = 0;
3105 	int tso;
3106 	unsigned int f;
3107 	__be16 protocol = vlan_get_protocol(skb);
3108 
3109 	/* This goes back to the question of how to logically map a Tx queue
3110 	 * to a flow.  Right now, performance is impacted slightly negatively
3111 	 * if using multiple Tx queues.  If the stack breaks away from a
3112 	 * single qdisc implementation, we can look at this again.
3113 	 */
3114 	tx_ring = adapter->tx_ring;
3115 
3116 	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3117 	 * packets may get corrupted during padding by HW.
	 * To work around this issue, pad all small packets manually.
3119 	 */
3120 	if (eth_skb_pad(skb))
3121 		return NETDEV_TX_OK;
3122 
3123 	mss = skb_shinfo(skb)->gso_size;
	/* The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer.  It requires that
	 * ceil(buffer len / mss) <= 4, so to make sure we don't
	 * overrun the FIFO, cap the max buffer len at four times
	 * the MSS if the MSS drops.
	 */
3131 	if (mss) {
3132 		u8 hdr_len;
3133 		max_per_txd = min(mss << 2, max_per_txd);
3134 		max_txd_pwr = fls(max_per_txd) - 1;
3135 
3136 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3137 		if (skb->data_len && hdr_len == len) {
3138 			switch (hw->mac_type) {
3139 			case e1000_82544: {
3140 				unsigned int pull_size;
3141 
3142 				/* Make sure we have room to chop off 4 bytes,
3143 				 * and that the end alignment will work out to
3144 				 * this hardware's requirements
3145 				 * NOTE: this is a TSO only workaround
3146 				 * if end byte alignment not correct move us
3147 				 * into the next dword
3148 				 */
3149 				if ((unsigned long)(skb_tail_pointer(skb) - 1)
3150 				    & 4)
3151 					break;
3153 				pull_size = min((unsigned int)4, skb->data_len);
3154 				if (!__pskb_pull_tail(skb, pull_size)) {
3155 					e_err(drv, "__pskb_pull_tail "
3156 					      "failed.\n");
3157 					dev_kfree_skb_any(skb);
3158 					return NETDEV_TX_OK;
3159 				}
3160 				len = skb_headlen(skb);
3161 				break;
3162 			}
3163 			default:
3164 				/* do nothing */
3165 				break;
3166 			}
3167 		}
3168 	}
3169 
3170 	/* reserve a descriptor for the offload context */
3171 	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3172 		count++;
3173 	count++;
3174 
3175 	/* Controller Erratum workaround */
3176 	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3177 		count++;
3178 
3179 	count += TXD_USE_COUNT(len, max_txd_pwr);
3180 
3181 	if (adapter->pcix_82544)
3182 		count++;
3183 
	/* Work-around for errata 10; it applies to all controllers
	 * in PCI-X mode, so add one more descriptor to the count.
	 */
3187 	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3188 			(len > 2015)))
3189 		count++;
3190 
3191 	nr_frags = skb_shinfo(skb)->nr_frags;
3192 	for (f = 0; f < nr_frags; f++)
3193 		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3194 				       max_txd_pwr);
3195 	if (adapter->pcix_82544)
3196 		count += nr_frags;
3197 
3198 	/* need: count + 2 desc gap to keep tail from touching
3199 	 * head, otherwise try next time
3200 	 */
3201 	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3202 		return NETDEV_TX_BUSY;
3203 
3204 	if (unlikely((hw->mac_type == e1000_82547) &&
3205 		     (e1000_82547_fifo_workaround(adapter, skb)))) {
3206 		netif_stop_queue(netdev);
3207 		if (!test_bit(__E1000_DOWN, &adapter->flags))
3208 			schedule_delayed_work(&adapter->fifo_stall_task, 1);
3209 		return NETDEV_TX_BUSY;
3210 	}
3211 
3212 	if (skb_vlan_tag_present(skb)) {
3213 		tx_flags |= E1000_TX_FLAGS_VLAN;
3214 		tx_flags |= (skb_vlan_tag_get(skb) <<
3215 			     E1000_TX_FLAGS_VLAN_SHIFT);
3216 	}
3217 
3218 	first = tx_ring->next_to_use;
3219 
3220 	tso = e1000_tso(adapter, tx_ring, skb, protocol);
3221 	if (tso < 0) {
3222 		dev_kfree_skb_any(skb);
3223 		return NETDEV_TX_OK;
3224 	}
3225 
3226 	if (likely(tso)) {
3227 		if (likely(hw->mac_type != e1000_82544))
3228 			tx_ring->last_tx_tso = true;
3229 		tx_flags |= E1000_TX_FLAGS_TSO;
3230 	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3231 		tx_flags |= E1000_TX_FLAGS_CSUM;
3232 
3233 	if (protocol == htons(ETH_P_IP))
3234 		tx_flags |= E1000_TX_FLAGS_IPV4;
3235 
3236 	if (unlikely(skb->no_fcs))
3237 		tx_flags |= E1000_TX_FLAGS_NO_FCS;
3238 
3239 	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3240 			     nr_frags, mss);
3241 
3242 	if (count) {
		/* The number of descriptors needed is higher than in
		 * other Intel drivers due to a number of workarounds.
		 * The breakdown is below:
3245 		 * Data descriptors: MAX_SKB_FRAGS + 1
3246 		 * Context Descriptor: 1
3247 		 * Keep head from touching tail: 2
3248 		 * Workarounds: 3
3249 		 */
3250 		int desc_needed = MAX_SKB_FRAGS + 7;
3251 
3252 		netdev_sent_queue(netdev, skb->len);
3253 		skb_tx_timestamp(skb);
3254 
3255 		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3256 
3257 		/* 82544 potentially requires twice as many data descriptors
3258 		 * in order to guarantee buffers don't end on evenly-aligned
3259 		 * dwords
3260 		 */
3261 		if (adapter->pcix_82544)
3262 			desc_needed += MAX_SKB_FRAGS + 1;
3263 
3264 		/* Make sure there is space in the ring for the next send. */
3265 		e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3266 
3267 		if (!netdev_xmit_more() ||
3268 		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3269 			writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3270 		}
3271 	} else {
3272 		dev_kfree_skb_any(skb);
3273 		tx_ring->buffer_info[first].time_stamp = 0;
3274 		tx_ring->next_to_use = first;
3275 	}
3276 
3277 	return NETDEV_TX_OK;
3278 }
3279 
3280 #define NUM_REGS 38 /* 1 based count */
3281 static void e1000_regdump(struct e1000_adapter *adapter)
3282 {
3283 	struct e1000_hw *hw = &adapter->hw;
3284 	u32 regs[NUM_REGS];
3285 	u32 *regs_buff = regs;
3286 	int i = 0;
3287 
3288 	static const char * const reg_name[] = {
3289 		"CTRL",  "STATUS",
3290 		"RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3291 		"TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3292 		"TIDV", "TXDCTL", "TADV", "TARC0",
3293 		"TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3294 		"TXDCTL1", "TARC1",
3295 		"CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3296 		"TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3297 		"RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3298 	};
3299 
3300 	regs_buff[0]  = er32(CTRL);
3301 	regs_buff[1]  = er32(STATUS);
3302 
3303 	regs_buff[2]  = er32(RCTL);
3304 	regs_buff[3]  = er32(RDLEN);
3305 	regs_buff[4]  = er32(RDH);
3306 	regs_buff[5]  = er32(RDT);
3307 	regs_buff[6]  = er32(RDTR);
3308 
3309 	regs_buff[7]  = er32(TCTL);
3310 	regs_buff[8]  = er32(TDBAL);
3311 	regs_buff[9]  = er32(TDBAH);
3312 	regs_buff[10] = er32(TDLEN);
3313 	regs_buff[11] = er32(TDH);
3314 	regs_buff[12] = er32(TDT);
3315 	regs_buff[13] = er32(TIDV);
3316 	regs_buff[14] = er32(TXDCTL);
3317 	regs_buff[15] = er32(TADV);
3318 	regs_buff[16] = er32(TARC0);
3319 
3320 	regs_buff[17] = er32(TDBAL1);
3321 	regs_buff[18] = er32(TDBAH1);
3322 	regs_buff[19] = er32(TDLEN1);
3323 	regs_buff[20] = er32(TDH1);
3324 	regs_buff[21] = er32(TDT1);
3325 	regs_buff[22] = er32(TXDCTL1);
3326 	regs_buff[23] = er32(TARC1);
3327 	regs_buff[24] = er32(CTRL_EXT);
3328 	regs_buff[25] = er32(ERT);
3329 	regs_buff[26] = er32(RDBAL0);
3330 	regs_buff[27] = er32(RDBAH0);
3331 	regs_buff[28] = er32(TDFH);
3332 	regs_buff[29] = er32(TDFT);
3333 	regs_buff[30] = er32(TDFHS);
3334 	regs_buff[31] = er32(TDFTS);
3335 	regs_buff[32] = er32(TDFPC);
3336 	regs_buff[33] = er32(RDFH);
3337 	regs_buff[34] = er32(RDFT);
3338 	regs_buff[35] = er32(RDFHS);
3339 	regs_buff[36] = er32(RDFTS);
3340 	regs_buff[37] = er32(RDFPC);
3341 
3342 	pr_info("Register dump\n");
3343 	for (i = 0; i < NUM_REGS; i++)
3344 		pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3345 }
3346 
/**
 * e1000_dump - Print registers, Tx ring and Rx ring
 * @adapter: board private structure
 **/
3350 static void e1000_dump(struct e1000_adapter *adapter)
3351 {
3352 	/* this code doesn't handle multiple rings */
3353 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3354 	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3355 	int i;
3356 
3357 	if (!netif_msg_hw(adapter))
3358 		return;
3359 
3360 	/* Print Registers */
3361 	e1000_regdump(adapter);
3362 
3363 	/* transmit dump */
3364 	pr_info("TX Desc ring0 dump\n");
3365 
3366 	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3367 	 *
3368 	 * Legacy Transmit Descriptor
3369 	 *   +--------------------------------------------------------------+
3370 	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3371 	 *   +--------------------------------------------------------------+
3372 	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3373 	 *   +--------------------------------------------------------------+
3374 	 *   63       48 47        36 35    32 31     24 23    16 15        0
3375 	 *
3376 	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3377 	 *   63      48 47    40 39       32 31             16 15    8 7      0
3378 	 *   +----------------------------------------------------------------+
3379 	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
3380 	 *   +----------------------------------------------------------------+
3381 	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3382 	 *   +----------------------------------------------------------------+
3383 	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3384 	 *
3385 	 * Extended Data Descriptor (DTYP=0x1)
3386 	 *   +----------------------------------------------------------------+
3387 	 * 0 |                     Buffer Address [63:0]                      |
3388 	 *   +----------------------------------------------------------------+
3389 	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3390 	 *   +----------------------------------------------------------------+
3391 	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3392 	 */
3393 	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3394 	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3395 
3396 	if (!netif_msg_tx_done(adapter))
3397 		goto rx_ring_summary;
3398 
3399 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3400 		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3401 		struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
3402 		struct my_u { __le64 a; __le64 b; };
3403 		struct my_u *u = (struct my_u *)tx_desc;
3404 		const char *type;
3405 
3406 		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3407 			type = "NTC/U";
3408 		else if (i == tx_ring->next_to_use)
3409 			type = "NTU";
3410 		else if (i == tx_ring->next_to_clean)
3411 			type = "NTC";
3412 		else
3413 			type = "";
3414 
3415 		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3416 			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3417 			le64_to_cpu(u->a), le64_to_cpu(u->b),
3418 			(u64)buffer_info->dma, buffer_info->length,
3419 			buffer_info->next_to_watch,
3420 			(u64)buffer_info->time_stamp, buffer_info->skb, type);
3421 	}
3422 
3423 rx_ring_summary:
3424 	/* receive dump */
3425 	pr_info("\nRX Desc ring dump\n");
3426 
3427 	/* Legacy Receive Descriptor Format
3428 	 *
3429 	 * +-----------------------------------------------------+
3430 	 * |                Buffer Address [63:0]                |
3431 	 * +-----------------------------------------------------+
3432 	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3433 	 * +-----------------------------------------------------+
3434 	 * 63       48 47    40 39      32 31         16 15      0
3435 	 */
3436 	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3437 
3438 	if (!netif_msg_rx_status(adapter))
3439 		goto exit;
3440 
3441 	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3442 		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3443 		struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
3444 		struct my_u { __le64 a; __le64 b; };
3445 		struct my_u *u = (struct my_u *)rx_desc;
3446 		const char *type;
3447 
3448 		if (i == rx_ring->next_to_use)
3449 			type = "NTU";
3450 		else if (i == rx_ring->next_to_clean)
3451 			type = "NTC";
3452 		else
3453 			type = "";
3454 
3455 		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3456 			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3457 			(u64)buffer_info->dma, buffer_info->rxbuf.data, type);
3458 	} /* for */
3459 
3460 	/* dump the descriptor caches */
3461 	/* rx */
3462 	pr_info("Rx descriptor cache in 64bit format\n");
3463 	for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3464 		pr_info("R%04X: %08X|%08X %08X|%08X\n",
3465 			i,
3466 			readl(adapter->hw.hw_addr + i+4),
3467 			readl(adapter->hw.hw_addr + i),
3468 			readl(adapter->hw.hw_addr + i+12),
3469 			readl(adapter->hw.hw_addr + i+8));
3470 	}
3471 	/* tx */
3472 	pr_info("Tx descriptor cache in 64bit format\n");
3473 	for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3474 		pr_info("T%04X: %08X|%08X %08X|%08X\n",
3475 			i,
3476 			readl(adapter->hw.hw_addr + i+4),
3477 			readl(adapter->hw.hw_addr + i),
3478 			readl(adapter->hw.hw_addr + i+12),
3479 			readl(adapter->hw.hw_addr + i+8));
3480 	}
3481 exit:
3482 	return;
3483 }
3484 
3485 /**
3486  * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: number of the Tx queue that hung (unused)
 **/
3489 static void e1000_tx_timeout(struct net_device *netdev, unsigned int txqueue)
3490 {
3491 	struct e1000_adapter *adapter = netdev_priv(netdev);
3492 
3493 	/* Do the reset outside of interrupt context */
3494 	adapter->tx_timeout_count++;
3495 	schedule_work(&adapter->reset_task);
3496 }
3497 
3498 static void e1000_reset_task(struct work_struct *work)
3499 {
3500 	struct e1000_adapter *adapter =
3501 		container_of(work, struct e1000_adapter, reset_task);
3502 
3503 	e_err(drv, "Reset adapter\n");
3504 	e1000_reinit_locked(adapter);
3505 }
3506 
3507 /**
3508  * e1000_change_mtu - Change the Maximum Transfer Unit
3509  * @netdev: network interface device structure
3510  * @new_mtu: new value for maximum frame size
3511  *
3512  * Returns 0 on success, negative on failure
3513  **/
3514 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3515 {
3516 	struct e1000_adapter *adapter = netdev_priv(netdev);
3517 	struct e1000_hw *hw = &adapter->hw;
3518 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3519 
3520 	/* Adapter-specific max frame size limits. */
3521 	switch (hw->mac_type) {
3522 	case e1000_undefined ... e1000_82542_rev2_1:
3523 		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3524 			e_err(probe, "Jumbo Frames not supported.\n");
3525 			return -EINVAL;
3526 		}
3527 		break;
3528 	default:
3529 		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3530 		break;
3531 	}
3532 
3533 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3534 		msleep(1);
3535 	/* e1000_down has a dependency on max_frame_size */
3536 	hw->max_frame_size = max_frame;
3537 	if (netif_running(netdev)) {
3538 		/* prevent buffers from being reallocated */
3539 		adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
3540 		e1000_down(adapter);
3541 	}
3542 
	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more; this pushes us to allocate from the next
	 * larger slab size, i.e. RXBUFFER_2048 --> size-4096 slab.
	 * However, with the new *_jumbo_rx* routines, jumbo receives use
	 * fragmented skbs.
	 */
3550 
3551 	if (max_frame <= E1000_RXBUFFER_2048)
3552 		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3553 	else
3554 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3555 		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3556 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3557 		adapter->rx_buffer_len = PAGE_SIZE;
3558 #endif
3559 
3560 	/* adjust allocation if LPE protects us, and we aren't using SBP */
3561 	if (!hw->tbi_compatibility_on &&
3562 	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3563 	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3564 		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3565 
3566 	netdev_dbg(netdev, "changing MTU from %d to %d\n",
3567 		   netdev->mtu, new_mtu);
3568 	netdev->mtu = new_mtu;
3569 
3570 	if (netif_running(netdev))
3571 		e1000_up(adapter);
3572 	else
3573 		e1000_reset(adapter);
3574 
3575 	clear_bit(__E1000_RESETTING, &adapter->flags);
3576 
3577 	return 0;
3578 }
3579 
3580 /**
3581  * e1000_update_stats - Update the board statistics counters
3582  * @adapter: board private structure
3583  **/
3584 void e1000_update_stats(struct e1000_adapter *adapter)
3585 {
3586 	struct net_device *netdev = adapter->netdev;
3587 	struct e1000_hw *hw = &adapter->hw;
3588 	struct pci_dev *pdev = adapter->pdev;
3589 	unsigned long flags;
3590 	u16 phy_tmp;
3591 
3592 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3593 
3594 	/* Prevent stats update while adapter is being reset, or if the pci
3595 	 * connection is down.
3596 	 */
3597 	if (adapter->link_speed == 0)
3598 		return;
3599 	if (pci_channel_offline(pdev))
3600 		return;
3601 
3602 	spin_lock_irqsave(&adapter->stats_lock, flags);
3603 
3604 	/* these counters are modified from e1000_tbi_adjust_stats,
3605 	 * called from the interrupt context, so they must only
3606 	 * be written while holding adapter->stats_lock
3607 	 */
3608 
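	/* Most of these statistics registers are clear-on-read; 64-bit
	 * counters are split across low/high register pairs such as
	 * GORCL/GORCH (Good Octets Received).
	 */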
3609 	adapter->stats.crcerrs += er32(CRCERRS);
3610 	adapter->stats.gprc += er32(GPRC);
3611 	adapter->stats.gorcl += er32(GORCL);
3612 	adapter->stats.gorch += er32(GORCH);
3613 	adapter->stats.bprc += er32(BPRC);
3614 	adapter->stats.mprc += er32(MPRC);
3615 	adapter->stats.roc += er32(ROC);
3616 
3617 	adapter->stats.prc64 += er32(PRC64);
3618 	adapter->stats.prc127 += er32(PRC127);
3619 	adapter->stats.prc255 += er32(PRC255);
3620 	adapter->stats.prc511 += er32(PRC511);
3621 	adapter->stats.prc1023 += er32(PRC1023);
3622 	adapter->stats.prc1522 += er32(PRC1522);
3623 
3624 	adapter->stats.symerrs += er32(SYMERRS);
3625 	adapter->stats.mpc += er32(MPC);
3626 	adapter->stats.scc += er32(SCC);
3627 	adapter->stats.ecol += er32(ECOL);
3628 	adapter->stats.mcc += er32(MCC);
3629 	adapter->stats.latecol += er32(LATECOL);
3630 	adapter->stats.dc += er32(DC);
3631 	adapter->stats.sec += er32(SEC);
3632 	adapter->stats.rlec += er32(RLEC);
3633 	adapter->stats.xonrxc += er32(XONRXC);
3634 	adapter->stats.xontxc += er32(XONTXC);
3635 	adapter->stats.xoffrxc += er32(XOFFRXC);
3636 	adapter->stats.xofftxc += er32(XOFFTXC);
3637 	adapter->stats.fcruc += er32(FCRUC);
3638 	adapter->stats.gptc += er32(GPTC);
3639 	adapter->stats.gotcl += er32(GOTCL);
3640 	adapter->stats.gotch += er32(GOTCH);
3641 	adapter->stats.rnbc += er32(RNBC);
3642 	adapter->stats.ruc += er32(RUC);
3643 	adapter->stats.rfc += er32(RFC);
3644 	adapter->stats.rjc += er32(RJC);
3645 	adapter->stats.torl += er32(TORL);
3646 	adapter->stats.torh += er32(TORH);
3647 	adapter->stats.totl += er32(TOTL);
3648 	adapter->stats.toth += er32(TOTH);
3649 	adapter->stats.tpr += er32(TPR);
3650 
3651 	adapter->stats.ptc64 += er32(PTC64);
3652 	adapter->stats.ptc127 += er32(PTC127);
3653 	adapter->stats.ptc255 += er32(PTC255);
3654 	adapter->stats.ptc511 += er32(PTC511);
3655 	adapter->stats.ptc1023 += er32(PTC1023);
3656 	adapter->stats.ptc1522 += er32(PTC1522);
3657 
3658 	adapter->stats.mptc += er32(MPTC);
3659 	adapter->stats.bptc += er32(BPTC);
3660 
3661 	/* used for adaptive IFS */
3662 
3663 	hw->tx_packet_delta = er32(TPT);
3664 	adapter->stats.tpt += hw->tx_packet_delta;
3665 	hw->collision_delta = er32(COLC);
3666 	adapter->stats.colc += hw->collision_delta;
3667 
3668 	if (hw->mac_type >= e1000_82543) {
3669 		adapter->stats.algnerrc += er32(ALGNERRC);
3670 		adapter->stats.rxerrc += er32(RXERRC);
3671 		adapter->stats.tncrs += er32(TNCRS);
3672 		adapter->stats.cexterr += er32(CEXTERR);
3673 		adapter->stats.tsctc += er32(TSCTC);
3674 		adapter->stats.tsctfc += er32(TSCTFC);
3675 	}
3676 
3677 	/* Fill out the OS statistics structure */
3678 	netdev->stats.multicast = adapter->stats.mprc;
3679 	netdev->stats.collisions = adapter->stats.colc;
3680 
3681 	/* Rx Errors */
3682 
3683 	/* RLEC on some newer hardware can be incorrect so build
3684 	 * our own version based on RUC and ROC
3685 	 */
3686 	netdev->stats.rx_errors = adapter->stats.rxerrc +
3687 		adapter->stats.crcerrs + adapter->stats.algnerrc +
3688 		adapter->stats.ruc + adapter->stats.roc +
3689 		adapter->stats.cexterr;
3690 	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3691 	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3692 	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3693 	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3694 	netdev->stats.rx_missed_errors = adapter->stats.mpc;
3695 
3696 	/* Tx Errors */
3697 	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3698 	netdev->stats.tx_errors = adapter->stats.txerrc;
3699 	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3700 	netdev->stats.tx_window_errors = adapter->stats.latecol;
3701 	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3702 	if (hw->bad_tx_carr_stats_fd &&
3703 	    adapter->link_duplex == FULL_DUPLEX) {
3704 		netdev->stats.tx_carrier_errors = 0;
3705 		adapter->stats.tncrs = 0;
3706 	}
3707 
3708 	/* Tx Dropped needs to be maintained elsewhere */
3709 
3710 	/* Phy Stats */
3711 	if (hw->media_type == e1000_media_type_copper) {
3712 		if ((adapter->link_speed == SPEED_1000) &&
3713 		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3714 			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3715 			adapter->phy_stats.idle_errors += phy_tmp;
3716 		}
3717 
3718 		if ((hw->mac_type <= e1000_82546) &&
3719 		   (hw->phy_type == e1000_phy_m88) &&
3720 		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3721 			adapter->phy_stats.receive_errors += phy_tmp;
3722 	}
3723 
3724 	/* Management Stats */
3725 	if (hw->has_smbus) {
3726 		adapter->stats.mgptc += er32(MGTPTC);
3727 		adapter->stats.mgprc += er32(MGTPRC);
3728 		adapter->stats.mgpdc += er32(MGTPDC);
3729 	}
3730 
3731 	spin_unlock_irqrestore(&adapter->stats_lock, flags);
3732 }
3733 
3734 /**
3735  * e1000_intr - Interrupt Handler
3736  * @irq: interrupt number
3737  * @data: pointer to a network interface device structure
3738  **/
3739 static irqreturn_t e1000_intr(int irq, void *data)
3740 {
3741 	struct net_device *netdev = data;
3742 	struct e1000_adapter *adapter = netdev_priv(netdev);
3743 	struct e1000_hw *hw = &adapter->hw;
3744 	u32 icr = er32(ICR);
3745 
	if (unlikely(!icr))
3747 		return IRQ_NONE;  /* Not our interrupt */
3748 
3749 	/* we might have caused the interrupt, but the above
3750 	 * read cleared it, and just in case the driver is
3751 	 * down there is nothing to do so return handled
3752 	 */
3753 	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3754 		return IRQ_HANDLED;
3755 
3756 	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3757 		hw->get_link_status = 1;
3758 		/* guard against interrupt when we're going down */
3759 		if (!test_bit(__E1000_DOWN, &adapter->flags))
3760 			schedule_delayed_work(&adapter->watchdog_task, 1);
3761 	}
3762 
3763 	/* disable interrupts, without the synchronize_irq bit */
3764 	ew32(IMC, ~0);
3765 	E1000_WRITE_FLUSH();
3766 
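	/* Interrupts stay masked until the NAPI poll has run;
	 * e1000_clean() re-enables them via e1000_irq_enable() once the
	 * ring work is done.
	 */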
3767 	if (likely(napi_schedule_prep(&adapter->napi))) {
3768 		adapter->total_tx_bytes = 0;
3769 		adapter->total_tx_packets = 0;
3770 		adapter->total_rx_bytes = 0;
3771 		adapter->total_rx_packets = 0;
3772 		__napi_schedule(&adapter->napi);
3773 	} else {
3774 		/* this really should not happen! if it does it is basically a
3775 		 * bug, but not a hard error, so enable ints and continue
3776 		 */
3777 		if (!test_bit(__E1000_DOWN, &adapter->flags))
3778 			e1000_irq_enable(adapter);
3779 	}
3780 
3781 	return IRQ_HANDLED;
3782 }
3783 
3784 /**
3785  * e1000_clean - NAPI Rx polling callback
3786  * @adapter: board private structure
 * @napi: napi struct containing our adapter
 * @budget: maximum amount of Rx work allowed this poll
3788 static int e1000_clean(struct napi_struct *napi, int budget)
3789 {
3790 	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3791 						     napi);
3792 	int tx_clean_complete = 0, work_done = 0;
3793 
3794 	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3795 
3796 	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3797 
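	/* If Tx was not fully cleaned, or Rx used the entire budget, return
	 * the full budget so NAPI keeps polling instead of re-enabling
	 * interrupts.
	 */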
3798 	if (!tx_clean_complete || work_done == budget)
3799 		return budget;
3800 
3801 	/* Exit the polling mode, but don't re-enable interrupts if stack might
3802 	 * poll us due to busy-polling
3803 	 */
3804 	if (likely(napi_complete_done(napi, work_done))) {
3805 		if (likely(adapter->itr_setting & 3))
3806 			e1000_set_itr(adapter);
3807 		if (!test_bit(__E1000_DOWN, &adapter->flags))
3808 			e1000_irq_enable(adapter);
3809 	}
3810 
3811 	return work_done;
3812 }
3813 
3814 /**
3815  * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: ring to clean
3817  **/
3818 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3819 			       struct e1000_tx_ring *tx_ring)
3820 {
3821 	struct e1000_hw *hw = &adapter->hw;
3822 	struct net_device *netdev = adapter->netdev;
3823 	struct e1000_tx_desc *tx_desc, *eop_desc;
3824 	struct e1000_tx_buffer *buffer_info;
3825 	unsigned int i, eop;
3826 	unsigned int count = 0;
3827 	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3828 	unsigned int bytes_compl = 0, pkts_compl = 0;
3829 
3830 	i = tx_ring->next_to_clean;
3831 	eop = tx_ring->buffer_info[i].next_to_watch;
3832 	eop_desc = E1000_TX_DESC(*tx_ring, eop);
3833 
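	/* Walk completed packets: the outer loop advances packet by packet
	 * via next_to_watch (the EOP descriptor), while the inner loop
	 * frees every buffer belonging to the current packet.
	 */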
3834 	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3835 	       (count < tx_ring->count)) {
3836 		bool cleaned = false;
3837 		dma_rmb();	/* read buffer_info after eop_desc */
3838 		for ( ; !cleaned; count++) {
3839 			tx_desc = E1000_TX_DESC(*tx_ring, i);
3840 			buffer_info = &tx_ring->buffer_info[i];
3841 			cleaned = (i == eop);
3842 
3843 			if (cleaned) {
3844 				total_tx_packets += buffer_info->segs;
3845 				total_tx_bytes += buffer_info->bytecount;
3846 				if (buffer_info->skb) {
3847 					bytes_compl += buffer_info->skb->len;
3848 					pkts_compl++;
3849 				}
3850 
3851 			}
3852 			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3853 			tx_desc->upper.data = 0;
3854 
3855 			if (unlikely(++i == tx_ring->count))
3856 				i = 0;
3857 		}
3858 
3859 		eop = tx_ring->buffer_info[i].next_to_watch;
3860 		eop_desc = E1000_TX_DESC(*tx_ring, eop);
3861 	}
3862 
3863 	/* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
3864 	 * which will reuse the cleaned buffers.
3865 	 */
3866 	smp_store_release(&tx_ring->next_to_clean, i);
3867 
3868 	netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3869 
3870 #define TX_WAKE_THRESHOLD 32
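	/* Only wake the queue once a healthy number of descriptors is free
	 * again, so a nearly full ring does not bounce the queue between
	 * stopped and started on every cleaned packet.
	 */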
3871 	if (unlikely(count && netif_carrier_ok(netdev) &&
3872 		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3873 		/* Make sure that anybody stopping the queue after this
3874 		 * sees the new next_to_clean.
3875 		 */
3876 		smp_mb();
3877 
3878 		if (netif_queue_stopped(netdev) &&
3879 		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
3880 			netif_wake_queue(netdev);
3881 			++adapter->restart_queue;
3882 		}
3883 	}
3884 
3885 	if (adapter->detect_tx_hung) {
3886 		/* Detect a transmit hang in hardware, this serializes the
3887 		 * check with the clearing of time_stamp and movement of i
3888 		 */
3889 		adapter->detect_tx_hung = false;
3890 		if (tx_ring->buffer_info[eop].time_stamp &&
3891 		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3892 			       (adapter->tx_timeout_factor * HZ)) &&
3893 		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3894 
3895 			/* detected Tx unit hang */
3896 			e_err(drv, "Detected Tx Unit Hang\n"
3897 			      "  Tx Queue             <%lu>\n"
3898 			      "  TDH                  <%x>\n"
3899 			      "  TDT                  <%x>\n"
3900 			      "  next_to_use          <%x>\n"
3901 			      "  next_to_clean        <%x>\n"
3902 			      "buffer_info[next_to_clean]\n"
3903 			      "  time_stamp           <%lx>\n"
3904 			      "  next_to_watch        <%x>\n"
3905 			      "  jiffies              <%lx>\n"
3906 			      "  next_to_watch.status <%x>\n",
3907 				(unsigned long)(tx_ring - adapter->tx_ring),
3908 				readl(hw->hw_addr + tx_ring->tdh),
3909 				readl(hw->hw_addr + tx_ring->tdt),
3910 				tx_ring->next_to_use,
3911 				tx_ring->next_to_clean,
3912 				tx_ring->buffer_info[eop].time_stamp,
3913 				eop,
3914 				jiffies,
3915 				eop_desc->upper.fields.status);
3916 			e1000_dump(adapter);
3917 			netif_stop_queue(netdev);
3918 		}
3919 	}
3920 	adapter->total_tx_bytes += total_tx_bytes;
3921 	adapter->total_tx_packets += total_tx_packets;
3922 	netdev->stats.tx_bytes += total_tx_bytes;
3923 	netdev->stats.tx_packets += total_tx_packets;
3924 	return count < tx_ring->count;
3925 }
3926 
3927 /**
3928  * e1000_rx_checksum - Receive Checksum Offload for 82543
3929  * @adapter:     board private structure
3930  * @status_err:  receive descriptor status and error fields
3931  * @csum:        receive descriptor csum field
3932  * @sk_buff:     socket buffer with received data
3933  **/
3934 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3935 			      u32 csum, struct sk_buff *skb)
3936 {
3937 	struct e1000_hw *hw = &adapter->hw;
3938 	u16 status = (u16)status_err;
3939 	u8 errors = (u8)(status_err >> 24);
3940 
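	/* Callers pack the descriptor status into the low 16 bits of
	 * status_err and the descriptor error byte into bits 31:24
	 * ("status | rx_desc->errors << 24" in the Rx cleanup paths).
	 */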
3941 	skb_checksum_none_assert(skb);
3942 
3943 	/* 82543 or newer only */
3944 	if (unlikely(hw->mac_type < e1000_82543))
3945 		return;
3946 	/* Ignore Checksum bit is set */
3947 	if (unlikely(status & E1000_RXD_STAT_IXSM))
3948 		return;
3949 	/* TCP/UDP checksum error bit is set */
3950 	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3951 		/* let the stack verify checksum errors */
3952 		adapter->hw_csum_err++;
3953 		return;
3954 	}
3955 	/* TCP/UDP Checksum has not been calculated */
3956 	if (!(status & E1000_RXD_STAT_TCPCS))
3957 		return;
3958 
	/* It must be a TCP or UDP packet with a valid checksum; the
	 * TCPCS test above already returned if the bit was clear
	 */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
3964 	adapter->hw_csum_good++;
3965 }
3966 
3967 /**
 * e1000_consume_page - helper function for jumbo Rx path
 * @bi: receive buffer whose page is being consumed
 * @skb: socket buffer the page is being attached to
 * @length: number of bytes received into the page
3969  **/
3970 static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
3971 			       u16 length)
3972 {
3973 	bi->rxbuf.page = NULL;
3974 	skb->len += length;
3975 	skb->data_len += length;
3976 	skb->truesize += PAGE_SIZE;
3977 }
3978 
3979 /**
3980  * e1000_receive_skb - helper function to handle rx indications
3981  * @adapter: board private structure
3982  * @status: descriptor status field as written by hardware
3983  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3984  * @skb: pointer to sk_buff to be indicated to stack
3985  */
3986 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3987 			      __le16 vlan, struct sk_buff *skb)
3988 {
3989 	skb->protocol = eth_type_trans(skb, adapter->netdev);
3990 
3991 	if (status & E1000_RXD_STAT_VP) {
3992 		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
3993 
3994 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
3995 	}
3996 	napi_gro_receive(&adapter->napi, skb);
3997 }
3998 
3999 /**
4000  * e1000_tbi_adjust_stats
4001  * @hw: Struct containing variables accessed by shared code
 * @hw: Struct containing variables accessed by shared code
 * @stats: statistics counters to adjust
4003  * @mac_addr: The Ethernet destination address of the frame in question
4004  *
4005  * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
4006  */
4007 static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4008 				   struct e1000_hw_stats *stats,
4009 				   u32 frame_len, const u8 *mac_addr)
4010 {
4011 	u64 carry_bit;
4012 
4013 	/* First adjust the frame length. */
4014 	frame_len--;
4015 	/* We need to adjust the statistics counters, since the hardware
4016 	 * counters overcount this packet as a CRC error and undercount
4017 	 * the packet as a good packet
4018 	 */
4019 	/* This packet should not be counted as a CRC error. */
4020 	stats->crcerrs--;
4021 	/* This packet does count as a Good Packet Received. */
4022 	stats->gprc++;
4023 
4024 	/* Adjust the Good Octets received counters */
4025 	carry_bit = 0x80000000 & stats->gorcl;
4026 	stats->gorcl += frame_len;
4027 	/* If the high bit of Gorcl (the low 32 bits of the Good Octets
4028 	 * Received Count) was one before the addition,
4029 	 * AND it is zero after, then we lost the carry out,
4030 	 * need to add one to Gorch (Good Octets Received Count High).
4031 	 * This could be simplified if all environments supported
4032 	 * 64-bit integers.
4033 	 */
4034 	if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4035 		stats->gorch++;
4036 	/* Is this a broadcast or multicast?  Check broadcast first,
4037 	 * since the test for a multicast frame will test positive on
4038 	 * a broadcast frame.
4039 	 */
4040 	if (is_broadcast_ether_addr(mac_addr))
4041 		stats->bprc++;
4042 	else if (is_multicast_ether_addr(mac_addr))
4043 		stats->mprc++;
4044 
4045 	if (frame_len == hw->max_frame_size) {
4046 		/* In this case, the hardware has overcounted the number of
4047 		 * oversize frames.
4048 		 */
4049 		if (stats->roc > 0)
4050 			stats->roc--;
4051 	}
4052 
4053 	/* Adjust the bin counters when the extra byte put the frame in the
4054 	 * wrong bin. Remember that the frame_len was adjusted above.
4055 	 */
4056 	if (frame_len == 64) {
4057 		stats->prc64++;
4058 		stats->prc127--;
4059 	} else if (frame_len == 127) {
4060 		stats->prc127++;
4061 		stats->prc255--;
4062 	} else if (frame_len == 255) {
4063 		stats->prc255++;
4064 		stats->prc511--;
4065 	} else if (frame_len == 511) {
4066 		stats->prc511++;
4067 		stats->prc1023--;
4068 	} else if (frame_len == 1023) {
4069 		stats->prc1023++;
4070 		stats->prc1522--;
4071 	} else if (frame_len == 1522) {
4072 		stats->prc1522++;
4073 	}
4074 }
4075 
4076 static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4077 				    u8 status, u8 errors,
4078 				    u32 length, const u8 *data)
4079 {
4080 	struct e1000_hw *hw = &adapter->hw;
4081 	u8 last_byte = *(data + length - 1);
4082 
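	/* TBI workaround: on fiber (TBI) parts, a frame ending in a
	 * carrier-extension symbol error is reported bad although only the
	 * last byte is bogus; TBI_ACCEPT detects that case so the frame can
	 * be accepted with its final byte dropped.
	 */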
4083 	if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4084 		unsigned long irq_flags;
4085 
4086 		spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4087 		e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4088 		spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4089 
4090 		return true;
4091 	}
4092 
4093 	return false;
4094 }
4095 
4096 static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4097 					  unsigned int bufsz)
4098 {
4099 	struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4100 
4101 	if (unlikely(!skb))
4102 		adapter->alloc_rx_buff_failed++;
4103 	return skb;
4104 }
4105 
4106 /**
4107  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4108  * @adapter: board private structure
4109  * @rx_ring: ring to clean
4110  * @work_done: amount of napi work completed this call
4111  * @work_to_do: max amount of work allowed for this call to do
4112  *
 * the return value indicates whether actual cleaning was done; there
4114  * is no guarantee that everything was cleaned
4115  */
4116 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4117 				     struct e1000_rx_ring *rx_ring,
4118 				     int *work_done, int work_to_do)
4119 {
4120 	struct net_device *netdev = adapter->netdev;
4121 	struct pci_dev *pdev = adapter->pdev;
4122 	struct e1000_rx_desc *rx_desc, *next_rxd;
4123 	struct e1000_rx_buffer *buffer_info, *next_buffer;
4124 	u32 length;
4125 	unsigned int i;
4126 	int cleaned_count = 0;
4127 	bool cleaned = false;
4128 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4129 
4130 	i = rx_ring->next_to_clean;
4131 	rx_desc = E1000_RX_DESC(*rx_ring, i);
4132 	buffer_info = &rx_ring->buffer_info[i];
4133 
4134 	while (rx_desc->status & E1000_RXD_STAT_DD) {
4135 		struct sk_buff *skb;
4136 		u8 status;
4137 
4138 		if (*work_done >= work_to_do)
4139 			break;
4140 		(*work_done)++;
4141 		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4142 
4143 		status = rx_desc->status;
4144 
4145 		if (++i == rx_ring->count)
4146 			i = 0;
4147 
4148 		next_rxd = E1000_RX_DESC(*rx_ring, i);
4149 		prefetch(next_rxd);
4150 
4151 		next_buffer = &rx_ring->buffer_info[i];
4152 
4153 		cleaned = true;
4154 		cleaned_count++;
4155 		dma_unmap_page(&pdev->dev, buffer_info->dma,
4156 			       adapter->rx_buffer_len, DMA_FROM_DEVICE);
4157 		buffer_info->dma = 0;
4158 
4159 		length = le16_to_cpu(rx_desc->length);
4160 
4161 		/* errors is only valid for DD + EOP descriptors */
4162 		if (unlikely((status & E1000_RXD_STAT_EOP) &&
4163 		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4164 			u8 *mapped = page_address(buffer_info->rxbuf.page);
4165 
4166 			if (e1000_tbi_should_accept(adapter, status,
4167 						    rx_desc->errors,
4168 						    length, mapped)) {
4169 				length--;
4170 			} else if (netdev->features & NETIF_F_RXALL) {
4171 				goto process_skb;
4172 			} else {
4173 				/* an error means any chain goes out the window
4174 				 * too
4175 				 */
4176 				dev_kfree_skb(rx_ring->rx_skb_top);
4177 				rx_ring->rx_skb_top = NULL;
4178 				goto next_desc;
4179 			}
4180 		}
4181 
4182 #define rxtop rx_ring->rx_skb_top
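/* rx_skb_top holds the partially assembled skb while a packet spans
 * multiple descriptors; page fragments are appended to it until the
 * descriptor carrying EOP arrives.
 */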
4183 process_skb:
4184 		if (!(status & E1000_RXD_STAT_EOP)) {
4185 			/* this descriptor is only the beginning (or middle) */
4186 			if (!rxtop) {
4187 				/* this is the beginning of a chain */
4188 				rxtop = napi_get_frags(&adapter->napi);
4189 				if (!rxtop)
4190 					break;
4191 
4192 				skb_fill_page_desc(rxtop, 0,
4193 						   buffer_info->rxbuf.page,
4194 						   0, length);
4195 			} else {
4196 				/* this is the middle of a chain */
4197 				skb_fill_page_desc(rxtop,
4198 				    skb_shinfo(rxtop)->nr_frags,
4199 				    buffer_info->rxbuf.page, 0, length);
4200 			}
4201 			e1000_consume_page(buffer_info, rxtop, length);
4202 			goto next_desc;
4203 		} else {
4204 			if (rxtop) {
4205 				/* end of the chain */
4206 				skb_fill_page_desc(rxtop,
4207 				    skb_shinfo(rxtop)->nr_frags,
4208 				    buffer_info->rxbuf.page, 0, length);
4209 				skb = rxtop;
4210 				rxtop = NULL;
4211 				e1000_consume_page(buffer_info, skb, length);
4212 			} else {
4213 				struct page *p;
				/* no chain, got EOP; this buf is the packet.
4215 				 * copybreak to save the put_page/alloc_page
4216 				 */
4217 				p = buffer_info->rxbuf.page;
4218 				if (length <= copybreak) {
4219 					u8 *vaddr;
4220 
4221 					if (likely(!(netdev->features & NETIF_F_RXFCS)))
4222 						length -= 4;
4223 					skb = e1000_alloc_rx_skb(adapter,
4224 								 length);
4225 					if (!skb)
4226 						break;
4227 
4228 					vaddr = kmap_atomic(p);
4229 					memcpy(skb_tail_pointer(skb), vaddr,
4230 					       length);
4231 					kunmap_atomic(vaddr);
4232 					/* re-use the page, so don't erase
4233 					 * buffer_info->rxbuf.page
4234 					 */
4235 					skb_put(skb, length);
4236 					e1000_rx_checksum(adapter,
4237 							  status | rx_desc->errors << 24,
4238 							  le16_to_cpu(rx_desc->csum), skb);
4239 
4240 					total_rx_bytes += skb->len;
4241 					total_rx_packets++;
4242 
4243 					e1000_receive_skb(adapter, status,
4244 							  rx_desc->special, skb);
4245 					goto next_desc;
4246 				} else {
4247 					skb = napi_get_frags(&adapter->napi);
4248 					if (!skb) {
4249 						adapter->alloc_rx_buff_failed++;
4250 						break;
4251 					}
4252 					skb_fill_page_desc(skb, 0, p, 0,
4253 							   length);
4254 					e1000_consume_page(buffer_info, skb,
4255 							   length);
4256 				}
4257 			}
4258 		}
4259 
4260 		/* Receive Checksum Offload XXX recompute due to CRC strip? */
4261 		e1000_rx_checksum(adapter,
4262 				  (u32)(status) |
4263 				  ((u32)(rx_desc->errors) << 24),
4264 				  le16_to_cpu(rx_desc->csum), skb);
4265 
4266 		total_rx_bytes += (skb->len - 4); /* don't count FCS */
4267 		if (likely(!(netdev->features & NETIF_F_RXFCS)))
4268 			pskb_trim(skb, skb->len - 4);
4269 		total_rx_packets++;
4270 
4271 		if (status & E1000_RXD_STAT_VP) {
4272 			__le16 vlan = rx_desc->special;
4273 			u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4274 
4275 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4276 		}
4277 
4278 		napi_gro_frags(&adapter->napi);
4279 
4280 next_desc:
4281 		rx_desc->status = 0;
4282 
4283 		/* return some buffers to hardware, one at a time is too slow */
4284 		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4285 			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4286 			cleaned_count = 0;
4287 		}
4288 
4289 		/* use prefetched values */
4290 		rx_desc = next_rxd;
4291 		buffer_info = next_buffer;
4292 	}
4293 	rx_ring->next_to_clean = i;
4294 
4295 	cleaned_count = E1000_DESC_UNUSED(rx_ring);
4296 	if (cleaned_count)
4297 		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4298 
4299 	adapter->total_rx_packets += total_rx_packets;
4300 	adapter->total_rx_bytes += total_rx_bytes;
4301 	netdev->stats.rx_bytes += total_rx_bytes;
4302 	netdev->stats.rx_packets += total_rx_packets;
4303 	return cleaned;
4304 }
4305 
4306 /* this should improve performance for small packets with large amounts
4307  * of reassembly being done in the stack
4308  */
4309 static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
4310 				       struct e1000_rx_buffer *buffer_info,
4311 				       u32 length, const void *data)
4312 {
4313 	struct sk_buff *skb;
4314 
4315 	if (length > copybreak)
4316 		return NULL;
4317 
4318 	skb = e1000_alloc_rx_skb(adapter, length);
4319 	if (!skb)
4320 		return NULL;
4321 
4322 	dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4323 				length, DMA_FROM_DEVICE);
4324 
4325 	skb_put_data(skb, data, length);
4326 
4327 	return skb;
4328 }
4329 
4330 /**
4331  * e1000_clean_rx_irq - Send received data up the network stack; legacy
4332  * @adapter: board private structure
4333  * @rx_ring: ring to clean
4334  * @work_done: amount of napi work completed this call
4335  * @work_to_do: max amount of work allowed for this call to do
4336  */
4337 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4338 			       struct e1000_rx_ring *rx_ring,
4339 			       int *work_done, int work_to_do)
4340 {
4341 	struct net_device *netdev = adapter->netdev;
4342 	struct pci_dev *pdev = adapter->pdev;
4343 	struct e1000_rx_desc *rx_desc, *next_rxd;
4344 	struct e1000_rx_buffer *buffer_info, *next_buffer;
4345 	u32 length;
4346 	unsigned int i;
4347 	int cleaned_count = 0;
4348 	bool cleaned = false;
4349 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4350 
4351 	i = rx_ring->next_to_clean;
4352 	rx_desc = E1000_RX_DESC(*rx_ring, i);
4353 	buffer_info = &rx_ring->buffer_info[i];
4354 
4355 	while (rx_desc->status & E1000_RXD_STAT_DD) {
4356 		struct sk_buff *skb;
4357 		u8 *data;
4358 		u8 status;
4359 
4360 		if (*work_done >= work_to_do)
4361 			break;
4362 		(*work_done)++;
4363 		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4364 
4365 		status = rx_desc->status;
4366 		length = le16_to_cpu(rx_desc->length);
4367 
4368 		data = buffer_info->rxbuf.data;
4369 		prefetch(data);
4370 		skb = e1000_copybreak(adapter, buffer_info, length, data);
4371 		if (!skb) {
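			/* copybreak declined: wrap the receive buffer itself
			 * in an skb via build_skb() instead of copying, then
			 * unmap it so the stack owns the data.
			 */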
4372 			unsigned int frag_len = e1000_frag_len(adapter);
4373 
4374 			skb = build_skb(data - E1000_HEADROOM, frag_len);
4375 			if (!skb) {
4376 				adapter->alloc_rx_buff_failed++;
4377 				break;
4378 			}
4379 
4380 			skb_reserve(skb, E1000_HEADROOM);
4381 			dma_unmap_single(&pdev->dev, buffer_info->dma,
4382 					 adapter->rx_buffer_len,
4383 					 DMA_FROM_DEVICE);
4384 			buffer_info->dma = 0;
4385 			buffer_info->rxbuf.data = NULL;
4386 		}
4387 
4388 		if (++i == rx_ring->count)
4389 			i = 0;
4390 
4391 		next_rxd = E1000_RX_DESC(*rx_ring, i);
4392 		prefetch(next_rxd);
4393 
4394 		next_buffer = &rx_ring->buffer_info[i];
4395 
4396 		cleaned = true;
4397 		cleaned_count++;
4398 
4399 		/* !EOP means multiple descriptors were used to store a single
		 * packet; if that's the case we need to toss it.  In fact, we
		 * need to toss every packet with the EOP bit clear and the next
4402 		 * frame that _does_ have the EOP bit set, as it is by
4403 		 * definition only a frame fragment
4404 		 */
4405 		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4406 			adapter->discarding = true;
4407 
4408 		if (adapter->discarding) {
4409 			/* All receives must fit into a single buffer */
4410 			netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4411 			dev_kfree_skb(skb);
4412 			if (status & E1000_RXD_STAT_EOP)
4413 				adapter->discarding = false;
4414 			goto next_desc;
4415 		}
4416 
4417 		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4418 			if (e1000_tbi_should_accept(adapter, status,
4419 						    rx_desc->errors,
4420 						    length, data)) {
4421 				length--;
4422 			} else if (netdev->features & NETIF_F_RXALL) {
4423 				goto process_skb;
4424 			} else {
4425 				dev_kfree_skb(skb);
4426 				goto next_desc;
4427 			}
4428 		}
4429 
4430 process_skb:
4431 		total_rx_bytes += (length - 4); /* don't count FCS */
4432 		total_rx_packets++;
4433 
4434 		if (likely(!(netdev->features & NETIF_F_RXFCS)))
4435 			/* adjust length to remove Ethernet CRC, this must be
4436 			 * done after the TBI_ACCEPT workaround above
4437 			 */
4438 			length -= 4;
4439 
4440 		if (buffer_info->rxbuf.data == NULL)
4441 			skb_put(skb, length);
4442 		else /* copybreak skb */
4443 			skb_trim(skb, length);
4444 
4445 		/* Receive Checksum Offload */
4446 		e1000_rx_checksum(adapter,
4447 				  (u32)(status) |
4448 				  ((u32)(rx_desc->errors) << 24),
4449 				  le16_to_cpu(rx_desc->csum), skb);
4450 
4451 		e1000_receive_skb(adapter, status, rx_desc->special, skb);
4452 
4453 next_desc:
4454 		rx_desc->status = 0;
4455 
4456 		/* return some buffers to hardware, one at a time is too slow */
4457 		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4458 			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4459 			cleaned_count = 0;
4460 		}
4461 
4462 		/* use prefetched values */
4463 		rx_desc = next_rxd;
4464 		buffer_info = next_buffer;
4465 	}
4466 	rx_ring->next_to_clean = i;
4467 
4468 	cleaned_count = E1000_DESC_UNUSED(rx_ring);
4469 	if (cleaned_count)
4470 		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4471 
4472 	adapter->total_rx_packets += total_rx_packets;
4473 	adapter->total_rx_bytes += total_rx_bytes;
4474 	netdev->stats.rx_bytes += total_rx_bytes;
4475 	netdev->stats.rx_packets += total_rx_packets;
4476 	return cleaned;
4477 }
4478 
4479 /**
4480  * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4481  * @adapter: address of board private structure
4482  * @rx_ring: pointer to receive ring structure
4483  * @cleaned_count: number of buffers to allocate this pass
4484  **/
4485 static void
4486 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4487 			     struct e1000_rx_ring *rx_ring, int cleaned_count)
4488 {
4489 	struct pci_dev *pdev = adapter->pdev;
4490 	struct e1000_rx_desc *rx_desc;
4491 	struct e1000_rx_buffer *buffer_info;
4492 	unsigned int i;
4493 
4494 	i = rx_ring->next_to_use;
4495 	buffer_info = &rx_ring->buffer_info[i];
4496 
4497 	while (cleaned_count--) {
4498 		/* allocate a new page if necessary */
4499 		if (!buffer_info->rxbuf.page) {
4500 			buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
4501 			if (unlikely(!buffer_info->rxbuf.page)) {
4502 				adapter->alloc_rx_buff_failed++;
4503 				break;
4504 			}
4505 		}
4506 
4507 		if (!buffer_info->dma) {
4508 			buffer_info->dma = dma_map_page(&pdev->dev,
4509 							buffer_info->rxbuf.page, 0,
4510 							adapter->rx_buffer_len,
4511 							DMA_FROM_DEVICE);
4512 			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4513 				put_page(buffer_info->rxbuf.page);
4514 				buffer_info->rxbuf.page = NULL;
4515 				buffer_info->dma = 0;
4516 				adapter->alloc_rx_buff_failed++;
4517 				break;
4518 			}
4519 		}
4520 
4521 		rx_desc = E1000_RX_DESC(*rx_ring, i);
4522 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4523 
4524 		if (unlikely(++i == rx_ring->count))
4525 			i = 0;
4526 		buffer_info = &rx_ring->buffer_info[i];
4527 	}
4528 
4529 	if (likely(rx_ring->next_to_use != i)) {
4530 		rx_ring->next_to_use = i;
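		/* write the tail one entry behind next_to_use so the ring
		 * never appears completely full to the hardware
		 */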
4531 		if (unlikely(i-- == 0))
4532 			i = (rx_ring->count - 1);
4533 
4534 		/* Force memory writes to complete before letting h/w
4535 		 * know there are new descriptors to fetch.  (Only
4536 		 * applicable for weak-ordered memory model archs,
4537 		 * such as IA-64).
4538 		 */
4539 		dma_wmb();
4540 		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4541 	}
4542 }
4543 
4544 /**
4545  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
4547  **/
4548 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4549 				   struct e1000_rx_ring *rx_ring,
4550 				   int cleaned_count)
4551 {
4552 	struct e1000_hw *hw = &adapter->hw;
4553 	struct pci_dev *pdev = adapter->pdev;
4554 	struct e1000_rx_desc *rx_desc;
4555 	struct e1000_rx_buffer *buffer_info;
4556 	unsigned int i;
4557 	unsigned int bufsz = adapter->rx_buffer_len;
4558 
4559 	i = rx_ring->next_to_use;
4560 	buffer_info = &rx_ring->buffer_info[i];
4561 
4562 	while (cleaned_count--) {
4563 		void *data;
4564 
4565 		if (buffer_info->rxbuf.data)
4566 			goto skip;
4567 
4568 		data = e1000_alloc_frag(adapter);
4569 		if (!data) {
4570 			/* Better luck next round */
4571 			adapter->alloc_rx_buff_failed++;
4572 			break;
4573 		}
4574 
4575 		/* Fix for errata 23, can't cross 64kB boundary */
4576 		if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4577 			void *olddata = data;
4578 			e_err(rx_err, "skb align check failed: %u bytes at "
4579 			      "%p\n", bufsz, data);
4580 			/* Try again, without freeing the previous */
4581 			data = e1000_alloc_frag(adapter);
4582 			/* Failed allocation, critical failure */
4583 			if (!data) {
4584 				skb_free_frag(olddata);
4585 				adapter->alloc_rx_buff_failed++;
4586 				break;
4587 			}
4588 
4589 			if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4590 				/* give up */
4591 				skb_free_frag(data);
4592 				skb_free_frag(olddata);
4593 				adapter->alloc_rx_buff_failed++;
4594 				break;
4595 			}
4596 
4597 			/* Use new allocation */
4598 			skb_free_frag(olddata);
4599 		}
4600 		buffer_info->dma = dma_map_single(&pdev->dev,
4601 						  data,
4602 						  adapter->rx_buffer_len,
4603 						  DMA_FROM_DEVICE);
4604 		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4605 			skb_free_frag(data);
4606 			buffer_info->dma = 0;
4607 			adapter->alloc_rx_buff_failed++;
4608 			break;
4609 		}
4610 
4611 		/* XXX if it was allocated cleanly it will never map to a
4612 		 * boundary crossing
4613 		 */
4614 
4615 		/* Fix for errata 23, can't cross 64kB boundary */
4616 		if (!e1000_check_64k_bound(adapter,
4617 					(void *)(unsigned long)buffer_info->dma,
4618 					adapter->rx_buffer_len)) {
4619 			e_err(rx_err, "dma align check failed: %u bytes at "
4620 			      "%p\n", adapter->rx_buffer_len,
4621 			      (void *)(unsigned long)buffer_info->dma);
4622 
4623 			dma_unmap_single(&pdev->dev, buffer_info->dma,
4624 					 adapter->rx_buffer_len,
4625 					 DMA_FROM_DEVICE);
4626 
4627 			skb_free_frag(data);
4628 			buffer_info->rxbuf.data = NULL;
4629 			buffer_info->dma = 0;
4630 
4631 			adapter->alloc_rx_buff_failed++;
4632 			break;
4633 		}
4634 		buffer_info->rxbuf.data = data;
4635  skip:
4636 		rx_desc = E1000_RX_DESC(*rx_ring, i);
4637 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4638 
4639 		if (unlikely(++i == rx_ring->count))
4640 			i = 0;
4641 		buffer_info = &rx_ring->buffer_info[i];
4642 	}
4643 
4644 	if (likely(rx_ring->next_to_use != i)) {
4645 		rx_ring->next_to_use = i;
4646 		if (unlikely(i-- == 0))
4647 			i = (rx_ring->count - 1);
4648 
4649 		/* Force memory writes to complete before letting h/w
4650 		 * know there are new descriptors to fetch.  (Only
4651 		 * applicable for weak-ordered memory model archs,
4652 		 * such as IA-64).
4653 		 */
4654 		dma_wmb();
4655 		writel(i, hw->hw_addr + rx_ring->rdt);
4656 	}
4657 }
4658 
4659 /**
4660  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: board private structure
4662  **/
4663 static void e1000_smartspeed(struct e1000_adapter *adapter)
4664 {
4665 	struct e1000_hw *hw = &adapter->hw;
4666 	u16 phy_status;
4667 	u16 phy_ctrl;
4668 
4669 	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4670 	   !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4671 		return;
4672 
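	/* SmartSpeed workaround: if 1000BASE-T master/slave resolution keeps
	 * faulting, toggle the manual master/slave configuration and restart
	 * autonegotiation, in case the link partner cannot complete the
	 * normal resolution handshake.
	 */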
4673 	if (adapter->smartspeed == 0) {
4674 		/* If Master/Slave config fault is asserted twice,
4675 		 * we assume back-to-back
4676 		 */
4677 		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4678 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4679 			return;
4680 		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4681 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4682 			return;
4683 		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4684 		if (phy_ctrl & CR_1000T_MS_ENABLE) {
4685 			phy_ctrl &= ~CR_1000T_MS_ENABLE;
4686 			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4687 					    phy_ctrl);
4688 			adapter->smartspeed++;
4689 			if (!e1000_phy_setup_autoneg(hw) &&
4690 			   !e1000_read_phy_reg(hw, PHY_CTRL,
4691 					       &phy_ctrl)) {
4692 				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4693 					     MII_CR_RESTART_AUTO_NEG);
4694 				e1000_write_phy_reg(hw, PHY_CTRL,
4695 						    phy_ctrl);
4696 			}
4697 		}
4698 		return;
4699 	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4700 		/* If still no link, perhaps using 2/3 pair cable */
4701 		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4702 		phy_ctrl |= CR_1000T_MS_ENABLE;
4703 		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4704 		if (!e1000_phy_setup_autoneg(hw) &&
4705 		   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4706 			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4707 				     MII_CR_RESTART_AUTO_NEG);
4708 			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4709 		}
4710 	}
4711 	/* Restart process after E1000_SMARTSPEED_MAX iterations */
4712 	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4713 		adapter->smartspeed = 0;
4714 }
4715 
4716 /**
 * e1000_ioctl - handle device-specific ioctls
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
4721  **/
4722 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4723 {
4724 	switch (cmd) {
4725 	case SIOCGMIIPHY:
4726 	case SIOCGMIIREG:
4727 	case SIOCSMIIREG:
4728 		return e1000_mii_ioctl(netdev, ifr, cmd);
4729 	default:
4730 		return -EOPNOTSUPP;
4731 	}
4732 }
4733 
4734 /**
 * e1000_mii_ioctl - handle MII ioctls
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
4739  **/
4740 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4741 			   int cmd)
4742 {
4743 	struct e1000_adapter *adapter = netdev_priv(netdev);
4744 	struct e1000_hw *hw = &adapter->hw;
4745 	struct mii_ioctl_data *data = if_mii(ifr);
4746 	int retval;
4747 	u16 mii_reg;
4748 	unsigned long flags;
4749 
4750 	if (hw->media_type != e1000_media_type_copper)
4751 		return -EOPNOTSUPP;
4752 
4753 	switch (cmd) {
4754 	case SIOCGMIIPHY:
4755 		data->phy_id = hw->phy_addr;
4756 		break;
4757 	case SIOCGMIIREG:
4758 		spin_lock_irqsave(&adapter->stats_lock, flags);
4759 		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4760 				   &data->val_out)) {
4761 			spin_unlock_irqrestore(&adapter->stats_lock, flags);
4762 			return -EIO;
4763 		}
4764 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4765 		break;
4766 	case SIOCSMIIREG:
4767 		if (data->reg_num & ~(0x1F))
4768 			return -EFAULT;
4769 		mii_reg = data->val_in;
4770 		spin_lock_irqsave(&adapter->stats_lock, flags);
4771 		if (e1000_write_phy_reg(hw, data->reg_num,
4772 					mii_reg)) {
4773 			spin_unlock_irqrestore(&adapter->stats_lock, flags);
4774 			return -EIO;
4775 		}
4776 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4777 		if (hw->media_type == e1000_media_type_copper) {
4778 			switch (data->reg_num) {
4779 			case PHY_CTRL:
4780 				if (mii_reg & MII_CR_POWER_DOWN)
4781 					break;
4782 				if (mii_reg & MII_CR_AUTO_NEG_EN) {
4783 					hw->autoneg = 1;
4784 					hw->autoneg_advertised = 0x2F;
4785 				} else {
4786 					u32 speed;
4787 					if (mii_reg & 0x40)
4788 						speed = SPEED_1000;
4789 					else if (mii_reg & 0x2000)
4790 						speed = SPEED_100;
4791 					else
4792 						speed = SPEED_10;
4793 					retval = e1000_set_spd_dplx(
4794 						adapter, speed,
4795 						((mii_reg & 0x100)
4796 						 ? DUPLEX_FULL :
4797 						 DUPLEX_HALF));
4798 					if (retval)
4799 						return retval;
4800 				}
4801 				if (netif_running(adapter->netdev))
4802 					e1000_reinit_locked(adapter);
4803 				else
4804 					e1000_reset(adapter);
4805 				break;
4806 			case M88E1000_PHY_SPEC_CTRL:
4807 			case M88E1000_EXT_PHY_SPEC_CTRL:
4808 				if (e1000_phy_reset(hw))
4809 					return -EIO;
4810 				break;
4811 			}
4812 		} else {
4813 			switch (data->reg_num) {
4814 			case PHY_CTRL:
4815 				if (mii_reg & MII_CR_POWER_DOWN)
4816 					break;
4817 				if (netif_running(adapter->netdev))
4818 					e1000_reinit_locked(adapter);
4819 				else
4820 					e1000_reset(adapter);
4821 				break;
4822 			}
4823 		}
4824 		break;
4825 	default:
4826 		return -EOPNOTSUPP;
4827 	}
4828 	return E1000_SUCCESS;
4829 }
4830 
4831 void e1000_pci_set_mwi(struct e1000_hw *hw)
4832 {
4833 	struct e1000_adapter *adapter = hw->back;
4834 	int ret_val = pci_set_mwi(adapter->pdev);
4835 
4836 	if (ret_val)
4837 		e_err(probe, "Error in setting MWI\n");
4838 }
4839 
4840 void e1000_pci_clear_mwi(struct e1000_hw *hw)
4841 {
4842 	struct e1000_adapter *adapter = hw->back;
4843 
4844 	pci_clear_mwi(adapter->pdev);
4845 }
4846 
4847 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4848 {
4849 	struct e1000_adapter *adapter = hw->back;
4850 	return pcix_get_mmrbc(adapter->pdev);
4851 }
4852 
4853 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4854 {
4855 	struct e1000_adapter *adapter = hw->back;
4856 	pcix_set_mmrbc(adapter->pdev, mmrbc);
4857 }
4858 
4859 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4860 {
4861 	outl(value, port);
4862 }
4863 
4864 static bool e1000_vlan_used(struct e1000_adapter *adapter)
4865 {
4866 	u16 vid;
4867 
4868 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4869 		return true;
4870 	return false;
4871 }
4872 
4873 static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4874 			      netdev_features_t features)
4875 {
4876 	struct e1000_hw *hw = &adapter->hw;
4877 	u32 ctrl;
4878 
4879 	ctrl = er32(CTRL);
4880 	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4881 		/* enable VLAN tag insert/strip */
4882 		ctrl |= E1000_CTRL_VME;
4883 	} else {
4884 		/* disable VLAN tag insert/strip */
4885 		ctrl &= ~E1000_CTRL_VME;
4886 	}
4887 	ew32(CTRL, ctrl);
4888 }
}

4890 				     bool filter_on)
4891 {
4892 	struct e1000_hw *hw = &adapter->hw;
4893 	u32 rctl;
4894 
4895 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4896 		e1000_irq_disable(adapter);
4897 
4898 	__e1000_vlan_mode(adapter, adapter->netdev->features);
4899 	if (filter_on) {
4900 		/* enable VLAN receive filtering */
4901 		rctl = er32(RCTL);
4902 		rctl &= ~E1000_RCTL_CFIEN;
4903 		if (!(adapter->netdev->flags & IFF_PROMISC))
4904 			rctl |= E1000_RCTL_VFE;
4905 		ew32(RCTL, rctl);
4906 		e1000_update_mng_vlan(adapter);
4907 	} else {
4908 		/* disable VLAN receive filtering */
4909 		rctl = er32(RCTL);
4910 		rctl &= ~E1000_RCTL_VFE;
4911 		ew32(RCTL, rctl);
4912 	}
4913 
4914 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4915 		e1000_irq_enable(adapter);
4916 }
4917 
4918 static void e1000_vlan_mode(struct net_device *netdev,
4919 			    netdev_features_t features)
4920 {
4921 	struct e1000_adapter *adapter = netdev_priv(netdev);
4922 
4923 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4924 		e1000_irq_disable(adapter);
4925 
4926 	__e1000_vlan_mode(adapter, features);
4927 
4928 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4929 		e1000_irq_enable(adapter);
4930 }
4931 
4932 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4933 				 __be16 proto, u16 vid)
4934 {
4935 	struct e1000_adapter *adapter = netdev_priv(netdev);
4936 	struct e1000_hw *hw = &adapter->hw;
4937 	u32 vfta, index;
4938 
4939 	if ((hw->mng_cookie.status &
4940 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4941 	    (vid == adapter->mng_vlan_id))
4942 		return 0;
4943 
4944 	if (!e1000_vlan_used(adapter))
4945 		e1000_vlan_filter_on_off(adapter, true);
4946 
4947 	/* add VID to filter table */
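	/* VFTA is an array of 128 32-bit registers with one bit per VLAN ID,
	 * e.g. vid 100 lands in register 100 >> 5 = 3, bit 100 & 0x1F = 4
	 */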
4948 	index = (vid >> 5) & 0x7F;
4949 	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4950 	vfta |= (1 << (vid & 0x1F));
4951 	e1000_write_vfta(hw, index, vfta);
4952 
4953 	set_bit(vid, adapter->active_vlans);
4954 
4955 	return 0;
4956 }
4957 
4958 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
4959 				  __be16 proto, u16 vid)
4960 {
4961 	struct e1000_adapter *adapter = netdev_priv(netdev);
4962 	struct e1000_hw *hw = &adapter->hw;
4963 	u32 vfta, index;
4964 
4965 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4966 		e1000_irq_disable(adapter);
4967 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4968 		e1000_irq_enable(adapter);
4969 
4970 	/* remove VID from filter table */
4971 	index = (vid >> 5) & 0x7F;
4972 	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4973 	vfta &= ~(1 << (vid & 0x1F));
4974 	e1000_write_vfta(hw, index, vfta);
4975 
4976 	clear_bit(vid, adapter->active_vlans);
4977 
4978 	if (!e1000_vlan_used(adapter))
4979 		e1000_vlan_filter_on_off(adapter, false);
4980 
4981 	return 0;
4982 }
4983 
4984 static void e1000_restore_vlan(struct e1000_adapter *adapter)
4985 {
4986 	u16 vid;
4987 
4988 	if (!e1000_vlan_used(adapter))
4989 		return;
4990 
4991 	e1000_vlan_filter_on_off(adapter, true);
4992 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4993 		e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
4994 }
4995 
4996 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
4997 {
4998 	struct e1000_hw *hw = &adapter->hw;
4999 
5000 	hw->autoneg = 0;
5001 
5002 	/* Make sure dplx is at most 1 bit and lsb of speed is not set
5003 	 * for the switch() below to work
5004 	 */
5005 	if ((spd & 1) || (dplx & ~1))
5006 		goto err_inval;
5007 
	/* Fiber NICs only allow 1000 Mbps full duplex */
5009 	if ((hw->media_type == e1000_media_type_fiber) &&
5010 	    spd != SPEED_1000 &&
5011 	    dplx != DUPLEX_FULL)
5012 		goto err_inval;
5013 
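	/* SPEED_* values are even and DUPLEX_* is 0 or 1 (enforced above),
	 * so spd + dplx encodes each supported pair uniquely
	 */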
5014 	switch (spd + dplx) {
5015 	case SPEED_10 + DUPLEX_HALF:
5016 		hw->forced_speed_duplex = e1000_10_half;
5017 		break;
5018 	case SPEED_10 + DUPLEX_FULL:
5019 		hw->forced_speed_duplex = e1000_10_full;
5020 		break;
5021 	case SPEED_100 + DUPLEX_HALF:
5022 		hw->forced_speed_duplex = e1000_100_half;
5023 		break;
5024 	case SPEED_100 + DUPLEX_FULL:
5025 		hw->forced_speed_duplex = e1000_100_full;
5026 		break;
5027 	case SPEED_1000 + DUPLEX_FULL:
5028 		hw->autoneg = 1;
5029 		hw->autoneg_advertised = ADVERTISE_1000_FULL;
5030 		break;
5031 	case SPEED_1000 + DUPLEX_HALF: /* not supported */
5032 	default:
5033 		goto err_inval;
5034 	}
5035 
5036 	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5037 	hw->mdix = AUTO_ALL_MODES;
5038 
5039 	return 0;
5040 
5041 err_inval:
5042 	e_err(probe, "Unsupported Speed/Duplex configuration\n");
5043 	return -EINVAL;
5044 }
5045 
5046 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5047 {
5048 	struct net_device *netdev = pci_get_drvdata(pdev);
5049 	struct e1000_adapter *adapter = netdev_priv(netdev);
5050 	struct e1000_hw *hw = &adapter->hw;
5051 	u32 ctrl, ctrl_ext, rctl, status;
5052 	u32 wufc = adapter->wol;
5053 #ifdef CONFIG_PM
5054 	int retval = 0;
5055 #endif
5056 
5057 	netif_device_detach(netdev);
5058 
5059 	if (netif_running(netdev)) {
5060 		int count = E1000_CHECK_RESET_COUNT;
5061 
5062 		while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
5063 			usleep_range(10000, 20000);
5064 
5065 		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
5066 		e1000_down(adapter);
5067 	}
5068 
5069 #ifdef CONFIG_PM
5070 	retval = pci_save_state(pdev);
5071 	if (retval)
5072 		return retval;
5073 #endif
5074 
5075 	status = er32(STATUS);
5076 	if (status & E1000_STATUS_LU)
5077 		wufc &= ~E1000_WUFC_LNKC;
5078 
5079 	if (wufc) {
5080 		e1000_setup_rctl(adapter);
5081 		e1000_set_rx_mode(netdev);
5082 
5083 		rctl = er32(RCTL);
5084 
5085 		/* turn on all-multi mode if wake on multicast is enabled */
5086 		if (wufc & E1000_WUFC_MC)
5087 			rctl |= E1000_RCTL_MPE;
5088 
5089 		/* enable receives in the hardware */
5090 		ew32(RCTL, rctl | E1000_RCTL_EN);
5091 
5092 		if (hw->mac_type >= e1000_82540) {
5093 			ctrl = er32(CTRL);
5094 			/* advertise wake from D3Cold */
5095 			#define E1000_CTRL_ADVD3WUC 0x00100000
5096 			/* phy power management enable */
5097 			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5098 			ctrl |= E1000_CTRL_ADVD3WUC |
5099 				E1000_CTRL_EN_PHY_PWR_MGMT;
5100 			ew32(CTRL, ctrl);
5101 		}
5102 
5103 		if (hw->media_type == e1000_media_type_fiber ||
5104 		    hw->media_type == e1000_media_type_internal_serdes) {
5105 			/* keep the laser running in D3 */
5106 			ctrl_ext = er32(CTRL_EXT);
5107 			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5108 			ew32(CTRL_EXT, ctrl_ext);
5109 		}
5110 
5111 		ew32(WUC, E1000_WUC_PME_EN);
5112 		ew32(WUFC, wufc);
5113 	} else {
5114 		ew32(WUC, 0);
5115 		ew32(WUFC, 0);
5116 	}
5117 
5118 	e1000_release_manageability(adapter);
5119 
5120 	*enable_wake = !!wufc;
5121 
5122 	/* make sure adapter isn't asleep if manageability is enabled */
5123 	if (adapter->en_mng_pt)
5124 		*enable_wake = true;
5125 
5126 	if (netif_running(netdev))
5127 		e1000_free_irq(adapter);
5128 
5129 	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5130 		pci_disable_device(pdev);
5131 
5132 	return 0;
5133 }
5134 
5135 #ifdef CONFIG_PM
5136 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5137 {
5138 	int retval;
5139 	bool wake;
5140 
5141 	retval = __e1000_shutdown(pdev, &wake);
5142 	if (retval)
5143 		return retval;
5144 
5145 	if (wake) {
5146 		pci_prepare_to_sleep(pdev);
5147 	} else {
5148 		pci_wake_from_d3(pdev, false);
5149 		pci_set_power_state(pdev, PCI_D3hot);
5150 	}
5151 
5152 	return 0;
5153 }
5154 
5155 static int e1000_resume(struct pci_dev *pdev)
5156 {
5157 	struct net_device *netdev = pci_get_drvdata(pdev);
5158 	struct e1000_adapter *adapter = netdev_priv(netdev);
5159 	struct e1000_hw *hw = &adapter->hw;
	int err;
5161 
5162 	pci_set_power_state(pdev, PCI_D0);
5163 	pci_restore_state(pdev);
5164 	pci_save_state(pdev);
5165 
5166 	if (adapter->need_ioport)
5167 		err = pci_enable_device(pdev);
5168 	else
5169 		err = pci_enable_device_mem(pdev);
5170 	if (err) {
5171 		pr_err("Cannot enable PCI device from suspend\n");
5172 		return err;
5173 	}
5174 
5175 	/* flush memory to make sure state is correct */
5176 	smp_mb__before_atomic();
5177 	clear_bit(__E1000_DISABLED, &adapter->flags);
5178 	pci_set_master(pdev);
5179 
5180 	pci_enable_wake(pdev, PCI_D3hot, 0);
5181 	pci_enable_wake(pdev, PCI_D3cold, 0);
5182 
5183 	if (netif_running(netdev)) {
5184 		err = e1000_request_irq(adapter);
5185 		if (err)
5186 			return err;
5187 	}
5188 
5189 	e1000_power_up_phy(adapter);
5190 	e1000_reset(adapter);
5191 	ew32(WUS, ~0);
5192 
5193 	e1000_init_manageability(adapter);
5194 
5195 	if (netif_running(netdev))
5196 		e1000_up(adapter);
5197 
5198 	netif_device_attach(netdev);
5199 
5200 	return 0;
5201 }
5202 #endif
5203 
5204 static void e1000_shutdown(struct pci_dev *pdev)
5205 {
5206 	bool wake;
5207 
5208 	__e1000_shutdown(pdev, &wake);
5209 
5210 	if (system_state == SYSTEM_POWER_OFF) {
5211 		pci_wake_from_d3(pdev, wake);
5212 		pci_set_power_state(pdev, PCI_D3hot);
5213 	}
5214 }
5215 
5216 #ifdef CONFIG_NET_POLL_CONTROLLER
5217 /* Polling 'interrupt' - used by things like netconsole to send skbs
5218  * without having to re-enable interrupts. It's not called while
5219  * the interrupt routine is executing.
5220  */
5221 static void e1000_netpoll(struct net_device *netdev)
5222 {
5223 	struct e1000_adapter *adapter = netdev_priv(netdev);
5224 
5225 	if (disable_hardirq(adapter->pdev->irq))
5226 		e1000_intr(adapter->pdev->irq, netdev);
5227 	enable_irq(adapter->pdev->irq);
5228 }
5229 #endif
5230 
5231 /**
5232  * e1000_io_error_detected - called when PCI error is detected
5233  * @pdev: Pointer to PCI device
5234  * @state: The current pci connection state
5235  *
5236  * This function is called after a PCI bus error affecting
5237  * this device has been detected.
5238  */
5239 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5240 						pci_channel_state_t state)
5241 {
5242 	struct net_device *netdev = pci_get_drvdata(pdev);
5243 	struct e1000_adapter *adapter = netdev_priv(netdev);
5244 
5245 	netif_device_detach(netdev);
5246 
5247 	if (state == pci_channel_io_perm_failure)
5248 		return PCI_ERS_RESULT_DISCONNECT;
5249 
5250 	if (netif_running(netdev))
5251 		e1000_down(adapter);
5252 
5253 	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5254 		pci_disable_device(pdev);
5255 
	/* Request a slot reset. */
5257 	return PCI_ERS_RESULT_NEED_RESET;
5258 }
5259 
5260 /**
5261  * e1000_io_slot_reset - called after the pci bus has been reset.
5262  * @pdev: Pointer to PCI device
5263  *
5264  * Restart the card from scratch, as if from a cold-boot. Implementation
5265  * resembles the first-half of the e1000_resume routine.
5266  */
5267 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5268 {
5269 	struct net_device *netdev = pci_get_drvdata(pdev);
5270 	struct e1000_adapter *adapter = netdev_priv(netdev);
5271 	struct e1000_hw *hw = &adapter->hw;
5272 	int err;
5273 
5274 	if (adapter->need_ioport)
5275 		err = pci_enable_device(pdev);
5276 	else
5277 		err = pci_enable_device_mem(pdev);
5278 	if (err) {
5279 		pr_err("Cannot re-enable PCI device after reset.\n");
5280 		return PCI_ERS_RESULT_DISCONNECT;
5281 	}
5282 
5283 	/* flush memory to make sure state is correct */
5284 	smp_mb__before_atomic();
5285 	clear_bit(__E1000_DISABLED, &adapter->flags);
5286 	pci_set_master(pdev);
5287 
5288 	pci_enable_wake(pdev, PCI_D3hot, 0);
5289 	pci_enable_wake(pdev, PCI_D3cold, 0);
5290 
5291 	e1000_reset(adapter);
5292 	ew32(WUS, ~0);
5293 
5294 	return PCI_ERS_RESULT_RECOVERED;
5295 }
5296 
5297 /**
5298  * e1000_io_resume - called when traffic can start flowing again.
5299  * @pdev: Pointer to PCI device
5300  *
5301  * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
5303  * second-half of the e1000_resume routine.
5304  */
5305 static void e1000_io_resume(struct pci_dev *pdev)
5306 {
5307 	struct net_device *netdev = pci_get_drvdata(pdev);
5308 	struct e1000_adapter *adapter = netdev_priv(netdev);
5309 
5310 	e1000_init_manageability(adapter);
5311 
5312 	if (netif_running(netdev)) {
5313 		if (e1000_up(adapter)) {
5314 			pr_info("can't bring device back up after reset\n");
5315 			return;
5316 		}
5317 	}
5318 
5319 	netif_device_attach(netdev);
5320 }
5321 
5322 /* e1000_main.c */
5323