xref: /linux/drivers/net/ethernet/intel/e1000/e1000_main.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2006 Intel Corporation. */
3 
4 #include "e1000.h"
5 #include <net/ip6_checksum.h>
6 #include <linux/io.h>
7 #include <linux/prefetch.h>
8 #include <linux/bitops.h>
9 #include <linux/if_vlan.h>
10 
11 char e1000_driver_name[] = "e1000";
12 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
13 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
14 
15 /* e1000_pci_tbl - PCI Device ID Table
16  *
17  * Last entry must be all 0s
18  *
19  * Macro expands to...
20  *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
21  */
22 static const struct pci_device_id e1000_pci_tbl[] = {
23 	INTEL_E1000_ETHERNET_DEVICE(0x1000),
24 	INTEL_E1000_ETHERNET_DEVICE(0x1001),
25 	INTEL_E1000_ETHERNET_DEVICE(0x1004),
26 	INTEL_E1000_ETHERNET_DEVICE(0x1008),
27 	INTEL_E1000_ETHERNET_DEVICE(0x1009),
28 	INTEL_E1000_ETHERNET_DEVICE(0x100C),
29 	INTEL_E1000_ETHERNET_DEVICE(0x100D),
30 	INTEL_E1000_ETHERNET_DEVICE(0x100E),
31 	INTEL_E1000_ETHERNET_DEVICE(0x100F),
32 	INTEL_E1000_ETHERNET_DEVICE(0x1010),
33 	INTEL_E1000_ETHERNET_DEVICE(0x1011),
34 	INTEL_E1000_ETHERNET_DEVICE(0x1012),
35 	INTEL_E1000_ETHERNET_DEVICE(0x1013),
36 	INTEL_E1000_ETHERNET_DEVICE(0x1014),
37 	INTEL_E1000_ETHERNET_DEVICE(0x1015),
38 	INTEL_E1000_ETHERNET_DEVICE(0x1016),
39 	INTEL_E1000_ETHERNET_DEVICE(0x1017),
40 	INTEL_E1000_ETHERNET_DEVICE(0x1018),
41 	INTEL_E1000_ETHERNET_DEVICE(0x1019),
42 	INTEL_E1000_ETHERNET_DEVICE(0x101A),
43 	INTEL_E1000_ETHERNET_DEVICE(0x101D),
44 	INTEL_E1000_ETHERNET_DEVICE(0x101E),
45 	INTEL_E1000_ETHERNET_DEVICE(0x1026),
46 	INTEL_E1000_ETHERNET_DEVICE(0x1027),
47 	INTEL_E1000_ETHERNET_DEVICE(0x1028),
48 	INTEL_E1000_ETHERNET_DEVICE(0x1075),
49 	INTEL_E1000_ETHERNET_DEVICE(0x1076),
50 	INTEL_E1000_ETHERNET_DEVICE(0x1077),
51 	INTEL_E1000_ETHERNET_DEVICE(0x1078),
52 	INTEL_E1000_ETHERNET_DEVICE(0x1079),
53 	INTEL_E1000_ETHERNET_DEVICE(0x107A),
54 	INTEL_E1000_ETHERNET_DEVICE(0x107B),
55 	INTEL_E1000_ETHERNET_DEVICE(0x107C),
56 	INTEL_E1000_ETHERNET_DEVICE(0x108A),
57 	INTEL_E1000_ETHERNET_DEVICE(0x1099),
58 	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
59 	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
60 	/* required last entry */
61 	{0,}
62 };
63 
64 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
65 
66 int e1000_up(struct e1000_adapter *adapter);
67 void e1000_down(struct e1000_adapter *adapter);
68 void e1000_reinit_locked(struct e1000_adapter *adapter);
69 void e1000_reset(struct e1000_adapter *adapter);
70 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
71 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
72 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
73 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
74 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
75 				    struct e1000_tx_ring *txdr);
76 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
77 				    struct e1000_rx_ring *rxdr);
78 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
79 				    struct e1000_tx_ring *tx_ring);
80 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
81 				    struct e1000_rx_ring *rx_ring);
82 void e1000_update_stats(struct e1000_adapter *adapter);
83 
84 static int e1000_init_module(void);
85 static void e1000_exit_module(void);
86 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
87 static void e1000_remove(struct pci_dev *pdev);
88 static int e1000_alloc_queues(struct e1000_adapter *adapter);
89 static int e1000_sw_init(struct e1000_adapter *adapter);
90 int e1000_open(struct net_device *netdev);
91 int e1000_close(struct net_device *netdev);
92 static void e1000_configure_tx(struct e1000_adapter *adapter);
93 static void e1000_configure_rx(struct e1000_adapter *adapter);
94 static void e1000_setup_rctl(struct e1000_adapter *adapter);
95 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
96 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
97 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
98 				struct e1000_tx_ring *tx_ring);
99 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
100 				struct e1000_rx_ring *rx_ring);
101 static void e1000_set_rx_mode(struct net_device *netdev);
102 static void e1000_update_phy_info_task(struct work_struct *work);
103 static void e1000_watchdog(struct work_struct *work);
104 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
105 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
106 				    struct net_device *netdev);
107 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
108 static int e1000_set_mac(struct net_device *netdev, void *p);
109 static irqreturn_t e1000_intr(int irq, void *data);
110 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
111 			       struct e1000_tx_ring *tx_ring);
112 static int e1000_clean(struct napi_struct *napi, int budget);
113 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
114 			       struct e1000_rx_ring *rx_ring,
115 			       int *work_done, int work_to_do);
116 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
117 				     struct e1000_rx_ring *rx_ring,
118 				     int *work_done, int work_to_do);
119 static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
120 					 struct e1000_rx_ring *rx_ring,
121 					 int cleaned_count)
122 {
123 }
124 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
125 				   struct e1000_rx_ring *rx_ring,
126 				   int cleaned_count);
127 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
128 					 struct e1000_rx_ring *rx_ring,
129 					 int cleaned_count);
130 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
131 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
132 			   int cmd);
133 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
134 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
135 static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue);
136 static void e1000_reset_task(struct work_struct *work);
137 static void e1000_smartspeed(struct e1000_adapter *adapter);
138 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
139 				       struct sk_buff *skb);
140 
141 static bool e1000_vlan_used(struct e1000_adapter *adapter);
142 static void e1000_vlan_mode(struct net_device *netdev,
143 			    netdev_features_t features);
144 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
145 				     bool filter_on);
146 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
147 				 __be16 proto, u16 vid);
148 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
149 				  __be16 proto, u16 vid);
150 static void e1000_restore_vlan(struct e1000_adapter *adapter);
151 
152 static int e1000_suspend(struct device *dev);
153 static int e1000_resume(struct device *dev);
154 static void e1000_shutdown(struct pci_dev *pdev);
155 
156 #ifdef CONFIG_NET_POLL_CONTROLLER
157 /* for netdump / net console */
158 static void e1000_netpoll (struct net_device *netdev);
159 #endif
160 
161 #define COPYBREAK_DEFAULT 256
162 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
163 module_param(copybreak, uint, 0644);
164 MODULE_PARM_DESC(copybreak,
165 	"Maximum size of packet that is copied to a new buffer on receive");
166 
167 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
168 						pci_channel_state_t state);
169 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
170 static void e1000_io_resume(struct pci_dev *pdev);
171 
172 static const struct pci_error_handlers e1000_err_handler = {
173 	.error_detected = e1000_io_error_detected,
174 	.slot_reset = e1000_io_slot_reset,
175 	.resume = e1000_io_resume,
176 };
177 
178 static DEFINE_SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);
179 
180 static struct pci_driver e1000_driver = {
181 	.name     = e1000_driver_name,
182 	.id_table = e1000_pci_tbl,
183 	.probe    = e1000_probe,
184 	.remove   = e1000_remove,
185 	.driver.pm = pm_sleep_ptr(&e1000_pm_ops),
186 	.shutdown = e1000_shutdown,
187 	.err_handler = &e1000_err_handler
188 };
189 
190 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
191 MODULE_LICENSE("GPL v2");
192 
193 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
194 static int debug = -1;
195 module_param(debug, int, 0);
196 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
197 
198 /**
199  * e1000_get_hw_dev - helper function for getting netdev
200  * @hw: pointer to HW struct
201  *
202  * return device used by hardware layer to print debugging information
203  *
204  **/
205 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
206 {
207 	struct e1000_adapter *adapter = hw->back;
208 	return adapter->netdev;
209 }
210 
211 /**
212  * e1000_init_module - Driver Registration Routine
213  *
214  * e1000_init_module is the first routine called when the driver is
215  * loaded. All it does is register with the PCI subsystem.
216  **/
217 static int __init e1000_init_module(void)
218 {
219 	int ret;
220 	pr_info("%s\n", e1000_driver_string);
221 
222 	pr_info("%s\n", e1000_copyright);
223 
224 	ret = pci_register_driver(&e1000_driver);
225 	if (copybreak != COPYBREAK_DEFAULT) {
226 		if (copybreak == 0)
227 			pr_info("copybreak disabled\n");
228 		else
229 			pr_info("copybreak enabled for "
230 				   "packets <= %u bytes\n", copybreak);
231 	}
232 	return ret;
233 }
234 
235 module_init(e1000_init_module);
236 
237 /**
238  * e1000_exit_module - Driver Exit Cleanup Routine
239  *
240  * e1000_exit_module is called just before the driver is removed
241  * from memory.
242  **/
243 static void __exit e1000_exit_module(void)
244 {
245 	pci_unregister_driver(&e1000_driver);
246 }
247 
248 module_exit(e1000_exit_module);
249 
250 static int e1000_request_irq(struct e1000_adapter *adapter)
251 {
252 	struct net_device *netdev = adapter->netdev;
253 	irq_handler_t handler = e1000_intr;
254 	int irq_flags = IRQF_SHARED;
255 	int err;
256 
257 	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
258 			  netdev);
259 	if (err) {
260 		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
261 	}
262 
263 	return err;
264 }
265 
266 static void e1000_free_irq(struct e1000_adapter *adapter)
267 {
268 	struct net_device *netdev = adapter->netdev;
269 
270 	free_irq(adapter->pdev->irq, netdev);
271 }
272 
273 /**
274  * e1000_irq_disable - Mask off interrupt generation on the NIC
275  * @adapter: board private structure
276  **/
277 static void e1000_irq_disable(struct e1000_adapter *adapter)
278 {
279 	struct e1000_hw *hw = &adapter->hw;
280 
281 	ew32(IMC, ~0);
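	/* Writing all ones to the Interrupt Mask Clear register masks every
	 * interrupt source; the flush posts the write and synchronize_irq()
	 * waits for any handler already in flight to finish.
	 */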
282 	E1000_WRITE_FLUSH();
283 	synchronize_irq(adapter->pdev->irq);
284 }
285 
286 /**
287  * e1000_irq_enable - Enable default interrupt generation settings
288  * @adapter: board private structure
289  **/
290 static void e1000_irq_enable(struct e1000_adapter *adapter)
291 {
292 	struct e1000_hw *hw = &adapter->hw;
293 
294 	ew32(IMS, IMS_ENABLE_MASK);
295 	E1000_WRITE_FLUSH();
296 }
297 
298 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
299 {
300 	struct e1000_hw *hw = &adapter->hw;
301 	struct net_device *netdev = adapter->netdev;
302 	u16 vid = hw->mng_cookie.vlan_id;
303 	u16 old_vid = adapter->mng_vlan_id;
304 
305 	if (!e1000_vlan_used(adapter))
306 		return;
307 
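	/* If the manageability cookie's VLAN is not registered yet, claim it
	 * here (when the cookie supports VLAN) and release the previously
	 * tracked management VLAN once nothing else is using it.
	 */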
308 	if (!test_bit(vid, adapter->active_vlans)) {
309 		if (hw->mng_cookie.status &
310 		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
311 			e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
312 			adapter->mng_vlan_id = vid;
313 		} else {
314 			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
315 		}
316 		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
317 		    (vid != old_vid) &&
318 		    !test_bit(old_vid, adapter->active_vlans))
319 			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
320 					       old_vid);
321 	} else {
322 		adapter->mng_vlan_id = vid;
323 	}
324 }
325 
326 static void e1000_init_manageability(struct e1000_adapter *adapter)
327 {
328 	struct e1000_hw *hw = &adapter->hw;
329 
330 	if (adapter->en_mng_pt) {
331 		u32 manc = er32(MANC);
332 
333 		/* disable hardware interception of ARP */
334 		manc &= ~(E1000_MANC_ARP_EN);
335 
336 		ew32(MANC, manc);
337 	}
338 }
339 
340 static void e1000_release_manageability(struct e1000_adapter *adapter)
341 {
342 	struct e1000_hw *hw = &adapter->hw;
343 
344 	if (adapter->en_mng_pt) {
345 		u32 manc = er32(MANC);
346 
347 		/* re-enable hardware interception of ARP */
348 		manc |= E1000_MANC_ARP_EN;
349 
350 		ew32(MANC, manc);
351 	}
352 }
353 
354 /**
355  * e1000_configure - configure the hardware for RX and TX
356  * @adapter: private board structure
357  **/
358 static void e1000_configure(struct e1000_adapter *adapter)
359 {
360 	struct net_device *netdev = adapter->netdev;
361 	int i;
362 
363 	e1000_set_rx_mode(netdev);
364 
365 	e1000_restore_vlan(adapter);
366 	e1000_init_manageability(adapter);
367 
368 	e1000_configure_tx(adapter);
369 	e1000_setup_rctl(adapter);
370 	e1000_configure_rx(adapter);
371 	/* call E1000_DESC_UNUSED which always leaves
372 	 * at least 1 descriptor unused to make sure
373 	 * next_to_use != next_to_clean
374 	 */
375 	for (i = 0; i < adapter->num_rx_queues; i++) {
376 		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
377 		adapter->alloc_rx_buf(adapter, ring,
378 				      E1000_DESC_UNUSED(ring));
379 	}
380 }
381 
382 int e1000_up(struct e1000_adapter *adapter)
383 {
384 	struct e1000_hw *hw = &adapter->hw;
385 
386 	/* hardware has been reset, we need to reload some things */
387 	e1000_configure(adapter);
388 
389 	clear_bit(__E1000_DOWN, &adapter->flags);
390 
391 	napi_enable(&adapter->napi);
392 
393 	e1000_irq_enable(adapter);
394 
395 	netif_wake_queue(adapter->netdev);
396 
397 	/* fire a link change interrupt to start the watchdog */
398 	ew32(ICS, E1000_ICS_LSC);
399 	return 0;
400 }
401 
402 /**
403  * e1000_power_up_phy - restore link in case the phy was powered down
404  * @adapter: address of board private structure
405  *
406  * The phy may be powered down to save power and turn off link when the
407  * driver is unloaded and wake on lan is not enabled (among others)
408  * *** this routine MUST be followed by a call to e1000_reset ***
409  **/
410 void e1000_power_up_phy(struct e1000_adapter *adapter)
411 {
412 	struct e1000_hw *hw = &adapter->hw;
413 	u16 mii_reg = 0;
414 
415 	/* Just clear the power down bit to wake the phy back up */
416 	if (hw->media_type == e1000_media_type_copper) {
417 		/* according to the manual, the phy will retain its
418 		 * settings across a power-down/up cycle
419 		 */
420 		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
421 		mii_reg &= ~MII_CR_POWER_DOWN;
422 		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
423 	}
424 }
425 
426 static void e1000_power_down_phy(struct e1000_adapter *adapter)
427 {
428 	struct e1000_hw *hw = &adapter->hw;
429 
430 	/* Power down the PHY so no link is implied when interface is down *
431 	 * The PHY cannot be powered down if any of the following is true *
432 	 * (a) WoL is enabled
433 	 * (b) AMT is active
434 	 * (c) SoL/IDER session is active
435 	 */
436 	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
437 	   hw->media_type == e1000_media_type_copper) {
438 		u16 mii_reg = 0;
439 
440 		switch (hw->mac_type) {
441 		case e1000_82540:
442 		case e1000_82545:
443 		case e1000_82545_rev_3:
444 		case e1000_82546:
445 		case e1000_ce4100:
446 		case e1000_82546_rev_3:
447 		case e1000_82541:
448 		case e1000_82541_rev_2:
449 		case e1000_82547:
450 		case e1000_82547_rev_2:
451 			if (er32(MANC) & E1000_MANC_SMBUS_EN)
452 				goto out;
453 			break;
454 		default:
455 			goto out;
456 		}
457 		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
458 		mii_reg |= MII_CR_POWER_DOWN;
459 		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
460 		msleep(1);
461 	}
462 out:
463 	return;
464 }
465 
466 static void e1000_down_and_stop(struct e1000_adapter *adapter)
467 {
468 	set_bit(__E1000_DOWN, &adapter->flags);
469 
470 	cancel_delayed_work_sync(&adapter->watchdog_task);
471 
472 	/*
473 	 * Since the watchdog task can reschedule other tasks, we should cancel
474 	 * it first, otherwise we can run into the situation when a work is
475 	 * still running after the adapter has been turned down.
476 	 */
477 
478 	cancel_delayed_work_sync(&adapter->phy_info_task);
479 	cancel_delayed_work_sync(&adapter->fifo_stall_task);
480 
481 	/* Only kill reset task if adapter is not resetting */
482 	if (!test_bit(__E1000_RESETTING, &adapter->flags))
483 		cancel_work_sync(&adapter->reset_task);
484 }
485 
486 void e1000_down(struct e1000_adapter *adapter)
487 {
488 	struct e1000_hw *hw = &adapter->hw;
489 	struct net_device *netdev = adapter->netdev;
490 	u32 rctl, tctl;
491 
492 	/* disable receives in the hardware */
493 	rctl = er32(RCTL);
494 	ew32(RCTL, rctl & ~E1000_RCTL_EN);
495 	/* flush and sleep below */
496 
497 	netif_tx_disable(netdev);
498 
499 	/* disable transmits in the hardware */
500 	tctl = er32(TCTL);
501 	tctl &= ~E1000_TCTL_EN;
502 	ew32(TCTL, tctl);
503 	/* flush both disables and wait for them to finish */
504 	E1000_WRITE_FLUSH();
505 	msleep(10);
506 
507 	/* Set the carrier off after transmits have been disabled in the
508 	 * hardware, to avoid race conditions with e1000_watchdog() (which
509 	 * may be running concurrently to us, checking for the carrier
510 	 * bit to decide whether it should enable transmits again). Such
511 	 * a race condition would result in transmission being disabled
512 	 * in the hardware until the next IFF_DOWN+IFF_UP cycle.
513 	 */
514 	netif_carrier_off(netdev);
515 
516 	napi_disable(&adapter->napi);
517 
518 	e1000_irq_disable(adapter);
519 
520 	/* Setting DOWN must be after irq_disable to prevent
521 	 * a screaming interrupt.  Setting DOWN also prevents
522 	 * tasks from rescheduling.
523 	 */
524 	e1000_down_and_stop(adapter);
525 
526 	adapter->link_speed = 0;
527 	adapter->link_duplex = 0;
528 
529 	e1000_reset(adapter);
530 	e1000_clean_all_tx_rings(adapter);
531 	e1000_clean_all_rx_rings(adapter);
532 }
533 
534 void e1000_reinit_locked(struct e1000_adapter *adapter)
535 {
536 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
537 		msleep(1);
538 
539 	/* only run the task if not already down */
540 	if (!test_bit(__E1000_DOWN, &adapter->flags)) {
541 		e1000_down(adapter);
542 		e1000_up(adapter);
543 	}
544 
545 	clear_bit(__E1000_RESETTING, &adapter->flags);
546 }
547 
548 void e1000_reset(struct e1000_adapter *adapter)
549 {
550 	struct e1000_hw *hw = &adapter->hw;
551 	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
552 	bool legacy_pba_adjust = false;
553 	u16 hwm;
554 
555 	/* Repartition Pba for greater than 9k mtu
556 	 * To take effect CTRL.RST is required.
557 	 */
558 
559 	switch (hw->mac_type) {
560 	case e1000_82542_rev2_0:
561 	case e1000_82542_rev2_1:
562 	case e1000_82543:
563 	case e1000_82544:
564 	case e1000_82540:
565 	case e1000_82541:
566 	case e1000_82541_rev_2:
567 		legacy_pba_adjust = true;
568 		pba = E1000_PBA_48K;
569 		break;
570 	case e1000_82545:
571 	case e1000_82545_rev_3:
572 	case e1000_82546:
573 	case e1000_ce4100:
574 	case e1000_82546_rev_3:
575 		pba = E1000_PBA_48K;
576 		break;
577 	case e1000_82547:
578 	case e1000_82547_rev_2:
579 		legacy_pba_adjust = true;
580 		pba = E1000_PBA_30K;
581 		break;
582 	case e1000_undefined:
583 	case e1000_num_macs:
584 		break;
585 	}
586 
587 	if (legacy_pba_adjust) {
588 		if (hw->max_frame_size > E1000_RXBUFFER_8192)
589 			pba -= 8; /* allocate more FIFO for Tx */
590 
591 		if (hw->mac_type == e1000_82547) {
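			/* 82547 Tx FIFO stall workaround bookkeeping: the Tx
			 * FIFO is whatever part of the 40 kB packet buffer is
			 * not carved out for Rx (pba, in kB), converted to
			 * bytes below.
			 */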
592 			adapter->tx_fifo_head = 0;
593 			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
594 			adapter->tx_fifo_size =
595 				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
596 			atomic_set(&adapter->tx_fifo_stall, 0);
597 		}
598 	} else if (hw->max_frame_size >  ETH_FRAME_LEN + ETH_FCS_LEN) {
599 		/* adjust PBA for jumbo frames */
600 		ew32(PBA, pba);
601 
602 		/* To maintain wire speed transmits, the Tx FIFO should be
603 		 * large enough to accommodate two full transmit packets,
604 		 * rounded up to the next 1KB and expressed in KB.  Likewise,
605 		 * the Rx FIFO should be large enough to accommodate at least
606 		 * one full receive packet and is similarly rounded up and
607 		 * expressed in KB.
608 		 */
609 		pba = er32(PBA);
610 		/* upper 16 bits has Tx packet buffer allocation size in KB */
611 		tx_space = pba >> 16;
612 		/* lower 16 bits has Rx packet buffer allocation size in KB */
613 		pba &= 0xffff;
614 		/* the Tx fifo also stores 16 bytes of information about the Tx
615 		 * but don't include ethernet FCS because hardware appends it
616 		 */
617 		min_tx_space = (hw->max_frame_size +
618 				sizeof(struct e1000_tx_desc) -
619 				ETH_FCS_LEN) * 2;
620 		min_tx_space = ALIGN(min_tx_space, 1024);
621 		min_tx_space >>= 10;
622 		/* software strips receive CRC, so leave room for it */
623 		min_rx_space = hw->max_frame_size;
624 		min_rx_space = ALIGN(min_rx_space, 1024);
625 		min_rx_space >>= 10;
626 
627 		/* If current Tx allocation is less than the min Tx FIFO size,
628 		 * and the min Tx FIFO size is less than the current Rx FIFO
629 		 * allocation, take space away from current Rx allocation
630 		 */
631 		if (tx_space < min_tx_space &&
632 		    ((min_tx_space - tx_space) < pba)) {
633 			pba = pba - (min_tx_space - tx_space);
634 
635 			/* PCI/PCIx hardware has PBA alignment constraints */
636 			switch (hw->mac_type) {
637 			case e1000_82545 ... e1000_82546_rev_3:
638 				pba &= ~(E1000_PBA_8K - 1);
639 				break;
640 			default:
641 				break;
642 			}
643 
644 			/* if short on Rx space, Rx wins and must trump Tx
645 			 * adjustment or use Early Receive if available
646 			 */
647 			if (pba < min_rx_space)
648 				pba = min_rx_space;
649 		}
650 	}
651 
652 	ew32(PBA, pba);
653 
654 	/* flow control settings:
655 	 * The high water mark must be low enough to fit one full frame
656 	 * (or the size used for early receive) above it in the Rx FIFO.
657 	 * Set it to the lower of:
658 	 * - 90% of the Rx FIFO size, and
659 	 * - the full Rx FIFO size minus the early receive size (for parts
660 	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
661 	 * - the full Rx FIFO size minus one full frame
662 	 */
663 	hwm = min(((pba << 10) * 9 / 10),
664 		  ((pba << 10) - hw->max_frame_size));
665 
666 	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
667 	hw->fc_low_water = hw->fc_high_water - 8;
668 	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
669 	hw->fc_send_xon = 1;
670 	hw->fc = hw->original_fc;
671 
672 	/* Allow time for pending master requests to run */
673 	e1000_reset_hw(hw);
674 	if (hw->mac_type >= e1000_82544)
675 		ew32(WUC, 0);
676 
677 	if (e1000_init_hw(hw))
678 		e_dev_err("Hardware Error\n");
679 	e1000_update_mng_vlan(adapter);
680 
681 	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
682 	if (hw->mac_type >= e1000_82544 &&
683 	    hw->autoneg == 1 &&
684 	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
685 		u32 ctrl = er32(CTRL);
686 		/* clear phy power management bit if we are in gig only mode,
687 		 * which if enabled will attempt negotiation to 100Mb, which
688 		 * can cause a loss of link at power off or driver unload
689 		 */
690 		ctrl &= ~E1000_CTRL_SWDPIN3;
691 		ew32(CTRL, ctrl);
692 	}
693 
694 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
695 	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
696 
697 	e1000_reset_adaptive(hw);
698 	e1000_phy_get_info(hw, &adapter->phy_info);
699 
700 	e1000_release_manageability(adapter);
701 }
702 
703 /* Dump the eeprom for users having checksum issues */
704 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
705 {
706 	struct net_device *netdev = adapter->netdev;
707 	struct ethtool_eeprom eeprom;
708 	const struct ethtool_ops *ops = netdev->ethtool_ops;
709 	u8 *data;
710 	int i;
711 	u16 csum_old, csum_new = 0;
712 
713 	eeprom.len = ops->get_eeprom_len(netdev);
714 	eeprom.offset = 0;
715 
716 	data = kmalloc(eeprom.len, GFP_KERNEL);
717 	if (!data)
718 		return;
719 
720 	ops->get_eeprom(netdev, &eeprom, data);
721 
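	/* The checksum word at EEPROM_CHECKSUM_REG is chosen so that the
	 * 16-bit sum of all EEPROM words up to and including it equals
	 * EEPROM_SUM; recompute what that word should have been (csum_new)
	 * and report it alongside the stored value (csum_old).
	 */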
722 	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
723 		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
724 	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
725 		csum_new += data[i] + (data[i + 1] << 8);
726 	csum_new = EEPROM_SUM - csum_new;
727 
728 	pr_err("/*********************/\n");
729 	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
730 	pr_err("Calculated              : 0x%04x\n", csum_new);
731 
732 	pr_err("Offset    Values\n");
733 	pr_err("========  ======\n");
734 	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
735 
736 	pr_err("Include this output when contacting your support provider.\n");
737 	pr_err("This is not a software error! Something bad happened to\n");
738 	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
739 	pr_err("result in further problems, possibly loss of data,\n");
740 	pr_err("corruption or system hangs!\n");
741 	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
742 	pr_err("which is invalid and requires you to set the proper MAC\n");
743 	pr_err("address manually before continuing to enable this network\n");
744 	pr_err("device. Please inspect the EEPROM dump and report the\n");
745 	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
746 	pr_err("/*********************/\n");
747 
748 	kfree(data);
749 }
750 
751 /**
752  * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
753  * @pdev: PCI device information struct
754  *
755  * Return true if an adapter needs ioport resources
756  **/
757 static int e1000_is_need_ioport(struct pci_dev *pdev)
758 {
759 	switch (pdev->device) {
760 	case E1000_DEV_ID_82540EM:
761 	case E1000_DEV_ID_82540EM_LOM:
762 	case E1000_DEV_ID_82540EP:
763 	case E1000_DEV_ID_82540EP_LOM:
764 	case E1000_DEV_ID_82540EP_LP:
765 	case E1000_DEV_ID_82541EI:
766 	case E1000_DEV_ID_82541EI_MOBILE:
767 	case E1000_DEV_ID_82541ER:
768 	case E1000_DEV_ID_82541ER_LOM:
769 	case E1000_DEV_ID_82541GI:
770 	case E1000_DEV_ID_82541GI_LF:
771 	case E1000_DEV_ID_82541GI_MOBILE:
772 	case E1000_DEV_ID_82544EI_COPPER:
773 	case E1000_DEV_ID_82544EI_FIBER:
774 	case E1000_DEV_ID_82544GC_COPPER:
775 	case E1000_DEV_ID_82544GC_LOM:
776 	case E1000_DEV_ID_82545EM_COPPER:
777 	case E1000_DEV_ID_82545EM_FIBER:
778 	case E1000_DEV_ID_82546EB_COPPER:
779 	case E1000_DEV_ID_82546EB_FIBER:
780 	case E1000_DEV_ID_82546EB_QUAD_COPPER:
781 		return true;
782 	default:
783 		return false;
784 	}
785 }
786 
787 static netdev_features_t e1000_fix_features(struct net_device *netdev,
788 	netdev_features_t features)
789 {
790 	/* Since there is no support for separate Rx/Tx vlan accel
791 	 * enable/disable make sure Tx flag is always in same state as Rx.
792 	 */
793 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
794 		features |= NETIF_F_HW_VLAN_CTAG_TX;
795 	else
796 		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
797 
798 	return features;
799 }
800 
801 static int e1000_set_features(struct net_device *netdev,
802 	netdev_features_t features)
803 {
804 	struct e1000_adapter *adapter = netdev_priv(netdev);
805 	netdev_features_t changed = features ^ netdev->features;
806 
807 	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
808 		e1000_vlan_mode(netdev, features);
809 
810 	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
811 		return 0;
812 
813 	netdev->features = features;
814 	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
815 
816 	if (netif_running(netdev))
817 		e1000_reinit_locked(adapter);
818 	else
819 		e1000_reset(adapter);
820 
821 	return 1;
822 }
823 
824 static const struct net_device_ops e1000_netdev_ops = {
825 	.ndo_open		= e1000_open,
826 	.ndo_stop		= e1000_close,
827 	.ndo_start_xmit		= e1000_xmit_frame,
828 	.ndo_set_rx_mode	= e1000_set_rx_mode,
829 	.ndo_set_mac_address	= e1000_set_mac,
830 	.ndo_tx_timeout		= e1000_tx_timeout,
831 	.ndo_change_mtu		= e1000_change_mtu,
832 	.ndo_eth_ioctl		= e1000_ioctl,
833 	.ndo_validate_addr	= eth_validate_addr,
834 	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
835 	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
836 #ifdef CONFIG_NET_POLL_CONTROLLER
837 	.ndo_poll_controller	= e1000_netpoll,
838 #endif
839 	.ndo_fix_features	= e1000_fix_features,
840 	.ndo_set_features	= e1000_set_features,
841 };
842 
843 /**
844  * e1000_init_hw_struct - initialize members of hw struct
845  * @adapter: board private struct
846  * @hw: structure used by e1000_hw.c
847  *
848  * Factors out initialization of the e1000_hw struct to its own function
849  * that can be called very early at init (just after struct allocation).
850  * Fields are initialized based on PCI device information and
851  * OS network device settings (MTU size).
852  * Returns negative error codes if MAC type setup fails.
853  */
854 static int e1000_init_hw_struct(struct e1000_adapter *adapter,
855 				struct e1000_hw *hw)
856 {
857 	struct pci_dev *pdev = adapter->pdev;
858 
859 	/* PCI config space info */
860 	hw->vendor_id = pdev->vendor;
861 	hw->device_id = pdev->device;
862 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
863 	hw->subsystem_id = pdev->subsystem_device;
864 	hw->revision_id = pdev->revision;
865 
866 	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
867 
868 	hw->max_frame_size = adapter->netdev->mtu +
869 			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
870 	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
871 
872 	/* identify the MAC */
873 	if (e1000_set_mac_type(hw)) {
874 		e_err(probe, "Unknown MAC Type\n");
875 		return -EIO;
876 	}
877 
878 	switch (hw->mac_type) {
879 	default:
880 		break;
881 	case e1000_82541:
882 	case e1000_82547:
883 	case e1000_82541_rev_2:
884 	case e1000_82547_rev_2:
885 		hw->phy_init_script = 1;
886 		break;
887 	}
888 
889 	e1000_set_media_type(hw);
890 	e1000_get_bus_info(hw);
891 
892 	hw->wait_autoneg_complete = false;
893 	hw->tbi_compatibility_en = true;
894 	hw->adaptive_ifs = true;
895 
896 	/* Copper options */
897 
898 	if (hw->media_type == e1000_media_type_copper) {
899 		hw->mdix = AUTO_ALL_MODES;
900 		hw->disable_polarity_correction = false;
901 		hw->master_slave = E1000_MASTER_SLAVE;
902 	}
903 
904 	return 0;
905 }
906 
907 /**
908  * e1000_probe - Device Initialization Routine
909  * @pdev: PCI device information struct
910  * @ent: entry in e1000_pci_tbl
911  *
912  * Returns 0 on success, negative on failure
913  *
914  * e1000_probe initializes an adapter identified by a pci_dev structure.
915  * The OS initialization, configuring of the adapter private structure,
916  * and a hardware reset occur.
917  **/
918 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
919 {
920 	struct net_device *netdev;
921 	struct e1000_adapter *adapter = NULL;
922 	struct e1000_hw *hw;
923 
924 	static int cards_found;
925 	static int global_quad_port_a; /* global ksp3 port a indication */
926 	int i, err, pci_using_dac;
927 	u16 eeprom_data = 0;
928 	u16 tmp = 0;
929 	u16 eeprom_apme_mask = E1000_EEPROM_APME;
930 	int bars, need_ioport;
931 	bool disable_dev = false;
932 
933 	/* do not allocate ioport bars when not needed */
934 	need_ioport = e1000_is_need_ioport(pdev);
935 	if (need_ioport) {
936 		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
937 		err = pci_enable_device(pdev);
938 	} else {
939 		bars = pci_select_bars(pdev, IORESOURCE_MEM);
940 		err = pci_enable_device_mem(pdev);
941 	}
942 	if (err)
943 		return err;
944 
945 	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
946 	if (err)
947 		goto err_pci_reg;
948 
949 	pci_set_master(pdev);
950 	err = pci_save_state(pdev);
951 	if (err)
952 		goto err_alloc_etherdev;
953 
954 	err = -ENOMEM;
955 	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
956 	if (!netdev)
957 		goto err_alloc_etherdev;
958 
959 	SET_NETDEV_DEV(netdev, &pdev->dev);
960 
961 	pci_set_drvdata(pdev, netdev);
962 	adapter = netdev_priv(netdev);
963 	adapter->netdev = netdev;
964 	adapter->pdev = pdev;
965 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
966 	adapter->bars = bars;
967 	adapter->need_ioport = need_ioport;
968 
969 	hw = &adapter->hw;
970 	hw->back = adapter;
971 
972 	err = -EIO;
973 	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
974 	if (!hw->hw_addr)
975 		goto err_ioremap;
976 
977 	if (adapter->need_ioport) {
978 		for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
979 			if (pci_resource_len(pdev, i) == 0)
980 				continue;
981 			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
982 				hw->io_base = pci_resource_start(pdev, i);
983 				break;
984 			}
985 		}
986 	}
987 
988 	/* make ready for any if (hw->...) below */
989 	err = e1000_init_hw_struct(adapter, hw);
990 	if (err)
991 		goto err_sw_init;
992 
993 	/* there is a workaround being applied below that limits
994 	 * 64-bit DMA addresses to 64-bit hardware.  There are some
995 	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
996 	 */
997 	pci_using_dac = 0;
998 	if ((hw->bus_type == e1000_bus_type_pcix) &&
999 	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1000 		pci_using_dac = 1;
1001 	} else {
1002 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1003 		if (err) {
1004 			pr_err("No usable DMA config, aborting\n");
1005 			goto err_dma;
1006 		}
1007 	}
1008 
1009 	netdev->netdev_ops = &e1000_netdev_ops;
1010 	e1000_set_ethtool_ops(netdev);
1011 	netdev->watchdog_timeo = 5 * HZ;
1012 	netif_napi_add(netdev, &adapter->napi, e1000_clean);
1013 
1014 	strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
1015 
1016 	adapter->bd_number = cards_found;
1017 
1018 	/* setup the private structure */
1019 
1020 	err = e1000_sw_init(adapter);
1021 	if (err)
1022 		goto err_sw_init;
1023 
1024 	err = -EIO;
1025 	if (hw->mac_type == e1000_ce4100) {
1026 		hw->ce4100_gbe_mdio_base_virt =
1027 					ioremap(pci_resource_start(pdev, BAR_1),
1028 						pci_resource_len(pdev, BAR_1));
1029 
1030 		if (!hw->ce4100_gbe_mdio_base_virt)
1031 			goto err_mdio_ioremap;
1032 	}
1033 
1034 	if (hw->mac_type >= e1000_82543) {
1035 		netdev->hw_features = NETIF_F_SG |
1036 				   NETIF_F_HW_CSUM |
1037 				   NETIF_F_HW_VLAN_CTAG_RX;
1038 		netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1039 				   NETIF_F_HW_VLAN_CTAG_FILTER;
1040 	}
1041 
1042 	if ((hw->mac_type >= e1000_82544) &&
1043 	   (hw->mac_type != e1000_82547))
1044 		netdev->hw_features |= NETIF_F_TSO;
1045 
1046 	netdev->priv_flags |= IFF_SUPP_NOFCS;
1047 
1048 	netdev->features |= netdev->hw_features;
1049 	netdev->hw_features |= (NETIF_F_RXCSUM |
1050 				NETIF_F_RXALL |
1051 				NETIF_F_RXFCS);
1052 
1053 	if (pci_using_dac) {
1054 		netdev->features |= NETIF_F_HIGHDMA;
1055 		netdev->vlan_features |= NETIF_F_HIGHDMA;
1056 	}
1057 
1058 	netdev->vlan_features |= (NETIF_F_TSO |
1059 				  NETIF_F_HW_CSUM |
1060 				  NETIF_F_SG);
1061 
1062 	/* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
1063 	if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1064 	    hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
1065 		netdev->priv_flags |= IFF_UNICAST_FLT;
1066 
1067 	/* MTU range: 46 - 16110 */
1068 	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
1069 	netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
1070 
1071 	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1072 
1073 	/* initialize eeprom parameters */
1074 	if (e1000_init_eeprom_params(hw)) {
1075 		e_err(probe, "EEPROM initialization failed\n");
1076 		goto err_eeprom;
1077 	}
1078 
1079 	/* before reading the EEPROM, reset the controller to
1080 	 * put the device in a known good starting state
1081 	 */
1082 
1083 	e1000_reset_hw(hw);
1084 
1085 	/* make sure the EEPROM is good */
1086 	if (e1000_validate_eeprom_checksum(hw) < 0) {
1087 		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1088 		e1000_dump_eeprom(adapter);
1089 		/* set MAC address to all zeroes to invalidate and temporarily
1090 		 * disable this device for the user. This blocks regular
1091 		 * traffic while still permitting ethtool ioctls from reaching
1092 		 * the hardware as well as allowing the user to run the
1093 		 * interface after manually setting a hw addr using
1094 		 * `ip link set dev <iface> address <mac>`
1095 		 */
1096 		memset(hw->mac_addr, 0, netdev->addr_len);
1097 	} else {
1098 		/* copy the MAC address out of the EEPROM */
1099 		if (e1000_read_mac_addr(hw))
1100 			e_err(probe, "EEPROM Read Error\n");
1101 	}
1102 	/* don't block initialization here due to bad MAC address */
1103 	eth_hw_addr_set(netdev, hw->mac_addr);
1104 
1105 	if (!is_valid_ether_addr(netdev->dev_addr))
1106 		e_err(probe, "Invalid MAC Address\n");
1107 
1108 
1109 	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1110 	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1111 			  e1000_82547_tx_fifo_stall_task);
1112 	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1113 	INIT_WORK(&adapter->reset_task, e1000_reset_task);
1114 
1115 	e1000_check_options(adapter);
1116 
1117 	/* Initial Wake on LAN setting
1118 	 * If APM wake is enabled in the EEPROM,
1119 	 * enable the ACPI Magic Packet filter
1120 	 */
1121 
1122 	switch (hw->mac_type) {
1123 	case e1000_82542_rev2_0:
1124 	case e1000_82542_rev2_1:
1125 	case e1000_82543:
1126 		break;
1127 	case e1000_82544:
1128 		e1000_read_eeprom(hw,
1129 			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1130 		eeprom_apme_mask = E1000_EEPROM_82544_APM;
1131 		break;
1132 	case e1000_82546:
1133 	case e1000_82546_rev_3:
1134 		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1135 			e1000_read_eeprom(hw,
1136 				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1137 			break;
1138 		}
1139 		fallthrough;
1140 	default:
1141 		e1000_read_eeprom(hw,
1142 			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1143 		break;
1144 	}
1145 	if (eeprom_data & eeprom_apme_mask)
1146 		adapter->eeprom_wol |= E1000_WUFC_MAG;
1147 
1148 	/* now that we have the eeprom settings, apply the special cases
1149 	 * where the eeprom may be wrong or the board simply won't support
1150 	 * wake on lan on a particular port
1151 	 */
1152 	switch (pdev->device) {
1153 	case E1000_DEV_ID_82546GB_PCIE:
1154 		adapter->eeprom_wol = 0;
1155 		break;
1156 	case E1000_DEV_ID_82546EB_FIBER:
1157 	case E1000_DEV_ID_82546GB_FIBER:
1158 		/* Wake events only supported on port A for dual fiber
1159 		 * regardless of eeprom setting
1160 		 */
1161 		if (er32(STATUS) & E1000_STATUS_FUNC_1)
1162 			adapter->eeprom_wol = 0;
1163 		break;
1164 	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1165 		/* if quad port adapter, disable WoL on all but port A */
1166 		if (global_quad_port_a != 0)
1167 			adapter->eeprom_wol = 0;
1168 		else
1169 			adapter->quad_port_a = true;
1170 		/* Reset for multiple quad port adapters */
1171 		if (++global_quad_port_a == 4)
1172 			global_quad_port_a = 0;
1173 		break;
1174 	}
1175 
1176 	/* initialize the wol settings based on the eeprom settings */
1177 	adapter->wol = adapter->eeprom_wol;
1178 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1179 
1180 	/* Auto detect PHY address */
1181 	if (hw->mac_type == e1000_ce4100) {
1182 		for (i = 0; i < 32; i++) {
1183 			hw->phy_addr = i;
1184 			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1185 
1186 			if (tmp != 0 && tmp != 0xFF)
1187 				break;
1188 		}
1189 
1190 		if (i >= 32)
1191 			goto err_eeprom;
1192 	}
1193 
1194 	/* reset the hardware with the new settings */
1195 	e1000_reset(adapter);
1196 
1197 	strcpy(netdev->name, "eth%d");
1198 	err = register_netdev(netdev);
1199 	if (err)
1200 		goto err_register;
1201 
1202 	e1000_vlan_filter_on_off(adapter, false);
1203 
1204 	/* print bus type/speed/width info */
1205 	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1206 	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1207 	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1208 		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
1209 		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
1210 		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1211 	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1212 	       netdev->dev_addr);
1213 
1214 	/* carrier off reporting is important to ethtool even BEFORE open */
1215 	netif_carrier_off(netdev);
1216 
1217 	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1218 
1219 	cards_found++;
1220 	return 0;
1221 
1222 err_register:
1223 err_eeprom:
1224 	e1000_phy_hw_reset(hw);
1225 
1226 	if (hw->flash_address)
1227 		iounmap(hw->flash_address);
1228 	kfree(adapter->tx_ring);
1229 	kfree(adapter->rx_ring);
1230 err_dma:
1231 err_sw_init:
1232 err_mdio_ioremap:
1233 	iounmap(hw->ce4100_gbe_mdio_base_virt);
1234 	iounmap(hw->hw_addr);
1235 err_ioremap:
1236 	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1237 	free_netdev(netdev);
1238 err_alloc_etherdev:
1239 	pci_release_selected_regions(pdev, bars);
1240 err_pci_reg:
1241 	if (!adapter || disable_dev)
1242 		pci_disable_device(pdev);
1243 	return err;
1244 }
1245 
1246 /**
1247  * e1000_remove - Device Removal Routine
1248  * @pdev: PCI device information struct
1249  *
1250  * e1000_remove is called by the PCI subsystem to alert the driver
1251  * that it should release a PCI device. That could be caused by a
1252  * Hot-Plug event, or because the driver is going to be removed from
1253  * memory.
1254  **/
1255 static void e1000_remove(struct pci_dev *pdev)
1256 {
1257 	struct net_device *netdev = pci_get_drvdata(pdev);
1258 	struct e1000_adapter *adapter = netdev_priv(netdev);
1259 	struct e1000_hw *hw = &adapter->hw;
1260 	bool disable_dev;
1261 
1262 	e1000_down_and_stop(adapter);
1263 	e1000_release_manageability(adapter);
1264 
1265 	unregister_netdev(netdev);
1266 
1267 	e1000_phy_hw_reset(hw);
1268 
1269 	kfree(adapter->tx_ring);
1270 	kfree(adapter->rx_ring);
1271 
1272 	if (hw->mac_type == e1000_ce4100)
1273 		iounmap(hw->ce4100_gbe_mdio_base_virt);
1274 	iounmap(hw->hw_addr);
1275 	if (hw->flash_address)
1276 		iounmap(hw->flash_address);
1277 	pci_release_selected_regions(pdev, adapter->bars);
1278 
1279 	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1280 	free_netdev(netdev);
1281 
1282 	if (disable_dev)
1283 		pci_disable_device(pdev);
1284 }
1285 
1286 /**
1287  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1288  * @adapter: board private structure to initialize
1289  *
1290  * e1000_sw_init initializes the Adapter private data structure.
1291  * e1000_init_hw_struct MUST be called before this function
1292  **/
1293 static int e1000_sw_init(struct e1000_adapter *adapter)
1294 {
1295 	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1296 
1297 	adapter->num_tx_queues = 1;
1298 	adapter->num_rx_queues = 1;
1299 
1300 	if (e1000_alloc_queues(adapter)) {
1301 		e_err(probe, "Unable to allocate memory for queues\n");
1302 		return -ENOMEM;
1303 	}
1304 
1305 	/* Explicitly disable IRQ since the NIC can be in any state. */
1306 	e1000_irq_disable(adapter);
1307 
1308 	spin_lock_init(&adapter->stats_lock);
1309 
1310 	set_bit(__E1000_DOWN, &adapter->flags);
1311 
1312 	return 0;
1313 }
1314 
1315 /**
1316  * e1000_alloc_queues - Allocate memory for all rings
1317  * @adapter: board private structure to initialize
1318  *
1319  * We allocate one ring per queue at run-time since we don't know the
1320  * number of queues at compile-time.
1321  **/
1322 static int e1000_alloc_queues(struct e1000_adapter *adapter)
1323 {
1324 	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1325 				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
1326 	if (!adapter->tx_ring)
1327 		return -ENOMEM;
1328 
1329 	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1330 				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
1331 	if (!adapter->rx_ring) {
1332 		kfree(adapter->tx_ring);
1333 		return -ENOMEM;
1334 	}
1335 
1336 	return E1000_SUCCESS;
1337 }
1338 
1339 /**
1340  * e1000_open - Called when a network interface is made active
1341  * @netdev: network interface device structure
1342  *
1343  * Returns 0 on success, negative value on failure
1344  *
1345  * The open entry point is called when a network interface is made
1346  * active by the system (IFF_UP).  At this point all resources needed
1347  * for transmit and receive operations are allocated, the interrupt
1348  * handler is registered with the OS, the watchdog task is started,
1349  * and the stack is notified that the interface is ready.
1350  **/
1351 int e1000_open(struct net_device *netdev)
1352 {
1353 	struct e1000_adapter *adapter = netdev_priv(netdev);
1354 	struct e1000_hw *hw = &adapter->hw;
1355 	int err;
1356 
1357 	/* disallow open during test */
1358 	if (test_bit(__E1000_TESTING, &adapter->flags))
1359 		return -EBUSY;
1360 
1361 	netif_carrier_off(netdev);
1362 
1363 	/* allocate transmit descriptors */
1364 	err = e1000_setup_all_tx_resources(adapter);
1365 	if (err)
1366 		goto err_setup_tx;
1367 
1368 	/* allocate receive descriptors */
1369 	err = e1000_setup_all_rx_resources(adapter);
1370 	if (err)
1371 		goto err_setup_rx;
1372 
1373 	e1000_power_up_phy(adapter);
1374 
1375 	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1376 	if ((hw->mng_cookie.status &
1377 			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1378 		e1000_update_mng_vlan(adapter);
1379 	}
1380 
1381 	/* before we allocate an interrupt, we must be ready to handle it.
1382 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1383 	 * as soon as we call pci_request_irq, so we have to setup our
1384 	 * clean_rx handler before we do so.
1385 	 */
1386 	e1000_configure(adapter);
1387 
1388 	err = e1000_request_irq(adapter);
1389 	if (err)
1390 		goto err_req_irq;
1391 
1392 	/* From here on the code is the same as e1000_up() */
1393 	clear_bit(__E1000_DOWN, &adapter->flags);
1394 
1395 	napi_enable(&adapter->napi);
1396 
1397 	e1000_irq_enable(adapter);
1398 
1399 	netif_start_queue(netdev);
1400 
1401 	/* fire a link status change interrupt to start the watchdog */
1402 	ew32(ICS, E1000_ICS_LSC);
1403 
1404 	return E1000_SUCCESS;
1405 
1406 err_req_irq:
1407 	e1000_power_down_phy(adapter);
1408 	e1000_free_all_rx_resources(adapter);
1409 err_setup_rx:
1410 	e1000_free_all_tx_resources(adapter);
1411 err_setup_tx:
1412 	e1000_reset(adapter);
1413 
1414 	return err;
1415 }
1416 
1417 /**
1418  * e1000_close - Disables a network interface
1419  * @netdev: network interface device structure
1420  *
1421  * Returns 0, this is not allowed to fail
1422  *
1423  * The close entry point is called when an interface is de-activated
1424  * by the OS.  The hardware is still under the drivers control, but
1425  * needs to be disabled.  A global MAC reset is issued to stop the
1426  * hardware, and all transmit and receive resources are freed.
1427  **/
1428 int e1000_close(struct net_device *netdev)
1429 {
1430 	struct e1000_adapter *adapter = netdev_priv(netdev);
1431 	struct e1000_hw *hw = &adapter->hw;
1432 	int count = E1000_CHECK_RESET_COUNT;
1433 
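	/* wait (bounded by count) for any reset already in progress to finish
	 * before claiming the RESETTING bit ourselves
	 */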
1434 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
1435 		usleep_range(10000, 20000);
1436 
1437 	WARN_ON(count < 0);
1438 
1439 	/* signal that we're down so that the reset task will no longer run */
1440 	set_bit(__E1000_DOWN, &adapter->flags);
1441 	clear_bit(__E1000_RESETTING, &adapter->flags);
1442 
1443 	e1000_down(adapter);
1444 	e1000_power_down_phy(adapter);
1445 	e1000_free_irq(adapter);
1446 
1447 	e1000_free_all_tx_resources(adapter);
1448 	e1000_free_all_rx_resources(adapter);
1449 
1450 	/* kill manageability vlan ID if supported, but not if a vlan with
1451 	 * the same ID is registered on the host OS (let 8021q kill it)
1452 	 */
1453 	if ((hw->mng_cookie.status &
1454 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1455 	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1456 		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1457 				       adapter->mng_vlan_id);
1458 	}
1459 
1460 	return 0;
1461 }
1462 
1463 /**
1464  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1465  * @adapter: address of board private structure
1466  * @start: address of beginning of memory
1467  * @len: length of memory
1468  **/
1469 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1470 				  unsigned long len)
1471 {
1472 	struct e1000_hw *hw = &adapter->hw;
1473 	unsigned long begin = (unsigned long)start;
1474 	unsigned long end = begin + len;
1475 
1476 	/* First rev 82545 and 82546 need to not allow any memory
1477 	 * write location to cross 64k boundary due to errata 23
1478 	 */
1479 	if (hw->mac_type == e1000_82545 ||
1480 	    hw->mac_type == e1000_ce4100 ||
1481 	    hw->mac_type == e1000_82546) {
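		/* begin and end-1 lie in the same 64 kB region only when their
		 * bits above bit 15 match; the XOR/shift below tests exactly
		 * that, so a non-zero result means the buffer crosses a 64 kB
		 * boundary
		 */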
1482 		return ((begin ^ (end - 1)) >> 16) == 0;
1483 	}
1484 
1485 	return true;
1486 }
1487 
1488 /**
1489  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1490  * @adapter: board private structure
1491  * @txdr:    tx descriptor ring (for a specific queue) to setup
1492  *
1493  * Return 0 on success, negative on failure
1494  **/
1495 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1496 				    struct e1000_tx_ring *txdr)
1497 {
1498 	struct pci_dev *pdev = adapter->pdev;
1499 	int size;
1500 
1501 	size = sizeof(struct e1000_tx_buffer) * txdr->count;
1502 	txdr->buffer_info = vzalloc(size);
1503 	if (!txdr->buffer_info)
1504 		return -ENOMEM;
1505 
1506 	/* round up to nearest 4K */
1507 
1508 	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1509 	txdr->size = ALIGN(txdr->size, 4096);
1510 
1511 	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1512 					GFP_KERNEL);
1513 	if (!txdr->desc) {
1514 setup_tx_desc_die:
1515 		vfree(txdr->buffer_info);
1516 		return -ENOMEM;
1517 	}
1518 
1519 	/* Fix for errata 23, can't cross 64kB boundary */
1520 	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1521 		void *olddesc = txdr->desc;
1522 		dma_addr_t olddma = txdr->dma;
1523 		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1524 		      txdr->size, txdr->desc);
1525 		/* Try again, without freeing the previous */
1526 		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1527 						&txdr->dma, GFP_KERNEL);
1528 		/* Failed allocation, critical failure */
1529 		if (!txdr->desc) {
1530 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1531 					  olddma);
1532 			goto setup_tx_desc_die;
1533 		}
1534 
1535 		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1536 			/* give up */
1537 			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1538 					  txdr->dma);
1539 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1540 					  olddma);
1541 			e_err(probe, "Unable to allocate aligned memory "
1542 			      "for the transmit descriptor ring\n");
1543 			vfree(txdr->buffer_info);
1544 			return -ENOMEM;
1545 		} else {
1546 			/* Free old allocation, new allocation was successful */
1547 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1548 					  olddma);
1549 		}
1550 	}
1551 	memset(txdr->desc, 0, txdr->size);
1552 
1553 	txdr->next_to_use = 0;
1554 	txdr->next_to_clean = 0;
1555 
1556 	return 0;
1557 }
1558 
1559 /**
1560  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1561  * 				  (Descriptors) for all queues
1562  * @adapter: board private structure
1563  *
1564  * Return 0 on success, negative on failure
1565  **/
1566 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1567 {
1568 	int i, err = 0;
1569 
1570 	for (i = 0; i < adapter->num_tx_queues; i++) {
1571 		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1572 		if (err) {
1573 			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1574 			for (i-- ; i >= 0; i--)
1575 				e1000_free_tx_resources(adapter,
1576 							&adapter->tx_ring[i]);
1577 			break;
1578 		}
1579 	}
1580 
1581 	return err;
1582 }
1583 
1584 /**
1585  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1586  * @adapter: board private structure
1587  *
1588  * Configure the Tx unit of the MAC after a reset.
1589  **/
1590 static void e1000_configure_tx(struct e1000_adapter *adapter)
1591 {
1592 	u64 tdba;
1593 	struct e1000_hw *hw = &adapter->hw;
1594 	u32 tdlen, tctl, tipg;
1595 	u32 ipgr1, ipgr2;
1596 
1597 	/* Setup the HW Tx Head and Tail descriptor pointers */
1598 
1599 	switch (adapter->num_tx_queues) {
1600 	case 1:
1601 	default:
1602 		tdba = adapter->tx_ring[0].dma;
1603 		tdlen = adapter->tx_ring[0].count *
1604 			sizeof(struct e1000_tx_desc);
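		/* program ring length (in bytes) and the 64-bit base address,
		 * then zero head and tail so the ring starts out empty
		 */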
1605 		ew32(TDLEN, tdlen);
1606 		ew32(TDBAH, (tdba >> 32));
1607 		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1608 		ew32(TDT, 0);
1609 		ew32(TDH, 0);
1610 		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1611 					   E1000_TDH : E1000_82542_TDH);
1612 		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1613 					   E1000_TDT : E1000_82542_TDT);
1614 		break;
1615 	}
1616 
1617 	/* Set the default values for the Tx Inter Packet Gap timer */
1618 	if ((hw->media_type == e1000_media_type_fiber ||
1619 	     hw->media_type == e1000_media_type_internal_serdes))
1620 		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1621 	else
1622 		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1623 
1624 	switch (hw->mac_type) {
1625 	case e1000_82542_rev2_0:
1626 	case e1000_82542_rev2_1:
1627 		tipg = DEFAULT_82542_TIPG_IPGT;
1628 		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1629 		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1630 		break;
1631 	default:
1632 		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1633 		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1634 		break;
1635 	}
1636 	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1637 	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1638 	ew32(TIPG, tipg);
1639 
1640 	/* Set the Tx Interrupt Delay register */
1641 
1642 	ew32(TIDV, adapter->tx_int_delay);
1643 	if (hw->mac_type >= e1000_82540)
1644 		ew32(TADV, adapter->tx_abs_int_delay);
1645 
1646 	/* Program the Transmit Control Register */
1647 
1648 	tctl = er32(TCTL);
1649 	tctl &= ~E1000_TCTL_CT;
1650 	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1651 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1652 
1653 	e1000_config_collision_dist(hw);
1654 
1655 	/* Setup Transmit Descriptor Settings for eop descriptor */
1656 	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1657 
1658 	/* only set IDE if we are delaying interrupts using the timers */
1659 	if (adapter->tx_int_delay)
1660 		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1661 
1662 	if (hw->mac_type < e1000_82543)
1663 		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1664 	else
1665 		adapter->txd_cmd |= E1000_TXD_CMD_RS;
1666 
1667 	/* Cache if we're 82544 running in PCI-X because we'll
1668 	 * need this to apply a workaround later in the send path.
1669 	 */
1670 	if (hw->mac_type == e1000_82544 &&
1671 	    hw->bus_type == e1000_bus_type_pcix)
1672 		adapter->pcix_82544 = true;
1673 
1674 	ew32(TCTL, tctl);
1675 
1676 }
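/* Editor's note (explanatory addition, not part of the original source):
 * adapter->txd_cmd collects the command bits applied to the last data
 * descriptor of each frame: EOP marks end-of-packet, IFCS asks the MAC to
 * append the Ethernet FCS, RS (or RPS on pre-82543 parts) requests a
 * descriptor status write-back, and IDE routes the resulting interrupt
 * through the TIDV/TADV delay timers instead of firing it immediately.
 */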
1677 
1678 /**
1679  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1680  * @adapter: board private structure
1681  * @rxdr:    rx descriptor ring (for a specific queue) to setup
1682  *
1683  * Returns 0 on success, negative on failure
1684  **/
1685 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1686 				    struct e1000_rx_ring *rxdr)
1687 {
1688 	struct pci_dev *pdev = adapter->pdev;
1689 	int size, desc_len;
1690 
1691 	size = sizeof(struct e1000_rx_buffer) * rxdr->count;
1692 	rxdr->buffer_info = vzalloc(size);
1693 	if (!rxdr->buffer_info)
1694 		return -ENOMEM;
1695 
1696 	desc_len = sizeof(struct e1000_rx_desc);
1697 
1698 	/* Round up to nearest 4K */
1699 
1700 	rxdr->size = rxdr->count * desc_len;
1701 	rxdr->size = ALIGN(rxdr->size, 4096);
1702 
1703 	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1704 					GFP_KERNEL);
1705 	if (!rxdr->desc) {
1706 setup_rx_desc_die:
1707 		vfree(rxdr->buffer_info);
1708 		return -ENOMEM;
1709 	}
1710 
1711 	/* Fix for errata 23, can't cross 64kB boundary */
1712 	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1713 		void *olddesc = rxdr->desc;
1714 		dma_addr_t olddma = rxdr->dma;
1715 		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1716 		      rxdr->size, rxdr->desc);
1717 		/* Try again, without freeing the previous */
1718 		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1719 						&rxdr->dma, GFP_KERNEL);
1720 		/* Failed allocation, critical failure */
1721 		if (!rxdr->desc) {
1722 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1723 					  olddma);
1724 			goto setup_rx_desc_die;
1725 		}
1726 
1727 		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1728 			/* give up */
1729 			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1730 					  rxdr->dma);
1731 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1732 					  olddma);
1733 			e_err(probe, "Unable to allocate aligned memory for "
1734 			      "the Rx descriptor ring\n");
1735 			goto setup_rx_desc_die;
1736 		} else {
1737 			/* Free old allocation, new allocation was successful */
1738 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1739 					  olddma);
1740 		}
1741 	}
1742 	memset(rxdr->desc, 0, rxdr->size);
1743 
1744 	rxdr->next_to_clean = 0;
1745 	rxdr->next_to_use = 0;
1746 	rxdr->rx_skb_top = NULL;
1747 
1748 	return 0;
1749 }
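/* Editor's note (illustrative, not part of the original source): each
 * legacy Rx descriptor is 16 bytes, so with the driver's default of 256
 * descriptors the ring is exactly 4096 bytes and ALIGN(..., 4096) leaves
 * it unchanged; a hypothetical 80-descriptor ring (1280 bytes) would be
 * padded up to 4096.  The double allocation above implements the
 * errata-23 workaround: if the first coherent buffer straddles a 64 kB
 * boundary, a second buffer is requested while the first is still held
 * (so the allocator cannot hand back the same region); the old buffer is
 * freed once a suitable one is found, or both are freed on failure.
 */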
1750 
1751 /**
1752  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1753  * 				  (Descriptors) for all queues
1754  * @adapter: board private structure
1755  *
1756  * Return 0 on success, negative on failure
1757  **/
1758 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1759 {
1760 	int i, err = 0;
1761 
1762 	for (i = 0; i < adapter->num_rx_queues; i++) {
1763 		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1764 		if (err) {
1765 			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1766 			for (i-- ; i >= 0; i--)
1767 				e1000_free_rx_resources(adapter,
1768 							&adapter->rx_ring[i]);
1769 			break;
1770 		}
1771 	}
1772 
1773 	return err;
1774 }
1775 
1776 /**
1777  * e1000_setup_rctl - configure the receive control registers
1778  * @adapter: Board private structure
1779  **/
1780 static void e1000_setup_rctl(struct e1000_adapter *adapter)
1781 {
1782 	struct e1000_hw *hw = &adapter->hw;
1783 	u32 rctl;
1784 
1785 	rctl = er32(RCTL);
1786 
1787 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1788 
1789 	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1790 		E1000_RCTL_RDMTS_HALF |
1791 		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1792 
1793 	if (hw->tbi_compatibility_on == 1)
1794 		rctl |= E1000_RCTL_SBP;
1795 	else
1796 		rctl &= ~E1000_RCTL_SBP;
1797 
1798 	if (adapter->netdev->mtu <= ETH_DATA_LEN)
1799 		rctl &= ~E1000_RCTL_LPE;
1800 	else
1801 		rctl |= E1000_RCTL_LPE;
1802 
1803 	/* Setup buffer sizes */
1804 	rctl &= ~E1000_RCTL_SZ_4096;
1805 	rctl |= E1000_RCTL_BSEX;
1806 	switch (adapter->rx_buffer_len) {
1807 	case E1000_RXBUFFER_2048:
1808 	default:
1809 		rctl |= E1000_RCTL_SZ_2048;
1810 		rctl &= ~E1000_RCTL_BSEX;
1811 		break;
1812 	case E1000_RXBUFFER_4096:
1813 		rctl |= E1000_RCTL_SZ_4096;
1814 		break;
1815 	case E1000_RXBUFFER_8192:
1816 		rctl |= E1000_RCTL_SZ_8192;
1817 		break;
1818 	case E1000_RXBUFFER_16384:
1819 		rctl |= E1000_RCTL_SZ_16384;
1820 		break;
1821 	}
1822 
1823 	/* This is useful for sniffing bad packets. */
1824 	if (adapter->netdev->features & NETIF_F_RXALL) {
1825 		/* UPE and MPE will be handled by normal PROMISC logic
1826 		 * in e1000_set_rx_mode
1827 		 */
1828 		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1829 			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
1830 			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1831 
1832 		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1833 			  E1000_RCTL_DPF | /* Allow filtered pause */
1834 			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1835 		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1836 		 * and that breaks VLANs.
1837 		 */
1838 	}
1839 
1840 	ew32(RCTL, rctl);
1841 }
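/* Editor's note (explanatory addition, not part of the original source):
 * the 8254x encodes the Rx buffer size in RCTL.BSIZE, and BSEX scales
 * that encoding by 16.  The switch above therefore leaves BSEX set for
 * the 4096/8192/16384-byte cases and clears it only for the default
 * 2048-byte buffers, which use the unscaled encoding.
 */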
1842 
1843 /**
1844  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1845  * @adapter: board private structure
1846  *
1847  * Configure the Rx unit of the MAC after a reset.
1848  **/
1849 static void e1000_configure_rx(struct e1000_adapter *adapter)
1850 {
1851 	u64 rdba;
1852 	struct e1000_hw *hw = &adapter->hw;
1853 	u32 rdlen, rctl, rxcsum;
1854 
1855 	if (adapter->netdev->mtu > ETH_DATA_LEN) {
1856 		rdlen = adapter->rx_ring[0].count *
1857 			sizeof(struct e1000_rx_desc);
1858 		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1859 		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1860 	} else {
1861 		rdlen = adapter->rx_ring[0].count *
1862 			sizeof(struct e1000_rx_desc);
1863 		adapter->clean_rx = e1000_clean_rx_irq;
1864 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1865 	}
1866 
1867 	/* disable receives while setting up the descriptors */
1868 	rctl = er32(RCTL);
1869 	ew32(RCTL, rctl & ~E1000_RCTL_EN);
1870 
1871 	/* set the Receive Delay Timer Register */
1872 	ew32(RDTR, adapter->rx_int_delay);
1873 
1874 	if (hw->mac_type >= e1000_82540) {
1875 		ew32(RADV, adapter->rx_abs_int_delay);
1876 		if (adapter->itr_setting != 0)
1877 			ew32(ITR, 1000000000 / (adapter->itr * 256));
1878 	}
1879 
1880 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
1881 	 * the Base and Length of the Rx Descriptor Ring
1882 	 */
1883 	switch (adapter->num_rx_queues) {
1884 	case 1:
1885 	default:
1886 		rdba = adapter->rx_ring[0].dma;
1887 		ew32(RDLEN, rdlen);
1888 		ew32(RDBAH, (rdba >> 32));
1889 		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1890 		ew32(RDT, 0);
1891 		ew32(RDH, 0);
1892 		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1893 					   E1000_RDH : E1000_82542_RDH);
1894 		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1895 					   E1000_RDT : E1000_82542_RDT);
1896 		break;
1897 	}
1898 
1899 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
1900 	if (hw->mac_type >= e1000_82543) {
1901 		rxcsum = er32(RXCSUM);
1902 		if (adapter->rx_csum)
1903 			rxcsum |= E1000_RXCSUM_TUOFL;
1904 		else
1905 			/* don't need to clear IPPCSE as it defaults to 0 */
1906 			rxcsum &= ~E1000_RXCSUM_TUOFL;
1907 		ew32(RXCSUM, rxcsum);
1908 	}
1909 
1910 	/* Enable Receives */
1911 	ew32(RCTL, rctl | E1000_RCTL_EN);
1912 }
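/* Editor's note (illustrative, not part of the original source): the ITR
 * register counts in 256 ns units, so for a target of "itr" interrupts
 * per second the driver writes 1000000000 / (itr * 256).  As a rough
 * example, itr = 8000 yields a register value of about 488, i.e. a
 * minimum of roughly 125 us between interrupts.
 */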
1913 
1914 /**
1915  * e1000_free_tx_resources - Free Tx Resources per Queue
1916  * @adapter: board private structure
1917  * @tx_ring: Tx descriptor ring for a specific queue
1918  *
1919  * Free all transmit software resources
1920  **/
1921 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1922 				    struct e1000_tx_ring *tx_ring)
1923 {
1924 	struct pci_dev *pdev = adapter->pdev;
1925 
1926 	e1000_clean_tx_ring(adapter, tx_ring);
1927 
1928 	vfree(tx_ring->buffer_info);
1929 	tx_ring->buffer_info = NULL;
1930 
1931 	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1932 			  tx_ring->dma);
1933 
1934 	tx_ring->desc = NULL;
1935 }
1936 
1937 /**
1938  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1939  * @adapter: board private structure
1940  *
1941  * Free all transmit software resources
1942  **/
1943 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1944 {
1945 	int i;
1946 
1947 	for (i = 0; i < adapter->num_tx_queues; i++)
1948 		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1949 }
1950 
1951 static void
1952 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1953 				 struct e1000_tx_buffer *buffer_info,
1954 				 int budget)
1955 {
1956 	if (buffer_info->dma) {
1957 		if (buffer_info->mapped_as_page)
1958 			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1959 				       buffer_info->length, DMA_TO_DEVICE);
1960 		else
1961 			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1962 					 buffer_info->length,
1963 					 DMA_TO_DEVICE);
1964 		buffer_info->dma = 0;
1965 	}
1966 	if (buffer_info->skb) {
1967 		napi_consume_skb(buffer_info->skb, budget);
1968 		buffer_info->skb = NULL;
1969 	}
1970 	buffer_info->time_stamp = 0;
1971 	/* buffer_info must be completely set up in the transmit path */
1972 }
1973 
1974 /**
1975  * e1000_clean_tx_ring - Free Tx Buffers
1976  * @adapter: board private structure
1977  * @tx_ring: ring to be cleaned
1978  **/
1979 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1980 				struct e1000_tx_ring *tx_ring)
1981 {
1982 	struct e1000_hw *hw = &adapter->hw;
1983 	struct e1000_tx_buffer *buffer_info;
1984 	unsigned long size;
1985 	unsigned int i;
1986 
1987 	/* Free all the Tx ring sk_buffs */
1988 
1989 	for (i = 0; i < tx_ring->count; i++) {
1990 		buffer_info = &tx_ring->buffer_info[i];
1991 		e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
1992 	}
1993 
1994 	netdev_reset_queue(adapter->netdev);
1995 	size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
1996 	memset(tx_ring->buffer_info, 0, size);
1997 
1998 	/* Zero out the descriptor ring */
1999 
2000 	memset(tx_ring->desc, 0, tx_ring->size);
2001 
2002 	tx_ring->next_to_use = 0;
2003 	tx_ring->next_to_clean = 0;
2004 	tx_ring->last_tx_tso = false;
2005 
2006 	writel(0, hw->hw_addr + tx_ring->tdh);
2007 	writel(0, hw->hw_addr + tx_ring->tdt);
2008 }
2009 
2010 /**
2011  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2012  * @adapter: board private structure
2013  **/
2014 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2015 {
2016 	int i;
2017 
2018 	for (i = 0; i < adapter->num_tx_queues; i++)
2019 		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2020 }
2021 
2022 /**
2023  * e1000_free_rx_resources - Free Rx Resources
2024  * @adapter: board private structure
2025  * @rx_ring: ring to clean the resources from
2026  *
2027  * Free all receive software resources
2028  **/
2029 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2030 				    struct e1000_rx_ring *rx_ring)
2031 {
2032 	struct pci_dev *pdev = adapter->pdev;
2033 
2034 	e1000_clean_rx_ring(adapter, rx_ring);
2035 
2036 	vfree(rx_ring->buffer_info);
2037 	rx_ring->buffer_info = NULL;
2038 
2039 	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2040 			  rx_ring->dma);
2041 
2042 	rx_ring->desc = NULL;
2043 }
2044 
2045 /**
2046  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2047  * @adapter: board private structure
2048  *
2049  * Free all receive software resources
2050  **/
2051 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2052 {
2053 	int i;
2054 
2055 	for (i = 0; i < adapter->num_rx_queues; i++)
2056 		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2057 }
2058 
2059 #define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
2060 static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2061 {
2062 	return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2063 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2064 }
2065 
2066 static void *e1000_alloc_frag(const struct e1000_adapter *a)
2067 {
2068 	unsigned int len = e1000_frag_len(a);
2069 	u8 *data = netdev_alloc_frag(len);
2070 
2071 	if (likely(data))
2072 		data += E1000_HEADROOM;
2073 	return data;
2074 }
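/* Editor's note (explanatory addition, not part of the original source):
 * each receive fragment must hold NET_SKB_PAD + NET_IP_ALIGN of headroom,
 * rx_buffer_len bytes of packet data, and a struct skb_shared_info at the
 * tail so the buffer can later be handed to the stack as an skb without
 * copying.  Both pieces are rounded up with SKB_DATA_ALIGN, and
 * e1000_alloc_frag() returns a pointer just past the headroom.
 */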
2075 
2076 /**
2077  * e1000_clean_rx_ring - Free Rx Buffers per Queue
2078  * @adapter: board private structure
2079  * @rx_ring: ring to free buffers from
2080  **/
2081 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2082 				struct e1000_rx_ring *rx_ring)
2083 {
2084 	struct e1000_hw *hw = &adapter->hw;
2085 	struct e1000_rx_buffer *buffer_info;
2086 	struct pci_dev *pdev = adapter->pdev;
2087 	unsigned long size;
2088 	unsigned int i;
2089 
2090 	/* Free all the Rx netfrags */
2091 	for (i = 0; i < rx_ring->count; i++) {
2092 		buffer_info = &rx_ring->buffer_info[i];
2093 		if (adapter->clean_rx == e1000_clean_rx_irq) {
2094 			if (buffer_info->dma)
2095 				dma_unmap_single(&pdev->dev, buffer_info->dma,
2096 						 adapter->rx_buffer_len,
2097 						 DMA_FROM_DEVICE);
2098 			if (buffer_info->rxbuf.data) {
2099 				skb_free_frag(buffer_info->rxbuf.data);
2100 				buffer_info->rxbuf.data = NULL;
2101 			}
2102 		} else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2103 			if (buffer_info->dma)
2104 				dma_unmap_page(&pdev->dev, buffer_info->dma,
2105 					       adapter->rx_buffer_len,
2106 					       DMA_FROM_DEVICE);
2107 			if (buffer_info->rxbuf.page) {
2108 				put_page(buffer_info->rxbuf.page);
2109 				buffer_info->rxbuf.page = NULL;
2110 			}
2111 		}
2112 
2113 		buffer_info->dma = 0;
2114 	}
2115 
2116 	/* there also may be some cached data from a chained receive */
2117 	napi_free_frags(&adapter->napi);
2118 	rx_ring->rx_skb_top = NULL;
2119 
2120 	size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
2121 	memset(rx_ring->buffer_info, 0, size);
2122 
2123 	/* Zero out the descriptor ring */
2124 	memset(rx_ring->desc, 0, rx_ring->size);
2125 
2126 	rx_ring->next_to_clean = 0;
2127 	rx_ring->next_to_use = 0;
2128 
2129 	writel(0, hw->hw_addr + rx_ring->rdh);
2130 	writel(0, hw->hw_addr + rx_ring->rdt);
2131 }
2132 
2133 /**
2134  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2135  * @adapter: board private structure
2136  **/
2137 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2138 {
2139 	int i;
2140 
2141 	for (i = 0; i < adapter->num_rx_queues; i++)
2142 		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2143 }
2144 
2145 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2146  * and memory write and invalidate disabled for certain operations
2147  */
2148 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2149 {
2150 	struct e1000_hw *hw = &adapter->hw;
2151 	struct net_device *netdev = adapter->netdev;
2152 	u32 rctl;
2153 
2154 	e1000_pci_clear_mwi(hw);
2155 
2156 	rctl = er32(RCTL);
2157 	rctl |= E1000_RCTL_RST;
2158 	ew32(RCTL, rctl);
2159 	E1000_WRITE_FLUSH();
2160 	mdelay(5);
2161 
2162 	if (netif_running(netdev))
2163 		e1000_clean_all_rx_rings(adapter);
2164 }
2165 
2166 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2167 {
2168 	struct e1000_hw *hw = &adapter->hw;
2169 	struct net_device *netdev = adapter->netdev;
2170 	u32 rctl;
2171 
2172 	rctl = er32(RCTL);
2173 	rctl &= ~E1000_RCTL_RST;
2174 	ew32(RCTL, rctl);
2175 	E1000_WRITE_FLUSH();
2176 	mdelay(5);
2177 
2178 	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2179 		e1000_pci_set_mwi(hw);
2180 
2181 	if (netif_running(netdev)) {
2182 		/* No need to loop, because 82542 supports only 1 queue */
2183 		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2184 		e1000_configure_rx(adapter);
2185 		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2186 	}
2187 }
2188 
2189 /**
2190  * e1000_set_mac - Change the Ethernet Address of the NIC
2191  * @netdev: network interface device structure
2192  * @p: pointer to an address structure
2193  *
2194  * Returns 0 on success, negative on failure
2195  **/
2196 static int e1000_set_mac(struct net_device *netdev, void *p)
2197 {
2198 	struct e1000_adapter *adapter = netdev_priv(netdev);
2199 	struct e1000_hw *hw = &adapter->hw;
2200 	struct sockaddr *addr = p;
2201 
2202 	if (!is_valid_ether_addr(addr->sa_data))
2203 		return -EADDRNOTAVAIL;
2204 
2205 	/* 82542 2.0 needs to be in reset to write receive address registers */
2206 
2207 	if (hw->mac_type == e1000_82542_rev2_0)
2208 		e1000_enter_82542_rst(adapter);
2209 
2210 	eth_hw_addr_set(netdev, addr->sa_data);
2211 	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2212 
2213 	e1000_rar_set(hw, hw->mac_addr, 0);
2214 
2215 	if (hw->mac_type == e1000_82542_rev2_0)
2216 		e1000_leave_82542_rst(adapter);
2217 
2218 	return 0;
2219 }
2220 
2221 /**
2222  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2223  * @netdev: network interface device structure
2224  *
2225  * The set_rx_mode entry point is called whenever the unicast or multicast
2226  * address lists or the network interface flags are updated. This routine is
2227  * responsible for configuring the hardware for proper unicast, multicast,
2228  * promiscuous mode, and all-multi behavior.
2229  **/
2230 static void e1000_set_rx_mode(struct net_device *netdev)
2231 {
2232 	struct e1000_adapter *adapter = netdev_priv(netdev);
2233 	struct e1000_hw *hw = &adapter->hw;
2234 	struct netdev_hw_addr *ha;
2235 	bool use_uc = false;
2236 	u32 rctl;
2237 	u32 hash_value;
2238 	int i, rar_entries = E1000_RAR_ENTRIES;
2239 	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2240 	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2241 
2242 	if (!mcarray)
2243 		return;
2244 
2245 	/* Check for Promiscuous and All Multicast modes */
2246 
2247 	rctl = er32(RCTL);
2248 
2249 	if (netdev->flags & IFF_PROMISC) {
2250 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2251 		rctl &= ~E1000_RCTL_VFE;
2252 	} else {
2253 		if (netdev->flags & IFF_ALLMULTI)
2254 			rctl |= E1000_RCTL_MPE;
2255 		else
2256 			rctl &= ~E1000_RCTL_MPE;
2257 		/* Enable VLAN filter if there is a VLAN */
2258 		if (e1000_vlan_used(adapter))
2259 			rctl |= E1000_RCTL_VFE;
2260 	}
2261 
2262 	if (netdev_uc_count(netdev) > rar_entries - 1) {
2263 		rctl |= E1000_RCTL_UPE;
2264 	} else if (!(netdev->flags & IFF_PROMISC)) {
2265 		rctl &= ~E1000_RCTL_UPE;
2266 		use_uc = true;
2267 	}
2268 
2269 	ew32(RCTL, rctl);
2270 
2271 	/* 82542 2.0 needs to be in reset to write receive address registers */
2272 
2273 	if (hw->mac_type == e1000_82542_rev2_0)
2274 		e1000_enter_82542_rst(adapter);
2275 
2276 	/* Load the first 14 addresses into the exact filters 1-14. Unicast
2277 	 * addresses take precedence to avoid disabling unicast filtering
2278 	 * when possible.
2279 	 *
2280 	 * RAR 0 is used for the station MAC address; if there are fewer
2281 	 * than 14 addresses, go ahead and clear the unused filters.
2282 	 */
2283 	i = 1;
2284 	if (use_uc)
2285 		netdev_for_each_uc_addr(ha, netdev) {
2286 			if (i == rar_entries)
2287 				break;
2288 			e1000_rar_set(hw, ha->addr, i++);
2289 		}
2290 
2291 	netdev_for_each_mc_addr(ha, netdev) {
2292 		if (i == rar_entries) {
2293 			/* load any remaining addresses into the hash table */
2294 			u32 hash_reg, hash_bit, mta;
2295 			hash_value = e1000_hash_mc_addr(hw, ha->addr);
2296 			hash_reg = (hash_value >> 5) & 0x7F;
2297 			hash_bit = hash_value & 0x1F;
2298 			mta = (1 << hash_bit);
2299 			mcarray[hash_reg] |= mta;
2300 		} else {
2301 			e1000_rar_set(hw, ha->addr, i++);
2302 		}
2303 	}
2304 
2305 	for (; i < rar_entries; i++) {
2306 		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2307 		E1000_WRITE_FLUSH();
2308 		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2309 		E1000_WRITE_FLUSH();
2310 	}
2311 
2312 	/* Write the hash table completely; writing from the bottom avoids
2313 	 * both broken write-combining chipsets and having to flush each write.
2314 	 */
2315 	for (i = mta_reg_count - 1; i >= 0 ; i--) {
2316 		/* The 82544 has an erratum where writing odd offsets
2317 		 * overwrites the previous even offset; writing backwards
2318 		 * over the range solves the issue by always writing the
2319 		 * odd offset first.
2320 		 */
2321 		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2322 	}
2323 	E1000_WRITE_FLUSH();
2324 
2325 	if (hw->mac_type == e1000_82542_rev2_0)
2326 		e1000_leave_82542_rst(adapter);
2327 
2328 	kfree(mcarray);
2329 }
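/* Editor's note (illustrative, not part of the original source): the
 * multicast hash above spreads addresses over 128 32-bit MTA registers.
 * Bits 11:5 of the 12-bit hash select the register and the low five bits
 * select the bit within it; a hash of 0x563, for example, sets bit 3 of
 * MTA[0x2B].
 */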
2330 
2331 /**
2332  * e1000_update_phy_info_task - get phy info
2333  * @work: work struct contained inside adapter struct
2334  *
2335  * Need to wait a few seconds after link up to get diagnostic information from
2336  * the phy
2337  */
2338 static void e1000_update_phy_info_task(struct work_struct *work)
2339 {
2340 	struct e1000_adapter *adapter = container_of(work,
2341 						     struct e1000_adapter,
2342 						     phy_info_task.work);
2343 
2344 	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2345 }
2346 
2347 /**
2348  * e1000_82547_tx_fifo_stall_task - task to complete work
2349  * @work: work struct contained inside adapter struct
2350  **/
2351 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2352 {
2353 	struct e1000_adapter *adapter = container_of(work,
2354 						     struct e1000_adapter,
2355 						     fifo_stall_task.work);
2356 	struct e1000_hw *hw = &adapter->hw;
2357 	struct net_device *netdev = adapter->netdev;
2358 	u32 tctl;
2359 
2360 	if (atomic_read(&adapter->tx_fifo_stall)) {
2361 		if ((er32(TDT) == er32(TDH)) &&
2362 		   (er32(TDFT) == er32(TDFH)) &&
2363 		   (er32(TDFTS) == er32(TDFHS))) {
2364 			tctl = er32(TCTL);
2365 			ew32(TCTL, tctl & ~E1000_TCTL_EN);
2366 			ew32(TDFT, adapter->tx_head_addr);
2367 			ew32(TDFH, adapter->tx_head_addr);
2368 			ew32(TDFTS, adapter->tx_head_addr);
2369 			ew32(TDFHS, adapter->tx_head_addr);
2370 			ew32(TCTL, tctl);
2371 			E1000_WRITE_FLUSH();
2372 
2373 			adapter->tx_fifo_head = 0;
2374 			atomic_set(&adapter->tx_fifo_stall, 0);
2375 			netif_wake_queue(netdev);
2376 		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2377 			schedule_delayed_work(&adapter->fifo_stall_task, 1);
2378 		}
2379 	}
2380 }
2381 
2382 bool e1000_has_link(struct e1000_adapter *adapter)
2383 {
2384 	struct e1000_hw *hw = &adapter->hw;
2385 	bool link_active = false;
2386 
2387 	/* get_link_status is set on LSC (link status) interrupt or rx
2388 	 * sequence error interrupt (except on intel ce4100).
2389 	 * get_link_status will stay false until the
2390 	 * get_link_status will stay false until
2391 	 * ONLY
2392 	 */
2393 	switch (hw->media_type) {
2394 	case e1000_media_type_copper:
2395 		if (hw->mac_type == e1000_ce4100)
2396 			hw->get_link_status = 1;
2397 		if (hw->get_link_status) {
2398 			e1000_check_for_link(hw);
2399 			link_active = !hw->get_link_status;
2400 		} else {
2401 			link_active = true;
2402 		}
2403 		break;
2404 	case e1000_media_type_fiber:
2405 		e1000_check_for_link(hw);
2406 		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2407 		break;
2408 	case e1000_media_type_internal_serdes:
2409 		e1000_check_for_link(hw);
2410 		link_active = hw->serdes_has_link;
2411 		break;
2412 	default:
2413 		break;
2414 	}
2415 
2416 	return link_active;
2417 }
2418 
2419 /**
2420  * e1000_watchdog - work function
2421  * @work: work struct contained inside adapter struct
2422  **/
2423 static void e1000_watchdog(struct work_struct *work)
2424 {
2425 	struct e1000_adapter *adapter = container_of(work,
2426 						     struct e1000_adapter,
2427 						     watchdog_task.work);
2428 	struct e1000_hw *hw = &adapter->hw;
2429 	struct net_device *netdev = adapter->netdev;
2430 	struct e1000_tx_ring *txdr = adapter->tx_ring;
2431 	u32 link, tctl;
2432 
2433 	link = e1000_has_link(adapter);
2434 	if ((netif_carrier_ok(netdev)) && link)
2435 		goto link_up;
2436 
2437 	if (link) {
2438 		if (!netif_carrier_ok(netdev)) {
2439 			u32 ctrl;
2440 			/* update snapshot of PHY registers on LSC */
2441 			e1000_get_speed_and_duplex(hw,
2442 						   &adapter->link_speed,
2443 						   &adapter->link_duplex);
2444 
2445 			ctrl = er32(CTRL);
2446 			pr_info("%s NIC Link is Up %d Mbps %s, "
2447 				"Flow Control: %s\n",
2448 				netdev->name,
2449 				adapter->link_speed,
2450 				adapter->link_duplex == FULL_DUPLEX ?
2451 				"Full Duplex" : "Half Duplex",
2452 				((ctrl & E1000_CTRL_TFCE) && (ctrl &
2453 				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2454 				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2455 				E1000_CTRL_TFCE) ? "TX" : "None")));
2456 
2457 			/* adjust timeout factor according to speed/duplex */
2458 			adapter->tx_timeout_factor = 1;
2459 			switch (adapter->link_speed) {
2460 			case SPEED_10:
2461 				adapter->tx_timeout_factor = 16;
2462 				break;
2463 			case SPEED_100:
2464 				/* maybe add some timeout factor ? */
2465 				break;
2466 			}
2467 
2468 			/* enable transmits in the hardware */
2469 			tctl = er32(TCTL);
2470 			tctl |= E1000_TCTL_EN;
2471 			ew32(TCTL, tctl);
2472 
2473 			netif_carrier_on(netdev);
2474 			if (!test_bit(__E1000_DOWN, &adapter->flags))
2475 				schedule_delayed_work(&adapter->phy_info_task,
2476 						      2 * HZ);
2477 			adapter->smartspeed = 0;
2478 		}
2479 	} else {
2480 		if (netif_carrier_ok(netdev)) {
2481 			adapter->link_speed = 0;
2482 			adapter->link_duplex = 0;
2483 			pr_info("%s NIC Link is Down\n",
2484 				netdev->name);
2485 			netif_carrier_off(netdev);
2486 
2487 			if (!test_bit(__E1000_DOWN, &adapter->flags))
2488 				schedule_delayed_work(&adapter->phy_info_task,
2489 						      2 * HZ);
2490 		}
2491 
2492 		e1000_smartspeed(adapter);
2493 	}
2494 
2495 link_up:
2496 	e1000_update_stats(adapter);
2497 
2498 	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2499 	adapter->tpt_old = adapter->stats.tpt;
2500 	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2501 	adapter->colc_old = adapter->stats.colc;
2502 
2503 	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2504 	adapter->gorcl_old = adapter->stats.gorcl;
2505 	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2506 	adapter->gotcl_old = adapter->stats.gotcl;
2507 
2508 	e1000_update_adaptive(hw);
2509 
2510 	if (!netif_carrier_ok(netdev)) {
2511 		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2512 			/* We've lost link, so the controller stops DMA,
2513 			 * but we've got queued Tx work that's never going
2514 			 * to get done, so reset controller to flush Tx.
2515 			 * (Do the reset outside of interrupt context).
2516 			 */
2517 			adapter->tx_timeout_count++;
2518 			schedule_work(&adapter->reset_task);
2519 			/* exit immediately since reset is imminent */
2520 			return;
2521 		}
2522 	}
2523 
2524 	/* Simple mode for Interrupt Throttle Rate (ITR) */
2525 	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2526 		/* Symmetric Tx/Rx gets a reduced ITR=2000;
2527 		 * Total asymmetrical Tx or Rx gets ITR=8000;
2528 		 * everyone else is between 2000-8000.
2529 		 */
2530 		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2531 		u32 dif = (adapter->gotcl > adapter->gorcl ?
2532 			    adapter->gotcl - adapter->gorcl :
2533 			    adapter->gorcl - adapter->gotcl) / 10000;
2534 		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2535 
2536 		ew32(ITR, 1000000000 / (itr * 256));
2537 	}
2538 
2539 	/* Cause software interrupt to ensure rx ring is cleaned */
2540 	ew32(ICS, E1000_ICS_RXDMT0);
2541 
2542 	/* Force detection of hung controller every watchdog period */
2543 	adapter->detect_tx_hung = true;
2544 
2545 	/* Reschedule the task */
2546 	if (!test_bit(__E1000_DOWN, &adapter->flags))
2547 		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2548 }
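/* Editor's note (illustrative, not part of the original source): in the
 * "simple mode" ITR block above, goc is the total good octets moved over
 * the last watchdog period and dif the Tx/Rx imbalance, both in 10 kB
 * units.  Perfectly symmetric traffic (dif == 0) programs 2000 ints/s,
 * purely one-way traffic (dif == goc) programs 8000, and mixed loads land
 * in between; goc = 100 with dif = 50, for instance, gives
 * 50 * 6000 / 100 + 2000 = 5000.
 */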
2549 
2550 enum latency_range {
2551 	lowest_latency = 0,
2552 	low_latency = 1,
2553 	bulk_latency = 2,
2554 	latency_invalid = 255
2555 };
2556 
2557 /**
2558  * e1000_update_itr - update the dynamic ITR value based on statistics
2559  * @adapter: pointer to adapter
2560  * @itr_setting: current adapter->itr
2561  * @packets: the number of packets during this measurement interval
2562  * @bytes: the number of bytes during this measurement interval
2563  *
2564  *      Stores a new ITR value based on packets and byte
2565  *      counts during the last interrupt.  The advantage of per interrupt
2566  *      computation is faster updates and more accurate ITR for the current
2567  *      traffic pattern.  Constants in this function were computed
2568  *      based on theoretical maximum wire speed and thresholds were set based
2569  *      on testing data as well as attempting to minimize response time
2570  *      while increasing bulk throughput.
2571  *      this functionality is controlled by the InterruptThrottleRate module
2572  *      This functionality is controlled by the InterruptThrottleRate module
2573  **/
2574 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2575 				     u16 itr_setting, int packets, int bytes)
2576 {
2577 	unsigned int retval = itr_setting;
2578 	struct e1000_hw *hw = &adapter->hw;
2579 
2580 	if (unlikely(hw->mac_type < e1000_82540))
2581 		goto update_itr_done;
2582 
2583 	if (packets == 0)
2584 		goto update_itr_done;
2585 
2586 	switch (itr_setting) {
2587 	case lowest_latency:
2588 		/* jumbo frames get bulk treatment */
2589 		if (bytes/packets > 8000)
2590 			retval = bulk_latency;
2591 		else if ((packets < 5) && (bytes > 512))
2592 			retval = low_latency;
2593 		break;
2594 	case low_latency:  /* 50 usec aka 20000 ints/s */
2595 		if (bytes > 10000) {
2596 			/* jumbo frames need bulk latency setting */
2597 			if (bytes/packets > 8000)
2598 				retval = bulk_latency;
2599 			else if ((packets < 10) || ((bytes/packets) > 1200))
2600 				retval = bulk_latency;
2601 			else if ((packets > 35))
2602 				retval = lowest_latency;
2603 		} else if (bytes/packets > 2000)
2604 			retval = bulk_latency;
2605 		else if (packets <= 2 && bytes < 512)
2606 			retval = lowest_latency;
2607 		break;
2608 	case bulk_latency: /* 250 usec aka 4000 ints/s */
2609 		if (bytes > 25000) {
2610 			if (packets > 35)
2611 				retval = low_latency;
2612 		} else if (bytes < 6000) {
2613 			retval = low_latency;
2614 		}
2615 		break;
2616 	}
2617 
2618 update_itr_done:
2619 	return retval;
2620 }
2621 
2622 static void e1000_set_itr(struct e1000_adapter *adapter)
2623 {
2624 	struct e1000_hw *hw = &adapter->hw;
2625 	u16 current_itr;
2626 	u32 new_itr = adapter->itr;
2627 
2628 	if (unlikely(hw->mac_type < e1000_82540))
2629 		return;
2630 
2631 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2632 	if (unlikely(adapter->link_speed != SPEED_1000)) {
2633 		new_itr = 4000;
2634 		goto set_itr_now;
2635 	}
2636 
2637 	adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2638 					   adapter->total_tx_packets,
2639 					   adapter->total_tx_bytes);
2640 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2641 	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2642 		adapter->tx_itr = low_latency;
2643 
2644 	adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2645 					   adapter->total_rx_packets,
2646 					   adapter->total_rx_bytes);
2647 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2648 	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2649 		adapter->rx_itr = low_latency;
2650 
2651 	current_itr = max(adapter->rx_itr, adapter->tx_itr);
2652 
2653 	switch (current_itr) {
2654 	/* counts and packets in update_itr are dependent on these numbers */
2655 	case lowest_latency:
2656 		new_itr = 70000;
2657 		break;
2658 	case low_latency:
2659 		new_itr = 20000; /* aka hwitr = ~200 */
2660 		break;
2661 	case bulk_latency:
2662 		new_itr = 4000;
2663 		break;
2664 	default:
2665 		break;
2666 	}
2667 
2668 set_itr_now:
2669 	if (new_itr != adapter->itr) {
2670 		/* this attempts to bias the interrupt rate towards Bulk
2671 		 * by adding intermediate steps when interrupt rate is
2672 		 * increasing
2673 		 */
2674 		new_itr = new_itr > adapter->itr ?
2675 			  min(adapter->itr + (new_itr >> 2), new_itr) :
2676 			  new_itr;
2677 		adapter->itr = new_itr;
2678 		ew32(ITR, 1000000000 / (new_itr * 256));
2679 	}
2680 }
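/* Editor's note (illustrative, not part of the original source): when the
 * requested interrupt rate rises, e1000_set_itr() only moves a quarter of
 * the way toward the new target per invocation.  Going from
 * adapter->itr = 4000 to a 20000 ints/s target, for example, the first
 * pass programs min(4000 + 20000 / 4, 20000) = 9000 and later passes keep
 * ramping.
 */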
2681 
2682 #define E1000_TX_FLAGS_CSUM		0x00000001
2683 #define E1000_TX_FLAGS_VLAN		0x00000002
2684 #define E1000_TX_FLAGS_TSO		0x00000004
2685 #define E1000_TX_FLAGS_IPV4		0x00000008
2686 #define E1000_TX_FLAGS_NO_FCS		0x00000010
2687 #define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
2688 #define E1000_TX_FLAGS_VLAN_SHIFT	16
2689 
2690 static int e1000_tso(struct e1000_adapter *adapter,
2691 		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2692 		     __be16 protocol)
2693 {
2694 	struct e1000_context_desc *context_desc;
2695 	struct e1000_tx_buffer *buffer_info;
2696 	unsigned int i;
2697 	u32 cmd_length = 0;
2698 	u16 ipcse = 0, tucse, mss;
2699 	u8 ipcss, ipcso, tucss, tucso, hdr_len;
2700 
2701 	if (skb_is_gso(skb)) {
2702 		int err;
2703 
2704 		err = skb_cow_head(skb, 0);
2705 		if (err < 0)
2706 			return err;
2707 
2708 		hdr_len = skb_tcp_all_headers(skb);
2709 		mss = skb_shinfo(skb)->gso_size;
2710 		if (protocol == htons(ETH_P_IP)) {
2711 			struct iphdr *iph = ip_hdr(skb);
2712 			iph->tot_len = 0;
2713 			iph->check = 0;
2714 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2715 								 iph->daddr, 0,
2716 								 IPPROTO_TCP,
2717 								 0);
2718 			cmd_length = E1000_TXD_CMD_IP;
2719 			ipcse = skb_transport_offset(skb) - 1;
2720 		} else if (skb_is_gso_v6(skb)) {
2721 			tcp_v6_gso_csum_prep(skb);
2722 			ipcse = 0;
2723 		}
2724 		ipcss = skb_network_offset(skb);
2725 		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2726 		tucss = skb_transport_offset(skb);
2727 		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2728 		tucse = 0;
2729 
2730 		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2731 			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2732 
2733 		i = tx_ring->next_to_use;
2734 		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2735 		buffer_info = &tx_ring->buffer_info[i];
2736 
2737 		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2738 		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2739 		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2740 		context_desc->upper_setup.tcp_fields.tucss = tucss;
2741 		context_desc->upper_setup.tcp_fields.tucso = tucso;
2742 		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2743 		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2744 		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2745 		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2746 
2747 		buffer_info->time_stamp = jiffies;
2748 		buffer_info->next_to_watch = i;
2749 
2750 		if (++i == tx_ring->count)
2751 			i = 0;
2752 
2753 		tx_ring->next_to_use = i;
2754 
2755 		return true;
2756 	}
2757 	return false;
2758 }
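/* Editor's note (explanatory addition, not part of the original source):
 * the TSO context descriptor built above tells the MAC where the headers
 * live so it can replicate and fix them up for every segment:
 * ipcss/ipcso/ipcse bound the IP header and its checksum field,
 * tucss/tucso locate the TCP checksum (seeded in software with the
 * pseudo-header sum), mss and hdr_len drive the segmentation itself, and
 * the PAYLEN portion of cmd_and_length carries skb->len - hdr_len, the
 * payload still to be split.
 */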
2759 
2760 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2761 			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2762 			  __be16 protocol)
2763 {
2764 	struct e1000_context_desc *context_desc;
2765 	struct e1000_tx_buffer *buffer_info;
2766 	unsigned int i;
2767 	u8 css;
2768 	u32 cmd_len = E1000_TXD_CMD_DEXT;
2769 
2770 	if (skb->ip_summed != CHECKSUM_PARTIAL)
2771 		return false;
2772 
2773 	switch (protocol) {
2774 	case cpu_to_be16(ETH_P_IP):
2775 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2776 			cmd_len |= E1000_TXD_CMD_TCP;
2777 		break;
2778 	case cpu_to_be16(ETH_P_IPV6):
2779 		/* XXX not handling all IPV6 headers */
2780 		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2781 			cmd_len |= E1000_TXD_CMD_TCP;
2782 		break;
2783 	default:
2784 		if (unlikely(net_ratelimit()))
2785 			e_warn(drv, "checksum_partial proto=%x!\n",
2786 			       skb->protocol);
2787 		break;
2788 	}
2789 
2790 	css = skb_checksum_start_offset(skb);
2791 
2792 	i = tx_ring->next_to_use;
2793 	buffer_info = &tx_ring->buffer_info[i];
2794 	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2795 
2796 	context_desc->lower_setup.ip_config = 0;
2797 	context_desc->upper_setup.tcp_fields.tucss = css;
2798 	context_desc->upper_setup.tcp_fields.tucso =
2799 		css + skb->csum_offset;
2800 	context_desc->upper_setup.tcp_fields.tucse = 0;
2801 	context_desc->tcp_seg_setup.data = 0;
2802 	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2803 
2804 	buffer_info->time_stamp = jiffies;
2805 	buffer_info->next_to_watch = i;
2806 
2807 	if (unlikely(++i == tx_ring->count))
2808 		i = 0;
2809 
2810 	tx_ring->next_to_use = i;
2811 
2812 	return true;
2813 }
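/* Editor's note (illustrative, not part of the original source): for
 * checksum offload, tucss is where the hardware starts summing, tucso is
 * where it stores the result, and tucse = 0 means "sum to the end of the
 * packet".  For a plain untagged IPv4/TCP frame,
 * skb_checksum_start_offset() is 34 (14-byte Ethernet + 20-byte IP
 * header) and csum_offset is 16, so the result lands at byte 50, the TCP
 * checksum field.
 */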
2814 
2815 #define E1000_MAX_TXD_PWR	12
2816 #define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
2817 
2818 static int e1000_tx_map(struct e1000_adapter *adapter,
2819 			struct e1000_tx_ring *tx_ring,
2820 			struct sk_buff *skb, unsigned int first,
2821 			unsigned int max_per_txd, unsigned int nr_frags,
2822 			unsigned int mss)
2823 {
2824 	struct e1000_hw *hw = &adapter->hw;
2825 	struct pci_dev *pdev = adapter->pdev;
2826 	struct e1000_tx_buffer *buffer_info;
2827 	unsigned int len = skb_headlen(skb);
2828 	unsigned int offset = 0, size, count = 0, i;
2829 	unsigned int f, bytecount, segs;
2830 
2831 	i = tx_ring->next_to_use;
2832 
2833 	while (len) {
2834 		buffer_info = &tx_ring->buffer_info[i];
2835 		size = min(len, max_per_txd);
2836 		/* Workaround for Controller erratum --
2837 		 * descriptor for non-tso packet in a linear SKB that follows a
2838 		 * tso gets written back prematurely before the data is fully
2839 		 * DMA'd to the controller
2840 		 */
2841 		if (!skb->data_len && tx_ring->last_tx_tso &&
2842 		    !skb_is_gso(skb)) {
2843 			tx_ring->last_tx_tso = false;
2844 			size -= 4;
2845 		}
2846 
2847 		/* Workaround for premature desc write-backs
2848 		 * in TSO mode.  Append 4-byte sentinel desc
2849 		 */
2850 		if (unlikely(mss && !nr_frags && size == len && size > 8))
2851 			size -= 4;
2852 		/* Work-around for errata 10, which applies to all
2853 		 * controllers in PCI-X mode.  The fix is to make sure that
2854 		 * the first descriptor of a packet is smaller than
2855 		 * 2048 - 16 - 16 (or 2016) bytes.
2856 		 */
2857 		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2858 			     (size > 2015) && count == 0))
2859 			size = 2015;
2860 
2861 		/* Workaround for potential 82544 hang in PCI-X.  Avoid
2862 		 * terminating buffers within evenly-aligned dwords.
2863 		 */
2864 		if (unlikely(adapter->pcix_82544 &&
2865 		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2866 		   size > 4))
2867 			size -= 4;
2868 
2869 		buffer_info->length = size;
2870 		/* set time_stamp *before* dma to help avoid a possible race */
2871 		buffer_info->time_stamp = jiffies;
2872 		buffer_info->mapped_as_page = false;
2873 		buffer_info->dma = dma_map_single(&pdev->dev,
2874 						  skb->data + offset,
2875 						  size, DMA_TO_DEVICE);
2876 		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2877 			goto dma_error;
2878 		buffer_info->next_to_watch = i;
2879 
2880 		len -= size;
2881 		offset += size;
2882 		count++;
2883 		if (len) {
2884 			i++;
2885 			if (unlikely(i == tx_ring->count))
2886 				i = 0;
2887 		}
2888 	}
2889 
2890 	for (f = 0; f < nr_frags; f++) {
2891 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2892 
2893 		len = skb_frag_size(frag);
2894 		offset = 0;
2895 
2896 		while (len) {
2897 			unsigned long bufend;
2898 			i++;
2899 			if (unlikely(i == tx_ring->count))
2900 				i = 0;
2901 
2902 			buffer_info = &tx_ring->buffer_info[i];
2903 			size = min(len, max_per_txd);
2904 			/* Workaround for premature desc write-backs
2905 			 * in TSO mode.  Append 4-byte sentinel desc
2906 			 */
2907 			if (unlikely(mss && f == (nr_frags-1) &&
2908 			    size == len && size > 8))
2909 				size -= 4;
2910 			/* Workaround for potential 82544 hang in PCI-X.
2911 			 * Avoid terminating buffers within evenly-aligned
2912 			 * dwords.
2913 			 */
2914 			bufend = (unsigned long)
2915 				page_to_phys(skb_frag_page(frag));
2916 			bufend += offset + size - 1;
2917 			if (unlikely(adapter->pcix_82544 &&
2918 				     !(bufend & 4) &&
2919 				     size > 4))
2920 				size -= 4;
2921 
2922 			buffer_info->length = size;
2923 			buffer_info->time_stamp = jiffies;
2924 			buffer_info->mapped_as_page = true;
2925 			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2926 						offset, size, DMA_TO_DEVICE);
2927 			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2928 				goto dma_error;
2929 			buffer_info->next_to_watch = i;
2930 
2931 			len -= size;
2932 			offset += size;
2933 			count++;
2934 		}
2935 	}
2936 
2937 	segs = skb_shinfo(skb)->gso_segs ?: 1;
2938 	/* multiply data chunks by size of headers */
2939 	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2940 
2941 	tx_ring->buffer_info[i].skb = skb;
2942 	tx_ring->buffer_info[i].segs = segs;
2943 	tx_ring->buffer_info[i].bytecount = bytecount;
2944 	tx_ring->buffer_info[first].next_to_watch = i;
2945 
2946 	return count;
2947 
2948 dma_error:
2949 	dev_err(&pdev->dev, "TX DMA map failed\n");
2950 	buffer_info->dma = 0;
2951 	if (count)
2952 		count--;
2953 
2954 	while (count--) {
2955 		if (i == 0)
2956 			i += tx_ring->count;
2957 		i--;
2958 		buffer_info = &tx_ring->buffer_info[i];
2959 		e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
2960 	}
2961 
2962 	return 0;
2963 }
2964 
2965 static void e1000_tx_queue(struct e1000_adapter *adapter,
2966 			   struct e1000_tx_ring *tx_ring, int tx_flags,
2967 			   int count)
2968 {
2969 	struct e1000_tx_desc *tx_desc = NULL;
2970 	struct e1000_tx_buffer *buffer_info;
2971 	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2972 	unsigned int i;
2973 
2974 	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2975 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2976 			     E1000_TXD_CMD_TSE;
2977 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2978 
2979 		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2980 			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2981 	}
2982 
2983 	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2984 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2985 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2986 	}
2987 
2988 	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2989 		txd_lower |= E1000_TXD_CMD_VLE;
2990 		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2991 	}
2992 
2993 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
2994 		txd_lower &= ~(E1000_TXD_CMD_IFCS);
2995 
2996 	i = tx_ring->next_to_use;
2997 
2998 	while (count--) {
2999 		buffer_info = &tx_ring->buffer_info[i];
3000 		tx_desc = E1000_TX_DESC(*tx_ring, i);
3001 		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3002 		tx_desc->lower.data =
3003 			cpu_to_le32(txd_lower | buffer_info->length);
3004 		tx_desc->upper.data = cpu_to_le32(txd_upper);
3005 		if (unlikely(++i == tx_ring->count))
3006 			i = 0;
3007 	}
3008 
3009 	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3010 
3011 	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3012 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3013 		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3014 
3015 	/* Force memory writes to complete before letting h/w
3016 	 * know there are new descriptors to fetch.  (Only
3017 	 * applicable for weak-ordered memory model archs,
3018 	 * such as IA-64).
3019 	 */
3020 	dma_wmb();
3021 
3022 	tx_ring->next_to_use = i;
3023 }
3024 
3025 /* 82547 workaround to avoid controller hang in half-duplex environment.
3026  * The workaround is to avoid queuing a large packet that would span
3027  * the internal Tx FIFO ring boundary by notifying the stack to resend
3028  * the packet at a later time.  This gives the Tx FIFO an opportunity to
3029  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3030  * to the beginning of the Tx FIFO.
3031  */
3032 
3033 #define E1000_FIFO_HDR			0x10
3034 #define E1000_82547_PAD_LEN		0x3E0
3035 
3036 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3037 				       struct sk_buff *skb)
3038 {
3039 	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3040 	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3041 
3042 	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3043 
3044 	if (adapter->link_duplex != HALF_DUPLEX)
3045 		goto no_fifo_stall_required;
3046 
3047 	if (atomic_read(&adapter->tx_fifo_stall))
3048 		return 1;
3049 
3050 	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3051 		atomic_set(&adapter->tx_fifo_stall, 1);
3052 		return 1;
3053 	}
3054 
3055 no_fifo_stall_required:
3056 	adapter->tx_fifo_head += skb_fifo_len;
3057 	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3058 		adapter->tx_fifo_head -= adapter->tx_fifo_size;
3059 	return 0;
3060 }
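/* Editor's note (explanatory addition, not part of the original source):
 * adapter->tx_fifo_head mirrors the 82547's internal Tx FIFO write
 * pointer in software.  Each queued frame advances it by the frame length
 * plus the 16-byte FIFO header, rounded up to a 16-byte multiple, and it
 * wraps at tx_fifo_size.  In half duplex, a frame whose rounded length
 * would exceed the remaining FIFO space by E1000_82547_PAD_LEN or more is
 * deferred instead, and the fifo_stall_task later resets the hardware
 * FIFO pointers once the FIFO has drained.
 */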
3061 
3062 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3063 {
3064 	struct e1000_adapter *adapter = netdev_priv(netdev);
3065 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3066 
3067 	netif_stop_queue(netdev);
3068 	/* Herbert's original patch had:
3069 	 *  smp_mb__after_netif_stop_queue();
3070 	 * but since that doesn't exist yet, just open code it.
3071 	 */
3072 	smp_mb();
3073 
3074 	/* We need to check again in a case another CPU has just
3075 	 * made room available.
3076 	 */
3077 	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3078 		return -EBUSY;
3079 
3080 	/* A reprieve! */
3081 	netif_start_queue(netdev);
3082 	++adapter->restart_queue;
3083 	return 0;
3084 }
3085 
3086 static int e1000_maybe_stop_tx(struct net_device *netdev,
3087 			       struct e1000_tx_ring *tx_ring, int size)
3088 {
3089 	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3090 		return 0;
3091 	return __e1000_maybe_stop_tx(netdev, size);
3092 }
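/* Editor's note (explanatory addition, not part of the original source):
 * the stop-then-recheck sequence above is the usual producer-side race
 * fix: the queue is stopped first, smp_mb() orders that store against the
 * re-read of the ring state, and if the cleanup path freed descriptors in
 * the meantime the queue is restarted immediately instead of waiting for
 * the next Tx completion interrupt.
 */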
3093 
3094 #define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
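/* Editor's note (illustrative, not part of the original source):
 * TXD_USE_COUNT(S, X) computes ceil(S / 2^X).  With E1000_MAX_TXD_PWR = 12
 * each data descriptor covers at most 4096 bytes, so a 9000-byte linear
 * buffer costs TXD_USE_COUNT(9000, 12) = (9000 + 4095) >> 12 = 3
 * descriptors before any of the errata adjustments below.
 */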
3095 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3096 				    struct net_device *netdev)
3097 {
3098 	struct e1000_adapter *adapter = netdev_priv(netdev);
3099 	struct e1000_hw *hw = &adapter->hw;
3100 	struct e1000_tx_ring *tx_ring;
3101 	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3102 	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3103 	unsigned int tx_flags = 0;
3104 	unsigned int len = skb_headlen(skb);
3105 	unsigned int nr_frags;
3106 	unsigned int mss;
3107 	int count = 0;
3108 	int tso;
3109 	unsigned int f;
3110 	__be16 protocol = vlan_get_protocol(skb);
3111 
3112 	/* This goes back to the question of how to logically map a Tx queue
3113 	 * to a flow.  Right now, performance is impacted slightly negatively
3114 	 * if using multiple Tx queues.  If the stack breaks away from a
3115 	 * single qdisc implementation, we can look at this again.
3116 	 */
3117 	tx_ring = adapter->tx_ring;
3118 
3119 	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3120 	 * packets may get corrupted during padding by HW.
3121 	 * To work around this issue, pad all small packets manually.
3122 	 */
3123 	if (eth_skb_pad(skb))
3124 		return NETDEV_TX_OK;
3125 
3126 	mss = skb_shinfo(skb)->gso_size;
3127 	/* The controller does a simple calculation to make sure there
3128 	 * is enough room in the FIFO before initiating the DMA for each
3129 	 * buffer.  It assumes each buffer spans at most four MSS-sized
3130 	 * segments (ceil(buffer len / mss) <= 4), so to make sure we
3131 	 * don't overrun the FIFO, adjust the max buffer len down to
3132 	 * 4 * mss if mss drops.
3133 	 */
3134 	if (mss) {
3135 		u8 hdr_len;
3136 		max_per_txd = min(mss << 2, max_per_txd);
3137 		max_txd_pwr = fls(max_per_txd) - 1;
3138 
3139 		hdr_len = skb_tcp_all_headers(skb);
3140 		if (skb->data_len && hdr_len == len) {
3141 			switch (hw->mac_type) {
3142 			case e1000_82544: {
3143 				unsigned int pull_size;
3144 
3145 				/* Make sure we have room to chop off 4 bytes,
3146 				 * and that the end alignment will work out to
3147 				 * this hardware's requirements.
3148 				 * NOTE: this is a TSO-only workaround; if the
3149 				 * end byte alignment is not correct, move us
3150 				 * into the next dword.
3151 				 */
3152 				if ((unsigned long)(skb_tail_pointer(skb) - 1)
3153 				    & 4)
3154 					break;
3155 				pull_size = min((unsigned int)4, skb->data_len);
3156 				if (!__pskb_pull_tail(skb, pull_size)) {
3157 					e_err(drv, "__pskb_pull_tail "
3158 					      "failed.\n");
3159 					dev_kfree_skb_any(skb);
3160 					return NETDEV_TX_OK;
3161 				}
3162 				len = skb_headlen(skb);
3163 				break;
3164 			}
3165 			default:
3166 				/* do nothing */
3167 				break;
3168 			}
3169 		}
3170 	}
3171 
3172 	/* reserve a descriptor for the offload context */
3173 	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3174 		count++;
3175 	count++;
3176 
3177 	/* Controller Erratum workaround */
3178 	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3179 		count++;
3180 
3181 	count += TXD_USE_COUNT(len, max_txd_pwr);
3182 
3183 	if (adapter->pcix_82544)
3184 		count++;
3185 
3186 	/* Work-around for errata 10, which applies to all controllers
3187 	 * in PCI-X mode, so add one more descriptor to the count
3188 	 */
3189 	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3190 			(len > 2015)))
3191 		count++;
3192 
3193 	nr_frags = skb_shinfo(skb)->nr_frags;
3194 	for (f = 0; f < nr_frags; f++)
3195 		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3196 				       max_txd_pwr);
3197 	if (adapter->pcix_82544)
3198 		count += nr_frags;
3199 
3200 	/* need: count + 2 desc gap to keep tail from touching
3201 	 * head, otherwise try next time
3202 	 */
3203 	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3204 		return NETDEV_TX_BUSY;
3205 
3206 	if (unlikely((hw->mac_type == e1000_82547) &&
3207 		     (e1000_82547_fifo_workaround(adapter, skb)))) {
3208 		netif_stop_queue(netdev);
3209 		if (!test_bit(__E1000_DOWN, &adapter->flags))
3210 			schedule_delayed_work(&adapter->fifo_stall_task, 1);
3211 		return NETDEV_TX_BUSY;
3212 	}
3213 
3214 	if (skb_vlan_tag_present(skb)) {
3215 		tx_flags |= E1000_TX_FLAGS_VLAN;
3216 		tx_flags |= (skb_vlan_tag_get(skb) <<
3217 			     E1000_TX_FLAGS_VLAN_SHIFT);
3218 	}
3219 
3220 	first = tx_ring->next_to_use;
3221 
3222 	tso = e1000_tso(adapter, tx_ring, skb, protocol);
3223 	if (tso < 0) {
3224 		dev_kfree_skb_any(skb);
3225 		return NETDEV_TX_OK;
3226 	}
3227 
3228 	if (likely(tso)) {
3229 		if (likely(hw->mac_type != e1000_82544))
3230 			tx_ring->last_tx_tso = true;
3231 		tx_flags |= E1000_TX_FLAGS_TSO;
3232 	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3233 		tx_flags |= E1000_TX_FLAGS_CSUM;
3234 
3235 	if (protocol == htons(ETH_P_IP))
3236 		tx_flags |= E1000_TX_FLAGS_IPV4;
3237 
3238 	if (unlikely(skb->no_fcs))
3239 		tx_flags |= E1000_TX_FLAGS_NO_FCS;
3240 
3241 	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3242 			     nr_frags, mss);
3243 
3244 	if (count) {
3245 		/* The number of descriptors needed is higher than in other Intel
3246 		 * drivers due to a number of workarounds.  The breakdown is below:
3247 		 * Data descriptors: MAX_SKB_FRAGS + 1
3248 		 * Context Descriptor: 1
3249 		 * Keep head from touching tail: 2
3250 		 * Workarounds: 3
3251 		 */
3252 		int desc_needed = MAX_SKB_FRAGS + 7;
3253 
3254 		netdev_sent_queue(netdev, skb->len);
3255 		skb_tx_timestamp(skb);
3256 
3257 		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3258 
3259 		/* 82544 potentially requires twice as many data descriptors
3260 		 * in order to guarantee buffers don't end on evenly-aligned
3261 		 * dwords
3262 		 */
3263 		if (adapter->pcix_82544)
3264 			desc_needed += MAX_SKB_FRAGS + 1;
3265 
3266 		/* Make sure there is space in the ring for the next send. */
3267 		e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3268 
3269 		if (!netdev_xmit_more() ||
3270 		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3271 			writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3272 		}
3273 	} else {
3274 		dev_kfree_skb_any(skb);
3275 		tx_ring->buffer_info[first].time_stamp = 0;
3276 		tx_ring->next_to_use = first;
3277 	}
3278 
3279 	return NETDEV_TX_OK;
3280 }
3281 
3282 #define NUM_REGS 38 /* 1 based count */
3283 static void e1000_regdump(struct e1000_adapter *adapter)
3284 {
3285 	struct e1000_hw *hw = &adapter->hw;
3286 	u32 regs[NUM_REGS];
3287 	u32 *regs_buff = regs;
3288 	int i = 0;
3289 
3290 	static const char * const reg_name[] = {
3291 		"CTRL",  "STATUS",
3292 		"RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3293 		"TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3294 		"TIDV", "TXDCTL", "TADV", "TARC0",
3295 		"TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3296 		"TXDCTL1", "TARC1",
3297 		"CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3298 		"TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3299 		"RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3300 	};
3301 
3302 	regs_buff[0]  = er32(CTRL);
3303 	regs_buff[1]  = er32(STATUS);
3304 
3305 	regs_buff[2]  = er32(RCTL);
3306 	regs_buff[3]  = er32(RDLEN);
3307 	regs_buff[4]  = er32(RDH);
3308 	regs_buff[5]  = er32(RDT);
3309 	regs_buff[6]  = er32(RDTR);
3310 
3311 	regs_buff[7]  = er32(TCTL);
3312 	regs_buff[8]  = er32(TDBAL);
3313 	regs_buff[9]  = er32(TDBAH);
3314 	regs_buff[10] = er32(TDLEN);
3315 	regs_buff[11] = er32(TDH);
3316 	regs_buff[12] = er32(TDT);
3317 	regs_buff[13] = er32(TIDV);
3318 	regs_buff[14] = er32(TXDCTL);
3319 	regs_buff[15] = er32(TADV);
3320 	regs_buff[16] = er32(TARC0);
3321 
3322 	regs_buff[17] = er32(TDBAL1);
3323 	regs_buff[18] = er32(TDBAH1);
3324 	regs_buff[19] = er32(TDLEN1);
3325 	regs_buff[20] = er32(TDH1);
3326 	regs_buff[21] = er32(TDT1);
3327 	regs_buff[22] = er32(TXDCTL1);
3328 	regs_buff[23] = er32(TARC1);
3329 	regs_buff[24] = er32(CTRL_EXT);
3330 	regs_buff[25] = er32(ERT);
3331 	regs_buff[26] = er32(RDBAL0);
3332 	regs_buff[27] = er32(RDBAH0);
3333 	regs_buff[28] = er32(TDFH);
3334 	regs_buff[29] = er32(TDFT);
3335 	regs_buff[30] = er32(TDFHS);
3336 	regs_buff[31] = er32(TDFTS);
3337 	regs_buff[32] = er32(TDFPC);
3338 	regs_buff[33] = er32(RDFH);
3339 	regs_buff[34] = er32(RDFT);
3340 	regs_buff[35] = er32(RDFHS);
3341 	regs_buff[36] = er32(RDFTS);
3342 	regs_buff[37] = er32(RDFPC);
3343 
3344 	pr_info("Register dump\n");
3345 	for (i = 0; i < NUM_REGS; i++)
3346 		pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3347 }
3348 
3349 /*
3350  * e1000_dump: Print registers, tx ring and rx ring
3351  */
3352 static void e1000_dump(struct e1000_adapter *adapter)
3353 {
3354 	/* this code doesn't handle multiple rings */
3355 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3356 	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3357 	int i;
3358 
3359 	if (!netif_msg_hw(adapter))
3360 		return;
3361 
3362 	/* Print Registers */
3363 	e1000_regdump(adapter);
3364 
3365 	/* transmit dump */
3366 	pr_info("TX Desc ring0 dump\n");
3367 
3368 	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3369 	 *
3370 	 * Legacy Transmit Descriptor
3371 	 *   +--------------------------------------------------------------+
3372 	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3373 	 *   +--------------------------------------------------------------+
3374 	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3375 	 *   +--------------------------------------------------------------+
3376 	 *   63       48 47        36 35    32 31     24 23    16 15        0
3377 	 *
3378 	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3379 	 *   63      48 47    40 39       32 31             16 15    8 7      0
3380 	 *   +----------------------------------------------------------------+
3381 	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
3382 	 *   +----------------------------------------------------------------+
3383 	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3384 	 *   +----------------------------------------------------------------+
3385 	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3386 	 *
3387 	 * Extended Data Descriptor (DTYP=0x1)
3388 	 *   +----------------------------------------------------------------+
3389 	 * 0 |                     Buffer Address [63:0]                      |
3390 	 *   +----------------------------------------------------------------+
3391 	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3392 	 *   +----------------------------------------------------------------+
3393 	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3394 	 */
3395 	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3396 	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3397 
3398 	if (!netif_msg_tx_done(adapter))
3399 		goto rx_ring_summary;
3400 
3401 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3402 		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3403 		struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
3404 		struct my_u { __le64 a; __le64 b; };
3405 		struct my_u *u = (struct my_u *)tx_desc;
3406 		const char *type;
3407 
3408 		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3409 			type = "NTC/U";
3410 		else if (i == tx_ring->next_to_use)
3411 			type = "NTU";
3412 		else if (i == tx_ring->next_to_clean)
3413 			type = "NTC";
3414 		else
3415 			type = "";
3416 
3417 		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3418 			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3419 			le64_to_cpu(u->a), le64_to_cpu(u->b),
3420 			(u64)buffer_info->dma, buffer_info->length,
3421 			buffer_info->next_to_watch,
3422 			(u64)buffer_info->time_stamp, buffer_info->skb, type);
3423 	}
3424 
3425 rx_ring_summary:
3426 	/* receive dump */
3427 	pr_info("\nRX Desc ring dump\n");
3428 
3429 	/* Legacy Receive Descriptor Format
3430 	 *
3431 	 * +-----------------------------------------------------+
3432 	 * |                Buffer Address [63:0]                |
3433 	 * +-----------------------------------------------------+
3434 	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3435 	 * +-----------------------------------------------------+
3436 	 * 63       48 47    40 39      32 31         16 15      0
3437 	 */
3438 	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3439 
3440 	if (!netif_msg_rx_status(adapter))
3441 		goto exit;
3442 
3443 	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3444 		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3445 		struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
3446 		struct my_u { __le64 a; __le64 b; };
3447 		struct my_u *u = (struct my_u *)rx_desc;
3448 		const char *type;
3449 
3450 		if (i == rx_ring->next_to_use)
3451 			type = "NTU";
3452 		else if (i == rx_ring->next_to_clean)
3453 			type = "NTC";
3454 		else
3455 			type = "";
3456 
3457 		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3458 			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3459 			(u64)buffer_info->dma, buffer_info->rxbuf.data, type);
3460 	} /* for */
3461 
3462 	/* dump the descriptor caches */
3463 	/* rx */
3464 	pr_info("Rx descriptor cache in 64bit format\n");
3465 	for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3466 		pr_info("R%04X: %08X|%08X %08X|%08X\n",
3467 			i,
3468 			readl(adapter->hw.hw_addr + i+4),
3469 			readl(adapter->hw.hw_addr + i),
3470 			readl(adapter->hw.hw_addr + i+12),
3471 			readl(adapter->hw.hw_addr + i+8));
3472 	}
3473 	/* tx */
3474 	pr_info("Tx descriptor cache in 64bit format\n");
3475 	for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3476 		pr_info("T%04X: %08X|%08X %08X|%08X\n",
3477 			i,
3478 			readl(adapter->hw.hw_addr + i+4),
3479 			readl(adapter->hw.hw_addr + i),
3480 			readl(adapter->hw.hw_addr + i+12),
3481 			readl(adapter->hw.hw_addr + i+8));
3482 	}
3483 exit:
3484 	return;
3485 }
3486 
3487 /**
3488  * e1000_tx_timeout - Respond to a Tx Hang
3489  * @netdev: network interface device structure
3490  * @txqueue: number of the Tx queue that hung (unused)
3491  **/
3492 static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
3493 {
3494 	struct e1000_adapter *adapter = netdev_priv(netdev);
3495 
3496 	/* Do the reset outside of interrupt context */
3497 	adapter->tx_timeout_count++;
3498 	schedule_work(&adapter->reset_task);
3499 }
3500 
3501 static void e1000_reset_task(struct work_struct *work)
3502 {
3503 	struct e1000_adapter *adapter =
3504 		container_of(work, struct e1000_adapter, reset_task);
3505 
3506 	e_err(drv, "Reset adapter\n");
3507 	e1000_reinit_locked(adapter);
3508 }
3509 
3510 /**
3511  * e1000_change_mtu - Change the Maximum Transfer Unit
3512  * @netdev: network interface device structure
3513  * @new_mtu: new value for maximum frame size
3514  *
3515  * Returns 0 on success, negative on failure
3516  **/
3517 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3518 {
3519 	struct e1000_adapter *adapter = netdev_priv(netdev);
3520 	struct e1000_hw *hw = &adapter->hw;
3521 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3522 
3523 	/* Adapter-specific max frame size limits. */
3524 	switch (hw->mac_type) {
3525 	case e1000_undefined ... e1000_82542_rev2_1:
3526 		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3527 			e_err(probe, "Jumbo Frames not supported.\n");
3528 			return -EINVAL;
3529 		}
3530 		break;
3531 	default:
3532 		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3533 		break;
3534 	}
3535 
3536 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3537 		msleep(1);
3538 	/* e1000_down has a dependency on max_frame_size */
3539 	hw->max_frame_size = max_frame;
3540 	if (netif_running(netdev)) {
3541 		/* prevent buffers from being reallocated */
3542 		adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
3543 		e1000_down(adapter);
3544 	}
3545 
3546 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3547 	 * means we reserve 2 more, this pushes us to allocate from the next
3548 	 * larger slab size.
3549 	 * i.e. RXBUFFER_2048 --> size-4096 slab
3550 	 * however with the new *_jumbo_rx* routines, jumbo receives will use
3551 	 * fragmented skbs
3552 	 */
3553 
3554 	if (max_frame <= E1000_RXBUFFER_2048)
3555 		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3556 	else
3557 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3558 		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3559 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3560 		adapter->rx_buffer_len = PAGE_SIZE;
3561 #endif
3562 
3563 	/* adjust allocation if LPE protects us, and we aren't using SBP */
3564 	if (!hw->tbi_compatibility_on &&
3565 	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3566 	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3567 		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3568 
3569 	netdev_dbg(netdev, "changing MTU from %d to %d\n",
3570 		   netdev->mtu, new_mtu);
3571 	WRITE_ONCE(netdev->mtu, new_mtu);
3572 
3573 	if (netif_running(netdev))
3574 		e1000_up(adapter);
3575 	else
3576 		e1000_reset(adapter);
3577 
3578 	clear_bit(__E1000_RESETTING, &adapter->flags);
3579 
3580 	return 0;
3581 }
3582 
3583 /**
3584  * e1000_update_stats - Update the board statistics counters
3585  * @adapter: board private structure
3586  **/
3587 void e1000_update_stats(struct e1000_adapter *adapter)
3588 {
3589 	struct net_device *netdev = adapter->netdev;
3590 	struct e1000_hw *hw = &adapter->hw;
3591 	struct pci_dev *pdev = adapter->pdev;
3592 	unsigned long flags;
3593 	u16 phy_tmp;
3594 
3595 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3596 
3597 	/* Prevent stats update while adapter is being reset, or if the pci
3598 	 * connection is down.
3599 	 */
3600 	if (adapter->link_speed == 0)
3601 		return;
3602 	if (pci_channel_offline(pdev))
3603 		return;
3604 
3605 	spin_lock_irqsave(&adapter->stats_lock, flags);
3606 
3607 	/* these counters are modified from e1000_tbi_adjust_stats,
3608 	 * called from the interrupt context, so they must only
3609 	 * be written while holding adapter->stats_lock
3610 	 */
3611 
3612 	adapter->stats.crcerrs += er32(CRCERRS);
3613 	adapter->stats.gprc += er32(GPRC);
3614 	adapter->stats.gorcl += er32(GORCL);
3615 	adapter->stats.gorch += er32(GORCH);
3616 	adapter->stats.bprc += er32(BPRC);
3617 	adapter->stats.mprc += er32(MPRC);
3618 	adapter->stats.roc += er32(ROC);
3619 
3620 	adapter->stats.prc64 += er32(PRC64);
3621 	adapter->stats.prc127 += er32(PRC127);
3622 	adapter->stats.prc255 += er32(PRC255);
3623 	adapter->stats.prc511 += er32(PRC511);
3624 	adapter->stats.prc1023 += er32(PRC1023);
3625 	adapter->stats.prc1522 += er32(PRC1522);
3626 
3627 	adapter->stats.symerrs += er32(SYMERRS);
3628 	adapter->stats.mpc += er32(MPC);
3629 	adapter->stats.scc += er32(SCC);
3630 	adapter->stats.ecol += er32(ECOL);
3631 	adapter->stats.mcc += er32(MCC);
3632 	adapter->stats.latecol += er32(LATECOL);
3633 	adapter->stats.dc += er32(DC);
3634 	adapter->stats.sec += er32(SEC);
3635 	adapter->stats.rlec += er32(RLEC);
3636 	adapter->stats.xonrxc += er32(XONRXC);
3637 	adapter->stats.xontxc += er32(XONTXC);
3638 	adapter->stats.xoffrxc += er32(XOFFRXC);
3639 	adapter->stats.xofftxc += er32(XOFFTXC);
3640 	adapter->stats.fcruc += er32(FCRUC);
3641 	adapter->stats.gptc += er32(GPTC);
3642 	adapter->stats.gotcl += er32(GOTCL);
3643 	adapter->stats.gotch += er32(GOTCH);
3644 	adapter->stats.rnbc += er32(RNBC);
3645 	adapter->stats.ruc += er32(RUC);
3646 	adapter->stats.rfc += er32(RFC);
3647 	adapter->stats.rjc += er32(RJC);
3648 	adapter->stats.torl += er32(TORL);
3649 	adapter->stats.torh += er32(TORH);
3650 	adapter->stats.totl += er32(TOTL);
3651 	adapter->stats.toth += er32(TOTH);
3652 	adapter->stats.tpr += er32(TPR);
3653 
3654 	adapter->stats.ptc64 += er32(PTC64);
3655 	adapter->stats.ptc127 += er32(PTC127);
3656 	adapter->stats.ptc255 += er32(PTC255);
3657 	adapter->stats.ptc511 += er32(PTC511);
3658 	adapter->stats.ptc1023 += er32(PTC1023);
3659 	adapter->stats.ptc1522 += er32(PTC1522);
3660 
3661 	adapter->stats.mptc += er32(MPTC);
3662 	adapter->stats.bptc += er32(BPTC);
3663 
3664 	/* used for adaptive IFS */
3665 
3666 	hw->tx_packet_delta = er32(TPT);
3667 	adapter->stats.tpt += hw->tx_packet_delta;
3668 	hw->collision_delta = er32(COLC);
3669 	adapter->stats.colc += hw->collision_delta;
3670 
3671 	if (hw->mac_type >= e1000_82543) {
3672 		adapter->stats.algnerrc += er32(ALGNERRC);
3673 		adapter->stats.rxerrc += er32(RXERRC);
3674 		adapter->stats.tncrs += er32(TNCRS);
3675 		adapter->stats.cexterr += er32(CEXTERR);
3676 		adapter->stats.tsctc += er32(TSCTC);
3677 		adapter->stats.tsctfc += er32(TSCTFC);
3678 	}
3679 
3680 	/* Fill out the OS statistics structure */
3681 	netdev->stats.multicast = adapter->stats.mprc;
3682 	netdev->stats.collisions = adapter->stats.colc;
3683 
3684 	/* Rx Errors */
3685 
3686 	/* RLEC on some newer hardware can be incorrect so build
3687 	 * our own version based on RUC and ROC
3688 	 */
3689 	netdev->stats.rx_errors = adapter->stats.rxerrc +
3690 		adapter->stats.crcerrs + adapter->stats.algnerrc +
3691 		adapter->stats.ruc + adapter->stats.roc +
3692 		adapter->stats.cexterr;
3693 	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3694 	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3695 	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3696 	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3697 	netdev->stats.rx_missed_errors = adapter->stats.mpc;
3698 
3699 	/* Tx Errors */
3700 	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3701 	netdev->stats.tx_errors = adapter->stats.txerrc;
3702 	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3703 	netdev->stats.tx_window_errors = adapter->stats.latecol;
3704 	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3705 	if (hw->bad_tx_carr_stats_fd &&
3706 	    adapter->link_duplex == FULL_DUPLEX) {
3707 		netdev->stats.tx_carrier_errors = 0;
3708 		adapter->stats.tncrs = 0;
3709 	}
3710 
3711 	/* Tx Dropped needs to be maintained elsewhere */
3712 
3713 	/* Phy Stats */
3714 	if (hw->media_type == e1000_media_type_copper) {
3715 		if ((adapter->link_speed == SPEED_1000) &&
3716 		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3717 			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3718 			adapter->phy_stats.idle_errors += phy_tmp;
3719 		}
3720 
3721 		if ((hw->mac_type <= e1000_82546) &&
3722 		   (hw->phy_type == e1000_phy_m88) &&
3723 		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3724 			adapter->phy_stats.receive_errors += phy_tmp;
3725 	}
3726 
3727 	/* Management Stats */
3728 	if (hw->has_smbus) {
3729 		adapter->stats.mgptc += er32(MGTPTC);
3730 		adapter->stats.mgprc += er32(MGTPRC);
3731 		adapter->stats.mgpdc += er32(MGTPDC);
3732 	}
3733 
3734 	spin_unlock_irqrestore(&adapter->stats_lock, flags);
3735 }
3736 
3737 /**
3738  * e1000_intr - Interrupt Handler
3739  * @irq: interrupt number
3740  * @data: pointer to a network interface device structure
3741  **/
3742 static irqreturn_t e1000_intr(int irq, void *data)
3743 {
3744 	struct net_device *netdev = data;
3745 	struct e1000_adapter *adapter = netdev_priv(netdev);
3746 	struct e1000_hw *hw = &adapter->hw;
3747 	u32 icr = er32(ICR);
3748 
3749 	if (unlikely((!icr)))
3750 		return IRQ_NONE;  /* Not our interrupt */
3751 
3752 	/* we might have caused the interrupt, but the above
3753 	 * read cleared it; if the driver happens to be
3754 	 * down there is nothing to do, so return handled
3755 	 */
3756 	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3757 		return IRQ_HANDLED;
3758 
3759 	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3760 		hw->get_link_status = 1;
3761 		/* guard against interrupt when we're going down */
3762 		if (!test_bit(__E1000_DOWN, &adapter->flags))
3763 			schedule_delayed_work(&adapter->watchdog_task, 1);
3764 	}
3765 
3766 	/* disable interrupts, without the synchronize_irq bit */
3767 	ew32(IMC, ~0);
3768 	E1000_WRITE_FLUSH();
3769 
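	/* Interrupts stay masked from here until the NAPI poll
	 * (e1000_clean) finishes and re-enables them via e1000_irq_enable().
	 */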
3770 	if (likely(napi_schedule_prep(&adapter->napi))) {
3771 		adapter->total_tx_bytes = 0;
3772 		adapter->total_tx_packets = 0;
3773 		adapter->total_rx_bytes = 0;
3774 		adapter->total_rx_packets = 0;
3775 		__napi_schedule(&adapter->napi);
3776 	} else {
3777 		/* this really should not happen! if it does it is basically a
3778 		 * bug, but not a hard error, so enable ints and continue
3779 		 */
3780 		if (!test_bit(__E1000_DOWN, &adapter->flags))
3781 			e1000_irq_enable(adapter);
3782 	}
3783 
3784 	return IRQ_HANDLED;
3785 }
3786 
3787 /**
3788  * e1000_clean - NAPI Rx polling callback
3789  * @napi: napi struct containing references to driver info
3790  * @budget: budget given to driver for receive packets
3791  **/
3792 static int e1000_clean(struct napi_struct *napi, int budget)
3793 {
3794 	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3795 						     napi);
3796 	int tx_clean_complete = 0, work_done = 0;
3797 
3798 	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3799 
3800 	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3801 
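	/* If Tx cleaning did not finish or the Rx budget was exhausted,
	 * return the full budget so NAPI keeps this device on the poll list.
	 */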
3802 	if (!tx_clean_complete || work_done == budget)
3803 		return budget;
3804 
3805 	/* Exit the polling mode, but don't re-enable interrupts if stack might
3806 	 * poll us due to busy-polling
3807 	 */
3808 	if (likely(napi_complete_done(napi, work_done))) {
3809 		if (likely(adapter->itr_setting & 3))
3810 			e1000_set_itr(adapter);
3811 		if (!test_bit(__E1000_DOWN, &adapter->flags))
3812 			e1000_irq_enable(adapter);
3813 	}
3814 
3815 	return work_done;
3816 }
3817 
3818 /**
3819  * e1000_clean_tx_irq - Reclaim resources after transmit completes
3820  * @adapter: board private structure
3821  * @tx_ring: ring to clean
3822  **/
3823 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3824 			       struct e1000_tx_ring *tx_ring)
3825 {
3826 	struct e1000_hw *hw = &adapter->hw;
3827 	struct net_device *netdev = adapter->netdev;
3828 	struct e1000_tx_desc *tx_desc, *eop_desc;
3829 	struct e1000_tx_buffer *buffer_info;
3830 	unsigned int i, eop;
3831 	unsigned int count = 0;
3832 	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3833 	unsigned int bytes_compl = 0, pkts_compl = 0;
3834 
3835 	i = tx_ring->next_to_clean;
3836 	eop = tx_ring->buffer_info[i].next_to_watch;
3837 	eop_desc = E1000_TX_DESC(*tx_ring, eop);
3838 
3839 	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3840 	       (count < tx_ring->count)) {
3841 		bool cleaned = false;
3842 		dma_rmb();	/* read buffer_info after eop_desc */
3843 		for ( ; !cleaned; count++) {
3844 			tx_desc = E1000_TX_DESC(*tx_ring, i);
3845 			buffer_info = &tx_ring->buffer_info[i];
3846 			cleaned = (i == eop);
3847 
3848 			if (cleaned) {
3849 				total_tx_packets += buffer_info->segs;
3850 				total_tx_bytes += buffer_info->bytecount;
3851 				if (buffer_info->skb) {
3852 					bytes_compl += buffer_info->skb->len;
3853 					pkts_compl++;
3854 				}
3855 
3856 			}
3857 			e1000_unmap_and_free_tx_resource(adapter, buffer_info,
3858 							 64);
3859 			tx_desc->upper.data = 0;
3860 
3861 			if (unlikely(++i == tx_ring->count))
3862 				i = 0;
3863 		}
3864 
3865 		eop = tx_ring->buffer_info[i].next_to_watch;
3866 		eop_desc = E1000_TX_DESC(*tx_ring, eop);
3867 	}
3868 
3869 	/* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
3870 	 * which will reuse the cleaned buffers.
3871 	 */
3872 	smp_store_release(&tx_ring->next_to_clean, i);
3873 
3874 	netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3875 
3876 #define TX_WAKE_THRESHOLD 32
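	/* Only wake a stopped queue once at least TX_WAKE_THRESHOLD
	 * descriptors are free again, which avoids stop/wake thrashing.
	 */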
3877 	if (unlikely(count && netif_carrier_ok(netdev) &&
3878 		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3879 		/* Make sure that anybody stopping the queue after this
3880 		 * sees the new next_to_clean.
3881 		 */
3882 		smp_mb();
3883 
3884 		if (netif_queue_stopped(netdev) &&
3885 		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
3886 			netif_wake_queue(netdev);
3887 			++adapter->restart_queue;
3888 		}
3889 	}
3890 
3891 	if (adapter->detect_tx_hung) {
3892 		/* Detect a transmit hang in hardware, this serializes the
3893 		 * check with the clearing of time_stamp and movement of i
3894 		 */
3895 		adapter->detect_tx_hung = false;
3896 		if (tx_ring->buffer_info[eop].time_stamp &&
3897 		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3898 			       (adapter->tx_timeout_factor * HZ)) &&
3899 		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3900 
3901 			/* detected Tx unit hang */
3902 			e_err(drv, "Detected Tx Unit Hang\n"
3903 			      "  Tx Queue             <%lu>\n"
3904 			      "  TDH                  <%x>\n"
3905 			      "  TDT                  <%x>\n"
3906 			      "  next_to_use          <%x>\n"
3907 			      "  next_to_clean        <%x>\n"
3908 			      "buffer_info[next_to_clean]\n"
3909 			      "  time_stamp           <%lx>\n"
3910 			      "  next_to_watch        <%x>\n"
3911 			      "  jiffies              <%lx>\n"
3912 			      "  next_to_watch.status <%x>\n",
3913 				(unsigned long)(tx_ring - adapter->tx_ring),
3914 				readl(hw->hw_addr + tx_ring->tdh),
3915 				readl(hw->hw_addr + tx_ring->tdt),
3916 				tx_ring->next_to_use,
3917 				tx_ring->next_to_clean,
3918 				tx_ring->buffer_info[eop].time_stamp,
3919 				eop,
3920 				jiffies,
3921 				eop_desc->upper.fields.status);
3922 			e1000_dump(adapter);
3923 			netif_stop_queue(netdev);
3924 		}
3925 	}
3926 	adapter->total_tx_bytes += total_tx_bytes;
3927 	adapter->total_tx_packets += total_tx_packets;
3928 	netdev->stats.tx_bytes += total_tx_bytes;
3929 	netdev->stats.tx_packets += total_tx_packets;
3930 	return count < tx_ring->count;
3931 }
3932 
3933 /**
3934  * e1000_rx_checksum - Receive Checksum Offload for 82543
3935  * @adapter:     board private structure
3936  * @status_err:  receive descriptor status and error fields
3937  * @csum:        receive descriptor csum field
3938  * @skb:         socket buffer with received data
3939  **/
3940 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3941 			      u32 csum, struct sk_buff *skb)
3942 {
3943 	struct e1000_hw *hw = &adapter->hw;
3944 	u16 status = (u16)status_err;
3945 	u8 errors = (u8)(status_err >> 24);
3946 
3947 	skb_checksum_none_assert(skb);
3948 
3949 	/* 82543 or newer only */
3950 	if (unlikely(hw->mac_type < e1000_82543))
3951 		return;
3952 	/* Ignore Checksum bit is set */
3953 	if (unlikely(status & E1000_RXD_STAT_IXSM))
3954 		return;
3955 	/* TCP/UDP checksum error bit is set */
3956 	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3957 		/* let the stack verify checksum errors */
3958 		adapter->hw_csum_err++;
3959 		return;
3960 	}
3961 	/* TCP/UDP Checksum has not been calculated */
3962 	if (!(status & E1000_RXD_STAT_TCPCS))
3963 		return;
3964 
3965 	/* It must be a TCP or UDP packet with a valid checksum */
3966 	if (likely(status & E1000_RXD_STAT_TCPCS)) {
3967 		/* TCP checksum is good */
3968 		skb->ip_summed = CHECKSUM_UNNECESSARY;
3969 	}
3970 	adapter->hw_csum_good++;
3971 }
3972 
3973 /**
3974  * e1000_consume_page - helper function for jumbo Rx path
3975  * @bi: software descriptor shadow data
3976  * @skb: skb being modified
3977  * @length: length of data being added
3978  **/
3979 static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
3980 			       u16 length)
3981 {
3982 	bi->rxbuf.page = NULL;
3983 	skb->len += length;
3984 	skb->data_len += length;
3985 	skb->truesize += PAGE_SIZE;
3986 }
3987 
3988 /**
3989  * e1000_receive_skb - helper function to handle rx indications
3990  * @adapter: board private structure
3991  * @status: descriptor status field as written by hardware
3992  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3993  * @skb: pointer to sk_buff to be indicated to stack
3994  */
3995 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3996 			      __le16 vlan, struct sk_buff *skb)
3997 {
3998 	skb->protocol = eth_type_trans(skb, adapter->netdev);
3999 
4000 	if (status & E1000_RXD_STAT_VP) {
4001 		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4002 
4003 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4004 	}
4005 	napi_gro_receive(&adapter->napi, skb);
4006 }
4007 
4008 /**
4009  * e1000_tbi_adjust_stats
4010  * @hw: Struct containing variables accessed by shared code
4011  * @stats: point to stats struct
4012  * @frame_len: The length of the frame in question
4013  * @mac_addr: The Ethernet destination address of the frame in question
4014  *
4015  * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
4016  */
4017 static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4018 				   struct e1000_hw_stats *stats,
4019 				   u32 frame_len, const u8 *mac_addr)
4020 {
4021 	u64 carry_bit;
4022 
4023 	/* First adjust the frame length. */
4024 	frame_len--;
4025 	/* We need to adjust the statistics counters, since the hardware
4026 	 * counters overcount this packet as a CRC error and undercount
4027 	 * the packet as a good packet
4028 	 */
4029 	/* This packet should not be counted as a CRC error. */
4030 	stats->crcerrs--;
4031 	/* This packet does count as a Good Packet Received. */
4032 	stats->gprc++;
4033 
4034 	/* Adjust the Good Octets received counters */
4035 	carry_bit = 0x80000000 & stats->gorcl;
4036 	stats->gorcl += frame_len;
4037 	/* If the high bit of Gorcl (the low 32 bits of the Good Octets
4038 	 * Received Count) was one before the addition,
4039 	 * AND it is zero after, then we lost the carry out,
4040 	 * need to add one to Gorch (Good Octets Received Count High).
4041 	 * This could be simplified if all environments supported
4042 	 * 64-bit integers.
4043 	 */
4044 	if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4045 		stats->gorch++;
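	/* With a native 64-bit counter this would be a single addition;
	 * gorcl/gorch mirror the 32-bit hardware register pair, so the
	 * carry has to be propagated by hand as above.
	 */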
4046 	/* Is this a broadcast or multicast?  Check broadcast first,
4047 	 * since the test for a multicast frame will test positive on
4048 	 * a broadcast frame.
4049 	 */
4050 	if (is_broadcast_ether_addr(mac_addr))
4051 		stats->bprc++;
4052 	else if (is_multicast_ether_addr(mac_addr))
4053 		stats->mprc++;
4054 
4055 	if (frame_len == hw->max_frame_size) {
4056 		/* In this case, the hardware has overcounted the number of
4057 		 * oversize frames.
4058 		 */
4059 		if (stats->roc > 0)
4060 			stats->roc--;
4061 	}
4062 
4063 	/* Adjust the bin counters when the extra byte put the frame in the
4064 	 * wrong bin. Remember that the frame_len was adjusted above.
4065 	 */
4066 	if (frame_len == 64) {
4067 		stats->prc64++;
4068 		stats->prc127--;
4069 	} else if (frame_len == 127) {
4070 		stats->prc127++;
4071 		stats->prc255--;
4072 	} else if (frame_len == 255) {
4073 		stats->prc255++;
4074 		stats->prc511--;
4075 	} else if (frame_len == 511) {
4076 		stats->prc511++;
4077 		stats->prc1023--;
4078 	} else if (frame_len == 1023) {
4079 		stats->prc1023++;
4080 		stats->prc1522--;
4081 	} else if (frame_len == 1522) {
4082 		stats->prc1522++;
4083 	}
4084 }
4085 
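/* Decide whether a frame reported with errors should still be accepted under
 * the TBI compatibility workaround; if so, the statistics counters are
 * corrected so the frame is counted as a good receive.
 */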
4086 static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4087 				    u8 status, u8 errors,
4088 				    u32 length, const u8 *data)
4089 {
4090 	struct e1000_hw *hw = &adapter->hw;
4091 	u8 last_byte = *(data + length - 1);
4092 
4093 	if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4094 		unsigned long irq_flags;
4095 
4096 		spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4097 		e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4098 		spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4099 
4100 		return true;
4101 	}
4102 
4103 	return false;
4104 }
4105 
4106 static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4107 					  unsigned int bufsz)
4108 {
4109 	struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4110 
4111 	if (unlikely(!skb))
4112 		adapter->alloc_rx_buff_failed++;
4113 	return skb;
4114 }
4115 
4116 /**
4117  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4118  * @adapter: board private structure
4119  * @rx_ring: ring to clean
4120  * @work_done: amount of napi work completed this call
4121  * @work_to_do: max amount of work allowed for this call to do
4122  *
4123  * the return value indicates whether actual cleaning was done; there
4124  * is no guarantee that everything was cleaned
4125  */
4126 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4127 				     struct e1000_rx_ring *rx_ring,
4128 				     int *work_done, int work_to_do)
4129 {
4130 	struct net_device *netdev = adapter->netdev;
4131 	struct pci_dev *pdev = adapter->pdev;
4132 	struct e1000_rx_desc *rx_desc, *next_rxd;
4133 	struct e1000_rx_buffer *buffer_info, *next_buffer;
4134 	u32 length;
4135 	unsigned int i;
4136 	int cleaned_count = 0;
4137 	bool cleaned = false;
4138 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4139 
4140 	i = rx_ring->next_to_clean;
4141 	rx_desc = E1000_RX_DESC(*rx_ring, i);
4142 	buffer_info = &rx_ring->buffer_info[i];
4143 
4144 	while (rx_desc->status & E1000_RXD_STAT_DD) {
4145 		struct sk_buff *skb;
4146 		u8 status;
4147 
4148 		if (*work_done >= work_to_do)
4149 			break;
4150 		(*work_done)++;
4151 		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4152 
4153 		status = rx_desc->status;
4154 
4155 		if (++i == rx_ring->count)
4156 			i = 0;
4157 
4158 		next_rxd = E1000_RX_DESC(*rx_ring, i);
4159 		prefetch(next_rxd);
4160 
4161 		next_buffer = &rx_ring->buffer_info[i];
4162 
4163 		cleaned = true;
4164 		cleaned_count++;
4165 		dma_unmap_page(&pdev->dev, buffer_info->dma,
4166 			       adapter->rx_buffer_len, DMA_FROM_DEVICE);
4167 		buffer_info->dma = 0;
4168 
4169 		length = le16_to_cpu(rx_desc->length);
4170 
4171 		/* errors is only valid for DD + EOP descriptors */
4172 		if (unlikely((status & E1000_RXD_STAT_EOP) &&
4173 		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4174 			u8 *mapped = page_address(buffer_info->rxbuf.page);
4175 
4176 			if (e1000_tbi_should_accept(adapter, status,
4177 						    rx_desc->errors,
4178 						    length, mapped)) {
4179 				length--;
4180 			} else if (netdev->features & NETIF_F_RXALL) {
4181 				goto process_skb;
4182 			} else {
4183 				/* an error means any chain goes out the window
4184 				 * too
4185 				 */
4186 				dev_kfree_skb(rx_ring->rx_skb_top);
4187 				rx_ring->rx_skb_top = NULL;
4188 				goto next_desc;
4189 			}
4190 		}
4191 
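/* rxtop is the skb accumulating a jumbo frame that spans several descriptors;
 * it is kept in the ring structure so the partial chain survives across poll
 * iterations until the EOP descriptor completes it.
 */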
4192 #define rxtop rx_ring->rx_skb_top
4193 process_skb:
4194 		if (!(status & E1000_RXD_STAT_EOP)) {
4195 			/* this descriptor is only the beginning (or middle) */
4196 			if (!rxtop) {
4197 				/* this is the beginning of a chain */
4198 				rxtop = napi_get_frags(&adapter->napi);
4199 				if (!rxtop)
4200 					break;
4201 
4202 				skb_fill_page_desc(rxtop, 0,
4203 						   buffer_info->rxbuf.page,
4204 						   0, length);
4205 			} else {
4206 				/* this is the middle of a chain */
4207 				skb_fill_page_desc(rxtop,
4208 				    skb_shinfo(rxtop)->nr_frags,
4209 				    buffer_info->rxbuf.page, 0, length);
4210 			}
4211 			e1000_consume_page(buffer_info, rxtop, length);
4212 			goto next_desc;
4213 		} else {
4214 			if (rxtop) {
4215 				/* end of the chain */
4216 				skb_fill_page_desc(rxtop,
4217 				    skb_shinfo(rxtop)->nr_frags,
4218 				    buffer_info->rxbuf.page, 0, length);
4219 				skb = rxtop;
4220 				rxtop = NULL;
4221 				e1000_consume_page(buffer_info, skb, length);
4222 			} else {
4223 				struct page *p;
4224 				/* no chain, got EOP, this buf is the packet
4225 				 * copybreak to save the put_page/alloc_page
4226 				 */
4227 				p = buffer_info->rxbuf.page;
4228 				if (length <= copybreak) {
4229 					if (likely(!(netdev->features & NETIF_F_RXFCS)))
4230 						length -= 4;
4231 					skb = e1000_alloc_rx_skb(adapter,
4232 								 length);
4233 					if (!skb)
4234 						break;
4235 
4236 					memcpy(skb_tail_pointer(skb),
4237 					       page_address(p), length);
4238 
4239 					/* re-use the page, so don't erase
4240 					 * buffer_info->rxbuf.page
4241 					 */
4242 					skb_put(skb, length);
4243 					e1000_rx_checksum(adapter,
4244 							  status | rx_desc->errors << 24,
4245 							  le16_to_cpu(rx_desc->csum), skb);
4246 
4247 					total_rx_bytes += skb->len;
4248 					total_rx_packets++;
4249 
4250 					e1000_receive_skb(adapter, status,
4251 							  rx_desc->special, skb);
4252 					goto next_desc;
4253 				} else {
4254 					skb = napi_get_frags(&adapter->napi);
4255 					if (!skb) {
4256 						adapter->alloc_rx_buff_failed++;
4257 						break;
4258 					}
4259 					skb_fill_page_desc(skb, 0, p, 0,
4260 							   length);
4261 					e1000_consume_page(buffer_info, skb,
4262 							   length);
4263 				}
4264 			}
4265 		}
4266 
4267 		/* Receive Checksum Offload XXX recompute due to CRC strip? */
4268 		e1000_rx_checksum(adapter,
4269 				  (u32)(status) |
4270 				  ((u32)(rx_desc->errors) << 24),
4271 				  le16_to_cpu(rx_desc->csum), skb);
4272 
4273 		total_rx_bytes += (skb->len - 4); /* don't count FCS */
4274 		if (likely(!(netdev->features & NETIF_F_RXFCS)))
4275 			pskb_trim(skb, skb->len - 4);
4276 		total_rx_packets++;
4277 
4278 		if (status & E1000_RXD_STAT_VP) {
4279 			__le16 vlan = rx_desc->special;
4280 			u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4281 
4282 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4283 		}
4284 
4285 		napi_gro_frags(&adapter->napi);
4286 
4287 next_desc:
4288 		rx_desc->status = 0;
4289 
4290 		/* return some buffers to hardware, one at a time is too slow */
4291 		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4292 			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4293 			cleaned_count = 0;
4294 		}
4295 
4296 		/* use prefetched values */
4297 		rx_desc = next_rxd;
4298 		buffer_info = next_buffer;
4299 	}
4300 	rx_ring->next_to_clean = i;
4301 
4302 	cleaned_count = E1000_DESC_UNUSED(rx_ring);
4303 	if (cleaned_count)
4304 		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4305 
4306 	adapter->total_rx_packets += total_rx_packets;
4307 	adapter->total_rx_bytes += total_rx_bytes;
4308 	netdev->stats.rx_bytes += total_rx_bytes;
4309 	netdev->stats.rx_packets += total_rx_packets;
4310 	return cleaned;
4311 }
4312 
4313 /* this should improve performance for small packets with large amounts
4314  * of reassembly being done in the stack
4315  */
4316 static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
4317 				       struct e1000_rx_buffer *buffer_info,
4318 				       u32 length, const void *data)
4319 {
4320 	struct sk_buff *skb;
4321 
4322 	if (length > copybreak)
4323 		return NULL;
4324 
4325 	skb = e1000_alloc_rx_skb(adapter, length);
4326 	if (!skb)
4327 		return NULL;
4328 
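	/* Small frame: sync the receive buffer for CPU access and copy it
	 * into a fresh skb so the original buffer stays mapped and can be
	 * handed straight back to the hardware.
	 */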
4329 	dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4330 				length, DMA_FROM_DEVICE);
4331 
4332 	skb_put_data(skb, data, length);
4333 
4334 	return skb;
4335 }
4336 
4337 /**
4338  * e1000_clean_rx_irq - Send received data up the network stack; legacy
4339  * @adapter: board private structure
4340  * @rx_ring: ring to clean
4341  * @work_done: amount of napi work completed this call
4342  * @work_to_do: max amount of work allowed for this call to do
4343  */
4344 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4345 			       struct e1000_rx_ring *rx_ring,
4346 			       int *work_done, int work_to_do)
4347 {
4348 	struct net_device *netdev = adapter->netdev;
4349 	struct pci_dev *pdev = adapter->pdev;
4350 	struct e1000_rx_desc *rx_desc, *next_rxd;
4351 	struct e1000_rx_buffer *buffer_info, *next_buffer;
4352 	u32 length;
4353 	unsigned int i;
4354 	int cleaned_count = 0;
4355 	bool cleaned = false;
4356 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4357 
4358 	i = rx_ring->next_to_clean;
4359 	rx_desc = E1000_RX_DESC(*rx_ring, i);
4360 	buffer_info = &rx_ring->buffer_info[i];
4361 
4362 	while (rx_desc->status & E1000_RXD_STAT_DD) {
4363 		struct sk_buff *skb;
4364 		u8 *data;
4365 		u8 status;
4366 
4367 		if (*work_done >= work_to_do)
4368 			break;
4369 		(*work_done)++;
4370 		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4371 
4372 		status = rx_desc->status;
4373 		length = le16_to_cpu(rx_desc->length);
4374 
4375 		data = buffer_info->rxbuf.data;
4376 		prefetch(data);
4377 		skb = e1000_copybreak(adapter, buffer_info, length, data);
4378 		if (!skb) {
4379 			unsigned int frag_len = e1000_frag_len(adapter);
4380 
4381 			skb = napi_build_skb(data - E1000_HEADROOM, frag_len);
4382 			if (!skb) {
4383 				adapter->alloc_rx_buff_failed++;
4384 				break;
4385 			}
4386 
4387 			skb_reserve(skb, E1000_HEADROOM);
4388 			dma_unmap_single(&pdev->dev, buffer_info->dma,
4389 					 adapter->rx_buffer_len,
4390 					 DMA_FROM_DEVICE);
4391 			buffer_info->dma = 0;
4392 			buffer_info->rxbuf.data = NULL;
4393 		}
4394 
4395 		if (++i == rx_ring->count)
4396 			i = 0;
4397 
4398 		next_rxd = E1000_RX_DESC(*rx_ring, i);
4399 		prefetch(next_rxd);
4400 
4401 		next_buffer = &rx_ring->buffer_info[i];
4402 
4403 		cleaned = true;
4404 		cleaned_count++;
4405 
4406 		/* !EOP means multiple descriptors were used to store a single
4407 		 * packet, if that's the case we need to toss it.  In fact, we
4408 		 * need to toss every packet with the EOP bit clear and the next
4409 		 * frame that _does_ have the EOP bit set, as it is by
4410 		 * definition only a frame fragment
4411 		 */
4412 		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4413 			adapter->discarding = true;
4414 
4415 		if (adapter->discarding) {
4416 			/* All receives must fit into a single buffer */
4417 			netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4418 			dev_kfree_skb(skb);
4419 			if (status & E1000_RXD_STAT_EOP)
4420 				adapter->discarding = false;
4421 			goto next_desc;
4422 		}
4423 
4424 		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4425 			if (e1000_tbi_should_accept(adapter, status,
4426 						    rx_desc->errors,
4427 						    length, data)) {
4428 				length--;
4429 			} else if (netdev->features & NETIF_F_RXALL) {
4430 				goto process_skb;
4431 			} else {
4432 				dev_kfree_skb(skb);
4433 				goto next_desc;
4434 			}
4435 		}
4436 
4437 process_skb:
4438 		total_rx_bytes += (length - 4); /* don't count FCS */
4439 		total_rx_packets++;
4440 
4441 		if (likely(!(netdev->features & NETIF_F_RXFCS)))
4442 			/* adjust length to remove Ethernet CRC, this must be
4443 			 * done after the TBI_ACCEPT workaround above
4444 			 */
4445 			length -= 4;
4446 
4447 		if (buffer_info->rxbuf.data == NULL)
4448 			skb_put(skb, length);
4449 		else /* copybreak skb */
4450 			skb_trim(skb, length);
4451 
4452 		/* Receive Checksum Offload */
4453 		e1000_rx_checksum(adapter,
4454 				  (u32)(status) |
4455 				  ((u32)(rx_desc->errors) << 24),
4456 				  le16_to_cpu(rx_desc->csum), skb);
4457 
4458 		e1000_receive_skb(adapter, status, rx_desc->special, skb);
4459 
4460 next_desc:
4461 		rx_desc->status = 0;
4462 
4463 		/* return some buffers to hardware, one at a time is too slow */
4464 		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4465 			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4466 			cleaned_count = 0;
4467 		}
4468 
4469 		/* use prefetched values */
4470 		rx_desc = next_rxd;
4471 		buffer_info = next_buffer;
4472 	}
4473 	rx_ring->next_to_clean = i;
4474 
4475 	cleaned_count = E1000_DESC_UNUSED(rx_ring);
4476 	if (cleaned_count)
4477 		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4478 
4479 	adapter->total_rx_packets += total_rx_packets;
4480 	adapter->total_rx_bytes += total_rx_bytes;
4481 	netdev->stats.rx_bytes += total_rx_bytes;
4482 	netdev->stats.rx_packets += total_rx_packets;
4483 	return cleaned;
4484 }
4485 
4486 /**
4487  * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4488  * @adapter: address of board private structure
4489  * @rx_ring: pointer to receive ring structure
4490  * @cleaned_count: number of buffers to allocate this pass
4491  **/
4492 static void
4493 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4494 			     struct e1000_rx_ring *rx_ring, int cleaned_count)
4495 {
4496 	struct pci_dev *pdev = adapter->pdev;
4497 	struct e1000_rx_desc *rx_desc;
4498 	struct e1000_rx_buffer *buffer_info;
4499 	unsigned int i;
4500 
4501 	i = rx_ring->next_to_use;
4502 	buffer_info = &rx_ring->buffer_info[i];
4503 
4504 	while (cleaned_count--) {
4505 		/* allocate a new page if necessary */
4506 		if (!buffer_info->rxbuf.page) {
4507 			buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
4508 			if (unlikely(!buffer_info->rxbuf.page)) {
4509 				adapter->alloc_rx_buff_failed++;
4510 				break;
4511 			}
4512 		}
4513 
4514 		if (!buffer_info->dma) {
4515 			buffer_info->dma = dma_map_page(&pdev->dev,
4516 							buffer_info->rxbuf.page, 0,
4517 							adapter->rx_buffer_len,
4518 							DMA_FROM_DEVICE);
4519 			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4520 				put_page(buffer_info->rxbuf.page);
4521 				buffer_info->rxbuf.page = NULL;
4522 				buffer_info->dma = 0;
4523 				adapter->alloc_rx_buff_failed++;
4524 				break;
4525 			}
4526 		}
4527 
4528 		rx_desc = E1000_RX_DESC(*rx_ring, i);
4529 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4530 
4531 		if (unlikely(++i == rx_ring->count))
4532 			i = 0;
4533 		buffer_info = &rx_ring->buffer_info[i];
4534 	}
4535 
4536 	if (likely(rx_ring->next_to_use != i)) {
4537 		rx_ring->next_to_use = i;
4538 		if (unlikely(i-- == 0))
4539 			i = (rx_ring->count - 1);
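		/* Note that the tail (RDT) is deliberately written one entry
		 * behind next_to_use, leaving a gap between the descriptors
		 * handed to hardware and the one software fills next.
		 */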
4540 
4541 		/* Force memory writes to complete before letting h/w
4542 		 * know there are new descriptors to fetch.  (Only
4543 		 * applicable for weak-ordered memory model archs,
4544 		 * such as IA-64).
4545 		 */
4546 		dma_wmb();
4547 		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4548 	}
4549 }
4550 
4551 /**
4552  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4553  * @adapter: address of board private structure
4554  * @rx_ring: pointer to ring struct
4555  * @cleaned_count: number of new Rx buffers to try to allocate
4556  **/
4557 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4558 				   struct e1000_rx_ring *rx_ring,
4559 				   int cleaned_count)
4560 {
4561 	struct e1000_hw *hw = &adapter->hw;
4562 	struct pci_dev *pdev = adapter->pdev;
4563 	struct e1000_rx_desc *rx_desc;
4564 	struct e1000_rx_buffer *buffer_info;
4565 	unsigned int i;
4566 	unsigned int bufsz = adapter->rx_buffer_len;
4567 
4568 	i = rx_ring->next_to_use;
4569 	buffer_info = &rx_ring->buffer_info[i];
4570 
4571 	while (cleaned_count--) {
4572 		void *data;
4573 
4574 		if (buffer_info->rxbuf.data)
4575 			goto skip;
4576 
4577 		data = e1000_alloc_frag(adapter);
4578 		if (!data) {
4579 			/* Better luck next round */
4580 			adapter->alloc_rx_buff_failed++;
4581 			break;
4582 		}
4583 
4584 		/* Fix for errata 23, can't cross 64kB boundary */
4585 		if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4586 			void *olddata = data;
4587 			e_err(rx_err, "skb align check failed: %u bytes at "
4588 			      "%p\n", bufsz, data);
4589 			/* Try again, without freeing the previous */
4590 			data = e1000_alloc_frag(adapter);
4591 			/* Failed allocation, critical failure */
4592 			if (!data) {
4593 				skb_free_frag(olddata);
4594 				adapter->alloc_rx_buff_failed++;
4595 				break;
4596 			}
4597 
4598 			if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4599 				/* give up */
4600 				skb_free_frag(data);
4601 				skb_free_frag(olddata);
4602 				adapter->alloc_rx_buff_failed++;
4603 				break;
4604 			}
4605 
4606 			/* Use new allocation */
4607 			skb_free_frag(olddata);
4608 		}
4609 		buffer_info->dma = dma_map_single(&pdev->dev,
4610 						  data,
4611 						  adapter->rx_buffer_len,
4612 						  DMA_FROM_DEVICE);
4613 		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4614 			skb_free_frag(data);
4615 			buffer_info->dma = 0;
4616 			adapter->alloc_rx_buff_failed++;
4617 			break;
4618 		}
4619 
4620 		/* XXX if it was allocated cleanly it will never map to a
4621 		 * boundary crossing
4622 		 */
4623 
4624 		/* Fix for errata 23, can't cross 64kB boundary */
4625 		if (!e1000_check_64k_bound(adapter,
4626 					(void *)(unsigned long)buffer_info->dma,
4627 					adapter->rx_buffer_len)) {
4628 			e_err(rx_err, "dma align check failed: %u bytes at "
4629 			      "%p\n", adapter->rx_buffer_len,
4630 			      (void *)(unsigned long)buffer_info->dma);
4631 
4632 			dma_unmap_single(&pdev->dev, buffer_info->dma,
4633 					 adapter->rx_buffer_len,
4634 					 DMA_FROM_DEVICE);
4635 
4636 			skb_free_frag(data);
4637 			buffer_info->rxbuf.data = NULL;
4638 			buffer_info->dma = 0;
4639 
4640 			adapter->alloc_rx_buff_failed++;
4641 			break;
4642 		}
4643 		buffer_info->rxbuf.data = data;
4644  skip:
4645 		rx_desc = E1000_RX_DESC(*rx_ring, i);
4646 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4647 
4648 		if (unlikely(++i == rx_ring->count))
4649 			i = 0;
4650 		buffer_info = &rx_ring->buffer_info[i];
4651 	}
4652 
4653 	if (likely(rx_ring->next_to_use != i)) {
4654 		rx_ring->next_to_use = i;
4655 		if (unlikely(i-- == 0))
4656 			i = (rx_ring->count - 1);
4657 
4658 		/* Force memory writes to complete before letting h/w
4659 		 * know there are new descriptors to fetch.  (Only
4660 		 * applicable for weak-ordered memory model archs,
4661 		 * such as IA-64).
4662 		 */
4663 		dma_wmb();
4664 		writel(i, hw->hw_addr + rx_ring->rdt);
4665 	}
4666 }
4667 
4668 /**
4669  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4670  * @adapter: address of board private structure
4671  **/
4672 static void e1000_smartspeed(struct e1000_adapter *adapter)
4673 {
4674 	struct e1000_hw *hw = &adapter->hw;
4675 	u16 phy_status;
4676 	u16 phy_ctrl;
4677 
4678 	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4679 	   !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4680 		return;
4681 
4682 	if (adapter->smartspeed == 0) {
4683 		/* If Master/Slave config fault is asserted twice,
4684 		 * we assume back-to-back
4685 		 */
4686 		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4687 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4688 			return;
4689 		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4690 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4691 			return;
4692 		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4693 		if (phy_ctrl & CR_1000T_MS_ENABLE) {
4694 			phy_ctrl &= ~CR_1000T_MS_ENABLE;
4695 			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4696 					    phy_ctrl);
4697 			adapter->smartspeed++;
4698 			if (!e1000_phy_setup_autoneg(hw) &&
4699 			   !e1000_read_phy_reg(hw, PHY_CTRL,
4700 					       &phy_ctrl)) {
4701 				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4702 					     MII_CR_RESTART_AUTO_NEG);
4703 				e1000_write_phy_reg(hw, PHY_CTRL,
4704 						    phy_ctrl);
4705 			}
4706 		}
4707 		return;
4708 	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4709 		/* If still no link, perhaps using 2/3 pair cable */
4710 		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4711 		phy_ctrl |= CR_1000T_MS_ENABLE;
4712 		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4713 		if (!e1000_phy_setup_autoneg(hw) &&
4714 		   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4715 			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4716 				     MII_CR_RESTART_AUTO_NEG);
4717 			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4718 		}
4719 	}
4720 	/* Restart process after E1000_SMARTSPEED_MAX iterations */
4721 	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4722 		adapter->smartspeed = 0;
4723 }
4724 
4725 /**
4726  * e1000_ioctl - handle ioctl calls
4727  * @netdev: pointer to our netdev
4728  * @ifr: pointer to interface request structure
4729  * @cmd: ioctl data
4730  **/
4731 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4732 {
4733 	switch (cmd) {
4734 	case SIOCGMIIPHY:
4735 	case SIOCGMIIREG:
4736 	case SIOCSMIIREG:
4737 		return e1000_mii_ioctl(netdev, ifr, cmd);
4738 	default:
4739 		return -EOPNOTSUPP;
4740 	}
4741 }
4742 
4743 /**
4744  * e1000_mii_ioctl -
4745  * @netdev: pointer to our netdev
4746  * @ifr: pointer to interface request structure
4747  * @cmd: ioctl data
4748  **/
4749 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4750 			   int cmd)
4751 {
4752 	struct e1000_adapter *adapter = netdev_priv(netdev);
4753 	struct e1000_hw *hw = &adapter->hw;
4754 	struct mii_ioctl_data *data = if_mii(ifr);
4755 	int retval;
4756 	u16 mii_reg;
4757 	unsigned long flags;
4758 
4759 	if (hw->media_type != e1000_media_type_copper)
4760 		return -EOPNOTSUPP;
4761 
4762 	switch (cmd) {
4763 	case SIOCGMIIPHY:
4764 		data->phy_id = hw->phy_addr;
4765 		break;
4766 	case SIOCGMIIREG:
4767 		spin_lock_irqsave(&adapter->stats_lock, flags);
4768 		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4769 				   &data->val_out)) {
4770 			spin_unlock_irqrestore(&adapter->stats_lock, flags);
4771 			return -EIO;
4772 		}
4773 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4774 		break;
4775 	case SIOCSMIIREG:
4776 		if (data->reg_num & ~(0x1F))
4777 			return -EFAULT;
4778 		mii_reg = data->val_in;
4779 		spin_lock_irqsave(&adapter->stats_lock, flags);
4780 		if (e1000_write_phy_reg(hw, data->reg_num,
4781 					mii_reg)) {
4782 			spin_unlock_irqrestore(&adapter->stats_lock, flags);
4783 			return -EIO;
4784 		}
4785 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4786 		if (hw->media_type == e1000_media_type_copper) {
4787 			switch (data->reg_num) {
4788 			case PHY_CTRL:
4789 				if (mii_reg & MII_CR_POWER_DOWN)
4790 					break;
4791 				if (mii_reg & MII_CR_AUTO_NEG_EN) {
4792 					hw->autoneg = 1;
4793 					hw->autoneg_advertised = 0x2F;
4794 				} else {
4795 					u32 speed;
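					/* Raw MII control register bits:
					 * 0x40 selects 1000 Mb/s, 0x2000
					 * selects 100 Mb/s, 0x100 requests
					 * full duplex.
					 */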
4796 					if (mii_reg & 0x40)
4797 						speed = SPEED_1000;
4798 					else if (mii_reg & 0x2000)
4799 						speed = SPEED_100;
4800 					else
4801 						speed = SPEED_10;
4802 					retval = e1000_set_spd_dplx(
4803 						adapter, speed,
4804 						((mii_reg & 0x100)
4805 						 ? DUPLEX_FULL :
4806 						 DUPLEX_HALF));
4807 					if (retval)
4808 						return retval;
4809 				}
4810 				if (netif_running(adapter->netdev))
4811 					e1000_reinit_locked(adapter);
4812 				else
4813 					e1000_reset(adapter);
4814 				break;
4815 			case M88E1000_PHY_SPEC_CTRL:
4816 			case M88E1000_EXT_PHY_SPEC_CTRL:
4817 				if (e1000_phy_reset(hw))
4818 					return -EIO;
4819 				break;
4820 			}
4821 		} else {
4822 			switch (data->reg_num) {
4823 			case PHY_CTRL:
4824 				if (mii_reg & MII_CR_POWER_DOWN)
4825 					break;
4826 				if (netif_running(adapter->netdev))
4827 					e1000_reinit_locked(adapter);
4828 				else
4829 					e1000_reset(adapter);
4830 				break;
4831 			}
4832 		}
4833 		break;
4834 	default:
4835 		return -EOPNOTSUPP;
4836 	}
4837 	return E1000_SUCCESS;
4838 }
4839 
4840 void e1000_pci_set_mwi(struct e1000_hw *hw)
4841 {
4842 	struct e1000_adapter *adapter = hw->back;
4843 	int ret_val = pci_set_mwi(adapter->pdev);
4844 
4845 	if (ret_val)
4846 		e_err(probe, "Error in setting MWI\n");
4847 }
4848 
4849 void e1000_pci_clear_mwi(struct e1000_hw *hw)
4850 {
4851 	struct e1000_adapter *adapter = hw->back;
4852 
4853 	pci_clear_mwi(adapter->pdev);
4854 }
4855 
4856 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4857 {
4858 	struct e1000_adapter *adapter = hw->back;
4859 	return pcix_get_mmrbc(adapter->pdev);
4860 }
4861 
4862 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4863 {
4864 	struct e1000_adapter *adapter = hw->back;
4865 	pcix_set_mmrbc(adapter->pdev, mmrbc);
4866 }
4867 
4868 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4869 {
4870 	outl(value, port);
4871 }
4872 
4873 static bool e1000_vlan_used(struct e1000_adapter *adapter)
4874 {
4875 	u16 vid;
4876 
4877 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4878 		return true;
4879 	return false;
4880 }
4881 
4882 static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4883 			      netdev_features_t features)
4884 {
4885 	struct e1000_hw *hw = &adapter->hw;
4886 	u32 ctrl;
4887 
4888 	ctrl = er32(CTRL);
4889 	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4890 		/* enable VLAN tag insert/strip */
4891 		ctrl |= E1000_CTRL_VME;
4892 	} else {
4893 		/* disable VLAN tag insert/strip */
4894 		ctrl &= ~E1000_CTRL_VME;
4895 	}
4896 	ew32(CTRL, ctrl);
4897 }
4898 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4899 				     bool filter_on)
4900 {
4901 	struct e1000_hw *hw = &adapter->hw;
4902 	u32 rctl;
4903 
4904 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4905 		e1000_irq_disable(adapter);
4906 
4907 	__e1000_vlan_mode(adapter, adapter->netdev->features);
4908 	if (filter_on) {
4909 		/* enable VLAN receive filtering */
4910 		rctl = er32(RCTL);
4911 		rctl &= ~E1000_RCTL_CFIEN;
4912 		if (!(adapter->netdev->flags & IFF_PROMISC))
4913 			rctl |= E1000_RCTL_VFE;
4914 		ew32(RCTL, rctl);
4915 		e1000_update_mng_vlan(adapter);
4916 	} else {
4917 		/* disable VLAN receive filtering */
4918 		rctl = er32(RCTL);
4919 		rctl &= ~E1000_RCTL_VFE;
4920 		ew32(RCTL, rctl);
4921 	}
4922 
4923 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4924 		e1000_irq_enable(adapter);
4925 }
4926 
4927 static void e1000_vlan_mode(struct net_device *netdev,
4928 			    netdev_features_t features)
4929 {
4930 	struct e1000_adapter *adapter = netdev_priv(netdev);
4931 
4932 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4933 		e1000_irq_disable(adapter);
4934 
4935 	__e1000_vlan_mode(adapter, features);
4936 
4937 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4938 		e1000_irq_enable(adapter);
4939 }
4940 
4941 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4942 				 __be16 proto, u16 vid)
4943 {
4944 	struct e1000_adapter *adapter = netdev_priv(netdev);
4945 	struct e1000_hw *hw = &adapter->hw;
4946 	u32 vfta, index;
4947 
4948 	if ((hw->mng_cookie.status &
4949 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4950 	    (vid == adapter->mng_vlan_id))
4951 		return 0;
4952 
4953 	if (!e1000_vlan_used(adapter))
4954 		e1000_vlan_filter_on_off(adapter, true);
4955 
4956 	/* add VID to filter table */
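	/* The VLAN filter table holds 128 32-bit entries covering all 4096
	 * VLAN IDs: bits 11:5 of the VID select the entry, bits 4:0 the bit
	 * within it.
	 */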
4957 	index = (vid >> 5) & 0x7F;
4958 	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4959 	vfta |= (1 << (vid & 0x1F));
4960 	e1000_write_vfta(hw, index, vfta);
4961 
4962 	set_bit(vid, adapter->active_vlans);
4963 
4964 	return 0;
4965 }
4966 
4967 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
4968 				  __be16 proto, u16 vid)
4969 {
4970 	struct e1000_adapter *adapter = netdev_priv(netdev);
4971 	struct e1000_hw *hw = &adapter->hw;
4972 	u32 vfta, index;
4973 
4974 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4975 		e1000_irq_disable(adapter);
4976 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4977 		e1000_irq_enable(adapter);
4978 
4979 	/* remove VID from filter table */
4980 	index = (vid >> 5) & 0x7F;
4981 	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4982 	vfta &= ~(1 << (vid & 0x1F));
4983 	e1000_write_vfta(hw, index, vfta);
4984 
4985 	clear_bit(vid, adapter->active_vlans);
4986 
4987 	if (!e1000_vlan_used(adapter))
4988 		e1000_vlan_filter_on_off(adapter, false);
4989 
4990 	return 0;
4991 }
4992 
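/* Re-program the hardware VLAN filter from the driver's active_vlans bitmap;
 * typically needed after a reset has cleared the VFTA.
 */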
4993 static void e1000_restore_vlan(struct e1000_adapter *adapter)
4994 {
4995 	u16 vid;
4996 
4997 	if (!e1000_vlan_used(adapter))
4998 		return;
4999 
5000 	e1000_vlan_filter_on_off(adapter, true);
5001 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
5002 		e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
5003 }
5004 
5005 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
5006 {
5007 	struct e1000_hw *hw = &adapter->hw;
5008 
5009 	hw->autoneg = 0;
5010 
5011 	/* Make sure dplx is at most 1 bit and lsb of speed is not set
5012 	 * for the switch() below to work
5013 	 */
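	/* e.g. SPEED_100 (100) + DUPLEX_FULL (1) = 101: the sums below are
	 * unambiguous because each supported speed value is even and the
	 * duplex value occupies bit 0 alone.
	 */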
5014 	if ((spd & 1) || (dplx & ~1))
5015 		goto err_inval;
5016 
5017 	/* Fiber NICs only allow 1000 Mbps full duplex */
5018 	if ((hw->media_type == e1000_media_type_fiber) &&
5019 	    spd != SPEED_1000 &&
5020 	    dplx != DUPLEX_FULL)
5021 		goto err_inval;
5022 
5023 	switch (spd + dplx) {
5024 	case SPEED_10 + DUPLEX_HALF:
5025 		hw->forced_speed_duplex = e1000_10_half;
5026 		break;
5027 	case SPEED_10 + DUPLEX_FULL:
5028 		hw->forced_speed_duplex = e1000_10_full;
5029 		break;
5030 	case SPEED_100 + DUPLEX_HALF:
5031 		hw->forced_speed_duplex = e1000_100_half;
5032 		break;
5033 	case SPEED_100 + DUPLEX_FULL:
5034 		hw->forced_speed_duplex = e1000_100_full;
5035 		break;
5036 	case SPEED_1000 + DUPLEX_FULL:
5037 		hw->autoneg = 1;
5038 		hw->autoneg_advertised = ADVERTISE_1000_FULL;
5039 		break;
5040 	case SPEED_1000 + DUPLEX_HALF: /* not supported */
5041 	default:
5042 		goto err_inval;
5043 	}
5044 
5045 	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5046 	hw->mdix = AUTO_ALL_MODES;
5047 
5048 	return 0;
5049 
5050 err_inval:
5051 	e_err(probe, "Unsupported Speed/Duplex configuration\n");
5052 	return -EINVAL;
5053 }
5054 
5055 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5056 {
5057 	struct net_device *netdev = pci_get_drvdata(pdev);
5058 	struct e1000_adapter *adapter = netdev_priv(netdev);
5059 	struct e1000_hw *hw = &adapter->hw;
5060 	u32 ctrl, ctrl_ext, rctl, status;
5061 	u32 wufc = adapter->wol;
5062 
5063 	netif_device_detach(netdev);
5064 
5065 	if (netif_running(netdev)) {
5066 		int count = E1000_CHECK_RESET_COUNT;
5067 
5068 		while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
5069 			usleep_range(10000, 20000);
5070 
5071 		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
5072 		e1000_down(adapter);
5073 	}
5074 
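	/* If the link is already up there is nothing to gain from waking on a
	 * link-status change, so drop that filter from the wake-up mask.
	 */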
5075 	status = er32(STATUS);
5076 	if (status & E1000_STATUS_LU)
5077 		wufc &= ~E1000_WUFC_LNKC;
5078 
5079 	if (wufc) {
5080 		e1000_setup_rctl(adapter);
5081 		e1000_set_rx_mode(netdev);
5082 
5083 		rctl = er32(RCTL);
5084 
5085 		/* turn on all-multi mode if wake on multicast is enabled */
5086 		if (wufc & E1000_WUFC_MC)
5087 			rctl |= E1000_RCTL_MPE;
5088 
5089 		/* enable receives in the hardware */
5090 		ew32(RCTL, rctl | E1000_RCTL_EN);
5091 
5092 		if (hw->mac_type >= e1000_82540) {
5093 			ctrl = er32(CTRL);
5094 			/* advertise wake from D3Cold */
5095 			#define E1000_CTRL_ADVD3WUC 0x00100000
5096 			/* phy power management enable */
5097 			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5098 			ctrl |= E1000_CTRL_ADVD3WUC |
5099 				E1000_CTRL_EN_PHY_PWR_MGMT;
5100 			ew32(CTRL, ctrl);
5101 		}
5102 
5103 		if (hw->media_type == e1000_media_type_fiber ||
5104 		    hw->media_type == e1000_media_type_internal_serdes) {
5105 			/* keep the laser running in D3 */
5106 			ctrl_ext = er32(CTRL_EXT);
5107 			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5108 			ew32(CTRL_EXT, ctrl_ext);
5109 		}
5110 
5111 		ew32(WUC, E1000_WUC_PME_EN);
5112 		ew32(WUFC, wufc);
5113 	} else {
5114 		ew32(WUC, 0);
5115 		ew32(WUFC, 0);
5116 	}
5117 
5118 	e1000_release_manageability(adapter);
5119 
5120 	*enable_wake = !!wufc;
5121 
5122 	/* make sure adapter isn't asleep if manageability is enabled */
5123 	if (adapter->en_mng_pt)
5124 		*enable_wake = true;
5125 
5126 	if (netif_running(netdev))
5127 		e1000_free_irq(adapter);
5128 
5129 	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5130 		pci_disable_device(pdev);
5131 
5132 	return 0;
5133 }
5134 
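/* PM suspend callback: run the shared shutdown path and record whether the
 * device should remain armed as a wake-up source.
 */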
5135 static int e1000_suspend(struct device *dev)
5136 {
5137 	int retval;
5138 	struct pci_dev *pdev = to_pci_dev(dev);
5139 	bool wake;
5140 
5141 	retval = __e1000_shutdown(pdev, &wake);
5142 	device_set_wakeup_enable(dev, wake);
5143 
5144 	return retval;
5145 }
5146 
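/* PM resume callback: re-enable the PCI device, reset the hardware, and bring
 * the interface back up if it was running before suspend.
 */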
5147 static int e1000_resume(struct device *dev)
5148 {
5149 	struct pci_dev *pdev = to_pci_dev(dev);
5150 	struct net_device *netdev = pci_get_drvdata(pdev);
5151 	struct e1000_adapter *adapter = netdev_priv(netdev);
5152 	struct e1000_hw *hw = &adapter->hw;
5153 	int err;
5154 
5155 	if (adapter->need_ioport)
5156 		err = pci_enable_device(pdev);
5157 	else
5158 		err = pci_enable_device_mem(pdev);
5159 	if (err) {
5160 		pr_err("Cannot enable PCI device from suspend\n");
5161 		return err;
5162 	}
5163 
5164 	/* flush memory to make sure state is correct */
5165 	smp_mb__before_atomic();
5166 	clear_bit(__E1000_DISABLED, &adapter->flags);
5167 	pci_set_master(pdev);
5168 
5169 	pci_enable_wake(pdev, PCI_D3hot, 0);
5170 	pci_enable_wake(pdev, PCI_D3cold, 0);
5171 
5172 	if (netif_running(netdev)) {
5173 		err = e1000_request_irq(adapter);
5174 		if (err)
5175 			return err;
5176 	}
5177 
5178 	e1000_power_up_phy(adapter);
5179 	e1000_reset(adapter);
5180 	ew32(WUS, ~0);
5181 
5182 	e1000_init_manageability(adapter);
5183 
5184 	if (netif_running(netdev))
5185 		e1000_up(adapter);
5186 
5187 	netif_device_attach(netdev);
5188 
5189 	return 0;
5190 }
5191 
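/* pci_driver .shutdown hook: reuse the suspend path and, when the system is
 * powering off, leave the device in D3hot with PME armed as appropriate.
 */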
5192 static void e1000_shutdown(struct pci_dev *pdev)
5193 {
5194 	bool wake;
5195 
5196 	__e1000_shutdown(pdev, &wake);
5197 
5198 	if (system_state == SYSTEM_POWER_OFF) {
5199 		pci_wake_from_d3(pdev, wake);
5200 		pci_set_power_state(pdev, PCI_D3hot);
5201 	}
5202 }
5203 
5204 #ifdef CONFIG_NET_POLL_CONTROLLER
5205 /* Polling 'interrupt' - used by things like netconsole to send skbs
5206  * without having to re-enable interrupts. It's not called while
5207  * the interrupt routine is executing.
5208  */
5209 static void e1000_netpoll(struct net_device *netdev)
5210 {
5211 	struct e1000_adapter *adapter = netdev_priv(netdev);
5212 
5213 	if (disable_hardirq(adapter->pdev->irq))
5214 		e1000_intr(adapter->pdev->irq, netdev);
5215 	enable_irq(adapter->pdev->irq);
5216 }
5217 #endif
5218 
5219 /**
5220  * e1000_io_error_detected - called when PCI error is detected
5221  * @pdev: Pointer to PCI device
5222  * @state: The current pci connection state
5223  *
5224  * This function is called after a PCI bus error affecting
5225  * this device has been detected.
5226  */
5227 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5228 						pci_channel_state_t state)
5229 {
5230 	struct net_device *netdev = pci_get_drvdata(pdev);
5231 	struct e1000_adapter *adapter = netdev_priv(netdev);
5232 
5233 	netif_device_detach(netdev);
5234 
5235 	if (state == pci_channel_io_perm_failure)
5236 		return PCI_ERS_RESULT_DISCONNECT;
5237 
5238 	if (netif_running(netdev))
5239 		e1000_down(adapter);
5240 
5241 	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5242 		pci_disable_device(pdev);
5243 
5244 	/* Request a slot reset. */
5245 	return PCI_ERS_RESULT_NEED_RESET;
5246 }
5247 
5248 /**
5249  * e1000_io_slot_reset - called after the pci bus has been reset.
5250  * @pdev: Pointer to PCI device
5251  *
5252  * Restart the card from scratch, as if from a cold boot. Implementation
5253  * resembles the first half of the e1000_resume routine.
5254  */
5255 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5256 {
5257 	struct net_device *netdev = pci_get_drvdata(pdev);
5258 	struct e1000_adapter *adapter = netdev_priv(netdev);
5259 	struct e1000_hw *hw = &adapter->hw;
5260 	int err;
5261 
5262 	if (adapter->need_ioport)
5263 		err = pci_enable_device(pdev);
5264 	else
5265 		err = pci_enable_device_mem(pdev);
5266 	if (err) {
5267 		pr_err("Cannot re-enable PCI device after reset.\n");
5268 		return PCI_ERS_RESULT_DISCONNECT;
5269 	}
5270 
5271 	/* flush memory to make sure state is correct */
5272 	smp_mb__before_atomic();
5273 	clear_bit(__E1000_DISABLED, &adapter->flags);
5274 	pci_set_master(pdev);
5275 
5276 	pci_enable_wake(pdev, PCI_D3hot, 0);
5277 	pci_enable_wake(pdev, PCI_D3cold, 0);
5278 
5279 	e1000_reset(adapter);
5280 	ew32(WUS, ~0);
5281 
5282 	return PCI_ERS_RESULT_RECOVERED;
5283 }
5284 
5285 /**
5286  * e1000_io_resume - called when traffic can start flowing again.
5287  * @pdev: Pointer to PCI device
5288  *
5289  * This callback is called when the error recovery driver tells us that
5290  * it's OK to resume normal operation. Implementation resembles the
5291  * second half of the e1000_resume routine.
5292  */
5293 static void e1000_io_resume(struct pci_dev *pdev)
5294 {
5295 	struct net_device *netdev = pci_get_drvdata(pdev);
5296 	struct e1000_adapter *adapter = netdev_priv(netdev);
5297 
5298 	e1000_init_manageability(adapter);
5299 
5300 	if (netif_running(netdev)) {
5301 		if (e1000_up(adapter)) {
5302 			pr_info("can't bring device back up after reset\n");
5303 			return;
5304 		}
5305 	}
5306 
5307 	netif_device_attach(netdev);
5308 }
5309 
5310 /* e1000_main.c */
5311