1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2006 Intel Corporation. */
3
4 #include "e1000.h"
5 #include <net/ip6_checksum.h>
6 #include <linux/io.h>
7 #include <linux/prefetch.h>
8 #include <linux/bitops.h>
9 #include <linux/if_vlan.h>
10
11 char e1000_driver_name[] = "e1000";
12 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
13 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
14
15 /* e1000_pci_tbl - PCI Device ID Table
16 *
17 * Last entry must be all 0s
18 *
19 * Macro expands to...
20 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
21 */
/* PCI device IDs this driver binds to; the PCI core stops scanning at
 * the all-zero terminator entry.
 */
static const struct pci_device_id e1000_pci_tbl[] = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};
63
64 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
65
66 int e1000_up(struct e1000_adapter *adapter);
67 void e1000_down(struct e1000_adapter *adapter);
68 void e1000_reinit_locked(struct e1000_adapter *adapter);
69 void e1000_reset(struct e1000_adapter *adapter);
70 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
71 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
72 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
73 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
74 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
75 struct e1000_tx_ring *txdr);
76 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
77 struct e1000_rx_ring *rxdr);
78 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
79 struct e1000_tx_ring *tx_ring);
80 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
81 struct e1000_rx_ring *rx_ring);
82 void e1000_update_stats(struct e1000_adapter *adapter);
83
84 static int e1000_init_module(void);
85 static void e1000_exit_module(void);
86 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
87 static void e1000_remove(struct pci_dev *pdev);
88 static int e1000_alloc_queues(struct e1000_adapter *adapter);
89 static int e1000_sw_init(struct e1000_adapter *adapter);
90 int e1000_open(struct net_device *netdev);
91 int e1000_close(struct net_device *netdev);
92 static void e1000_configure_tx(struct e1000_adapter *adapter);
93 static void e1000_configure_rx(struct e1000_adapter *adapter);
94 static void e1000_setup_rctl(struct e1000_adapter *adapter);
95 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
96 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
97 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
98 struct e1000_tx_ring *tx_ring);
99 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
100 struct e1000_rx_ring *rx_ring);
101 static void e1000_set_rx_mode(struct net_device *netdev);
102 static void e1000_update_phy_info_task(struct work_struct *work);
103 static void e1000_watchdog(struct work_struct *work);
104 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
105 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
106 struct net_device *netdev);
107 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
108 static int e1000_set_mac(struct net_device *netdev, void *p);
109 static irqreturn_t e1000_intr(int irq, void *data);
110 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
111 struct e1000_tx_ring *tx_ring);
112 static int e1000_clean(struct napi_struct *napi, int budget);
113 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
114 struct e1000_rx_ring *rx_ring,
115 int *work_done, int work_to_do);
116 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
117 struct e1000_rx_ring *rx_ring,
118 int *work_done, int work_to_do);
/* Intentional no-op Rx buffer allocator.  Presumably installed as
 * adapter->alloc_rx_buf when real buffer allocation must be suppressed
 * (e.g. during reconfiguration) -- confirm at the assignment sites.
 */
static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count)
{
}
124 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
125 struct e1000_rx_ring *rx_ring,
126 int cleaned_count);
127 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
128 struct e1000_rx_ring *rx_ring,
129 int cleaned_count);
130 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
131 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
132 int cmd);
133 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
134 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
135 static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue);
136 static void e1000_reset_task(struct work_struct *work);
137 static void e1000_smartspeed(struct e1000_adapter *adapter);
138 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
139 struct sk_buff *skb);
140
141 static bool e1000_vlan_used(struct e1000_adapter *adapter);
142 static void e1000_vlan_mode(struct net_device *netdev,
143 netdev_features_t features);
144 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
145 bool filter_on);
146 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
147 __be16 proto, u16 vid);
148 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
149 __be16 proto, u16 vid);
150 static void e1000_restore_vlan(struct e1000_adapter *adapter);
151
152 static int e1000_suspend(struct device *dev);
153 static int e1000_resume(struct device *dev);
154 static void e1000_shutdown(struct pci_dev *pdev);
155
156 #ifdef CONFIG_NET_POLL_CONTROLLER
157 /* for netdump / net console */
158 static void e1000_netpoll (struct net_device *netdev);
159 #endif
160
161 #define COPYBREAK_DEFAULT 256
162 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
163 module_param(copybreak, uint, 0644);
164 MODULE_PARM_DESC(copybreak,
165 "Maximum size of packet that is copied to a new buffer on receive");
166
167 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
168 pci_channel_state_t state);
169 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
170 static void e1000_io_resume(struct pci_dev *pdev);
171
/* PCI error recovery (AER) callbacks, referenced from e1000_driver below */
static const struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};
177
178 static DEFINE_SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);
179
/* Driver registration structure handed to pci_register_driver() in
 * e1000_init_module().
 */
static struct pci_driver e1000_driver = {
	.name = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe = e1000_probe,
	.remove = e1000_remove,
	.driver.pm = pm_sleep_ptr(&e1000_pm_ops),
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};
189
190 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
191 MODULE_LICENSE("GPL v2");
192
193 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
194 static int debug = -1;
195 module_param(debug, int, 0);
196 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
197
198 /**
199 * e1000_get_hw_dev - helper function for getting netdev
200 * @hw: pointer to HW struct
201 *
202 * return device used by hardware layer to print debugging information
203 *
204 **/
e1000_get_hw_dev(struct e1000_hw * hw)205 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
206 {
207 struct e1000_adapter *adapter = hw->back;
208 return adapter->netdev;
209 }
210
211 /**
212 * e1000_init_module - Driver Registration Routine
213 *
214 * e1000_init_module is the first routine called when the driver is
215 * loaded. All it does is register with the PCI subsystem.
216 **/
e1000_init_module(void)217 static int __init e1000_init_module(void)
218 {
219 int ret;
220 pr_info("%s\n", e1000_driver_string);
221
222 pr_info("%s\n", e1000_copyright);
223
224 ret = pci_register_driver(&e1000_driver);
225 if (copybreak != COPYBREAK_DEFAULT) {
226 if (copybreak == 0)
227 pr_info("copybreak disabled\n");
228 else
229 pr_info("copybreak enabled for "
230 "packets <= %u bytes\n", copybreak);
231 }
232 return ret;
233 }
234
235 module_init(e1000_init_module);
236
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.  It undoes the registration done in e1000_init_module().
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}
247
248 module_exit(e1000_exit_module);
249
e1000_request_irq(struct e1000_adapter * adapter)250 static int e1000_request_irq(struct e1000_adapter *adapter)
251 {
252 struct net_device *netdev = adapter->netdev;
253 irq_handler_t handler = e1000_intr;
254 int irq_flags = IRQF_SHARED;
255 int err;
256
257 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
258 netdev);
259 if (err) {
260 e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
261 }
262
263 return err;
264 }
265
e1000_free_irq(struct e1000_adapter * adapter)266 static void e1000_free_irq(struct e1000_adapter *adapter)
267 {
268 struct net_device *netdev = adapter->netdev;
269
270 free_irq(adapter->pdev->irq, netdev);
271 }
272
/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 *
 * Masks all interrupt causes, flushes the write to hardware, then waits
 * for any in-flight interrupt handler to finish before returning.
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* mask every interrupt cause */
	ew32(IMC, ~0);
	/* post the register write before waiting on running handlers */
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}
285
/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 *
 * Unmasks the default interrupt set and flushes the write so it takes
 * effect immediately.
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}
297
/* Track the VLAN id carried in the manageability cookie: register it with
 * the VLAN filter when supported, and drop the previously tracked
 * management VLAN once nothing else references it.
 */
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	/* nothing to maintain unless VLANs are in use at all */
	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			/* add the cookie's VLAN so management traffic
			 * passes the VLAN filter
			 */
			e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		/* retire the old management VLAN if it is unused now */
		if (old_vid != E1000_MNG_VLAN_NONE && vid != old_vid &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
					       old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}
324
e1000_init_manageability(struct e1000_adapter * adapter)325 static void e1000_init_manageability(struct e1000_adapter *adapter)
326 {
327 struct e1000_hw *hw = &adapter->hw;
328
329 if (adapter->en_mng_pt) {
330 u32 manc = er32(MANC);
331
332 /* disable hardware interception of ARP */
333 manc &= ~(E1000_MANC_ARP_EN);
334
335 ew32(MANC, manc);
336 }
337 }
338
e1000_release_manageability(struct e1000_adapter * adapter)339 static void e1000_release_manageability(struct e1000_adapter *adapter)
340 {
341 struct e1000_hw *hw = &adapter->hw;
342
343 if (adapter->en_mng_pt) {
344 u32 manc = er32(MANC);
345
346 /* re-enable hardware interception of ARP */
347 manc |= E1000_MANC_ARP_EN;
348
349 ew32(MANC, manc);
350 }
351 }
352
353 /**
354 * e1000_configure - configure the hardware for RX and TX
355 * @adapter: private board structure
356 **/
e1000_configure(struct e1000_adapter * adapter)357 static void e1000_configure(struct e1000_adapter *adapter)
358 {
359 struct net_device *netdev = adapter->netdev;
360 int i;
361
362 e1000_set_rx_mode(netdev);
363
364 e1000_restore_vlan(adapter);
365 e1000_init_manageability(adapter);
366
367 e1000_configure_tx(adapter);
368 e1000_setup_rctl(adapter);
369 e1000_configure_rx(adapter);
370 /* call E1000_DESC_UNUSED which always leaves
371 * at least 1 descriptor unused to make sure
372 * next_to_use != next_to_clean
373 */
374 for (i = 0; i < adapter->num_rx_queues; i++) {
375 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
376 adapter->alloc_rx_buf(adapter, ring,
377 E1000_DESC_UNUSED(ring));
378 }
379 }
380
/**
 * e1000_up - bring the interface fully up after a hardware reset
 * @adapter: board private structure
 *
 * Reloads the hardware configuration, clears the DOWN flag, re-enables
 * NAPI, interrupts and the Tx queue, then simulates a link-change
 * interrupt to kick the watchdog.  Always returns 0.
 **/
int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}
400
401 /**
402 * e1000_power_up_phy - restore link in case the phy was powered down
403 * @adapter: address of board private structure
404 *
405 * The phy may be powered down to save power and turn off link when the
406 * driver is unloaded and wake on lan is not enabled (among others)
407 * *** this routine MUST be followed by a call to e1000_reset ***
408 **/
e1000_power_up_phy(struct e1000_adapter * adapter)409 void e1000_power_up_phy(struct e1000_adapter *adapter)
410 {
411 struct e1000_hw *hw = &adapter->hw;
412 u16 mii_reg = 0;
413
414 /* Just clear the power down bit to wake the phy back up */
415 if (hw->media_type == e1000_media_type_copper) {
416 /* according to the manual, the phy will retain its
417 * settings across a power-down/up cycle
418 */
419 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
420 mii_reg &= ~MII_CR_POWER_DOWN;
421 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
422 }
423 }
424
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down *
	 * The PHY cannot be powered down if any of the following is true *
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active
	 */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			/* manageability may be talking to the PHY over
			 * SMBus; leave it powered up in that case
			 */
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			/* any other MAC type: do not touch the PHY */
			goto out;
		}
		/* set the power-down bit in PHY control */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		/* give the PHY time to act on the write */
		msleep(1);
	}
out:
	return;
}
464
/* Mark the adapter as down and cancel all deferred work. */
static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	/* setting __E1000_DOWN keeps the work items from rescheduling
	 * themselves once cancellation starts
	 */
	set_bit(__E1000_DOWN, &adapter->flags);

	/* Since the watchdog task can reschedule other tasks, it is
	 * cancelled first; otherwise a work item could still be running
	 * after the adapter has been turned down.
	 */
	cancel_delayed_work_sync(&adapter->watchdog_task);

	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);
}
480
/* Quiesce the interface: stop Rx/Tx in hardware, drop the carrier,
 * disable NAPI and interrupts, cancel deferred work, then reset the
 * hardware and drain the rings.  The ordering of these steps matters;
 * see the inline comments.
 */
void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	/* Set the carrier off after transmits have been disabled in the
	 * hardware, to avoid race conditions with e1000_watchdog() (which
	 * may be running concurrently to us, checking for the carrier
	 * bit to decide whether it should enable transmits again). Such
	 * a race condition would result into transmission being disabled
	 * in the hardware until the next IFF_DOWN+IFF_UP cycle.
	 */
	netif_carrier_off(netdev);

	/* detach queue 0 from NAPI before disabling it */
	netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
	netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/* Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt. Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}
530
/* Serialized down/up cycle: spins until it owns the RESETTING bit, then
 * restarts the interface unless it is already down.
 */
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	/* wait for any other reset in progress to finish */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);

	/* only run the task if not already down */
	if (!test_bit(__E1000_DOWN, &adapter->flags)) {
		e1000_down(adapter);
		e1000_up(adapter);
	}

	clear_bit(__E1000_RESETTING, &adapter->flags);
}
544
/**
 * e1000_reset - repartition the packet buffer and reset the hardware
 * @adapter: board private structure
 *
 * Sizes the on-chip packet buffer (PBA) split between Tx and Rx for the
 * current MAC type and frame size, derives the flow-control watermarks,
 * resets and re-initializes the hardware, and restores VLAN/manageability
 * state.
 **/
void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		/* 82547 splits the remaining PBA space off as a software
		 * managed Tx FIFO (see the fifo stall workaround)
		 */
		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the Tx fifo also stores 16 bytes of information about the Tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation
		 */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment or use Early Receive if available
			 */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/* flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload
		 */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}
699
700 /* Dump the eeprom for users having checksum issues */
e1000_dump_eeprom(struct e1000_adapter * adapter)701 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
702 {
703 struct net_device *netdev = adapter->netdev;
704 struct ethtool_eeprom eeprom;
705 const struct ethtool_ops *ops = netdev->ethtool_ops;
706 u8 *data;
707 int i;
708 u16 csum_old, csum_new = 0;
709
710 eeprom.len = ops->get_eeprom_len(netdev);
711 eeprom.offset = 0;
712
713 data = kmalloc(eeprom.len, GFP_KERNEL);
714 if (!data)
715 return;
716
717 ops->get_eeprom(netdev, &eeprom, data);
718
719 csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
720 (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
721 for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
722 csum_new += data[i] + (data[i + 1] << 8);
723 csum_new = EEPROM_SUM - csum_new;
724
725 pr_err("/*********************/\n");
726 pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
727 pr_err("Calculated : 0x%04x\n", csum_new);
728
729 pr_err("Offset Values\n");
730 pr_err("======== ======\n");
731 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
732
733 pr_err("Include this output when contacting your support provider.\n");
734 pr_err("This is not a software error! Something bad happened to\n");
735 pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
736 pr_err("result in further problems, possibly loss of data,\n");
737 pr_err("corruption or system hangs!\n");
738 pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
739 pr_err("which is invalid and requires you to set the proper MAC\n");
740 pr_err("address manually before continuing to enable this network\n");
741 pr_err("device. Please inspect the EEPROM dump and report the\n");
742 pr_err("issue to your hardware vendor or Intel Customer Support.\n");
743 pr_err("/*********************/\n");
744
745 kfree(data);
746 }
747
748 /**
749 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
750 * @pdev: PCI device information struct
751 *
752 * Return true if an adapter needs ioport resources
753 **/
e1000_is_need_ioport(struct pci_dev * pdev)754 static int e1000_is_need_ioport(struct pci_dev *pdev)
755 {
756 switch (pdev->device) {
757 case E1000_DEV_ID_82540EM:
758 case E1000_DEV_ID_82540EM_LOM:
759 case E1000_DEV_ID_82540EP:
760 case E1000_DEV_ID_82540EP_LOM:
761 case E1000_DEV_ID_82540EP_LP:
762 case E1000_DEV_ID_82541EI:
763 case E1000_DEV_ID_82541EI_MOBILE:
764 case E1000_DEV_ID_82541ER:
765 case E1000_DEV_ID_82541ER_LOM:
766 case E1000_DEV_ID_82541GI:
767 case E1000_DEV_ID_82541GI_LF:
768 case E1000_DEV_ID_82541GI_MOBILE:
769 case E1000_DEV_ID_82544EI_COPPER:
770 case E1000_DEV_ID_82544EI_FIBER:
771 case E1000_DEV_ID_82544GC_COPPER:
772 case E1000_DEV_ID_82544GC_LOM:
773 case E1000_DEV_ID_82545EM_COPPER:
774 case E1000_DEV_ID_82545EM_FIBER:
775 case E1000_DEV_ID_82546EB_COPPER:
776 case E1000_DEV_ID_82546EB_FIBER:
777 case E1000_DEV_ID_82546EB_QUAD_COPPER:
778 return true;
779 default:
780 return false;
781 }
782 }
783
/* ndo_fix_features: the hardware has no separate Rx/Tx VLAN acceleration
 * toggles, so force the Tx flag to mirror the Rx flag.
 */
static netdev_features_t e1000_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	features &= ~NETIF_F_HW_VLAN_CTAG_TX;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}
797
/* ndo_set_features: apply VLAN acceleration and Rx checksum changes.
 * Returns 1 rather than 0 because netdev->features is written here by
 * the driver itself -- NOTE(review): presumably relying on the positive-
 * return convention of ndo_set_features; confirm against netdevice.h.
 */
static int e1000_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		e1000_vlan_mode(netdev, features);

	/* only a RXCSUM or RXALL change requires a reset below */
	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	else
		e1000_reset(adapter);

	return 1;
}
820
/* net_device callbacks; installed on the netdev during probe */
static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open = e1000_open,
	.ndo_stop = e1000_close,
	.ndo_start_xmit = e1000_xmit_frame,
	.ndo_set_rx_mode = e1000_set_rx_mode,
	.ndo_set_mac_address = e1000_set_mac,
	.ndo_tx_timeout = e1000_tx_timeout,
	.ndo_change_mtu = e1000_change_mtu,
	.ndo_eth_ioctl = e1000_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = e1000_netpoll,
#endif
	.ndo_fix_features = e1000_fix_features,
	.ndo_set_features = e1000_set_features,
};
839
840 /**
841 * e1000_init_hw_struct - initialize members of hw struct
842 * @adapter: board private struct
843 * @hw: structure used by e1000_hw.c
844 *
845 * Factors out initialization of the e1000_hw struct to its own function
846 * that can be called very early at init (just after struct allocation).
847 * Fields are initialized based on PCI device information and
848 * OS network device settings (MTU size).
849 * Returns negative error codes if MAC type setup fails.
850 */
e1000_init_hw_struct(struct e1000_adapter * adapter,struct e1000_hw * hw)851 static int e1000_init_hw_struct(struct e1000_adapter *adapter,
852 struct e1000_hw *hw)
853 {
854 struct pci_dev *pdev = adapter->pdev;
855
856 /* PCI config space info */
857 hw->vendor_id = pdev->vendor;
858 hw->device_id = pdev->device;
859 hw->subsystem_vendor_id = pdev->subsystem_vendor;
860 hw->subsystem_id = pdev->subsystem_device;
861 hw->revision_id = pdev->revision;
862
863 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
864
865 hw->max_frame_size = adapter->netdev->mtu +
866 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
867 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
868
869 /* identify the MAC */
870 if (e1000_set_mac_type(hw)) {
871 e_err(probe, "Unknown MAC Type\n");
872 return -EIO;
873 }
874
875 switch (hw->mac_type) {
876 default:
877 break;
878 case e1000_82541:
879 case e1000_82547:
880 case e1000_82541_rev_2:
881 case e1000_82547_rev_2:
882 hw->phy_init_script = 1;
883 break;
884 }
885
886 e1000_set_media_type(hw);
887 e1000_get_bus_info(hw);
888
889 hw->wait_autoneg_complete = false;
890 hw->tbi_compatibility_en = true;
891 hw->adaptive_ifs = true;
892
893 /* Copper options */
894
895 if (hw->media_type == e1000_media_type_copper) {
896 hw->mdix = AUTO_ALL_MODES;
897 hw->disable_polarity_correction = false;
898 hw->master_slave = E1000_MASTER_SLAVE;
899 }
900
901 return 0;
902 }
903
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter = NULL;
	struct e1000_hw *hw;

	/* static locals persist across probe calls: board numbering and
	 * quad-port (KSP3) port-A tracking are global across adapters
	 */
	static int cards_found;
	static int global_quad_port_a; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;
	bool disable_dev = false;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	/* save config space so it can be restored after a reset/resume */
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	if (adapter->need_ioport) {
		/* record the base of the first non-empty I/O BAR */
		for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/* there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware. There are some
	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
	}

	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean);

	strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));

	adapter->bd_number = cards_found;

	/* setup the private structure */

	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;
	if (hw->mac_type == e1000_ce4100) {
		/* CE4100 exposes its MDIO registers through BAR 1 */
		hw->ce4100_gbe_mdio_base_virt =
					ioremap(pci_resource_start(pdev, BAR_1),
						pci_resource_len(pdev, BAR_1));

		if (!hw->ce4100_gbe_mdio_base_virt)
			goto err_mdio_ioremap;
	}

	if (hw->mac_type >= e1000_82543) {
		netdev->hw_features = NETIF_F_SG |
				      NETIF_F_HW_CSUM |
				      NETIF_F_HW_VLAN_CTAG_RX;
		netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	    (hw->mac_type != e1000_82547))
		netdev->hw_features |= NETIF_F_TSO;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= (NETIF_F_RXCSUM |
				NETIF_F_RXALL |
				NETIF_F_RXFCS);

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	netdev->vlan_features |= (NETIF_F_TSO |
				  NETIF_F_HW_CSUM |
				  NETIF_F_SG);

	/* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
	if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
	    hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
		netdev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 46 - 16110 */
	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
	netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state
	 */

	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/* set MAC address to all zeroes to invalidate and temporary
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initialization here due to bad MAC address */
	eth_hw_addr_set(netdev, hw->mac_addr);

	if (!is_valid_ether_addr(netdev->dev_addr))
		e_err(probe, "Invalid MAC Address\n");


	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		/* dual-port parts read the setting for their own port */
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		fallthrough;
	default:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = true;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* Auto detect PHY address */
	if (hw->mac_type == e1000_ce4100) {
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);

			/* 0x0000 / 0xFFFF reads mean no PHY at this address */
			if (tmp != 0 && tmp != 0xFF)
				break;
		}

		if (i >= 32)
			goto err_eeprom;
	}

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_vlan_filter_on_off(adapter, false);

	/* print bus type/speed/width info */
	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
	       netdev->dev_addr);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

err_register:
err_eeprom:
	e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_dma:
err_sw_init:
err_mdio_ioremap:
	/* NOTE(review): reached with a NULL ce4100_gbe_mdio_base_virt on
	 * non-CE4100 paths; presumably iounmap(NULL) is tolerated here —
	 * confirm for all supported architectures
	 */
	iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
err_ioremap:
	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
	if (!adapter || disable_dev)
		pci_disable_device(pdev);
	return err;
}
1242
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. That could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	bool disable_dev;

	/* quiesce the adapter before tearing anything else down */
	e1000_down_and_stop(adapter);
	e1000_release_manageability(adapter);

	unregister_netdev(netdev);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	/* unmap MMIO regions only after nothing can touch the hardware */
	if (hw->mac_type == e1000_ce4100)
		iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	/* free_netdev() releases the adapter, so sample the flag first */
	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
	free_netdev(netdev);

	if (disable_dev)
		pci_disable_device(pdev);
}
1286
1287 /**
1288 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1289 * @adapter: board private structure to initialize
1290 *
1291 * e1000_sw_init initializes the Adapter private data structure.
1292 * e1000_init_hw_struct MUST be called before this function
1293 **/
e1000_sw_init(struct e1000_adapter * adapter)1294 static int e1000_sw_init(struct e1000_adapter *adapter)
1295 {
1296 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1297
1298 adapter->num_tx_queues = 1;
1299 adapter->num_rx_queues = 1;
1300
1301 if (e1000_alloc_queues(adapter)) {
1302 e_err(probe, "Unable to allocate memory for queues\n");
1303 return -ENOMEM;
1304 }
1305
1306 /* Explicitly disable IRQ since the NIC can be in any state. */
1307 e1000_irq_disable(adapter);
1308
1309 spin_lock_init(&adapter->stats_lock);
1310
1311 set_bit(__E1000_DOWN, &adapter->flags);
1312
1313 return 0;
1314 }
1315
1316 /**
1317 * e1000_alloc_queues - Allocate memory for all rings
1318 * @adapter: board private structure to initialize
1319 *
1320 * We allocate one ring per queue at run-time since we don't know the
1321 * number of queues at compile-time.
1322 **/
e1000_alloc_queues(struct e1000_adapter * adapter)1323 static int e1000_alloc_queues(struct e1000_adapter *adapter)
1324 {
1325 adapter->tx_ring = kzalloc_objs(struct e1000_tx_ring,
1326 adapter->num_tx_queues);
1327 if (!adapter->tx_ring)
1328 return -ENOMEM;
1329
1330 adapter->rx_ring = kzalloc_objs(struct e1000_rx_ring,
1331 adapter->num_rx_queues);
1332 if (!adapter->rx_ring) {
1333 kfree(adapter->tx_ring);
1334 return -ENOMEM;
1335 }
1336
1337 return E1000_SUCCESS;
1338 }
1339
/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/
int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	netif_napi_set_irq(&adapter->napi, adapter->pdev->irq);
	napi_enable(&adapter->napi);
	netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, &adapter->napi);
	netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, &adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

	/* unwind in reverse order of acquisition */
err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}
1420
/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int count = E1000_CHECK_RESET_COUNT;

	/* wait (bounded) for any in-progress reset to finish before
	 * taking the interface down
	 */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
		usleep_range(10000, 20000);

	WARN_ON(count < 0);

	/* signal that we're down so that the reset task will no longer run */
	set_bit(__E1000_DOWN, &adapter->flags);
	clear_bit(__E1000_RESETTING, &adapter->flags);

	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it)
	 */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
				       adapter->mng_vlan_id);
	}

	return 0;
}
1466
1467 /**
1468 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1469 * @adapter: address of board private structure
1470 * @start: address of beginning of memory
1471 * @len: length of memory
1472 **/
e1000_check_64k_bound(struct e1000_adapter * adapter,void * start,unsigned long len)1473 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1474 unsigned long len)
1475 {
1476 struct e1000_hw *hw = &adapter->hw;
1477 unsigned long begin = (unsigned long)start;
1478 unsigned long end = begin + len;
1479
1480 /* First rev 82545 and 82546 need to not allow any memory
1481 * write location to cross 64k boundary due to errata 23
1482 */
1483 if (hw->mac_type == e1000_82545 ||
1484 hw->mac_type == e1000_ce4100 ||
1485 hw->mac_type == e1000_82546) {
1486 return ((begin ^ (end - 1)) >> 16) == 0;
1487 }
1488
1489 return true;
1490 }
1491
/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	/* per-descriptor software state array */
	size = sizeof(struct e1000_tx_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info)
		return -ENOMEM;

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
		      txdr->size, txdr->desc);
		/* Try again, without freeing the previous, so the retry
		 * cannot be handed back the same offending region
		 */
		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
					  txdr->dma);
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory "
			      "for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}
1562
1563 /**
1564 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1565 * (Descriptors) for all queues
1566 * @adapter: board private structure
1567 *
1568 * Return 0 on success, negative on failure
1569 **/
e1000_setup_all_tx_resources(struct e1000_adapter * adapter)1570 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1571 {
1572 int i, err = 0;
1573
1574 for (i = 0; i < adapter->num_tx_queues; i++) {
1575 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1576 if (err) {
1577 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1578 for (i-- ; i >= 0; i--)
1579 e1000_free_tx_resources(adapter,
1580 &adapter->tx_ring[i]);
1581 break;
1582 }
1583 }
1584
1585 return err;
1586 }
1587
/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tdlen, tctl, tipg;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */

	switch (adapter->num_tx_queues) {
	case 1:
	default:
		/* program ring base/length and zero head/tail */
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		ew32(TDLEN, tdlen);
		ew32(TDBAH, (tdba >> 32));
		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
		ew32(TDT, 0);
		ew32(TDH, 0);
		/* pre-82543 parts use the legacy register offsets */
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDT : E1000_82542_TDT);
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if ((hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);

	/* Set the Tx Interrupt Delay register */

	ew32(TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		ew32(TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */

	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	/* older parts report packet-sent, newer report descriptor-done */
	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path.
	 */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = true;

	ew32(TCTL, tctl);

}
1681
/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	/* per-descriptor software state array */
	size = sizeof(struct e1000_rx_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info)
		return -ENOMEM;

	desc_len = sizeof(struct e1000_rx_desc);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);
	if (!rxdr->desc) {
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
		      rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous, so the retry
		 * cannot be handed back the same offending region
		 */
		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for "
			      "the Rx descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->rx_skb_top = NULL;

	return 0;
}
1754
1755 /**
1756 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1757 * (Descriptors) for all queues
1758 * @adapter: board private structure
1759 *
1760 * Return 0 on success, negative on failure
1761 **/
e1000_setup_all_rx_resources(struct e1000_adapter * adapter)1762 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1763 {
1764 int i, err = 0;
1765
1766 for (i = 0; i < adapter->num_rx_queues; i++) {
1767 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1768 if (err) {
1769 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1770 for (i-- ; i >= 0; i--)
1771 e1000_free_rx_resources(adapter,
1772 &adapter->rx_ring[i]);
1773 break;
1774 }
1775 }
1776
1777 return err;
1778 }
1779
/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = er32(RCTL);

	/* clear the multicast-offset field before re-programming it */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* store bad packets only while TBI compatibility is active */
	if (hw->tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* long-packet enable tracks whether the MTU exceeds standard size */
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in e1000e_set_rx_mode
		 */
		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
			  E1000_RCTL_DPF | /* Allow filtered pause */
			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
		 * and that breaks VLANs.
		 */
	}

	ew32(RCTL, rctl);
}
1846
/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void e1000_configure_rx(struct e1000_adapter *adapter)
{
	u64 rdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 rdlen, rctl, rxcsum;

	/* pick jumbo vs. normal clean/alloc handlers based on MTU.
	 * NOTE(review): both branches compute the same rdlen; only the
	 * handler pointers differ.
	 */
	if (adapter->netdev->mtu > ETH_DATA_LEN) {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
	} else {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);

	/* set the Receive Delay Timer Register */
	ew32(RDTR, adapter->rx_int_delay);

	if (hw->mac_type >= e1000_82540) {
		ew32(RADV, adapter->rx_abs_int_delay);
		if (adapter->itr_setting != 0)
			ew32(ITR, 1000000000 / (adapter->itr * 256));
	}

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	switch (adapter->num_rx_queues) {
	case 1:
	default:
		rdba = adapter->rx_ring[0].dma;
		ew32(RDLEN, rdlen);
		ew32(RDBAH, (rdba >> 32));
		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
		ew32(RDT, 0);
		ew32(RDH, 0);
		/* pre-82543 parts use the legacy register offsets */
		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDH : E1000_82542_RDH);
		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDT : E1000_82542_RDT);
		break;
	}

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if (hw->mac_type >= e1000_82543) {
		rxcsum = er32(RXCSUM);
		if (adapter->rx_csum)
			rxcsum |= E1000_RXCSUM_TUOFL;
		else
			/* don't need to clear IPPCSE as it defaults to 0 */
			rxcsum &= ~E1000_RXCSUM_TUOFL;
		ew32(RXCSUM, rxcsum);
	}

	/* Enable Receives */
	ew32(RCTL, rctl | E1000_RCTL_EN);
}
1917
1918 /**
1919 * e1000_free_tx_resources - Free Tx Resources per Queue
1920 * @adapter: board private structure
1921 * @tx_ring: Tx descriptor ring for a specific queue
1922 *
1923 * Free all transmit software resources
1924 **/
e1000_free_tx_resources(struct e1000_adapter * adapter,struct e1000_tx_ring * tx_ring)1925 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1926 struct e1000_tx_ring *tx_ring)
1927 {
1928 struct pci_dev *pdev = adapter->pdev;
1929
1930 e1000_clean_tx_ring(adapter, tx_ring);
1931
1932 vfree(tx_ring->buffer_info);
1933 tx_ring->buffer_info = NULL;
1934
1935 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1936 tx_ring->dma);
1937
1938 tx_ring->desc = NULL;
1939 }
1940
1941 /**
1942 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1943 * @adapter: board private structure
1944 *
1945 * Free all transmit software resources
1946 **/
e1000_free_all_tx_resources(struct e1000_adapter * adapter)1947 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1948 {
1949 int i;
1950
1951 for (i = 0; i < adapter->num_tx_queues; i++)
1952 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1953 }
1954
1955 static void
e1000_unmap_and_free_tx_resource(struct e1000_adapter * adapter,struct e1000_tx_buffer * buffer_info,int budget)1956 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1957 struct e1000_tx_buffer *buffer_info,
1958 int budget)
1959 {
1960 if (buffer_info->dma) {
1961 if (buffer_info->mapped_as_page)
1962 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1963 buffer_info->length, DMA_TO_DEVICE);
1964 else
1965 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1966 buffer_info->length,
1967 DMA_TO_DEVICE);
1968 buffer_info->dma = 0;
1969 }
1970 if (buffer_info->skb) {
1971 napi_consume_skb(buffer_info->skb, budget);
1972 buffer_info->skb = NULL;
1973 }
1974 buffer_info->time_stamp = 0;
1975 /* buffer_info must be completely set up in the transmit path */
1976 }
1977
/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 *
 * Unmaps and frees every buffer still attached to the ring, zeroes the
 * software buffer_info array and the descriptor memory, and resets both
 * the software indices and the hardware head/tail registers to zero.
 **/
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		/* budget 0: not in NAPI context, skbs are freed directly */
		e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
	}

	/* drop any bytes/packets still accounted to the BQL queue */
	netdev_reset_queue(adapter->netdev);
	size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->last_tx_tso = false;

	/* rewind the hardware head and tail to match the software state */
	writel(0, hw->hw_addr + tx_ring->tdh);
	writel(0, hw->hw_addr + tx_ring->tdt);
}
2013
2014 /**
2015 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2016 * @adapter: board private structure
2017 **/
e1000_clean_all_tx_rings(struct e1000_adapter * adapter)2018 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2019 {
2020 int i;
2021
2022 for (i = 0; i < adapter->num_tx_queues; i++)
2023 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2024 }
2025
2026 /**
2027 * e1000_free_rx_resources - Free Rx Resources
2028 * @adapter: board private structure
2029 * @rx_ring: ring to clean the resources from
2030 *
2031 * Free all receive software resources
2032 **/
e1000_free_rx_resources(struct e1000_adapter * adapter,struct e1000_rx_ring * rx_ring)2033 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2034 struct e1000_rx_ring *rx_ring)
2035 {
2036 struct pci_dev *pdev = adapter->pdev;
2037
2038 e1000_clean_rx_ring(adapter, rx_ring);
2039
2040 vfree(rx_ring->buffer_info);
2041 rx_ring->buffer_info = NULL;
2042
2043 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2044 rx_ring->dma);
2045
2046 rx_ring->desc = NULL;
2047 }
2048
2049 /**
2050 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2051 * @adapter: board private structure
2052 *
2053 * Free all receive software resources
2054 **/
e1000_free_all_rx_resources(struct e1000_adapter * adapter)2055 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2056 {
2057 int i;
2058
2059 for (i = 0; i < adapter->num_rx_queues; i++)
2060 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2061 }
2062
2063 #define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
e1000_frag_len(const struct e1000_adapter * a)2064 static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2065 {
2066 return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2067 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2068 }
2069
e1000_alloc_frag(const struct e1000_adapter * a)2070 static void *e1000_alloc_frag(const struct e1000_adapter *a)
2071 {
2072 unsigned int len = e1000_frag_len(a);
2073 u8 *data = netdev_alloc_frag(len);
2074
2075 if (likely(data))
2076 data += E1000_HEADROOM;
2077 return data;
2078 }
2079
/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 *
 * Unmaps and releases every receive buffer, then zeroes the software
 * bookkeeping and the descriptor memory and rewinds the hardware
 * head/tail registers.  The buffer type (page frag vs full page) is
 * determined by which clean_rx handler the adapter is configured with.
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_rx_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx netfrags */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (adapter->clean_rx == e1000_clean_rx_irq) {
			/* normal path: buffers are page frags */
			if (buffer_info->dma)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			if (buffer_info->rxbuf.data) {
				skb_free_frag(buffer_info->rxbuf.data);
				buffer_info->rxbuf.data = NULL;
			}
		} else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
			/* jumbo path: buffers are whole pages */
			if (buffer_info->dma)
				dma_unmap_page(&pdev->dev, buffer_info->dma,
					       adapter->rx_buffer_len,
					       DMA_FROM_DEVICE);
			if (buffer_info->rxbuf.page) {
				put_page(buffer_info->rxbuf.page);
				buffer_info->rxbuf.page = NULL;
			}
		}

		buffer_info->dma = 0;
	}

	/* there also may be some cached data from a chained receive */
	napi_free_frags(&adapter->napi);
	rx_ring->rx_skb_top = NULL;

	size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	/* rewind the hardware head and tail to match the software state */
	writel(0, hw->hw_addr + rx_ring->rdh);
	writel(0, hw->hw_addr + rx_ring->rdt);
}
2136
2137 /**
2138 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2139 * @adapter: board private structure
2140 **/
e1000_clean_all_rx_rings(struct e1000_adapter * adapter)2141 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2142 {
2143 int i;
2144
2145 for (i = 0; i < adapter->num_rx_queues; i++)
2146 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2147 }
2148
/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
 * and memory write and invalidate disabled for certain operations
 */
static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	/* MWI must be off while the receive unit is held in reset */
	e1000_pci_clear_mwi(hw);

	/* assert RX reset and give the hardware time to settle */
	rctl = er32(RCTL);
	rctl |= E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	mdelay(5);

	/* posted receive buffers are no longer valid while in reset */
	if (netif_running(netdev))
		e1000_clean_all_rx_rings(adapter);
}
2169
/* Counterpart to e1000_enter_82542_rst(): take the receive unit out of
 * reset, restore MWI if it was originally enabled, and repopulate the
 * (single) receive ring if the interface is up.
 */
static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	/* deassert RX reset and give the hardware time to settle */
	rctl = er32(RCTL);
	rctl &= ~E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	mdelay(5);

	/* re-enable MWI only if the saved PCI command word had it on */
	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
		e1000_pci_set_mwi(hw);

	if (netif_running(netdev)) {
		/* No need to loop, because 82542 supports only 1 queue */
		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
		e1000_configure_rx(adapter);
		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
	}
}
2192
2193 /**
2194 * e1000_set_mac - Change the Ethernet Address of the NIC
2195 * @netdev: network interface device structure
2196 * @p: pointer to an address structure
2197 *
2198 * Returns 0 on success, negative on failure
2199 **/
e1000_set_mac(struct net_device * netdev,void * p)2200 static int e1000_set_mac(struct net_device *netdev, void *p)
2201 {
2202 struct e1000_adapter *adapter = netdev_priv(netdev);
2203 struct e1000_hw *hw = &adapter->hw;
2204 struct sockaddr *addr = p;
2205
2206 if (!is_valid_ether_addr(addr->sa_data))
2207 return -EADDRNOTAVAIL;
2208
2209 /* 82542 2.0 needs to be in reset to write receive address registers */
2210
2211 if (hw->mac_type == e1000_82542_rev2_0)
2212 e1000_enter_82542_rst(adapter);
2213
2214 eth_hw_addr_set(netdev, addr->sa_data);
2215 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2216
2217 e1000_rar_set(hw, hw->mac_addr, 0);
2218
2219 if (hw->mac_type == e1000_82542_rev2_0)
2220 e1000_leave_82542_rst(adapter);
2221
2222 return 0;
2223 }
2224
/**
 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void e1000_set_rx_mode(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	bool use_uc = false;
	u32 rctl;
	u32 hash_value;
	int i, rar_entries = E1000_RAR_ENTRIES;
	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
	/* shadow copy of the multicast hash table, written out in one pass */
	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);

	if (!mcarray)
		return;

	/* Check for Promiscuous and All Multicast modes */

	rctl = er32(RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		rctl &= ~E1000_RCTL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI)
			rctl |= E1000_RCTL_MPE;
		else
			rctl &= ~E1000_RCTL_MPE;
		/* Enable VLAN filter if there is a VLAN */
		if (e1000_vlan_used(adapter))
			rctl |= E1000_RCTL_VFE;
	}

	/* more unicast addresses than exact filters (RAR 0 is the station
	 * address) forces unicast promiscuous mode
	 */
	if (netdev_uc_count(netdev) > rar_entries - 1) {
		rctl |= E1000_RCTL_UPE;
	} else if (!(netdev->flags & IFF_PROMISC)) {
		rctl &= ~E1000_RCTL_UPE;
		use_uc = true;
	}

	ew32(RCTL, rctl);

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	/* load the first 14 addresses into the exact filters 1-14. Unicast
	 * addresses take precedence to avoid disabling unicast filtering
	 * when possible.
	 *
	 * RAR 0 is used for the station MAC address
	 * if there are not 14 addresses, go ahead and clear the filters
	 */
	i = 1;
	if (use_uc)
		netdev_for_each_uc_addr(ha, netdev) {
			if (i == rar_entries)
				break;
			e1000_rar_set(hw, ha->addr, i++);
		}

	netdev_for_each_mc_addr(ha, netdev) {
		if (i == rar_entries) {
			/* load any remaining addresses into the hash table */
			u32 hash_reg, hash_bit, mta;
			hash_value = e1000_hash_mc_addr(hw, ha->addr);
			hash_reg = (hash_value >> 5) & 0x7F;
			hash_bit = hash_value & 0x1F;
			mta = (1 << hash_bit);
			mcarray[hash_reg] |= mta;
		} else {
			e1000_rar_set(hw, ha->addr, i++);
		}
	}

	/* clear the remaining unused exact filters (low and high halves) */
	for (; i < rar_entries; i++) {
		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
		E1000_WRITE_FLUSH();
		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
		E1000_WRITE_FLUSH();
	}

	/* write the hash table completely, write from bottom to avoid
	 * both stupid write combining chipsets, and flushing each write
	 */
	for (i = mta_reg_count - 1; i >= 0 ; i--) {
		/* If we are on an 82544 has an errata where writing odd
		 * offsets overwrites the previous even offset, but writing
		 * backwards over the range solves the issue by always
		 * writing the odd offset first
		 */
		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
	}
	E1000_WRITE_FLUSH();

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	kfree(mcarray);
}
2334
2335 /**
2336 * e1000_update_phy_info_task - get phy info
2337 * @work: work struct contained inside adapter struct
2338 *
2339 * Need to wait a few seconds after link up to get diagnostic information from
2340 * the phy
2341 */
e1000_update_phy_info_task(struct work_struct * work)2342 static void e1000_update_phy_info_task(struct work_struct *work)
2343 {
2344 struct e1000_adapter *adapter = container_of(work,
2345 struct e1000_adapter,
2346 phy_info_task.work);
2347
2348 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2349 }
2350
/**
 * e1000_82547_tx_fifo_stall_task - task to complete work
 * @work: work struct contained inside adapter struct
 *
 * 82547 Tx FIFO stall workaround: once the transmit queue, FIFO head/tail
 * and their saved copies all agree (i.e. the FIFO has fully drained),
 * briefly disable transmits, rewind the FIFO pointers to the head
 * address, and wake the queue.  Otherwise reschedule and retry.
 **/
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     fifo_stall_task.work);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl;

	if (atomic_read(&adapter->tx_fifo_stall)) {
		/* FIFO is drained when descriptor and FIFO pointers match */
		if ((er32(TDT) == er32(TDH)) &&
		    (er32(TDFT) == er32(TDFH)) &&
		    (er32(TDFTS) == er32(TDFHS))) {
			/* disable transmits while rewinding the FIFO */
			tctl = er32(TCTL);
			ew32(TCTL, tctl & ~E1000_TCTL_EN);
			ew32(TDFT, adapter->tx_head_addr);
			ew32(TDFH, adapter->tx_head_addr);
			ew32(TDFTS, adapter->tx_head_addr);
			ew32(TDFHS, adapter->tx_head_addr);
			ew32(TCTL, tctl);
			E1000_WRITE_FLUSH();

			adapter->tx_fifo_head = 0;
			atomic_set(&adapter->tx_fifo_stall, 0);
			netif_wake_queue(netdev);
		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
			/* not drained yet: retry on the next jiffy */
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		}
	}
}
2385
e1000_has_link(struct e1000_adapter * adapter)2386 bool e1000_has_link(struct e1000_adapter *adapter)
2387 {
2388 struct e1000_hw *hw = &adapter->hw;
2389 bool link_active = false;
2390
2391 /* get_link_status is set on LSC (link status) interrupt or rx
2392 * sequence error interrupt (except on intel ce4100).
2393 * get_link_status will stay false until the
2394 * e1000_check_for_link establishes link for copper adapters
2395 * ONLY
2396 */
2397 switch (hw->media_type) {
2398 case e1000_media_type_copper:
2399 if (hw->mac_type == e1000_ce4100)
2400 hw->get_link_status = 1;
2401 if (hw->get_link_status) {
2402 e1000_check_for_link(hw);
2403 link_active = !hw->get_link_status;
2404 } else {
2405 link_active = true;
2406 }
2407 break;
2408 case e1000_media_type_fiber:
2409 e1000_check_for_link(hw);
2410 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2411 break;
2412 case e1000_media_type_internal_serdes:
2413 e1000_check_for_link(hw);
2414 link_active = hw->serdes_has_link;
2415 break;
2416 default:
2417 break;
2418 }
2419
2420 return link_active;
2421 }
2422
/**
 * e1000_watchdog - work function
 * @work: work struct contained inside adapter struct
 *
 * Periodic (2s) link supervision: reports link transitions, enables
 * transmits on link-up, updates statistics and adaptive IFS/ITR, and
 * schedules a controller reset if Tx work is queued while link is down.
 **/
static void e1000_watchdog(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     watchdog_task.work);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_ring *txdr = adapter->tx_ring;
	u32 link, tctl;

	link = e1000_has_link(adapter);
	/* link state unchanged and up: skip the transition handling */
	if ((netif_carrier_ok(netdev)) && link)
		goto link_up;

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			/* update snapshot of PHY registers on LSC */
			e1000_get_speed_and_duplex(hw,
						   &adapter->link_speed,
						   &adapter->link_duplex);

			ctrl = er32(CTRL);
			pr_info("%s NIC Link is Up %d Mbps %s, "
				"Flow Control: %s\n",
				netdev->name,
				adapter->link_speed,
				adapter->link_duplex == FULL_DUPLEX ?
				"Full Duplex" : "Half Duplex",
				((ctrl & E1000_CTRL_TFCE) && (ctrl &
				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
				E1000_CTRL_TFCE) ? "TX" : "None")));

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				adapter->tx_timeout_factor = 16;
				break;
			case SPEED_100:
				/* maybe add some timeout factor ? */
				break;
			}

			/* enable transmits in the hardware */
			tctl = er32(TCTL);
			tctl |= E1000_TCTL_EN;
			ew32(TCTL, tctl);

			netif_carrier_on(netdev);
			/* PHY diagnostics need a couple seconds after link-up */
			if (!test_bit(__E1000_DOWN, &adapter->flags))
				schedule_delayed_work(&adapter->phy_info_task,
						      2 * HZ);
			adapter->smartspeed = 0;
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			pr_info("%s NIC Link is Down\n",
				netdev->name);
			netif_carrier_off(netdev);

			if (!test_bit(__E1000_DOWN, &adapter->flags))
				schedule_delayed_work(&adapter->phy_info_task,
						      2 * HZ);
		}

		/* work around parallel-detect failures on some PHYs */
		e1000_smartspeed(adapter);
	}

link_up:
	e1000_update_stats(adapter);

	/* deltas since the last watchdog run feed the adaptive IFS logic */
	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
	adapter->gorcl_old = adapter->stats.gorcl;
	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
	adapter->gotcl_old = adapter->stats.gotcl;

	e1000_update_adaptive(hw);

	if (!netif_carrier_ok(netdev)) {
		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
			/* exit immediately since reset is imminent */
			return;
		}
	}

	/* Simple mode for Interrupt Throttle Rate (ITR) */
	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
		/* Symmetric Tx/Rx gets a reduced ITR=2000;
		 * Total asymmetrical Tx or Rx gets ITR=8000;
		 * everyone else is between 2000-8000.
		 */
		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
		u32 dif = (adapter->gotcl > adapter->gorcl ?
			   adapter->gotcl - adapter->gorcl :
			   adapter->gorcl - adapter->gotcl) / 10000;
		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;

		/* ITR register counts in 256ns units */
		ew32(ITR, 1000000000 / (itr * 256));
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	ew32(ICS, E1000_ICS_RXDMT0);

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = true;

	/* Reschedule the task */
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
}
2553
/* Traffic-class buckets used by the dynamic interrupt throttling
 * heuristic (e1000_update_itr / e1000_set_itr).
 */
enum latency_range {
	lowest_latency = 0,	/* small, sparse traffic: highest int rate */
	low_latency = 1,	/* moderate traffic */
	bulk_latency = 2,	/* bulk/jumbo traffic: lowest int rate */
	latency_invalid = 255
};
2560
2561 /**
2562 * e1000_update_itr - update the dynamic ITR value based on statistics
2563 * @adapter: pointer to adapter
2564 * @itr_setting: current adapter->itr
2565 * @packets: the number of packets during this measurement interval
2566 * @bytes: the number of bytes during this measurement interval
2567 *
2568 * Stores a new ITR value based on packets and byte
2569 * counts during the last interrupt. The advantage of per interrupt
2570 * computation is faster updates and more accurate ITR for the current
2571 * traffic pattern. Constants in this function were computed
2572 * based on theoretical maximum wire speed and thresholds were set based
2573 * on testing data as well as attempting to minimize response time
2574 * while increasing bulk throughput.
2575 * this functionality is controlled by the InterruptThrottleRate module
2576 * parameter (see e1000_param.c)
2577 **/
e1000_update_itr(struct e1000_adapter * adapter,u16 itr_setting,int packets,int bytes)2578 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2579 u16 itr_setting, int packets, int bytes)
2580 {
2581 unsigned int retval = itr_setting;
2582 struct e1000_hw *hw = &adapter->hw;
2583
2584 if (unlikely(hw->mac_type < e1000_82540))
2585 goto update_itr_done;
2586
2587 if (packets == 0)
2588 goto update_itr_done;
2589
2590 switch (itr_setting) {
2591 case lowest_latency:
2592 /* jumbo frames get bulk treatment*/
2593 if (bytes/packets > 8000)
2594 retval = bulk_latency;
2595 else if ((packets < 5) && (bytes > 512))
2596 retval = low_latency;
2597 break;
2598 case low_latency: /* 50 usec aka 20000 ints/s */
2599 if (bytes > 10000) {
2600 /* jumbo frames need bulk latency setting */
2601 if (bytes/packets > 8000)
2602 retval = bulk_latency;
2603 else if ((packets < 10) || ((bytes/packets) > 1200))
2604 retval = bulk_latency;
2605 else if ((packets > 35))
2606 retval = lowest_latency;
2607 } else if (bytes/packets > 2000)
2608 retval = bulk_latency;
2609 else if (packets <= 2 && bytes < 512)
2610 retval = lowest_latency;
2611 break;
2612 case bulk_latency: /* 250 usec aka 4000 ints/s */
2613 if (bytes > 25000) {
2614 if (packets > 35)
2615 retval = low_latency;
2616 } else if (bytes < 6000) {
2617 retval = low_latency;
2618 }
2619 break;
2620 }
2621
2622 update_itr_done:
2623 return retval;
2624 }
2625
/* Recompute the adapter's interrupt throttle rate from the Tx/Rx byte
 * and packet counts of the last interval and program the ITR register.
 * The more conservative (higher-latency) of the Tx and Rx verdicts wins.
 */
static void e1000_set_itr(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 current_itr;
	u32 new_itr = adapter->itr;

	/* dynamic ITR is only supported on 82540 and newer MACs */
	if (unlikely(hw->mac_type < e1000_82540))
		return;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (unlikely(adapter->link_speed != SPEED_1000)) {
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
					   adapter->total_tx_packets,
					   adapter->total_tx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
		adapter->tx_itr = low_latency;

	adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
					   adapter->total_rx_packets,
					   adapter->total_rx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
		adapter->rx_itr = low_latency;

	/* higher enum value = more conservative (lower interrupt rate) */
	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	/* map the latency range to an interrupts-per-second target */
	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != adapter->itr) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > adapter->itr ?
			  min(adapter->itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->itr = new_itr;
		/* ITR register counts in 256ns units */
		ew32(ITR, 1000000000 / (new_itr * 256));
	}
}
2685
/* per-skb transmit flags passed from xmit_frame to e1000_tx_queue */
#define E1000_TX_FLAGS_CSUM		0x00000001	/* hw checksum offload */
#define E1000_TX_FLAGS_VLAN		0x00000002	/* insert VLAN tag */
#define E1000_TX_FLAGS_TSO		0x00000004	/* TCP segmentation */
#define E1000_TX_FLAGS_IPV4		0x00000008	/* IPv4 header checksum */
#define E1000_TX_FLAGS_NO_FCS		0x00000010	/* don't append FCS */
#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000	/* VLAN tag in upper 16 bits */
#define E1000_TX_FLAGS_VLAN_SHIFT	16
2693
/* Set up a TSO context descriptor for a GSO skb.
 *
 * Returns true if a context descriptor was queued, false for non-GSO
 * skbs, or a negative errno if the header could not be made writable.
 * (The int return lets callers distinguish the error case from the
 * boolean outcomes.)
 */
static int e1000_tso(struct e1000_adapter *adapter,
		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
		     __be16 protocol)
{
	struct e1000_context_desc *context_desc;
	struct e1000_tx_buffer *buffer_info;
	unsigned int i;
	u32 cmd_length = 0;
	u16 ipcse = 0, tucse, mss;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;

	if (skb_is_gso(skb)) {
		int err;

		/* headers must be in writable linear data for the fixups */
		err = skb_cow_head(skb, 0);
		if (err < 0)
			return err;

		hdr_len = skb_tcp_all_headers(skb);
		mss = skb_shinfo(skb)->gso_size;
		if (protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			/* zero the fields the hardware recomputes per segment,
			 * and seed the TCP pseudo-header checksum
			 */
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			cmd_length = E1000_TXD_CMD_IP;
			ipcse = skb_transport_offset(skb) - 1;
		} else if (skb_is_gso_v6(skb)) {
			tcp_v6_gso_csum_prep(skb);
			ipcse = 0;
		}
		/* checksum start/offset/end fields for the context descriptor */
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
		tucse = 0;

		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));

		i = tx_ring->next_to_use;
		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];

		context_desc->lower_setup.ip_fields.ipcss = ipcss;
		context_desc->lower_setup.ip_fields.ipcso = ipcso;
		context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
		context_desc->upper_setup.tcp_fields.tucss = tucss;
		context_desc->upper_setup.tcp_fields.tucso = tucso;
		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
		context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
		context_desc->cmd_and_length = cpu_to_le32(cmd_length);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;

		if (++i == tx_ring->count)
			i = 0;

		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}
2763
/* Set up a checksum-offload context descriptor for a CHECKSUM_PARTIAL
 * skb.  Returns true if a context descriptor was queued, false if the
 * skb does not need hardware checksumming.
 */
static bool e1000_tx_csum(struct e1000_adapter *adapter,
			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
			  __be16 protocol)
{
	struct e1000_context_desc *context_desc;
	struct e1000_tx_buffer *buffer_info;
	unsigned int i;
	u8 css;
	u32 cmd_len = E1000_TXD_CMD_DEXT;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return false;

	/* TCP gets the TCP command bit; anything else (incl. UDP) is
	 * handled as a generic checksum via tucss/tucso below
	 */
	switch (protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		/* XXX not handling all IPV6 headers */
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	default:
		if (unlikely(net_ratelimit()))
			e_warn(drv, "checksum_partial proto=%x!\n",
			       skb->protocol);
		break;
	}

	css = skb_checksum_start_offset(skb);

	i = tx_ring->next_to_use;
	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);

	context_desc->lower_setup.ip_config = 0;
	context_desc->upper_setup.tcp_fields.tucss = css;
	context_desc->upper_setup.tcp_fields.tucso =
		css + skb->csum_offset;
	context_desc->upper_setup.tcp_fields.tucse = 0;
	context_desc->tcp_seg_setup.data = 0;
	context_desc->cmd_and_length = cpu_to_le32(cmd_len);

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;

	if (unlikely(++i == tx_ring->count))
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}
2818
/* maximum data bytes a single Tx descriptor may carry (4 KiB) */
#define E1000_MAX_TXD_PWR	12
#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)

/* DMA-map the skb's head and frags into tx_ring->buffer_info entries,
 * applying several controller errata workarounds along the way.
 *
 * Returns the number of descriptors consumed, or 0 on DMA mapping
 * failure (in which case all mappings made so far are undone).
 */
static int e1000_tx_map(struct e1000_adapter *adapter,
			struct e1000_tx_ring *tx_ring,
			struct sk_buff *skb, unsigned int first,
			unsigned int max_per_txd, unsigned int nr_frags,
			unsigned int mss)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_tx_buffer *buffer_info;
	unsigned int len = skb_headlen(skb);
	unsigned int offset = 0, size, count = 0, i;
	unsigned int f, bytecount, segs;

	i = tx_ring->next_to_use;

	/* map the linear (head) portion, possibly split across descriptors */
	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, max_per_txd);
		/* Workaround for Controller erratum --
		 * descriptor for non-tso packet in a linear SKB that follows a
		 * tso gets written back prematurely before the data is fully
		 * DMA'd to the controller
		 */
		if (!skb->data_len && tx_ring->last_tx_tso &&
		    !skb_is_gso(skb)) {
			tx_ring->last_tx_tso = false;
			size -= 4;
		}

		/* Workaround for premature desc write-backs
		 * in TSO mode.  Append 4-byte sentinel desc
		 */
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;
		/* work-around for errata 10 and it applies
		 * to all controllers in PCI-X mode
		 * The fix is to make sure that the first descriptor of a
		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
		 */
		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
			     (size > 2015) && count == 0))
			size = 2015;

		/* Workaround for potential 82544 hang in PCI-X.  Avoid
		 * terminating buffers within evenly-aligned dwords.
		 */
		if (unlikely(adapter->pcix_82544 &&
		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
		   size > 4))
			size -= 4;

		buffer_info->length = size;
		/* set time_stamp *before* dma to help avoid a possible race */
		buffer_info->time_stamp = jiffies;
		buffer_info->mapped_as_page = false;
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data + offset,
						  size, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
		buffer_info->next_to_watch = i;

		len -= size;
		offset += size;
		count++;
		/* only advance the ring index if more head data remains */
		if (len) {
			i++;
			if (unlikely(i == tx_ring->count))
				i = 0;
		}
	}

	/* map each paged fragment, same splitting and errata rules */
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		len = skb_frag_size(frag);
		offset = 0;

		while (len) {
			unsigned long bufend;
			i++;
			if (unlikely(i == tx_ring->count))
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, max_per_txd);
			/* Workaround for premature desc write-backs
			 * in TSO mode.  Append 4-byte sentinel desc
			 */
			if (unlikely(mss && f == (nr_frags-1) &&
				     size == len && size > 8))
				size -= 4;
			/* Workaround for potential 82544 hang in PCI-X.
			 * Avoid terminating buffers within evenly-aligned
			 * dwords.
			 */
			bufend = (unsigned long)
				page_to_phys(skb_frag_page(frag));
			bufend += offset + size - 1;
			if (unlikely(adapter->pcix_82544 &&
				     !(bufend & 4) &&
				     size > 4))
				size -= 4;

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->mapped_as_page = true;
			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
						offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
				goto dma_error;
			buffer_info->next_to_watch = i;

			len -= size;
			offset += size;
			count++;
		}
	}

	segs = skb_shinfo(skb)->gso_segs ?: 1;
	/* multiply data chunks by size of headers */
	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;

	/* the skb and accounting live on the packet's last descriptor;
	 * the first descriptor records which one to watch for completion
	 */
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[i].segs = segs;
	tx_ring->buffer_info[i].bytecount = bytecount;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");
	buffer_info->dma = 0;

	/* walk backwards and undo every mapping made above */
	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
	}

	return 0;
}
2966
/* Write @count transmit descriptors for the already-mapped buffers,
 * translating tx_flags into descriptor command/option bits, then
 * publish them to hardware by advancing next_to_use.
 */
static void e1000_tx_queue(struct e1000_adapter *adapter,
			   struct e1000_tx_ring *tx_ring, int tx_flags,
			   int count)
{
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_tx_buffer *buffer_info;
	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
			     E1000_TXD_CMD_TSE;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;

		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}

	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
		txd_lower &= ~(E1000_TXD_CMD_IFCS);

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);
		if (unlikely(++i == tx_ring->count))
			i = 0;
	}

	/* only the last descriptor of the packet carries txd_cmd
	 * (EOP/RS etc.); tx_desc still points at it after the loop
	 */
	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	dma_wmb();

	tx_ring->next_to_use = i;
}
3026
3027 /* 82547 workaround to avoid controller hang in half-duplex environment.
3028 * The workaround is to avoid queuing a large packet that would span
3029 * the internal Tx FIFO ring boundary by notifying the stack to resend
3030 * the packet at a later time. This gives the Tx FIFO an opportunity to
3031 * flush all packets. When that occurs, we reset the Tx FIFO pointers
3032 * to the beginning of the Tx FIFO.
3033 */
3034
3035 #define E1000_FIFO_HDR 0x10
3036 #define E1000_82547_PAD_LEN 0x3E0
3037
e1000_82547_fifo_workaround(struct e1000_adapter * adapter,struct sk_buff * skb)3038 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3039 struct sk_buff *skb)
3040 {
3041 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3042 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3043
3044 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3045
3046 if (adapter->link_duplex != HALF_DUPLEX)
3047 goto no_fifo_stall_required;
3048
3049 if (atomic_read(&adapter->tx_fifo_stall))
3050 return 1;
3051
3052 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3053 atomic_set(&adapter->tx_fifo_stall, 1);
3054 return 1;
3055 }
3056
3057 no_fifo_stall_required:
3058 adapter->tx_fifo_head += skb_fifo_len;
3059 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3060 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3061 return 0;
3062 }
3063
/* Slow path of e1000_maybe_stop_tx(): stop the queue, then re-check for
 * space.  Returns -EBUSY if the ring really lacks @size free descriptors,
 * or 0 after restarting the queue because the cleanup path freed room
 * between the caller's check and our netif_stop_queue().
 */
static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}
3087
/* Ensure at least @size free descriptors before transmitting; drops to the
 * stop-and-recheck slow path only when the ring looks full.  Returns 0 when
 * the send may proceed, -EBUSY otherwise.
 */
static int e1000_maybe_stop_tx(struct net_device *netdev,
			       struct e1000_tx_ring *tx_ring, int size)
{
	if (unlikely(E1000_DESC_UNUSED(tx_ring) < size))
		return __e1000_maybe_stop_tx(netdev, size);
	return 0;
}
3095
3096 #define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
/**
 * e1000_xmit_frame - ndo_start_xmit hook: queue an skb for transmission
 * @skb: buffer to transmit
 * @netdev: network interface device structure
 *
 * Pads small packets, budgets descriptors (including several erratum
 * workarounds), runs the TSO/checksum offload setup, maps the skb and
 * posts the descriptors, then rings the doorbell.  Returns NETDEV_TX_OK,
 * or NETDEV_TX_BUSY when the ring is full or the 82547 FIFO workaround
 * defers the packet.
 */
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_ring *tx_ring;
	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb_headlen(skb);
	unsigned int nr_frags;
	unsigned int mss;
	int count = 0;
	int tso;
	unsigned int f;
	__be16 protocol = vlan_get_protocol(skb);

	/* This goes back to the question of how to logically map a Tx queue
	 * to a flow. Right now, performance is impacted slightly negatively
	 * if using multiple Tx queues. If the stack breaks away from a
	 * single qdisc implementation, we can look at this again.
	 */
	tx_ring = adapter->tx_ring;

	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
	 * packets may get corrupted during padding by HW.
	 * To WA this issue, pad all small packets manually.
	 */
	if (eth_skb_pad(skb))
		return NETDEV_TX_OK;

	mss = skb_shinfo(skb)->gso_size;
	/* The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer. The calc is:
	 * 4 = ceil(buffer len/mss). To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops.
	 */
	if (mss) {
		u8 hdr_len;
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;

		hdr_len = skb_tcp_all_headers(skb);
		/* header-only linear part with paged payload needs the
		 * 82544-specific tail alignment fixup below
		 */
		if (skb->data_len && hdr_len == len) {
			switch (hw->mac_type) {
			case e1000_82544: {
				unsigned int pull_size;

				/* Make sure we have room to chop off 4 bytes,
				 * and that the end alignment will work out to
				 * this hardware's requirements
				 * NOTE: this is a TSO only workaround
				 * if end byte alignment not correct move us
				 * into the next dword
				 */
				if ((unsigned long)(skb_tail_pointer(skb) - 1)
				    & 4)
					break;
				pull_size = min((unsigned int)4, skb->data_len);
				if (!__pskb_pull_tail(skb, pull_size)) {
					e_err(drv, "__pskb_pull_tail "
					      "failed.\n");
					dev_kfree_skb_any(skb);
					return NETDEV_TX_OK;
				}
				len = skb_headlen(skb);
				break;
			}
			default:
				/* do nothing */
				break;
			}
		}
	}

	/* reserve a descriptor for the offload context */
	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
		count++;
	count++;

	/* Controller Erratum workaround */
	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
		count++;

	count += TXD_USE_COUNT(len, max_txd_pwr);

	if (adapter->pcix_82544)
		count++;

	/* work-around for errata 10 and it applies to all controllers
	 * in PCI-X mode, so add one more descriptor to the count
	 */
	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
		     (len > 2015)))
		count++;

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
				       max_txd_pwr);
	if (adapter->pcix_82544)
		count += nr_frags;

	/* need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time
	 */
	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
		return NETDEV_TX_BUSY;

	if (unlikely((hw->mac_type == e1000_82547) &&
		     (e1000_82547_fifo_workaround(adapter, skb)))) {
		netif_stop_queue(netdev);
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		return NETDEV_TX_BUSY;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (skb_vlan_tag_get(skb) <<
			     E1000_TX_FLAGS_VLAN_SHIFT);
	}

	/* remember the start slot so the context descriptor's
	 * next_to_watch can be fixed up after mapping
	 */
	first = tx_ring->next_to_use;

	tso = e1000_tso(adapter, tx_ring, skb, protocol);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (likely(tso)) {
		if (likely(hw->mac_type != e1000_82544))
			tx_ring->last_tx_tso = true;
		tx_flags |= E1000_TX_FLAGS_TSO;
	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
		tx_flags |= E1000_TX_FLAGS_CSUM;

	if (protocol == htons(ETH_P_IP))
		tx_flags |= E1000_TX_FLAGS_IPV4;

	if (unlikely(skb->no_fcs))
		tx_flags |= E1000_TX_FLAGS_NO_FCS;

	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
			     nr_frags, mss);

	if (count) {
		/* The descriptors needed is higher than other Intel drivers
		 * due to a number of workarounds. The breakdown is below:
		 * Data descriptors: MAX_SKB_FRAGS + 1
		 * Context Descriptor: 1
		 * Keep head from touching tail: 2
		 * Workarounds: 3
		 */
		int desc_needed = MAX_SKB_FRAGS + 7;

		netdev_sent_queue(netdev, skb->len);
		skb_tx_timestamp(skb);

		e1000_tx_queue(adapter, tx_ring, tx_flags, count);

		/* 82544 potentially requires twice as many data descriptors
		 * in order to guarantee buffers don't end on evenly-aligned
		 * dwords
		 */
		if (adapter->pcix_82544)
			desc_needed += MAX_SKB_FRAGS + 1;

		/* Make sure there is space in the ring for the next send. */
		e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);

		if (!netdev_xmit_more() ||
		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
			writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
		}
	} else {
		/* mapping failed: drop the skb and roll the ring back to
		 * where this packet started
		 */
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}
3283
3284 #define NUM_REGS 38 /* 1 based count */
e1000_regdump(struct e1000_adapter * adapter)3285 static void e1000_regdump(struct e1000_adapter *adapter)
3286 {
3287 struct e1000_hw *hw = &adapter->hw;
3288 u32 regs[NUM_REGS];
3289 u32 *regs_buff = regs;
3290 int i = 0;
3291
3292 static const char * const reg_name[] = {
3293 "CTRL", "STATUS",
3294 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3295 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3296 "TIDV", "TXDCTL", "TADV", "TARC0",
3297 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3298 "TXDCTL1", "TARC1",
3299 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3300 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3301 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3302 };
3303
3304 regs_buff[0] = er32(CTRL);
3305 regs_buff[1] = er32(STATUS);
3306
3307 regs_buff[2] = er32(RCTL);
3308 regs_buff[3] = er32(RDLEN);
3309 regs_buff[4] = er32(RDH);
3310 regs_buff[5] = er32(RDT);
3311 regs_buff[6] = er32(RDTR);
3312
3313 regs_buff[7] = er32(TCTL);
3314 regs_buff[8] = er32(TDBAL);
3315 regs_buff[9] = er32(TDBAH);
3316 regs_buff[10] = er32(TDLEN);
3317 regs_buff[11] = er32(TDH);
3318 regs_buff[12] = er32(TDT);
3319 regs_buff[13] = er32(TIDV);
3320 regs_buff[14] = er32(TXDCTL);
3321 regs_buff[15] = er32(TADV);
3322 regs_buff[16] = er32(TARC0);
3323
3324 regs_buff[17] = er32(TDBAL1);
3325 regs_buff[18] = er32(TDBAH1);
3326 regs_buff[19] = er32(TDLEN1);
3327 regs_buff[20] = er32(TDH1);
3328 regs_buff[21] = er32(TDT1);
3329 regs_buff[22] = er32(TXDCTL1);
3330 regs_buff[23] = er32(TARC1);
3331 regs_buff[24] = er32(CTRL_EXT);
3332 regs_buff[25] = er32(ERT);
3333 regs_buff[26] = er32(RDBAL0);
3334 regs_buff[27] = er32(RDBAH0);
3335 regs_buff[28] = er32(TDFH);
3336 regs_buff[29] = er32(TDFT);
3337 regs_buff[30] = er32(TDFHS);
3338 regs_buff[31] = er32(TDFTS);
3339 regs_buff[32] = er32(TDFPC);
3340 regs_buff[33] = er32(RDFH);
3341 regs_buff[34] = er32(RDFT);
3342 regs_buff[35] = er32(RDFHS);
3343 regs_buff[36] = er32(RDFTS);
3344 regs_buff[37] = er32(RDFPC);
3345
3346 pr_info("Register dump\n");
3347 for (i = 0; i < NUM_REGS; i++)
3348 pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
3349 }
3350
3351 /*
3352 * e1000_dump: Print registers, tx ring and rx ring
3353 */
/* Print registers, the Tx ring, the Rx ring and the on-chip descriptor
 * caches to the kernel log, gated on the adapter's msg_enable bits.
 * Used by the Tx hang detector in e1000_clean_tx_irq().
 */
static void e1000_dump(struct e1000_adapter *adapter)
{
	/* this code doesn't handle multiple rings */
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
	int i;

	if (!netif_msg_hw(adapter))
		return;

	/* Print Registers */
	e1000_regdump(adapter);

	/* transmit dump */
	pr_info("TX Desc ring0 dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
	 *   +--------------------------------------------------------------+
	 *   63       48 47        36 35    32 31     24 23    16 15        0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31             16 15    8 7      0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
	 *   +----------------------------------------------------------------+
	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
	 *   +----------------------------------------------------------------+
	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
	 */
	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");

	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	/* per-descriptor Tx dump, tagging next_to_use/next_to_clean slots */
	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
		struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
		struct my_u { __le64 a; __le64 b; };
		struct my_u *u = (struct my_u *)tx_desc;
		const char *type;

		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			type = "NTC/U";
		else if (i == tx_ring->next_to_use)
			type = "NTU";
		else if (i == tx_ring->next_to_clean)
			type = "NTC";
		else
			type = "";

		/* bit 20 of the second quadword distinguishes extended Data
		 * ('d') from Context ('c') descriptors in the leading tag
		 */
		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
			le64_to_cpu(u->a), le64_to_cpu(u->b),
			(u64)buffer_info->dma, buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp, buffer_info->skb, type);
	}

rx_ring_summary:
	/* receive dump */
	pr_info("\nRX Desc ring dump\n");

	/* Legacy Receive Descriptor Format
	 *
	 * +-----------------------------------------------------+
	 * |                Buffer Address [63:0]                |
	 * +-----------------------------------------------------+
	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
	 * +-----------------------------------------------------+
	 * 63       48 47    40 39      32 31         16 15      0
	 */
	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");

	if (!netif_msg_rx_status(adapter))
		goto exit;

	/* per-descriptor Rx dump, tagging next_to_use/next_to_clean slots */
	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
		struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
		struct my_u { __le64 a; __le64 b; };
		struct my_u *u = (struct my_u *)rx_desc;
		const char *type;

		if (i == rx_ring->next_to_use)
			type = "NTU";
		else if (i == rx_ring->next_to_clean)
			type = "NTC";
		else
			type = "";

		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
			(u64)buffer_info->dma, buffer_info->rxbuf.data, type);
	} /* for */

	/* dump the descriptor caches */
	/* rx */
	pr_info("Rx descriptor cache in 64bit format\n");
	for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
		pr_info("R%04X: %08X|%08X %08X|%08X\n",
			i,
			readl(adapter->hw.hw_addr + i+4),
			readl(adapter->hw.hw_addr + i),
			readl(adapter->hw.hw_addr + i+12),
			readl(adapter->hw.hw_addr + i+8));
	}
	/* tx */
	pr_info("Tx descriptor cache in 64bit format\n");
	for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
		pr_info("T%04X: %08X|%08X %08X|%08X\n",
			i,
			readl(adapter->hw.hw_addr + i+4),
			readl(adapter->hw.hw_addr + i),
			readl(adapter->hw.hw_addr + i+12),
			readl(adapter->hw.hw_addr + i+8));
	}
exit:
	return;
}
3488
3489 /**
3490 * e1000_tx_timeout - Respond to a Tx Hang
3491 * @netdev: network interface device structure
3492 * @txqueue: number of the Tx queue that hung (unused)
3493 **/
e1000_tx_timeout(struct net_device * netdev,unsigned int __always_unused txqueue)3494 static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
3495 {
3496 struct e1000_adapter *adapter = netdev_priv(netdev);
3497
3498 /* Do the reset outside of interrupt context */
3499 adapter->tx_timeout_count++;
3500 schedule_work(&adapter->reset_task);
3501 }
3502
/* Reset worker scheduled from e1000_tx_timeout(); reinitializes the
 * adapter under RTNL so it cannot race device open/close.
 * NOTE: the local must be named 'adapter' for the e_err() macro.
 */
static void e1000_reset_task(struct work_struct *work)
{
	struct e1000_adapter *adapter =
		container_of(work, struct e1000_adapter, reset_task);

	e_err(drv, "Reset adapter\n");
	rtnl_lock();
	e1000_reinit_locked(adapter);
	rtnl_unlock();
}
3513
3514 /**
3515 * e1000_change_mtu - Change the Maximum Transfer Unit
3516 * @netdev: network interface device structure
3517 * @new_mtu: new value for maximum frame size
3518 *
3519 * Returns 0 on success, negative on failure
3520 **/
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	/* on-wire frame size: MTU plus L2 header and FCS */
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* Adapter-specific max frame size limits. */
	switch (hw->mac_type) {
	case e1000_undefined ... e1000_82542_rev2_1:
		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
			e_err(probe, "Jumbo Frames not supported.\n");
			return -EINVAL;
		}
		break;
	default:
		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
		break;
	}

	/* serialize against any concurrent reset */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	/* e1000_down has a dependency on max_frame_size */
	hw->max_frame_size = max_frame;
	if (netif_running(netdev)) {
		/* prevent buffers from being reallocated */
		adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
		e1000_down(adapter);
	}

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * however with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs
	 */

	if (max_frame <= E1000_RXBUFFER_2048)
		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
	else
#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
		adapter->rx_buffer_len = PAGE_SIZE;
#endif

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if (!hw->tbi_compatibility_on &&
	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	netdev_dbg(netdev, "changing MTU from %d to %d\n",
		   netdev->mtu, new_mtu);
	WRITE_ONCE(netdev->mtu, new_mtu);

	/* bring the interface back up (or just reset if it was down) */
	if (netif_running(netdev))
		e1000_up(adapter);
	else
		e1000_reset(adapter);

	clear_bit(__E1000_RESETTING, &adapter->flags);

	return 0;
}
3586
3587 /**
3588 * e1000_update_stats - Update the board statistics counters
3589 * @adapter: board private structure
3590 **/
void e1000_update_stats(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long flags;
	u16 phy_tmp;

/* low byte of PHY_1000T_STATUS holds the idle error count */
#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/* Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	spin_lock_irqsave(&adapter->stats_lock, flags);

	/* these counters are modified from e1000_tbi_adjust_stats,
	 * called from the interrupt context, so they must only
	 * be written while holding adapter->stats_lock
	 */

	adapter->stats.crcerrs += er32(CRCERRS);
	adapter->stats.gprc += er32(GPRC);
	adapter->stats.gorcl += er32(GORCL);
	adapter->stats.gorch += er32(GORCH);
	adapter->stats.bprc += er32(BPRC);
	adapter->stats.mprc += er32(MPRC);
	adapter->stats.roc += er32(ROC);

	adapter->stats.prc64 += er32(PRC64);
	adapter->stats.prc127 += er32(PRC127);
	adapter->stats.prc255 += er32(PRC255);
	adapter->stats.prc511 += er32(PRC511);
	adapter->stats.prc1023 += er32(PRC1023);
	adapter->stats.prc1522 += er32(PRC1522);

	adapter->stats.symerrs += er32(SYMERRS);
	adapter->stats.mpc += er32(MPC);
	adapter->stats.scc += er32(SCC);
	adapter->stats.ecol += er32(ECOL);
	adapter->stats.mcc += er32(MCC);
	adapter->stats.latecol += er32(LATECOL);
	adapter->stats.dc += er32(DC);
	adapter->stats.sec += er32(SEC);
	adapter->stats.rlec += er32(RLEC);
	adapter->stats.xonrxc += er32(XONRXC);
	adapter->stats.xontxc += er32(XONTXC);
	adapter->stats.xoffrxc += er32(XOFFRXC);
	adapter->stats.xofftxc += er32(XOFFTXC);
	adapter->stats.fcruc += er32(FCRUC);
	adapter->stats.gptc += er32(GPTC);
	adapter->stats.gotcl += er32(GOTCL);
	adapter->stats.gotch += er32(GOTCH);
	adapter->stats.rnbc += er32(RNBC);
	adapter->stats.ruc += er32(RUC);
	adapter->stats.rfc += er32(RFC);
	adapter->stats.rjc += er32(RJC);
	adapter->stats.torl += er32(TORL);
	adapter->stats.torh += er32(TORH);
	adapter->stats.totl += er32(TOTL);
	adapter->stats.toth += er32(TOTH);
	adapter->stats.tpr += er32(TPR);

	adapter->stats.ptc64 += er32(PTC64);
	adapter->stats.ptc127 += er32(PTC127);
	adapter->stats.ptc255 += er32(PTC255);
	adapter->stats.ptc511 += er32(PTC511);
	adapter->stats.ptc1023 += er32(PTC1023);
	adapter->stats.ptc1522 += er32(PTC1522);

	adapter->stats.mptc += er32(MPTC);
	adapter->stats.bptc += er32(BPTC);

	/* used for adaptive IFS */

	hw->tx_packet_delta = er32(TPT);
	adapter->stats.tpt += hw->tx_packet_delta;
	hw->collision_delta = er32(COLC);
	adapter->stats.colc += hw->collision_delta;

	/* counters only implemented on 82543 and newer MACs */
	if (hw->mac_type >= e1000_82543) {
		adapter->stats.algnerrc += er32(ALGNERRC);
		adapter->stats.rxerrc += er32(RXERRC);
		adapter->stats.tncrs += er32(TNCRS);
		adapter->stats.cexterr += er32(CEXTERR);
		adapter->stats.tsctc += er32(TSCTC);
		adapter->stats.tsctfc += er32(TSCTFC);
	}

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = adapter->stats.mprc;
	netdev->stats.collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC
	 */
	netdev->stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
	netdev->stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
	netdev->stats.tx_errors = adapter->stats.txerrc;
	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
	netdev->stats.tx_window_errors = adapter->stats.latecol;
	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
	/* hardware with bad carrier stats in full duplex: zero them out */
	if (hw->bad_tx_carr_stats_fd &&
	    adapter->link_duplex == FULL_DUPLEX) {
		netdev->stats.tx_carrier_errors = 0;
		adapter->stats.tncrs = 0;
	}

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}

		if ((hw->mac_type <= e1000_82546) &&
		   (hw->phy_type == e1000_phy_m88) &&
		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
			adapter->phy_stats.receive_errors += phy_tmp;
	}

	/* Management Stats */
	if (hw->has_smbus) {
		adapter->stats.mgptc += er32(MGTPTC);
		adapter->stats.mgprc += er32(MGTPRC);
		adapter->stats.mgpdc += er32(MGTPDC);
	}

	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
3740
3741 /**
3742 * e1000_intr - Interrupt Handler
3743 * @irq: interrupt number
3744 * @data: pointer to a network interface device structure
3745 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);	/* note: this read also clears the causes */

	if (unlikely((!icr)))
		return IRQ_NONE;  /* Not our interrupt */

	/* we might have caused the interrupt, but the above
	 * read cleared it, and just in case the driver is
	 * down there is nothing to do so return handled
	 */
	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
		return IRQ_HANDLED;

	/* link state change: let the watchdog task re-read link status */
	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
		hw->get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			schedule_delayed_work(&adapter->watchdog_task, 1);
	}

	/* disable interrupts, without the synchronize_irq bit */
	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();

	if (likely(napi_schedule_prep(&adapter->napi))) {
		/* zero the per-poll byte/packet accounting before polling */
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	} else {
		/* this really should not happen! if it does it is basically a
		 * bug, but not a hard error, so enable ints and continue
		 */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			e1000_irq_enable(adapter);
	}

	return IRQ_HANDLED;
}
3790
3791 /**
3792 * e1000_clean - NAPI Rx polling callback
3793 * @napi: napi struct containing references to driver info
3794 * @budget: budget given to driver for receive packets
3795 **/
e1000_clean(struct napi_struct * napi,int budget)3796 static int e1000_clean(struct napi_struct *napi, int budget)
3797 {
3798 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3799 napi);
3800 int tx_clean_complete = 0, work_done = 0;
3801
3802 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3803
3804 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3805
3806 if (!tx_clean_complete || work_done == budget)
3807 return budget;
3808
3809 /* Exit the polling mode, but don't re-enable interrupts if stack might
3810 * poll us due to busy-polling
3811 */
3812 if (likely(napi_complete_done(napi, work_done))) {
3813 if (likely(adapter->itr_setting & 3))
3814 e1000_set_itr(adapter);
3815 if (!test_bit(__E1000_DOWN, &adapter->flags))
3816 e1000_irq_enable(adapter);
3817 }
3818
3819 return work_done;
3820 }
3821
3822 /**
3823 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3824 * @adapter: board private structure
3825 * @tx_ring: ring to clean
3826 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_tx_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
	unsigned int bytes_compl = 0, pkts_compl = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	/* walk packet by packet while the end-of-packet descriptor reports
	 * DD; the count bound prevents looping past a full ring
	 */
	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		dma_rmb();	/* read buffer_info after eop_desc */
		for ( ; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				/* per-packet accounting lives on the last
				 * descriptor's buffer_info
				 */
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
				if (buffer_info->skb) {
					bytes_compl += buffer_info->skb->len;
					pkts_compl++;
				}

			}
			e1000_unmap_and_free_tx_resource(adapter, buffer_info,
							 64);
			tx_desc->upper.data = 0;

			if (unlikely(++i == tx_ring->count))
				i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	/* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
	 * which will reuse the cleaned buffers.
	 */
	smp_store_release(&tx_ring->next_to_clean, i);

	netdev_completed_queue(netdev, pkts_compl, bytes_compl);

#define TX_WAKE_THRESHOLD 32
	if (unlikely(count && netif_carrier_ok(netdev) &&
		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = false;
		/* hang = oldest pending packet aged past the timeout and
		 * transmit not paused by flow control (TXOFF)
		 */
		if (tx_ring->buffer_info[eop].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			e_err(drv, "Detected Tx Unit Hang\n"
			      "  Tx Queue             <%lu>\n"
			      "  TDH                  <%x>\n"
			      "  TDT                  <%x>\n"
			      "  next_to_use          <%x>\n"
			      "  next_to_clean        <%x>\n"
			      "buffer_info[next_to_clean]\n"
			      "  time_stamp           <%lx>\n"
			      "  next_to_watch        <%x>\n"
			      "  jiffies              <%lx>\n"
			      "  next_to_watch.status <%x>\n",
			      (unsigned long)(tx_ring - adapter->tx_ring),
			      readl(hw->hw_addr + tx_ring->tdh),
			      readl(hw->hw_addr + tx_ring->tdt),
			      tx_ring->next_to_use,
			      tx_ring->next_to_clean,
			      tx_ring->buffer_info[eop].time_stamp,
			      eop,
			      jiffies,
			      eop_desc->upper.fields.status);
			e1000_dump(adapter);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	netdev->stats.tx_bytes += total_tx_bytes;
	netdev->stats.tx_packets += total_tx_packets;
	/* true = entire ring was not consumed, Tx work is complete */
	return count < tx_ring->count;
}
3936
3937 /**
3938 * e1000_rx_checksum - Receive Checksum Offload for 82543
3939 * @adapter: board private structure
3940 * @status_err: receive descriptor status and error fields
3941 * @csum: receive descriptor csum field
3942 * @skb: socket buffer with received data
3943 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	/* low 16 bits of status_err hold the status field, bits 31:24
	 * hold the error field (packed by the callers)
	 */
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* 82543 or newer only */
	if (unlikely(hw->mac_type < e1000_82543))
		return;
	/* Ignore Checksum bit is set */
	if (unlikely(status & E1000_RXD_STAT_IXSM))
		return;
	/* TCP/UDP checksum error bit is set */
	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}
	/* TCP/UDP Checksum has not been calculated */
	if (!(status & E1000_RXD_STAT_TCPCS))
		return;

	/* It must be a TCP or UDP packet with a valid checksum.
	 * (The old second test of E1000_RXD_STAT_TCPCS here was redundant:
	 * the early return above guarantees the bit is set.)
	 */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_good++;
}
3976
3977 /**
3978 * e1000_consume_page - helper function for jumbo Rx path
3979 * @bi: software descriptor shadow data
3980 * @skb: skb being modified
3981 * @length: length of data being added
3982 **/
static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	/* account the page fragment in the skb's length bookkeeping */
	skb->len += length;
	skb->data_len += length;
	skb->truesize += PAGE_SIZE;

	/* ownership of the page moved to the skb; drop our reference */
	bi->rxbuf.page = NULL;
}
3991
3992 /**
3993 * e1000_receive_skb - helper function to handle rx indications
3994 * @adapter: board private structure
3995 * @status: descriptor status field as written by hardware
3996 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3997 * @skb: pointer to sk_buff to be indicated to stack
3998 */
static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
			      __le16 vlan, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	/* attach the hardware-stripped VLAN tag, if one was present */
	if (status & E1000_RXD_STAT_VP)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(vlan) &
				       E1000_RXD_SPC_VLAN_MASK);

	napi_gro_receive(&adapter->napi, skb);
}
4011
4012 /**
4013 * e1000_tbi_adjust_stats
4014 * @hw: Struct containing variables accessed by shared code
4015 * @stats: point to stats struct
4016 * @frame_len: The length of the frame in question
4017 * @mac_addr: The Ethernet destination address of the frame in question
4018 *
4019 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
4020 */
static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
				   struct e1000_hw_stats *stats,
				   u32 frame_len, const u8 *mac_addr)
{
	u64 had_carry;

	/* The hardware counted the extra TBI symbol byte; drop it. */
	frame_len--;

	/* The hardware logged this packet as a CRC error, but it is in
	 * fact acceptable: undo the CRC-error count and credit a good
	 * packet instead.
	 */
	stats->crcerrs--;
	stats->gprc++;

	/* Add the corrected length to the good-octets counters.
	 * gorcl/gorch emulate a 64-bit counter with two 32-bit halves,
	 * so a carry lost out of the low word must be propagated by hand.
	 */
	had_carry = stats->gorcl & 0x80000000;
	stats->gorcl += frame_len;
	if (had_carry && !(stats->gorcl & 0x80000000))
		stats->gorch++;

	/* Broadcast must be tested first: a broadcast address also
	 * matches the multicast test.
	 */
	if (is_broadcast_ether_addr(mac_addr))
		stats->bprc++;
	else if (is_multicast_ether_addr(mac_addr))
		stats->mprc++;

	/* The uncorrected frame was overcounted as oversized. */
	if (frame_len == hw->max_frame_size) {
		if (stats->roc > 0)
			stats->roc--;
	}

	/* The extra byte also put the frame in the wrong size bin;
	 * move it down one bin (frame_len is already corrected).
	 */
	switch (frame_len) {
	case 64:
		stats->prc64++;
		stats->prc127--;
		break;
	case 127:
		stats->prc127++;
		stats->prc255--;
		break;
	case 255:
		stats->prc255++;
		stats->prc511--;
		break;
	case 511:
		stats->prc511++;
		stats->prc1023--;
		break;
	case 1023:
		stats->prc1023++;
		stats->prc1522--;
		break;
	case 1522:
		stats->prc1522++;
		break;
	}
}
4089
/**
 * e1000_tbi_should_accept - TBI workaround: decide if an errored frame is ok
 * @adapter: board private structure
 * @status: receive descriptor status byte
 * @errors: receive descriptor error byte
 * @length: frame length as reported by hardware
 * @data: pointer to the received frame bytes
 *
 * Returns true if the frame should be accepted despite a reported error
 * (carrier-extend workaround); in that case the stats counters are also
 * corrected under stats_lock.
 **/
static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
				    u8 status, u8 errors,
				    u32 length, const u8 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	u8 last_byte;

	/* Guard against OOB on data[length - 1] */
	if (unlikely(!length))
		return false;
	/* Upper bound: length must not exceed rx_buffer_len */
	if (unlikely(length > adapter->rx_buffer_len))
		return false;
	last_byte = *(data + length - 1);

	if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
		unsigned long irq_flags;

		/* stats_lock serializes updates to adapter->stats */
		spin_lock_irqsave(&adapter->stats_lock, irq_flags);
		e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
		spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);

		return true;
	}

	return false;
}
4117
e1000_alloc_rx_skb(struct e1000_adapter * adapter,unsigned int bufsz)4118 static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4119 unsigned int bufsz)
4120 {
4121 struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4122
4123 if (unlikely(!skb))
4124 adapter->alloc_rx_buff_failed++;
4125 return skb;
4126 }
4127
4128 /**
4129 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4130 * @adapter: board private structure
4131 * @rx_ring: ring to clean
4132 * @work_done: amount of napi work completed this call
4133 * @work_to_do: max amount of work allowed for this call to do
4134 *
4135 * the return value indicates whether actual cleaning was done, there
4136 * is no guarantee that everything was cleaned
4137 */
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_rx_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	/* DD set by hardware marks a completed descriptor */
	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;

		if (++i == rx_ring->count)
			i = 0;

		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		/* page is handed to the stack (or freed), so unmap it now */
		dma_unmap_page(&pdev->dev, buffer_info->dma,
			       adapter->rx_buffer_len, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((status & E1000_RXD_STAT_EOP) &&
		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
			u8 *mapped = page_address(buffer_info->rxbuf.page);

			/* TBI workaround: errored frame may still be ok */
			if (e1000_tbi_should_accept(adapter, status,
						    rx_desc->errors,
						    length, mapped)) {
				length--;
			} else if (netdev->features & NETIF_F_RXALL) {
				goto process_skb;
			} else {
				/* an error means any chain goes out the window
				 * too
				 */
				dev_kfree_skb(rx_ring->rx_skb_top);
				rx_ring->rx_skb_top = NULL;
				goto next_desc;
			}
		}

#define rxtop rx_ring->rx_skb_top
process_skb:
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = napi_get_frags(&adapter->napi);
				if (!rxtop)
					break;

				skb_fill_page_desc(rxtop, 0,
						   buffer_info->rxbuf.page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->rxbuf.page, 0, length);
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->rxbuf.page, 0, length);
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				struct page *p;
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page
				 */
				p = buffer_info->rxbuf.page;
				if (length <= copybreak) {
					/* strip FCS unless the user asked
					 * to keep it (NETIF_F_RXFCS)
					 */
					if (likely(!(netdev->features & NETIF_F_RXFCS)))
						length -= 4;
					skb = e1000_alloc_rx_skb(adapter,
								 length);
					if (!skb)
						break;

					memcpy(skb_tail_pointer(skb),
					       page_address(p), length);

					/* re-use the page, so don't erase
					 * buffer_info->rxbuf.page
					 */
					skb_put(skb, length);
					e1000_rx_checksum(adapter,
							  status | rx_desc->errors << 24,
							  le16_to_cpu(rx_desc->csum), skb);

					total_rx_bytes += skb->len;
					total_rx_packets++;

					e1000_receive_skb(adapter, status,
							  rx_desc->special, skb);
					goto next_desc;
				} else {
					skb = napi_get_frags(&adapter->napi);
					if (!skb) {
						adapter->alloc_rx_buff_failed++;
						break;
					}
					skb_fill_page_desc(skb, 0, p, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		total_rx_bytes += (skb->len - 4); /* don't count FCS */
		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			pskb_trim(skb, skb->len - 4);
		total_rx_packets++;

		/* frag-path delivery: tag VLAN by hand, then hand the frag
		 * list to GRO (napi_gro_frags consumes the prepared skb)
		 */
		if (status & E1000_RXD_STAT_VP) {
			__le16 vlan = rx_desc->special;
			u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;

			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
		}

		napi_gro_frags(&adapter->napi);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
4324
4325 /* this should improve performance for small packets with large amounts
4326 * of reassembly being done in the stack
4327 */
static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
				       struct e1000_rx_buffer *buffer_info,
				       u32 length, const void *data)
{
	struct sk_buff *copy;

	/* only small frames are worth the extra memcpy */
	if (length > copybreak)
		return NULL;

	copy = e1000_alloc_rx_skb(adapter, length);
	if (!copy)
		return NULL;

	/* make the DMA'd bytes visible to the CPU before copying them */
	dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
				length, DMA_FROM_DEVICE);

	skb_put_data(copy, data, length);

	return copy;
}
4348
4349 /**
4350 * e1000_clean_rx_irq - Send received data up the network stack; legacy
4351 * @adapter: board private structure
4352 * @rx_ring: ring to clean
4353 * @work_done: amount of napi work completed this call
4354 * @work_to_do: max amount of work allowed for this call to do
4355 */
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_rx_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	/* DD set by hardware marks a completed descriptor */
	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 *data;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		length = le16_to_cpu(rx_desc->length);

		data = buffer_info->rxbuf.data;
		prefetch(data);
		/* small frames are memcpy'd into a fresh skb so the DMA
		 * buffer can be reused in place
		 */
		skb = e1000_copybreak(adapter, buffer_info, length, data);
		if (!skb) {
			unsigned int frag_len = e1000_frag_len(adapter);

			/* wrap an skb around the receive buffer itself */
			skb = napi_build_skb(data - E1000_HEADROOM, frag_len);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				break;
			}

			skb_reserve(skb, E1000_HEADROOM);
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			buffer_info->rxbuf.data = NULL;
		}

		if (++i == rx_ring->count)
			i = 0;

		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;

		/* !EOP means multiple descriptors were used to store a single
		 * packet, if that's the case we need to toss it. In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->discarding = true;

		if (adapter->discarding) {
			/* All receives must fit into a single buffer */
			netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
			dev_kfree_skb(skb);
			if (status & E1000_RXD_STAT_EOP)
				adapter->discarding = false;
			goto next_desc;
		}

		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			/* TBI workaround: errored frame may still be ok */
			if (e1000_tbi_should_accept(adapter, status,
						    rx_desc->errors,
						    length, data)) {
				length--;
			} else if (netdev->features & NETIF_F_RXALL) {
				goto process_skb;
			} else {
				dev_kfree_skb(skb);
				goto next_desc;
			}
		}

process_skb:
		total_rx_bytes += (length - 4); /* don't count FCS */
		total_rx_packets++;

		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			/* adjust length to remove Ethernet CRC, this must be
			 * done after the TBI_ACCEPT workaround above
			 */
			length -= 4;

		/* built skbs need their length set; copybreak skbs were
		 * filled with skb_put_data and may only need trimming
		 */
		if (buffer_info->rxbuf.data == NULL)
			skb_put(skb, length);
		else /* copybreak skb */
			skb_trim(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, status, rx_desc->special, skb);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
4497
4498 /**
4499 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4500 * @adapter: address of board private structure
4501 * @rx_ring: pointer to receive ring structure
4502 * @cleaned_count: number of buffers to allocate this pass
4503 **/
static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
			     struct e1000_rx_ring *rx_ring, int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_rx_buffer *buffer_info;
	unsigned int i;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		/* allocate a new page if necessary */
		if (!buffer_info->rxbuf.page) {
			buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->rxbuf.page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		/* map for device DMA unless still mapped from before */
		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->rxbuf.page, 0,
							adapter->rx_buffer_len,
							DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				put_page(buffer_info->rxbuf.page);
				buffer_info->rxbuf.page = NULL;
				buffer_info->dma = 0;
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		/* tail register points one descriptor behind next_to_use,
		 * so step back (with wraparound) before writing it
		 */
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		dma_wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
	}
}
4562
4563 /**
4564 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4565 * @adapter: address of board private structure
4566 * @rx_ring: pointer to ring struct
4567 * @cleaned_count: number of new Rx buffers to try to allocate
4568 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_rx_buffer *buffer_info;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		void *data;

		/* buffer still populated from a previous pass */
		if (buffer_info->rxbuf.data)
			goto skip;

		data = e1000_alloc_frag(adapter);
		if (!data) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, data, bufsz)) {
			void *olddata = data;
			e_err(rx_err, "skb align check failed: %u bytes at "
			      "%p\n", bufsz, data);
			/* Try again, without freeing the previous */
			data = e1000_alloc_frag(adapter);
			/* Failed allocation, critical failure */
			if (!data) {
				skb_free_frag(olddata);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			if (!e1000_check_64k_bound(adapter, data, bufsz)) {
				/* give up */
				skb_free_frag(data);
				skb_free_frag(olddata);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			/* Use new allocation */
			skb_free_frag(olddata);
		}
		buffer_info->dma = dma_map_single(&pdev->dev,
						  data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			skb_free_frag(data);
			buffer_info->dma = 0;
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* XXX if it was allocated cleanly it will never map to a
		 * boundary crossing
		 */

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter,
					   (void *)(unsigned long)buffer_info->dma,
					   adapter->rx_buffer_len)) {
			e_err(rx_err, "dma align check failed: %u bytes at "
			      "%p\n", adapter->rx_buffer_len,
			      (void *)(unsigned long)buffer_info->dma);

			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);

			skb_free_frag(data);
			buffer_info->rxbuf.data = NULL;
			buffer_info->dma = 0;

			adapter->alloc_rx_buff_failed++;
			break;
		}
		buffer_info->rxbuf.data = data;
skip:
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		/* tail register points one behind next_to_use (with wrap) */
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		dma_wmb();
		writel(i, hw->hw_addr + rx_ring->rdt);
	}
}
4679
4680 /**
4681 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4682 * @adapter: address of board private structure
4683 **/
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status;
	u16 phy_ctrl;

	/* only relevant for IGP PHYs autonegotiating 1000BASE-T full */
	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
	    !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back
		 */
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			/* drop manual master/slave config and restart
			 * autonegotiation
			 */
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
					    phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(hw) &&
			    !e1000_read_phy_reg(hw, PHY_CTRL,
						&phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(hw, PHY_CTRL,
						    phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(hw) &&
		    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
4736
4737 /**
4738 * e1000_ioctl - handle ioctl calls
4739 * @netdev: pointer to our netdev
4740 * @ifr: pointer to interface request structure
4741 * @cmd: ioctl data
4742 **/
e1000_ioctl(struct net_device * netdev,struct ifreq * ifr,int cmd)4743 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4744 {
4745 switch (cmd) {
4746 case SIOCGMIIPHY:
4747 case SIOCGMIIREG:
4748 case SIOCSMIIREG:
4749 return e1000_mii_ioctl(netdev, ifr, cmd);
4750 default:
4751 return -EOPNOTSUPP;
4752 }
4753 }
4754
4755 /**
4756 * e1000_mii_ioctl -
4757 * @netdev: pointer to our netdev
4758 * @ifr: pointer to interface request structure
4759 * @cmd: ioctl data
4760 **/
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	u16 mii_reg;
	unsigned long flags;

	/* MII register access only makes sense on copper PHYs */
	if (hw->media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;
		break;
	case SIOCGMIIREG:
		/* stats_lock serializes PHY register access */
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		/* only the 32 standard MII registers may be written */
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(hw, data->reg_num,
					mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		if (hw->media_type == e1000_media_type_copper) {
			/* mirror writes that change link configuration into
			 * our software state and restart the MAC to match
			 */
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					hw->autoneg = 1;
					/* advertise all speed/duplex combos */
					hw->autoneg_advertised = 0x2F;
				} else {
					u32 speed;
					/* forced mode: decode speed bits from
					 * the written control value
					 */
					if (mii_reg & 0x40)
						speed = SPEED_1000;
					else if (mii_reg & 0x2000)
						speed = SPEED_100;
					else
						speed = SPEED_10;
					retval = e1000_set_spd_dplx(
						adapter, speed,
						((mii_reg & 0x100)
						 ? DUPLEX_FULL :
						 DUPLEX_HALF));
					if (retval)
						return retval;
				}
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				/* spec-control writes need a PHY reset to
				 * take effect
				 */
				if (e1000_phy_reset(hw))
					return -EIO;
				break;
			}
		} else {
			/* NOTE(review): media_type was checked == copper at
			 * entry, so this branch appears unreachable — confirm
			 */
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}
4851
e1000_pci_set_mwi(struct e1000_hw * hw)4852 void e1000_pci_set_mwi(struct e1000_hw *hw)
4853 {
4854 struct e1000_adapter *adapter = hw->back;
4855 int ret_val = pci_set_mwi(adapter->pdev);
4856
4857 if (ret_val)
4858 e_err(probe, "Error in setting MWI\n");
4859 }
4860
/* Disable PCI Memory-Write-Invalidate for the adapter's PCI device. */
void e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	/* hw->back points at the owning e1000_adapter */
	struct e1000_adapter *adapter = hw->back;

	pci_clear_mwi(adapter->pdev);
}
4867
e1000_pcix_get_mmrbc(struct e1000_hw * hw)4868 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4869 {
4870 struct e1000_adapter *adapter = hw->back;
4871 return pcix_get_mmrbc(adapter->pdev);
4872 }
4873
e1000_pcix_set_mmrbc(struct e1000_hw * hw,int mmrbc)4874 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4875 {
4876 struct e1000_adapter *adapter = hw->back;
4877 pcix_set_mmrbc(adapter->pdev, mmrbc);
4878 }
4879
/* Write a 32-bit value to the device via x86-style port I/O. */
void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
{
	outl(value, port);
}
4884
e1000_vlan_used(struct e1000_adapter * adapter)4885 static bool e1000_vlan_used(struct e1000_adapter *adapter)
4886 {
4887 u16 vid;
4888
4889 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4890 return true;
4891 return false;
4892 }
4893
static void __e1000_vlan_mode(struct e1000_adapter *adapter,
			      netdev_features_t features)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl = er32(CTRL);

	/* CTRL.VME toggles hardware VLAN tag insertion/stripping */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		ctrl |= E1000_CTRL_VME;
	else
		ctrl &= ~E1000_CTRL_VME;

	ew32(CTRL, ctrl);
}
/* Turn hardware VLAN receive filtering on or off (RCTL.VFE), reapplying
 * the current strip/insert setting first. Interrupts are held off while
 * the registers are reprogrammed, unless the interface is already down.
 */
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, adapter->netdev->features);
	if (filter_on) {
		/* enable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		/* promiscuous mode must see all VLANs, so leave VFE off */
		if (!(adapter->netdev->flags & IFF_PROMISC))
			rctl |= E1000_RCTL_VFE;
		ew32(RCTL, rctl);
		e1000_update_mng_vlan(adapter);
	} else {
		/* disable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_VFE;
		ew32(RCTL, rctl);
	}

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}
4938
/* e1000_vlan_mode - apply a VLAN offload feature change from the stack */
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	/* push the requested CTAG RX setting into the CTRL register */
	__e1000_vlan_mode(adapter, features);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}
4952
/* e1000_vlan_rx_add_vid - add a VLAN ID to the hardware filter table
 * @netdev: network interface device structure
 * @proto: VLAN protocol (CTAG only for this hardware)
 * @vid: VLAN ID to add
 */
static int e1000_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta;
	u32 index;

	/* the management VLAN is owned by firmware; leave it alone */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return 0;

	/* first VLAN in use turns hardware filtering on */
	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, true);

	/* set this VID's bit in the 128-entry x 32-bit VLAN filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	set_bit(vid, adapter->active_vlans);

	return 0;
}
4978
/* e1000_vlan_rx_kill_vid - remove a VLAN ID from the hardware filter table
 * @netdev: network interface device structure
 * @proto: VLAN protocol (CTAG only for this hardware)
 * @vid: VLAN ID to remove
 *
 * Clears the VID's bit in the VFTA and, when the last VLAN disappears,
 * turns hardware VLAN filtering back off.
 */
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	/* NOTE(review): interrupts are disabled and immediately re-enabled
	 * with no work in between — presumably vestigial from an older
	 * version that updated per-VLAN state under irq protection here;
	 * confirm against history before removing.
	 */
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	clear_bit(vid, adapter->active_vlans);

	/* last VLAN gone: filtering no longer needed */
	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, false);

	return 0;
}
5004
e1000_restore_vlan(struct e1000_adapter * adapter)5005 static void e1000_restore_vlan(struct e1000_adapter *adapter)
5006 {
5007 u16 vid;
5008
5009 if (!e1000_vlan_used(adapter))
5010 return;
5011
5012 e1000_vlan_filter_on_off(adapter, true);
5013 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
5014 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
5015 }
5016
/* e1000_set_spd_dplx - force link speed and duplex
 * @adapter: board private structure
 * @spd: requested speed (SPEED_10 / SPEED_100 / SPEED_1000)
 * @dplx: requested duplex (DUPLEX_HALF / DUPLEX_FULL)
 *
 * Disables autonegotiation and forces the requested speed/duplex,
 * except 1000/Full which the hardware only reaches via autoneg.
 * Returns 0 on success, -EINVAL for unsupported combinations.
 */
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
{
	struct e1000_hw *hw = &adapter->hw;

	hw->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work
	 */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 gbps Full duplex */
	/* NOTE(review): the && only rejects when BOTH speed != 1000 AND
	 * duplex != full, so e.g. fiber at 100/Full slips through to the
	 * switch below — presumably || was intended; confirm before
	 * changing, since callers may rely on the current acceptance.
	 */
	if ((hw->media_type == e1000_media_type_fiber) &&
	    spd != SPEED_1000 &&
	    dplx != DUPLEX_FULL)
		goto err_inval;

	/* spd has bit 0 clear and dplx is 0 or 1, so spd + dplx is a
	 * unique key for each (speed, duplex) combination
	 */
	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		/* gigabit is only reachable through autonegotiation */
		hw->autoneg = 1;
		hw->autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}

	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
	hw->mdix = AUTO_ALL_MODES;

	return 0;

err_inval:
	e_err(probe, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}
5066
/* __e1000_shutdown - quiesce the adapter and configure wake-on-LAN
 * @pdev: PCI device
 * @enable_wake: out parameter, set true if the device should be armed
 *               to wake the system (WoL filters or manageability active)
 *
 * Common path for suspend and shutdown: stops the interface, programs
 * the Wake-Up Control/Filter registers from adapter->wol, and disables
 * the PCI device. Always returns 0.
 */
static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		int count = E1000_CHECK_RESET_COUNT;

		/* bounded wait for any in-flight reset to finish */
		while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
			usleep_range(10000, 20000);

		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		rtnl_lock();
		e1000_down(adapter);
		rtnl_unlock();
	}

	/* if link is up, drop the link-change wake filter — it would
	 * trigger immediately
	 */
	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		/* RX must stay alive so wake packets can be matched */
		e1000_setup_rctl(adapter);
		e1000_set_rx_mode(netdev);

		rctl = er32(RCTL);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC)
			rctl |= E1000_RCTL_MPE;

		/* enable receives in the hardware */
		ew32(RCTL, rctl | E1000_RCTL_EN);

		if (hw->mac_type >= e1000_82540) {
			ctrl = er32(CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			ew32(CTRL, ctrl);
		}

		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		/* arm PME and program the wake-up filters */
		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
	} else {
		/* no wake sources: clear wake-up control and filters */
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	e1000_release_manageability(adapter);

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt)
		*enable_wake = true;

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	/* __E1000_DISABLED guards against double pci_disable_device() */
	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
		pci_disable_device(pdev);

	return 0;
}
5148
e1000_suspend(struct device * dev)5149 static int e1000_suspend(struct device *dev)
5150 {
5151 int retval;
5152 struct pci_dev *pdev = to_pci_dev(dev);
5153 bool wake;
5154
5155 retval = __e1000_shutdown(pdev, &wake);
5156 device_set_wakeup_enable(dev, wake);
5157
5158 return retval;
5159 }
5160
e1000_resume(struct device * dev)5161 static int e1000_resume(struct device *dev)
5162 {
5163 struct pci_dev *pdev = to_pci_dev(dev);
5164 struct net_device *netdev = pci_get_drvdata(pdev);
5165 struct e1000_adapter *adapter = netdev_priv(netdev);
5166 struct e1000_hw *hw = &adapter->hw;
5167 u32 err;
5168
5169 if (adapter->need_ioport)
5170 err = pci_enable_device(pdev);
5171 else
5172 err = pci_enable_device_mem(pdev);
5173 if (err) {
5174 pr_err("Cannot enable PCI device from suspend\n");
5175 return err;
5176 }
5177
5178 /* flush memory to make sure state is correct */
5179 smp_mb__before_atomic();
5180 clear_bit(__E1000_DISABLED, &adapter->flags);
5181 pci_set_master(pdev);
5182
5183 pci_enable_wake(pdev, PCI_D3hot, 0);
5184 pci_enable_wake(pdev, PCI_D3cold, 0);
5185
5186 if (netif_running(netdev)) {
5187 err = e1000_request_irq(adapter);
5188 if (err)
5189 return err;
5190 }
5191
5192 e1000_power_up_phy(adapter);
5193 e1000_reset(adapter);
5194 ew32(WUS, ~0);
5195
5196 e1000_init_manageability(adapter);
5197
5198 if (netif_running(netdev))
5199 e1000_up(adapter);
5200
5201 netif_device_attach(netdev);
5202
5203 return 0;
5204 }
5205
e1000_shutdown(struct pci_dev * pdev)5206 static void e1000_shutdown(struct pci_dev *pdev)
5207 {
5208 bool wake;
5209
5210 __e1000_shutdown(pdev, &wake);
5211
5212 if (system_state == SYSTEM_POWER_OFF) {
5213 pci_wake_from_d3(pdev, wake);
5214 pci_set_power_state(pdev, PCI_D3hot);
5215 }
5216 }
5217
5218 #ifdef CONFIG_NET_POLL_CONTROLLER
5219 /* Polling 'interrupt' - used by things like netconsole to send skbs
5220 * without having to re-enable interrupts. It's not called while
5221 * the interrupt routine is executing.
5222 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* disable_hardirq() returns false only when it cannot synchronize
	 * with a running handler (i.e. we are in hardirq context); the
	 * line is disabled either way, so enable_irq() is intentionally
	 * called unconditionally.
	 */
	if (disable_hardirq(adapter->pdev->irq))
		e1000_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
5231 #endif
5232
5233 /**
5234 * e1000_io_error_detected - called when PCI error is detected
5235 * @pdev: Pointer to PCI device
5236 * @state: The current pci connection state
5237 *
5238 * This function is called after a PCI bus error affecting
5239 * this device has been detected.
5240 */
e1000_io_error_detected(struct pci_dev * pdev,pci_channel_state_t state)5241 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5242 pci_channel_state_t state)
5243 {
5244 struct net_device *netdev = pci_get_drvdata(pdev);
5245 struct e1000_adapter *adapter = netdev_priv(netdev);
5246
5247 rtnl_lock();
5248 netif_device_detach(netdev);
5249
5250 if (state == pci_channel_io_perm_failure) {
5251 rtnl_unlock();
5252 return PCI_ERS_RESULT_DISCONNECT;
5253 }
5254
5255 if (netif_running(netdev))
5256 e1000_down(adapter);
5257
5258 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5259 pci_disable_device(pdev);
5260 rtnl_unlock();
5261
5262 /* Request a slot reset. */
5263 return PCI_ERS_RESULT_NEED_RESET;
5264 }
5265
5266 /**
5267 * e1000_io_slot_reset - called after the pci bus has been reset.
5268 * @pdev: Pointer to PCI device
5269 *
5270 * Restart the card from scratch, as if from a cold-boot. Implementation
5271 * resembles the first-half of the e1000_resume routine.
5272 */
e1000_io_slot_reset(struct pci_dev * pdev)5273 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5274 {
5275 struct net_device *netdev = pci_get_drvdata(pdev);
5276 struct e1000_adapter *adapter = netdev_priv(netdev);
5277 struct e1000_hw *hw = &adapter->hw;
5278 int err;
5279
5280 if (adapter->need_ioport)
5281 err = pci_enable_device(pdev);
5282 else
5283 err = pci_enable_device_mem(pdev);
5284 if (err) {
5285 pr_err("Cannot re-enable PCI device after reset.\n");
5286 return PCI_ERS_RESULT_DISCONNECT;
5287 }
5288
5289 /* flush memory to make sure state is correct */
5290 smp_mb__before_atomic();
5291 clear_bit(__E1000_DISABLED, &adapter->flags);
5292 pci_set_master(pdev);
5293
5294 pci_enable_wake(pdev, PCI_D3hot, 0);
5295 pci_enable_wake(pdev, PCI_D3cold, 0);
5296
5297 e1000_reset(adapter);
5298 ew32(WUS, ~0);
5299
5300 return PCI_ERS_RESULT_RECOVERED;
5301 }
5302
5303 /**
5304 * e1000_io_resume - called when traffic can start flowing again.
5305 * @pdev: Pointer to PCI device
5306 *
5307 * This callback is called when the error recovery driver tells us that
5308 * its OK to resume normal operation. Implementation resembles the
5309 * second-half of the e1000_resume routine.
5310 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	/* a running interface must come back up before we re-attach */
	if (netif_running(netdev) && e1000_up(adapter)) {
		pr_info("can't bring device back up after reset\n");
		return;
	}

	netif_device_attach(netdev);
}
5327
5328 /* e1000_main.c */
5329