/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
/*
	Written 1999-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/sundance.html
	[link no longer provides useful info -jgarzik]
	Archives of the mailing list are still available at
	https://www.beowulf.org/pipermail/netdrivers/

*/

#define DRV_NAME	"sundance"

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/
static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align. */
static int rx_copybreak;
static int flowctrl = 1;

/* media[] specifies the media type the NIC operates at.
		 autosense	Autosensing active media.
		 10mbps_hd	10Mbps half duplex.
		 10mbps_fd	10Mbps full duplex.
		 100mbps_hd	100Mbps half duplex.
		 100mbps_fd	100Mbps full duplex.
		 0		Autosensing active media.
		 1		10Mbps half duplex.
		 2		10Mbps full duplex.
		 3		100Mbps half duplex.
		 4		100Mbps full duplex.
*/
#define MAX_UNITS 8
static char *media[MAX_UNITS];
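
/* Example usage (illustrative, not from the original source): the values
   above are ordinary module parameters, so a typical load might be

	modprobe sundance debug=3 flowctrl=1 media=100mbps_fd,autosense

   where each media[] entry applies to one card, in probe order. */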


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
#define TX_RING_SIZE	32
#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used. */
#define RX_RING_SIZE	64
#define RX_BUDGET	32
#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct netdev_desc)
#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct netdev_desc)
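
/* Example (illustrative) of the power-of-two claim above: with
   TX_RING_SIZE = 32 the compiler can turn

	entry = np->cur_tx % TX_RING_SIZE;

   into the equivalent masking operation

	entry = np->cur_tx & (TX_RING_SIZE - 1);

   because cur_tx is unsigned and 32 is 2^5. */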

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)
#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(media, charp, NULL, 0);
module_param(flowctrl, int, 0);
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");

/*
				Theory of Operation

I. Board Compatibility

This driver is designed for the Sundance Technologies "Alta" ST201 chip.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
Some chips explicitly use only 2^N sized rings, while others use a
'next descriptor' pointer that the driver forms into rings.

IIIb/c. Transmit/Receive Structure

This driver uses a zero-copy receive and transmit scheme.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack.  Buffers consumed this way are replaced by newly allocated
skbuffs in a later phase of receives.

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.  When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine.  Copying also preloads the cache, which is
most useful with small frames.
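
For illustration (not a prescription), the copy-break decision in the
receive path reduces to:

	if (pkt_len < rx_copybreak)
		copy the frame into a freshly allocated small skbuff;
	else
		hand the full-sized ring skbuff up the stack and refill later;

so rx_copybreak = 0 (the default) always takes the zero-copy branch.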

A subtle aspect of the operation is that the IP header at offset 14 in an
ethernet frame isn't longword aligned for further processing.
Unaligned buffers are permitted by the Sundance hardware, so
frames are received into the skbuff at an offset of "+2", 16-byte aligning
the IP header.
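
As a worked example: with skb_reserve(skb, 2) the 14-byte Ethernet header
occupies offsets 2..15 of the buffer, so the IP header that follows begins
at offset 16, a longword boundary.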

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'lp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.

IV. Notes

IVb. References

The Sundance ST201 datasheet, preliminary version.
The Kendin KS8723 datasheet, preliminary version.
The ICplus IP100 datasheet, preliminary version.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

*/

/* Work-around for Kendin chip bugs. */
#ifndef CONFIG_SUNDANCE_MMIO
#define USE_IO_OPS 1
#endif

static const struct pci_device_id sundance_pci_tbl[] = {
	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
	{ }
};
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);

enum {
	netdev_io_size = 128
};

struct pci_id_info {
	const char *name;
};
static const struct pci_id_info pci_id_tbl[] = {
	{"D-Link DFE-550TX FAST Ethernet Adapter"},
	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
	{"D-Link DFE-580TX 4 port Server Adapter"},
	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
	{"D-Link DL10050-based FAST Ethernet Adapter"},
	{"Sundance Technology Alta"},
	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
	{ }	/* terminate list. */
};

/* This driver was written to use PCI memory space, however x86-oriented
   hardware often uses I/O space accesses. */

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum alta_offsets {
	DMACtrl = 0x00,
	TxListPtr = 0x04,
	TxDMABurstThresh = 0x08,
	TxDMAUrgentThresh = 0x09,
	TxDMAPollPeriod = 0x0a,
	RxDMAStatus = 0x0c,
	RxListPtr = 0x10,
	DebugCtrl0 = 0x1a,
	DebugCtrl1 = 0x1c,
	RxDMABurstThresh = 0x14,
	RxDMAUrgentThresh = 0x15,
	RxDMAPollPeriod = 0x16,
	LEDCtrl = 0x1a,
	ASICCtrl = 0x30,
	EEData = 0x34,
	EECtrl = 0x36,
	FlashAddr = 0x40,
	FlashData = 0x44,
	WakeEvent = 0x45,
	TxStatus = 0x46,
	TxFrameId = 0x47,
	DownCounter = 0x18,
	IntrClear = 0x4a,
	IntrEnable = 0x4c,
	IntrStatus = 0x4e,
	MACCtrl0 = 0x50,
	MACCtrl1 = 0x52,
	StationAddr = 0x54,
	MaxFrameSize = 0x5A,
	RxMode = 0x5c,
	MIICtrl = 0x5e,
	MulticastFilter0 = 0x60,
	MulticastFilter1 = 0x64,
	RxOctetsLow = 0x68,
	RxOctetsHigh = 0x6a,
	TxOctetsLow = 0x6c,
	TxOctetsHigh = 0x6e,
	TxFramesOK = 0x70,
	RxFramesOK = 0x72,
	StatsCarrierError = 0x74,
	StatsLateColl = 0x75,
	StatsMultiColl = 0x76,
	StatsOneColl = 0x77,
	StatsTxDefer = 0x78,
	RxMissed = 0x79,
	StatsTxXSDefer = 0x7a,
	StatsTxAbort = 0x7b,
	StatsBcastTx = 0x7c,
	StatsBcastRx = 0x7d,
	StatsMcastTx = 0x7e,
	StatsMcastRx = 0x7f,
	/* Aliased and bogus values! */
	RxStatus = 0x0c,
};

#define ASIC_HI_WORD(x)	((x) + 2)

enum ASICCtrl_HiWord_bit {
	GlobalReset = 0x0001,
	RxReset = 0x0002,
	TxReset = 0x0004,
	DMAReset = 0x0008,
	FIFOReset = 0x0010,
	NetworkReset = 0x0020,
	HostReset = 0x0040,
	ResetBusy = 0x0400,
};
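
/* Illustrative note (an assumption, mirroring sundance_reset() below): the
 * reset bits above live in the high half of the 32-bit ASICCtrl register,
 * so an equivalent 16-bit access pattern would be
 *
 *	iowrite16(GlobalReset, ioaddr + ASIC_HI_WORD(ASICCtrl));
 *	while (ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy)
 *		udelay(100);
 *
 * which is why sundance_reset() tests (ResetBusy << 16) when it uses
 * 32-bit reads instead.
 */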

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
	IntrDrvRqst=0x0040,
	StatsMax=0x0080, LinkChange=0x0100,
	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
};

/* Bits in the RxMode register. */
enum rx_mode_bits {
	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
};
/* Bits in MACCtrl. */
enum mac_ctrl0_bits {
	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
};
enum mac_ctrl1_bits {
	StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
};

/* Bits in WakeEvent register. */
enum wake_event_bits {
	WakePktEnable = 0x01,
	MagicPktEnable = 0x02,
	LinkEventEnable = 0x04,
	WolEnable = 0x80,
};

/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
	__le32 next_desc;
	__le32 status;
	struct desc_frag { __le32 addr, length; } frag;
};

/* Bits in netdev_desc.status */
enum desc_status_bits {
	DescOwn=0x8000,
	DescEndPacket=0x4000,
	DescEndRing=0x2000,
	LastFrag=0x80000000,
	DescIntrOnTx=0x8000,
	DescIntrOnDMADone=0x80000000,
	DisableAlign = 0x00000001,
};
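
/* Worked example (illustrative, matching start_tx() below): a single-fragment
 * Tx descriptor for an skb of length len at DMA address addr is filled as
 *
 *	desc->next_desc = 0;
 *	desc->status = cpu_to_le32((entry << 2) | DisableAlign);
 *	desc->frag.addr = cpu_to_le32(addr);
 *	desc->frag.length = cpu_to_le32(len | LastFrag);
 *
 * where the frame id in bits 2..9 of status lets the interrupt handler match
 * completed frames against TxFrameId.
 */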

#define PRIV_ALIGN	15	/* Required alignment mask */
/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
   within the structure. */
#define MII_CNT		4
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;
	struct timer_list timer;		/* Media monitoring timer. */
	struct net_device *ndev;		/* backpointer */
	/* ethtool extra stats */
	struct {
		u64 tx_multiple_collisions;
		u64 tx_single_collisions;
		u64 tx_late_collisions;
		u64 tx_deferred;
		u64 tx_deferred_excessive;
		u64 tx_aborted;
		u64 tx_bcasts;
		u64 rx_bcasts;
		u64 tx_mcasts;
		u64 rx_mcasts;
	} xstats;
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	int msg_enable;
	int chip_id;
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
	unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
	unsigned int flowctrl:1;
	unsigned int default_port:4;		/* Last dev->if_port value. */
	unsigned int an_enable:1;
	unsigned int speed;
	unsigned int wol_enabled:1;		/* Wake on LAN enabled */
	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	int budget;
	int cur_task;
	/* Multicast and receive mode. */
	spinlock_t mcastlock;			/* SMP lock multicast updates. */
	u16 mcast_filter[4];
	/* MII transceiver section. */
	struct mii_if_info mii_if;
	int mii_preamble_required;
	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
	struct pci_dev *pci_dev;
	void __iomem *base;
	spinlock_t statlock;
};

/* The station address location in the EEPROM. */
#define EEPROM_SA_OFFSET	0x10
#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
			IntrDrvRqst | IntrTxDone | StatsMax | \
			LinkChange)

static int change_mtu(struct net_device *dev, int new_mtu);
static int eeprom_read(void __iomem *ioaddr, int location);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int mdio_wait_link(struct net_device *dev, int wait);
static int netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
static void tx_timeout(struct net_device *dev, unsigned int txqueue);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static int reset_tx (struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void rx_poll(struct tasklet_struct *t);
static void tx_poll(struct tasklet_struct *t);
static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static int __set_mac_addr(struct net_device *dev);
static int sundance_set_mac_addr(struct net_device *dev, void *data);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;

static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base + ASICCtrl;
	int countdown;

	/* ST201 documentation states ASICCtrl is a 32 bit register */
	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
	/* ST201 documentation states reset can take up to 1 ms */
	countdown = 10 + 1;
	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
		if (--countdown == 0) {
			printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
			break;
		}
		udelay(100);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void sundance_poll_controller(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	disable_irq(np->pci_dev->irq);
	intr_handler(np->pci_dev->irq, dev);
	enable_irq(np->pci_dev->irq);
}
#endif

static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_eth_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_change_mtu		= change_mtu,
	.ndo_set_mac_address	= sundance_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sundance_poll_controller,
#endif
};

static int sundance_probe1(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int irq;
	int i;
	void __iomem *ioaddr;
	u16 mii_ctl;
	void *ring_space;
	dma_addr_t ring_dma;
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif
	int phy, phy_end, phy_idx = 0;
	__le16 addr[ETH_ALEN / 2];

	if (pci_enable_device(pdev))
		return -EIO;
	pci_set_master(pdev);

	irq = pdev->irq;

	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
	if (!ioaddr)
		goto err_out_res;

	for (i = 0; i < 3; i++)
		addr[i] =
			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
	eth_hw_addr_set(dev, (u8 *)addr);

	np = netdev_priv(dev);
	np->ndev = dev;
	np->base = ioaddr;
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->msg_enable = (1 << debug) - 1;
	spin_lock_init(&np->lock);
	spin_lock_init(&np->statlock);
	tasklet_setup(&np->rx_tasklet, rx_poll);
	tasklet_setup(&np->tx_tasklet, tx_poll);

	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
			&ring_dma, GFP_KERNEL);
	if (!ring_space)
		goto err_out_cleardev;
	np->tx_ring = (struct netdev_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
			&ring_dma, GFP_KERNEL);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = (struct netdev_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	/* MTU range: 68 - 8191 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = 8191;

	pci_set_drvdata(pdev, dev);

	i = register_netdev(dev);
	if (i)
		goto err_out_unmap_rx;

	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
	       dev->dev_addr, irq);

	np->phys[0] = 1;		/* Default setting */
	np->mii_preamble_required++;

	/*
	 * It seems some PHYs don't deal well with address 0 being accessed
	 * first.
	 */
	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
		phy = 0;
		phy_end = 31;
	} else {
		phy = 1;
		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
	}
	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
		int phyx = phy & 0x1f;
		int mii_status = mdio_read(dev, phyx, MII_BMSR);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			np->phys[phy_idx++] = phyx;
			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
			if ((mii_status & 0x0040) == 0)
				np->mii_preamble_required++;
			printk(KERN_INFO "%s: MII PHY found at address %d, status "
			       "0x%4.4x advertising %4.4x.\n",
			       dev->name, phyx, mii_status, np->mii_if.advertising);
		}
	}
	np->mii_preamble_required--;

	if (phy_idx == 0) {
		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
		       dev->name, ioread32(ioaddr + ASICCtrl));
		goto err_out_unregister;
	}

	np->mii_if.phy_id = np->phys[0];

	/* Parse override configuration */
	np->an_enable = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
				   strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (flowctrl == 1)
			np->flowctrl = 1;
	}

	/* Fibre PHY? */
	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
		/* Default 100Mbps Full */
		if (np->an_enable) {
			np->speed = 100;
			np->mii_if.full_duplex = 1;
			np->an_enable = 0;
		}
	}
	/* Reset PHY */
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
	mdelay (300);
	/* If flow control enabled, we need to advertise it.*/
	if (np->flowctrl)
		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
	/* Force media type */
	if (!np->an_enable) {
		mii_ctl = 0;
		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
		printk (KERN_INFO "Override speed=%d, %s duplex\n",
			np->speed, np->mii_if.full_duplex ? "Full" : "Half");

	}

	/* Perhaps move the reset here? */
	/* Reset the chip to erase previous misconfiguration. */
	if (netif_msg_hw(np))
		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
	sundance_reset(dev, 0x00ff << 16);
	if (netif_msg_hw(np))
		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));

	card_idx++;
	return 0;

err_out_unregister:
	unregister_netdev(dev);
err_out_unmap_rx:
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
		np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
		np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
	pci_iounmap(pdev, ioaddr);
err_out_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev (dev);
	return -ENODEV;
}

static int change_mtu(struct net_device *dev, int new_mtu)
{
	if (netif_running(dev))
		return -EBUSY;
	WRITE_ONCE(dev->mtu, new_mtu);
	return 0;
}

#define eeprom_delay(ee_addr)	ioread32(ee_addr)
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
static int eeprom_read(void __iomem *ioaddr, int location)
{
	int boguscnt = 10000;		/* Typical 1900 ticks. */
	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
	do {
		eeprom_delay(ioaddr + EECtrl);
		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
			return ioread16(ioaddr + EEData);
		}
	} while (--boguscnt > 0);
	return 0;
}

/*  MII transceiver control section.
	Read and write the MII registers using software-generated serial
	MDIO protocol.  See the MII specifications or DP83840A data sheet
	for details.

	The maximum data clock rate is 2.5 Mhz.  The minimum timing is usually
	met by back-to-back 33Mhz PCI cycles. */
#define mdio_delay() ioread8(mdio_addr)

enum mii_reg_bits {
	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
};
#define MDIO_EnbIn  (0)
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
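
/* Usage note (illustrative): mdio_read()/mdio_write() below behave like any
 * other MII register access; e.g. a link check, as mdio_wait_link() does
 * further down, reduces to
 *
 *	int bmsr = mdio_read(dev, np->phys[0], MII_BMSR);
 *	if (bmsr & BMSR_LSTATUS)
 *		... link is up ...
 */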

/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite8(MDIO_WRITE1, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}

static int mdio_wait_link(struct net_device *dev, int wait)
{
	int bmsr;
	int phy_id;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_id = np->phys[0];

	do {
		bmsr = mdio_read(dev, phy_id, MII_BMSR);
		if (bmsr & 0x0004)
			return 0;
		mdelay(1);
	} while (--wait > 0);
	return -1;
}

static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	const int irq = np->pci_dev->irq;
	unsigned long flags;
	int i;

	sundance_reset(dev, 0x00ff << 16);

	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);

	init_ring(dev);

	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
	/* The Tx list pointer is written as packets are queued. */

	/* Initialize other registers. */
	__set_mac_addr(dev);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
#else
	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
#endif
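	/* (Clarifying note: MaxFrameSize counts the 14-byte Ethernet header,
	 * plus 4 more bytes for an 802.1Q tag when VLAN support is compiled
	 * in, hence mtu + 18 vs. mtu + 14 above.) */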
	if (dev->mtu > 2047)
		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);

	/* Configure the PCI bus bursts and FIFO thresholds. */

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	spin_lock_init(&np->mcastlock);

	set_rx_mode(dev);
	iowrite16(0, ioaddr + IntrEnable);
	iowrite16(0, ioaddr + DownCounter);
	/* Set the chip to poll every N*320nsec. */
	iowrite8(100, ioaddr + RxDMAPollPeriod);
	iowrite8(127, ioaddr + TxDMAPollPeriod);
	/* Fix DFE-580TX packet drop issue */
	if (np->pci_dev->revision >= 0x14)
		iowrite8(0x01, ioaddr + DebugCtrl1);
	netif_start_queue(dev);

	spin_lock_irqsave(&np->lock, flags);
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flags);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);

	/* Disable Wol */
	iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent);
	np->wol_enabled = 0;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
			"MAC Control %x, %4.4x %4.4x.\n",
			dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
			ioread32(ioaddr + MACCtrl0),
			ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));

	/* Set the timer to check for link beat. */
	timer_setup(&np->timer, netdev_timer, 0);
	np->timer.expires = jiffies + 3*HZ;
	add_timer(&np->timer);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);

	return 0;
}

static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
	int negotiated = mii_lpa & np->mii_if.advertising;
	int duplex;

	/* Force media */
	if (!np->an_enable || mii_lpa == 0xffff) {
		if (np->mii_if.full_duplex)
			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
				ioaddr + MACCtrl0);
		return;
	}

	/* Autonegotiation */
	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
	if (np->mii_if.full_duplex != duplex) {
		np->mii_if.full_duplex = duplex;
		if (netif_msg_link(np))
			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
				"negotiated capability %4.4x.\n", dev->name,
				duplex ? "full" : "half", np->phys[0], negotiated);
		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
	}
}

static void netdev_timer(struct timer_list *t)
{
	struct netdev_private *np = timer_container_of(np, t, timer);
	struct net_device *dev = np->mii_if.dev;
	void __iomem *ioaddr = np->base;
	int next_tick = 10*HZ;

	if (netif_msg_timer(np)) {
		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
			"Tx %x Rx %x.\n",
			dev->name, ioread16(ioaddr + IntrEnable),
			ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
	}
	check_duplex(dev);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

static void tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flag;

	netif_stop_queue(dev);
	tasklet_disable_in_atomic(&np->tx_tasklet);
	iowrite16(0, ioaddr + IntrEnable);
	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
		"TxFrameId %2.2x,"
		" resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
		ioread8(ioaddr + TxFrameId));

	{
		int i;
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
				le32_to_cpu(np->tx_ring[i].frag.addr),
				le32_to_cpu(np->tx_ring[i].frag.length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
	}
	spin_lock_irqsave(&np->lock, flag);

	/* Stop and restart the chip's Tx processes. */
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flag);

	dev->if_port = 0;

	netif_trans_update(dev); /* prevent tx timeout */
	dev->stats.tx_errors++;
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		netif_wake_queue(dev);
	}
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	tasklet_enable(&np->tx_tasklet);
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag.length = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		dma_addr_t addr;

		struct sk_buff *skb =
			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		addr = dma_map_single(&np->pci_dev->dev, skb->data,
				      np->rx_buf_sz, DMA_FROM_DEVICE);
		if (dma_mapping_error(&np->pci_dev->dev, addr)) {
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
			break;
		}
		np->rx_ring[i].frag.addr = cpu_to_le32(addr);
		np->rx_ring[i].frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag);
	}
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
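	/* (Clarifying note: if some allocations failed above, i < RX_RING_SIZE
	 * and the unsigned wraparound leaves dirty_rx trailing cur_rx by the
	 * number of missing buffers, which refill_rx() later tops up.) */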

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
}

static void tx_poll(struct tasklet_struct *t)
{
	struct netdev_private *np = from_tasklet(np, t, tx_tasklet);
	unsigned head = np->cur_task % TX_RING_SIZE;
	struct netdev_desc *txdesc =
		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

	/* Chain the next pointer */
	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
		int entry = np->cur_task % TX_RING_SIZE;
		txdesc = &np->tx_ring[entry];
		if (np->last_tx) {
			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
				entry*sizeof(struct netdev_desc));
		}
		np->last_tx = txdesc;
	}
	/* Indicate the latest descriptor of tx ring */
	txdesc->status |= cpu_to_le32(DescIntrOnTx);

	if (ioread32 (np->base + TxListPtr) == 0)
		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
			np->base + TxListPtr);
}
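
/* (Design note, added for clarity: start_tx() below only fills a descriptor
 * and bumps cur_tx; tx_poll() above does the actual chaining via next_desc
 * and kicks TxListPtr only when it reads back as zero, i.e. when the DMA
 * engine's list walk is idle.) */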

static netdev_tx_t
start_tx (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct netdev_desc *txdesc;
	dma_addr_t addr;
	unsigned entry;

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

	addr = dma_map_single(&np->pci_dev->dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (dma_mapping_error(&np->pci_dev->dev, addr))
		goto drop_frame;

	txdesc->next_desc = 0;
	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
	txdesc->frag.addr = cpu_to_le32(addr);
	txdesc->frag.length = cpu_to_le32 (skb->len | LastFrag);

	/* Increment cur_tx before tasklet_schedule() */
	np->cur_tx++;
	mb();
	/* Schedule a tx_poll() task */
	tasklet_schedule(&np->tx_tasklet);

	/* On some architectures: explicitly flush cache lines here. */
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
	    !netif_queue_stopped(dev)) {
		/* do nothing */
	} else {
		netif_stop_queue (dev);
	}
	if (netif_msg_tx_queued(np)) {
		printk (KERN_DEBUG
			"%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return NETDEV_TX_OK;

drop_frame:
	dev_kfree_skb_any(skb);
	np->tx_skbuff[entry] = NULL;
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

/* Reset hardware tx and free all of tx buffers */
static int
reset_tx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	/* Reset tx logic, TxListPtr will be cleaned */
	iowrite16 (TxDisable, ioaddr + MACCtrl1);
	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);

	/* free all tx skbuff */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;

		skb = np->tx_skbuff[i];
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
				le32_to_cpu(np->tx_ring[i].frag.addr),
				skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			np->tx_skbuff[i] = NULL;
			dev->stats.tx_dropped++;
		}
	}
	np->cur_tx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->last_tx = NULL;
	iowrite8(127, ioaddr + TxDMAPollPeriod);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
	return 0;
}

/* The interrupt handler cleans up after the Tx thread
   and schedules the Rx tasklet to run. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int hw_frame_id;
	int tx_cnt;
	int tx_status;
	int handled = 0;
	int i;

	do {
		int intr_status = ioread16(ioaddr + IntrStatus);
		iowrite16(intr_status, ioaddr + IntrStatus);

		if (netif_msg_intr(np))
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				dev->name, intr_status);

		if (!(intr_status & DEFAULT_INTR))
			break;

		handled = 1;

		if (intr_status & (IntrRxDMADone)) {
			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
					ioaddr + IntrEnable);
			if (np->budget < 0)
				np->budget = RX_BUDGET;
			tasklet_schedule(&np->rx_tasklet);
		}
		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
			tx_status = ioread16 (ioaddr + TxStatus);
			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
				if (netif_msg_tx_done(np))
					printk
						("%s: Transmit status is %2.2x.\n",
						dev->name, tx_status);
				if (tx_status & 0x1e) {
					if (netif_msg_tx_err(np))
						printk("%s: Transmit error status %4.4x.\n",
							dev->name, tx_status);
					dev->stats.tx_errors++;
					if (tx_status & 0x10)
						dev->stats.tx_fifo_errors++;
					if (tx_status & 0x08)
						dev->stats.collisions++;
					if (tx_status & 0x04)
						dev->stats.tx_fifo_errors++;
					if (tx_status & 0x02)
						dev->stats.tx_window_errors++;

					/*
					** This reset has been verified on
					** DFE-580TX boards ! phdm@macqel.be.
					*/
					if (tx_status & 0x10) {	/* TxUnderrun */
						/* Restart Tx FIFO and transmitter */
						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
						/* No need to reset the Tx pointer here */
					}
					/* Restart the Tx.  Need to make sure tx enabled */
					i = 10;
					do {
						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
							break;
						mdelay(1);
					} while (--i);
				}
				/* Yup, this is a documentation bug.  It cost me *hours*. */
				iowrite16 (0, ioaddr + TxStatus);
				if (tx_cnt < 0) {
					iowrite32(5000, ioaddr + DownCounter);
					break;
				}
				tx_status = ioread16 (ioaddr + TxStatus);
			}
			hw_frame_id = (tx_status >> 8) & 0xff;
		} else {
			hw_frame_id = ioread8(ioaddr + TxFrameId);
		}

		if (np->pci_dev->revision >= 0x14) {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				int sw_frame_id;
				sw_frame_id = (le32_to_cpu(
					np->tx_ring[entry].status) >> 2) & 0xff;
				if (sw_frame_id == hw_frame_id &&
					!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				if (sw_frame_id == (hw_frame_id + 1) %
					TX_RING_SIZE)
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				dma_unmap_single(&np->pci_dev->dev,
					le32_to_cpu(np->tx_ring[entry].frag.addr),
					skb->len, DMA_TO_DEVICE);
				dev_consume_skb_irq(np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag.addr = 0;
				np->tx_ring[entry].frag.length = 0;
			}
			spin_unlock(&np->lock);
		} else {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				if (!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				dma_unmap_single(&np->pci_dev->dev,
					le32_to_cpu(np->tx_ring[entry].frag.addr),
					skb->len, DMA_TO_DEVICE);
				dev_consume_skb_irq(np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag.addr = 0;
				np->tx_ring[entry].frag.length = 0;
			}
			spin_unlock(&np->lock);
		}

		if (netif_queue_stopped(dev) &&
			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, clear busy flag. */
			netif_wake_queue (dev);
		}
		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
			netdev_error(dev, intr_status);
	} while (0);
	if (netif_msg_intr(np))
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

static void rx_poll(struct tasklet_struct *t)
{
	struct netdev_private *np = from_tasklet(np, t, rx_tasklet);
	struct net_device *dev = np->ndev;
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->budget;
	void __iomem *ioaddr = np->base;
	int received = 0;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (1) {
		struct netdev_desc *desc = &(np->rx_ring[entry]);
		u32 frame_status = le32_to_cpu(desc->status);
		int pkt_len;

		if (--boguscnt < 0) {
			goto not_done;
		}
		if (!(frame_status & DescOwn))
			break;
		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
				frame_status);
		if (frame_status & 0x001f4000) {
			/* There was an error. */
			if (netif_msg_rx_err(np))
				printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
					frame_status);
			dev->stats.rx_errors++;
			if (frame_status & 0x00100000)
				dev->stats.rx_length_errors++;
			if (frame_status & 0x00010000)
				dev->stats.rx_fifo_errors++;
			if (frame_status & 0x00060000)
				dev->stats.rx_frame_errors++;
			if (frame_status & 0x00080000)
				dev->stats.rx_crc_errors++;
			if (frame_status & 0x00100000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
					" status %8.8x.\n",
					dev->name, frame_status);
			}
		} else {
			struct sk_buff *skb;
#ifndef final_version
			if (netif_msg_rx_status(np))
				printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
					", bogus_cnt %d.\n",
					pkt_len, boguscnt);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				dma_sync_single_for_cpu(&np->pci_dev->dev,
						le32_to_cpu(desc->frag.addr),
						np->rx_buf_sz, DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
				dma_sync_single_for_device(&np->pci_dev->dev,
						le32_to_cpu(desc->frag.addr),
						np->rx_buf_sz, DMA_FROM_DEVICE);
				skb_put(skb, pkt_len);
			} else {
				dma_unmap_single(&np->pci_dev->dev,
					le32_to_cpu(desc->frag.addr),
					np->rx_buf_sz, DMA_FROM_DEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
			netif_rx(skb);
		}
		entry = (entry + 1) % RX_RING_SIZE;
		received++;
	}
	np->cur_rx = entry;
	refill_rx (dev);
	np->budget -= received;
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	return;

not_done:
	np->cur_rx = entry;
	refill_rx (dev);
	if (!received)
		received = 1;
	np->budget -= received;
	if (np->budget <= 0)
		np->budget = RX_BUDGET;
	tasklet_schedule(&np->rx_tasklet);
}

static void refill_rx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry;

	/* Refill the Rx ring buffers. */
	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
		struct sk_buff *skb;
		dma_addr_t addr;

		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			addr = dma_map_single(&np->pci_dev->dev, skb->data,
					      np->rx_buf_sz, DMA_FROM_DEVICE);
			if (dma_mapping_error(&np->pci_dev->dev, addr)) {
				dev_kfree_skb_irq(skb);
				np->rx_skbuff[entry] = NULL;
				break;
			}

			np->rx_ring[entry].frag.addr = cpu_to_le32(addr);
		}
		/* Perhaps we need not reset this field. */
		np->rx_ring[entry].frag.length =
			cpu_to_le32(np->rx_buf_sz | LastFrag);
		np->rx_ring[entry].status = 0;
	}
}
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mii_ctl, mii_advertise, mii_lpa;
	int speed;

	if (intr_status & LinkChange) {
		if (mdio_wait_link(dev, 10) == 0) {
			printk(KERN_INFO "%s: Link up\n", dev->name);
			if (np->an_enable) {
				mii_advertise = mdio_read(dev, np->phys[0],
							  MII_ADVERTISE);
				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
				mii_advertise &= mii_lpa;
				printk(KERN_INFO "%s: Link changed: ",
					dev->name);
				if (mii_advertise & ADVERTISE_100FULL) {
					np->speed = 100;
					printk("100Mbps, full duplex\n");
				} else if (mii_advertise & ADVERTISE_100HALF) {
					np->speed = 100;
					printk("100Mbps, half duplex\n");
				} else if (mii_advertise & ADVERTISE_10FULL) {
					np->speed = 10;
					printk("10Mbps, full duplex\n");
				} else if (mii_advertise & ADVERTISE_10HALF) {
					np->speed = 10;
					printk("10Mbps, half duplex\n");
				} else
					printk("\n");

			} else {
				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
				np->speed = speed;
				printk(KERN_INFO "%s: Link changed: %dMbps, ",
					dev->name, speed);
				printk("%s duplex.\n",
					(mii_ctl & BMCR_FULLDPLX) ?
						"full" : "half");
			}
			check_duplex(dev);
			if (np->flowctrl && np->mii_if.full_duplex) {
				iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
					ioaddr + MulticastFilter1+2);
				iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
					ioaddr + MACCtrl0);
			}
			netif_carrier_on(dev);
		} else {
			printk(KERN_INFO "%s: Link down\n", dev->name);
			netif_carrier_off(dev);
		}
	}
	if (intr_status & StatsMax) {
		get_stats(dev);
	}
	if (intr_status & IntrPCIErr) {
		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
			dev->name, intr_status);
		/* We must do a global reset of DMA to continue. */
	}
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flags;
	u8 late_coll, single_coll, mult_coll;

	spin_lock_irqsave(&np->statlock, flags);
	/* The chip need only report frames silently dropped. */
	dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
	dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
	dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
	dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);

	mult_coll = ioread8(ioaddr + StatsMultiColl);
	np->xstats.tx_multiple_collisions += mult_coll;
	single_coll = ioread8(ioaddr + StatsOneColl);
	np->xstats.tx_single_collisions += single_coll;
	late_coll = ioread8(ioaddr + StatsLateColl);
	np->xstats.tx_late_collisions += late_coll;
	dev->stats.collisions += mult_coll
		+ single_coll
		+ late_coll;

	np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
	np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
	np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
	np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
	np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
	np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
	np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);

	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;

	spin_unlock_irqrestore(&np->statlock, flags);

	return &dev->stats;
}
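
/* Note on the hash in set_rx_mode() below (descriptive, added for clarity):
 * the filter index is the bit-reversed top 6 bits of the little-endian
 * CRC-32 of each multicast address, selecting one of 64 bits spread across
 * the four 16-bit MulticastFilter registers:
 *
 *	crc = ether_crc_le(ETH_ALEN, ha->addr);
 *	index = bit-reversed top 6 bits of crc;
 *	mc_filter[index / 16] |= 1 << (index % 16);
 */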

static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mc_filter[4];			/* Multicast hash filter */
	u32 rx_mode;
	int i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		int bit;
		int index;
		int crc;
		memset (mc_filter, 0, sizeof (mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
				if (crc & 0x80000000) index |= 1 << bit;
			mc_filter[index/16] |= (1 << (index % 16));
		}
		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
	} else {
		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		return;
	}
	if (np->mii_if.full_duplex && np->flowctrl)
		mc_filter[3] |= 0x0200;

	for (i = 0; i < 4; i++)
		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
	iowrite8(rx_mode, ioaddr + RxMode);
}

static int __set_mac_addr(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u16 addr16;

	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
	iowrite16(addr16, np->base + StationAddr);
	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
	iowrite16(addr16, np->base + StationAddr+2);
	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
	iowrite16(addr16, np->base + StationAddr+4);
	return 0;
}

/* Invoked with rtnl_lock held */
static int sundance_set_mac_addr(struct net_device *dev, void *data)
{
	const struct sockaddr *addr = data;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	eth_hw_addr_set(dev, addr->sa_data);
	__set_mac_addr(dev);

	return 0;
}

static const struct {
	const char name[ETH_GSTRING_LEN];
} sundance_stats[] = {
	{ "tx_multiple_collisions" },
	{ "tx_single_collisions" },
	{ "tx_late_collisions" },
	{ "tx_deferred" },
	{ "tx_deferred_excessive" },
	{ "tx_aborted" },
	{ "tx_bcasts" },
	{ "rx_bcasts" },
	{ "tx_mcasts" },
	{ "rx_mcasts" },
};
1639
check_if_running(struct net_device * dev)1640 static int check_if_running(struct net_device *dev)
1641 {
1642 if (!netif_running(dev))
1643 return -EINVAL;
1644 return 0;
1645 }
1646
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static int get_link_ksettings(struct net_device *dev,
			      struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

static int set_link_ksettings(struct net_device *dev,
			      const struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);
	return res;
}

static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 get_msglevel(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *np = netdev_priv(dev);
	np->msg_enable = val;
}

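/*
 * The three callbacks below implement "ethtool -S": the core first asks
 * get_sset_count() how many statistics exist, then get_strings() for
 * their names, and finally get_ethtool_stats() for the values.  A
 * typical invocation from userspace is simply "ethtool -S eth0"
 * (interface name illustrative).
 */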
static void get_strings(struct net_device *dev, u32 stringset,
			u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, sundance_stats, sizeof(sundance_stats));
}

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(sundance_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void get_ethtool_stats(struct net_device *dev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct netdev_private *np = netdev_priv(dev);
	int i = 0;

	get_stats(dev);	/* refresh np->xstats from the hardware counters */
	data[i++] = np->xstats.tx_multiple_collisions;
	data[i++] = np->xstats.tx_single_collisions;
	data[i++] = np->xstats.tx_late_collisions;
	data[i++] = np->xstats.tx_deferred;
	data[i++] = np->xstats.tx_deferred_excessive;
	data[i++] = np->xstats.tx_aborted;
	data[i++] = np->xstats.tx_bcasts;
	data[i++] = np->xstats.rx_bcasts;
	data[i++] = np->xstats.tx_mcasts;
	data[i++] = np->xstats.rx_mcasts;
}

#ifdef CONFIG_PM

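/*
 * Report the Wake-on-LAN capabilities and current configuration
 * ("ethtool <dev>" shows these).  The bits read back from the WakeEvent
 * register mirror what sundance_set_wol() wrote there.
 */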
static void sundance_get_wol(struct net_device *dev,
			     struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u8 wol_bits;

	wol->wolopts = 0;

	wol->supported = (WAKE_PHY | WAKE_MAGIC);
	if (!np->wol_enabled)
		return;

	wol_bits = ioread8(ioaddr + WakeEvent);
	if (wol_bits & MagicPktEnable)
		wol->wolopts |= WAKE_MAGIC;
	if (wol_bits & LinkEventEnable)
		wol->wolopts |= WAKE_PHY;
}

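/*
 * Enable or disable wake-up events.  Only magic-packet (WAKE_MAGIC) and
 * link-change (WAKE_PHY) wake-ups are supported; for example,
 * "ethtool -s eth0 wol g" requests magic-packet wake-up (interface name
 * illustrative).
 */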
static int sundance_set_wol(struct net_device *dev,
			    struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u8 wol_bits;

	if (!device_can_wakeup(&np->pci_dev->dev))
		return -EOPNOTSUPP;

	np->wol_enabled = !!(wol->wolopts);
	wol_bits = ioread8(ioaddr + WakeEvent);
	wol_bits &= ~(WakePktEnable | MagicPktEnable |
		      LinkEventEnable | WolEnable);

	if (np->wol_enabled) {
		if (wol->wolopts & WAKE_MAGIC)
			wol_bits |= (MagicPktEnable | WolEnable);
		if (wol->wolopts & WAKE_PHY)
			wol_bits |= (LinkEventEnable | WolEnable);
	}
	iowrite8(wol_bits, ioaddr + WakeEvent);

	device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);

	return 0;
}
#else
#define sundance_get_wol NULL
#define sundance_set_wol NULL
#endif /* CONFIG_PM */

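/*
 * Without CONFIG_PM the WoL handlers above resolve to NULL, in which
 * case the ethtool core reports Wake-on-LAN as unsupported.
 */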
static const struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_wol = sundance_get_wol,
	.set_wol = sundance_set_wol,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_strings = get_strings,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_ethtool_stats,
	.get_link_ksettings = get_link_ksettings,
	.set_link_ksettings = set_link_ksettings,
};

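/*
 * The generic MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG) are
 * forwarded to the mii library under the driver lock; anything else is
 * rejected there.
 */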
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);

	return rc;
}

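/*
 * Bring the interface down.  Teardown order: kill the deferred-work
 * tasklets, stop the queue, mask interrupts, halt the DMA engines and
 * the MAC, issue a global reset, then release the IRQ, the timer and
 * all ring buffers.
 */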
static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	/* Wait for any running tasklets to finish, then kill them. */
	tasklet_kill(&np->rx_tasklet);
	tasklet_kill(&np->tx_tasklet);
	np->cur_tx = 0;
	np->dirty_tx = 0;
	np->cur_task = 0;
	np->last_tx = NULL;

	netif_stop_queue(dev);

	if (netif_msg_ifdown(np)) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
			   "Rx %4.4x Int %2.2x.\n",
			   dev->name, ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Disable Rx and Tx DMA so the resources can be released safely. */
	iowrite32(0x500, ioaddr + DMACtrl);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);

	/* Wait up to ~2 s for the DMA engines to go idle. */
	for (i = 2000; i > 0; i--) {
		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
			break;
		mdelay(1);
	}

	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASIC_HI_WORD(ASICCtrl));

	/* Wait up to ~2 s for the reset to complete. */
	for (i = 2000; i > 0; i--) {
		if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
			break;
		mdelay(1);
	}

#ifdef __i386__
	if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
			   (int)(np->tx_ring_dma));
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
				   i, np->tx_ring[i].status, np->tx_ring[i].frag.addr,
				   np->tx_ring[i].frag.length);
		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
			   (int)(np->rx_ring_dma));
		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
				   i, np->rx_ring[i].status, np->rx_ring[i].frag.addr,
				   np->rx_ring[i].frag.length);
		}
	}
#endif /* __i386__ debugging only */

	free_irq(np->pci_dev->irq, dev);

	timer_delete_sync(&np->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		skb = np->rx_skbuff[i];
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
				le32_to_cpu(np->rx_ring[i].frag.addr),
				np->rx_buf_sz, DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
		}
		np->rx_ring[i].frag.addr = cpu_to_le32(0xBADF00D0); /* poison */
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;
		skb = np->tx_skbuff[i];
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
				le32_to_cpu(np->tx_ring[i].frag.addr),
				skb->len, DMA_TO_DEVICE);
			dev_kfree_skb(skb);
			np->tx_skbuff[i] = NULL;
		}
	}

	return 0;
}

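/*
 * PCI remove: undo the probe.  The netdev is unregistered first so no
 * new traffic can start, then the descriptor rings, the MMIO mapping
 * and the PCI regions are released.
 */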
static void sundance_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);
		unregister_netdev(dev);
		dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
			np->rx_ring, np->rx_ring_dma);
		dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
			np->tx_ring, np->tx_ring_dma);
		pci_iounmap(pdev, np->base);
		pci_release_regions(pdev);
		free_netdev(dev);
	}
}

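/*
 * Suspend: shut the interface down and detach it from the stack.  If
 * Wake-on-LAN was requested, the receiver is re-armed in a minimal
 * broadcast/unicast mode so the chip can still see wake-up packets
 * while the system sleeps.
 */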
static int __maybe_unused sundance_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;

	if (!netif_running(dev))
		return 0;

	netdev_close(dev);
	netif_device_detach(dev);

	if (np->wol_enabled) {
		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		iowrite16(RxEnable, ioaddr + MACCtrl1);
	}

	device_set_wakeup_enable(dev_d, np->wol_enabled);

	return 0;
}

static int __maybe_unused sundance_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	int err = 0;

	if (!netif_running(dev))
		return 0;

	err = netdev_open(dev);
	if (err) {
		printk(KERN_ERR "%s: Can't resume interface!\n",
			dev->name);
		goto out;
	}

	netif_device_attach(dev);

out:
	return err;
}

static SIMPLE_DEV_PM_OPS(sundance_pm_ops, sundance_suspend, sundance_resume);

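/*
 * module_pci_driver() below expands to the module init/exit boilerplate
 * that registers and unregisters this pci_driver, so no explicit
 * module_init()/module_exit() functions are needed.
 */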
static struct pci_driver sundance_driver = {
	.name = DRV_NAME,
	.id_table = sundance_pci_tbl,
	.probe = sundance_probe1,
	.remove = sundance_remove1,
	.driver.pm = &sundance_pm_ops,
};

module_pci_driver(sundance_driver);