1 /* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux.
2
3 Copyright 2000,2001 The Linux Kernel Team
4 Written/copyright 1994-2001 by Donald Becker.
5
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
8
9 Please submit bugs to http://bugzilla.kernel.org/ .
10 */
11
12 #define pr_fmt(fmt) "tulip: " fmt
13
14 #define DRV_NAME "tulip"
15
16 #include <linux/module.h>
17 #include <linux/pci.h>
18 #include <linux/slab.h>
19 #include "tulip.h"
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/etherdevice.h>
23 #include <linux/delay.h>
24 #include <linux/mii.h>
25 #include <linux/crc32.h>
26 #include <linux/unaligned.h>
27 #include <linux/uaccess.h>
28
29 #ifdef CONFIG_SPARC
30 #include <asm/prom.h>
31 #endif
32
/* A few user-configurable values. */

/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static unsigned int max_interrupt_work = 25;

#define MAX_UNITS 8
/* Used to pass the full-duplex flag, etc. */
static int full_duplex[MAX_UNITS];
static int options[MAX_UNITS];
static int mtu[MAX_UNITS];			/* Jumbo MTU for interfaces. */

/* The possible media types that can be set in options[] are: */
/* Index corresponds to the media code stored in dev->if_port. */
const char * const medianame[32] = {
	"10baseT", "10base2", "AUI", "100baseTx",
	"10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
	"100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
	"10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
	"MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
	"","","","", "","","","", "","","","Transceiver reset",
};

/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
/* On platforms with expensive unaligned access, copy every frame. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) || \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak = 100;
#endif

/*
  Set the bus performance register.
	Typical: Set 16 longword cache alignment, no burst limit.
	Cache alignment bits 15:14	     Burst length 13:8
		0000	No alignment  0x00000000 unlimited		0800 8 longwords
		4000	8  longwords		0100 1 longword		1000 16 longwords
		8000	16 longwords		0200 2 longwords	2000 32 longwords
		C000	32  longwords		0400 4 longwords
	Warning: many older 486 systems are broken and require setting 0x00A04800
	   8 longword cache alignment, 8 longword burst.
	ToDo: Non-Intel setting could be better.
*/

/* Per-architecture default for CSR0 (bus mode); see table above. */
#if defined(__alpha__) || defined(__ia64__)
static int csr0 = 0x01A00000 | 0xE000;
#elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
static int csr0 = 0x01A00000 | 0x8000;
#elif defined(CONFIG_SPARC) || defined(__hppa__)
/* The UltraSparc PCI controllers will disconnect at every 64-byte
 * crossing anyways so it makes no sense to tell Tulip to burst
 * any more than that.
 */
static int csr0 = 0x01A00000 | 0x9000;
#elif defined(__arm__) || defined(__sh__)
static int csr0 = 0x01A00000 | 0x4800;
#elif defined(__mips__)
static int csr0 = 0x00200000 | 0x4000;
#else
static int csr0;
#endif

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)


MODULE_AUTHOR("The Linux Kernel Team");
MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
MODULE_LICENSE("GPL");
/* NOTE(review): tulip_debug is defined below this use; presumably it is
 * declared extern in tulip.h so the module_param() reference compiles —
 * verify against tulip.h. */
module_param(tulip_debug, int, 0);
module_param(max_interrupt_work, int, 0);
module_param(rx_copybreak, int, 0);
module_param(csr0, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);

/* Message verbosity: larger values enable more netdev_dbg()/warn output. */
#ifdef TULIP_DEBUG
int tulip_debug = TULIP_DEBUG;
#else
int tulip_debug = 1;
#endif
114
tulip_timer(struct timer_list * t)115 static void tulip_timer(struct timer_list *t)
116 {
117 struct tulip_private *tp = from_timer(tp, t, timer);
118 struct net_device *dev = tp->dev;
119
120 if (netif_running(dev))
121 schedule_work(&tp->media_work);
122 }
123
/*
 * This table use during operation for capabilities and media timer.
 *
 * It is indexed via the values in 'enum chips'
 */

/* Entry layout: { chip name, <128|256> (presumably the I/O region size —
 * TODO confirm against tulip.h), valid_intrs mask (written to CSR5/CSR7
 * by tulip_up()), feature flags, media timer callback, media task }. */
const struct tulip_chip_table tulip_tbl[] = {
  { }, /* placeholder for array, slot unused currently */
  { }, /* placeholder for array, slot unused currently */

  /* DC21140 */
  { "Digital DS21140 Tulip", 128, 0x0001ebef,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
	tulip_media_task },

  /* DC21142, DC21143 */
  { "Digital DS21142/43 Tulip", 128, 0x0801fbff,
	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
	| HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },

  /* LC82C168 */
  { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
	HAS_MII | HAS_PNICNWAY, pnic_timer, },

  /* MX98713 */
  { "Macronix 98713 PMAC", 128, 0x0001ebef,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },

  /* MX98715 */
  { "Macronix 98715 PMAC", 256, 0x0001ebef,
	HAS_MEDIA_TABLE, mxic_timer, },

  /* MX98725 */
  { "Macronix 98725 PMAC", 256, 0x0001ebef,
	HAS_MEDIA_TABLE, mxic_timer, },

  /* AX88140 */
  { "ASIX AX88140", 128, 0x0001fbff,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
	| IS_ASIX, tulip_timer, tulip_media_task },

  /* PNIC2 */
  { "Lite-On PNIC-II", 256, 0x0801fbff,
	HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },

  /* COMET */
  { "ADMtek Comet", 256, 0x0001abef,
	HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },

  /* COMPEX9881 */
  { "Compex 9881 PMAC", 128, 0x0001ebef,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },

  /* I21145 */
  { "Intel DS21145 Tulip", 128, 0x0801fbff,
	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
	| HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },

  /* DM910X */
#ifdef CONFIG_TULIP_DM910X
  { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
	tulip_timer, tulip_media_task },
#else
  { NULL },
#endif

  /* RS7112 */
  { "Conexant LANfinity", 256, 0x0001ebef,
	HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },

};
196
197
/* PCI probe table: { vendor, device, subvendor, subdevice, class,
 * class_mask, driver_data }, where driver_data is the 'enum chips'
 * index into tulip_tbl[] above. */
static const struct pci_device_id tulip_pci_tbl[] = {
	{ 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
	{ 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
	{ 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
	{ 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
/*	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
	{ 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
	{ 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
	{ 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
	{ 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
#ifdef CONFIG_TULIP_DM910X
	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
#endif
	{ 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
	{ 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
	{ 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
	{ 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
	{ 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
	{ 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
241
242
/* A full-duplex map for media types. */
/* Indexed by the media code in dev->if_port; tested elsewhere in this
 * file against the MediaIsMII / MediaIs100 / MediaIsFD / MediaAlwaysFD
 * bit masks (see private_ioctl() and tulip_up()). */
const char tulip_media_cap[32] =
{0,0,0,16,  3,19,16,24,  27,4,7,5, 0,20,23,20,  28,31,0,0, };
246
/* Forward declarations of functions defined later in this file. */
static void tulip_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void tulip_init_ring(struct net_device *dev);
static void tulip_free_ring(struct net_device *dev);
static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
				    struct net_device *dev);
static int tulip_open(struct net_device *dev);
static int tulip_close(struct net_device *dev);
static void tulip_up(struct net_device *dev);
static void tulip_down(struct net_device *dev);
static struct net_device_stats *tulip_get_stats(struct net_device *dev);
static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_tulip(struct net_device *dev);
#endif
263
tulip_set_power_state(struct tulip_private * tp,int sleep,int snooze)264 static void tulip_set_power_state (struct tulip_private *tp,
265 int sleep, int snooze)
266 {
267 if (tp->flags & HAS_ACPI) {
268 u32 tmp, newtmp;
269 pci_read_config_dword (tp->pdev, CFDD, &tmp);
270 newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
271 if (sleep)
272 newtmp |= CFDD_Sleep;
273 else if (snooze)
274 newtmp |= CFDD_Snooze;
275 if (tmp != newtmp)
276 pci_write_config_dword (tp->pdev, CFDD, newtmp);
277 }
278
279 }
280
281
/* Bring the interface fully up: wake the chip, hard-reset it, program
 * the descriptor rings and station address, select an initial medium
 * (user-forced, EEPROM default, or chip-specific autonegotiation),
 * then start Tx/Rx and arm the media timer.
 * Called from tulip_open() with the device quiescent. */
static void tulip_up(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int next_tick = 3*HZ;
	u32 reg;
	int i;

#ifdef CONFIG_TULIP_NAPI
	napi_enable(&tp->napi);
#endif

	/* Wake the chip from sleep/snooze mode. */
	tulip_set_power_state (tp, 0, 0);

	/* Disable all WOL events */
	pci_enable_wake(tp->pdev, PCI_D3hot, 0);
	pci_enable_wake(tp->pdev, PCI_D3cold, 0);
	tulip_set_wolopts(tp->pdev, 0);

	/* On some chip revs we must set the MII/SYM port before the reset!? */
	if (tp->mii_cnt ||  (tp->mtable  &&  tp->mtable->has_mii))
		iowrite32(0x00040000, ioaddr + CSR6);

	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
	iowrite32(0x00000001, ioaddr + CSR0);
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
	udelay(100);

	/* Deassert reset.
	   Wait the specified 50 PCI cycles after a reset by initializing
	   Tx and Rx queues and the address filter list. */
	iowrite32(tp->csr0, ioaddr + CSR0);
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
	udelay(100);

	if (tulip_debug > 1)
		netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq);

	/* Hand the chip the DMA addresses of both descriptor rings. */
	iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
	iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
	tp->cur_rx = tp->cur_tx = 0;
	tp->dirty_rx = tp->dirty_tx = 0;

	if (tp->flags & MC_HASH_ONLY) {
		/* Hash-filter-only chips take the station address through
		 * CSRs rather than a setup frame. */
		u32 addr_low = get_unaligned_le32(dev->dev_addr);
		u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
		if (tp->chip_id == AX88140) {
			iowrite32(0, ioaddr + CSR13);
			iowrite32(addr_low, ioaddr + CSR14);
			iowrite32(1, ioaddr + CSR13);
			iowrite32(addr_high, ioaddr + CSR14);
		} else if (tp->flags & COMET_MAC_ADDR) {
			iowrite32(addr_low,  ioaddr + 0xA4);
			iowrite32(addr_high, ioaddr + 0xA8);
			iowrite32(0, ioaddr + CSR27);
			iowrite32(0, ioaddr + CSR28);
		}
	} else {
		/* This is set_rx_mode(), but without starting the transmitter. */
		const u16 *eaddrs = (const u16 *)dev->dev_addr;
		u16 *setup_frm = &tp->setup_frame[15*6];
		dma_addr_t mapping;

		/* 21140 bug: you must add the broadcast address. */
		memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
		/* Fill the final entry of the table with our physical address. */
		*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
		*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
		*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];

		mapping = dma_map_single(&tp->pdev->dev, tp->setup_frame,
					 sizeof(tp->setup_frame),
					 DMA_TO_DEVICE);
		/* skb == NULL marks this Tx slot as a setup frame for
		 * tulip_clean_tx_ring(). */
		tp->tx_buffers[tp->cur_tx].skb = NULL;
		tp->tx_buffers[tp->cur_tx].mapping = mapping;

		/* Put the setup frame on the Tx list. */
		tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
		tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
		tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);

		tp->cur_tx++;
	}

	tp->saved_if_port = dev->if_port;
	if (dev->if_port == 0)
		dev->if_port = tp->default_port;

	/* Allow selecting a default media. */
	i = 0;
	if (tp->mtable == NULL)
		goto media_picked;
	if (dev->if_port) {
		/* Media 12 ("10baseT(forced)") maps to leaf 0; MII media map
		 * to leaf 11. */
		int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
			(dev->if_port == 12 ? 0 : dev->if_port);
		for (i = 0; i < tp->mtable->leafcount; i++)
			if (tp->mtable->mleaf[i].media == looking_for) {
				dev_info(&dev->dev,
					 "Using user-specified media %s\n",
					 medianame[dev->if_port]);
				goto media_picked;
			}
	}
	if ((tp->mtable->defaultmedia & 0x0800) == 0) {
		int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
		for (i = 0; i < tp->mtable->leafcount; i++)
			if (tp->mtable->mleaf[i].media == looking_for) {
				dev_info(&dev->dev,
					 "Using EEPROM-set media %s\n",
					 medianame[looking_for]);
				goto media_picked;
			}
	}
	/* Start sensing first non-full-duplex media. */
	for (i = tp->mtable->leafcount - 1;
	     (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
		;
media_picked:

	tp->csr6 = 0;
	tp->cur_index = i;
	tp->nwayset = 0;

	/* Chip-specific startup: either force the selected medium or kick
	 * off the appropriate autonegotiation scheme. */
	if (dev->if_port) {
		if (tp->chip_id == DC21143  &&
		    (tulip_media_cap[dev->if_port] & MediaIsMII)) {
			/* We must reset the media CSRs when we force-select MII mode. */
			iowrite32(0x0000, ioaddr + CSR13);
			iowrite32(0x0000, ioaddr + CSR14);
			iowrite32(0x0008, ioaddr + CSR15);
		}
		tulip_select_media(dev, 1);
	} else if (tp->chip_id == DC21142) {
		if (tp->mii_cnt) {
			tulip_select_media(dev, 1);
			if (tulip_debug > 1)
				dev_info(&dev->dev,
					 "Using MII transceiver %d, status %04x\n",
					 tp->phys[0],
					 tulip_mdio_read(dev, tp->phys[0], 1));
			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
			tp->csr6 = csr6_mask_hdcap;
			dev->if_port = 11;
			iowrite32(0x0000, ioaddr + CSR13);
			iowrite32(0x0000, ioaddr + CSR14);
		} else
			t21142_start_nway(dev);
	} else if (tp->chip_id == PNIC2) {
	        /* for initial startup advertise 10/100 Full and Half */
	        tp->sym_advertise = 0x01E0;
                /* enable autonegotiate end interrupt */
	        iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
	        iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
		pnic2_start_nway(dev);
	} else if (tp->chip_id == LC82C168  &&  ! tp->medialock) {
		if (tp->mii_cnt) {
			dev->if_port = 11;
			tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
			iowrite32(0x0001, ioaddr + CSR15);
		} else if (ioread32(ioaddr + CSR5) & TPLnkPass)
			pnic_do_nway(dev);
		else {
			/* Start with 10mbps to do autonegotiation. */
			iowrite32(0x32, ioaddr + CSR12);
			tp->csr6 = 0x00420000;
			iowrite32(0x0001B078, ioaddr + 0xB8);
			iowrite32(0x0201B078, ioaddr + 0xB8);
			next_tick = 1*HZ;
		}
	} else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) &&
		   ! tp->medialock) {
		dev->if_port = 0;
		tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
	} else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
		/* Provided by BOLO, Macronix - 12/10/1998. */
		dev->if_port = 0;
		tp->csr6 = 0x01a80200;
		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
		iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
	} else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
		/* Enable automatic Tx underrun recovery. */
		iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
		dev->if_port = tp->mii_cnt ? 11 : 0;
		tp->csr6 = 0x00040000;
	} else if (tp->chip_id == AX88140) {
		tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
	} else
		tulip_select_media(dev, 1);

	/* Start the chip's Tx to process setup frame. */
	tulip_stop_rxtx(tp);
	barrier();
	udelay(5);
	iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
	tulip_start_rxtx(tp);
	iowrite32(0, ioaddr + CSR2);		/* Rx poll demand */

	if (tulip_debug > 2) {
		netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
			   ioread32(ioaddr + CSR0),
			   ioread32(ioaddr + CSR5),
			   ioread32(ioaddr + CSR6));
	}

	/* Set the timer to switch to check for link beat and perhaps switch
	   to an alternate media type. */
	tp->timer.expires = RUN_AT(next_tick);
	add_timer(&tp->timer);
#ifdef CONFIG_TULIP_NAPI
	timer_setup(&tp->oom_timer, oom_timer, 0);
#endif
}
500
501 static int
tulip_open(struct net_device * dev)502 tulip_open(struct net_device *dev)
503 {
504 struct tulip_private *tp = netdev_priv(dev);
505 int retval;
506
507 tulip_init_ring (dev);
508
509 retval = request_irq(tp->pdev->irq, tulip_interrupt, IRQF_SHARED,
510 dev->name, dev);
511 if (retval)
512 goto free_ring;
513
514 tulip_up (dev);
515
516 netif_start_queue (dev);
517
518 return 0;
519
520 free_ring:
521 tulip_free_ring (dev);
522 return retval;
523 }
524
525
/* net_device ndo_tx_timeout: the watchdog decided Tx is hung.  Recovery
 * strategy depends on the chip: MII media is left to the media monitor,
 * 21140-class chips defer to the media work item, everything else is
 * reset inline via tulip_tx_timeout_complete(). */
static void tulip_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	unsigned long flags;

	spin_lock_irqsave (&tp->lock, flags);

	if (tulip_media_cap[dev->if_port] & MediaIsMII) {
		/* Do nothing -- the media monitor should handle this. */
		if (tulip_debug > 1)
			dev_warn(&dev->dev,
				 "Transmit timeout using MII device\n");
	} else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
		   tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
		   tp->chip_id == DM910X) {
		dev_warn(&dev->dev,
			 "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
			 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
			 ioread32(ioaddr + CSR15));
		/* Let the media task perform the reset in process context. */
		tp->timeout_recovery = 1;
		schedule_work(&tp->media_work);
		goto out_unlock;
	} else if (tp->chip_id == PNIC2) {
		dev_warn(&dev->dev,
			 "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
			 (int)ioread32(ioaddr + CSR5),
			 (int)ioread32(ioaddr + CSR6),
			 (int)ioread32(ioaddr + CSR7),
			 (int)ioread32(ioaddr + CSR12));
	} else {
		dev_warn(&dev->dev,
			 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
		dev->if_port = 0;
	}

	/* Extremely verbose ring dump, compiled out by default. */
#if defined(way_too_many_messages)
	if (tulip_debug > 3) {
		int i;
		for (i = 0; i < RX_RING_SIZE; i++) {
			u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
			int j;
			printk(KERN_DEBUG
			       "%2d: %08x %08x %08x %08x  %02x %02x %02x\n",
			       i,
			       (unsigned int)tp->rx_ring[i].status,
			       (unsigned int)tp->rx_ring[i].length,
			       (unsigned int)tp->rx_ring[i].buffer1,
			       (unsigned int)tp->rx_ring[i].buffer2,
			       buf[0], buf[1], buf[2]);
			for (j = 0; ((j < 1600) && buf[j] != 0xee); j++)
				if (j < 100)
					pr_cont(" %02x", buf[j]);
			pr_cont(" j=%d\n", j);
		}
		printk(KERN_DEBUG "  Rx ring %p: ", tp->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
		printk(KERN_DEBUG "  Tx ring %p: ", tp->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
		pr_cont("\n");
	}
#endif

	tulip_tx_timeout_complete(tp, ioaddr);

out_unlock:
	spin_unlock_irqrestore (&tp->lock, flags);
	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue (dev);
}
600
601
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/* Builds chained descriptors (buffer2 points at the next descriptor,
 * with the last one wrapping to the start), then allocates and maps an
 * skb for every Rx slot.  Tx descriptors are chained but left unowned. */
static void tulip_init_ring(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int i;

	tp->susp_rx = 0;
	tp->ttimer = 0;
	tp->nir = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		tp->rx_ring[i].status = 0x00000000;
		tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
		/* Chain to the next descriptor in the ring. */
		tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
		tp->rx_buffers[i].skb = NULL;
		tp->rx_buffers[i].mapping = 0;
	}
	/* Mark the last entry as wrapping the ring. */
	tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
	tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);

	for (i = 0; i < RX_RING_SIZE; i++) {
		dma_addr_t mapping;

		/* Note the receive buffer must be longword aligned.
		   netdev_alloc_skb() provides 16 byte alignment.  But do *not*
		   use skb_reserve() to align the IP header! */
		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
		tp->rx_buffers[i].skb = skb;
		if (skb == NULL)
			break;
		mapping = dma_map_single(&tp->pdev->dev, skb->data,
					 PKT_BUF_SZ, DMA_FROM_DEVICE);
		tp->rx_buffers[i].mapping = mapping;
		tp->rx_ring[i].status = cpu_to_le32(DescOwned);	/* Owned by Tulip chip */
		tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
	}
	/* If allocation stopped early, i < RX_RING_SIZE and dirty_rx wraps
	 * to a huge unsigned value — presumably how tulip_refill_rx() learns
	 * that buffers are missing; TODO confirm against interrupt.c. */
	tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		tp->tx_buffers[i].skb = NULL;
		tp->tx_buffers[i].mapping = 0;
		tp->tx_ring[i].status = 0x00000000;
		tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
	}
	tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
}
651
/* net_device ndo_start_xmit: map the skb, fill the next Tx descriptor,
 * hand it to the chip and issue a Tx poll demand.  Always returns
 * NETDEV_TX_OK; ring exhaustion is prevented by stopping the queue two
 * entries before the ring fills (the spare slots are reserved for
 * set_rx_mode() setup frames). */
static netdev_tx_t
tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	u32 flag;
	dma_addr_t mapping;
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);

	/* Calculate the next Tx descriptor entry. */
	entry = tp->cur_tx % TX_RING_SIZE;

	tp->tx_buffers[entry].skb = skb;
	mapping = dma_map_single(&tp->pdev->dev, skb->data, skb->len,
				 DMA_TO_DEVICE);
	tp->tx_buffers[entry].mapping = mapping;
	tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);

	/* Request a Tx-done interrupt only every half ring, to cut the
	 * interrupt rate on the common path. */
	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
		flag = 0x60000000; /* No interrupt */
	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
		flag = 0xe0000000; /* Tx-done intr. */
	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
		flag = 0x60000000; /* No Tx-done intr. */
	} else {		/* Leave room for set_rx_mode() to fill entries. */
		flag = 0xe0000000; /* Tx-done intr. */
		netif_stop_queue(dev);
	}
	if (entry == TX_RING_SIZE-1)
		flag = 0xe0000000 | DESC_RING_WRAP;

	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
	/* if we were using Transmit Automatic Polling, we would need a
	 * wmb() here. */
	tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
	wmb();

	tp->cur_tx++;

	/* Trigger an immediate transmit demand. */
	iowrite32(0, tp->base_addr + CSR1);

	spin_unlock_irqrestore(&tp->lock, flags);

	return NETDEV_TX_OK;
}
700
/* Reclaim every outstanding Tx descriptor between dirty_tx and cur_tx:
 * unmap DMA buffers and free skbs.  Descriptors the chip still owns are
 * counted as Tx errors since they were never transmitted.  Called with
 * the device quiescing (see tulip_down()). */
static void tulip_clean_tx_ring(struct tulip_private *tp)
{
	unsigned int dirty_tx;

	for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
		dirty_tx++) {
		int entry = dirty_tx % TX_RING_SIZE;
		int status = le32_to_cpu(tp->tx_ring[entry].status);

		/* Sign bit set: the chip still owned this descriptor. */
		if (status < 0) {
			tp->dev->stats.tx_errors++;	/* It wasn't Txed */
			tp->tx_ring[entry].status = 0;
		}

		/* Check for Tx filter setup frames. */
		if (tp->tx_buffers[entry].skb == NULL) {
			/* test because dummy frames not mapped */
			if (tp->tx_buffers[entry].mapping)
				dma_unmap_single(&tp->pdev->dev,
						 tp->tx_buffers[entry].mapping,
						 sizeof(tp->setup_frame),
						 DMA_TO_DEVICE);
			continue;
		}

		dma_unmap_single(&tp->pdev->dev,
				 tp->tx_buffers[entry].mapping,
				 tp->tx_buffers[entry].skb->len,
				 DMA_TO_DEVICE);

		/* Free the original skb. */
		dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
		tp->tx_buffers[entry].skb = NULL;
		tp->tx_buffers[entry].mapping = 0;
	}
}
737
/* Quiesce the device: stop timers and deferred work, mask interrupts,
 * halt Tx/Rx, reclaim buffers, then drop the chip into snooze mode.
 * Counterpart of tulip_up(); called from tulip_close()/suspend. */
static void tulip_down (struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	unsigned long flags;

	/* Must complete before we mask interrupts: the media task touches
	 * the hardware. */
	cancel_work_sync(&tp->media_work);

#ifdef CONFIG_TULIP_NAPI
	napi_disable(&tp->napi);
#endif

	del_timer_sync (&tp->timer);
#ifdef CONFIG_TULIP_NAPI
	del_timer_sync (&tp->oom_timer);
#endif
	spin_lock_irqsave (&tp->lock, flags);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite32 (0x00000000, ioaddr + CSR7);

	/* Stop the Tx and Rx processes. */
	tulip_stop_rxtx(tp);

	/* prepare receive buffers */
	tulip_refill_rx(dev);

	/* release any unconsumed transmit buffers */
	tulip_clean_tx_ring(tp);

	/* Pick up the final missed-frame count unless the chip has
	 * vanished (all-ones read). */
	if (ioread32(ioaddr + CSR6) != 0xffffffff)
		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;

	spin_unlock_irqrestore (&tp->lock, flags);

	/* Re-initialize the media timer so the next tulip_up() can arm it. */
	timer_setup(&tp->timer, tulip_tbl[tp->chip_id].media_timer, 0);

	dev->if_port = tp->saved_if_port;

	/* Leave the driver in snooze, not sleep, mode. */
	tulip_set_power_state (tp, 0, 1);
}
780
tulip_free_ring(struct net_device * dev)781 static void tulip_free_ring (struct net_device *dev)
782 {
783 struct tulip_private *tp = netdev_priv(dev);
784 int i;
785
786 /* Free all the skbuffs in the Rx queue. */
787 for (i = 0; i < RX_RING_SIZE; i++) {
788 struct sk_buff *skb = tp->rx_buffers[i].skb;
789 dma_addr_t mapping = tp->rx_buffers[i].mapping;
790
791 tp->rx_buffers[i].skb = NULL;
792 tp->rx_buffers[i].mapping = 0;
793
794 tp->rx_ring[i].status = 0; /* Not owned by Tulip chip. */
795 tp->rx_ring[i].length = 0;
796 /* An invalid address. */
797 tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
798 if (skb) {
799 dma_unmap_single(&tp->pdev->dev, mapping, PKT_BUF_SZ,
800 DMA_FROM_DEVICE);
801 dev_kfree_skb (skb);
802 }
803 }
804
805 for (i = 0; i < TX_RING_SIZE; i++) {
806 struct sk_buff *skb = tp->tx_buffers[i].skb;
807
808 if (skb != NULL) {
809 dma_unmap_single(&tp->pdev->dev,
810 tp->tx_buffers[i].mapping, skb->len,
811 DMA_TO_DEVICE);
812 dev_kfree_skb (skb);
813 }
814 tp->tx_buffers[i].skb = NULL;
815 tp->tx_buffers[i].mapping = 0;
816 }
817 }
818
tulip_close(struct net_device * dev)819 static int tulip_close (struct net_device *dev)
820 {
821 struct tulip_private *tp = netdev_priv(dev);
822 void __iomem *ioaddr = tp->base_addr;
823
824 netif_stop_queue (dev);
825
826 tulip_down (dev);
827
828 if (tulip_debug > 1)
829 netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
830 ioread32 (ioaddr + CSR5));
831
832 free_irq (tp->pdev->irq, dev);
833
834 tulip_free_ring (dev);
835
836 return 0;
837 }
838
tulip_get_stats(struct net_device * dev)839 static struct net_device_stats *tulip_get_stats(struct net_device *dev)
840 {
841 struct tulip_private *tp = netdev_priv(dev);
842 void __iomem *ioaddr = tp->base_addr;
843
844 if (netif_running(dev)) {
845 unsigned long flags;
846
847 spin_lock_irqsave (&tp->lock, flags);
848
849 dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
850
851 spin_unlock_irqrestore(&tp->lock, flags);
852 }
853
854 return &dev->stats;
855 }
856
857
tulip_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)858 static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
859 {
860 struct tulip_private *np = netdev_priv(dev);
861 strscpy(info->driver, DRV_NAME, sizeof(info->driver));
862 strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
863 }
864
865
tulip_ethtool_set_wol(struct net_device * dev,struct ethtool_wolinfo * wolinfo)866 static int tulip_ethtool_set_wol(struct net_device *dev,
867 struct ethtool_wolinfo *wolinfo)
868 {
869 struct tulip_private *tp = netdev_priv(dev);
870
871 if (wolinfo->wolopts & (~tp->wolinfo.supported))
872 return -EOPNOTSUPP;
873
874 tp->wolinfo.wolopts = wolinfo->wolopts;
875 device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts);
876 return 0;
877 }
878
tulip_ethtool_get_wol(struct net_device * dev,struct ethtool_wolinfo * wolinfo)879 static void tulip_ethtool_get_wol(struct net_device *dev,
880 struct ethtool_wolinfo *wolinfo)
881 {
882 struct tulip_private *tp = netdev_priv(dev);
883
884 wolinfo->supported = tp->wolinfo.supported;
885 wolinfo->wolopts = tp->wolinfo.wolopts;
886 return;
887 }
888
889
/* ethtool operations exported by this driver (wired up at probe time). */
static const struct ethtool_ops ops = {
	.get_drvinfo = tulip_get_drvinfo,
	.set_wol     = tulip_ethtool_set_wol,
	.get_wol     = tulip_ethtool_get_wol,
};
895
/* Provide ioctl() calls to examine the MII xcvr state. */
/* Implements SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG.  For NWAY-capable
 * chips with no real PHY, phy_id 32 selects an emulated MII register
 * set synthesized from the SIA CSRs (CSR12/CSR14). */
static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	struct mii_ioctl_data *data = if_mii(rq);
	const unsigned int phy_idx = 0;
	int phy = tp->phys[phy_idx] & 0x1f;
	unsigned int regnum = data->reg_num;

	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		if (tp->mii_cnt)
			data->phy_id = phy;
		else if (tp->flags & HAS_NWAY)
			data->phy_id = 32;	/* emulated PHY, see below */
		else if (tp->chip_id == COMET)
			data->phy_id = 1;
		else
			return -ENODEV;
		fallthrough;

	case SIOCGMIIREG:		/* Read MII PHY register. */
		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
			/* Synthesize MII registers 0/1/4/5 from the SIA
			 * state so generic MII tools still work. */
			int csr12 = ioread32 (ioaddr + CSR12);
			int csr14 = ioread32 (ioaddr + CSR14);
			switch (regnum) {
			case 0:
                                if (((csr14<<5) & 0x1000) ||
                                        (dev->if_port == 5 && tp->nwayset))
                                        data->val_out = 0x1000;
                                else
                                        data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
                                                | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
				break;
			case 1:
                                data->val_out =
                                        0x1848 +
                                        ((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
                                        ((csr12&0x06) == 6 ? 0 : 4);
                                data->val_out |= 0x6048;
				break;
			case 4:
                                /* Advertised value, bogus 10baseTx-FD value from CSR6. */
                                data->val_out =
                                        ((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
					((csr14 >> 1) & 0x20) + 1;
                                data->val_out |= ((csr14 >> 9) & 0x03C0);
				break;
			case 5: data->val_out = tp->lpar; break;
			default: data->val_out = 0; break;
			}
		} else {
			data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
		}
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		if (regnum & ~0x1f)
			return -EINVAL;
		if (data->phy_id == phy) {
			u16 value = data->val_in;
			/* Mirror relevant writes into the driver's soft state. */
			switch (regnum) {
			case 0:	/* Check for autonegotiation  on or reset. */
				tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
				if (tp->full_duplex_lock)
					tp->full_duplex = (value & 0x0100) ? 1 : 0;
				break;
			case 4:
				tp->advertising[phy_idx] =
				tp->mii_advertise = data->val_in;
				break;
			}
		}
		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
			u16 value = data->val_in;
			if (regnum == 0) {
				/* BMCR restart-autoneg + aneg-enable bits. */
				if ((value & 0x1200) == 0x1200) {
					if (tp->chip_id == PNIC2) {
						pnic2_start_nway (dev);
					} else {
						t21142_start_nway (dev);
					}
				}
			} else if (regnum == 4)
				tp->sym_advertise = value;
		} else {
			tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}

	return -EOPNOTSUPP;
}
992
993
994 /* Set or clear the multicast filter for this adaptor.
995 Note that we only use exclusion around actually queueing the
996 new frame, not around filling tp->setup_frame. This is non-deterministic
997 when re-entered but still correct. */
998
/* Build a setup frame using the 512-bit multicast hash filter layout.
 * Each 16-bit filter word is written into both halves of a setup-frame
 * slot; the final perfect-filter slot carries our own station address.
 */
static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u16 filter[32] = { 0 };
	const u16 *mac;
	int i;

	/* The broadcast address always passes the filter. */
	__set_bit_le(255, filter);
	/* This should work on big-endian machines as well. */
	netdev_for_each_mc_addr(ha, dev)
		__set_bit_le(ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff, filter);

	for (i = 0; i < 32; i++) {
		setup_frm[2 * i] = filter[i];
		setup_frm[2 * i + 1] = filter[i];
	}

	/* Fill the final entry with our physical address. */
	setup_frm = &tp->setup_frame[13*6];
	mac = (const u16 *)dev->dev_addr;
	for (i = 0; i < 3; i++) {
		*setup_frm++ = mac[i];
		*setup_frm++ = mac[i];
	}
}
1027
/* Build a setup frame using the chip's 16-entry perfect filter.
 * Usable when there are at most 14 multicast addresses; each 16-bit
 * word of every address is duplicated into a setup-frame slot.
 */
static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	const u16 *mac;
	int i;

	netdev_for_each_mc_addr(ha, dev) {
		mac = (const u16 *)ha->addr;
		for (i = 0; i < 3; i++) {
			*setup_frm++ = mac[i];
			*setup_frm++ = mac[i];
		}
	}
	/* Fill the unused entries with the broadcast address. */
	memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);

	/* Fill the final entry with our physical address. */
	setup_frm = &tp->setup_frame[15*6];
	mac = (const u16 *)dev->dev_addr;
	for (i = 0; i < 3; i++) {
		*setup_frm++ = mac[i];
		*setup_frm++ = mac[i];
	}
}
1052
1053
/* Configure the receive filter (CSR6 mode bits plus, when needed, a
 * setup frame queued on the Tx ring).  Promiscuous and all-multicast
 * modes need only CSR6; hash-only work-alikes program CSR13/CSR14 or
 * CSR27/CSR28 directly; genuine Tulips receive a 192-byte setup frame
 * carrying either the perfect filter or the 512-bit hash table.
 */
static void set_rx_mode(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr6;

	/* Clear the current filter-mode bits before rebuilding them. */
	csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;

	tp->csr6 &= ~0x00D5;
	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
		csr6 |= AcceptAllMulticast | AcceptAllPhys;
	} else if ((netdev_mc_count(dev) > 1000) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well -- accept all multicasts. */
		tp->csr6 |= AcceptAllMulticast;
		csr6 |= AcceptAllMulticast;
	} else if (tp->flags & MC_HASH_ONLY) {
		/* Some work-alikes have only a 64-entry hash filter table. */
		/* Should verify correctness on big-endian/__powerpc__ */
		struct netdev_hw_addr *ha;
		if (netdev_mc_count(dev) > 64) {
			/* Arbitrary non-effective limit. */
			tp->csr6 |= AcceptAllMulticast;
			csr6 |= AcceptAllMulticast;
		} else {
			u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
			int filterbit;
			netdev_for_each_mc_addr(ha, dev) {
				/* COMET uses the low CRC bits, others the top 6. */
				if (tp->flags & COMET_MAC_ADDR)
					filterbit = ether_crc_le(ETH_ALEN,
								 ha->addr);
				else
					filterbit = ether_crc(ETH_ALEN,
							      ha->addr) >> 26;
				filterbit &= 0x3f;
				mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
				if (tulip_debug > 2)
					dev_info(&dev->dev,
						 "Added filter for %pM  %08x bit %d\n",
						 ha->addr,
						 ether_crc(ETH_ALEN, ha->addr),
						 filterbit);
			}
			/* Only touch the hardware when the table changed. */
			if (mc_filter[0] == tp->mc_filter[0] &&
			    mc_filter[1] == tp->mc_filter[1])
				; /* No change. */
			else if (tp->flags & IS_ASIX) {
				/* ASIX: hash words written via indirect CSR13/14. */
				iowrite32(2, ioaddr + CSR13);
				iowrite32(mc_filter[0], ioaddr + CSR14);
				iowrite32(3, ioaddr + CSR13);
				iowrite32(mc_filter[1], ioaddr + CSR14);
			} else if (tp->flags & COMET_MAC_ADDR) {
				iowrite32(mc_filter[0], ioaddr + CSR27);
				iowrite32(mc_filter[1], ioaddr + CSR28);
			}
			tp->mc_filter[0] = mc_filter[0];
			tp->mc_filter[1] = mc_filter[1];
		}
	} else {
		unsigned long flags;
		u32 tx_flags = 0x08000000 | 192;

		/* Note that only the low-address shortword of setup_frame is valid!
		   The values are doubled for big-endian architectures. */
		if (netdev_mc_count(dev) > 14) {
			/* Must use a multicast hash table. */
			build_setup_frame_hash(tp->setup_frame, dev);
			tx_flags = 0x08400000 | 192;
		} else {
			build_setup_frame_perfect(tp->setup_frame, dev);
		}

		spin_lock_irqsave(&tp->lock, flags);

		if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
			/* Same setup recently queued, we need not add it. */
		} else {
			unsigned int entry;
			int dummy = -1;

			/* Now add this frame to the Tx list. */

			entry = tp->cur_tx++ % TX_RING_SIZE;

			if (entry != 0) {
				/* Avoid a chip errata by prefixing a dummy entry. */
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tp->tx_ring[entry].length =
					(entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
				tp->tx_ring[entry].buffer1 = 0;
				/* Must set DescOwned later to avoid race with chip */
				dummy = entry;
				entry = tp->cur_tx++ % TX_RING_SIZE;

			}

			tp->tx_buffers[entry].skb = NULL;
			tp->tx_buffers[entry].mapping =
				dma_map_single(&tp->pdev->dev,
					       tp->setup_frame,
					       sizeof(tp->setup_frame),
					       DMA_TO_DEVICE);
			/* Put the setup frame on the Tx list. */
			if (entry == TX_RING_SIZE-1)
				tx_flags |= DESC_RING_WRAP;		/* Wrap ring. */
			tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
			tp->tx_ring[entry].buffer1 =
				cpu_to_le32(tp->tx_buffers[entry].mapping);
			/* Hand descriptors to the chip only after both are built. */
			tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
			if (dummy >= 0)
				tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
			if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
				netif_stop_queue(dev);

			/* Trigger an immediate transmit demand. */
			iowrite32(0, ioaddr + CSR1);
		}

		spin_unlock_irqrestore(&tp->lock, flags);
	}

	iowrite32(csr6, ioaddr + CSR6);
}
1179
/* Derive CSR0 cache-alignment/burst settings and enable PCI
 * Memory-Write-and-Invalidate for chips flagged HAS_PCI_MWI.
 * Falls back to conservative de4x5-derived defaults when the PCI
 * cache line size is zero or unsupported.  The result is stored
 * in tp->csr0.
 */
static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	u8 cache;
	u16 pci_command;
	u32 csr0;

	if (tulip_debug > 3)
		netdev_dbg(dev, "tulip_mwi_config()\n");

	tp->csr0 = csr0 = 0;

	/* if we have any cache line size at all, we can do MRM and MWI */
	csr0 |= MRM | MWI;

	/* Enable MWI in the standard PCI command bit.
	 * Check for the case where MWI is desired but not available
	 */
	pci_try_set_mwi(pdev);

	/* read result from hardware (in case bit refused to enable) */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
	if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
		csr0 &= ~MWI;

	/* if cache line size hardwired to zero, no MWI */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
	if ((csr0 & MWI) && (cache == 0)) {
		csr0 &= ~MWI;
		pci_clear_mwi(pdev);
	}

	/* assign per-cacheline-size cache alignment and
	 * burst length values
	 */
	switch (cache) {
	case 8:
		csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
		break;
	case 16:
		csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
		break;
	case 32:
		csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
		break;
	default:
		cache = 0;
		break;
	}

	/* if we have a good cache line size, we by now have a good
	 * csr0, so save it and exit
	 */
	if (cache)
		goto out;

	/* we don't have a good csr0 or cache line size, disable MWI */
	if (csr0 & MWI) {
		pci_clear_mwi(pdev);
		csr0 &= ~MWI;
	}

	/* sane defaults for burst length and cache alignment
	 * originally from de4x5 driver
	 */
	csr0 |= (8 << BurstLenShift) | (1 << CALShift);

out:
	tp->csr0 = csr0;
	if (tulip_debug > 2)
		netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
			   cache, csr0);
}
1253
1254 /*
1255 * Chips that have the MRM/reserved bit quirk and the burst quirk. That
1256 * is the DM910X and the on chip ULi devices
1257 */
1258
tulip_uli_dm_quirk(struct pci_dev * pdev)1259 static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1260 {
1261 if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1262 return 1;
1263 return 0;
1264 }
1265
/* Network-stack entry points; installed on dev->netdev_ops at probe time. */
static const struct net_device_ops tulip_netdev_ops = {
	.ndo_open		= tulip_open,
	.ndo_start_xmit		= tulip_start_xmit,
	.ndo_tx_timeout		= tulip_tx_timeout,
	.ndo_stop		= tulip_close,
	.ndo_get_stats		= tulip_get_stats,
	.ndo_eth_ioctl		= private_ioctl,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = poll_tulip,
#endif
};
1280
/* Early 486-era host bridges (Intel Saturn, SiS 496) that are known to
 * hang unless conservative CSR0 burst/alignment settings are forced;
 * checked with pci_dev_present() in tulip_init_one(). */
static const struct pci_device_id early_486_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
	{ },
};
1286
/* PCI probe: enable the device, map its registers, read the station
 * address from the serial EEPROM (or chip registers for COMET/PNIC),
 * apply per-chipset CSR0 quirks, register the net_device, and put the
 * chip into snooze mode until opened.  Returns 0 or a negative errno.
 */
static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct tulip_private *tp;
	/* See note below on the multiport cards. */
	static unsigned char last_phys_addr[ETH_ALEN] = {
		0x00, 'L', 'i', 'n', 'u', 'x'
	};
#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
	static int last_irq;
#endif
	int i, irq;
	unsigned short sum;
	unsigned char *ee_data;
	struct net_device *dev;
	void __iomem *ioaddr;
	static int board_idx = -1;
	int chip_idx = ent->driver_data;
	const char *chip_name = tulip_tbl[chip_idx].chip_name;
	unsigned int eeprom_missing = 0;
	u8 addr[ETH_ALEN] __aligned(2);
	unsigned int force_csr0 = 0;

	board_idx++;

	/*
	 *	Lan media wire a tulip chip to a wan interface. Needs a very
	 *	different driver (lmc driver)
	 */

	if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
		pr_err("skipping LMC card\n");
		return -ENODEV;
	} else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
		   (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
		pr_err("skipping SBE T3E3 port\n");
		return -ENODEV;
	}

	/*
	 *	DM910x chips should be handled by the dmfe driver, except
	 *	on-board chips on SPARC systems.  Also, early DM9100s need
	 *	software CRC which only the dmfe driver supports.
	 */

#ifdef CONFIG_TULIP_DM910X
	if (chip_idx == DM910X) {
		struct device_node *dp;

		if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
		    pdev->revision < 0x30) {
			pr_info("skipping early DM9100 with Crc bug (use dmfe)\n");
			return -ENODEV;
		}

		dp = pci_device_to_OF_node(pdev);
		if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
			pr_info("skipping DM910x expansion card (use dmfe)\n");
			return -ENODEV;
		}
	}
#endif

	/*
	 *	Looks for early PCI chipsets where people report hangs
	 *	without the workarounds being on.
	 */

	/* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
	      aligned.  Aries might need this too. The Saturn errata are not
	      pretty reading but thankfully it's an old 486 chipset.

	   2. The dreaded SiS496 486 chipset. Same workaround as Intel
	      Saturn.
	*/

	if (pci_dev_present(early_486_chipsets)) {
		csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
		force_csr0 = 1;
	}

	/* bugfix: the ASIX must have a burst limit or horrible things happen. */
	if (chip_idx == AX88140) {
		if ((csr0 & 0x3f00) == 0)
			csr0 |= 0x2000;
	}

	/* PNIC doesn't have MWI/MRL/MRM... */
	if (chip_idx == LC82C168)
		csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */

	/* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
	if (tulip_uli_dm_quirk(pdev)) {
		csr0 &= ~0x01f100ff;
#if defined(CONFIG_SPARC)
		csr0 = (csr0 & ~0xff00) | 0xe000;
#endif
	}
	/*
	 *	And back to business
	 */

	i = pcim_enable_device(pdev);
	if (i) {
		pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
		return i;
	}

	irq = pdev->irq;

	/* alloc_etherdev ensures aligned and zeroed private structures */
	dev = devm_alloc_etherdev(&pdev->dev, sizeof(*tp));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
		pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
		       pci_name(pdev),
		       (unsigned long long)pci_resource_len (pdev, 0),
		       (unsigned long long)pci_resource_start (pdev, 0));
		return -ENODEV;
	}

	/* grab all resources from both PIO and MMIO regions, as we
	 * don't want anyone else messing around with our hardware */
	if (pci_request_regions(pdev, DRV_NAME))
		return -ENODEV;

	ioaddr = pcim_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);

	if (!ioaddr)
		return -ENODEV;

	/*
	 * initialize private data structure 'tp'
	 * it is zeroed and aligned in alloc_etherdev
	 */
	tp = netdev_priv(dev);
	tp->dev = dev;

	/* Rx and Tx descriptor rings share one coherent DMA allocation. */
	tp->rx_ring = dmam_alloc_coherent(&pdev->dev,
					  sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
					  sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
					  &tp->rx_ring_dma, GFP_KERNEL);
	if (!tp->rx_ring)
		return -ENODEV;
	tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
	tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;

	tp->chip_id = chip_idx;
	tp->flags = tulip_tbl[chip_idx].flags;

	tp->wolinfo.supported = 0;
	tp->wolinfo.wolopts = 0;
	/* COMET: Enable power management only for AN983B */
	if (chip_idx == COMET ) {
		u32 sig;
		pci_read_config_dword (pdev, 0x80, &sig);
		if (sig == 0x09811317) {
			tp->flags |= COMET_PM;
			tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
			pr_info("%s: Enabled WOL support for AN983B\n",
				__func__);
		}
	}
	tp->pdev = pdev;
	tp->base_addr = ioaddr;
	tp->revision = pdev->revision;
	tp->csr0 = csr0;
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->mii_lock);
	timer_setup(&tp->timer, tulip_tbl[tp->chip_id].media_timer, 0);

	INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);

	if (IS_ENABLED(CONFIG_TULIP_MWI) && !force_csr0 &&
	    (tp->flags & HAS_PCI_MWI))
		tulip_mwi_config (pdev, dev);

	/* Stop the chip's Tx and Rx processes. */
	tulip_stop_rxtx(tp);

	pci_set_master(pdev);

#ifdef CONFIG_GSC
	if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
		switch (pdev->subsystem_device) {
		default:
			break;
		case 0x1061:
		case 0x1062:
		case 0x1063:
		case 0x1098:
		case 0x1099:
		case 0x10EE:
			tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
			chip_name = "GSC DS21140 Tulip";
		}
	}
#endif

	/* Clear the missed-packet counter. */
	ioread32(ioaddr + CSR8);

	/* The station address ROM is read byte serially.  The register must
	   be polled, waiting for the value to be read bit serially from the
	   EEPROM.
	   */
	ee_data = tp->eeprom;
	memset(ee_data, 0, sizeof(tp->eeprom));
	sum = 0;
	if (chip_idx == LC82C168) {
		/* PNIC: poll the internal ROM via register 0x98/CSR9. */
		for (i = 0; i < 3; i++) {
			int value, boguscnt = 100000;
			iowrite32(0x600 | i, ioaddr + 0x98);
			do {
				value = ioread32(ioaddr + CSR9);
			} while (value < 0  && --boguscnt > 0);
			put_unaligned_le16(value, ((__le16 *)addr) + i);
			sum += value & 0xffff;
		}
		eth_hw_addr_set(dev, addr);
	} else if (chip_idx == COMET) {
		/* No need to read the EEPROM. */
		put_unaligned_le32(ioread32(ioaddr + 0xA4), addr);
		put_unaligned_le16(ioread32(ioaddr + 0xA8), addr + 4);
		eth_hw_addr_set(dev, addr);
		for (i = 0; i < 6; i ++)
			sum += dev->dev_addr[i];
	} else {
		/* A serial EEPROM interface, we read now and sort it out later. */
		int sa_offset = 0;
		int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
		int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16);

		if (ee_max_addr > sizeof(tp->eeprom))
			ee_max_addr = sizeof(tp->eeprom);

		for (i = 0; i < ee_max_addr ; i += sizeof(u16)) {
			u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
			ee_data[i] = data & 0xff;
			ee_data[i + 1] = data >> 8;
		}

		/* DEC now has a specification (see Notes) but early board makers
		   just put the address in the first EEPROM locations. */
		/* This does  memcmp(ee_data, ee_data+16, 8) */
		for (i = 0; i < 8; i ++)
			if (ee_data[i] != ee_data[16+i])
				sa_offset = 20;
		if (chip_idx == CONEXANT) {
			/* Check that the tuple type and length is correct. */
			if (ee_data[0x198] == 0x04  &&  ee_data[0x199] == 6)
				sa_offset = 0x19A;
		} else if (ee_data[0] == 0xff  &&  ee_data[1] == 0xff &&
				   ee_data[2] == 0) {
			sa_offset = 2;		/* Grrr, damn Matrox boards. */
		}
#ifdef CONFIG_MIPS_COBALT
               if ((pdev->bus->number == 0) &&
                   ((PCI_SLOT(pdev->devfn) == 7) ||
                    (PCI_SLOT(pdev->devfn) == 12))) {
                       /* Cobalt MAC address in first EEPROM locations. */
                       sa_offset = 0;
		       /* Ensure our media table fixup get's applied */
		       memcpy(ee_data + 16, ee_data, 8);
               }
#endif
#ifdef CONFIG_GSC
		/* Check to see if we have a broken srom */
		if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
			/* pci_vendor_id and subsystem_id are swapped */
			ee_data[0] = ee_data[2];
			ee_data[1] = ee_data[3];
			ee_data[2] = 0x61;
			ee_data[3] = 0x10;

			/* HSC-PCI boards need to be byte-swaped and shifted
			 * up 1 word.  This shift needs to happen at the end
			 * of the MAC first because of the 2 byte overlap.
			 */
			for (i = 4; i >= 0; i -= 2) {
				ee_data[17 + i + 3] = ee_data[17 + i];
				ee_data[16 + i + 5] = ee_data[16 + i];
			}
		}
#endif

		for (i = 0; i < 6; i ++) {
			addr[i] = ee_data[i + sa_offset];
			sum += ee_data[i + sa_offset];
		}
		eth_hw_addr_set(dev, addr);
	}
	/* Lite-On boards have the address byte-swapped. */
	if ((dev->dev_addr[0] == 0xA0 ||
	     dev->dev_addr[0] == 0xC0 ||
	     dev->dev_addr[0] == 0x02) &&
	    dev->dev_addr[1] == 0x00) {
		for (i = 0; i < 6; i+=2) {
			addr[i] = dev->dev_addr[i+1];
			addr[i+1] = dev->dev_addr[i];
		}
		eth_hw_addr_set(dev, addr);
	}

	/* On the Zynx 315 Etherarray and other multiport boards only the
	   first Tulip has an EEPROM.
	   On Sparc systems the mac address is held in the OBP property
	   "local-mac-address".
	   The addresses of the subsequent ports are derived from the first.
	   Many PCI BIOSes also incorrectly report the IRQ line, so we correct
	   that here as well. */
	if (sum == 0  || sum == 6*0xff) {
#if defined(CONFIG_SPARC)
		struct device_node *dp = pci_device_to_OF_node(pdev);
		const unsigned char *addr2;
		int len;
#endif
		eeprom_missing = 1;
		for (i = 0; i < 5; i++)
			addr[i] = last_phys_addr[i];
		addr[i] = last_phys_addr[i] + 1;
		eth_hw_addr_set(dev, addr);
#if defined(CONFIG_SPARC)
		addr2 = of_get_property(dp, "local-mac-address", &len);
		if (addr2 && len == ETH_ALEN)
			eth_hw_addr_set(dev, addr2);
#endif
#if defined(__i386__) || defined(__x86_64__)	/* Patch up x86 BIOS bug. */
		if (last_irq)
			irq = last_irq;
#endif
	}

	for (i = 0; i < 6; i++)
		last_phys_addr[i] = dev->dev_addr[i];
#if defined(__i386__) || defined(__x86_64__)	/* Patch up x86 BIOS bug. */
	last_irq = irq;
#endif

	/* The lower four bits are the media type. */
	if (board_idx >= 0  &&  board_idx < MAX_UNITS) {
		if (options[board_idx] & MEDIA_MASK)
			tp->default_port = options[board_idx] & MEDIA_MASK;
		if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
			tp->full_duplex = 1;
		if (mtu[board_idx] > 0)
			dev->mtu = mtu[board_idx];
	}
	if (dev->mem_start & MEDIA_MASK)
		tp->default_port = dev->mem_start & MEDIA_MASK;
	if (tp->default_port) {
		pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
			board_idx, medianame[tp->default_port & MEDIA_MASK]);
		tp->medialock = 1;
		if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
			tp->full_duplex = 1;
	}
	if (tp->full_duplex)
		tp->full_duplex_lock = 1;

	if (tulip_media_cap[tp->default_port] & MediaIsMII) {
		static const u16 media2advert[] = {
			0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200
		};
		tp->mii_advertise = media2advert[tp->default_port - 9];
		tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
	}

	if (tp->flags & HAS_MEDIA_TABLE) {
		sprintf(dev->name, DRV_NAME "%d", board_idx);	/* hack */
		tulip_parse_eeprom(dev);
		strcpy(dev->name, "eth%d");			/* un-hack */
	}

	if ((tp->flags & ALWAYS_CHECK_MII) ||
		(tp->mtable  &&  tp->mtable->has_mii) ||
		( ! tp->mtable  &&  (tp->flags & HAS_MII))) {
		if (tp->mtable  &&  tp->mtable->has_mii) {
			for (i = 0; i < tp->mtable->leafcount; i++)
				if (tp->mtable->mleaf[i].media == 11) {
					tp->cur_index = i;
					tp->saved_if_port = dev->if_port;
					tulip_select_media(dev, 2);
					dev->if_port = tp->saved_if_port;
					break;
				}
		}

		/* Find the connected MII xcvrs.
		   Doing this in open() would allow detecting external xcvrs
		   later, but takes much time. */
		tulip_find_mii (dev, board_idx);
	}

	/* The Tulip-specific entries in the device structure. */
	dev->netdev_ops = &tulip_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_TULIP_NAPI
	netif_napi_add_weight(dev, &tp->napi, tulip_poll, 16);
#endif
	dev->ethtool_ops = &ops;

	i = register_netdev(dev);
	if (i)
		return i;

	pci_set_drvdata(pdev, dev);

	dev_info(&dev->dev,
#ifdef CONFIG_TULIP_MMIO
		 "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
#else
		 "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
#endif
		 chip_name, pdev->revision,
		 (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
		 eeprom_missing ? " EEPROM not present," : "",
		 dev->dev_addr, irq);

	if (tp->chip_id == PNIC2)
		tp->link_change = pnic2_lnk_change;
	else if (tp->flags & HAS_NWAY)
		tp->link_change = t21142_lnk_change;
	else if (tp->flags & HAS_PNICNWAY)
		tp->link_change = pnic_lnk_change;

	/* Reset the xcvr interface and turn on heartbeat. */
	switch (chip_idx) {
	case DC21140:
	case DM910X:
	default:
		if (tp->mtable)
			iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
		break;
	case DC21142:
		if (tp->mii_cnt  ||  tulip_media_cap[dev->if_port] & MediaIsMII) {
			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
			iowrite32(0x0000, ioaddr + CSR13);
			iowrite32(0x0000, ioaddr + CSR14);
			iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
		} else
			t21142_start_nway(dev);
		break;
	case PNIC2:
	        /* just do a reset for sanity sake */
		iowrite32(0x0000, ioaddr + CSR13);
		iowrite32(0x0000, ioaddr + CSR14);
		break;
	case LC82C168:
		if ( ! tp->mii_cnt) {
			tp->nway = 1;
			tp->nwayset = 0;
			iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
			iowrite32(0x30, ioaddr + CSR12);
			iowrite32(0x0001F078, ioaddr + CSR6);
			iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
		}
		break;
	case MX98713:
	case COMPEX9881:
		iowrite32(0x00000000, ioaddr + CSR6);
		iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
		iowrite32(0x00000001, ioaddr + CSR13);
		break;
	case MX98715:
	case MX98725:
		iowrite32(0x01a80000, ioaddr + CSR6);
		iowrite32(0xFFFFFFFF, ioaddr + CSR14);
		iowrite32(0x00001000, ioaddr + CSR12);
		break;
	case COMET:
		/* No initialization necessary. */
		break;
	}

	/* put the chip in snooze mode until opened */
	tulip_set_power_state (tp, 0, 1);

	return 0;
}
1771
1772
/* set the registers according to the given wolopts */
static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;

	/* Only the ADMtek Comet AN983B variant supports WOL here. */
	if (tp->flags & COMET_PM) {
		unsigned int tmp;

		/* Switch CSR18 out of APM mode into D3-capable PM mode. */
		tmp = ioread32(ioaddr + CSR18);
		tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
		tmp |= comet_csr18_pm_mode;
		iowrite32(tmp, ioaddr + CSR18);

		/* Set the Wake-up Control/Status Register to the given WOL options*/
		tmp = ioread32(ioaddr + CSR13);
		tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
		if (wolopts & WAKE_MAGIC)
			tmp |= comet_csr13_mpre;
		if (wolopts & WAKE_PHY)
			tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce;
		/* Clear the event flags */
		tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc;
		iowrite32(tmp, ioaddr + CSR13);
	}
}
1800
tulip_suspend(struct device * dev_d)1801 static int __maybe_unused tulip_suspend(struct device *dev_d)
1802 {
1803 struct net_device *dev = dev_get_drvdata(dev_d);
1804 struct tulip_private *tp = netdev_priv(dev);
1805
1806 if (!dev)
1807 return -EINVAL;
1808
1809 if (!netif_running(dev))
1810 goto save_state;
1811
1812 tulip_down(dev);
1813
1814 netif_device_detach(dev);
1815 /* FIXME: it needlessly adds an error path. */
1816 free_irq(tp->pdev->irq, dev);
1817
1818 save_state:
1819 tulip_set_wolopts(to_pci_dev(dev_d), tp->wolinfo.wolopts);
1820 device_set_wakeup_enable(dev_d, !!tp->wolinfo.wolopts);
1821
1822 return 0;
1823 }
1824
tulip_resume(struct device * dev_d)1825 static int __maybe_unused tulip_resume(struct device *dev_d)
1826 {
1827 struct pci_dev *pdev = to_pci_dev(dev_d);
1828 struct net_device *dev = dev_get_drvdata(dev_d);
1829 struct tulip_private *tp = netdev_priv(dev);
1830 void __iomem *ioaddr = tp->base_addr;
1831 unsigned int tmp;
1832 int retval = 0;
1833
1834 if (!dev)
1835 return -EINVAL;
1836
1837 if (!netif_running(dev))
1838 return 0;
1839
1840 retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED,
1841 dev->name, dev);
1842 if (retval) {
1843 pr_err("request_irq failed in resume\n");
1844 return retval;
1845 }
1846
1847 if (tp->flags & COMET_PM) {
1848 device_set_wakeup_enable(dev_d, 0);
1849
1850 /* Clear the PMES flag */
1851 tmp = ioread32(ioaddr + CSR20);
1852 tmp |= comet_csr20_pmes;
1853 iowrite32(tmp, ioaddr + CSR20);
1854
1855 /* Disable all wake-up events */
1856 tulip_set_wolopts(pdev, 0);
1857 }
1858 netif_device_attach(dev);
1859
1860 if (netif_running(dev))
1861 tulip_up(dev);
1862
1863 return 0;
1864 }
1865
/* PCI remove callback: unregister the net_device; all other resources
 * are device-managed (devm/pcim) and released automatically. */
static void tulip_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev)
		unregister_netdev(dev);
}
1875
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */

/* Invoke the interrupt handler with the device IRQ masked so the
 * lockless handler cannot re-enter itself. */
static void poll_tulip (struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	const int irq = tp->pdev->irq;

	/* disable_irq here is not very nice, but with the lockless
	   interrupt handler we have no other choice. */
	disable_irq(irq);
	tulip_interrupt (irq, dev);
	enable_irq(irq);
}
#endif
1895
/* Power-management callbacks (no-ops when CONFIG_PM is disabled). */
static SIMPLE_DEV_PM_OPS(tulip_pm_ops, tulip_suspend, tulip_resume);

/* PCI driver glue binding the 21x4x device table to probe/remove/PM. */
static struct pci_driver tulip_driver = {
	.name		= DRV_NAME,
	.id_table	= tulip_pci_tbl,
	.probe		= tulip_init_one,
	.remove		= tulip_remove_one,
	.driver.pm	= &tulip_pm_ops,
};
1905
1906
tulip_init(void)1907 static int __init tulip_init (void)
1908 {
1909 if (!csr0) {
1910 pr_warn("tulip: unknown CPU architecture, using default csr0\n");
1911 /* default to 8 longword cache line alignment */
1912 csr0 = 0x00A00000 | 0x4800;
1913 }
1914
1915 /* copy module parms into globals */
1916 tulip_rx_copybreak = rx_copybreak;
1917 tulip_max_interrupt_work = max_interrupt_work;
1918
1919 /* probe for and init boards */
1920 return pci_register_driver(&tulip_driver);
1921 }
1922
1923
/* Module exit: unregister the PCI driver; per-device teardown happens
 * via tulip_remove_one(). */
static void __exit tulip_cleanup (void)
{
	pci_unregister_driver (&tulip_driver);
}
1928
1929
/* Module entry/exit hooks. */
module_init(tulip_init);
module_exit(tulip_cleanup);
1932