/*
    A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
    ethernet driver for Linux.
    Copyright (C) 1997  Sten Wang

    This program is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License
    as published by the Free Software Foundation; either version 2
    of the License, or (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    DAVICOM Web-Site: www.davicom.com.tw

    Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
    Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>

    (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.

    Marcelo Tosatti <marcelo@conectiva.com.br> :
    Made it compile in 2.3 (device to net_device)

    Alan Cox <alan@lxorguk.ukuu.org.uk> :
    Cleaned up for kernel merge.
    Removed the back compatibility support
    Reformatted, fixing spelling etc as I went
    Removed IRQ 0-15 assumption

    Jeff Garzik <jgarzik@pobox.com> :
    Updated to use new PCI driver API.
    Resource usage cleanups.
    Report driver version to user.

    Tobias Ringstrom <tori@unhappy.mine.nu> :
    Cleaned up and added SMP safety.  Thanks go to Jeff Garzik,
    Andrew Morton and Frank Davis for the SMP safety fixes.

    Vojtech Pavlik <vojtech@suse.cz> :
    Cleaned up pointer arithmetics.
    Fixed a lot of 64bit issues.
    Cleaned up printk()s a bit.
    Fixed some obvious big endian problems.

    Tobias Ringstrom <tori@unhappy.mine.nu> :
    Use time_after for jiffies calculation.  Added ethtool
    support.  Updated PCI resource allocation.  Do not
    forget to unmap PCI mapped skbs.

    Alan Cox <alan@lxorguk.ukuu.org.uk>
    Added new PCI identifiers provided by Clear Zhang at ALi
    for their 1563 ethernet device.

    TODO

    Check on 64 bit boxes.
    Check and fix on big endian boxes.

    Test and make sure PCI latency is now correct for all cases.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"dmfe"
#define DRV_VERSION	"1.36.4"
#define DRV_RELDATE	"2002-01-17"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/bitops.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/irq.h>

#ifdef CONFIG_TULIP_DM910X
#include <linux/of.h>
#endif


/* Board/System/Debug information/definition ---------------- */
#define PCI_DM9132_ID   0x91321282      /* Davicom DM9132 ID */
#define PCI_DM9102_ID   0x91021282      /* Davicom DM9102 ID */
#define PCI_DM9100_ID   0x91001282      /* Davicom DM9100 ID */
#define PCI_DM9009_ID   0x90091282      /* Davicom DM9009 ID */

#define DM9102_IO_SIZE  0x80
#define DM9102A_IO_SIZE 0x100
#define TX_MAX_SEND_CNT 0x1             /* Maximum TX packets sent at a time */
#define TX_DESC_CNT     0x10            /* Allocated Tx descriptors */
#define RX_DESC_CNT     0x20            /* Allocated Rx descriptors */
#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)	/* Max TX packet count */
#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)	/* TX wakeup count */
#define DESC_ALL_CNT    (TX_DESC_CNT + RX_DESC_CNT)
#define TX_BUF_ALLOC    0x600
#define RX_ALLOC_SIZE   0x620
#define DM910X_RESET    1
#define CR0_DEFAULT     0x00E00000      /* TX & RX burst mode */
#define CR6_DEFAULT     0x00080000      /* HD */
#define CR7_DEFAULT     0x180c1
#define CR15_DEFAULT    0x06            /* TxJabber RxWatchdog */
#define TDES0_ERR_MASK  0x4302          /* TXJT, LC, EC, FUE */
#define MAX_PACKET_SIZE 1514
#define DMFE_MAX_MULTICAST 14
#define RX_COPY_SIZE	100
#define MAX_CHECK_PACKET 0x8000
#define DM9801_NOISE_FLOOR 8
#define DM9802_NOISE_FLOOR 5

#define DMFE_WOL_LINKCHANGE	0x20000000
#define DMFE_WOL_SAMPLEPACKET	0x10000000
#define DMFE_WOL_MAGICPACKET	0x08000000


#define DMFE_10MHF      0
#define DMFE_100MHF     1
#define DMFE_10MFD      4
#define DMFE_100MFD     5
#define DMFE_AUTO       8
#define DMFE_1M_HPNA    0x10

#define DMFE_TXTH_72	0x400000	/* TX TH 72 byte */
#define DMFE_TXTH_96	0x404000	/* TX TH 96 byte */
#define DMFE_TXTH_128	0x0000		/* TX TH 128 byte */
#define DMFE_TXTH_256	0x4000		/* TX TH 256 byte */
#define DMFE_TXTH_512	0x8000		/* TX TH 512 byte */
#define DMFE_TXTH_1K	0xC000		/* TX TH 1K  byte */

#define DMFE_TIMER_WUT  (jiffies + HZ * 1)/* timer wakeup time : 1 second */
#define DMFE_TX_TIMEOUT ((3*HZ)/2)	/* tx packet time-out time 1.5 s */
#define DMFE_TX_KICK 	(HZ/2)	/* tx packet kick-out time 0.5 s */

#define dw32(reg, val)	iowrite32(val, ioaddr + (reg))
#define dw16(reg, val)	iowrite16(val, ioaddr + (reg))
#define dr32(reg)	ioread32(ioaddr + (reg))
#define dr16(reg)	ioread16(ioaddr + (reg))
#define dr8(reg)	ioread8(ioaddr + (reg))

#define DMFE_DBUG(dbug_now, msg, value)			\
	do {						\
		if (dmfe_debug || (dbug_now))		\
			pr_err("%s %lx\n",		\
			       (msg), (long) (value));	\
	} while (0)

#define SHOW_MEDIA_TYPE(mode)				\
	pr_info("Change Speed to %sMbps %s duplex\n",	\
		(mode & 1) ? "100":"10",		\
		(mode & 4) ? "full":"half");


/* CR9 definition: SROM/MII */
#define CR9_SROM_READ   0x4800
#define CR9_SRCS        0x1
#define CR9_SRCLK       0x2
#define CR9_CRDOUT      0x8
#define SROM_DATA_0     0x0
#define SROM_DATA_1     0x4
#define PHY_DATA_1      0x20000
#define PHY_DATA_0      0x00000
#define MDCLKH          0x10000

#define PHY_POWER_DOWN	0x800

#define SROM_V41_CODE   0x14

#define __CHK_IO_SIZE(pci_id, dev_rev) \
 (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \
	DM9102A_IO_SIZE: DM9102_IO_SIZE)

#define CHK_IO_SIZE(pci_dev) \
	(__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
	(pci_dev)->revision))
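
/*
 * Note (illustrative): CHK_IO_SIZE() packs the PCI device and vendor IDs
 * into the same 32-bit layout as the PCI_DM91xx_ID constants above, e.g.
 * for the DM9132 (vendor 0x1282, device 0x9132):
 *
 *	(0x9132 << 16) | 0x1282 == 0x91321282 == PCI_DM9132_ID
 *
 * so the DM9132, and any chip with revision >= 0x30 (DM9102A), gets the
 * larger 0x100-byte I/O window.
 */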

/* Sten Check */
#define DEVICE net_device

/* Structure/enum declaration ------------------------------- */
struct tx_desc {
	__le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
	char *tx_buf_ptr;               /* Data for us */
	struct tx_desc *next_tx_desc;
} __attribute__(( aligned(32) ));

struct rx_desc {
	__le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
	struct sk_buff *rx_skb_ptr;	/* Data for us */
	struct rx_desc *next_rx_desc;
} __attribute__(( aligned(32) ));
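
/*
 * Note (illustrative): both rings run in the chip's chained descriptor
 * mode. Bit 31 of tdes0/rdes0 (0x80000000) is the ownership bit (set =
 * owned by the chip), tdes2/rdes2 holds the DMA address of the data
 * buffer, and tdes3/rdes3 holds the DMA address of the next descriptor,
 * with the last descriptor pointing back to the first (see
 * dmfe_descriptor_init()). tx_buf_ptr/rx_skb_ptr and next_*_desc mirror
 * the same chain in CPU-visible pointers.
 */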

struct dmfe_board_info {
	u32 chip_id;			/* Chip vendor/Device ID */
	u8 chip_revision;		/* Chip revision */
	struct net_device *next_dev;	/* next device */
	struct pci_dev *pdev;		/* PCI device */
	spinlock_t lock;

	void __iomem *ioaddr;		/* I/O base address */
	u32 cr0_data;
	u32 cr5_data;
	u32 cr6_data;
	u32 cr7_data;
	u32 cr15_data;

	/* pointer for memory physical address */
	dma_addr_t buf_pool_dma_ptr;	/* Tx buffer pool memory */
	dma_addr_t buf_pool_dma_start;	/* Tx buffer pool align dword */
	dma_addr_t desc_pool_dma_ptr;	/* descriptor pool memory */
	dma_addr_t first_tx_desc_dma;
	dma_addr_t first_rx_desc_dma;

	/* descriptor pointer */
	unsigned char *buf_pool_ptr;	/* Tx buffer pool memory */
	unsigned char *buf_pool_start;	/* Tx buffer pool align dword */
	unsigned char *desc_pool_ptr;	/* descriptor pool memory */
	struct tx_desc *first_tx_desc;
	struct tx_desc *tx_insert_ptr;
	struct tx_desc *tx_remove_ptr;
	struct rx_desc *first_rx_desc;
	struct rx_desc *rx_insert_ptr;
	struct rx_desc *rx_ready_ptr;	/* packet arrival pointer */
	unsigned long tx_packet_cnt;	/* transmitted packet count */
	unsigned long tx_queue_cnt;	/* wait to send packet count */
	unsigned long rx_avail_cnt;	/* available rx descriptor count */
	unsigned long interval_rx_cnt;	/* rx packets received per callback interval */

	u16 HPNA_command;		/* For HPNA register 16 */
	u16 HPNA_timer;			/* For HPNA remote device check */
	u16 dbug_cnt;
	u16 NIC_capability;		/* NIC media capability */
	u16 PHY_reg4;			/* Saved Phyxcer register 4 value */

	u8 HPNA_present;		/* 0:none, 1:DM9801, 2:DM9802 */
	u8 chip_type;			/* Keep DM9102A chip type */
	u8 media_mode;			/* user specified media mode */
	u8 op_mode;			/* actual working media mode */
	u8 phy_addr;
	u8 wait_reset;			/* Hardware failed, need to reset */
	u8 dm910x_chk_mode;		/* Operating mode check */
	u8 first_in_callback;		/* Flag to record state */
	u8 wol_mode;			/* user WOL settings */
	struct timer_list timer;

	/* Driver defined statistic counters */
	unsigned long tx_fifo_underrun;
	unsigned long tx_loss_carrier;
	unsigned long tx_no_carrier;
	unsigned long tx_late_collision;
	unsigned long tx_excessive_collision;
	unsigned long tx_jabber_timeout;
	unsigned long reset_count;
	unsigned long reset_cr8;
	unsigned long reset_fatal;
	unsigned long reset_TXtimeout;

	/* NIC SROM data */
	unsigned char srom[128];
};

enum dmfe_offsets {
	DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
	DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
	DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
	DCR15 = 0x78
};
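
/*
 * Note: as on other tulip-family chips, the control/status registers are
 * 32-bit wide and spaced 8 bytes apart, which is why DCR0..DCR15 above
 * step by 0x08.
 */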

enum dmfe_CR6_bits {
	CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
	CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
	CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
};

/* Global variable declaration ----------------------------- */
static int printed_version;
static const char version[] =
	"Davicom DM9xxx net driver, version " DRV_VERSION " (" DRV_RELDATE ")";

static int dmfe_debug;
static unsigned char dmfe_media_mode = DMFE_AUTO;
static u32 dmfe_cr6_user_set;

/* For module input parameter */
static int debug;
static u32 cr6set;
static unsigned char mode = 8;
static u8 chkmode = 1;
static u8 HPNA_mode;		/* Default: Low Power/High Speed */
static u8 HPNA_rx_cmd;		/* Default: Disable Rx remote command */
static u8 HPNA_tx_cmd;		/* Default: Don't issue remote command */
static u8 HPNA_NoiseFloor;	/* Default: HPNA NoiseFloor */
static u8 SF_mode;		/* Special Function: 1:VLAN, 2:RX Flow Control
				   4: TX pause packet */
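
/*
 * Illustrative usage (assuming the usual module_param() bindings for
 * these variables near the bottom of this file):
 *
 *	modprobe dmfe mode=1 debug=1
 *
 * where mode follows the DMFE_* media codes above: 0 = 10M half duplex,
 * 1 = 100M half, 4 = 10M full, 5 = 100M full, 8 = auto-sense.
 */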


/* function declaration ------------------------------------- */
static int dmfe_open(struct DEVICE *);
static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
static int dmfe_stop(struct DEVICE *);
static void dmfe_set_filter_mode(struct DEVICE *);
static const struct ethtool_ops netdev_ethtool_ops;
static u16 read_srom_word(void __iomem *, int);
static irqreturn_t dmfe_interrupt(int , void *);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_dmfe (struct net_device *dev);
#endif
static void dmfe_descriptor_init(struct net_device *);
static void allocate_rx_buffer(struct net_device *);
static void update_cr6(u32, void __iomem *);
static void send_filter_frame(struct DEVICE *);
static void dm9132_id_table(struct DEVICE *);
static u16 dmfe_phy_read(void __iomem *, u8, u8, u32);
static void dmfe_phy_write(void __iomem *, u8, u8, u16, u32);
static void dmfe_phy_write_1bit(void __iomem *, u32);
static u16 dmfe_phy_read_1bit(void __iomem *);
static u8 dmfe_sense_speed(struct dmfe_board_info *);
static void dmfe_process_mode(struct dmfe_board_info *);
static void dmfe_timer(unsigned long);
static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
static void dmfe_dynamic_reset(struct DEVICE *);
static void dmfe_free_rxbuffer(struct dmfe_board_info *);
static void dmfe_init_dm910x(struct DEVICE *);
static void dmfe_parse_srom(struct dmfe_board_info *);
static void dmfe_program_DM9801(struct dmfe_board_info *, int);
static void dmfe_program_DM9802(struct dmfe_board_info *);
static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
static void dmfe_set_phyxcer(struct dmfe_board_info *);

/* DM910X network board routine ---------------------------- */

static const struct net_device_ops netdev_ops = {
	.ndo_open 		= dmfe_open,
	.ndo_stop		= dmfe_stop,
	.ndo_start_xmit		= dmfe_start_xmit,
	.ndo_set_rx_mode	= dmfe_set_filter_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_dmfe,
#endif
};

/*
 *	Search DM910X board, allocate space and register it
 */

static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct dmfe_board_info *db;	/* board information structure */
	struct net_device *dev;
	u32 pci_pmr;
	int i, err;

	DMFE_DBUG(0, "dmfe_init_one()", 0);

	if (!printed_version++)
		pr_info("%s\n", version);

	/*
	 *	SPARC on-board DM910x chips should be handled by the main
	 *	tulip driver, except for early DM9100s.
	 */
#ifdef CONFIG_TULIP_DM910X
	if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
	    ent->driver_data == PCI_DM9102_ID) {
		struct device_node *dp = pci_device_to_OF_node(pdev);

		if (dp && of_get_property(dp, "local-mac-address", NULL)) {
			pr_info("skipping on-board DM910x (use tulip)\n");
			return -ENODEV;
		}
	}
#endif

	/* Init network device */
	dev = alloc_etherdev(sizeof(*db));
	if (dev == NULL)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pr_warn("32-bit PCI DMA not available\n");
		err = -ENODEV;
		goto err_out_free;
	}

	/* Enable Master/IO access, Disable memory access */
	err = pci_enable_device(pdev);
	if (err)
		goto err_out_free;

	if (!pci_resource_start(pdev, 0)) {
		pr_err("I/O base is zero\n");
		err = -ENODEV;
		goto err_out_disable;
	}

	if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
		pr_err("Allocated I/O size too small\n");
		err = -ENODEV;
		goto err_out_disable;
	}

#if 0	/* pci_{enable_device,set_master} sets minimum latency for us now */

	/* Set Latency Timer 80h */
	/* FIXME: setting values > 32 breaks some SiS 559x stuff.
	   Need a PCI quirk.. */

	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
#endif

	if (pci_request_regions(pdev, DRV_NAME)) {
		pr_err("Failed to request PCI regions\n");
		err = -ENODEV;
		goto err_out_disable;
	}

	/* Init system & device */
	db = netdev_priv(dev);

	/* Allocate Tx/Rx descriptor memory */
	db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
			DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
	if (!db->desc_pool_ptr) {
		err = -ENOMEM;
		goto err_out_res;
	}

	db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
			TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
	if (!db->buf_pool_ptr) {
		err = -ENOMEM;
		goto err_out_free_desc;
	}

	db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
	db->first_tx_desc_dma = db->desc_pool_dma_ptr;
	db->buf_pool_start = db->buf_pool_ptr;
	db->buf_pool_dma_start = db->buf_pool_dma_ptr;

	db->chip_id = ent->driver_data;
	/* IO type range. */
	db->ioaddr = pci_iomap(pdev, 0, 0);
	if (!db->ioaddr) {
		err = -ENOMEM;
		goto err_out_free_buf;
	}

	db->chip_revision = pdev->revision;
	db->wol_mode = 0;

	db->pdev = pdev;

	pci_set_drvdata(pdev, dev);
	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	netif_carrier_off(dev);
	spin_lock_init(&db->lock);

	pci_read_config_dword(pdev, 0x50, &pci_pmr);
	pci_pmr &= 0x70000;
	if ( (pci_pmr == 0x10000) && (db->chip_revision == 0x31) )
		db->chip_type = 1;	/* DM9102A E3 */
	else
		db->chip_type = 0;

	/* read 64 word srom data */
	for (i = 0; i < 64; i++) {
		((__le16 *) db->srom)[i] =
			cpu_to_le16(read_srom_word(db->ioaddr, i));
	}

	/* Set Node address */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = db->srom[20 + i];

	err = register_netdev (dev);
	if (err)
		goto err_out_unmap;

	dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
		 ent->driver_data >> 16,
		 pci_name(pdev), dev->dev_addr, pdev->irq);

	pci_set_master(pdev);

	return 0;

err_out_unmap:
	pci_iounmap(pdev, db->ioaddr);
err_out_free_buf:
	pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
			    db->buf_pool_ptr, db->buf_pool_dma_ptr);
err_out_free_desc:
	pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
			    db->desc_pool_ptr, db->desc_pool_dma_ptr);
err_out_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	free_netdev(dev);

	return err;
}


static void dmfe_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct dmfe_board_info *db = netdev_priv(dev);

	DMFE_DBUG(0, "dmfe_remove_one()", 0);

	if (dev) {

		unregister_netdev(dev);
		pci_iounmap(db->pdev, db->ioaddr);
		pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
					DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
					db->desc_pool_dma_ptr);
		pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
					db->buf_pool_ptr, db->buf_pool_dma_ptr);
		pci_release_regions(pdev);
		free_netdev(dev);	/* free board information */
	}

	DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
}


/*
 *	Open the interface.
 *	The interface is opened whenever "ifconfig" activates it.
 */

static int dmfe_open(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	const int irq = db->pdev->irq;
	int ret;

	DMFE_DBUG(0, "dmfe_open", 0);

	ret = request_irq(irq, dmfe_interrupt, IRQF_SHARED, dev->name, dev);
	if (ret)
		return ret;

	/* system variable init */
	db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	db->wait_reset = 0;

	db->first_in_callback = 0;
	db->NIC_capability = 0xf;	/* All capability */
	db->PHY_reg4 = 0x1e0;

	/* CR6 operation mode decision */
	if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
		(db->chip_revision >= 0x30) ) {
		db->cr6_data |= DMFE_TXTH_256;
		db->cr0_data = CR0_DEFAULT;
		db->dm910x_chk_mode=4;		/* Enter the normal mode */
	} else {
		db->cr6_data |= CR6_SFT;	/* Store & Forward mode */
		db->cr0_data = 0;
		db->dm910x_chk_mode = 1;	/* Enter the check mode */
	}

	/* Initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Activate system interface */
	netif_wake_queue(dev);

	/* set and activate a timer process */
	init_timer(&db->timer);
	db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
	db->timer.data = (unsigned long)dev;
	db->timer.function = dmfe_timer;
	add_timer(&db->timer);

	return 0;
}


/*	Initialize DM910X board
 *	Reset DM910X board
 *	Initialize TX/Rx descriptor chain structure
 *	Send the set-up frame
 *	Enable Tx/Rx machine
 */

static void dmfe_init_dm910x(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_init_dm910x()", 0);

	/* Reset DM910x MAC controller */
	dw32(DCR0, DM910X_RESET);	/* RESET MAC */
	udelay(100);
	dw32(DCR0, db->cr0_data);
	udelay(5);

	/* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
	db->phy_addr = 1;

	/* Parse SROM and set media mode */
	dmfe_parse_srom(db);
	db->media_mode = dmfe_media_mode;

	/* RESET Phyxcer Chip by GPR port bit 7 */
	dw32(DCR12, 0x180);		/* Let bit 7 output port */
	if (db->chip_id == PCI_DM9009_ID) {
		dw32(DCR12, 0x80);	/* Issue RESET signal */
		mdelay(300);			/* Delay 300 ms */
	}
	dw32(DCR12, 0x0);	/* Clear RESET signal */

	/* Process Phyxcer Media Mode */
	if ( !(db->media_mode & 0x10) )	/* Force 1M mode */
		dmfe_set_phyxcer(db);

	/* Media Mode Process */
	if ( !(db->media_mode & DMFE_AUTO) )
		db->op_mode = db->media_mode; 	/* Force Mode */

	/* Initialize Transmit/Receive descriptor and CR3/4 */
	dmfe_descriptor_init(dev);

	/* Init CR6 to program DM910x operation */
	update_cr6(db->cr6_data, ioaddr);

	/* Send setup frame */
	if (db->chip_id == PCI_DM9132_ID)
		dm9132_id_table(dev);	/* DM9132 */
	else
		send_filter_frame(dev);	/* DM9102/DM9102A */

	/* Init CR7, interrupt active bit */
	db->cr7_data = CR7_DEFAULT;
	dw32(DCR7, db->cr7_data);

	/* Init CR15, Tx jabber and Rx watchdog timer */
	dw32(DCR15, db->cr15_data);

	/* Enable DM910X Tx/Rx function */
	db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
	update_cr6(db->cr6_data, ioaddr);
}


/*
 *	Hardware start transmission.
 *	Send a packet to media from the upper layer.
 */

static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
					 struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	struct tx_desc *txptr;
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_start_xmit", 0);

	/* Too large packet check */
	if (skb->len > MAX_PACKET_SIZE) {
		pr_err("big packet = %d\n", (u16)skb->len);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Resource flag check */
	netif_stop_queue(dev);

	spin_lock_irqsave(&db->lock, flags);

	/* No Tx resource check, it never happens normally */
	if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
		spin_unlock_irqrestore(&db->lock, flags);
		pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
		return NETDEV_TX_BUSY;
	}

	/* Disable NIC interrupt */
	dw32(DCR7, 0);

	/* transmit this packet */
	txptr = db->tx_insert_ptr;
	skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
	txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);

	/* Point to next transmit free descriptor */
	db->tx_insert_ptr = txptr->next_tx_desc;

	/* Transmit Packet Process */
	if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		dw32(DCR1, 0x1);			/* Issue Tx polling */
		dev->trans_start = jiffies;		/* saved time stamp */
	} else {
		db->tx_queue_cnt++;			/* queue TX packet */
		dw32(DCR1, 0x1);			/* Issue Tx polling */
	}

	/* Tx resource check */
	if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
		netif_wake_queue(dev);

	/* Restore CR7 to enable interrupt */
	spin_unlock_irqrestore(&db->lock, flags);
	dw32(DCR7, db->cr7_data);

	/* free this SKB */
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}


/*
 *	Stop the interface.
 *	The interface is stopped when it is brought down.
 */

static int dmfe_stop(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_stop", 0);

	/* disable system */
	netif_stop_queue(dev);

	/* delete timer */
	del_timer_sync(&db->timer);

	/* Reset & stop DM910X board */
	dw32(DCR0, DM910X_RESET);
	udelay(100);
	dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);

	/* free interrupt */
	free_irq(db->pdev->irq, dev);

	/* free allocated rx buffer */
	dmfe_free_rxbuffer(db);

#if 0
	/* show statistic counters */
	printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
	       db->tx_fifo_underrun, db->tx_excessive_collision,
	       db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
	       db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
	       db->reset_fatal, db->reset_TXtimeout);
#endif

	return 0;
}


/*
 *	DM9102 interrupt handler
 *	receive the packet to upper layer, free the transmitted packet
 */

static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
{
	struct DEVICE *dev = dev_id;
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_interrupt()", 0);

	spin_lock_irqsave(&db->lock, flags);

	/* Got DM910X status */
	db->cr5_data = dr32(DCR5);
	dw32(DCR5, db->cr5_data);
	if ( !(db->cr5_data & 0xc1) ) {
		spin_unlock_irqrestore(&db->lock, flags);
		return IRQ_HANDLED;
	}

	/* Disable all interrupts in CR7 to solve the interrupt edge problem */
	dw32(DCR7, 0);

	/* Check system status */
	if (db->cr5_data & 0x2000) {
		/* a system bus error happened */
		DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
		db->reset_fatal++;
		db->wait_reset = 1;	/* Need to RESET */
		spin_unlock_irqrestore(&db->lock, flags);
		return IRQ_HANDLED;
	}

	/* Receive the incoming packet */
	if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
		dmfe_rx_packet(dev, db);

	/* reallocate rx descriptor buffer */
	if (db->rx_avail_cnt<RX_DESC_CNT)
		allocate_rx_buffer(dev);

	/* Free the transmitted descriptor */
	if ( db->cr5_data & 0x01)
		dmfe_free_tx_pkt(dev, db);

	/* Mode Check */
	if (db->dm910x_chk_mode & 0x2) {
		db->dm910x_chk_mode = 0x4;
		db->cr6_data |= 0x100;
		update_cr6(db->cr6_data, ioaddr);
	}

	/* Restore CR7 to enable interrupt mask */
	dw32(DCR7, db->cr7_data);

	spin_unlock_irqrestore(&db->lock, flags);
	return IRQ_HANDLED;
}


#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */

static void poll_dmfe (struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	const int irq = db->pdev->irq;

	/* disable_irq here is not very nice, but with the lockless
	   interrupt handler we have no other choice. */
	disable_irq(irq);
	dmfe_interrupt (irq, dev);
	enable_irq(irq);
}
#endif

/*
 *	Free TX resources after TX complete
 */

static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
{
	struct tx_desc *txptr;
	void __iomem *ioaddr = db->ioaddr;
	u32 tdes0;

	txptr = db->tx_remove_ptr;
	while(db->tx_packet_cnt) {
		tdes0 = le32_to_cpu(txptr->tdes0);
		if (tdes0 & 0x80000000)
			break;

		/* A packet transmission completed */
		db->tx_packet_cnt--;
		dev->stats.tx_packets++;

		/* Transmit statistic counters */
		if ( tdes0 != 0x7fffffff ) {
			dev->stats.collisions += (tdes0 >> 3) & 0xf;
			dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
			if (tdes0 & TDES0_ERR_MASK) {
				dev->stats.tx_errors++;

				if (tdes0 & 0x0002) {	/* UnderRun */
					db->tx_fifo_underrun++;
					if ( !(db->cr6_data & CR6_SFT) ) {
						db->cr6_data = db->cr6_data | CR6_SFT;
						update_cr6(db->cr6_data, ioaddr);
					}
				}
				if (tdes0 & 0x0100)
					db->tx_excessive_collision++;
				if (tdes0 & 0x0200)
					db->tx_late_collision++;
				if (tdes0 & 0x0400)
					db->tx_no_carrier++;
				if (tdes0 & 0x0800)
					db->tx_loss_carrier++;
				if (tdes0 & 0x4000)
					db->tx_jabber_timeout++;
			}
		}

		txptr = txptr->next_tx_desc;
	}/* End of while */

	/* Update TX remove pointer to next */
	db->tx_remove_ptr = txptr;

	/* Send the Tx packet in queue */
	if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		db->tx_queue_cnt--;
		dw32(DCR1, 0x1);			/* Issue Tx polling */
		dev->trans_start = jiffies;		/* saved time stamp */
	}

	/* Resource available check */
	if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
		netif_wake_queue(dev);	/* Activate upper layer, send again */
}


/*
 *	Calculate the CRC value of the Rx packet
 *	flag = 	1 : return the inverted CRC (for the received packet CRC)
 *		0 : return the normal CRC (for Hash Table index)
 */

static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
{
	u32 crc = crc32(~0, Data, Len);
	if (flag) crc = ~crc;
	return crc;
}
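
/*
 * Illustrative uses of cal_CRC() in this driver: with flag = 0 the low
 * six bits of the CRC select one of the 64 multicast hash-table bits,
 * as in dm9132_id_table():
 *
 *	hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f;
 *
 * and with flag = 1 the inverted CRC is compared against the trailing
 * 32-bit FCS of a received frame in dmfe_rx_packet() when the driver is
 * in its CRC-check mode.
 */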


/*
 *	Receive the incoming packets and pass them to the upper layer
 */

static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
{
	struct rx_desc *rxptr;
	struct sk_buff *skb, *newskb;
	int rxlen;
	u32 rdes0;

	rxptr = db->rx_ready_ptr;

	while(db->rx_avail_cnt) {
		rdes0 = le32_to_cpu(rxptr->rdes0);
		if (rdes0 & 0x80000000)	/* packet owner check */
			break;

		db->rx_avail_cnt--;
		db->interval_rx_cnt++;

		pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
				 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);

		if ( (rdes0 & 0x300) != 0x300) {
			/* A packet without First/Last flag */
			/* reuse this SKB */
			DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
			dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
		} else {
			/* A packet with First/Last flag */
			rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;

			/* error summary bit check */
			if (rdes0 & 0x8000) {
				/* This is an error packet */
				dev->stats.rx_errors++;
				if (rdes0 & 1)
					dev->stats.rx_fifo_errors++;
				if (rdes0 & 2)
					dev->stats.rx_crc_errors++;
				if (rdes0 & 0x80)
					dev->stats.rx_length_errors++;
			}

			if ( !(rdes0 & 0x8000) ||
				((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
				skb = rxptr->rx_skb_ptr;

				/* Check whether the received packet CRC needs verifying */
				if ( (db->dm910x_chk_mode & 1) &&
					(cal_CRC(skb->data, rxlen, 1) !=
					(*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
					/* Found an erroneous received packet */
					dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					db->dm910x_chk_mode = 3;
				} else {
					/* Good packet, send to upper layer */
					/* Short packets use a new SKB */
					if ((rxlen < RX_COPY_SIZE) &&
						((newskb = netdev_alloc_skb(dev, rxlen + 2))
						!= NULL)) {

						skb = newskb;
						/* size less than COPY_SIZE, allocate a rxlen SKB */
						skb_reserve(skb, 2); /* 16byte align */
						skb_copy_from_linear_data(rxptr->rx_skb_ptr,
							  skb_put(skb, rxlen),
									  rxlen);
						dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					} else
						skb_put(skb, rxlen);

					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += rxlen;
				}
			} else {
				/* Reuse the SKB buffer when the packet is an error packet */
				DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
				dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
			}
		}

		rxptr = rxptr->next_rx_desc;
	}

	db->rx_ready_ptr = rxptr;
}

/*
 * Set DM910X multicast address
 */

static void dmfe_set_filter_mode(struct DEVICE * dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	unsigned long flags;
	int mc_count = netdev_mc_count(dev);

	DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
	spin_lock_irqsave(&db->lock, flags);

	if (dev->flags & IFF_PROMISC) {
		DMFE_DBUG(0, "Enable PROM Mode", 0);
		db->cr6_data |= CR6_PM | CR6_PBF;
		update_cr6(db->cr6_data, db->ioaddr);
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
		DMFE_DBUG(0, "Pass all multicast address", mc_count);
		db->cr6_data &= ~(CR6_PM | CR6_PBF);
		db->cr6_data |= CR6_PAM;
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	DMFE_DBUG(0, "Set multicast address", mc_count);
	if (db->chip_id == PCI_DM9132_ID)
		dm9132_id_table(dev);	/* DM9132 */
	else
		send_filter_frame(dev);	/* DM9102/DM9102A */
	spin_unlock_irqrestore(&db->lock, flags);
}

/*
 * 	Ethtool interface
 */

static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct dmfe_board_info *np = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}

static int dmfe_ethtool_set_wol(struct net_device *dev,
				struct ethtool_wolinfo *wolinfo)
{
	struct dmfe_board_info *db = netdev_priv(dev);

	if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
				WAKE_ARP | WAKE_MAGICSECURE))
		   return -EOPNOTSUPP;

	db->wol_mode = wolinfo->wolopts;
	return 0;
}

static void dmfe_ethtool_get_wol(struct net_device *dev,
				 struct ethtool_wolinfo *wolinfo)
{
	struct dmfe_board_info *db = netdev_priv(dev);

	wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
	wolinfo->wolopts = db->wol_mode;
}


static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= dmfe_ethtool_get_drvinfo,
	.get_link               = ethtool_op_get_link,
	.set_wol		= dmfe_ethtool_set_wol,
	.get_wol		= dmfe_ethtool_get_wol,
};
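
/*
 * Illustrative: with these ops userspace can query the driver and
 * configure wake-on-LAN, e.g. (hypothetical interface name):
 *
 *	ethtool -i eth0        # driver name/version/bus info
 *	ethtool -s eth0 wol g  # wake on magic packet
 *
 * dmfe_ethtool_set_wol() above accepts only WAKE_PHY (link change) and
 * WAKE_MAGIC.
 */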

/*
 *	A periodic timer routine
 *	Dynamic media sense, allocate Rx buffer...
 */

static void dmfe_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	u32 tmp_cr8;
	unsigned char tmp_cr12;
	unsigned long flags;

	int link_ok, link_ok_phy;

	DMFE_DBUG(0, "dmfe_timer()", 0);
	spin_lock_irqsave(&db->lock, flags);

	/* Media mode process when Link OK before entering this routine */
	if (db->first_in_callback == 0) {
		db->first_in_callback = 1;
		if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
			db->cr6_data &= ~0x40000;
			update_cr6(db->cr6_data, ioaddr);
			dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
			db->cr6_data |= 0x40000;
			update_cr6(db->cr6_data, ioaddr);
			db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
			add_timer(&db->timer);
			spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
	}


	/* Operating Mode Check */
	if ( (db->dm910x_chk_mode & 0x1) &&
		(dev->stats.rx_packets > MAX_CHECK_PACKET) )
		db->dm910x_chk_mode = 0x4;

	/* Dynamic reset DM910X : system error or transmit time-out */
	tmp_cr8 = dr32(DCR8);
	if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
		db->reset_cr8++;
		db->wait_reset = 1;
	}
	db->interval_rx_cnt = 0;

	/* TX polling kick monitor */
	if ( db->tx_packet_cnt &&
	     time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
		dw32(DCR1, 0x1);   /* Tx polling again */

		/* TX Timeout */
		if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
			db->reset_TXtimeout++;
			db->wait_reset = 1;
			dev_warn(&dev->dev, "Tx timeout - resetting\n");
		}
	}

	if (db->wait_reset) {
		DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
		db->reset_count++;
		dmfe_dynamic_reset(dev);
		db->first_in_callback = 0;
		db->timer.expires = DMFE_TIMER_WUT;
		add_timer(&db->timer);
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	/* Link status check, Dynamic media type change */
	if (db->chip_id == PCI_DM9132_ID)
		tmp_cr12 = dr8(DCR9 + 3);	/* DM9132 */
	else
		tmp_cr12 = dr8(DCR12);		/* DM9102/DM9102A */

	if ( ((db->chip_id == PCI_DM9102_ID) &&
		(db->chip_revision == 0x30)) ||
		((db->chip_id == PCI_DM9132_ID) &&
		(db->chip_revision == 0x10)) ) {
		/* DM9102A Chip */
		if (tmp_cr12 & 2)
			link_ok = 0;
		else
			link_ok = 1;
	}
	else
		/* 0x43 is used instead of 0x3 because bit 6 should represent
		   the link status of the external PHY */
		link_ok = (tmp_cr12 & 0x43) ? 1 : 0;


	/* If the chip reports that the link failed, it could be because the
	   external PHY link status pin is not connected correctly to the
	   chip. To be sure, ask the PHY too.
	*/

	/* need a dummy read because of PHY's register latch */
	dmfe_phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
	link_ok_phy = (dmfe_phy_read (db->ioaddr,
				      db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;

	if (link_ok_phy != link_ok) {
		DMFE_DBUG (0, "PHY and chip report different link status", 0);
		link_ok = link_ok | link_ok_phy;
	}

	if ( !link_ok && netif_carrier_ok(dev)) {
		/* Link Failed */
		DMFE_DBUG(0, "Link Failed", tmp_cr12);
		netif_carrier_off(dev);

		/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
		/* AUTO or force 1M Homerun/Longrun don't need this */
		if ( !(db->media_mode & 0x38) )
			dmfe_phy_write(db->ioaddr, db->phy_addr,
				       0, 0x1000, db->chip_id);

		/* AUTO mode, if INT phyxcer link failed, select EXT device */
		if (db->media_mode & DMFE_AUTO) {
			/* 10/100M link failed, use 1M Home-Net */
			db->cr6_data|=0x00040000;	/* bit18=1, MII */
			db->cr6_data&=~0x00000200;	/* bit9=0, HD mode */
			update_cr6(db->cr6_data, ioaddr);
		}
	} else if (!netif_carrier_ok(dev)) {

		DMFE_DBUG(0, "Link OK", tmp_cr12);

		/* Auto Sense Speed */
		if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
			netif_carrier_on(dev);
			SHOW_MEDIA_TYPE(db->op_mode);
		}

		dmfe_process_mode(db);
	}

	/* HPNA remote command check */
	if (db->HPNA_command & 0xf00) {
		db->HPNA_timer--;
		if (!db->HPNA_timer)
			dmfe_HPNA_remote_cmd_chk(db);
	}

	/* Timer active again */
	db->timer.expires = DMFE_TIMER_WUT;
	add_timer(&db->timer);
	spin_unlock_irqrestore(&db->lock, flags);
}


/*
 *	Dynamic reset the DM910X board
 *	Stop DM910X board
 *	Free Tx/Rx allocated memory
 *	Reset DM910X board
 *	Re-initialize DM910X board
 */

static void dmfe_dynamic_reset(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);

	/* Stop MAC controller */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
	update_cr6(db->cr6_data, ioaddr);
	dw32(DCR7, 0);				/* Disable Interrupt */
	dw32(DCR5, dr32(DCR5));

	/* Disable upper layer interface */
	netif_stop_queue(dev);

	/* Free Rx allocated buffer */
	dmfe_free_rxbuffer(db);

	/* system variable init */
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	netif_carrier_off(dev);
	db->wait_reset = 0;

	/* Re-initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Restart upper layer interface */
	netif_wake_queue(dev);
}


/*
 *	free all allocated rx buffers
 */

static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
{
	DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);

	/* free allocated rx buffers */
	while (db->rx_avail_cnt) {
		dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
		db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
		db->rx_avail_cnt--;
	}
}


/*
 *	Reuse the SK buffer
 */

static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
{
	struct rx_desc *rxptr = db->rx_insert_ptr;

	if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
		rxptr->rx_skb_ptr = skb;
		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
			    skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
		wmb();
		rxptr->rdes0 = cpu_to_le32(0x80000000);
		db->rx_avail_cnt++;
		db->rx_insert_ptr = rxptr->next_rx_desc;
	} else
		DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
}


/*
 *	Initialize transmit/receive descriptors
 *	Using the chain structure, and allocate Tx/Rx buffers
 */

static void dmfe_descriptor_init(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	struct tx_desc *tmp_tx;
	struct rx_desc *tmp_rx;
	unsigned char *tmp_buf;
	dma_addr_t tmp_tx_dma, tmp_rx_dma;
	dma_addr_t tmp_buf_dma;
	int i;

	DMFE_DBUG(0, "dmfe_descriptor_init()", 0);

	/* tx descriptor start pointer */
	db->tx_insert_ptr = db->first_tx_desc;
	db->tx_remove_ptr = db->first_tx_desc;
	dw32(DCR4, db->first_tx_desc_dma);     /* TX DESC address */

	/* rx descriptor start pointer */
	db->first_rx_desc = (void *)db->first_tx_desc +
			sizeof(struct tx_desc) * TX_DESC_CNT;

	db->first_rx_desc_dma =  db->first_tx_desc_dma +
			sizeof(struct tx_desc) * TX_DESC_CNT;
	db->rx_insert_ptr = db->first_rx_desc;
	db->rx_ready_ptr = db->first_rx_desc;
	dw32(DCR3, db->first_rx_desc_dma);		/* RX DESC address */

	/* Init Transmit chain */
	tmp_buf = db->buf_pool_start;
	tmp_buf_dma = db->buf_pool_dma_start;
	tmp_tx_dma = db->first_tx_desc_dma;
	for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
		tmp_tx->tx_buf_ptr = tmp_buf;
		tmp_tx->tdes0 = cpu_to_le32(0);
		tmp_tx->tdes1 = cpu_to_le32(0x81000000);	/* IC, chain */
		tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
		tmp_tx_dma += sizeof(struct tx_desc);
		tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
		tmp_tx->next_tx_desc = tmp_tx + 1;
		tmp_buf = tmp_buf + TX_BUF_ALLOC;
		tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
	}
	(--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
	tmp_tx->next_tx_desc = db->first_tx_desc;

	/* Init Receive descriptor chain */
	tmp_rx_dma=db->first_rx_desc_dma;
	for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
		tmp_rx->rdes0 = cpu_to_le32(0);
		tmp_rx->rdes1 = cpu_to_le32(0x01000600);
		tmp_rx_dma += sizeof(struct rx_desc);
		tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
		tmp_rx->next_rx_desc = tmp_rx + 1;
	}
	(--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
	tmp_rx->next_rx_desc = db->first_rx_desc;

	/* pre-allocate Rx buffers */
	allocate_rx_buffer(dev);
}


/*
 *	Update CR6 value
 *	First stop the DM910X, then write the new value and restart
 */

static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
{
	u32 cr6_tmp;

	cr6_tmp = cr6_data & ~0x2002;           /* stop Tx/Rx */
	dw32(DCR6, cr6_tmp);
	udelay(5);
	dw32(DCR6, cr6_data);
	udelay(5);
}
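
/*
 * Note: ~0x2002 clears CR6_RXSC (0x2) and CR6_TXSC (0x2000), the receive
 * and transmit start bits, so the chip is quiesced before the new CR6
 * value is written and Tx/Rx are restarted.
 */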


/*
 *	Send a setup frame for the DM9132
 *	This setup frame initializes the DM910X address filter mode
 */

static void dm9132_id_table(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr + 0xc0;
	u16 *addrptr = (u16 *)dev->dev_addr;
	struct netdev_hw_addr *ha;
	u16 i, hash_table[4];

	/* Node address */
	for (i = 0; i < 3; i++) {
		dw16(0, addrptr[i]);
		ioaddr += 4;
	}

	/* Clear Hash Table */
	memset(hash_table, 0, sizeof(hash_table));

	/* broadcast address */
	hash_table[3] = 0x8000;

	/* the multicast addresses in Hash Table : 64 bits */
	netdev_for_each_mc_addr(ha, dev) {
		u32 hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f;

		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
	}

	/* Write the hash table to the MAC MD table */
	for (i = 0; i < 4; i++, ioaddr += 4)
		dw16(0, hash_table[i]);
}


/*
 *	Send a setup frame for DM9102/DM9102A
 *	This setup frame initializes the DM910X address filter mode
 */

static void send_filter_frame(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct tx_desc *txptr;
	u16 * addrptr;
	u32 * suptr;
	int i;

	DMFE_DBUG(0, "send_filter_frame()", 0);

	txptr = db->tx_insert_ptr;
	suptr = (u32 *) txptr->tx_buf_ptr;

	/* Node address */
	addrptr = (u16 *) dev->dev_addr;
	*suptr++ = addrptr[0];
	*suptr++ = addrptr[1];
	*suptr++ = addrptr[2];

	/* broadcast address */
	*suptr++ = 0xffff;
	*suptr++ = 0xffff;
	*suptr++ = 0xffff;

	/* fit the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addrptr = (u16 *) ha->addr;
		*suptr++ = addrptr[0];
		*suptr++ = addrptr[1];
		*suptr++ = addrptr[2];
	}

	for (i = netdev_mc_count(dev); i < 14; i++) {
		*suptr++ = 0xffff;
		*suptr++ = 0xffff;
		*suptr++ = 0xffff;
	}

	/* prepare the setup frame */
	db->tx_insert_ptr = txptr->next_tx_desc;
	txptr->tdes1 = cpu_to_le32(0x890000c0);

	/* Resource Check and Send the setup packet */
	if (!db->tx_packet_cnt) {
		void __iomem *ioaddr = db->ioaddr;

		/* Resource Empty */
		db->tx_packet_cnt++;
		txptr->tdes0 = cpu_to_le32(0x80000000);
		update_cr6(db->cr6_data | 0x2000, ioaddr);
		dw32(DCR1, 0x1);	/* Issue Tx polling */
		update_cr6(db->cr6_data, ioaddr);
		dev->trans_start = jiffies;
	} else
		db->tx_queue_cnt++;	/* Put in TX queue */
}
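
/*
 * Note (illustrative): the setup frame built above always carries 16
 * perfect-filter entries of three 16-bit words each, one word per
 * 32-bit slot: the node address, the broadcast address, up to 14
 * multicast addresses, and 0xffff padding in unused slots. That is
 * 16 * 3 * 4 = 192 (0xc0) bytes, matching the length field in the
 * tdes1 value 0x890000c0.
 */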


/*
 *	Allocate rx buffers,
 *	allocating as many Rx buffers as possible
 */

static void allocate_rx_buffer(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	struct rx_desc *rxptr;
	struct sk_buff *skb;

	rxptr = db->rx_insert_ptr;

	while(db->rx_avail_cnt < RX_DESC_CNT) {
		if ( ( skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE) ) == NULL )
			break;
		rxptr->rx_skb_ptr = skb; /* FIXME (?) */
		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
				    RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
		wmb();
		rxptr->rdes0 = cpu_to_le32(0x80000000);
		rxptr = rxptr->next_rx_desc;
		db->rx_avail_cnt++;
	}

	db->rx_insert_ptr = rxptr;
}

static void srom_clk_write(void __iomem *ioaddr, u32 data)
{
	static const u32 cmd[] = {
		CR9_SROM_READ | CR9_SRCS,
		CR9_SROM_READ | CR9_SRCS | CR9_SRCLK,
		CR9_SROM_READ | CR9_SRCS
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(cmd); i++) {
		dw32(DCR9, data | cmd[i]);
		udelay(5);
	}
}

/*
 *	Read one word of data from the serial ROM
 */
static u16 read_srom_word(void __iomem *ioaddr, int offset)
{
	u16 srom_data;
	int i;

	dw32(DCR9, CR9_SROM_READ);
	udelay(5);
	dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
	udelay(5);

	/* Send the Read Command 110b */
	srom_clk_write(ioaddr, SROM_DATA_1);
	srom_clk_write(ioaddr, SROM_DATA_1);
	srom_clk_write(ioaddr, SROM_DATA_0);

	/* Send the offset */
	for (i = 5; i >= 0; i--) {
		srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
		srom_clk_write(ioaddr, srom_data);
	}

	dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
	udelay(5);

	for (i = 16; i > 0; i--) {
		dw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
		udelay(5);
		srom_data = (srom_data << 1) |
				((dr32(DCR9) & CR9_CRDOUT) ? 1 : 0);
		dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
		udelay(5);
	}

	dw32(DCR9, CR9_SROM_READ);
	udelay(5);
	return srom_data;
}
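
/*
 * Note: the waveform above matches the usual 93C46-style serial EEPROM
 * read transaction (an assumption from the bit pattern, not a statement
 * from a datasheet): raise chip select, clock out the read opcode 110b,
 * clock out a 6-bit word address MSB first, then clock in 16 data bits.
 */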


/*
 *	Auto sense the media mode
 */

static u8 dmfe_sense_speed(struct dmfe_board_info *db)
{
	void __iomem *ioaddr = db->ioaddr;
	u8 ErrFlag = 0;
	u16 phy_mode;

	/* CR6 bit18=0, select 10/100M */
	update_cr6(db->cr6_data & ~0x40000, ioaddr);

	phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
	phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);

	if ( (phy_mode & 0x24) == 0x24 ) {
		if (db->chip_id == PCI_DM9132_ID)	/* DM9132 */
			phy_mode = dmfe_phy_read(db->ioaddr,
						 db->phy_addr, 7, db->chip_id) & 0xf000;
		else 				/* DM9102/DM9102A */
			phy_mode = dmfe_phy_read(db->ioaddr,
						 db->phy_addr, 17, db->chip_id) & 0xf000;
		switch (phy_mode) {
		case 0x1000: db->op_mode = DMFE_10MHF; break;
		case 0x2000: db->op_mode = DMFE_10MFD; break;
		case 0x4000: db->op_mode = DMFE_100MHF; break;
		case 0x8000: db->op_mode = DMFE_100MFD; break;
		default: db->op_mode = DMFE_10MHF;
			ErrFlag = 1;
			break;
		}
	} else {
		db->op_mode = DMFE_10MHF;
		DMFE_DBUG(0, "Link Failed :", phy_mode);
		ErrFlag = 1;
	}

	return ErrFlag;
}


/*
 *	Set 10/100 phyxcer capability
 *	AUTO mode : phyxcer register4 is NIC capability
 *	Force mode: phyxcer register4 is the force media
 */

static void dmfe_set_phyxcer(struct dmfe_board_info *db)
{
	void __iomem *ioaddr = db->ioaddr;
	u16 phy_reg;

	/* Select 10/100M phyxcer */
	db->cr6_data &= ~0x40000;
	update_cr6(db->cr6_data, ioaddr);

	/* DM9009 Chip: Phyxcer reg18 bit12=0 */
	if (db->chip_id == PCI_DM9009_ID) {
		phy_reg = dmfe_phy_read(db->ioaddr,
					db->phy_addr, 18, db->chip_id) & ~0x1000;

		dmfe_phy_write(db->ioaddr,
			       db->phy_addr, 18, phy_reg, db->chip_id);
	}

	/* Phyxcer capability setting */
	phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;

	if (db->media_mode & DMFE_AUTO) {
		/* AUTO Mode */
		phy_reg |= db->PHY_reg4;
	} else {
		/* Force Mode */
		switch(db->media_mode) {
		case DMFE_10MHF: phy_reg |= 0x20; break;
		case DMFE_10MFD: phy_reg |= 0x40; break;
		case DMFE_100MHF: phy_reg |= 0x80; break;
		case DMFE_100MFD: phy_reg |= 0x100; break;
		}
		if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;
	}

	/* Write new capability to Phyxcer Reg4 */
	if ( !(phy_reg & 0x01e0)) {
		phy_reg|=db->PHY_reg4;
		db->media_mode|=DMFE_AUTO;
	}
	dmfe_phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);

	/* Restart Auto-Negotiation */
	if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
		dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
	if ( !db->chip_type )
		dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
}


/*
 *	Process op-mode
 *	AUTO mode : PHY controller in Auto-negotiation Mode
 *	Force mode: PHY controller in force mode with HUB
 *			N-way force capability with SWITCH
 */

static void dmfe_process_mode(struct dmfe_board_info *db)
{
	u16 phy_reg;

	/* Full Duplex Mode Check */
	if (db->op_mode & 0x4)
		db->cr6_data |= CR6_FDM;	/* Set Full Duplex Bit */
	else
		db->cr6_data &= ~CR6_FDM;	/* Clear Full Duplex Bit */

	/* Transceiver Selection */
	if (db->op_mode & 0x10)		/* 1M HomePNA */
		db->cr6_data |= 0x40000;/* External MII select */
	else
		db->cr6_data &= ~0x40000;/* Internal 10/100 transceiver */

	update_cr6(db->cr6_data, db->ioaddr);

	/* 10/100M phyxcer force mode needed */
	if ( !(db->media_mode & 0x18)) {
		/* Force Mode */
		phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
		if ( !(phy_reg & 0x1) ) {
			/* partner without N-Way capability */
			phy_reg = 0x0;
			switch(db->op_mode) {
			case DMFE_10MHF: phy_reg = 0x0; break;
			case DMFE_10MFD: phy_reg = 0x100; break;
			case DMFE_100MHF: phy_reg = 0x2000; break;
			case DMFE_100MFD: phy_reg = 0x2100; break;
			}
			dmfe_phy_write(db->ioaddr,
				       db->phy_addr, 0, phy_reg, db->chip_id);
			if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
				mdelay(20);
			dmfe_phy_write(db->ioaddr,
				       db->phy_addr, 0, phy_reg, db->chip_id);
		}
	}
}
1784 
1785 
1786 /*
1787  *	Write a word to Phy register
1788  */
1789 
1790 static void dmfe_phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
1791 			   u16 phy_data, u32 chip_id)
1792 {
1793 	u16 i;
1794 
1795 	if (chip_id == PCI_DM9132_ID) {
1796 		dw16(0x80 + offset * 4, phy_data);
1797 	} else {
1798 		/* DM9102/DM9102A Chip */
1799 
1800 		/* Send 33 synchronization clock to Phy controller */
1801 		for (i = 0; i < 35; i++)
1802 			dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1803 
1804 		/* Send start command(01) to Phy */
1805 		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1806 		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1807 
1808 		/* Send write command(01) to Phy */
1809 		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1810 		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1811 
1812 		/* Send Phy address */
1813 		for (i = 0x10; i > 0; i = i >> 1)
1814 			dmfe_phy_write_1bit(ioaddr,
1815 					    phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1816 
1817 		/* Send register address */
1818 		for (i = 0x10; i > 0; i = i >> 1)
1819 			dmfe_phy_write_1bit(ioaddr,
1820 					    offset & i ? PHY_DATA_1 : PHY_DATA_0);
1821 
		/* Turnaround: write transition (10) */
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Write one word of data to the PHY controller, MSB first */
		for (i = 0x8000; i > 0; i >>= 1)
			dmfe_phy_write_1bit(ioaddr,
					    phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
1830 	}
1831 }
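
/*
 *	The bit-banging in dmfe_phy_write()/dmfe_phy_read() implements a
 *	standard IEEE 802.3 clause-22 MII management frame:
 *
 *	  <preamble: 32+ ones> <start: 01> <op: 01=write, 10=read>
 *	  <5-bit PHY address> <5-bit register address>
 *	  <turnaround: 10 for write, tri-stated for read> <16 data bits, MSB first>
 *
 *	Each call to dmfe_phy_write_1bit() shifts one bit out on MDIO while
 *	toggling the MDC clock; the read path clocks one extra bit to skip
 *	the turnaround before sampling the 16 data bits.
 */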
1832 
1833 
1834 /*
 *	Read a data word from a PHY register
1836  */
1837 
1838 static u16 dmfe_phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
1839 {
1840 	int i;
1841 	u16 phy_data;
1842 
1843 	if (chip_id == PCI_DM9132_ID) {
1844 		/* DM9132 Chip */
1845 		phy_data = dr16(0x80 + offset * 4);
1846 	} else {
1847 		/* DM9102/DM9102A Chip */
1848 
		/* Send 35 synchronization clocks (MII preamble) to the PHY controller */
1850 		for (i = 0; i < 35; i++)
1851 			dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1852 
1853 		/* Send start command(01) to Phy */
1854 		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1855 		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1856 
1857 		/* Send read command(10) to Phy */
1858 		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1859 		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1860 
1861 		/* Send Phy address */
1862 		for (i = 0x10; i > 0; i = i >> 1)
1863 			dmfe_phy_write_1bit(ioaddr,
1864 					    phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1865 
1866 		/* Send register address */
1867 		for (i = 0x10; i > 0; i = i >> 1)
1868 			dmfe_phy_write_1bit(ioaddr,
1869 					    offset & i ? PHY_DATA_1 : PHY_DATA_0);
1870 
1871 		/* Skip transition state */
1872 		dmfe_phy_read_1bit(ioaddr);
1873 
		/* Read the 16-bit data word, MSB first */
1875 		for (phy_data = 0, i = 0; i < 16; i++) {
1876 			phy_data <<= 1;
1877 			phy_data |= dmfe_phy_read_1bit(ioaddr);
1878 		}
1879 	}
1880 
1881 	return phy_data;
1882 }
1883 
1884 
1885 /*
 *	Write one data bit to the PHY controller
1887  */
1888 
1889 static void dmfe_phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
1890 {
1891 	dw32(DCR9, phy_data);		/* MII Clock Low */
1892 	udelay(1);
1893 	dw32(DCR9, phy_data | MDCLKH);	/* MII Clock High */
1894 	udelay(1);
1895 	dw32(DCR9, phy_data);		/* MII Clock Low */
1896 	udelay(1);
1897 }
1898 
1899 
1900 /*
 *	Read one data bit from the PHY controller
1902  */
1903 
1904 static u16 dmfe_phy_read_1bit(void __iomem *ioaddr)
1905 {
1906 	u16 phy_data;
1907 
1908 	dw32(DCR9, 0x50000);
1909 	udelay(1);
1910 	phy_data = (dr32(DCR9) >> 19) & 0x1;
1911 	dw32(DCR9, 0x40000);
1912 	udelay(1);
1913 
1914 	return phy_data;
1915 }
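
/*
 *	A note on the CR9 (DCR9) encoding assumed by the two helpers above
 *	(inferred from their usage here, not taken from a datasheet):
 *	bit 16 (MDCLKH) drives the MDC clock, the PHY_DATA_0/PHY_DATA_1
 *	values drive MDIO output, bit 18 (0x40000) appears to tri-state
 *	the output driver for a read cycle, and the MDIO input is sampled
 *	from bit 19.
 */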
1916 
1917 
/*
 *	Parse the SROM and determine the media mode
 */

static void dmfe_parse_srom(struct dmfe_board_info *db)
{
	char *srom = db->srom;
1925 	int dmfe_mode, tmp_reg;
1926 
1927 	DMFE_DBUG(0, "dmfe_parse_srom() ", 0);
1928 
1929 	/* Init CR15 */
1930 	db->cr15_data = CR15_DEFAULT;
1931 
1932 	/* Check SROM Version */
	if (((int) srom[18] & 0xff) == SROM_V41_CODE) {
1934 		/* SROM V4.01 */
		/* Get the media modes the NIC supports */
1936 		db->NIC_capability = le16_to_cpup((__le16 *) (srom + 34));
1937 		db->PHY_reg4 = 0;
1938 		for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
			switch (db->NIC_capability & tmp_reg) {
1940 			case 0x1: db->PHY_reg4 |= 0x0020; break;
1941 			case 0x2: db->PHY_reg4 |= 0x0040; break;
1942 			case 0x4: db->PHY_reg4 |= 0x0080; break;
1943 			case 0x8: db->PHY_reg4 |= 0x0100; break;
1944 			}
1945 		}
1946 
		/* Check whether a media mode is forced */
		dmfe_mode = (le32_to_cpup((__le32 *) (srom + 34)) &
			     le32_to_cpup((__le32 *) (srom + 36)));
		switch (dmfe_mode) {
1951 		case 0x4: dmfe_media_mode = DMFE_100MHF; break;	/* 100MHF */
1952 		case 0x2: dmfe_media_mode = DMFE_10MFD; break;	/* 10MFD */
1953 		case 0x8: dmfe_media_mode = DMFE_100MFD; break;	/* 100MFD */
1954 		case 0x100:
1955 		case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
1956 		}
1957 
1958 		/* Special Function setting */
1959 		/* VLAN function */
		if ((SF_mode & 0x1) || (srom[43] & 0x80))
			db->cr15_data |= 0x40;

		/* Flow Control */
		if ((SF_mode & 0x2) || (srom[40] & 0x1))
			db->cr15_data |= 0x400;

		/* TX pause packet */
		if ((SF_mode & 0x4) || (srom[40] & 0xe))
1969 			db->cr15_data |= 0x9800;
1970 	}
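
	/*
	 * SROM V4.01 fields used above (offsets into db->srom): byte 18
	 * holds the format version, the 16-bit word at offset 34 holds
	 * the NIC media capability, the values at offsets 34 and 36 are
	 * ANDed to detect a forced media mode, byte 40 carries the
	 * flow-control and pause flags, and byte 43 bit 7 enables VLAN.
	 */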
1971 
1972 	/* Parse HPNA parameter */
1973 	db->HPNA_command = 1;
1974 
1975 	/* Accept remote command or not */
1976 	if (HPNA_rx_cmd == 0)
1977 		db->HPNA_command |= 0x8000;
1978 
	/* Issue remote command & operation mode */
	if (HPNA_tx_cmd == 1)
		switch (HPNA_mode) {	/* Issue Remote Command */
		case 0: db->HPNA_command |= 0x0904; break;
		case 1: db->HPNA_command |= 0x0a00; break;
		case 2: db->HPNA_command |= 0x0506; break;
		case 3: db->HPNA_command |= 0x0602; break;
		}
	else
		switch (HPNA_mode) {	/* Don't Issue */
		case 0: db->HPNA_command |= 0x0004; break;
		case 1: db->HPNA_command |= 0x0000; break;
		case 2: db->HPNA_command |= 0x0006; break;
		case 3: db->HPNA_command |= 0x0002; break;
		}
1994 
	/* Check whether a DM9801 or DM9802 is present */
	db->HPNA_present = 0;
	update_cr6(db->cr6_data | 0x40000, db->ioaddr);
	tmp_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
	if ((tmp_reg & 0xfff0) == 0xb900) {
		/* DM9801 or DM9802 present */
		db->HPNA_timer = 8;
		if (dmfe_phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
2003 			/* DM9801 HomeRun */
2004 			db->HPNA_present = 1;
2005 			dmfe_program_DM9801(db, tmp_reg);
2006 		} else {
2007 			/* DM9802 LongRun */
2008 			db->HPNA_present = 2;
2009 			dmfe_program_DM9802(db);
2010 		}
	}
}
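
/*
 *	Companion-chip detection above: PHY register 3 (PHY ID2) reading
 *	0xb90x identifies a DM9801/DM9802 HomePNA companion; register 31
 *	equal to 0x4404 marks the DM9801 (HomeRun), anything else the
 *	DM9802 (LongRun). The low nibble of the ID selects the silicon
 *	revision handled in dmfe_program_DM9801() below.
 */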
2014 
2015 
2016 /*
2017  *	Init HomeRun DM9801
2018  */
2019 
static void dmfe_program_DM9801(struct dmfe_board_info *db, int HPNA_rev)
{
	uint reg17, reg25;

	if (!HPNA_NoiseFloor)
		HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
	switch (HPNA_rev) {
2026 	case 0xb900: /* DM9801 E3 */
2027 		db->HPNA_command |= 0x1000;
2028 		reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
2029 		reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
2030 		reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2031 		break;
2032 	case 0xb901: /* DM9801 E4 */
2033 		reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2034 		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
2035 		reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2036 		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
2037 		break;
2038 	case 0xb902: /* DM9801 E5 */
2039 	case 0xb903: /* DM9801 E6 */
2040 	default:
2041 		db->HPNA_command |= 0x1000;
2042 		reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2043 		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
2044 		reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2045 		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
2046 		break;
2047 	}
2048 	dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2049 	dmfe_phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
2050 	dmfe_phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
2051 }
2052 
2053 
2054 /*
2055  *	Init HomeRun DM9802
2056  */
2057 
static void dmfe_program_DM9802(struct dmfe_board_info *db)
{
	uint phy_reg;

	if (!HPNA_NoiseFloor)
		HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
	dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
	phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
	phy_reg = (phy_reg & 0xff00) + HPNA_NoiseFloor;
2066 	dmfe_phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
2067 }
2068 
2069 
/*
 *	Check the remote HPNA power and speed status. If they do not
 *	match our setting, issue the command again.
 */
2074 
static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info *db)
{
	uint phy_reg;

	/* Get the remote device status */
	phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
	switch (phy_reg) {
	case 0x00: phy_reg = 0x0a00; break; /* LP/LS */
	case 0x20: phy_reg = 0x0900; break; /* LP/HS */
	case 0x40: phy_reg = 0x0600; break; /* HP/LS */
	case 0x60: phy_reg = 0x0500; break; /* HP/HS */
	}

	/* Check whether the remote device status matches our setting */
	if (phy_reg != (db->HPNA_command & 0x0f00)) {
		dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
			       db->chip_id);
		db->HPNA_timer = 8;
	} else
		db->HPNA_timer = 600;	/* Matched: check again after ~10 minutes */
2095 }
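
/*
 *	In the LP/HP and LS/HS comments above, L/H stand for low/high and
 *	P/S for power/speed, i.e. the two HomePNA link attributes the
 *	remote device reports in register 17 bits 5-6 (an interpretation
 *	of the original comments, not taken from a datasheet).
 */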
2096 
2097 
2098 
2099 static const struct pci_device_id dmfe_pci_tbl[] = {
2100 	{ 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
2101 	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
2102 	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
2103 	{ 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
2104 	{ 0, }
2105 };
2106 MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
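
/*
 *	The last field of each ID table entry (driver_data) carries the
 *	matching PCI_DM9xxx_ID constant, so the probe routine can recover
 *	the chip variant directly from the matched table entry.
 */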
2107 
2108 
2109 #ifdef CONFIG_PM
2110 static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
2111 {
2112 	struct net_device *dev = pci_get_drvdata(pci_dev);
2113 	struct dmfe_board_info *db = netdev_priv(dev);
2114 	void __iomem *ioaddr = db->ioaddr;
2115 	u32 tmp;
2116 
2117 	/* Disable upper layer interface */
2118 	netif_device_detach(dev);
2119 
2120 	/* Disable Tx/Rx */
2121 	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
2122 	update_cr6(db->cr6_data, ioaddr);
2123 
2124 	/* Disable Interrupt */
2125 	dw32(DCR7, 0);
2126 	dw32(DCR5, dr32(DCR5));
2127 
	/* Free RX buffers */
2129 	dmfe_free_rxbuffer(db);
2130 
2131 	/* Enable WOL */
2132 	pci_read_config_dword(pci_dev, 0x40, &tmp);
2133 	tmp &= ~(DMFE_WOL_LINKCHANGE|DMFE_WOL_MAGICPACKET);
2134 
2135 	if (db->wol_mode & WAKE_PHY)
2136 		tmp |= DMFE_WOL_LINKCHANGE;
2137 	if (db->wol_mode & WAKE_MAGIC)
2138 		tmp |= DMFE_WOL_MAGICPACKET;
2139 
2140 	pci_write_config_dword(pci_dev, 0x40, tmp);
2141 
2142 	pci_enable_wake(pci_dev, PCI_D3hot, 1);
2143 	pci_enable_wake(pci_dev, PCI_D3cold, 1);
2144 
	/* Power down the device */
	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
2148 
2149 	return 0;
2150 }
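
/*
 *	db->wol_mode is normally configured through the driver's ethtool
 *	WOL hooks, e.g. (assuming the interface is named eth0):
 *
 *	  ethtool -s eth0 wol g     # wake on magic packet (WAKE_MAGIC)
 *	  ethtool -s eth0 wol p     # wake on PHY/link change (WAKE_PHY)
 *
 *	which map to the vendor WOL bits written into PCI config dword
 *	0x40 above.
 */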
2151 
2152 static int dmfe_resume(struct pci_dev *pci_dev)
2153 {
2154 	struct net_device *dev = pci_get_drvdata(pci_dev);
2155 	u32 tmp;
2156 
2157 	pci_set_power_state(pci_dev, PCI_D0);
2158 	pci_restore_state(pci_dev);
2159 
2160 	/* Re-initialize DM910X board */
2161 	dmfe_init_dm910x(dev);
2162 
2163 	/* Disable WOL */
2164 	pci_read_config_dword(pci_dev, 0x40, &tmp);
2165 
2166 	tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET);
2167 	pci_write_config_dword(pci_dev, 0x40, tmp);
2168 
2169 	pci_enable_wake(pci_dev, PCI_D3hot, 0);
2170 	pci_enable_wake(pci_dev, PCI_D3cold, 0);
2171 
2172 	/* Restart upper layer interface */
2173 	netif_device_attach(dev);
2174 
2175 	return 0;
2176 }
2177 #else
2178 #define dmfe_suspend NULL
2179 #define dmfe_resume NULL
2180 #endif
2181 
2182 static struct pci_driver dmfe_driver = {
2183 	.name		= "dmfe",
2184 	.id_table	= dmfe_pci_tbl,
2185 	.probe		= dmfe_init_one,
2186 	.remove		= dmfe_remove_one,
2187 	.suspend        = dmfe_suspend,
2188 	.resume         = dmfe_resume
2189 };
2190 
2191 MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
2192 MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
2193 MODULE_LICENSE("GPL");
2194 MODULE_VERSION(DRV_VERSION);
2195 
2196 module_param(debug, int, 0);
2197 module_param(mode, byte, 0);
2198 module_param(cr6set, int, 0);
2199 module_param(chkmode, byte, 0);
2200 module_param(HPNA_mode, byte, 0);
2201 module_param(HPNA_rx_cmd, byte, 0);
2202 module_param(HPNA_tx_cmd, byte, 0);
2203 module_param(HPNA_NoiseFloor, byte, 0);
2204 module_param(SF_mode, byte, 0);
2205 MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
2206 MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
2207 		"Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
2208 
2209 MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
		"(bit 0: VLAN, bit 1: Flow Control, bit 2: TX pause packet)");
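
/*
 *	Example invocation (hypothetical values; see the parameter
 *	descriptions above):
 *
 *	  modprobe dmfe debug=1 SF_mode=1
 *
 *	loads the driver with debug logging enabled and the VLAN special
 *	function (SF_mode bit 0) forced on.
 */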
2211 
/*	Description:
 *	When the user loads the module with insmod, the kernel invokes
 *	dmfe_init_module() to initialize and register the driver.
 */
2216 
2217 static int __init dmfe_init_module(void)
2218 {
2219 	int rc;
2220 
2221 	pr_info("%s\n", version);
2222 	printed_version = 1;
2223 
2224 	DMFE_DBUG(0, "init_module() ", debug);
2225 
2226 	if (debug)
2227 		dmfe_debug = debug;	/* set debug flag */
2228 	if (cr6set)
2229 		dmfe_cr6_user_set = cr6set;
2230 
	switch (mode) {
	case DMFE_10MHF:
	case DMFE_100MHF:
	case DMFE_10MFD:
	case DMFE_100MFD:
	case DMFE_1M_HPNA:
		dmfe_media_mode = mode;
		break;
	default:
		dmfe_media_mode = DMFE_AUTO;
		break;
	}
2242 
2243 	if (HPNA_mode > 4)
2244 		HPNA_mode = 0;		/* Default: LP/HS */
2245 	if (HPNA_rx_cmd > 1)
2246 		HPNA_rx_cmd = 0;	/* Default: Ignored remote cmd */
2247 	if (HPNA_tx_cmd > 1)
2248 		HPNA_tx_cmd = 0;	/* Default: Don't issue remote cmd */
2249 	if (HPNA_NoiseFloor > 15)
2250 		HPNA_NoiseFloor = 0;
2251 
2252 	rc = pci_register_driver(&dmfe_driver);
2253 	if (rc < 0)
2254 		return rc;
2255 
2256 	return 0;
2257 }
2258 
2259 
/*
 *	Description:
 *	When the user removes the module with rmmod, the kernel invokes
 *	dmfe_cleanup_module() to unregister the driver.
 */
2265 
2266 static void __exit dmfe_cleanup_module(void)
2267 {
2268 	DMFE_DBUG(0, "dmfe_cleanup_module() ", debug);
2269 	pci_unregister_driver(&dmfe_driver);
2270 }
2271 
2272 module_init(dmfe_init_module);
2273 module_exit(dmfe_cleanup_module);
2274