xref: /linux/drivers/net/ethernet/dec/tulip/dmfe.c (revision b889fcf63cb62e7fdb7816565e28f44dbe4a76a5)
1 /*
2     A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
3     ethernet driver for Linux.
4     Copyright (C) 1997  Sten Wang
5 
6     This program is free software; you can redistribute it and/or
7     modify it under the terms of the GNU General Public License
8     as published by the Free Software Foundation; either version 2
9     of the License, or (at your option) any later version.
10 
11     This program is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15 
16     DAVICOM Web-Site: www.davicom.com.tw
17 
18     Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
19     Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>
20 
21     (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
22 
23     Marcelo Tosatti <marcelo@conectiva.com.br> :
24     Made it compile in 2.3 (device to net_device)
25 
26     Alan Cox <alan@lxorguk.ukuu.org.uk> :
27     Cleaned up for kernel merge.
28     Removed the back compatibility support
29     Reformatted, fixing spelling etc as I went
30     Removed IRQ 0-15 assumption
31 
32     Jeff Garzik <jgarzik@pobox.com> :
33     Updated to use new PCI driver API.
34     Resource usage cleanups.
35     Report driver version to user.
36 
37     Tobias Ringstrom <tori@unhappy.mine.nu> :
38     Cleaned up and added SMP safety.  Thanks go to Jeff Garzik,
39     Andrew Morton and Frank Davis for the SMP safety fixes.
40 
41     Vojtech Pavlik <vojtech@suse.cz> :
42     Cleaned up pointer arithmetics.
43     Fixed a lot of 64bit issues.
44     Cleaned up printk()s a bit.
45     Fixed some obvious big endian problems.
46 
47     Tobias Ringstrom <tori@unhappy.mine.nu> :
48     Use time_after for jiffies calculation.  Added ethtool
49     support.  Updated PCI resource allocation.  Do not
50     forget to unmap PCI mapped skbs.
51 
52     Alan Cox <alan@lxorguk.ukuu.org.uk>
53     Added new PCI identifiers provided by Clear Zhang at ALi
54     for their 1563 ethernet device.
55 
56     TODO
57 
58     Check on 64 bit boxes.
59     Check and fix on big endian boxes.
60 
61     Test and make sure PCI latency is now correct for all cases.
62 */
63 
64 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
65 
66 #define DRV_NAME	"dmfe"
67 #define DRV_VERSION	"1.36.4"
68 #define DRV_RELDATE	"2002-01-17"
69 
70 #include <linux/module.h>
71 #include <linux/kernel.h>
72 #include <linux/string.h>
73 #include <linux/timer.h>
74 #include <linux/ptrace.h>
75 #include <linux/errno.h>
76 #include <linux/ioport.h>
77 #include <linux/interrupt.h>
78 #include <linux/pci.h>
79 #include <linux/dma-mapping.h>
80 #include <linux/init.h>
81 #include <linux/netdevice.h>
82 #include <linux/etherdevice.h>
83 #include <linux/ethtool.h>
84 #include <linux/skbuff.h>
85 #include <linux/delay.h>
86 #include <linux/spinlock.h>
87 #include <linux/crc32.h>
88 #include <linux/bitops.h>
89 
90 #include <asm/processor.h>
91 #include <asm/io.h>
92 #include <asm/dma.h>
93 #include <asm/uaccess.h>
94 #include <asm/irq.h>
95 
96 #ifdef CONFIG_TULIP_DM910X
97 #include <linux/of.h>
98 #endif
99 
100 
101 /* Board/System/Debug information/definition ---------------- */
102 #define PCI_DM9132_ID   0x91321282      /* Davicom DM9132 ID */
103 #define PCI_DM9102_ID   0x91021282      /* Davicom DM9102 ID */
104 #define PCI_DM9100_ID   0x91001282      /* Davicom DM9100 ID */
105 #define PCI_DM9009_ID   0x90091282      /* Davicom DM9009 ID */
106 
107 #define DM9102_IO_SIZE  0x80
108 #define DM9102A_IO_SIZE 0x100
109 #define TX_MAX_SEND_CNT 0x1             /* Maximum tx packet per time */
110 #define TX_DESC_CNT     0x10            /* Allocated Tx descriptors */
111 #define RX_DESC_CNT     0x20            /* Allocated Rx descriptors */
112 #define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)	/* Max TX packet count */
113 #define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)	/* TX wakeup count */
114 #define DESC_ALL_CNT    (TX_DESC_CNT + RX_DESC_CNT)
115 #define TX_BUF_ALLOC    0x600
116 #define RX_ALLOC_SIZE   0x620
117 #define DM910X_RESET    1
118 #define CR0_DEFAULT     0x00E00000      /* TX & RX burst mode */
119 #define CR6_DEFAULT     0x00080000      /* HD */
120 #define CR7_DEFAULT     0x180c1
121 #define CR15_DEFAULT    0x06            /* TxJabber RxWatchdog */
122 #define TDES0_ERR_MASK  0x4302          /* TXJT, LC, EC, FUE */
123 #define MAX_PACKET_SIZE 1514
124 #define DMFE_MAX_MULTICAST 14
125 #define RX_COPY_SIZE	100
126 #define MAX_CHECK_PACKET 0x8000
127 #define DM9801_NOISE_FLOOR 8
128 #define DM9802_NOISE_FLOOR 5
129 
130 #define DMFE_WOL_LINKCHANGE	0x20000000
131 #define DMFE_WOL_SAMPLEPACKET	0x10000000
132 #define DMFE_WOL_MAGICPACKET	0x08000000
133 
134 
135 #define DMFE_10MHF      0
136 #define DMFE_100MHF     1
137 #define DMFE_10MFD      4
138 #define DMFE_100MFD     5
139 #define DMFE_AUTO       8
140 #define DMFE_1M_HPNA    0x10
141 
142 #define DMFE_TXTH_72	0x400000	/* TX TH 72 byte */
143 #define DMFE_TXTH_96	0x404000	/* TX TH 96 byte */
144 #define DMFE_TXTH_128	0x0000		/* TX TH 128 byte */
145 #define DMFE_TXTH_256	0x4000		/* TX TH 256 byte */
146 #define DMFE_TXTH_512	0x8000		/* TX TH 512 byte */
147 #define DMFE_TXTH_1K	0xC000		/* TX TH 1K  byte */
148 
149 #define DMFE_TIMER_WUT  (jiffies + HZ * 1)/* timer wakeup time : 1 second */
150 #define DMFE_TX_TIMEOUT ((3*HZ)/2)	/* tx packet time-out time 1.5 s" */
151 #define DMFE_TX_KICK 	(HZ/2)	/* tx packet Kick-out time 0.5 s" */
152 
153 #define dw32(reg, val)	iowrite32(val, ioaddr + (reg))
154 #define dw16(reg, val)	iowrite16(val, ioaddr + (reg))
155 #define dr32(reg)	ioread32(ioaddr + (reg))
156 #define dr16(reg)	ioread16(ioaddr + (reg))
157 #define dr8(reg)	ioread8(ioaddr + (reg))
158 
159 #define DMFE_DBUG(dbug_now, msg, value)			\
160 	do {						\
161 		if (dmfe_debug || (dbug_now))		\
162 			pr_err("%s %lx\n",		\
163 			       (msg), (long) (value));	\
164 	} while (0)
165 
166 #define SHOW_MEDIA_TYPE(mode)				\
167 	pr_info("Change Speed to %sMhz %s duplex\n" ,	\
168 		(mode & 1) ? "100":"10",		\
169 		(mode & 4) ? "full":"half");
170 
171 
172 /* CR9 definition: SROM/MII */
173 #define CR9_SROM_READ   0x4800
174 #define CR9_SRCS        0x1
175 #define CR9_SRCLK       0x2
176 #define CR9_CRDOUT      0x8
177 #define SROM_DATA_0     0x0
178 #define SROM_DATA_1     0x4
179 #define PHY_DATA_1      0x20000
180 #define PHY_DATA_0      0x00000
181 #define MDCLKH          0x10000
182 
183 #define PHY_POWER_DOWN	0x800
184 
185 #define SROM_V41_CODE   0x14
186 
187 #define __CHK_IO_SIZE(pci_id, dev_rev) \
188  (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \
189 	DM9102A_IO_SIZE: DM9102_IO_SIZE)
190 
191 #define CHK_IO_SIZE(pci_dev) \
192 	(__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
193 	(pci_dev)->revision))
194 
195 /* Sten Check */
196 #define DEVICE net_device
197 
198 /* Structure/enum declaration ------------------------------- */
/* Hardware Tx descriptor.  The first four little-endian words (tdes0-3)
 * are owned by the DM910x DMA engine; the trailing fields are
 * driver-private bookkeeping.  aligned(32) matches the 0x20 slack added
 * when the descriptor pool is allocated in dmfe_init_one().
 */
struct tx_desc {
        __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
        char *tx_buf_ptr;               /* Data for us: CPU address of this slot's Tx buffer */
        struct tx_desc *next_tx_desc;   /* software link forming the circular Tx ring */
} __attribute__(( aligned(32) ));
204 
/* Hardware Rx descriptor.  rdes0-3 are the little-endian words read and
 * written by the chip; the trailing fields are driver-private.
 */
struct rx_desc {
	__le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
	struct sk_buff *rx_skb_ptr;	/* Data for us: skb mapped into this slot */
	struct rx_desc *next_rx_desc;	/* software link forming the circular Rx ring */
} __attribute__(( aligned(32) ));
210 
/* Per-adapter private state, stored in the net_device private area
 * (see netdev_priv() uses throughout).  Protected by 'lock' where the
 * interrupt handler and the Tx path touch the same fields.
 */
struct dmfe_board_info {
	u32 chip_id;			/* Chip vendor/Device ID */
	u8 chip_revision;		/* Chip revision */
	struct net_device *next_dev;	/* next device */
	struct pci_dev *pdev;		/* PCI device */
	spinlock_t lock;		/* serializes xmit path vs. interrupt handler */

	void __iomem *ioaddr;		/* I/O base address */
	u32 cr0_data;			/* cached value programmed into DCR0 (bus mode) */
	u32 cr5_data;			/* last interrupt status read from DCR5 */
	u32 cr6_data;			/* cached operation-mode register (DCR6) value */
	u32 cr7_data;			/* cached interrupt-mask register (DCR7) value */
	u32 cr15_data;			/* cached watchdog/jabber register (DCR15) value */

	/* pointer for memory physical address */
	dma_addr_t buf_pool_dma_ptr;	/* Tx buffer pool memory */
	dma_addr_t buf_pool_dma_start;	/* Tx buffer pool align dword */
	dma_addr_t desc_pool_dma_ptr;	/* descriptor pool memory */
	dma_addr_t first_tx_desc_dma;	/* bus address of first Tx descriptor */
	dma_addr_t first_rx_desc_dma;	/* bus address of first Rx descriptor */

	/* descriptor pointer */
	unsigned char *buf_pool_ptr;	/* Tx buffer pool memory */
	unsigned char *buf_pool_start;	/* Tx buffer pool align dword */
	unsigned char *desc_pool_ptr;	/* descriptor pool memory */
	struct tx_desc *first_tx_desc;	/* head of the Tx descriptor ring */
	struct tx_desc *tx_insert_ptr;	/* next free Tx slot (producer) */
	struct tx_desc *tx_remove_ptr;	/* next Tx slot to reclaim (consumer) */
	struct rx_desc *first_rx_desc;	/* head of the Rx descriptor ring */
	struct rx_desc *rx_insert_ptr;	/* next Rx slot to refill with an skb */
	struct rx_desc *rx_ready_ptr;	/* packet come pointer */
	unsigned long tx_packet_cnt;	/* transmitted packet count */
	unsigned long tx_queue_cnt;	/* wait to send packet count */
	unsigned long rx_avail_cnt;	/* available rx descriptor count */
	unsigned long interval_rx_cnt;	/* rx packet count a callback time */

	u16 HPNA_command;		/* For HPNA register 16 */
	u16 HPNA_timer;			/* For HPNA remote device check */
	u16 dbug_cnt;			/* debug counter */
	u16 NIC_capability;		/* NIC media capability */
	u16 PHY_reg4;			/* Saved Phyxcer register 4 value */

	u8 HPNA_present;		/* 0:none, 1:DM9801, 2:DM9802 */
	u8 chip_type;			/* Keep DM9102A chip type */
	u8 media_mode;			/* user specify media mode */
	u8 op_mode;			/* real work media mode */
	u8 phy_addr;			/* MII address of the PHY (set to 1 in init) */
	u8 wait_reset;			/* Hardware failed, need to reset */
	u8 dm910x_chk_mode;		/* Operating mode check */
	u8 first_in_callback;		/* Flag to record state */
	u8 wol_mode;			/* user WOL settings */
	struct timer_list timer;	/* periodic link/housekeeping timer (dmfe_timer) */

	/* Driver defined statistic counter */
	unsigned long tx_fifo_underrun;
	unsigned long tx_loss_carrier;
	unsigned long tx_no_carrier;
	unsigned long tx_late_collision;
	unsigned long tx_excessive_collision;
	unsigned long tx_jabber_timeout;
	unsigned long reset_count;
	unsigned long reset_cr8;
	unsigned long reset_fatal;
	unsigned long reset_TXtimeout;

	/* NIC SROM data */
	unsigned char srom[128];	/* raw copy of the 64-word serial EEPROM */
};
279 
/* Register offsets (DCR0..DCR15); the chip spaces its 32-bit control
 * registers 8 bytes apart, hence the 0x08 stride.
 */
enum dmfe_offsets {
	DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
	DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
	DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
	DCR15 = 0x78
};
286 
/* Bit definitions for the CR6 (DCR6) operation-mode register.
 * RXSC/TXSC start the receive/transmit state machines; SFT selects
 * store-and-forward transmit mode (see dmfe_free_tx_pkt underrun
 * recovery).
 */
enum dmfe_CR6_bits {
	CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
	CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
	CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
};
292 
/* Global variable declaration ----------------------------- */
static int printed_version;		/* print the version banner only once */
static const char version[] =
	"Davicom DM9xxx net driver, version " DRV_VERSION " (" DRV_RELDATE ")";

static int dmfe_debug;			/* non-zero enables DMFE_DBUG() output */
static unsigned char dmfe_media_mode = DMFE_AUTO;	/* active media selection */
static u32 dmfe_cr6_user_set;		/* user-supplied CR6 override (from cr6set) */

/* For module input parameter */
static int debug;			/* module param: copied into dmfe_debug */
static u32 cr6set;			/* module param: copied into dmfe_cr6_user_set */
static unsigned char mode = 8;		/* module param: media mode, 8 = DMFE_AUTO */
static u8 chkmode = 1;			/* module param: enable DM910x check mode */
static u8 HPNA_mode;		/* Default: Low Power/High Speed */
static u8 HPNA_rx_cmd;		/* Default: Disable Rx remote command */
static u8 HPNA_tx_cmd;		/* Default: Don't issue remote command */
static u8 HPNA_NoiseFloor;	/* Default: HPNA NoiseFloor */
static u8 SF_mode;		/* Special Function: 1:VLAN, 2:RX Flow Control
				   4: TX pause packet */
313 
314 
315 /* function declaration ------------------------------------- */
316 static int dmfe_open(struct DEVICE *);
317 static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
318 static int dmfe_stop(struct DEVICE *);
319 static void dmfe_set_filter_mode(struct DEVICE *);
320 static const struct ethtool_ops netdev_ethtool_ops;
321 static u16 read_srom_word(void __iomem *, int);
322 static irqreturn_t dmfe_interrupt(int , void *);
323 #ifdef CONFIG_NET_POLL_CONTROLLER
324 static void poll_dmfe (struct net_device *dev);
325 #endif
326 static void dmfe_descriptor_init(struct net_device *);
327 static void allocate_rx_buffer(struct net_device *);
328 static void update_cr6(u32, void __iomem *);
329 static void send_filter_frame(struct DEVICE *);
330 static void dm9132_id_table(struct DEVICE *);
331 static u16 phy_read(void __iomem *, u8, u8, u32);
332 static void phy_write(void __iomem *, u8, u8, u16, u32);
333 static void phy_write_1bit(void __iomem *, u32);
334 static u16 phy_read_1bit(void __iomem *);
335 static u8 dmfe_sense_speed(struct dmfe_board_info *);
336 static void dmfe_process_mode(struct dmfe_board_info *);
337 static void dmfe_timer(unsigned long);
338 static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
339 static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
340 static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
341 static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
342 static void dmfe_dynamic_reset(struct DEVICE *);
343 static void dmfe_free_rxbuffer(struct dmfe_board_info *);
344 static void dmfe_init_dm910x(struct DEVICE *);
345 static void dmfe_parse_srom(struct dmfe_board_info *);
346 static void dmfe_program_DM9801(struct dmfe_board_info *, int);
347 static void dmfe_program_DM9802(struct dmfe_board_info *);
348 static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
349 static void dmfe_set_phyxcer(struct dmfe_board_info *);
350 
351 /* DM910X network board routine ---------------------------- */
352 
/* net_device_operations hooks for the DM910x; standard eth_* helpers are
 * used for MTU/MAC-address handling.
 */
static const struct net_device_ops netdev_ops = {
	.ndo_open 		= dmfe_open,
	.ndo_stop		= dmfe_stop,
	.ndo_start_xmit		= dmfe_start_xmit,
	.ndo_set_rx_mode	= dmfe_set_filter_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_dmfe,	/* netconsole-style polling */
#endif
};
365 
366 /*
367  *	Search DM910X board ,allocate space and register it
368  */
369 
/*
 * PCI probe routine: enable the device, claim its I/O region, allocate
 * the coherent Tx/Rx descriptor and Tx buffer pools, read the MAC
 * address from the on-chip SROM and register the net_device.
 * Returns 0 on success or a negative errno; on failure every resource
 * acquired so far is unwound in reverse order via the err_out_* labels.
 */
static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct dmfe_board_info *db;	/* board information structure */
	struct net_device *dev;
	u32 pci_pmr;
	int i, err;

	DMFE_DBUG(0, "dmfe_init_one()", 0);

	if (!printed_version++)
		pr_info("%s\n", version);

	/*
	 *	SPARC on-board DM910x chips should be handled by the main
	 *	tulip driver, except for early DM9100s.
	 */
#ifdef CONFIG_TULIP_DM910X
	if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
	    ent->driver_data == PCI_DM9102_ID) {
		struct device_node *dp = pci_device_to_OF_node(pdev);

		/* An OF "local-mac-address" property marks a built-in
		 * chip that the tulip driver owns; bail out here. */
		if (dp && of_get_property(dp, "local-mac-address", NULL)) {
			pr_info("skipping on-board DM910x (use tulip)\n");
			return -ENODEV;
		}
	}
#endif

	/* Init network device */
	dev = alloc_etherdev(sizeof(*db));
	if (dev == NULL)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* The chip only does 32-bit DMA */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pr_warn("32-bit PCI DMA not available\n");
		err = -ENODEV;
		goto err_out_free;
	}

	/* Enable Master/IO access, Disable memory access */
	err = pci_enable_device(pdev);
	if (err)
		goto err_out_free;

	if (!pci_resource_start(pdev, 0)) {
		pr_err("I/O base is zero\n");
		err = -ENODEV;
		goto err_out_disable;
	}

	/* DM9102A/DM9132 expose a larger register window than DM9102;
	 * CHK_IO_SIZE() picks the required size from device id/revision. */
	if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
		pr_err("Allocated I/O size too small\n");
		err = -ENODEV;
		goto err_out_disable;
	}

#if 0	/* pci_{enable_device,set_master} sets minimum latency for us now */

	/* Set Latency Timer 80h */
	/* FIXME: setting values > 32 breaks some SiS 559x stuff.
	   Need a PCI quirk.. */

	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
#endif

	if (pci_request_regions(pdev, DRV_NAME)) {
		pr_err("Failed to request PCI regions\n");
		err = -ENODEV;
		goto err_out_disable;
	}

	/* Init system & device */
	db = netdev_priv(dev);

	/* Allocate Tx/Rx descriptor memory; the extra 0x20 bytes allow
	 * for the 32-byte descriptor alignment. */
	db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
			DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
	if (!db->desc_pool_ptr) {
		err = -ENOMEM;
		goto err_out_res;
	}

	db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
			TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
	if (!db->buf_pool_ptr) {
		err = -ENOMEM;
		goto err_out_free_desc;
	}

	db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
	db->first_tx_desc_dma = db->desc_pool_dma_ptr;
	db->buf_pool_start = db->buf_pool_ptr;
	db->buf_pool_dma_start = db->buf_pool_dma_ptr;

	db->chip_id = ent->driver_data;
	/* IO type range. */
	db->ioaddr = pci_iomap(pdev, 0, 0);
	if (!db->ioaddr) {
		err = -ENOMEM;
		goto err_out_free_buf;
	}

	db->chip_revision = pdev->revision;
	db->wol_mode = 0;

	db->pdev = pdev;

	pci_set_drvdata(pdev, dev);
	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	netif_carrier_off(dev);
	spin_lock_init(&db->lock);

	/* PMR config word at 0x50: identify the DM9102A E3 stepping,
	 * which gets special treatment (chip_type == 1) elsewhere. */
	pci_read_config_dword(pdev, 0x50, &pci_pmr);
	pci_pmr &= 0x70000;
	if ( (pci_pmr == 0x10000) && (db->chip_revision == 0x31) )
		db->chip_type = 1;	/* DM9102A E3 */
	else
		db->chip_type = 0;

	/* read 64 word srom data */
	for (i = 0; i < 64; i++) {
		((__le16 *) db->srom)[i] =
			cpu_to_le16(read_srom_word(db->ioaddr, i));
	}

	/* Set Node address: the MAC lives at byte offset 20 of the SROM */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = db->srom[20 + i];

	err = register_netdev (dev);
	if (err)
		goto err_out_unmap;

	dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
		 ent->driver_data >> 16,
		 pci_name(pdev), dev->dev_addr, pdev->irq);

	pci_set_master(pdev);

	return 0;

	/* Error unwind: strictly the reverse of the acquisition order. */
err_out_unmap:
	pci_iounmap(pdev, db->ioaddr);
err_out_free_buf:
	pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
			    db->buf_pool_ptr, db->buf_pool_dma_ptr);
err_out_free_desc:
	pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
			    db->desc_pool_ptr, db->desc_pool_dma_ptr);
err_out_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);

	return err;
}
531 
532 
533 static void dmfe_remove_one(struct pci_dev *pdev)
534 {
535 	struct net_device *dev = pci_get_drvdata(pdev);
536 	struct dmfe_board_info *db = netdev_priv(dev);
537 
538 	DMFE_DBUG(0, "dmfe_remove_one()", 0);
539 
540  	if (dev) {
541 
542 		unregister_netdev(dev);
543 		pci_iounmap(db->pdev, db->ioaddr);
544 		pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
545 					DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
546  					db->desc_pool_dma_ptr);
547 		pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
548 					db->buf_pool_ptr, db->buf_pool_dma_ptr);
549 		pci_release_regions(pdev);
550 		free_netdev(dev);	/* free board information */
551 
552 		pci_set_drvdata(pdev, NULL);
553 	}
554 
555 	DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
556 }
557 
558 
559 /*
560  *	Open the interface.
561  *	The interface is opened whenever "ifconfig" actives it.
562  */
563 
/*
 * ndo_open: request the (shared) IRQ, reset the software state, pick the
 * CR0/CR6 operating mode, program the hardware via dmfe_init_dm910x()
 * and start the periodic housekeeping timer.
 * Returns 0 on success or the error from request_irq().
 */
static int dmfe_open(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	const int irq = db->pdev->irq;
	int ret;

	DMFE_DBUG(0, "dmfe_open", 0);

	ret = request_irq(irq, dmfe_interrupt, IRQF_SHARED, dev->name, dev);
	if (ret)
		return ret;

	/* system variable init */
	db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	db->wait_reset = 0;

	db->first_in_callback = 0;
	db->NIC_capability = 0xf;	/* All capability*/
	db->PHY_reg4 = 0x1e0;

	/* CR6 operation mode decision: DM9132 and DM9102A (rev >= 0x30)
	 * run in normal mode with a 256-byte Tx threshold; older chips
	 * enter the store-and-forward "check" mode instead. */
	if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
		(db->chip_revision >= 0x30) ) {
    		db->cr6_data |= DMFE_TXTH_256;
		db->cr0_data = CR0_DEFAULT;
		db->dm910x_chk_mode=4;		/* Enter the normal mode */
 	} else {
		db->cr6_data |= CR6_SFT;	/* Store & Forward mode */
		db->cr0_data = 0;
		db->dm910x_chk_mode = 1;	/* Enter the check mode */
	}

	/* Initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Active System Interface */
	netif_wake_queue(dev);

	/* set and active a timer process; first expiry is pushed out an
	 * extra 2 seconds beyond the normal 1-second period. */
	init_timer(&db->timer);
	db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
	db->timer.data = (unsigned long)dev;
	db->timer.function = dmfe_timer;
	add_timer(&db->timer);

	return 0;
}
614 
615 
616 /*	Initialize DM910X board
617  *	Reset DM910X board
618  *	Initialize TX/Rx descriptor chain structure
619  *	Send the set-up frame
620  *	Enable Tx/Rx machine
621  */
622 
/*
 * Bring the DM910x out of reset and program it for operation:
 * MAC reset, SROM parse, PHY reset via GPR, media selection,
 * descriptor ring setup, setup-frame transmission and finally
 * enabling the Tx/Rx state machines.  The register write order
 * follows the hardware's required bring-up sequence.
 */
static void dmfe_init_dm910x(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_init_dm910x()", 0);

	/* Reset DM910x MAC controller */
	dw32(DCR0, DM910X_RESET);	/* RESET MAC */
	udelay(100);
	dw32(DCR0, db->cr0_data);
	udelay(5);

	/* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
	db->phy_addr = 1;

	/* Parser SROM and media mode */
	dmfe_parse_srom(db);
	db->media_mode = dmfe_media_mode;

	/* RESET Phyxcer Chip by GPR port bit 7 */
	dw32(DCR12, 0x180);		/* Let bit 7 output port */
	if (db->chip_id == PCI_DM9009_ID) {
		dw32(DCR12, 0x80);	/* Issue RESET signal */
		mdelay(300);			/* Delay 300 ms */
	}
	dw32(DCR12, 0x0);	/* Clear RESET signal */

	/* Process Phyxcer Media Mode; skipped when the user forced
	 * 1M HomePNA mode (bit 0x10). */
	if ( !(db->media_mode & 0x10) )	/* Force 1M mode */
		dmfe_set_phyxcer(db);

	/* Media Mode Process: anything other than DMFE_AUTO is a
	 * user-forced operating mode. */
	if ( !(db->media_mode & DMFE_AUTO) )
		db->op_mode = db->media_mode; 	/* Force Mode */

	/* Initialize Transmit/Receive decriptor and CR3/4 */
	dmfe_descriptor_init(dev);

	/* Init CR6 to program DM910x operation */
	update_cr6(db->cr6_data, ioaddr);

	/* Send setup frame: the DM9132 has a dedicated id table,
	 * the DM9102/DM9102A take a setup frame through the Tx ring. */
	if (db->chip_id == PCI_DM9132_ID)
		dm9132_id_table(dev);	/* DM9132 */
	else
		send_filter_frame(dev);	/* DM9102/DM9102A */

	/* Init CR7, interrupt active bit */
	db->cr7_data = CR7_DEFAULT;
	dw32(DCR7, db->cr7_data);

	/* Init CR15, Tx jabber and Rx watchdog timer */
	dw32(DCR15, db->cr15_data);

	/* Enable DM910X Tx/Rx function */
	db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
	update_cr6(db->cr6_data, ioaddr);
}
682 
683 
684 /*
685  *	Hardware start transmission.
686  *	Send a packet to media from the upper layer.
687  */
688 
689 static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
690 					 struct DEVICE *dev)
691 {
692 	struct dmfe_board_info *db = netdev_priv(dev);
693 	void __iomem *ioaddr = db->ioaddr;
694 	struct tx_desc *txptr;
695 	unsigned long flags;
696 
697 	DMFE_DBUG(0, "dmfe_start_xmit", 0);
698 
699 	/* Too large packet check */
700 	if (skb->len > MAX_PACKET_SIZE) {
701 		pr_err("big packet = %d\n", (u16)skb->len);
702 		dev_kfree_skb(skb);
703 		return NETDEV_TX_OK;
704 	}
705 
706 	/* Resource flag check */
707 	netif_stop_queue(dev);
708 
709 	spin_lock_irqsave(&db->lock, flags);
710 
711 	/* No Tx resource check, it never happen nromally */
712 	if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
713 		spin_unlock_irqrestore(&db->lock, flags);
714 		pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
715 		return NETDEV_TX_BUSY;
716 	}
717 
718 	/* Disable NIC interrupt */
719 	dw32(DCR7, 0);
720 
721 	/* transmit this packet */
722 	txptr = db->tx_insert_ptr;
723 	skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
724 	txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
725 
726 	/* Point to next transmit free descriptor */
727 	db->tx_insert_ptr = txptr->next_tx_desc;
728 
729 	/* Transmit Packet Process */
730 	if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
731 		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
732 		db->tx_packet_cnt++;			/* Ready to send */
733 		dw32(DCR1, 0x1);			/* Issue Tx polling */
734 		dev->trans_start = jiffies;		/* saved time stamp */
735 	} else {
736 		db->tx_queue_cnt++;			/* queue TX packet */
737 		dw32(DCR1, 0x1);			/* Issue Tx polling */
738 	}
739 
740 	/* Tx resource check */
741 	if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
742 		netif_wake_queue(dev);
743 
744 	/* Restore CR7 to enable interrupt */
745 	spin_unlock_irqrestore(&db->lock, flags);
746 	dw32(DCR7, db->cr7_data);
747 
748 	/* free this SKB */
749 	dev_kfree_skb(skb);
750 
751 	return NETDEV_TX_OK;
752 }
753 
754 
755 /*
756  *	Stop the interface.
757  *	The interface is stopped when it is brought.
758  */
759 
/*
 * ndo_stop: tear down in the reverse of dmfe_open(): stop the queue,
 * kill the housekeeping timer, reset the MAC, power down the PHY,
 * release the IRQ and free all mapped receive buffers.
 * Always returns 0.
 */
static int dmfe_stop(struct DEVICE *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_stop", 0);

	/* disable system */
	netif_stop_queue(dev);

	/* deleted timer */
	del_timer_sync(&db->timer);

	/* Reset & stop DM910X board */
	dw32(DCR0, DM910X_RESET);
	udelay(100);
	/* PHY register 0 bit 15 (0x8000) powers the transceiver down */
	phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);

	/* free interrupt */
	free_irq(db->pdev->irq, dev);

	/* free allocated rx buffer */
	dmfe_free_rxbuffer(db);

#if 0
	/* show statistic counter */
	printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
	       db->tx_fifo_underrun, db->tx_excessive_collision,
	       db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
	       db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
	       db->reset_fatal, db->reset_TXtimeout);
#endif

	return 0;
}
795 
796 
797 /*
798  *	DM9102 insterrupt handler
799  *	receive the packet to upper layer, free the transmitted packet
800  */
801 
/*
 * Shared interrupt handler: acknowledge the status in CR5, then service
 * receive completions, Rx-ring refill and transmit completions.  All
 * chip interrupts are masked (DCR7 = 0) while the rings are processed,
 * to work around the chip's edge-triggered interrupt behaviour, and the
 * mask is restored from the cached cr7_data on exit.
 */
static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
{
	struct DEVICE *dev = dev_id;
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_interrupt()", 0);

	spin_lock_irqsave(&db->lock, flags);

	/* Got DM910X status; writing the value back acknowledges it */
	db->cr5_data = dr32(DCR5);
	dw32(DCR5, db->cr5_data);
	/* Nothing we care about (no Rx/Tx/error bits in 0xc1)?  Then this
	 * was not for us (shared IRQ) or already handled. */
	if ( !(db->cr5_data & 0xc1) ) {
		spin_unlock_irqrestore(&db->lock, flags);
		return IRQ_HANDLED;
	}

	/* Disable all interrupt in CR7 to solve the interrupt edge problem */
	dw32(DCR7, 0);

	/* Check system status: bit 0x2000 flags a fatal bus error; defer
	 * the actual reset to the timer via wait_reset. */
	if (db->cr5_data & 0x2000) {
		/* system bus error happen */
		DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
		db->reset_fatal++;
		db->wait_reset = 1;	/* Need to RESET */
		spin_unlock_irqrestore(&db->lock, flags);
		return IRQ_HANDLED;
	}

	 /* Received the coming packet */
	if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
		dmfe_rx_packet(dev, db);

	/* reallocate rx descriptor buffer */
	if (db->rx_avail_cnt<RX_DESC_CNT)
		allocate_rx_buffer(dev);

	/* Free the transmitted descriptor */
	if ( db->cr5_data & 0x01)
		dmfe_free_tx_pkt(dev, db);

	/* Mode Check: leave the diagnostic check mode once triggered */
	if (db->dm910x_chk_mode & 0x2) {
		db->dm910x_chk_mode = 0x4;
		db->cr6_data |= 0x100;
		update_cr6(db->cr6_data, ioaddr);
	}

	/* Restore CR7 to enable interrupt mask */
	dw32(DCR7, db->cr7_data);

	spin_unlock_irqrestore(&db->lock, flags);
	return IRQ_HANDLED;
}
859 
860 
861 #ifdef CONFIG_NET_POLL_CONTROLLER
862 /*
863  * Polling 'interrupt' - used by things like netconsole to send skbs
864  * without having to re-enable interrupts. It's not called while
865  * the interrupt routine is executing.
866  */
867 
static void poll_dmfe (struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	const int irq = db->pdev->irq;

	/* disable_irq here is not very nice, but with the lockless
	   interrupt handler we have no other choice. */
	disable_irq(irq);
	/* Run the normal handler as if the hardware had raised the IRQ */
	dmfe_interrupt (irq, dev);
	enable_irq(irq);
}
879 #endif
880 
881 /*
882  *	Free TX resource after TX complete
883  */
884 
/*
 * Reclaim completed Tx descriptors: walk the ring from tx_remove_ptr
 * until the first descriptor still owned by the chip (tdes0 bit 31),
 * updating statistics from the status word, then launch the next queued
 * packet if any.  Called from the interrupt handler with db->lock held.
 */
static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
{
	struct tx_desc *txptr;
	void __iomem *ioaddr = db->ioaddr;
	u32 tdes0;

	txptr = db->tx_remove_ptr;
	while(db->tx_packet_cnt) {
		tdes0 = le32_to_cpu(txptr->tdes0);
		if (tdes0 & 0x80000000)	/* still owned by the chip */
			break;

		/* A packet sent completed */
		db->tx_packet_cnt--;
		dev->stats.tx_packets++;

		/* Transmit statistic counter (0x7fffffff marks a status
		 * word with nothing to report) */
		if ( tdes0 != 0x7fffffff ) {
			dev->stats.collisions += (tdes0 >> 3) & 0xf;
			dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
			if (tdes0 & TDES0_ERR_MASK) {
				dev->stats.tx_errors++;

				if (tdes0 & 0x0002) {	/* UnderRun */
					db->tx_fifo_underrun++;
					/* Fall back to store-and-forward mode to
					 * avoid further FIFO underruns */
					if ( !(db->cr6_data & CR6_SFT) ) {
						db->cr6_data = db->cr6_data | CR6_SFT;
						update_cr6(db->cr6_data, ioaddr);
					}
				}
				if (tdes0 & 0x0100)
					db->tx_excessive_collision++;
				if (tdes0 & 0x0200)
					db->tx_late_collision++;
				if (tdes0 & 0x0400)
					db->tx_no_carrier++;
				if (tdes0 & 0x0800)
					db->tx_loss_carrier++;
				if (tdes0 & 0x4000)
					db->tx_jabber_timeout++;
			}
		}

    		txptr = txptr->next_tx_desc;
	}/* End of while */

	/* Update TX remove pointer to next */
	db->tx_remove_ptr = txptr;

	/* Send the Tx packet in queue: hand the next already-copied
	 * frame to the chip by setting its owner bit and polling. */
	if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		db->tx_queue_cnt--;
		dw32(DCR1, 0x1);			/* Issue Tx polling */
		dev->trans_start = jiffies;		/* saved time stamp */
	}

	/* Resource available check */
	if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
		netif_wake_queue(dev);	/* Active upper layer, send again */
}
947 
948 
949 /*
950  *	Calculate the CRC valude of the Rx packet
951  *	flag = 	1 : return the reverse CRC (for the received packet CRC)
952  *		0 : return the normal CRC (for Hash Table index)
953  */
954 
955 static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
956 {
957 	u32 crc = crc32(~0, Data, Len);
958 	if (flag) crc = ~crc;
959 	return crc;
960 }
961 
962 
963 /*
964  *	Receive the come packet and pass to upper layer
965  */
966 
/*
 * Drain the receive ring: walk from rx_ready_ptr over every descriptor
 * the chip has handed back (rdes0 bit 31 clear), unmap the skb and
 * either pass it up the stack or recycle it into the ring on error.
 * Small frames (< RX_COPY_SIZE) are copied into a fresh skb so the
 * full-size receive buffer can be reused immediately.
 * Called from the interrupt handler with db->lock held.
 */
static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
{
	struct rx_desc *rxptr;
	struct sk_buff *skb, *newskb;
	int rxlen;
	u32 rdes0;

	rxptr = db->rx_ready_ptr;

	while(db->rx_avail_cnt) {
		rdes0 = le32_to_cpu(rxptr->rdes0);
		if (rdes0 & 0x80000000)	/* packet owner check */
			break;

		db->rx_avail_cnt--;
		db->interval_rx_cnt++;

		pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
				 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);

		if ( (rdes0 & 0x300) != 0x300) {
			/* A packet without First/Last flag */
			/* reuse this SKB */
			DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
			dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
		} else {
			/* A packet with First/Last flag */
			/* frame length from rdes0, minus the 4-byte CRC */
			rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;

			/* error summary bit check */
			if (rdes0 & 0x8000) {
				/* This is a error packet */
				dev->stats.rx_errors++;
				if (rdes0 & 1)
					dev->stats.rx_fifo_errors++;
				if (rdes0 & 2)
					dev->stats.rx_crc_errors++;
				if (rdes0 & 0x80)
					dev->stats.rx_length_errors++;
			}

			/* Deliver good packets; in promiscuous mode (CR6_PM)
			 * even errored frames longer than 6 bytes go up. */
			if ( !(rdes0 & 0x8000) ||
				((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
				skb = rxptr->rx_skb_ptr;

				/* Received Packet CRC check need or not:
				 * in check mode, recompute the CRC and compare
				 * against the trailing CRC in the buffer. */
				if ( (db->dm910x_chk_mode & 1) &&
					(cal_CRC(skb->data, rxlen, 1) !=
					(*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
					/* Found a error received packet */
					dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					db->dm910x_chk_mode = 3;
				} else {
					/* Good packet, send to upper layer */
					/* Shorst packet used new SKB */
					if ((rxlen < RX_COPY_SIZE) &&
						((newskb = netdev_alloc_skb(dev, rxlen + 2))
						!= NULL)) {

						skb = newskb;
						/* size less than COPY_SIZE, allocate a rxlen SKB */
						skb_reserve(skb, 2); /* 16byte align */
						skb_copy_from_linear_data(rxptr->rx_skb_ptr,
							  skb_put(skb, rxlen),
									  rxlen);
						/* original buffer goes back into the ring */
						dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					} else
						skb_put(skb, rxlen);

					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += rxlen;
				}
			} else {
				/* Reuse SKB buffer when the packet is error */
				DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
				dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
			}
		}

		rxptr = rxptr->next_rx_desc;
	}

	db->rx_ready_ptr = rxptr;
}
1053 
1054 /*
1055  * Set DM910X multicast address
1056  */
1057 
1058 static void dmfe_set_filter_mode(struct DEVICE * dev)
1059 {
1060 	struct dmfe_board_info *db = netdev_priv(dev);
1061 	unsigned long flags;
1062 	int mc_count = netdev_mc_count(dev);
1063 
1064 	DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
1065 	spin_lock_irqsave(&db->lock, flags);
1066 
1067 	if (dev->flags & IFF_PROMISC) {
1068 		DMFE_DBUG(0, "Enable PROM Mode", 0);
1069 		db->cr6_data |= CR6_PM | CR6_PBF;
1070 		update_cr6(db->cr6_data, db->ioaddr);
1071 		spin_unlock_irqrestore(&db->lock, flags);
1072 		return;
1073 	}
1074 
1075 	if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
1076 		DMFE_DBUG(0, "Pass all multicast address", mc_count);
1077 		db->cr6_data &= ~(CR6_PM | CR6_PBF);
1078 		db->cr6_data |= CR6_PAM;
1079 		spin_unlock_irqrestore(&db->lock, flags);
1080 		return;
1081 	}
1082 
1083 	DMFE_DBUG(0, "Set multicast address", mc_count);
1084 	if (db->chip_id == PCI_DM9132_ID)
1085 		dm9132_id_table(dev);	/* DM9132 */
1086 	else
1087 		send_filter_frame(dev);	/* DM9102/DM9102A */
1088 	spin_unlock_irqrestore(&db->lock, flags);
1089 }
1090 
1091 /*
1092  * 	Ethtool interace
1093  */
1094 
1095 static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
1096 			       struct ethtool_drvinfo *info)
1097 {
1098 	struct dmfe_board_info *np = netdev_priv(dev);
1099 
1100 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1101 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1102 	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
1103 }
1104 
1105 static int dmfe_ethtool_set_wol(struct net_device *dev,
1106 				struct ethtool_wolinfo *wolinfo)
1107 {
1108 	struct dmfe_board_info *db = netdev_priv(dev);
1109 
1110 	if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
1111 		   		WAKE_ARP | WAKE_MAGICSECURE))
1112 		   return -EOPNOTSUPP;
1113 
1114 	db->wol_mode = wolinfo->wolopts;
1115 	return 0;
1116 }
1117 
1118 static void dmfe_ethtool_get_wol(struct net_device *dev,
1119 				 struct ethtool_wolinfo *wolinfo)
1120 {
1121 	struct dmfe_board_info *db = netdev_priv(dev);
1122 
1123 	wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
1124 	wolinfo->wolopts = db->wol_mode;
1125 }
1126 
1127 
/* ethtool entry points exported through dev->ethtool_ops */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= dmfe_ethtool_get_drvinfo,
	.get_link               = ethtool_op_get_link,
	.set_wol		= dmfe_ethtool_set_wol,
	.get_wol		= dmfe_ethtool_get_wol,
};
1134 
1135 /*
1136  *	A periodic timer routine
1137  *	Dynamic media sense, allocate Rx buffer...
1138  */
1139 
1140 static void dmfe_timer(unsigned long data)
1141 {
1142 	struct net_device *dev = (struct net_device *)data;
1143 	struct dmfe_board_info *db = netdev_priv(dev);
1144 	void __iomem *ioaddr = db->ioaddr;
1145 	u32 tmp_cr8;
1146 	unsigned char tmp_cr12;
1147  	unsigned long flags;
1148 
1149 	int link_ok, link_ok_phy;
1150 
1151 	DMFE_DBUG(0, "dmfe_timer()", 0);
1152 	spin_lock_irqsave(&db->lock, flags);
1153 
1154 	/* Media mode process when Link OK before enter this route */
1155 	if (db->first_in_callback == 0) {
1156 		db->first_in_callback = 1;
1157 		if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
1158 			db->cr6_data &= ~0x40000;
1159 			update_cr6(db->cr6_data, ioaddr);
1160 			phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
1161 			db->cr6_data |= 0x40000;
1162 			update_cr6(db->cr6_data, ioaddr);
1163 			db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
1164 			add_timer(&db->timer);
1165 			spin_unlock_irqrestore(&db->lock, flags);
1166 			return;
1167 		}
1168 	}
1169 
1170 
1171 	/* Operating Mode Check */
1172 	if ( (db->dm910x_chk_mode & 0x1) &&
1173 		(dev->stats.rx_packets > MAX_CHECK_PACKET) )
1174 		db->dm910x_chk_mode = 0x4;
1175 
1176 	/* Dynamic reset DM910X : system error or transmit time-out */
1177 	tmp_cr8 = dr32(DCR8);
1178 	if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
1179 		db->reset_cr8++;
1180 		db->wait_reset = 1;
1181 	}
1182 	db->interval_rx_cnt = 0;
1183 
1184 	/* TX polling kick monitor */
1185 	if ( db->tx_packet_cnt &&
1186 	     time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
1187 		dw32(DCR1, 0x1);   /* Tx polling again */
1188 
1189 		/* TX Timeout */
1190 		if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
1191 			db->reset_TXtimeout++;
1192 			db->wait_reset = 1;
1193 			dev_warn(&dev->dev, "Tx timeout - resetting\n");
1194 		}
1195 	}
1196 
1197 	if (db->wait_reset) {
1198 		DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
1199 		db->reset_count++;
1200 		dmfe_dynamic_reset(dev);
1201 		db->first_in_callback = 0;
1202 		db->timer.expires = DMFE_TIMER_WUT;
1203 		add_timer(&db->timer);
1204 		spin_unlock_irqrestore(&db->lock, flags);
1205 		return;
1206 	}
1207 
1208 	/* Link status check, Dynamic media type change */
1209 	if (db->chip_id == PCI_DM9132_ID)
1210 		tmp_cr12 = dr8(DCR9 + 3);	/* DM9132 */
1211 	else
1212 		tmp_cr12 = dr8(DCR12);		/* DM9102/DM9102A */
1213 
1214 	if ( ((db->chip_id == PCI_DM9102_ID) &&
1215 		(db->chip_revision == 0x30)) ||
1216 		((db->chip_id == PCI_DM9132_ID) &&
1217 		(db->chip_revision == 0x10)) ) {
1218 		/* DM9102A Chip */
1219 		if (tmp_cr12 & 2)
1220 			link_ok = 0;
1221 		else
1222 			link_ok = 1;
1223 	}
1224 	else
1225 		/*0x43 is used instead of 0x3 because bit 6 should represent
1226 			link status of external PHY */
1227 		link_ok = (tmp_cr12 & 0x43) ? 1 : 0;
1228 
1229 
1230 	/* If chip reports that link is failed it could be because external
1231 		PHY link status pin is not connected correctly to chip
1232 		To be sure ask PHY too.
1233 	*/
1234 
1235 	/* need a dummy read because of PHY's register latch*/
1236 	phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
1237 	link_ok_phy = (phy_read (db->ioaddr,
1238 		       db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;
1239 
1240 	if (link_ok_phy != link_ok) {
1241 		DMFE_DBUG (0, "PHY and chip report different link status", 0);
1242 		link_ok = link_ok | link_ok_phy;
1243  	}
1244 
1245 	if ( !link_ok && netif_carrier_ok(dev)) {
1246 		/* Link Failed */
1247 		DMFE_DBUG(0, "Link Failed", tmp_cr12);
1248 		netif_carrier_off(dev);
1249 
1250 		/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
1251 		/* AUTO or force 1M Homerun/Longrun don't need */
1252 		if ( !(db->media_mode & 0x38) )
1253 			phy_write(db->ioaddr, db->phy_addr,
1254 				  0, 0x1000, db->chip_id);
1255 
1256 		/* AUTO mode, if INT phyxcer link failed, select EXT device */
1257 		if (db->media_mode & DMFE_AUTO) {
1258 			/* 10/100M link failed, used 1M Home-Net */
1259 			db->cr6_data|=0x00040000;	/* bit18=1, MII */
1260 			db->cr6_data&=~0x00000200;	/* bit9=0, HD mode */
1261 			update_cr6(db->cr6_data, ioaddr);
1262 		}
1263 	} else if (!netif_carrier_ok(dev)) {
1264 
1265 		DMFE_DBUG(0, "Link link OK", tmp_cr12);
1266 
1267 		/* Auto Sense Speed */
1268 		if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
1269 			netif_carrier_on(dev);
1270 			SHOW_MEDIA_TYPE(db->op_mode);
1271 		}
1272 
1273 		dmfe_process_mode(db);
1274 	}
1275 
1276 	/* HPNA remote command check */
1277 	if (db->HPNA_command & 0xf00) {
1278 		db->HPNA_timer--;
1279 		if (!db->HPNA_timer)
1280 			dmfe_HPNA_remote_cmd_chk(db);
1281 	}
1282 
1283 	/* Timer active again */
1284 	db->timer.expires = DMFE_TIMER_WUT;
1285 	add_timer(&db->timer);
1286 	spin_unlock_irqrestore(&db->lock, flags);
1287 }
1288 
1289 
1290 /*
1291  *	Dynamic reset the DM910X board
1292  *	Stop DM910X board
1293  *	Free Tx/Rx allocated memory
1294  *	Reset DM910X board
1295  *	Re-initialize DM910X board
1296  */
1297 
1298 static void dmfe_dynamic_reset(struct net_device *dev)
1299 {
1300 	struct dmfe_board_info *db = netdev_priv(dev);
1301 	void __iomem *ioaddr = db->ioaddr;
1302 
1303 	DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);
1304 
1305 	/* Sopt MAC controller */
1306 	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
1307 	update_cr6(db->cr6_data, ioaddr);
1308 	dw32(DCR7, 0);				/* Disable Interrupt */
1309 	dw32(DCR5, dr32(DCR5));
1310 
1311 	/* Disable upper layer interface */
1312 	netif_stop_queue(dev);
1313 
1314 	/* Free Rx Allocate buffer */
1315 	dmfe_free_rxbuffer(db);
1316 
1317 	/* system variable init */
1318 	db->tx_packet_cnt = 0;
1319 	db->tx_queue_cnt = 0;
1320 	db->rx_avail_cnt = 0;
1321 	netif_carrier_off(dev);
1322 	db->wait_reset = 0;
1323 
1324 	/* Re-initialize DM910X board */
1325 	dmfe_init_dm910x(dev);
1326 
1327 	/* Restart upper layer interface */
1328 	netif_wake_queue(dev);
1329 }
1330 
1331 
1332 /*
1333  *	free all allocated rx buffer
1334  */
1335 
1336 static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
1337 {
1338 	DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);
1339 
1340 	/* free allocated rx buffer */
1341 	while (db->rx_avail_cnt) {
1342 		dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1343 		db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1344 		db->rx_avail_cnt--;
1345 	}
1346 }
1347 
1348 
1349 /*
1350  *	Reuse the SK buffer
1351  */
1352 
1353 static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
1354 {
1355 	struct rx_desc *rxptr = db->rx_insert_ptr;
1356 
1357 	if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
1358 		rxptr->rx_skb_ptr = skb;
1359 		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
1360 			    skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1361 		wmb();
1362 		rxptr->rdes0 = cpu_to_le32(0x80000000);
1363 		db->rx_avail_cnt++;
1364 		db->rx_insert_ptr = rxptr->next_rx_desc;
1365 	} else
1366 		DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
1367 }
1368 
1369 
1370 /*
1371  *	Initialize transmit/Receive descriptor
1372  *	Using Chain structure, and allocate Tx/Rx buffer
1373  */
1374 
1375 static void dmfe_descriptor_init(struct net_device *dev)
1376 {
1377 	struct dmfe_board_info *db = netdev_priv(dev);
1378 	void __iomem *ioaddr = db->ioaddr;
1379 	struct tx_desc *tmp_tx;
1380 	struct rx_desc *tmp_rx;
1381 	unsigned char *tmp_buf;
1382 	dma_addr_t tmp_tx_dma, tmp_rx_dma;
1383 	dma_addr_t tmp_buf_dma;
1384 	int i;
1385 
1386 	DMFE_DBUG(0, "dmfe_descriptor_init()", 0);
1387 
1388 	/* tx descriptor start pointer */
1389 	db->tx_insert_ptr = db->first_tx_desc;
1390 	db->tx_remove_ptr = db->first_tx_desc;
1391 	dw32(DCR4, db->first_tx_desc_dma);     /* TX DESC address */
1392 
1393 	/* rx descriptor start pointer */
1394 	db->first_rx_desc = (void *)db->first_tx_desc +
1395 			sizeof(struct tx_desc) * TX_DESC_CNT;
1396 
1397 	db->first_rx_desc_dma =  db->first_tx_desc_dma +
1398 			sizeof(struct tx_desc) * TX_DESC_CNT;
1399 	db->rx_insert_ptr = db->first_rx_desc;
1400 	db->rx_ready_ptr = db->first_rx_desc;
1401 	dw32(DCR3, db->first_rx_desc_dma);		/* RX DESC address */
1402 
1403 	/* Init Transmit chain */
1404 	tmp_buf = db->buf_pool_start;
1405 	tmp_buf_dma = db->buf_pool_dma_start;
1406 	tmp_tx_dma = db->first_tx_desc_dma;
1407 	for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
1408 		tmp_tx->tx_buf_ptr = tmp_buf;
1409 		tmp_tx->tdes0 = cpu_to_le32(0);
1410 		tmp_tx->tdes1 = cpu_to_le32(0x81000000);	/* IC, chain */
1411 		tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
1412 		tmp_tx_dma += sizeof(struct tx_desc);
1413 		tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
1414 		tmp_tx->next_tx_desc = tmp_tx + 1;
1415 		tmp_buf = tmp_buf + TX_BUF_ALLOC;
1416 		tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
1417 	}
1418 	(--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
1419 	tmp_tx->next_tx_desc = db->first_tx_desc;
1420 
1421 	 /* Init Receive descriptor chain */
1422 	tmp_rx_dma=db->first_rx_desc_dma;
1423 	for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
1424 		tmp_rx->rdes0 = cpu_to_le32(0);
1425 		tmp_rx->rdes1 = cpu_to_le32(0x01000600);
1426 		tmp_rx_dma += sizeof(struct rx_desc);
1427 		tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
1428 		tmp_rx->next_rx_desc = tmp_rx + 1;
1429 	}
1430 	(--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
1431 	tmp_rx->next_rx_desc = db->first_rx_desc;
1432 
1433 	/* pre-allocate Rx buffer */
1434 	allocate_rx_buffer(dev);
1435 }
1436 
1437 
1438 /*
1439  *	Update CR6 value
1440  *	Firstly stop DM910X , then written value and start
1441  */
1442 
1443 static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
1444 {
1445 	u32 cr6_tmp;
1446 
1447 	cr6_tmp = cr6_data & ~0x2002;           /* stop Tx/Rx */
1448 	dw32(DCR6, cr6_tmp);
1449 	udelay(5);
1450 	dw32(DCR6, cr6_data);
1451 	udelay(5);
1452 }
1453 
1454 
1455 /*
1456  *	Send a setup frame for DM9132
1457  *	This setup frame initialize DM910X address filter mode
1458 */
1459 
1460 static void dm9132_id_table(struct net_device *dev)
1461 {
1462 	struct dmfe_board_info *db = netdev_priv(dev);
1463 	void __iomem *ioaddr = db->ioaddr + 0xc0;
1464 	u16 *addrptr = (u16 *)dev->dev_addr;
1465 	struct netdev_hw_addr *ha;
1466 	u16 i, hash_table[4];
1467 
1468 	/* Node address */
1469 	for (i = 0; i < 3; i++) {
1470 		dw16(0, addrptr[i]);
1471 		ioaddr += 4;
1472 	}
1473 
1474 	/* Clear Hash Table */
1475 	memset(hash_table, 0, sizeof(hash_table));
1476 
1477 	/* broadcast address */
1478 	hash_table[3] = 0x8000;
1479 
1480 	/* the multicast address in Hash Table : 64 bits */
1481 	netdev_for_each_mc_addr(ha, dev) {
1482 		u32 hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f;
1483 
1484 		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
1485 	}
1486 
1487 	/* Write the hash table to MAC MD table */
1488 	for (i = 0; i < 4; i++, ioaddr += 4)
1489 		dw16(0, hash_table[i]);
1490 }
1491 
1492 
1493 /*
1494  *	Send a setup frame for DM9102/DM9102A
1495  *	This setup frame initialize DM910X address filter mode
1496  */
1497 
1498 static void send_filter_frame(struct net_device *dev)
1499 {
1500 	struct dmfe_board_info *db = netdev_priv(dev);
1501 	struct netdev_hw_addr *ha;
1502 	struct tx_desc *txptr;
1503 	u16 * addrptr;
1504 	u32 * suptr;
1505 	int i;
1506 
1507 	DMFE_DBUG(0, "send_filter_frame()", 0);
1508 
1509 	txptr = db->tx_insert_ptr;
1510 	suptr = (u32 *) txptr->tx_buf_ptr;
1511 
1512 	/* Node address */
1513 	addrptr = (u16 *) dev->dev_addr;
1514 	*suptr++ = addrptr[0];
1515 	*suptr++ = addrptr[1];
1516 	*suptr++ = addrptr[2];
1517 
1518 	/* broadcast address */
1519 	*suptr++ = 0xffff;
1520 	*suptr++ = 0xffff;
1521 	*suptr++ = 0xffff;
1522 
1523 	/* fit the multicast address */
1524 	netdev_for_each_mc_addr(ha, dev) {
1525 		addrptr = (u16 *) ha->addr;
1526 		*suptr++ = addrptr[0];
1527 		*suptr++ = addrptr[1];
1528 		*suptr++ = addrptr[2];
1529 	}
1530 
1531 	for (i = netdev_mc_count(dev); i < 14; i++) {
1532 		*suptr++ = 0xffff;
1533 		*suptr++ = 0xffff;
1534 		*suptr++ = 0xffff;
1535 	}
1536 
1537 	/* prepare the setup frame */
1538 	db->tx_insert_ptr = txptr->next_tx_desc;
1539 	txptr->tdes1 = cpu_to_le32(0x890000c0);
1540 
1541 	/* Resource Check and Send the setup packet */
1542 	if (!db->tx_packet_cnt) {
1543 		void __iomem *ioaddr = db->ioaddr;
1544 
1545 		/* Resource Empty */
1546 		db->tx_packet_cnt++;
1547 		txptr->tdes0 = cpu_to_le32(0x80000000);
1548 		update_cr6(db->cr6_data | 0x2000, ioaddr);
1549 		dw32(DCR1, 0x1);	/* Issue Tx polling */
1550 		update_cr6(db->cr6_data, ioaddr);
1551 		dev->trans_start = jiffies;
1552 	} else
1553 		db->tx_queue_cnt++;	/* Put in TX queue */
1554 }
1555 
1556 
1557 /*
1558  *	Allocate rx buffer,
1559  *	As possible as allocate maxiumn Rx buffer
1560  */
1561 
1562 static void allocate_rx_buffer(struct net_device *dev)
1563 {
1564 	struct dmfe_board_info *db = netdev_priv(dev);
1565 	struct rx_desc *rxptr;
1566 	struct sk_buff *skb;
1567 
1568 	rxptr = db->rx_insert_ptr;
1569 
1570 	while(db->rx_avail_cnt < RX_DESC_CNT) {
1571 		if ( ( skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE) ) == NULL )
1572 			break;
1573 		rxptr->rx_skb_ptr = skb; /* FIXME (?) */
1574 		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
1575 				    RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1576 		wmb();
1577 		rxptr->rdes0 = cpu_to_le32(0x80000000);
1578 		rxptr = rxptr->next_rx_desc;
1579 		db->rx_avail_cnt++;
1580 	}
1581 
1582 	db->rx_insert_ptr = rxptr;
1583 }
1584 
1585 static void srom_clk_write(void __iomem *ioaddr, u32 data)
1586 {
1587 	static const u32 cmd[] = {
1588 		CR9_SROM_READ | CR9_SRCS,
1589 		CR9_SROM_READ | CR9_SRCS | CR9_SRCLK,
1590 		CR9_SROM_READ | CR9_SRCS
1591 	};
1592 	int i;
1593 
1594 	for (i = 0; i < ARRAY_SIZE(cmd); i++) {
1595 		dw32(DCR9, data | cmd[i]);
1596 		udelay(5);
1597 	}
1598 }
1599 
1600 /*
1601  *	Read one word data from the serial ROM
1602  */
1603 static u16 read_srom_word(void __iomem *ioaddr, int offset)
1604 {
1605 	u16 srom_data;
1606 	int i;
1607 
1608 	dw32(DCR9, CR9_SROM_READ);
1609 	udelay(5);
1610 	dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1611 	udelay(5);
1612 
1613 	/* Send the Read Command 110b */
1614 	srom_clk_write(ioaddr, SROM_DATA_1);
1615 	srom_clk_write(ioaddr, SROM_DATA_1);
1616 	srom_clk_write(ioaddr, SROM_DATA_0);
1617 
1618 	/* Send the offset */
1619 	for (i = 5; i >= 0; i--) {
1620 		srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
1621 		srom_clk_write(ioaddr, srom_data);
1622 	}
1623 
1624 	dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1625 	udelay(5);
1626 
1627 	for (i = 16; i > 0; i--) {
1628 		dw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
1629 		udelay(5);
1630 		srom_data = (srom_data << 1) |
1631 				((dr32(DCR9) & CR9_CRDOUT) ? 1 : 0);
1632 		dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1633 		udelay(5);
1634 	}
1635 
1636 	dw32(DCR9, CR9_SROM_READ);
1637 	udelay(5);
1638 	return srom_data;
1639 }
1640 
1641 
1642 /*
1643  *	Auto sense the media mode
1644  */
1645 
1646 static u8 dmfe_sense_speed(struct dmfe_board_info *db)
1647 {
1648 	void __iomem *ioaddr = db->ioaddr;
1649 	u8 ErrFlag = 0;
1650 	u16 phy_mode;
1651 
1652 	/* CR6 bit18=0, select 10/100M */
1653 	update_cr6(db->cr6_data & ~0x40000, ioaddr);
1654 
1655 	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1656 	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1657 
1658 	if ( (phy_mode & 0x24) == 0x24 ) {
1659 		if (db->chip_id == PCI_DM9132_ID)	/* DM9132 */
1660 			phy_mode = phy_read(db->ioaddr,
1661 				    db->phy_addr, 7, db->chip_id) & 0xf000;
1662 		else 				/* DM9102/DM9102A */
1663 			phy_mode = phy_read(db->ioaddr,
1664 				    db->phy_addr, 17, db->chip_id) & 0xf000;
1665 		switch (phy_mode) {
1666 		case 0x1000: db->op_mode = DMFE_10MHF; break;
1667 		case 0x2000: db->op_mode = DMFE_10MFD; break;
1668 		case 0x4000: db->op_mode = DMFE_100MHF; break;
1669 		case 0x8000: db->op_mode = DMFE_100MFD; break;
1670 		default: db->op_mode = DMFE_10MHF;
1671 			ErrFlag = 1;
1672 			break;
1673 		}
1674 	} else {
1675 		db->op_mode = DMFE_10MHF;
1676 		DMFE_DBUG(0, "Link Failed :", phy_mode);
1677 		ErrFlag = 1;
1678 	}
1679 
1680 	return ErrFlag;
1681 }
1682 
1683 
1684 /*
1685  *	Set 10/100 phyxcer capability
1686  *	AUTO mode : phyxcer register4 is NIC capability
1687  *	Force mode: phyxcer register4 is the force media
1688  */
1689 
1690 static void dmfe_set_phyxcer(struct dmfe_board_info *db)
1691 {
1692 	void __iomem *ioaddr = db->ioaddr;
1693 	u16 phy_reg;
1694 
1695 	/* Select 10/100M phyxcer */
1696 	db->cr6_data &= ~0x40000;
1697 	update_cr6(db->cr6_data, ioaddr);
1698 
1699 	/* DM9009 Chip: Phyxcer reg18 bit12=0 */
1700 	if (db->chip_id == PCI_DM9009_ID) {
1701 		phy_reg = phy_read(db->ioaddr,
1702 				   db->phy_addr, 18, db->chip_id) & ~0x1000;
1703 
1704 		phy_write(db->ioaddr,
1705 			  db->phy_addr, 18, phy_reg, db->chip_id);
1706 	}
1707 
1708 	/* Phyxcer capability setting */
1709 	phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
1710 
1711 	if (db->media_mode & DMFE_AUTO) {
1712 		/* AUTO Mode */
1713 		phy_reg |= db->PHY_reg4;
1714 	} else {
1715 		/* Force Mode */
1716 		switch(db->media_mode) {
1717 		case DMFE_10MHF: phy_reg |= 0x20; break;
1718 		case DMFE_10MFD: phy_reg |= 0x40; break;
1719 		case DMFE_100MHF: phy_reg |= 0x80; break;
1720 		case DMFE_100MFD: phy_reg |= 0x100; break;
1721 		}
1722 		if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;
1723 	}
1724 
1725   	/* Write new capability to Phyxcer Reg4 */
1726 	if ( !(phy_reg & 0x01e0)) {
1727 		phy_reg|=db->PHY_reg4;
1728 		db->media_mode|=DMFE_AUTO;
1729 	}
1730 	phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
1731 
1732  	/* Restart Auto-Negotiation */
1733 	if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1734 		phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
1735 	if ( !db->chip_type )
1736 		phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
1737 }
1738 
1739 
1740 /*
1741  *	Process op-mode
1742  *	AUTO mode : PHY controller in Auto-negotiation Mode
1743  *	Force mode: PHY controller in force mode with HUB
1744  *			N-way force capability with SWITCH
1745  */
1746 
1747 static void dmfe_process_mode(struct dmfe_board_info *db)
1748 {
1749 	u16 phy_reg;
1750 
1751 	/* Full Duplex Mode Check */
1752 	if (db->op_mode & 0x4)
1753 		db->cr6_data |= CR6_FDM;	/* Set Full Duplex Bit */
1754 	else
1755 		db->cr6_data &= ~CR6_FDM;	/* Clear Full Duplex Bit */
1756 
1757 	/* Transciver Selection */
1758 	if (db->op_mode & 0x10)		/* 1M HomePNA */
1759 		db->cr6_data |= 0x40000;/* External MII select */
1760 	else
1761 		db->cr6_data &= ~0x40000;/* Internal 10/100 transciver */
1762 
1763 	update_cr6(db->cr6_data, db->ioaddr);
1764 
1765 	/* 10/100M phyxcer force mode need */
1766 	if ( !(db->media_mode & 0x18)) {
1767 		/* Forece Mode */
1768 		phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
1769 		if ( !(phy_reg & 0x1) ) {
1770 			/* parter without N-Way capability */
1771 			phy_reg = 0x0;
1772 			switch(db->op_mode) {
1773 			case DMFE_10MHF: phy_reg = 0x0; break;
1774 			case DMFE_10MFD: phy_reg = 0x100; break;
1775 			case DMFE_100MHF: phy_reg = 0x2000; break;
1776 			case DMFE_100MFD: phy_reg = 0x2100; break;
1777 			}
1778 			phy_write(db->ioaddr,
1779 				  db->phy_addr, 0, phy_reg, db->chip_id);
1780        			if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1781 				mdelay(20);
1782 			phy_write(db->ioaddr,
1783 				  db->phy_addr, 0, phy_reg, db->chip_id);
1784 		}
1785 	}
1786 }
1787 
1788 
1789 /*
1790  *	Write a word to Phy register
1791  */
1792 
1793 static void phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
1794 		      u16 phy_data, u32 chip_id)
1795 {
1796 	u16 i;
1797 
1798 	if (chip_id == PCI_DM9132_ID) {
1799 		dw16(0x80 + offset * 4, phy_data);
1800 	} else {
1801 		/* DM9102/DM9102A Chip */
1802 
1803 		/* Send 33 synchronization clock to Phy controller */
1804 		for (i = 0; i < 35; i++)
1805 			phy_write_1bit(ioaddr, PHY_DATA_1);
1806 
1807 		/* Send start command(01) to Phy */
1808 		phy_write_1bit(ioaddr, PHY_DATA_0);
1809 		phy_write_1bit(ioaddr, PHY_DATA_1);
1810 
1811 		/* Send write command(01) to Phy */
1812 		phy_write_1bit(ioaddr, PHY_DATA_0);
1813 		phy_write_1bit(ioaddr, PHY_DATA_1);
1814 
1815 		/* Send Phy address */
1816 		for (i = 0x10; i > 0; i = i >> 1)
1817 			phy_write_1bit(ioaddr,
1818 				       phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1819 
1820 		/* Send register address */
1821 		for (i = 0x10; i > 0; i = i >> 1)
1822 			phy_write_1bit(ioaddr,
1823 				       offset & i ? PHY_DATA_1 : PHY_DATA_0);
1824 
1825 		/* written trasnition */
1826 		phy_write_1bit(ioaddr, PHY_DATA_1);
1827 		phy_write_1bit(ioaddr, PHY_DATA_0);
1828 
1829 		/* Write a word data to PHY controller */
1830 		for ( i = 0x8000; i > 0; i >>= 1)
1831 			phy_write_1bit(ioaddr,
1832 				       phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
1833 	}
1834 }
1835 
1836 
1837 /*
1838  *	Read a word data from phy register
1839  */
1840 
1841 static u16 phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
1842 {
1843 	int i;
1844 	u16 phy_data;
1845 
1846 	if (chip_id == PCI_DM9132_ID) {
1847 		/* DM9132 Chip */
1848 		phy_data = dr16(0x80 + offset * 4);
1849 	} else {
1850 		/* DM9102/DM9102A Chip */
1851 
1852 		/* Send 33 synchronization clock to Phy controller */
1853 		for (i = 0; i < 35; i++)
1854 			phy_write_1bit(ioaddr, PHY_DATA_1);
1855 
1856 		/* Send start command(01) to Phy */
1857 		phy_write_1bit(ioaddr, PHY_DATA_0);
1858 		phy_write_1bit(ioaddr, PHY_DATA_1);
1859 
1860 		/* Send read command(10) to Phy */
1861 		phy_write_1bit(ioaddr, PHY_DATA_1);
1862 		phy_write_1bit(ioaddr, PHY_DATA_0);
1863 
1864 		/* Send Phy address */
1865 		for (i = 0x10; i > 0; i = i >> 1)
1866 			phy_write_1bit(ioaddr,
1867 				       phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1868 
1869 		/* Send register address */
1870 		for (i = 0x10; i > 0; i = i >> 1)
1871 			phy_write_1bit(ioaddr,
1872 				       offset & i ? PHY_DATA_1 : PHY_DATA_0);
1873 
1874 		/* Skip transition state */
1875 		phy_read_1bit(ioaddr);
1876 
1877 		/* read 16bit data */
1878 		for (phy_data = 0, i = 0; i < 16; i++) {
1879 			phy_data <<= 1;
1880 			phy_data |= phy_read_1bit(ioaddr);
1881 		}
1882 	}
1883 
1884 	return phy_data;
1885 }
1886 
1887 
1888 /*
1889  *	Write one bit data to Phy Controller
1890  */
1891 
1892 static void phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
1893 {
1894 	dw32(DCR9, phy_data);		/* MII Clock Low */
1895 	udelay(1);
1896 	dw32(DCR9, phy_data | MDCLKH);	/* MII Clock High */
1897 	udelay(1);
1898 	dw32(DCR9, phy_data);		/* MII Clock Low */
1899 	udelay(1);
1900 }
1901 
1902 
1903 /*
1904  *	Read one bit phy data from PHY controller
1905  */
1906 
1907 static u16 phy_read_1bit(void __iomem *ioaddr)
1908 {
1909 	u16 phy_data;
1910 
1911 	dw32(DCR9, 0x50000);
1912 	udelay(1);
1913 	phy_data = (dr32(DCR9) >> 19) & 0x1;
1914 	dw32(DCR9, 0x40000);
1915 	udelay(1);
1916 
1917 	return phy_data;
1918 }
1919 
1920 
1921 /*
1922  *	Parser SROM and media mode
1923  */
1924 
1925 static void dmfe_parse_srom(struct dmfe_board_info * db)
1926 {
1927 	char * srom = db->srom;
1928 	int dmfe_mode, tmp_reg;
1929 
1930 	DMFE_DBUG(0, "dmfe_parse_srom() ", 0);
1931 
1932 	/* Init CR15 */
1933 	db->cr15_data = CR15_DEFAULT;
1934 
1935 	/* Check SROM Version */
1936 	if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
1937 		/* SROM V4.01 */
1938 		/* Get NIC support media mode */
1939 		db->NIC_capability = le16_to_cpup((__le16 *) (srom + 34));
1940 		db->PHY_reg4 = 0;
1941 		for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
1942 			switch( db->NIC_capability & tmp_reg ) {
1943 			case 0x1: db->PHY_reg4 |= 0x0020; break;
1944 			case 0x2: db->PHY_reg4 |= 0x0040; break;
1945 			case 0x4: db->PHY_reg4 |= 0x0080; break;
1946 			case 0x8: db->PHY_reg4 |= 0x0100; break;
1947 			}
1948 		}
1949 
1950 		/* Media Mode Force or not check */
1951 		dmfe_mode = (le32_to_cpup((__le32 *) (srom + 34)) &
1952 			     le32_to_cpup((__le32 *) (srom + 36)));
1953 		switch(dmfe_mode) {
1954 		case 0x4: dmfe_media_mode = DMFE_100MHF; break;	/* 100MHF */
1955 		case 0x2: dmfe_media_mode = DMFE_10MFD; break;	/* 10MFD */
1956 		case 0x8: dmfe_media_mode = DMFE_100MFD; break;	/* 100MFD */
1957 		case 0x100:
1958 		case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
1959 		}
1960 
1961 		/* Special Function setting */
1962 		/* VLAN function */
1963 		if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
1964 			db->cr15_data |= 0x40;
1965 
1966 		/* Flow Control */
1967 		if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
1968 			db->cr15_data |= 0x400;
1969 
1970 		/* TX pause packet */
1971 		if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
1972 			db->cr15_data |= 0x9800;
1973 	}
1974 
1975 	/* Parse HPNA parameter */
1976 	db->HPNA_command = 1;
1977 
1978 	/* Accept remote command or not */
1979 	if (HPNA_rx_cmd == 0)
1980 		db->HPNA_command |= 0x8000;
1981 
1982 	 /* Issue remote command & operation mode */
1983 	if (HPNA_tx_cmd == 1)
1984 		switch(HPNA_mode) {	/* Issue Remote Command */
1985 		case 0: db->HPNA_command |= 0x0904; break;
1986 		case 1: db->HPNA_command |= 0x0a00; break;
1987 		case 2: db->HPNA_command |= 0x0506; break;
1988 		case 3: db->HPNA_command |= 0x0602; break;
1989 		}
1990 	else
1991 		switch(HPNA_mode) {	/* Don't Issue */
1992 		case 0: db->HPNA_command |= 0x0004; break;
1993 		case 1: db->HPNA_command |= 0x0000; break;
1994 		case 2: db->HPNA_command |= 0x0006; break;
1995 		case 3: db->HPNA_command |= 0x0002; break;
1996 		}
1997 
1998 	/* Check DM9801 or DM9802 present or not */
1999 	db->HPNA_present = 0;
2000 	update_cr6(db->cr6_data | 0x40000, db->ioaddr);
2001 	tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
2002 	if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
2003 		/* DM9801 or DM9802 present */
2004 		db->HPNA_timer = 8;
2005 		if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
2006 			/* DM9801 HomeRun */
2007 			db->HPNA_present = 1;
2008 			dmfe_program_DM9801(db, tmp_reg);
2009 		} else {
2010 			/* DM9802 LongRun */
2011 			db->HPNA_present = 2;
2012 			dmfe_program_DM9802(db);
2013 		}
2014 	}
2015 
2016 }
2017 
2018 
2019 /*
2020  *	Init HomeRun DM9801
2021  */
2022 
2023 static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
2024 {
2025 	uint reg17, reg25;
2026 
2027 	if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
2028 	switch(HPNA_rev) {
2029 	case 0xb900: /* DM9801 E3 */
2030 		db->HPNA_command |= 0x1000;
2031 		reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
2032 		reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
2033 		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2034 		break;
2035 	case 0xb901: /* DM9801 E4 */
2036 		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2037 		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
2038 		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2039 		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
2040 		break;
2041 	case 0xb902: /* DM9801 E5 */
2042 	case 0xb903: /* DM9801 E6 */
2043 	default:
2044 		db->HPNA_command |= 0x1000;
2045 		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2046 		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
2047 		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2048 		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
2049 		break;
2050 	}
2051 	phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2052 	phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
2053 	phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
2054 }
2055 
2056 
2057 /*
2058  *	Init HomeRun DM9802
2059  */
2060 
2061 static void dmfe_program_DM9802(struct dmfe_board_info * db)
2062 {
2063 	uint phy_reg;
2064 
2065 	if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
2066 	phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2067 	phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2068 	phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
2069 	phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
2070 }
2071 
2072 
2073 /*
2074  *	Check remote HPNA power and speed status. If not correct,
2075  *	issue command again.
2076 */
2077 
2078 static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
2079 {
2080 	uint phy_reg;
2081 
2082 	/* Got remote device status */
2083 	phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
2084 	switch(phy_reg) {
2085 	case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
2086 	case 0x20: phy_reg = 0x0900;break; /* LP/HS */
2087 	case 0x40: phy_reg = 0x0600;break; /* HP/LS */
2088 	case 0x60: phy_reg = 0x0500;break; /* HP/HS */
2089 	}
2090 
2091 	/* Check remote device status match our setting ot not */
2092 	if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
2093 		phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
2094 			  db->chip_id);
2095 		db->HPNA_timer=8;
2096 	} else
2097 		db->HPNA_timer=600;	/* Match, every 10 minutes, check */
2098 }
2099 
2100 
2101 
2102 static DEFINE_PCI_DEVICE_TABLE(dmfe_pci_tbl) = {
2103 	{ 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
2104 	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
2105 	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
2106 	{ 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
2107 	{ 0, }
2108 };
2109 MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
2110 
2111 
2112 #ifdef CONFIG_PM
/*
 *	Legacy PCI PM suspend handler: quiesce the chip, arm Wake-on-LAN
 *	according to db->wol_mode, and power the device down.
 */
static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	u32 tmp;

	/* Disable upper layer interface */
	netif_device_detach(dev);

	/* Disable Tx/Rx */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
	update_cr6(db->cr6_data, ioaddr);

	/* Disable Interrupt */
	dw32(DCR7, 0);
	dw32(DCR5, dr32(DCR5));	/* write DCR5 back to itself to ack pending status */

	/* Free RX buffers */
	dmfe_free_rxbuffer(db);

	/* Enable WOL: wake flags live in PCI config dword 0x40 */
	pci_read_config_dword(pci_dev, 0x40, &tmp);
	tmp &= ~(DMFE_WOL_LINKCHANGE|DMFE_WOL_MAGICPACKET);

	if (db->wol_mode & WAKE_PHY)
		tmp |= DMFE_WOL_LINKCHANGE;
	if (db->wol_mode & WAKE_MAGIC)
		tmp |= DMFE_WOL_MAGICPACKET;

	pci_write_config_dword(pci_dev, 0x40, tmp);

	pci_enable_wake(pci_dev, PCI_D3hot, 1);
	pci_enable_wake(pci_dev, PCI_D3cold, 1);

	/* Power down device */
	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, pci_choose_state (pci_dev, state));

	return 0;
}
2154 
/*
 *	Legacy PCI PM resume handler: restore power/config state,
 *	re-initialize the chip, clear WOL, and reattach the interface.
 */
static int dmfe_resume(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	u32 tmp;

	/* Bring the device back to D0 before touching its registers */
	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	/* Re-initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Disable WOL (clear the flags set by dmfe_suspend) */
	pci_read_config_dword(pci_dev, 0x40, &tmp);

	tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET);
	pci_write_config_dword(pci_dev, 0x40, tmp);

	pci_enable_wake(pci_dev, PCI_D3hot, 0);
	pci_enable_wake(pci_dev, PCI_D3cold, 0);

	/* Restart upper layer interface */
	netif_device_attach(dev);

	return 0;
}
2180 #else
2181 #define dmfe_suspend NULL
2182 #define dmfe_resume NULL
2183 #endif
2184 
2185 static struct pci_driver dmfe_driver = {
2186 	.name		= "dmfe",
2187 	.id_table	= dmfe_pci_tbl,
2188 	.probe		= dmfe_init_one,
2189 	.remove		= dmfe_remove_one,
2190 	.suspend        = dmfe_suspend,
2191 	.resume         = dmfe_resume
2192 };
2193 
MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Load-time parameters; dmfe_init_module() clamps out-of-range values
 * back to their defaults. */
module_param(debug, int, 0);		/* non-zero enables debug output */
module_param(mode, byte, 0);		/* forced media mode, else autonegotiate */
module_param(cr6set, int, 0);		/* user override copied to dmfe_cr6_user_set */
module_param(chkmode, byte, 0);		/* media check mode — semantics set elsewhere; TODO confirm */
module_param(HPNA_mode, byte, 0);	/* HomePNA mode 0-4; >4 resets to 0 (LP/HS) */
module_param(HPNA_rx_cmd, byte, 0);	/* 0/1; >1 resets to 0 (ignore remote cmd) */
module_param(HPNA_tx_cmd, byte, 0);	/* 0/1; >1 resets to 0 (don't issue remote cmd) */
module_param(HPNA_NoiseFloor, byte, 0);	/* 0-15; 0 means chip-default noise floor */
module_param(SF_mode, byte, 0);		/* special-function bits, see PARM_DESC below */
MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
		"Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");

MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
		"(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
2214 
2215 /*	Description:
2216  *	when user used insmod to add module, system invoked init_module()
2217  *	to initialize and register.
2218  */
2219 
2220 static int __init dmfe_init_module(void)
2221 {
2222 	int rc;
2223 
2224 	pr_info("%s\n", version);
2225 	printed_version = 1;
2226 
2227 	DMFE_DBUG(0, "init_module() ", debug);
2228 
2229 	if (debug)
2230 		dmfe_debug = debug;	/* set debug flag */
2231 	if (cr6set)
2232 		dmfe_cr6_user_set = cr6set;
2233 
2234  	switch(mode) {
2235    	case DMFE_10MHF:
2236 	case DMFE_100MHF:
2237 	case DMFE_10MFD:
2238 	case DMFE_100MFD:
2239 	case DMFE_1M_HPNA:
2240 		dmfe_media_mode = mode;
2241 		break;
2242 	default:dmfe_media_mode = DMFE_AUTO;
2243 		break;
2244 	}
2245 
2246 	if (HPNA_mode > 4)
2247 		HPNA_mode = 0;		/* Default: LP/HS */
2248 	if (HPNA_rx_cmd > 1)
2249 		HPNA_rx_cmd = 0;	/* Default: Ignored remote cmd */
2250 	if (HPNA_tx_cmd > 1)
2251 		HPNA_tx_cmd = 0;	/* Default: Don't issue remote cmd */
2252 	if (HPNA_NoiseFloor > 15)
2253 		HPNA_NoiseFloor = 0;
2254 
2255 	rc = pci_register_driver(&dmfe_driver);
2256 	if (rc < 0)
2257 		return rc;
2258 
2259 	return 0;
2260 }
2261 
2262 
2263 /*
2264  *	Description:
2265  *	when user used rmmod to delete module, system invoked clean_module()
2266  *	to un-register all registered services.
2267  */
2268 
2269 static void __exit dmfe_cleanup_module(void)
2270 {
2271 	DMFE_DBUG(0, "dmfe_clean_module() ", debug);
2272 	pci_unregister_driver(&dmfe_driver);
2273 }
2274 
/* Register the module entry and exit points with the kernel */
module_init(dmfe_init_module);
module_exit(dmfe_cleanup_module);
2277