xref: /freebsd/sys/dev/my/if_my.c (revision 84823cc70824c8d842f503d8c2e6d7b0c2d95b61)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Written by: yen_cw@myson.com.tw
5  * Copyright (c) 2002 Myson Technology Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification, immediately at the beginning of the file.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * Myson fast ethernet PCI NIC driver, available at: http://www.myson.com.tw/
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/sockio.h>
38 #include <sys/mbuf.h>
39 #include <sys/malloc.h>
40 #include <sys/kernel.h>
41 #include <sys/socket.h>
42 #include <sys/queue.h>
43 #include <sys/types.h>
44 #include <sys/module.h>
45 #include <sys/lock.h>
46 #include <sys/mutex.h>
47 
48 #define NBPFILTER	1
49 
50 #include <net/if.h>
51 #include <net/if_var.h>
52 #include <net/if_arp.h>
53 #include <net/ethernet.h>
54 #include <net/if_media.h>
55 #include <net/if_types.h>
56 #include <net/if_dl.h>
57 #include <net/bpf.h>
58 
59 #include <vm/vm.h>		/* for vtophys */
60 #include <vm/pmap.h>		/* for vtophys */
61 #include <machine/bus.h>
62 #include <machine/resource.h>
63 #include <sys/bus.h>
64 #include <sys/rman.h>
65 
66 #include <dev/pci/pcireg.h>
67 #include <dev/pci/pcivar.h>
68 
69 /*
70  * #define MY_USEIOSPACE
71  */
72 
73 static int      MY_USEIOSPACE = 1;
74 
75 #ifdef MY_USEIOSPACE
76 #define MY_RES                  SYS_RES_IOPORT
77 #define MY_RID                  MY_PCI_LOIO
78 #else
79 #define MY_RES                  SYS_RES_MEMORY
80 #define MY_RID                  MY_PCI_LOMEM
81 #endif
82 
83 #include <dev/my/if_myreg.h>
84 
85 /*
86  * Various supported device vendors/types and their names.
87  */
88 struct my_type *my_info_tmp;
89 static struct my_type my_devs[] = {
90 	{MYSONVENDORID, MTD800ID, "Myson MTD80X Based Fast Ethernet Card"},
91 	{MYSONVENDORID, MTD803ID, "Myson MTD80X Based Fast Ethernet Card"},
92 	{MYSONVENDORID, MTD891ID, "Myson MTD89X Based Giga Ethernet Card"},
93 	{0, 0, NULL}
94 };
95 
96 /*
97  * Various supported PHY vendors/types and their names. Note that this driver
98  * will work with pretty much any MII-compliant PHY, so failure to positively
99  * identify the chip is not a fatal error.
100  */
101 static struct my_type my_phys[] = {
102 	{MysonPHYID0, MysonPHYID0, "<MYSON MTD981>"},
103 	{SeeqPHYID0, SeeqPHYID0, "<SEEQ 80225>"},
104 	{AhdocPHYID0, AhdocPHYID0, "<AHDOC 101>"},
105 	{MarvellPHYID0, MarvellPHYID0, "<MARVELL 88E1000>"},
106 	{LevelOnePHYID0, LevelOnePHYID0, "<LevelOne LXT1000>"},
107 	{0, 0, "<MII-compliant physical interface>"}
108 };
109 
110 static int      my_probe(device_t);
111 static int      my_attach(device_t);
112 static int      my_detach(device_t);
113 static int      my_newbuf(struct my_softc *, struct my_chain_onefrag *);
114 static int      my_encap(struct my_softc *, struct my_chain *, struct mbuf *);
115 static void     my_rxeof(struct my_softc *);
116 static void     my_txeof(struct my_softc *);
117 static void     my_txeoc(struct my_softc *);
118 static void     my_intr(void *);
119 static void     my_start(struct ifnet *);
120 static void     my_start_locked(struct ifnet *);
121 static int      my_ioctl(struct ifnet *, u_long, caddr_t);
122 static void     my_init(void *);
123 static void     my_init_locked(struct my_softc *);
124 static void     my_stop(struct my_softc *);
125 static void     my_autoneg_timeout(void *);
126 static void     my_watchdog(void *);
127 static int      my_shutdown(device_t);
128 static int      my_ifmedia_upd(struct ifnet *);
129 static void     my_ifmedia_sts(struct ifnet *, struct ifmediareq *);
130 static u_int16_t my_phy_readreg(struct my_softc *, int);
131 static void     my_phy_writereg(struct my_softc *, int, int);
132 static void     my_autoneg_xmit(struct my_softc *);
133 static void     my_autoneg_mii(struct my_softc *, int, int);
134 static void     my_setmode_mii(struct my_softc *, int);
135 static void     my_getmode_mii(struct my_softc *);
136 static void     my_setcfg(struct my_softc *, int);
137 static void     my_setmulti(struct my_softc *);
138 static void     my_reset(struct my_softc *);
139 static int      my_list_rx_init(struct my_softc *);
140 static int      my_list_tx_init(struct my_softc *);
141 static long     my_send_cmd_to_phy(struct my_softc *, int, int);
142 
143 #define MY_SETBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
144 #define MY_CLRBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))
145 
146 static device_method_t my_methods[] = {
147 	/* Device interface */
148 	DEVMETHOD(device_probe, my_probe),
149 	DEVMETHOD(device_attach, my_attach),
150 	DEVMETHOD(device_detach, my_detach),
151 	DEVMETHOD(device_shutdown, my_shutdown),
152 
153 	DEVMETHOD_END
154 };
155 
156 static driver_t my_driver = {
157 	"my",
158 	my_methods,
159 	sizeof(struct my_softc)
160 };
161 
162 DRIVER_MODULE(my, pci, my_driver, 0, 0);
163 MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, my, my_devs,
164     nitems(my_devs) - 1);
165 MODULE_DEPEND(my, pci, 1, 1, 1);
166 MODULE_DEPEND(my, ether, 1, 1, 1);
167 
168 static long
169 my_send_cmd_to_phy(struct my_softc * sc, int opcode, int regad)
170 {
171 	long            miir;
172 	int             i;
173 	int             mask, data;
174 
175 	MY_LOCK_ASSERT(sc);
176 
177 	/* enable MII output */
178 	miir = CSR_READ_4(sc, MY_MANAGEMENT);
179 	miir &= 0xfffffff0;
180 
181 	miir |= MY_MASK_MIIR_MII_WRITE + MY_MASK_MIIR_MII_MDO;
182 
183 	/* send 32 1's preamble */
184 	for (i = 0; i < 32; i++) {
185 		/* low MDC; MDO is already high (miir) */
186 		miir &= ~MY_MASK_MIIR_MII_MDC;
187 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
188 
189 		/* high MDC */
190 		miir |= MY_MASK_MIIR_MII_MDC;
191 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
192 	}
193 
194 	/* calculate ST+OP+PHYAD+REGAD+TA */
195 	data = opcode | (sc->my_phy_addr << 7) | (regad << 2);
196 
197 	/* sent out */
198 	mask = 0x8000;
199 	while (mask) {
200 		/* low MDC, prepare MDO */
201 		miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
202 		if (mask & data)
203 			miir |= MY_MASK_MIIR_MII_MDO;
204 
205 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
206 		/* high MDC */
207 		miir |= MY_MASK_MIIR_MII_MDC;
208 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
209 		DELAY(30);
210 
211 		/* next */
212 		mask >>= 1;
213 		if (mask == 0x2 && opcode == MY_OP_READ)
214 			miir &= ~MY_MASK_MIIR_MII_WRITE;
215 	}
216 
217 	return miir;
218 }
219 
220 static u_int16_t
221 my_phy_readreg(struct my_softc * sc, int reg)
222 {
223 	long            miir;
224 	int             mask, data;
225 
226 	MY_LOCK_ASSERT(sc);
227 
228 	if (sc->my_info->my_did == MTD803ID)
229 		data = CSR_READ_2(sc, MY_PHYBASE + reg * 2);
230 	else {
231 		miir = my_send_cmd_to_phy(sc, MY_OP_READ, reg);
232 
233 		/* read data */
234 		mask = 0x8000;
235 		data = 0;
236 		while (mask) {
237 			/* low MDC */
238 			miir &= ~MY_MASK_MIIR_MII_MDC;
239 			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
240 
241 			/* read MDI */
242 			miir = CSR_READ_4(sc, MY_MANAGEMENT);
243 			if (miir & MY_MASK_MIIR_MII_MDI)
244 				data |= mask;
245 
246 			/* high MDC, and wait */
247 			miir |= MY_MASK_MIIR_MII_MDC;
248 			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
249 			DELAY(30);
250 
251 			/* next */
252 			mask >>= 1;
253 		}
254 
255 		/* low MDC */
256 		miir &= ~MY_MASK_MIIR_MII_MDC;
257 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
258 	}
259 
260 	return (u_int16_t) data;
261 }
262 
263 static void
264 my_phy_writereg(struct my_softc * sc, int reg, int data)
265 {
266 	long            miir;
267 	int             mask;
268 
269 	MY_LOCK_ASSERT(sc);
270 
271 	if (sc->my_info->my_did == MTD803ID)
272 		CSR_WRITE_2(sc, MY_PHYBASE + reg * 2, data);
273 	else {
274 		miir = my_send_cmd_to_phy(sc, MY_OP_WRITE, reg);
275 
276 		/* write data */
277 		mask = 0x8000;
278 		while (mask) {
279 			/* low MDC, prepare MDO */
280 			miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
281 			if (mask & data)
282 				miir |= MY_MASK_MIIR_MII_MDO;
283 			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
284 			DELAY(1);
285 
286 			/* high MDC */
287 			miir |= MY_MASK_MIIR_MII_MDC;
288 			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
289 			DELAY(1);
290 
291 			/* next */
292 			mask >>= 1;
293 		}
294 
295 		/* low MDC */
296 		miir &= ~MY_MASK_MIIR_MII_MDC;
297 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
298 	}
299 	return;
300 }
301 
302 static u_int
303 my_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
304 {
305 	uint32_t *hashes = arg;
306 	int h;
307 
308 	h = ~ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
309 	if (h < 32)
310 		hashes[0] |= (1 << h);
311 	else
312 		hashes[1] |= (1 << (h - 32));
313 
314 	return (1);
315 }
316 /*
317  * Program the 64-bit multicast hash filter.
318  */
319 static void
320 my_setmulti(struct my_softc * sc)
321 {
322 	struct ifnet   *ifp;
323 	u_int32_t       hashes[2] = {0, 0};
324 	u_int32_t       rxfilt;
325 
326 	MY_LOCK_ASSERT(sc);
327 
328 	ifp = sc->my_ifp;
329 
330 	rxfilt = CSR_READ_4(sc, MY_TCRRCR);
331 
332 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
333 		rxfilt |= MY_AM;
334 		CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
335 		CSR_WRITE_4(sc, MY_MAR0, 0xFFFFFFFF);
336 		CSR_WRITE_4(sc, MY_MAR1, 0xFFFFFFFF);
337 
338 		return;
339 	}
340 	/* first, zot all the existing hash bits */
341 	CSR_WRITE_4(sc, MY_MAR0, 0);
342 	CSR_WRITE_4(sc, MY_MAR1, 0);
343 
344 	/* now program new ones */
345 	if (if_foreach_llmaddr(ifp, my_hash_maddr, hashes) > 0)
346 		rxfilt |= MY_AM;
347 	else
348 		rxfilt &= ~MY_AM;
349 	CSR_WRITE_4(sc, MY_MAR0, hashes[0]);
350 	CSR_WRITE_4(sc, MY_MAR1, hashes[1]);
351 	CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
352 }
353 
354 /*
355  * Initiate an autonegotiation session.
356  */
357 static void
358 my_autoneg_xmit(struct my_softc * sc)
359 {
360 	u_int16_t       phy_sts = 0;
361 
362 	MY_LOCK_ASSERT(sc);
363 
364 	my_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
365 	DELAY(500);
366 	while (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_RESET);
367 
368 	phy_sts = my_phy_readreg(sc, PHY_BMCR);
369 	phy_sts |= PHY_BMCR_AUTONEGENBL | PHY_BMCR_AUTONEGRSTR;
370 	my_phy_writereg(sc, PHY_BMCR, phy_sts);
371 
372 	return;
373 }
374 
375 static void
376 my_autoneg_timeout(void *arg)
377 {
378 	struct my_softc *sc;
379 
380 	sc = arg;
381 	MY_LOCK_ASSERT(sc);
382 	my_autoneg_mii(sc, MY_FLAG_DELAYTIMEO, 1);
383 }
384 
385 /*
386  * Invoke autonegotiation on a PHY.
387  */
388 static void
389 my_autoneg_mii(struct my_softc * sc, int flag, int verbose)
390 {
391 	u_int16_t       phy_sts = 0, media, advert, ability;
392 	u_int16_t       ability2 = 0;
393 	struct ifnet   *ifp;
394 	struct ifmedia *ifm;
395 
396 	MY_LOCK_ASSERT(sc);
397 
398 	ifm = &sc->ifmedia;
399 	ifp = sc->my_ifp;
400 
401 	ifm->ifm_media = IFM_ETHER | IFM_AUTO;
402 
403 #ifndef FORCE_AUTONEG_TFOUR
404 	/*
405 	 * First, see if autoneg is supported. If not, there's no point in
406 	 * continuing.
407 	 */
408 	phy_sts = my_phy_readreg(sc, PHY_BMSR);
409 	if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
410 		if (verbose)
411 			device_printf(sc->my_dev,
412 			    "autonegotiation not supported\n");
413 		ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
414 		return;
415 	}
416 #endif
417 	switch (flag) {
418 	case MY_FLAG_FORCEDELAY:
419 		/*
420 		 * XXX Never use this option anywhere but in the probe
421 		 * routine: making the kernel stop dead in its tracks for
422 		 * three whole seconds after we've gone multi-user is really
423 		 * bad manners.
424 		 */
425 		my_autoneg_xmit(sc);
426 		DELAY(5000000);
427 		break;
428 	case MY_FLAG_SCHEDDELAY:
429 		/*
430 		 * Wait for the transmitter to go idle before starting an
431 		 * autoneg session, otherwise my_start() may clobber our
432 		 * timeout, and we don't want to allow transmission during an
433 		 * autoneg session since that can screw it up.
434 		 */
435 		if (sc->my_cdata.my_tx_head != NULL) {
436 			sc->my_want_auto = 1;
437 			MY_UNLOCK(sc);
438 			return;
439 		}
440 		my_autoneg_xmit(sc);
441 		callout_reset(&sc->my_autoneg_timer, hz * 5, my_autoneg_timeout,
442 		    sc);
443 		sc->my_autoneg = 1;
444 		sc->my_want_auto = 0;
445 		return;
446 	case MY_FLAG_DELAYTIMEO:
447 		callout_stop(&sc->my_autoneg_timer);
448 		sc->my_autoneg = 0;
449 		break;
450 	default:
451 		device_printf(sc->my_dev, "invalid autoneg flag: %d\n", flag);
452 		return;
453 	}
454 
455 	if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
456 		if (verbose)
457 			device_printf(sc->my_dev, "autoneg complete, ");
458 		phy_sts = my_phy_readreg(sc, PHY_BMSR);
459 	} else {
460 		if (verbose)
461 			device_printf(sc->my_dev, "autoneg not complete, ");
462 	}
463 
464 	media = my_phy_readreg(sc, PHY_BMCR);
465 
466 	/* Link is good. Report modes and set duplex mode. */
467 	if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
468 		if (verbose)
469 			device_printf(sc->my_dev, "link status good. ");
470 		advert = my_phy_readreg(sc, PHY_ANAR);
471 		ability = my_phy_readreg(sc, PHY_LPAR);
472 		if ((sc->my_pinfo->my_vid == MarvellPHYID0) ||
473 		    (sc->my_pinfo->my_vid == LevelOnePHYID0)) {
474 			ability2 = my_phy_readreg(sc, PHY_1000SR);
475 			if (ability2 & PHY_1000SR_1000BTXFULL) {
476 				advert = 0;
477 				ability = 0;
478 				/*
479 				 * this version did not support 1000M,
480 				 * ifm->ifm_media =
481 				 * IFM_ETHER|IFM_1000_T|IFM_FDX;
482 				 */
483 				ifm->ifm_media =
484 				    IFM_ETHER | IFM_100_TX | IFM_FDX;
485 				media &= ~PHY_BMCR_SPEEDSEL;
486 				media |= PHY_BMCR_1000;
487 				media |= PHY_BMCR_DUPLEX;
488 				printf("(full-duplex, 1000Mbps)\n");
489 			} else if (ability2 & PHY_1000SR_1000BTXHALF) {
490 				advert = 0;
491 				ability = 0;
492 				/*
493 				 * this version did not support 1000M,
494 				 * ifm->ifm_media = IFM_ETHER|IFM_1000_T;
495 				 */
496 				ifm->ifm_media = IFM_ETHER | IFM_100_TX;
497 				media &= ~PHY_BMCR_SPEEDSEL;
498 				media &= ~PHY_BMCR_DUPLEX;
499 				media |= PHY_BMCR_1000;
500 				printf("(half-duplex, 1000Mbps)\n");
501 			}
502 		}
503 		if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
504 			ifm->ifm_media = IFM_ETHER | IFM_100_T4;
505 			media |= PHY_BMCR_SPEEDSEL;
506 			media &= ~PHY_BMCR_DUPLEX;
507 			printf("(100baseT4)\n");
508 		} else if (advert & PHY_ANAR_100BTXFULL &&
509 			   ability & PHY_ANAR_100BTXFULL) {
510 			ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
511 			media |= PHY_BMCR_SPEEDSEL;
512 			media |= PHY_BMCR_DUPLEX;
513 			printf("(full-duplex, 100Mbps)\n");
514 		} else if (advert & PHY_ANAR_100BTXHALF &&
515 			   ability & PHY_ANAR_100BTXHALF) {
516 			ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
517 			media |= PHY_BMCR_SPEEDSEL;
518 			media &= ~PHY_BMCR_DUPLEX;
519 			printf("(half-duplex, 100Mbps)\n");
520 		} else if (advert & PHY_ANAR_10BTFULL &&
521 			   ability & PHY_ANAR_10BTFULL) {
522 			ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
523 			media &= ~PHY_BMCR_SPEEDSEL;
524 			media |= PHY_BMCR_DUPLEX;
525 			printf("(full-duplex, 10Mbps)\n");
526 		} else if (advert) {
527 			ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
528 			media &= ~PHY_BMCR_SPEEDSEL;
529 			media &= ~PHY_BMCR_DUPLEX;
530 			printf("(half-duplex, 10Mbps)\n");
531 		}
532 		media &= ~PHY_BMCR_AUTONEGENBL;
533 
534 		/* Set ASIC's duplex mode to match the PHY. */
535 		my_phy_writereg(sc, PHY_BMCR, media);
536 		my_setcfg(sc, media);
537 	} else {
538 		if (verbose)
539 			device_printf(sc->my_dev, "no carrier\n");
540 	}
541 
542 	my_init_locked(sc);
543 	if (sc->my_tx_pend) {
544 		sc->my_autoneg = 0;
545 		sc->my_tx_pend = 0;
546 		my_start_locked(ifp);
547 	}
548 	return;
549 }
550 
551 /*
552  * To get PHY ability.
553  */
554 static void
555 my_getmode_mii(struct my_softc * sc)
556 {
557 	u_int16_t       bmsr;
558 	struct ifnet   *ifp;
559 
560 	MY_LOCK_ASSERT(sc);
561 	ifp = sc->my_ifp;
562 	bmsr = my_phy_readreg(sc, PHY_BMSR);
563 	if (bootverbose)
564 		device_printf(sc->my_dev, "PHY status word: %x\n", bmsr);
565 
566 	/* fallback */
567 	sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
568 
569 	if (bmsr & PHY_BMSR_10BTHALF) {
570 		if (bootverbose)
571 			device_printf(sc->my_dev,
572 			    "10Mbps half-duplex mode supported\n");
573 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX,
574 		    0, NULL);
575 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
576 	}
577 	if (bmsr & PHY_BMSR_10BTFULL) {
578 		if (bootverbose)
579 			device_printf(sc->my_dev,
580 			    "10Mbps full-duplex mode supported\n");
581 
582 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
583 		    0, NULL);
584 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
585 	}
586 	if (bmsr & PHY_BMSR_100BTXHALF) {
587 		if (bootverbose)
588 			device_printf(sc->my_dev,
589 			    "100Mbps half-duplex mode supported\n");
590 		ifp->if_baudrate = 100000000;
591 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
592 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX,
593 			    0, NULL);
594 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
595 	}
596 	if (bmsr & PHY_BMSR_100BTXFULL) {
597 		if (bootverbose)
598 			device_printf(sc->my_dev,
599 			    "100Mbps full-duplex mode supported\n");
600 		ifp->if_baudrate = 100000000;
601 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
602 		    0, NULL);
603 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
604 	}
605 	/* Some also support 100BaseT4. */
606 	if (bmsr & PHY_BMSR_100BT4) {
607 		if (bootverbose)
608 			device_printf(sc->my_dev, "100baseT4 mode supported\n");
609 		ifp->if_baudrate = 100000000;
610 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_T4, 0, NULL);
611 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_T4;
612 #ifdef FORCE_AUTONEG_TFOUR
613 		if (bootverbose)
614 			device_printf(sc->my_dev,
615 			    "forcing on autoneg support for BT4\n");
616 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0 NULL):
617 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
618 #endif
619 	}
620 #if 0				/* this version did not support 1000M, */
621 	if (sc->my_pinfo->my_vid == MarvellPHYID0) {
622 		if (bootverbose)
623 			device_printf(sc->my_dev,
624 			    "1000Mbps half-duplex mode supported\n");
625 
626 		ifp->if_baudrate = 1000000000;
627 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
628 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_HDX,
629 		    0, NULL);
630 		if (bootverbose)
631 			device_printf(sc->my_dev,
632 			    "1000Mbps full-duplex mode supported\n");
633 		ifp->if_baudrate = 1000000000;
634 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
635 		    0, NULL);
636 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_1000_T | IFM_FDX;
637 	}
638 #endif
639 	if (bmsr & PHY_BMSR_CANAUTONEG) {
640 		if (bootverbose)
641 			device_printf(sc->my_dev, "autoneg supported\n");
642 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
643 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
644 	}
645 	return;
646 }
647 
648 /*
649  * Set speed and duplex mode.
650  */
651 static void
652 my_setmode_mii(struct my_softc * sc, int media)
653 {
654 	u_int16_t       bmcr;
655 
656 	MY_LOCK_ASSERT(sc);
657 	/*
658 	 * If an autoneg session is in progress, stop it.
659 	 */
660 	if (sc->my_autoneg) {
661 		device_printf(sc->my_dev, "canceling autoneg session\n");
662 		callout_stop(&sc->my_autoneg_timer);
663 		sc->my_autoneg = sc->my_want_auto = 0;
664 		bmcr = my_phy_readreg(sc, PHY_BMCR);
665 		bmcr &= ~PHY_BMCR_AUTONEGENBL;
666 		my_phy_writereg(sc, PHY_BMCR, bmcr);
667 	}
668 	device_printf(sc->my_dev, "selecting MII, ");
669 	bmcr = my_phy_readreg(sc, PHY_BMCR);
670 	bmcr &= ~(PHY_BMCR_AUTONEGENBL | PHY_BMCR_SPEEDSEL | PHY_BMCR_1000 |
671 		  PHY_BMCR_DUPLEX | PHY_BMCR_LOOPBK);
672 
673 #if 0				/* this version did not support 1000M, */
674 	if (IFM_SUBTYPE(media) == IFM_1000_T) {
675 		printf("1000Mbps/T4, half-duplex\n");
676 		bmcr &= ~PHY_BMCR_SPEEDSEL;
677 		bmcr &= ~PHY_BMCR_DUPLEX;
678 		bmcr |= PHY_BMCR_1000;
679 	}
680 #endif
681 	if (IFM_SUBTYPE(media) == IFM_100_T4) {
682 		printf("100Mbps/T4, half-duplex\n");
683 		bmcr |= PHY_BMCR_SPEEDSEL;
684 		bmcr &= ~PHY_BMCR_DUPLEX;
685 	}
686 	if (IFM_SUBTYPE(media) == IFM_100_TX) {
687 		printf("100Mbps, ");
688 		bmcr |= PHY_BMCR_SPEEDSEL;
689 	}
690 	if (IFM_SUBTYPE(media) == IFM_10_T) {
691 		printf("10Mbps, ");
692 		bmcr &= ~PHY_BMCR_SPEEDSEL;
693 	}
694 	if ((media & IFM_GMASK) == IFM_FDX) {
695 		printf("full duplex\n");
696 		bmcr |= PHY_BMCR_DUPLEX;
697 	} else {
698 		printf("half duplex\n");
699 		bmcr &= ~PHY_BMCR_DUPLEX;
700 	}
701 	my_phy_writereg(sc, PHY_BMCR, bmcr);
702 	my_setcfg(sc, bmcr);
703 	return;
704 }
705 
706 /*
707  * The Myson manual states that in order to fiddle with the 'full-duplex' and
708  * '100Mbps' bits in the netconfig register, we first have to put the
709  * transmit and/or receive logic in the idle state.
710  */
711 static void
712 my_setcfg(struct my_softc * sc, int bmcr)
713 {
714 	int             i, restart = 0;
715 
716 	MY_LOCK_ASSERT(sc);
717 	if (CSR_READ_4(sc, MY_TCRRCR) & (MY_TE | MY_RE)) {
718 		restart = 1;
719 		MY_CLRBIT(sc, MY_TCRRCR, (MY_TE | MY_RE));
720 		for (i = 0; i < MY_TIMEOUT; i++) {
721 			DELAY(10);
722 			if (!(CSR_READ_4(sc, MY_TCRRCR) &
723 			    (MY_TXRUN | MY_RXRUN)))
724 				break;
725 		}
726 		if (i == MY_TIMEOUT)
727 			device_printf(sc->my_dev,
728 			    "failed to force tx and rx to idle \n");
729 	}
730 	MY_CLRBIT(sc, MY_TCRRCR, MY_PS1000);
731 	MY_CLRBIT(sc, MY_TCRRCR, MY_PS10);
732 	if (bmcr & PHY_BMCR_1000)
733 		MY_SETBIT(sc, MY_TCRRCR, MY_PS1000);
734 	else if (!(bmcr & PHY_BMCR_SPEEDSEL))
735 		MY_SETBIT(sc, MY_TCRRCR, MY_PS10);
736 	if (bmcr & PHY_BMCR_DUPLEX)
737 		MY_SETBIT(sc, MY_TCRRCR, MY_FD);
738 	else
739 		MY_CLRBIT(sc, MY_TCRRCR, MY_FD);
740 	if (restart)
741 		MY_SETBIT(sc, MY_TCRRCR, MY_TE | MY_RE);
742 	return;
743 }
744 
745 static void
746 my_reset(struct my_softc * sc)
747 {
748 	int    i;
749 
750 	MY_LOCK_ASSERT(sc);
751 	MY_SETBIT(sc, MY_BCR, MY_SWR);
752 	for (i = 0; i < MY_TIMEOUT; i++) {
753 		DELAY(10);
754 		if (!(CSR_READ_4(sc, MY_BCR) & MY_SWR))
755 			break;
756 	}
757 	if (i == MY_TIMEOUT)
758 		device_printf(sc->my_dev, "reset never completed!\n");
759 
760 	/* Wait a little while for the chip to get its brains in order. */
761 	DELAY(1000);
762 	return;
763 }
764 
765 /*
766  * Probe for a Myson chip. Check the PCI vendor and device IDs against our
767  * list and return a device name if we find a match.
768  */
769 static int
770 my_probe(device_t dev)
771 {
772 	struct my_type *t;
773 
774 	t = my_devs;
775 	while (t->my_name != NULL) {
776 		if ((pci_get_vendor(dev) == t->my_vid) &&
777 		    (pci_get_device(dev) == t->my_did)) {
778 			device_set_desc(dev, t->my_name);
779 			my_info_tmp = t;
780 			return (BUS_PROBE_DEFAULT);
781 		}
782 		t++;
783 	}
784 	return (ENXIO);
785 }
786 
787 /*
788  * Attach the interface. Allocate softc structures, do ifmedia setup and
789  * ethernet/BPF attach.
790  */
791 static int
792 my_attach(device_t dev)
793 {
794 	int             i;
795 	u_char          eaddr[ETHER_ADDR_LEN];
796 	u_int32_t       iobase;
797 	struct my_softc *sc;
798 	struct ifnet   *ifp;
799 	int             media = IFM_ETHER | IFM_100_TX | IFM_FDX;
800 	unsigned int    round;
801 	caddr_t         roundptr;
802 	struct my_type *p;
803 	u_int16_t       phy_vid, phy_did, phy_sts = 0;
804 	int             rid, error = 0;
805 
806 	sc = device_get_softc(dev);
807 	sc->my_dev = dev;
808 	mtx_init(&sc->my_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
809 	    MTX_DEF);
810 	callout_init_mtx(&sc->my_autoneg_timer, &sc->my_mtx, 0);
811 	callout_init_mtx(&sc->my_watchdog, &sc->my_mtx, 0);
812 
813 	/*
814 	 * Map control/status registers.
815 	 */
816 	pci_enable_busmaster(dev);
817 
818 	if (my_info_tmp->my_did == MTD800ID) {
819 		iobase = pci_read_config(dev, MY_PCI_LOIO, 4);
820 		if (iobase & 0x300)
821 			MY_USEIOSPACE = 0;
822 	}
823 
824 	rid = MY_RID;
825 	sc->my_res = bus_alloc_resource_any(dev, MY_RES, &rid, RF_ACTIVE);
826 
827 	if (sc->my_res == NULL) {
828 		device_printf(dev, "couldn't map ports/memory\n");
829 		error = ENXIO;
830 		goto destroy_mutex;
831 	}
832 	sc->my_btag = rman_get_bustag(sc->my_res);
833 	sc->my_bhandle = rman_get_bushandle(sc->my_res);
834 
835 	rid = 0;
836 	sc->my_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
837 					    RF_SHAREABLE | RF_ACTIVE);
838 
839 	if (sc->my_irq == NULL) {
840 		device_printf(dev, "couldn't map interrupt\n");
841 		error = ENXIO;
842 		goto release_io;
843 	}
844 
845 	sc->my_info = my_info_tmp;
846 
847 	/* Reset the adapter. */
848 	MY_LOCK(sc);
849 	my_reset(sc);
850 	MY_UNLOCK(sc);
851 
852 	/*
853 	 * Get station address
854 	 */
855 	for (i = 0; i < ETHER_ADDR_LEN; ++i)
856 		eaddr[i] = CSR_READ_1(sc, MY_PAR0 + i);
857 
858 	sc->my_ldata_ptr = malloc(sizeof(struct my_list_data) + 8,
859 				  M_DEVBUF, M_NOWAIT);
860 	if (sc->my_ldata_ptr == NULL) {
861 		device_printf(dev, "no memory for list buffers!\n");
862 		error = ENXIO;
863 		goto release_irq;
864 	}
865 	sc->my_ldata = (struct my_list_data *) sc->my_ldata_ptr;
866 	round = (uintptr_t)sc->my_ldata_ptr & 0xF;
867 	roundptr = sc->my_ldata_ptr;
868 	for (i = 0; i < 8; i++) {
869 		if (round % 8) {
870 			round++;
871 			roundptr++;
872 		} else
873 			break;
874 	}
875 	sc->my_ldata = (struct my_list_data *) roundptr;
876 	bzero(sc->my_ldata, sizeof(struct my_list_data));
877 
878 	ifp = sc->my_ifp = if_alloc(IFT_ETHER);
879 	if (ifp == NULL) {
880 		device_printf(dev, "can not if_alloc()\n");
881 		error = ENOSPC;
882 		goto free_ldata;
883 	}
884 	ifp->if_softc = sc;
885 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
886 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
887 	ifp->if_ioctl = my_ioctl;
888 	ifp->if_start = my_start;
889 	ifp->if_init = my_init;
890 	ifp->if_baudrate = 10000000;
891 	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
892 	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
893 	IFQ_SET_READY(&ifp->if_snd);
894 
895 	if (sc->my_info->my_did == MTD803ID)
896 		sc->my_pinfo = my_phys;
897 	else {
898 		if (bootverbose)
899 			device_printf(dev, "probing for a PHY\n");
900 		MY_LOCK(sc);
901 		for (i = MY_PHYADDR_MIN; i < MY_PHYADDR_MAX + 1; i++) {
902 			if (bootverbose)
903 				device_printf(dev, "checking address: %d\n", i);
904 			sc->my_phy_addr = i;
905 			phy_sts = my_phy_readreg(sc, PHY_BMSR);
906 			if ((phy_sts != 0) && (phy_sts != 0xffff))
907 				break;
908 			else
909 				phy_sts = 0;
910 		}
911 		if (phy_sts) {
912 			phy_vid = my_phy_readreg(sc, PHY_VENID);
913 			phy_did = my_phy_readreg(sc, PHY_DEVID);
914 			if (bootverbose) {
915 				device_printf(dev, "found PHY at address %d, ",
916 				    sc->my_phy_addr);
917 				printf("vendor id: %x device id: %x\n",
918 				    phy_vid, phy_did);
919 			}
920 			p = my_phys;
921 			while (p->my_vid) {
922 				if (phy_vid == p->my_vid) {
923 					sc->my_pinfo = p;
924 					break;
925 				}
926 				p++;
927 			}
928 			if (sc->my_pinfo == NULL)
929 				sc->my_pinfo = &my_phys[PHY_UNKNOWN];
930 			if (bootverbose)
931 				device_printf(dev, "PHY type: %s\n",
932 				       sc->my_pinfo->my_name);
933 		} else {
934 			MY_UNLOCK(sc);
935 			device_printf(dev, "MII without any phy!\n");
936 			error = ENXIO;
937 			goto free_if;
938 		}
939 		MY_UNLOCK(sc);
940 	}
941 
942 	/* Do ifmedia setup. */
943 	ifmedia_init(&sc->ifmedia, 0, my_ifmedia_upd, my_ifmedia_sts);
944 	MY_LOCK(sc);
945 	my_getmode_mii(sc);
946 	my_autoneg_mii(sc, MY_FLAG_FORCEDELAY, 1);
947 	media = sc->ifmedia.ifm_media;
948 	my_stop(sc);
949 	MY_UNLOCK(sc);
950 	ifmedia_set(&sc->ifmedia, media);
951 
952 	ether_ifattach(ifp, eaddr);
953 
954 	error = bus_setup_intr(dev, sc->my_irq, INTR_TYPE_NET | INTR_MPSAFE,
955 			       NULL, my_intr, sc, &sc->my_intrhand);
956 
957 	if (error) {
958 		device_printf(dev, "couldn't set up irq\n");
959 		goto detach_if;
960 	}
961 
962 	return (0);
963 
964 detach_if:
965 	ether_ifdetach(ifp);
966 free_if:
967 	if_free(ifp);
968 free_ldata:
969 	free(sc->my_ldata_ptr, M_DEVBUF);
970 release_irq:
971 	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
972 release_io:
973 	bus_release_resource(dev, MY_RES, MY_RID, sc->my_res);
974 destroy_mutex:
975 	mtx_destroy(&sc->my_mtx);
976 	return (error);
977 }
978 
979 static int
980 my_detach(device_t dev)
981 {
982 	struct my_softc *sc;
983 	struct ifnet   *ifp;
984 
985 	sc = device_get_softc(dev);
986 	ifp = sc->my_ifp;
987 	ether_ifdetach(ifp);
988 	MY_LOCK(sc);
989 	my_stop(sc);
990 	MY_UNLOCK(sc);
991 	bus_teardown_intr(dev, sc->my_irq, sc->my_intrhand);
992 	callout_drain(&sc->my_watchdog);
993 	callout_drain(&sc->my_autoneg_timer);
994 
995 	if_free(ifp);
996 	free(sc->my_ldata_ptr, M_DEVBUF);
997 
998 	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
999 	bus_release_resource(dev, MY_RES, MY_RID, sc->my_res);
1000 	mtx_destroy(&sc->my_mtx);
1001 	return (0);
1002 }
1003 
1004 /*
1005  * Initialize the transmit descriptors.
1006  */
1007 static int
1008 my_list_tx_init(struct my_softc * sc)
1009 {
1010 	struct my_chain_data *cd;
1011 	struct my_list_data *ld;
1012 	int             i;
1013 
1014 	MY_LOCK_ASSERT(sc);
1015 	cd = &sc->my_cdata;
1016 	ld = sc->my_ldata;
1017 	for (i = 0; i < MY_TX_LIST_CNT; i++) {
1018 		cd->my_tx_chain[i].my_ptr = &ld->my_tx_list[i];
1019 		if (i == (MY_TX_LIST_CNT - 1))
1020 			cd->my_tx_chain[i].my_nextdesc = &cd->my_tx_chain[0];
1021 		else
1022 			cd->my_tx_chain[i].my_nextdesc =
1023 			    &cd->my_tx_chain[i + 1];
1024 	}
1025 	cd->my_tx_free = &cd->my_tx_chain[0];
1026 	cd->my_tx_tail = cd->my_tx_head = NULL;
1027 	return (0);
1028 }
1029 
1030 /*
1031  * Initialize the RX descriptors and allocate mbufs for them. Note that we
1032  * arrange the descriptors in a closed ring, so that the last descriptor
1033  * points back to the first.
1034  */
1035 static int
1036 my_list_rx_init(struct my_softc * sc)
1037 {
1038 	struct my_chain_data *cd;
1039 	struct my_list_data *ld;
1040 	int             i;
1041 
1042 	MY_LOCK_ASSERT(sc);
1043 	cd = &sc->my_cdata;
1044 	ld = sc->my_ldata;
1045 	for (i = 0; i < MY_RX_LIST_CNT; i++) {
1046 		cd->my_rx_chain[i].my_ptr =
1047 		    (struct my_desc *) & ld->my_rx_list[i];
1048 		if (my_newbuf(sc, &cd->my_rx_chain[i]) == ENOBUFS) {
1049 			MY_UNLOCK(sc);
1050 			return (ENOBUFS);
1051 		}
1052 		if (i == (MY_RX_LIST_CNT - 1)) {
1053 			cd->my_rx_chain[i].my_nextdesc = &cd->my_rx_chain[0];
1054 			ld->my_rx_list[i].my_next = vtophys(&ld->my_rx_list[0]);
1055 		} else {
1056 			cd->my_rx_chain[i].my_nextdesc =
1057 			    &cd->my_rx_chain[i + 1];
1058 			ld->my_rx_list[i].my_next =
1059 			    vtophys(&ld->my_rx_list[i + 1]);
1060 		}
1061 	}
1062 	cd->my_rx_head = &cd->my_rx_chain[0];
1063 	return (0);
1064 }
1065 
1066 /*
1067  * Initialize an RX descriptor and attach an MBUF cluster.
1068  */
1069 static int
1070 my_newbuf(struct my_softc * sc, struct my_chain_onefrag * c)
1071 {
1072 	struct mbuf    *m_new = NULL;
1073 
1074 	MY_LOCK_ASSERT(sc);
1075 	MGETHDR(m_new, M_NOWAIT, MT_DATA);
1076 	if (m_new == NULL) {
1077 		device_printf(sc->my_dev,
1078 		    "no memory for rx list -- packet dropped!\n");
1079 		return (ENOBUFS);
1080 	}
1081 	if (!(MCLGET(m_new, M_NOWAIT))) {
1082 		device_printf(sc->my_dev,
1083 		    "no memory for rx list -- packet dropped!\n");
1084 		m_freem(m_new);
1085 		return (ENOBUFS);
1086 	}
1087 	c->my_mbuf = m_new;
1088 	c->my_ptr->my_data = vtophys(mtod(m_new, caddr_t));
1089 	c->my_ptr->my_ctl = (MCLBYTES - 1) << MY_RBSShift;
1090 	c->my_ptr->my_status = MY_OWNByNIC;
1091 	return (0);
1092 }
1093 
1094 /*
1095  * A frame has been uploaded: pass the resulting mbuf chain up to the higher
1096  * level protocols.
1097  */
1098 static void
1099 my_rxeof(struct my_softc * sc)
1100 {
1101 	struct ether_header *eh;
1102 	struct mbuf    *m;
1103 	struct ifnet   *ifp;
1104 	struct my_chain_onefrag *cur_rx;
1105 	int             total_len = 0;
1106 	u_int32_t       rxstat;
1107 
1108 	MY_LOCK_ASSERT(sc);
1109 	ifp = sc->my_ifp;
1110 	while (!((rxstat = sc->my_cdata.my_rx_head->my_ptr->my_status)
1111 	    & MY_OWNByNIC)) {
1112 		cur_rx = sc->my_cdata.my_rx_head;
1113 		sc->my_cdata.my_rx_head = cur_rx->my_nextdesc;
1114 
1115 		if (rxstat & MY_ES) {	/* error summary: give up this rx pkt */
1116 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1117 			cur_rx->my_ptr->my_status = MY_OWNByNIC;
1118 			continue;
1119 		}
1120 		/* No errors; receive the packet. */
1121 		total_len = (rxstat & MY_FLNGMASK) >> MY_FLNGShift;
1122 		total_len -= ETHER_CRC_LEN;
1123 
1124 		if (total_len < MINCLSIZE) {
1125 			m = m_devget(mtod(cur_rx->my_mbuf, char *),
1126 			    total_len, 0, ifp, NULL);
1127 			cur_rx->my_ptr->my_status = MY_OWNByNIC;
1128 			if (m == NULL) {
1129 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1130 				continue;
1131 			}
1132 		} else {
1133 			m = cur_rx->my_mbuf;
1134 			/*
1135 			 * Try to conjure up a new mbuf cluster. If that
1136 			 * fails, it means we have an out of memory condition
1137 			 * and should leave the buffer in place and continue.
1138 			 * This will result in a lost packet, but there's
1139 			 * little else we can do in this situation.
1140 			 */
1141 			if (my_newbuf(sc, cur_rx) == ENOBUFS) {
1142 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1143 				cur_rx->my_ptr->my_status = MY_OWNByNIC;
1144 				continue;
1145 			}
1146 			m->m_pkthdr.rcvif = ifp;
1147 			m->m_pkthdr.len = m->m_len = total_len;
1148 		}
1149 		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1150 		eh = mtod(m, struct ether_header *);
1151 #if NBPFILTER > 0
1152 		/*
1153 		 * Handle BPF listeners. Let the BPF user see the packet, but
1154 		 * don't pass it up to the ether_input() layer unless it's a
1155 		 * broadcast packet, multicast packet, matches our ethernet
1156 		 * address or the interface is in promiscuous mode.
1157 		 */
1158 		if (bpf_peers_present(ifp->if_bpf)) {
1159 			bpf_mtap(ifp->if_bpf, m);
1160 			if (ifp->if_flags & IFF_PROMISC &&
1161 			    (bcmp(eh->ether_dhost, IF_LLADDR(sc->my_ifp),
1162 				ETHER_ADDR_LEN) &&
1163 			     (eh->ether_dhost[0] & 1) == 0)) {
1164 				m_freem(m);
1165 				continue;
1166 			}
1167 		}
1168 #endif
1169 		MY_UNLOCK(sc);
1170 		(*ifp->if_input)(ifp, m);
1171 		MY_LOCK(sc);
1172 	}
1173 	return;
1174 }
1175 
1176 /*
1177  * A frame was downloaded to the chip. It's safe for us to clean up the list
1178  * buffers.
1179  */
1180 static void
1181 my_txeof(struct my_softc * sc)
1182 {
1183 	struct my_chain *cur_tx;
1184 	struct ifnet   *ifp;
1185 
1186 	MY_LOCK_ASSERT(sc);
1187 	ifp = sc->my_ifp;
1188 	/* Clear the timeout timer. */
1189 	sc->my_timer = 0;
1190 	if (sc->my_cdata.my_tx_head == NULL) {
1191 		return;
1192 	}
1193 	/*
1194 	 * Go through our tx list and free mbufs for those frames that have
1195 	 * been transmitted.
1196 	 */
1197 	while (sc->my_cdata.my_tx_head->my_mbuf != NULL) {
1198 		u_int32_t       txstat;
1199 
1200 		cur_tx = sc->my_cdata.my_tx_head;
1201 		txstat = MY_TXSTATUS(cur_tx);
1202 		if ((txstat & MY_OWNByNIC) || txstat == MY_UNSENT)
1203 			break;
1204 		if (!(CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced)) {
1205 			if (txstat & MY_TXERR) {
1206 				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1207 				if (txstat & MY_EC) /* excessive collision */
1208 					if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
1209 				if (txstat & MY_LC)	/* late collision */
1210 					if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
1211 			}
1212 			if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
1213 			    (txstat & MY_NCRMASK) >> MY_NCRShift);
1214 		}
1215 		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1216 		m_freem(cur_tx->my_mbuf);
1217 		cur_tx->my_mbuf = NULL;
1218 		if (sc->my_cdata.my_tx_head == sc->my_cdata.my_tx_tail) {
1219 			sc->my_cdata.my_tx_head = NULL;
1220 			sc->my_cdata.my_tx_tail = NULL;
1221 			break;
1222 		}
1223 		sc->my_cdata.my_tx_head = cur_tx->my_nextdesc;
1224 	}
1225 	if (CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced) {
1226 		if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (CSR_READ_4(sc, MY_TSR) & MY_NCRMask));
1227 	}
1228 	return;
1229 }
1230 
1231 /*
1232  * TX 'end of channel' interrupt handler.
1233  */
1234 static void
1235 my_txeoc(struct my_softc * sc)
1236 {
1237 	struct ifnet   *ifp;
1238 
1239 	MY_LOCK_ASSERT(sc);
1240 	ifp = sc->my_ifp;
1241 	sc->my_timer = 0;
1242 	if (sc->my_cdata.my_tx_head == NULL) {
1243 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1244 		sc->my_cdata.my_tx_tail = NULL;
1245 		if (sc->my_want_auto)
1246 			my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
1247 	} else {
1248 		if (MY_TXOWN(sc->my_cdata.my_tx_head) == MY_UNSENT) {
1249 			MY_TXOWN(sc->my_cdata.my_tx_head) = MY_OWNByNIC;
1250 			sc->my_timer = 5;
1251 			CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF);
1252 		}
1253 	}
1254 	return;
1255 }
1256 
1257 static void
1258 my_intr(void *arg)
1259 {
1260 	struct my_softc *sc;
1261 	struct ifnet   *ifp;
1262 	u_int32_t       status;
1263 
1264 	sc = arg;
1265 	MY_LOCK(sc);
1266 	ifp = sc->my_ifp;
1267 	if (!(ifp->if_flags & IFF_UP)) {
1268 		MY_UNLOCK(sc);
1269 		return;
1270 	}
1271 	/* Disable interrupts. */
1272 	CSR_WRITE_4(sc, MY_IMR, 0x00000000);
1273 
1274 	for (;;) {
1275 		status = CSR_READ_4(sc, MY_ISR);
1276 		status &= MY_INTRS;
1277 		if (status)
1278 			CSR_WRITE_4(sc, MY_ISR, status);
1279 		else
1280 			break;
1281 
1282 		if (status & MY_RI)	/* receive interrupt */
1283 			my_rxeof(sc);
1284 
1285 		if ((status & MY_RBU) || (status & MY_RxErr)) {
1286 			/* rx buffer unavailable or rx error */
1287 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1288 #ifdef foo
1289 			my_stop(sc);
1290 			my_reset(sc);
1291 			my_init_locked(sc);
1292 #endif
1293 		}
1294 		if (status & MY_TI)	/* tx interrupt */
1295 			my_txeof(sc);
1296 		if (status & MY_ETI)	/* tx early interrupt */
1297 			my_txeof(sc);
1298 		if (status & MY_TBU)	/* tx buffer unavailable */
1299 			my_txeoc(sc);
1300 
1301 #if 0				/* 90/1/18 delete */
1302 		if (status & MY_FBE) {
1303 			my_reset(sc);
1304 			my_init_locked(sc);
1305 		}
1306 #endif
1307 	}
1308 
1309 	/* Re-enable interrupts. */
1310 	CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
1311 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1312 		my_start_locked(ifp);
1313 	MY_UNLOCK(sc);
1314 	return;
1315 }
1316 
1317 /*
1318  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1319  * pointers to the fragment pointers.
1320  */
1321 static int
1322 my_encap(struct my_softc * sc, struct my_chain * c, struct mbuf * m_head)
1323 {
1324 	struct my_desc *f = NULL;
1325 	int             total_len;
1326 	struct mbuf    *m, *m_new = NULL;
1327 
1328 	MY_LOCK_ASSERT(sc);
1329 	/* calculate the total tx pkt length */
1330 	total_len = 0;
1331 	for (m = m_head; m != NULL; m = m->m_next)
1332 		total_len += m->m_len;
1333 	/*
1334 	 * Start packing the mbufs in this chain into the fragment pointers.
1335 	 * Stop when we run out of fragments or hit the end of the mbuf
1336 	 * chain.
1337 	 */
1338 	m = m_head;
1339 	MGETHDR(m_new, M_NOWAIT, MT_DATA);
1340 	if (m_new == NULL) {
1341 		device_printf(sc->my_dev, "no memory for tx list");
1342 		return (1);
1343 	}
1344 	if (m_head->m_pkthdr.len > MHLEN) {
1345 		if (!(MCLGET(m_new, M_NOWAIT))) {
1346 			m_freem(m_new);
1347 			device_printf(sc->my_dev, "no memory for tx list");
1348 			return (1);
1349 		}
1350 	}
1351 	m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t));
1352 	m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1353 	m_freem(m_head);
1354 	m_head = m_new;
1355 	f = &c->my_ptr->my_frag[0];
1356 	f->my_status = 0;
1357 	f->my_data = vtophys(mtod(m_new, caddr_t));
1358 	total_len = m_new->m_len;
1359 	f->my_ctl = MY_TXFD | MY_TXLD | MY_CRCEnable | MY_PADEnable;
1360 	f->my_ctl |= total_len << MY_PKTShift;	/* pkt size */
1361 	f->my_ctl |= total_len;	/* buffer size */
1362 	/* 89/12/29 add, for mtd891 *//* [ 89? ] */
1363 	if (sc->my_info->my_did == MTD891ID)
1364 		f->my_ctl |= MY_ETIControl | MY_RetryTxLC;
1365 	c->my_mbuf = m_head;
1366 	c->my_lastdesc = 0;
1367 	MY_TXNEXT(c) = vtophys(&c->my_nextdesc->my_ptr->my_frag[0]);
1368 	return (0);
1369 }
1370 
1371 /*
1372  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1373  * to the mbuf data regions directly in the transmit lists. We also save a
1374  * copy of the pointers since the transmit list fragment pointers are
1375  * physical addresses.
1376  */
1377 static void
1378 my_start(struct ifnet * ifp)
1379 {
1380 	struct my_softc *sc;
1381 
1382 	sc = ifp->if_softc;
1383 	MY_LOCK(sc);
1384 	my_start_locked(ifp);
1385 	MY_UNLOCK(sc);
1386 }
1387 
1388 static void
1389 my_start_locked(struct ifnet * ifp)
1390 {
1391 	struct my_softc *sc;
1392 	struct mbuf    *m_head = NULL;
1393 	struct my_chain *cur_tx = NULL, *start_tx;
1394 
1395 	sc = ifp->if_softc;
1396 	MY_LOCK_ASSERT(sc);
1397 	if (sc->my_autoneg) {
1398 		sc->my_tx_pend = 1;
1399 		return;
1400 	}
1401 	/*
1402 	 * Check for an available queue slot. If there are none, punt.
1403 	 */
1404 	if (sc->my_cdata.my_tx_free->my_mbuf != NULL) {
1405 		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1406 		return;
1407 	}
1408 	start_tx = sc->my_cdata.my_tx_free;
1409 	while (sc->my_cdata.my_tx_free->my_mbuf == NULL) {
1410 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1411 		if (m_head == NULL)
1412 			break;
1413 
1414 		/* Pick a descriptor off the free list. */
1415 		cur_tx = sc->my_cdata.my_tx_free;
1416 		sc->my_cdata.my_tx_free = cur_tx->my_nextdesc;
1417 
1418 		/* Pack the data into the descriptor. */
1419 		my_encap(sc, cur_tx, m_head);
1420 
1421 		if (cur_tx != start_tx)
1422 			MY_TXOWN(cur_tx) = MY_OWNByNIC;
1423 #if NBPFILTER > 0
1424 		/*
1425 		 * If there's a BPF listener, bounce a copy of this frame to
1426 		 * him.
1427 		 */
1428 		BPF_MTAP(ifp, cur_tx->my_mbuf);
1429 #endif
1430 	}
1431 	/*
1432 	 * If there are no packets queued, bail.
1433 	 */
1434 	if (cur_tx == NULL) {
1435 		return;
1436 	}
1437 	/*
1438 	 * Place the request for the upload interrupt in the last descriptor
1439 	 * in the chain. This way, if we're chaining several packets at once,
1440 	 * we'll only get an interrupt once for the whole chain rather than
1441 	 * once for each packet.
1442 	 */
1443 	MY_TXCTL(cur_tx) |= MY_TXIC;
1444 	cur_tx->my_ptr->my_frag[0].my_ctl |= MY_TXIC;
1445 	sc->my_cdata.my_tx_tail = cur_tx;
1446 	if (sc->my_cdata.my_tx_head == NULL)
1447 		sc->my_cdata.my_tx_head = start_tx;
1448 	MY_TXOWN(start_tx) = MY_OWNByNIC;
1449 	CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF);	/* tx polling demand */
1450 
1451 	/*
1452 	 * Set a timeout in case the chip goes out to lunch.
1453 	 */
1454 	sc->my_timer = 5;
1455 	return;
1456 }
1457 
1458 static void
1459 my_init(void *xsc)
1460 {
1461 	struct my_softc *sc = xsc;
1462 
1463 	MY_LOCK(sc);
1464 	my_init_locked(sc);
1465 	MY_UNLOCK(sc);
1466 }
1467 
1468 static void
1469 my_init_locked(struct my_softc *sc)
1470 {
1471 	struct ifnet   *ifp = sc->my_ifp;
1472 	u_int16_t       phy_bmcr = 0;
1473 
1474 	MY_LOCK_ASSERT(sc);
1475 	if (sc->my_autoneg) {
1476 		return;
1477 	}
1478 	if (sc->my_pinfo != NULL)
1479 		phy_bmcr = my_phy_readreg(sc, PHY_BMCR);
1480 	/*
1481 	 * Cancel pending I/O and free all RX/TX buffers.
1482 	 */
1483 	my_stop(sc);
1484 	my_reset(sc);
1485 
1486 	/*
1487 	 * Set cache alignment and burst length.
1488 	 */
1489 #if 0				/* 89/9/1 modify,  */
1490 	CSR_WRITE_4(sc, MY_BCR, MY_RPBLE512);
1491 	CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF);
1492 #endif
1493 	CSR_WRITE_4(sc, MY_BCR, MY_PBL8);
1494 	CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF | MY_RBLEN | MY_RPBLE512);
1495 	/*
1496 	 * 89/12/29 add, for mtd891,
1497 	 */
1498 	if (sc->my_info->my_did == MTD891ID) {
1499 		MY_SETBIT(sc, MY_BCR, MY_PROG);
1500 		MY_SETBIT(sc, MY_TCRRCR, MY_Enhanced);
1501 	}
1502 	my_setcfg(sc, phy_bmcr);
1503 	/* Init circular RX list. */
1504 	if (my_list_rx_init(sc) == ENOBUFS) {
1505 		device_printf(sc->my_dev, "init failed: no memory for rx buffers\n");
1506 		my_stop(sc);
1507 		return;
1508 	}
1509 	/* Init TX descriptors. */
1510 	my_list_tx_init(sc);
1511 
1512 	/* If we want promiscuous mode, set the allframes bit. */
1513 	if (ifp->if_flags & IFF_PROMISC)
1514 		MY_SETBIT(sc, MY_TCRRCR, MY_PROM);
1515 	else
1516 		MY_CLRBIT(sc, MY_TCRRCR, MY_PROM);
1517 
1518 	/*
1519 	 * Set capture broadcast bit to capture broadcast frames.
1520 	 */
1521 	if (ifp->if_flags & IFF_BROADCAST)
1522 		MY_SETBIT(sc, MY_TCRRCR, MY_AB);
1523 	else
1524 		MY_CLRBIT(sc, MY_TCRRCR, MY_AB);
1525 
1526 	/*
1527 	 * Program the multicast filter, if necessary.
1528 	 */
1529 	my_setmulti(sc);
1530 
1531 	/*
1532 	 * Load the address of the RX list.
1533 	 */
1534 	MY_CLRBIT(sc, MY_TCRRCR, MY_RE);
1535 	CSR_WRITE_4(sc, MY_RXLBA, vtophys(&sc->my_ldata->my_rx_list[0]));
1536 
1537 	/*
1538 	 * Enable interrupts.
1539 	 */
1540 	CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
1541 	CSR_WRITE_4(sc, MY_ISR, 0xFFFFFFFF);
1542 
1543 	/* Enable receiver and transmitter. */
1544 	MY_SETBIT(sc, MY_TCRRCR, MY_RE);
1545 	MY_CLRBIT(sc, MY_TCRRCR, MY_TE);
1546 	CSR_WRITE_4(sc, MY_TXLBA, vtophys(&sc->my_ldata->my_tx_list[0]));
1547 	MY_SETBIT(sc, MY_TCRRCR, MY_TE);
1548 
1549 	/* Restore state of BMCR */
1550 	if (sc->my_pinfo != NULL)
1551 		my_phy_writereg(sc, PHY_BMCR, phy_bmcr);
1552 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1553 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1554 
1555 	callout_reset(&sc->my_watchdog, hz, my_watchdog, sc);
1556 	return;
1557 }
1558 
1559 /*
1560  * Set media options.
1561  */
1562 
1563 static int
1564 my_ifmedia_upd(struct ifnet * ifp)
1565 {
1566 	struct my_softc *sc;
1567 	struct ifmedia *ifm;
1568 
1569 	sc = ifp->if_softc;
1570 	MY_LOCK(sc);
1571 	ifm = &sc->ifmedia;
1572 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
1573 		MY_UNLOCK(sc);
1574 		return (EINVAL);
1575 	}
1576 	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
1577 		my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
1578 	else
1579 		my_setmode_mii(sc, ifm->ifm_media);
1580 	MY_UNLOCK(sc);
1581 	return (0);
1582 }
1583 
1584 /*
1585  * Report current media status.
1586  */
1587 
1588 static void
1589 my_ifmedia_sts(struct ifnet * ifp, struct ifmediareq * ifmr)
1590 {
1591 	struct my_softc *sc;
1592 	u_int16_t advert = 0, ability = 0;
1593 
1594 	sc = ifp->if_softc;
1595 	MY_LOCK(sc);
1596 	ifmr->ifm_active = IFM_ETHER;
1597 	if (!(my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
1598 #if 0				/* this version did not support 1000M, */
1599 		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_1000)
1600 			ifmr->ifm_active = IFM_ETHER | IFM_1000TX;
1601 #endif
1602 		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
1603 			ifmr->ifm_active = IFM_ETHER | IFM_100_TX;
1604 		else
1605 			ifmr->ifm_active = IFM_ETHER | IFM_10_T;
1606 		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
1607 			ifmr->ifm_active |= IFM_FDX;
1608 		else
1609 			ifmr->ifm_active |= IFM_HDX;
1610 
1611 		MY_UNLOCK(sc);
1612 		return;
1613 	}
1614 	ability = my_phy_readreg(sc, PHY_LPAR);
1615 	advert = my_phy_readreg(sc, PHY_ANAR);
1616 
1617 #if 0				/* this version did not support 1000M, */
1618 	if (sc->my_pinfo->my_vid = MarvellPHYID0) {
1619 		ability2 = my_phy_readreg(sc, PHY_1000SR);
1620 		if (ability2 & PHY_1000SR_1000BTXFULL) {
1621 			advert = 0;
1622 			ability = 0;
1623 	  		ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
1624 	  	} else if (ability & PHY_1000SR_1000BTXHALF) {
1625 			advert = 0;
1626 			ability = 0;
1627 			ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_HDX;
1628 		}
1629 	}
1630 #endif
1631 	if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4)
1632 		ifmr->ifm_active = IFM_ETHER | IFM_100_T4;
1633 	else if (advert & PHY_ANAR_100BTXFULL && ability & PHY_ANAR_100BTXFULL)
1634 		ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1635 	else if (advert & PHY_ANAR_100BTXHALF && ability & PHY_ANAR_100BTXHALF)
1636 		ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_HDX;
1637 	else if (advert & PHY_ANAR_10BTFULL && ability & PHY_ANAR_10BTFULL)
1638 		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_FDX;
1639 	else if (advert & PHY_ANAR_10BTHALF && ability & PHY_ANAR_10BTHALF)
1640 		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_HDX;
1641 	MY_UNLOCK(sc);
1642 	return;
1643 }
1644 
1645 static int
1646 my_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
1647 {
1648 	struct my_softc *sc = ifp->if_softc;
1649 	struct ifreq   *ifr = (struct ifreq *) data;
1650 	int             error;
1651 
1652 	switch (command) {
1653 	case SIOCSIFFLAGS:
1654 		MY_LOCK(sc);
1655 		if (ifp->if_flags & IFF_UP)
1656 			my_init_locked(sc);
1657 		else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1658 			my_stop(sc);
1659 		MY_UNLOCK(sc);
1660 		error = 0;
1661 		break;
1662 	case SIOCADDMULTI:
1663 	case SIOCDELMULTI:
1664 		MY_LOCK(sc);
1665 		my_setmulti(sc);
1666 		MY_UNLOCK(sc);
1667 		error = 0;
1668 		break;
1669 	case SIOCGIFMEDIA:
1670 	case SIOCSIFMEDIA:
1671 		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
1672 		break;
1673 	default:
1674 		error = ether_ioctl(ifp, command, data);
1675 		break;
1676 	}
1677 	return (error);
1678 }
1679 
1680 static void
1681 my_watchdog(void *arg)
1682 {
1683 	struct my_softc *sc;
1684 	struct ifnet *ifp;
1685 
1686 	sc = arg;
1687 	MY_LOCK_ASSERT(sc);
1688 	callout_reset(&sc->my_watchdog, hz, my_watchdog, sc);
1689 	if (sc->my_timer == 0 || --sc->my_timer > 0)
1690 		return;
1691 
1692 	ifp = sc->my_ifp;
1693 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1694 	if_printf(ifp, "watchdog timeout\n");
1695 	if (!(my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
1696 		if_printf(ifp, "no carrier - transceiver cable problem?\n");
1697 	my_stop(sc);
1698 	my_reset(sc);
1699 	my_init_locked(sc);
1700 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1701 		my_start_locked(ifp);
1702 }
1703 
1704 /*
1705  * Stop the adapter and free any mbufs allocated to the RX and TX lists.
1706  */
1707 static void
1708 my_stop(struct my_softc * sc)
1709 {
1710 	int    i;
1711 	struct ifnet   *ifp;
1712 
1713 	MY_LOCK_ASSERT(sc);
1714 	ifp = sc->my_ifp;
1715 
1716 	callout_stop(&sc->my_autoneg_timer);
1717 	callout_stop(&sc->my_watchdog);
1718 
1719 	MY_CLRBIT(sc, MY_TCRRCR, (MY_RE | MY_TE));
1720 	CSR_WRITE_4(sc, MY_IMR, 0x00000000);
1721 	CSR_WRITE_4(sc, MY_TXLBA, 0x00000000);
1722 	CSR_WRITE_4(sc, MY_RXLBA, 0x00000000);
1723 
1724 	/*
1725 	 * Free data in the RX lists.
1726 	 */
1727 	for (i = 0; i < MY_RX_LIST_CNT; i++) {
1728 		if (sc->my_cdata.my_rx_chain[i].my_mbuf != NULL) {
1729 			m_freem(sc->my_cdata.my_rx_chain[i].my_mbuf);
1730 			sc->my_cdata.my_rx_chain[i].my_mbuf = NULL;
1731 		}
1732 	}
1733 	bzero((char *)&sc->my_ldata->my_rx_list,
1734 	    sizeof(sc->my_ldata->my_rx_list));
1735 	/*
1736 	 * Free the TX list buffers.
1737 	 */
1738 	for (i = 0; i < MY_TX_LIST_CNT; i++) {
1739 		if (sc->my_cdata.my_tx_chain[i].my_mbuf != NULL) {
1740 			m_freem(sc->my_cdata.my_tx_chain[i].my_mbuf);
1741 			sc->my_cdata.my_tx_chain[i].my_mbuf = NULL;
1742 		}
1743 	}
1744 	bzero((char *)&sc->my_ldata->my_tx_list,
1745 	    sizeof(sc->my_ldata->my_tx_list));
1746 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1747 	return;
1748 }
1749 
1750 /*
1751  * Stop all chip I/O so that the kernel's probe routines don't get confused
1752  * by errant DMAs when rebooting.
1753  */
1754 static int
1755 my_shutdown(device_t dev)
1756 {
1757 	struct my_softc *sc;
1758 
1759 	sc = device_get_softc(dev);
1760 	MY_LOCK(sc);
1761 	my_stop(sc);
1762 	MY_UNLOCK(sc);
1763 	return 0;
1764 }
1765