xref: /freebsd/sys/dev/my/if_my.c (revision 8fc257994d0ce2396196d7a06d50d20c8015f4b7)
1 /*-
2  * Written by: yen_cw@myson.com.tw
3  * Copyright (c) 2002 Myson Technology Inc.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions, and the following disclaimer,
11  *    without modification, immediately at the beginning of the file.
12  * 2. The name of the author may not be used to endorse or promote products
13  *    derived from this software without specific prior written permission.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
19  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * Myson fast ethernet PCI NIC driver, available at: http://www.myson.com.tw/
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/sockio.h>
36 #include <sys/mbuf.h>
37 #include <sys/malloc.h>
38 #include <sys/kernel.h>
39 #include <sys/socket.h>
40 #include <sys/queue.h>
41 #include <sys/types.h>
42 #include <sys/bus.h>
43 #include <sys/module.h>
44 #include <sys/lock.h>
45 #include <sys/mutex.h>
46 
47 #define NBPFILTER	1
48 
49 #include <net/if.h>
50 #include <net/if_arp.h>
51 #include <net/ethernet.h>
52 #include <net/if_media.h>
53 #include <net/if_types.h>
54 #include <net/if_dl.h>
55 #include <net/bpf.h>
56 
57 #include <vm/vm.h>		/* for vtophys */
58 #include <vm/pmap.h>		/* for vtophys */
59 #include <machine/bus.h>
60 #include <machine/resource.h>
61 #include <sys/bus.h>
62 #include <sys/rman.h>
63 
64 #include <dev/pci/pcireg.h>
65 #include <dev/pci/pcivar.h>
66 
67 /*
68  * #define MY_USEIOSPACE
69  */
70 
71 static int      MY_USEIOSPACE = 1;
72 
73 #ifdef MY_USEIOSPACE
74 #define MY_RES                  SYS_RES_IOPORT
75 #define MY_RID                  MY_PCI_LOIO
76 #else
77 #define MY_RES                  SYS_RES_MEMORY
78 #define MY_RID                  MY_PCI_LOMEM
79 #endif
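/*
 * Note: MY_USEIOSPACE below is a C variable rather than a preprocessor macro,
 * so the #ifdef above only selects I/O port space if the commented-out
 * #define is restored; as the file stands, MY_RES/MY_RID resolve to
 * memory-mapped access.
 */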
80 
81 
82 #include <dev/my/if_myreg.h>
83 
84 #ifndef lint
85 static          const char rcsid[] =
86 "$Id: if_my.c,v 1.16 2003/04/15 06:37:25 mdodd Exp $";
87 #endif
88 
89 /*
90  * Various supported device vendors/types and their names.
91  */
92 struct my_type *my_info_tmp;
93 static struct my_type my_devs[] = {
94 	{MYSONVENDORID, MTD800ID, "Myson MTD80X Based Fast Ethernet Card"},
95 	{MYSONVENDORID, MTD803ID, "Myson MTD80X Based Fast Ethernet Card"},
96 	{MYSONVENDORID, MTD891ID, "Myson MTD89X Based Giga Ethernet Card"},
97 	{0, 0, NULL}
98 };
99 
100 /*
101  * Various supported PHY vendors/types and their names. Note that this driver
102  * will work with pretty much any MII-compliant PHY, so failure to positively
103  * identify the chip is not a fatal error.
104  */
105 static struct my_type my_phys[] = {
106 	{MysonPHYID0, MysonPHYID0, "<MYSON MTD981>"},
107 	{SeeqPHYID0, SeeqPHYID0, "<SEEQ 80225>"},
108 	{AhdocPHYID0, AhdocPHYID0, "<AHDOC 101>"},
109 	{MarvellPHYID0, MarvellPHYID0, "<MARVELL 88E1000>"},
110 	{LevelOnePHYID0, LevelOnePHYID0, "<LevelOne LXT1000>"},
111 	{0, 0, "<MII-compliant physical interface>"}
112 };
113 
114 static int      my_probe(device_t);
115 static int      my_attach(device_t);
116 static int      my_detach(device_t);
117 static int      my_newbuf(struct my_softc *, struct my_chain_onefrag *);
118 static int      my_encap(struct my_softc *, struct my_chain *, struct mbuf *);
119 static void     my_rxeof(struct my_softc *);
120 static void     my_txeof(struct my_softc *);
121 static void     my_txeoc(struct my_softc *);
122 static void     my_intr(void *);
123 static void     my_start(struct ifnet *);
124 static void     my_start_locked(struct ifnet *);
125 static int      my_ioctl(struct ifnet *, u_long, caddr_t);
126 static void     my_init(void *);
127 static void     my_init_locked(struct my_softc *);
128 static void     my_stop(struct my_softc *);
129 static void     my_autoneg_timeout(void *);
130 static void     my_watchdog(void *);
131 static int      my_shutdown(device_t);
132 static int      my_ifmedia_upd(struct ifnet *);
133 static void     my_ifmedia_sts(struct ifnet *, struct ifmediareq *);
134 static u_int16_t my_phy_readreg(struct my_softc *, int);
135 static void     my_phy_writereg(struct my_softc *, int, int);
136 static void     my_autoneg_xmit(struct my_softc *);
137 static void     my_autoneg_mii(struct my_softc *, int, int);
138 static void     my_setmode_mii(struct my_softc *, int);
139 static void     my_getmode_mii(struct my_softc *);
140 static void     my_setcfg(struct my_softc *, int);
141 static void     my_setmulti(struct my_softc *);
142 static void     my_reset(struct my_softc *);
143 static int      my_list_rx_init(struct my_softc *);
144 static int      my_list_tx_init(struct my_softc *);
145 static long     my_send_cmd_to_phy(struct my_softc *, int, int);
146 
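/* Read-modify-write helpers for setting and clearing bits in 32-bit CSRs. */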
147 #define MY_SETBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
148 #define MY_CLRBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))
149 
150 static device_method_t my_methods[] = {
151 	/* Device interface */
152 	DEVMETHOD(device_probe, my_probe),
153 	DEVMETHOD(device_attach, my_attach),
154 	DEVMETHOD(device_detach, my_detach),
155 	DEVMETHOD(device_shutdown, my_shutdown),
156 
157 	{0, 0}
158 };
159 
160 static driver_t my_driver = {
161 	"my",
162 	my_methods,
163 	sizeof(struct my_softc)
164 };
165 
166 static devclass_t my_devclass;
167 
168 DRIVER_MODULE(my, pci, my_driver, my_devclass, 0, 0);
169 MODULE_DEPEND(my, pci, 1, 1, 1);
170 MODULE_DEPEND(my, ether, 1, 1, 1);
171 
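/*
 * MII management (MDIO) access for chips other than the MTD803 is bit-banged
 * through the MY_MANAGEMENT register: my_send_cmd_to_phy() clocks out a
 * preamble of 32 one bits on MDC/MDO, then the start/opcode/PHY address/
 * register address fields; for a read, the write-enable bit is dropped before
 * the turnaround so the PHY can drive the result back on MDI.
 */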
172 static long
173 my_send_cmd_to_phy(struct my_softc * sc, int opcode, int regad)
174 {
175 	long            miir;
176 	int             i;
177 	int             mask, data;
178 
179 	MY_LOCK_ASSERT(sc);
180 
181 	/* enable MII output */
182 	miir = CSR_READ_4(sc, MY_MANAGEMENT);
183 	miir &= 0xfffffff0;
184 
185 	miir |= MY_MASK_MIIR_MII_WRITE + MY_MASK_MIIR_MII_MDO;
186 
187 	/* send 32 1's preamble */
188 	for (i = 0; i < 32; i++) {
189 		/* low MDC; MDO is already high (miir) */
190 		miir &= ~MY_MASK_MIIR_MII_MDC;
191 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
192 
193 		/* high MDC */
194 		miir |= MY_MASK_MIIR_MII_MDC;
195 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
196 	}
197 
198 	/* calculate ST+OP+PHYAD+REGAD+TA */
199 	data = opcode | (sc->my_phy_addr << 7) | (regad << 2);
200 
201 	/* sent out */
202 	mask = 0x8000;
203 	while (mask) {
204 		/* low MDC, prepare MDO */
205 		miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
206 		if (mask & data)
207 			miir |= MY_MASK_MIIR_MII_MDO;
208 
209 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
210 		/* high MDC */
211 		miir |= MY_MASK_MIIR_MII_MDC;
212 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
213 		DELAY(30);
214 
215 		/* next */
216 		mask >>= 1;
217 		if (mask == 0x2 && opcode == MY_OP_READ)
218 			miir &= ~MY_MASK_MIIR_MII_WRITE;
219 	}
220 
221 	return miir;
222 }
223 
224 
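/*
 * Read a PHY register. The MTD803 exposes its internal PHY registers directly
 * in the chip's register space at MY_PHYBASE; other chips shift the result in
 * bit by bit on MDI after the read command issued via my_send_cmd_to_phy().
 */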
225 static u_int16_t
226 my_phy_readreg(struct my_softc * sc, int reg)
227 {
228 	long            miir;
229 	int             mask, data;
230 
231 	MY_LOCK_ASSERT(sc);
232 
233 	if (sc->my_info->my_did == MTD803ID)
234 		data = CSR_READ_2(sc, MY_PHYBASE + reg * 2);
235 	else {
236 		miir = my_send_cmd_to_phy(sc, MY_OP_READ, reg);
237 
238 		/* read data */
239 		mask = 0x8000;
240 		data = 0;
241 		while (mask) {
242 			/* low MDC */
243 			miir &= ~MY_MASK_MIIR_MII_MDC;
244 			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
245 
246 			/* read MDI */
247 			miir = CSR_READ_4(sc, MY_MANAGEMENT);
248 			if (miir & MY_MASK_MIIR_MII_MDI)
249 				data |= mask;
250 
251 			/* high MDC, and wait */
252 			miir |= MY_MASK_MIIR_MII_MDC;
253 			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
254 			DELAY(30);
255 
256 			/* next */
257 			mask >>= 1;
258 		}
259 
260 		/* low MDC */
261 		miir &= ~MY_MASK_MIIR_MII_MDC;
262 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
263 	}
264 
265 	return (u_int16_t) data;
266 }
267 
268 
269 static void
270 my_phy_writereg(struct my_softc * sc, int reg, int data)
271 {
272 	long            miir;
273 	int             mask;
274 
275 	MY_LOCK_ASSERT(sc);
276 
277 	if (sc->my_info->my_did == MTD803ID)
278 		CSR_WRITE_2(sc, MY_PHYBASE + reg * 2, data);
279 	else {
280 		miir = my_send_cmd_to_phy(sc, MY_OP_WRITE, reg);
281 
282 		/* write data */
283 		mask = 0x8000;
284 		while (mask) {
285 			/* low MDC, prepare MDO */
286 			miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
287 			if (mask & data)
288 				miir |= MY_MASK_MIIR_MII_MDO;
289 			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
290 			DELAY(1);
291 
292 			/* high MDC */
293 			miir |= MY_MASK_MIIR_MII_MDC;
294 			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
295 			DELAY(1);
296 
297 			/* next */
298 			mask >>= 1;
299 		}
300 
301 		/* low MDC */
302 		miir &= ~MY_MASK_MIIR_MII_MDC;
303 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
304 	}
305 	return;
306 }
307 
308 
309 /*
310  * Program the 64-bit multicast hash filter.
311  */
312 static void
313 my_setmulti(struct my_softc * sc)
314 {
315 	struct ifnet   *ifp;
316 	int             h = 0;
317 	u_int32_t       hashes[2] = {0, 0};
318 	struct ifmultiaddr *ifma;
319 	u_int32_t       rxfilt;
320 	int             mcnt = 0;
321 
322 	MY_LOCK_ASSERT(sc);
323 
324 	ifp = sc->my_ifp;
325 
326 	rxfilt = CSR_READ_4(sc, MY_TCRRCR);
327 
328 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
329 		rxfilt |= MY_AM;
330 		CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
331 		CSR_WRITE_4(sc, MY_MAR0, 0xFFFFFFFF);
332 		CSR_WRITE_4(sc, MY_MAR1, 0xFFFFFFFF);
333 
334 		return;
335 	}
336 	/* first, zot all the existing hash bits */
337 	CSR_WRITE_4(sc, MY_MAR0, 0);
338 	CSR_WRITE_4(sc, MY_MAR1, 0);
339 
340 	/* now program new ones */
341 	if_maddr_rlock(ifp);
342 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
343 		if (ifma->ifma_addr->sa_family != AF_LINK)
344 			continue;
345 		h = ~ether_crc32_be(LLADDR((struct sockaddr_dl *)
346 		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
347 		if (h < 32)
348 			hashes[0] |= (1 << h);
349 		else
350 			hashes[1] |= (1 << (h - 32));
351 		mcnt++;
352 	}
353 	if_maddr_runlock(ifp);
354 
355 	if (mcnt)
356 		rxfilt |= MY_AM;
357 	else
358 		rxfilt &= ~MY_AM;
359 	CSR_WRITE_4(sc, MY_MAR0, hashes[0]);
360 	CSR_WRITE_4(sc, MY_MAR1, hashes[1]);
361 	CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
362 	return;
363 }
364 
365 /*
366  * Initiate an autonegotiation session.
367  */
368 static void
369 my_autoneg_xmit(struct my_softc * sc)
370 {
371 	u_int16_t       phy_sts = 0;
372 
373 	MY_LOCK_ASSERT(sc);
374 
375 	my_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
376 	DELAY(500);
377 	while (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_RESET);
378 
379 	phy_sts = my_phy_readreg(sc, PHY_BMCR);
380 	phy_sts |= PHY_BMCR_AUTONEGENBL | PHY_BMCR_AUTONEGRSTR;
381 	my_phy_writereg(sc, PHY_BMCR, phy_sts);
382 
383 	return;
384 }
385 
386 static void
387 my_autoneg_timeout(void *arg)
388 {
389 	struct my_softc *sc;
390 
391 	sc = arg;
392 	MY_LOCK_ASSERT(sc);
393 	my_autoneg_mii(sc, MY_FLAG_DELAYTIMEO, 1);
394 }
395 
396 /*
397  * Invoke autonegotiation on a PHY.
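 * MY_FLAG_FORCEDELAY busy-waits for the session to complete (probe time only),
 * MY_FLAG_SCHEDDELAY starts a session and arms my_autoneg_timer, and
 * MY_FLAG_DELAYTIMEO is passed by the timeout handler to collect the results.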
398  */
399 static void
400 my_autoneg_mii(struct my_softc * sc, int flag, int verbose)
401 {
402 	u_int16_t       phy_sts = 0, media, advert, ability;
403 	u_int16_t       ability2 = 0;
404 	struct ifnet   *ifp;
405 	struct ifmedia *ifm;
406 
407 	MY_LOCK_ASSERT(sc);
408 
409 	ifm = &sc->ifmedia;
410 	ifp = sc->my_ifp;
411 
412 	ifm->ifm_media = IFM_ETHER | IFM_AUTO;
413 
414 #ifndef FORCE_AUTONEG_TFOUR
415 	/*
416 	 * First, see if autoneg is supported. If not, there's no point in
417 	 * continuing.
418 	 */
419 	phy_sts = my_phy_readreg(sc, PHY_BMSR);
420 	if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
421 		if (verbose)
422 			device_printf(sc->my_dev,
423 			    "autonegotiation not supported\n");
424 		ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
425 		return;
426 	}
427 #endif
428 	switch (flag) {
429 	case MY_FLAG_FORCEDELAY:
430 		/*
431 		 * XXX Never use this option anywhere but in the probe
432 		 * routine: making the kernel stop dead in its tracks for
433 		 * five whole seconds after we've gone multi-user is really
434 		 * bad manners.
435 		 */
436 		my_autoneg_xmit(sc);
437 		DELAY(5000000);
438 		break;
439 	case MY_FLAG_SCHEDDELAY:
440 		/*
441 		 * Wait for the transmitter to go idle before starting an
442 		 * autoneg session, otherwise my_start() may clobber our
443 		 * timeout, and we don't want to allow transmission during an
444 		 * autoneg session since that can screw it up.
445 		 */
446 		if (sc->my_cdata.my_tx_head != NULL) {
447 			sc->my_want_auto = 1;
448 			MY_UNLOCK(sc);
449 			return;
450 		}
451 		my_autoneg_xmit(sc);
452 		callout_reset(&sc->my_autoneg_timer, hz * 5, my_autoneg_timeout,
453 		    sc);
454 		sc->my_autoneg = 1;
455 		sc->my_want_auto = 0;
456 		return;
457 	case MY_FLAG_DELAYTIMEO:
458 		callout_stop(&sc->my_autoneg_timer);
459 		sc->my_autoneg = 0;
460 		break;
461 	default:
462 		device_printf(sc->my_dev, "invalid autoneg flag: %d\n", flag);
463 		return;
464 	}
465 
466 	if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
467 		if (verbose)
468 			device_printf(sc->my_dev, "autoneg complete, ");
469 		phy_sts = my_phy_readreg(sc, PHY_BMSR);
470 	} else {
471 		if (verbose)
472 			device_printf(sc->my_dev, "autoneg not complete, ");
473 	}
474 
475 	media = my_phy_readreg(sc, PHY_BMCR);
476 
477 	/* Link is good. Report modes and set duplex mode. */
478 	if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
479 		if (verbose)
480 			device_printf(sc->my_dev, "link status good. ");
481 		advert = my_phy_readreg(sc, PHY_ANAR);
482 		ability = my_phy_readreg(sc, PHY_LPAR);
483 		if ((sc->my_pinfo->my_vid == MarvellPHYID0) ||
484 		    (sc->my_pinfo->my_vid == LevelOnePHYID0)) {
485 			ability2 = my_phy_readreg(sc, PHY_1000SR);
486 			if (ability2 & PHY_1000SR_1000BTXFULL) {
487 				advert = 0;
488 				ability = 0;
489 				/*
490 				 * this version did not support 1000M,
491 				 * ifm->ifm_media =
492 				 * IFM_ETHER|IFM_1000_T|IFM_FDX;
493 				 */
494 				ifm->ifm_media =
495 				    IFM_ETHER | IFM_100_TX | IFM_FDX;
496 				media &= ~PHY_BMCR_SPEEDSEL;
497 				media |= PHY_BMCR_1000;
498 				media |= PHY_BMCR_DUPLEX;
499 				printf("(full-duplex, 1000Mbps)\n");
500 			} else if (ability2 & PHY_1000SR_1000BTXHALF) {
501 				advert = 0;
502 				ability = 0;
503 				/*
504 				 * this version did not support 1000M,
505 				 * ifm->ifm_media = IFM_ETHER|IFM_1000_T;
506 				 */
507 				ifm->ifm_media = IFM_ETHER | IFM_100_TX;
508 				media &= ~PHY_BMCR_SPEEDSEL;
509 				media &= ~PHY_BMCR_DUPLEX;
510 				media |= PHY_BMCR_1000;
511 				printf("(half-duplex, 1000Mbps)\n");
512 			}
513 		}
514 		if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
515 			ifm->ifm_media = IFM_ETHER | IFM_100_T4;
516 			media |= PHY_BMCR_SPEEDSEL;
517 			media &= ~PHY_BMCR_DUPLEX;
518 			printf("(100baseT4)\n");
519 		} else if (advert & PHY_ANAR_100BTXFULL &&
520 			   ability & PHY_ANAR_100BTXFULL) {
521 			ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
522 			media |= PHY_BMCR_SPEEDSEL;
523 			media |= PHY_BMCR_DUPLEX;
524 			printf("(full-duplex, 100Mbps)\n");
525 		} else if (advert & PHY_ANAR_100BTXHALF &&
526 			   ability & PHY_ANAR_100BTXHALF) {
527 			ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
528 			media |= PHY_BMCR_SPEEDSEL;
529 			media &= ~PHY_BMCR_DUPLEX;
530 			printf("(half-duplex, 100Mbps)\n");
531 		} else if (advert & PHY_ANAR_10BTFULL &&
532 			   ability & PHY_ANAR_10BTFULL) {
533 			ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
534 			media &= ~PHY_BMCR_SPEEDSEL;
535 			media |= PHY_BMCR_DUPLEX;
536 			printf("(full-duplex, 10Mbps)\n");
537 		} else if (advert) {
538 			ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
539 			media &= ~PHY_BMCR_SPEEDSEL;
540 			media &= ~PHY_BMCR_DUPLEX;
541 			printf("(half-duplex, 10Mbps)\n");
542 		}
543 		media &= ~PHY_BMCR_AUTONEGENBL;
544 
545 		/* Set ASIC's duplex mode to match the PHY. */
546 		my_phy_writereg(sc, PHY_BMCR, media);
547 		my_setcfg(sc, media);
548 	} else {
549 		if (verbose)
550 			device_printf(sc->my_dev, "no carrier\n");
551 	}
552 
553 	my_init_locked(sc);
554 	if (sc->my_tx_pend) {
555 		sc->my_autoneg = 0;
556 		sc->my_tx_pend = 0;
557 		my_start_locked(ifp);
558 	}
559 	return;
560 }
561 
562 /*
563  * Query the PHY's capabilities and build the list of supported media.
564  */
565 static void
566 my_getmode_mii(struct my_softc * sc)
567 {
568 	u_int16_t       bmsr;
569 	struct ifnet   *ifp;
570 
571 	MY_LOCK_ASSERT(sc);
572 	ifp = sc->my_ifp;
573 	bmsr = my_phy_readreg(sc, PHY_BMSR);
574 	if (bootverbose)
575 		device_printf(sc->my_dev, "PHY status word: %x\n", bmsr);
576 
577 	/* fallback */
578 	sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
579 
580 	if (bmsr & PHY_BMSR_10BTHALF) {
581 		if (bootverbose)
582 			device_printf(sc->my_dev,
583 			    "10Mbps half-duplex mode supported\n");
584 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX,
585 		    0, NULL);
586 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
587 	}
588 	if (bmsr & PHY_BMSR_10BTFULL) {
589 		if (bootverbose)
590 			device_printf(sc->my_dev,
591 			    "10Mbps full-duplex mode supported\n");
592 
593 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
594 		    0, NULL);
595 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
596 	}
597 	if (bmsr & PHY_BMSR_100BTXHALF) {
598 		if (bootverbose)
599 			device_printf(sc->my_dev,
600 			    "100Mbps half-duplex mode supported\n");
601 		ifp->if_baudrate = 100000000;
602 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
603 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX,
604 			    0, NULL);
605 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
606 	}
607 	if (bmsr & PHY_BMSR_100BTXFULL) {
608 		if (bootverbose)
609 			device_printf(sc->my_dev,
610 			    "100Mbps full-duplex mode supported\n");
611 		ifp->if_baudrate = 100000000;
612 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
613 		    0, NULL);
614 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
615 	}
616 	/* Some also support 100BaseT4. */
617 	if (bmsr & PHY_BMSR_100BT4) {
618 		if (bootverbose)
619 			device_printf(sc->my_dev, "100baseT4 mode supported\n");
620 		ifp->if_baudrate = 100000000;
621 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_T4, 0, NULL);
622 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_T4;
623 #ifdef FORCE_AUTONEG_TFOUR
624 		if (bootverbose)
625 			device_printf(sc->my_dev,
626 			    "forcing on autoneg support for BT4\n");
627 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
628 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
629 #endif
630 	}
631 #if 0				/* this version did not support 1000M, */
632 	if (sc->my_pinfo->my_vid == MarvellPHYID0) {
633 		if (bootverbose)
634 			device_printf(sc->my_dev,
635 			    "1000Mbps half-duplex mode supported\n");
636 
637 		ifp->if_baudrate = 1000000000;
638 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
639 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_HDX,
640 		    0, NULL);
641 		if (bootverbose)
642 			device_printf(sc->my_dev,
643 			    "1000Mbps full-duplex mode supported\n");
644 		ifp->if_baudrate = 1000000000;
645 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
646 		    0, NULL);
647 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_1000_T | IFM_FDX;
648 	}
649 #endif
650 	if (bmsr & PHY_BMSR_CANAUTONEG) {
651 		if (bootverbose)
652 			device_printf(sc->my_dev, "autoneg supported\n");
653 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
654 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
655 	}
656 	return;
657 }
658 
659 /*
660  * Set speed and duplex mode.
661  */
662 static void
663 my_setmode_mii(struct my_softc * sc, int media)
664 {
665 	u_int16_t       bmcr;
666 	struct ifnet   *ifp;
667 
668 	MY_LOCK_ASSERT(sc);
669 	ifp = sc->my_ifp;
670 	/*
671 	 * If an autoneg session is in progress, stop it.
672 	 */
673 	if (sc->my_autoneg) {
674 		device_printf(sc->my_dev, "canceling autoneg session\n");
675 		callout_stop(&sc->my_autoneg_timer);
676 		sc->my_autoneg = sc->my_want_auto = 0;
677 		bmcr = my_phy_readreg(sc, PHY_BMCR);
678 		bmcr &= ~PHY_BMCR_AUTONEGENBL;
679 		my_phy_writereg(sc, PHY_BMCR, bmcr);
680 	}
681 	device_printf(sc->my_dev, "selecting MII, ");
682 	bmcr = my_phy_readreg(sc, PHY_BMCR);
683 	bmcr &= ~(PHY_BMCR_AUTONEGENBL | PHY_BMCR_SPEEDSEL | PHY_BMCR_1000 |
684 		  PHY_BMCR_DUPLEX | PHY_BMCR_LOOPBK);
685 
686 #if 0				/* this version did not support 1000M, */
687 	if (IFM_SUBTYPE(media) == IFM_1000_T) {
688 		printf("1000Mbps/T4, half-duplex\n");
689 		bmcr &= ~PHY_BMCR_SPEEDSEL;
690 		bmcr &= ~PHY_BMCR_DUPLEX;
691 		bmcr |= PHY_BMCR_1000;
692 	}
693 #endif
694 	if (IFM_SUBTYPE(media) == IFM_100_T4) {
695 		printf("100Mbps/T4, half-duplex\n");
696 		bmcr |= PHY_BMCR_SPEEDSEL;
697 		bmcr &= ~PHY_BMCR_DUPLEX;
698 	}
699 	if (IFM_SUBTYPE(media) == IFM_100_TX) {
700 		printf("100Mbps, ");
701 		bmcr |= PHY_BMCR_SPEEDSEL;
702 	}
703 	if (IFM_SUBTYPE(media) == IFM_10_T) {
704 		printf("10Mbps, ");
705 		bmcr &= ~PHY_BMCR_SPEEDSEL;
706 	}
707 	if ((media & IFM_GMASK) == IFM_FDX) {
708 		printf("full duplex\n");
709 		bmcr |= PHY_BMCR_DUPLEX;
710 	} else {
711 		printf("half duplex\n");
712 		bmcr &= ~PHY_BMCR_DUPLEX;
713 	}
714 	my_phy_writereg(sc, PHY_BMCR, bmcr);
715 	my_setcfg(sc, bmcr);
716 	return;
717 }
718 
719 /*
720  * The Myson manual states that in order to fiddle with the 'full-duplex' and
721  * '100Mbps' bits in the netconfig register, we first have to put the
722  * transmit and/or receive logic in the idle state.
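 * my_setcfg() therefore clears MY_TE/MY_RE, polls until MY_TXRUN and MY_RXRUN
 * drop (bounded by MY_TIMEOUT), rewrites the speed/duplex bits, and re-enables
 * the transmitter and receiver if they were running.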
723  */
724 static void
725 my_setcfg(struct my_softc * sc, int bmcr)
726 {
727 	int             i, restart = 0;
728 
729 	MY_LOCK_ASSERT(sc);
730 	if (CSR_READ_4(sc, MY_TCRRCR) & (MY_TE | MY_RE)) {
731 		restart = 1;
732 		MY_CLRBIT(sc, MY_TCRRCR, (MY_TE | MY_RE));
733 		for (i = 0; i < MY_TIMEOUT; i++) {
734 			DELAY(10);
735 			if (!(CSR_READ_4(sc, MY_TCRRCR) &
736 			    (MY_TXRUN | MY_RXRUN)))
737 				break;
738 		}
739 		if (i == MY_TIMEOUT)
740 			device_printf(sc->my_dev,
741 			    "failed to force tx and rx to idle\n");
742 	}
743 	MY_CLRBIT(sc, MY_TCRRCR, MY_PS1000);
744 	MY_CLRBIT(sc, MY_TCRRCR, MY_PS10);
745 	if (bmcr & PHY_BMCR_1000)
746 		MY_SETBIT(sc, MY_TCRRCR, MY_PS1000);
747 	else if (!(bmcr & PHY_BMCR_SPEEDSEL))
748 		MY_SETBIT(sc, MY_TCRRCR, MY_PS10);
749 	if (bmcr & PHY_BMCR_DUPLEX)
750 		MY_SETBIT(sc, MY_TCRRCR, MY_FD);
751 	else
752 		MY_CLRBIT(sc, MY_TCRRCR, MY_FD);
753 	if (restart)
754 		MY_SETBIT(sc, MY_TCRRCR, MY_TE | MY_RE);
755 	return;
756 }
757 
758 static void
759 my_reset(struct my_softc * sc)
760 {
761 	register int    i;
762 
763 	MY_LOCK_ASSERT(sc);
764 	MY_SETBIT(sc, MY_BCR, MY_SWR);
765 	for (i = 0; i < MY_TIMEOUT; i++) {
766 		DELAY(10);
767 		if (!(CSR_READ_4(sc, MY_BCR) & MY_SWR))
768 			break;
769 	}
770 	if (i == MY_TIMEOUT)
771 		device_printf(sc->my_dev, "reset never completed!\n");
772 
773 	/* Wait a little while for the chip to get its brains in order. */
774 	DELAY(1000);
775 	return;
776 }
777 
778 /*
779  * Probe for a Myson chip. Check the PCI vendor and device IDs against our
780  * list and return a device name if we find a match.
781  */
782 static int
783 my_probe(device_t dev)
784 {
785 	struct my_type *t;
786 
787 	t = my_devs;
788 	while (t->my_name != NULL) {
789 		if ((pci_get_vendor(dev) == t->my_vid) &&
790 		    (pci_get_device(dev) == t->my_did)) {
791 			device_set_desc(dev, t->my_name);
792 			my_info_tmp = t;
793 			return (BUS_PROBE_DEFAULT);
794 		}
795 		t++;
796 	}
797 	return (ENXIO);
798 }
799 
800 /*
801  * Attach the interface. Allocate softc structures, do ifmedia setup and
802  * ethernet/BPF attach.
803  */
804 static int
805 my_attach(device_t dev)
806 {
807 	int             i;
808 	u_char          eaddr[ETHER_ADDR_LEN];
809 	u_int32_t       iobase;
810 	struct my_softc *sc;
811 	struct ifnet   *ifp;
812 	int             media = IFM_ETHER | IFM_100_TX | IFM_FDX;
813 	unsigned int    round;
814 	caddr_t         roundptr;
815 	struct my_type *p;
816 	u_int16_t       phy_vid, phy_did, phy_sts = 0;
817 	int             rid, error = 0;
818 
819 	sc = device_get_softc(dev);
820 	sc->my_dev = dev;
821 	mtx_init(&sc->my_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
822 	    MTX_DEF);
823 	callout_init_mtx(&sc->my_autoneg_timer, &sc->my_mtx, 0);
824 	callout_init_mtx(&sc->my_watchdog, &sc->my_mtx, 0);
825 
826 	/*
827 	 * Map control/status registers.
828 	 */
829 	pci_enable_busmaster(dev);
830 
831 	if (my_info_tmp->my_did == MTD800ID) {
832 		iobase = pci_read_config(dev, MY_PCI_LOIO, 4);
833 		if (iobase & 0x300)
834 			MY_USEIOSPACE = 0;
835 	}
836 
837 	rid = MY_RID;
838 	sc->my_res = bus_alloc_resource_any(dev, MY_RES, &rid, RF_ACTIVE);
839 
840 	if (sc->my_res == NULL) {
841 		device_printf(dev, "couldn't map ports/memory\n");
842 		error = ENXIO;
843 		goto destroy_mutex;
844 	}
845 	sc->my_btag = rman_get_bustag(sc->my_res);
846 	sc->my_bhandle = rman_get_bushandle(sc->my_res);
847 
848 	rid = 0;
849 	sc->my_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
850 					    RF_SHAREABLE | RF_ACTIVE);
851 
852 	if (sc->my_irq == NULL) {
853 		device_printf(dev, "couldn't map interrupt\n");
854 		error = ENXIO;
855 		goto release_io;
856 	}
857 
858 	sc->my_info = my_info_tmp;
859 
860 	/* Reset the adapter. */
861 	MY_LOCK(sc);
862 	my_reset(sc);
863 	MY_UNLOCK(sc);
864 
865 	/*
866 	 * Get station address
867 	 */
868 	for (i = 0; i < ETHER_ADDR_LEN; ++i)
869 		eaddr[i] = CSR_READ_1(sc, MY_PAR0 + i);
870 
871 	sc->my_ldata_ptr = malloc(sizeof(struct my_list_data) + 8,
872 				  M_DEVBUF, M_NOWAIT);
873 	if (sc->my_ldata_ptr == NULL) {
874 		device_printf(dev, "no memory for list buffers!\n");
875 		error = ENXIO;
876 		goto release_irq;
877 	}
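	/*
	 * Align the descriptor list on an 8-byte boundary; the extra 8 bytes
	 * allocated above leave room for up to 7 bytes of padding.
	 */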
878 	sc->my_ldata = (struct my_list_data *) sc->my_ldata_ptr;
879 	round = (uintptr_t)sc->my_ldata_ptr & 0xF;
880 	roundptr = sc->my_ldata_ptr;
881 	for (i = 0; i < 8; i++) {
882 		if (round % 8) {
883 			round++;
884 			roundptr++;
885 		} else
886 			break;
887 	}
888 	sc->my_ldata = (struct my_list_data *) roundptr;
889 	bzero(sc->my_ldata, sizeof(struct my_list_data));
890 
891 	ifp = sc->my_ifp = if_alloc(IFT_ETHER);
892 	if (ifp == NULL) {
893 		device_printf(dev, "cannot if_alloc()\n");
894 		error = ENOSPC;
895 		goto free_ldata;
896 	}
897 	ifp->if_softc = sc;
898 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
899 	ifp->if_mtu = ETHERMTU;
900 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
901 	ifp->if_ioctl = my_ioctl;
902 	ifp->if_start = my_start;
903 	ifp->if_init = my_init;
904 	ifp->if_baudrate = 10000000;
905 	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
906 	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
907 	IFQ_SET_READY(&ifp->if_snd);
908 
909 	if (sc->my_info->my_did == MTD803ID)
910 		sc->my_pinfo = my_phys;
911 	else {
912 		if (bootverbose)
913 			device_printf(dev, "probing for a PHY\n");
914 		MY_LOCK(sc);
915 		for (i = MY_PHYADDR_MIN; i < MY_PHYADDR_MAX + 1; i++) {
916 			if (bootverbose)
917 				device_printf(dev, "checking address: %d\n", i);
918 			sc->my_phy_addr = i;
919 			phy_sts = my_phy_readreg(sc, PHY_BMSR);
920 			if ((phy_sts != 0) && (phy_sts != 0xffff))
921 				break;
922 			else
923 				phy_sts = 0;
924 		}
925 		if (phy_sts) {
926 			phy_vid = my_phy_readreg(sc, PHY_VENID);
927 			phy_did = my_phy_readreg(sc, PHY_DEVID);
928 			if (bootverbose) {
929 				device_printf(dev, "found PHY at address %d, ",
930 				    sc->my_phy_addr);
931 				printf("vendor id: %x device id: %x\n",
932 				    phy_vid, phy_did);
933 			}
934 			p = my_phys;
935 			while (p->my_vid) {
936 				if (phy_vid == p->my_vid) {
937 					sc->my_pinfo = p;
938 					break;
939 				}
940 				p++;
941 			}
942 			if (sc->my_pinfo == NULL)
943 				sc->my_pinfo = &my_phys[PHY_UNKNOWN];
944 			if (bootverbose)
945 				device_printf(dev, "PHY type: %s\n",
946 				       sc->my_pinfo->my_name);
947 		} else {
948 			MY_UNLOCK(sc);
949 			device_printf(dev, "MII without any phy!\n");
950 			error = ENXIO;
951 			goto free_if;
952 		}
953 		MY_UNLOCK(sc);
954 	}
955 
956 	/* Do ifmedia setup. */
957 	ifmedia_init(&sc->ifmedia, 0, my_ifmedia_upd, my_ifmedia_sts);
958 	MY_LOCK(sc);
959 	my_getmode_mii(sc);
960 	my_autoneg_mii(sc, MY_FLAG_FORCEDELAY, 1);
961 	media = sc->ifmedia.ifm_media;
962 	my_stop(sc);
963 	MY_UNLOCK(sc);
964 	ifmedia_set(&sc->ifmedia, media);
965 
966 	ether_ifattach(ifp, eaddr);
967 
968 	error = bus_setup_intr(dev, sc->my_irq, INTR_TYPE_NET | INTR_MPSAFE,
969 			       NULL, my_intr, sc, &sc->my_intrhand);
970 
971 	if (error) {
972 		device_printf(dev, "couldn't set up irq\n");
973 		goto detach_if;
974 	}
975 
976 	return (0);
977 
978 detach_if:
979 	ether_ifdetach(ifp);
980 free_if:
981 	if_free(ifp);
982 free_ldata:
983 	free(sc->my_ldata_ptr, M_DEVBUF);
984 release_irq:
985 	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
986 release_io:
987 	bus_release_resource(dev, MY_RES, MY_RID, sc->my_res);
988 destroy_mutex:
989 	mtx_destroy(&sc->my_mtx);
990 	return (error);
991 }
992 
993 static int
994 my_detach(device_t dev)
995 {
996 	struct my_softc *sc;
997 	struct ifnet   *ifp;
998 
999 	sc = device_get_softc(dev);
1000 	ifp = sc->my_ifp;
1001 	ether_ifdetach(ifp);
1002 	MY_LOCK(sc);
1003 	my_stop(sc);
1004 	MY_UNLOCK(sc);
1005 	bus_teardown_intr(dev, sc->my_irq, sc->my_intrhand);
1006 	callout_drain(&sc->my_watchdog);
1007 	callout_drain(&sc->my_autoneg_timer);
1008 
1009 	if_free(ifp);
1010 	free(sc->my_ldata_ptr, M_DEVBUF);
1011 
1012 	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
1013 	bus_release_resource(dev, MY_RES, MY_RID, sc->my_res);
1014 	mtx_destroy(&sc->my_mtx);
1015 	return (0);
1016 }
1017 
1018 
1019 /*
1020  * Initialize the transmit descriptors.
1021  */
1022 static int
1023 my_list_tx_init(struct my_softc * sc)
1024 {
1025 	struct my_chain_data *cd;
1026 	struct my_list_data *ld;
1027 	int             i;
1028 
1029 	MY_LOCK_ASSERT(sc);
1030 	cd = &sc->my_cdata;
1031 	ld = sc->my_ldata;
1032 	for (i = 0; i < MY_TX_LIST_CNT; i++) {
1033 		cd->my_tx_chain[i].my_ptr = &ld->my_tx_list[i];
1034 		if (i == (MY_TX_LIST_CNT - 1))
1035 			cd->my_tx_chain[i].my_nextdesc = &cd->my_tx_chain[0];
1036 		else
1037 			cd->my_tx_chain[i].my_nextdesc =
1038 			    &cd->my_tx_chain[i + 1];
1039 	}
1040 	cd->my_tx_free = &cd->my_tx_chain[0];
1041 	cd->my_tx_tail = cd->my_tx_head = NULL;
1042 	return (0);
1043 }
1044 
1045 /*
1046  * Initialize the RX descriptors and allocate mbufs for them. Note that we
1047  * arrange the descriptors in a closed ring, so that the last descriptor
1048  * points back to the first.
1049  */
1050 static int
1051 my_list_rx_init(struct my_softc * sc)
1052 {
1053 	struct my_chain_data *cd;
1054 	struct my_list_data *ld;
1055 	int             i;
1056 
1057 	MY_LOCK_ASSERT(sc);
1058 	cd = &sc->my_cdata;
1059 	ld = sc->my_ldata;
1060 	for (i = 0; i < MY_RX_LIST_CNT; i++) {
1061 		cd->my_rx_chain[i].my_ptr =
1062 		    (struct my_desc *) & ld->my_rx_list[i];
1063 		if (my_newbuf(sc, &cd->my_rx_chain[i]) == ENOBUFS) {
1064 			MY_UNLOCK(sc);
1065 			return (ENOBUFS);
1066 		}
1067 		if (i == (MY_RX_LIST_CNT - 1)) {
1068 			cd->my_rx_chain[i].my_nextdesc = &cd->my_rx_chain[0];
1069 			ld->my_rx_list[i].my_next = vtophys(&ld->my_rx_list[0]);
1070 		} else {
1071 			cd->my_rx_chain[i].my_nextdesc =
1072 			    &cd->my_rx_chain[i + 1];
1073 			ld->my_rx_list[i].my_next =
1074 			    vtophys(&ld->my_rx_list[i + 1]);
1075 		}
1076 	}
1077 	cd->my_rx_head = &cd->my_rx_chain[0];
1078 	return (0);
1079 }
1080 
1081 /*
1082  * Initialize an RX descriptor and attach an MBUF cluster.
1083  */
1084 static int
1085 my_newbuf(struct my_softc * sc, struct my_chain_onefrag * c)
1086 {
1087 	struct mbuf    *m_new = NULL;
1088 
1089 	MY_LOCK_ASSERT(sc);
1090 	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1091 	if (m_new == NULL) {
1092 		device_printf(sc->my_dev,
1093 		    "no memory for rx list -- packet dropped!\n");
1094 		return (ENOBUFS);
1095 	}
1096 	MCLGET(m_new, M_DONTWAIT);
1097 	if (!(m_new->m_flags & M_EXT)) {
1098 		device_printf(sc->my_dev,
1099 		    "no memory for rx list -- packet dropped!\n");
1100 		m_freem(m_new);
1101 		return (ENOBUFS);
1102 	}
1103 	c->my_mbuf = m_new;
1104 	c->my_ptr->my_data = vtophys(mtod(m_new, caddr_t));
1105 	c->my_ptr->my_ctl = (MCLBYTES - 1) << MY_RBSShift;
1106 	c->my_ptr->my_status = MY_OWNByNIC;
1107 	return (0);
1108 }
1109 
1110 /*
1111  * A frame has been uploaded: pass the resulting mbuf chain up to the higher
1112  * level protocols.
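 * The loop stops at the first descriptor still owned by the NIC (MY_OWNByNIC).
 * Short frames are copied out with m_devget() so the receive cluster can be
 * reused in place; larger frames are handed up and replaced via my_newbuf().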
1113  */
1114 static void
1115 my_rxeof(struct my_softc * sc)
1116 {
1117 	struct ether_header *eh;
1118 	struct mbuf    *m;
1119 	struct ifnet   *ifp;
1120 	struct my_chain_onefrag *cur_rx;
1121 	int             total_len = 0;
1122 	u_int32_t       rxstat;
1123 
1124 	MY_LOCK_ASSERT(sc);
1125 	ifp = sc->my_ifp;
1126 	while (!((rxstat = sc->my_cdata.my_rx_head->my_ptr->my_status)
1127 	    & MY_OWNByNIC)) {
1128 		cur_rx = sc->my_cdata.my_rx_head;
1129 		sc->my_cdata.my_rx_head = cur_rx->my_nextdesc;
1130 
1131 		if (rxstat & MY_ES) {	/* error summary: give up this rx pkt */
1132 			ifp->if_ierrors++;
1133 			cur_rx->my_ptr->my_status = MY_OWNByNIC;
1134 			continue;
1135 		}
1136 		/* No errors; receive the packet. */
1137 		total_len = (rxstat & MY_FLNGMASK) >> MY_FLNGShift;
1138 		total_len -= ETHER_CRC_LEN;
1139 
1140 		if (total_len < MINCLSIZE) {
1141 			m = m_devget(mtod(cur_rx->my_mbuf, char *),
1142 			    total_len, 0, ifp, NULL);
1143 			cur_rx->my_ptr->my_status = MY_OWNByNIC;
1144 			if (m == NULL) {
1145 				ifp->if_ierrors++;
1146 				continue;
1147 			}
1148 		} else {
1149 			m = cur_rx->my_mbuf;
1150 			/*
1151 			 * Try to conjure up a new mbuf cluster. If that
1152 			 * fails, it means we have an out of memory condition
1153 			 * and should leave the buffer in place and continue.
1154 			 * This will result in a lost packet, but there's
1155 			 * little else we can do in this situation.
1156 			 */
1157 			if (my_newbuf(sc, cur_rx) == ENOBUFS) {
1158 				ifp->if_ierrors++;
1159 				cur_rx->my_ptr->my_status = MY_OWNByNIC;
1160 				continue;
1161 			}
1162 			m->m_pkthdr.rcvif = ifp;
1163 			m->m_pkthdr.len = m->m_len = total_len;
1164 		}
1165 		ifp->if_ipackets++;
1166 		eh = mtod(m, struct ether_header *);
1167 #if NBPFILTER > 0
1168 		/*
1169 		 * Handle BPF listeners. Let the BPF user see the packet, but
1170 		 * don't pass it up to the ether_input() layer unless it's a
1171 		 * broadcast packet, multicast packet, matches our ethernet
1172 		 * address or the interface is in promiscuous mode.
1173 		 */
1174 		if (bpf_peers_present(ifp->if_bpf)) {
1175 			bpf_mtap(ifp->if_bpf, m);
1176 			if (ifp->if_flags & IFF_PROMISC &&
1177 			    (bcmp(eh->ether_dhost, IF_LLADDR(sc->my_ifp),
1178 				ETHER_ADDR_LEN) &&
1179 			     (eh->ether_dhost[0] & 1) == 0)) {
1180 				m_freem(m);
1181 				continue;
1182 			}
1183 		}
1184 #endif
1185 		MY_UNLOCK(sc);
1186 		(*ifp->if_input)(ifp, m);
1187 		MY_LOCK(sc);
1188 	}
1189 	return;
1190 }
1191 
1192 
1193 /*
1194  * A frame was downloaded to the chip. It's safe for us to clean up the list
1195  * buffers.
1196  */
1197 static void
1198 my_txeof(struct my_softc * sc)
1199 {
1200 	struct my_chain *cur_tx;
1201 	struct ifnet   *ifp;
1202 
1203 	MY_LOCK_ASSERT(sc);
1204 	ifp = sc->my_ifp;
1205 	/* Clear the timeout timer. */
1206 	sc->my_timer = 0;
1207 	if (sc->my_cdata.my_tx_head == NULL) {
1208 		return;
1209 	}
1210 	/*
1211 	 * Go through our tx list and free mbufs for those frames that have
1212 	 * been transmitted.
1213 	 */
1214 	while (sc->my_cdata.my_tx_head->my_mbuf != NULL) {
1215 		u_int32_t       txstat;
1216 
1217 		cur_tx = sc->my_cdata.my_tx_head;
1218 		txstat = MY_TXSTATUS(cur_tx);
1219 		if ((txstat & MY_OWNByNIC) || txstat == MY_UNSENT)
1220 			break;
1221 		if (!(CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced)) {
1222 			if (txstat & MY_TXERR) {
1223 				ifp->if_oerrors++;
1224 				if (txstat & MY_EC) /* excessive collision */
1225 					ifp->if_collisions++;
1226 				if (txstat & MY_LC)	/* late collision */
1227 					ifp->if_collisions++;
1228 			}
1229 			ifp->if_collisions += (txstat & MY_NCRMASK) >>
1230 			    MY_NCRShift;
1231 		}
1232 		ifp->if_opackets++;
1233 		m_freem(cur_tx->my_mbuf);
1234 		cur_tx->my_mbuf = NULL;
1235 		if (sc->my_cdata.my_tx_head == sc->my_cdata.my_tx_tail) {
1236 			sc->my_cdata.my_tx_head = NULL;
1237 			sc->my_cdata.my_tx_tail = NULL;
1238 			break;
1239 		}
1240 		sc->my_cdata.my_tx_head = cur_tx->my_nextdesc;
1241 	}
1242 	if (CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced) {
1243 		ifp->if_collisions += (CSR_READ_4(sc, MY_TSR) & MY_NCRMask);
1244 	}
1245 	return;
1246 }
1247 
1248 /*
1249  * TX 'end of channel' interrupt handler.
1250  */
1251 static void
1252 my_txeoc(struct my_softc * sc)
1253 {
1254 	struct ifnet   *ifp;
1255 
1256 	MY_LOCK_ASSERT(sc);
1257 	ifp = sc->my_ifp;
1258 	sc->my_timer = 0;
1259 	if (sc->my_cdata.my_tx_head == NULL) {
1260 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1261 		sc->my_cdata.my_tx_tail = NULL;
1262 		if (sc->my_want_auto)
1263 			my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
1264 	} else {
1265 		if (MY_TXOWN(sc->my_cdata.my_tx_head) == MY_UNSENT) {
1266 			MY_TXOWN(sc->my_cdata.my_tx_head) = MY_OWNByNIC;
1267 			sc->my_timer = 5;
1268 			CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF);
1269 		}
1270 	}
1271 	return;
1272 }
1273 
1274 static void
1275 my_intr(void *arg)
1276 {
1277 	struct my_softc *sc;
1278 	struct ifnet   *ifp;
1279 	u_int32_t       status;
1280 
1281 	sc = arg;
1282 	MY_LOCK(sc);
1283 	ifp = sc->my_ifp;
1284 	if (!(ifp->if_flags & IFF_UP)) {
1285 		MY_UNLOCK(sc);
1286 		return;
1287 	}
1288 	/* Disable interrupts. */
1289 	CSR_WRITE_4(sc, MY_IMR, 0x00000000);
1290 
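	/*
	 * Service events until MY_ISR reports nothing pending; each pass
	 * acknowledges the bits it handles by writing them back to MY_ISR.
	 */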
1291 	for (;;) {
1292 		status = CSR_READ_4(sc, MY_ISR);
1293 		status &= MY_INTRS;
1294 		if (status)
1295 			CSR_WRITE_4(sc, MY_ISR, status);
1296 		else
1297 			break;
1298 
1299 		if (status & MY_RI)	/* receive interrupt */
1300 			my_rxeof(sc);
1301 
1302 		if ((status & MY_RBU) || (status & MY_RxErr)) {
1303 			/* rx buffer unavailable or rx error */
1304 			ifp->if_ierrors++;
1305 #ifdef foo
1306 			my_stop(sc);
1307 			my_reset(sc);
1308 			my_init_locked(sc);
1309 #endif
1310 		}
1311 		if (status & MY_TI)	/* tx interrupt */
1312 			my_txeof(sc);
1313 		if (status & MY_ETI)	/* tx early interrupt */
1314 			my_txeof(sc);
1315 		if (status & MY_TBU)	/* tx buffer unavailable */
1316 			my_txeoc(sc);
1317 
1318 #if 0				/* 90/1/18 delete */
1319 		if (status & MY_FBE) {
1320 			my_reset(sc);
1321 			my_init_locked(sc);
1322 		}
1323 #endif
1324 
1325 	}
1326 
1327 	/* Re-enable interrupts. */
1328 	CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
1329 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1330 		my_start_locked(ifp);
1331 	MY_UNLOCK(sc);
1332 	return;
1333 }
1334 
1335 /*
1336  * Encapsulate an mbuf chain in a descriptor: the chain is coalesced into one
1337  * buffer whose physical address is written to the first fragment pointer.
1338  */
1339 static int
1340 my_encap(struct my_softc * sc, struct my_chain * c, struct mbuf * m_head)
1341 {
1342 	struct my_desc *f = NULL;
1343 	int             total_len;
1344 	struct mbuf    *m, *m_new = NULL;
1345 
1346 	MY_LOCK_ASSERT(sc);
1347 	/* calculate the total tx pkt length */
1348 	total_len = 0;
1349 	for (m = m_head; m != NULL; m = m->m_next)
1350 		total_len += m->m_len;
1351 	/*
1352 	 * Start packing the mbufs in this chain into the fragment pointers.
1353 	 * Stop when we run out of fragments or hit the end of the mbuf
1354 	 * chain.
1355 	 */
1356 	m = m_head;
1357 	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1358 	if (m_new == NULL) {
1359 		device_printf(sc->my_dev, "no memory for tx list\n");
1360 		return (1);
1361 	}
1362 	if (m_head->m_pkthdr.len > MHLEN) {
1363 		MCLGET(m_new, M_DONTWAIT);
1364 		if (!(m_new->m_flags & M_EXT)) {
1365 			m_freem(m_new);
1366 			device_printf(sc->my_dev, "no memory for tx list\n");
1367 			return (1);
1368 		}
1369 	}
1370 	m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t));
1371 	m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1372 	m_freem(m_head);
1373 	m_head = m_new;
1374 	f = &c->my_ptr->my_frag[0];
1375 	f->my_status = 0;
1376 	f->my_data = vtophys(mtod(m_new, caddr_t));
1377 	total_len = m_new->m_len;
1378 	f->my_ctl = MY_TXFD | MY_TXLD | MY_CRCEnable | MY_PADEnable;
1379 	f->my_ctl |= total_len << MY_PKTShift;	/* pkt size */
1380 	f->my_ctl |= total_len;	/* buffer size */
1381 	/* 89/12/29 add, for mtd891 *//* [ 89? ] */
1382 	if (sc->my_info->my_did == MTD891ID)
1383 		f->my_ctl |= MY_ETIControl | MY_RetryTxLC;
1384 	c->my_mbuf = m_head;
1385 	c->my_lastdesc = 0;
1386 	MY_TXNEXT(c) = vtophys(&c->my_nextdesc->my_ptr->my_frag[0]);
1387 	return (0);
1388 }
1389 
1390 /*
1391  * Main transmit routine. my_encap() coalesces each outgoing mbuf chain into a
1392  * single buffer and places its physical address in the transmit descriptor;
1393  * a pointer to that mbuf is also saved so it can be freed once the frame has
1394  * been transmitted.
1395  */
1396 static void
1397 my_start(struct ifnet * ifp)
1398 {
1399 	struct my_softc *sc;
1400 
1401 	sc = ifp->if_softc;
1402 	MY_LOCK(sc);
1403 	my_start_locked(ifp);
1404 	MY_UNLOCK(sc);
1405 }
1406 
1407 static void
1408 my_start_locked(struct ifnet * ifp)
1409 {
1410 	struct my_softc *sc;
1411 	struct mbuf    *m_head = NULL;
1412 	struct my_chain *cur_tx = NULL, *start_tx;
1413 
1414 	sc = ifp->if_softc;
1415 	MY_LOCK_ASSERT(sc);
1416 	if (sc->my_autoneg) {
1417 		sc->my_tx_pend = 1;
1418 		return;
1419 	}
1420 	/*
1421 	 * Check for an available queue slot. If there are none, punt.
1422 	 */
1423 	if (sc->my_cdata.my_tx_free->my_mbuf != NULL) {
1424 		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1425 		return;
1426 	}
1427 	start_tx = sc->my_cdata.my_tx_free;
1428 	while (sc->my_cdata.my_tx_free->my_mbuf == NULL) {
1429 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1430 		if (m_head == NULL)
1431 			break;
1432 
1433 		/* Pick a descriptor off the free list. */
1434 		cur_tx = sc->my_cdata.my_tx_free;
1435 		sc->my_cdata.my_tx_free = cur_tx->my_nextdesc;
1436 
1437 		/* Pack the data into the descriptor. */
1438 		my_encap(sc, cur_tx, m_head);
1439 
1440 		if (cur_tx != start_tx)
1441 			MY_TXOWN(cur_tx) = MY_OWNByNIC;
1442 #if NBPFILTER > 0
1443 		/*
1444 		 * If there's a BPF listener, bounce a copy of this frame to
1445 		 * him.
1446 		 */
1447 		BPF_MTAP(ifp, cur_tx->my_mbuf);
1448 #endif
1449 	}
1450 	/*
1451 	 * If there are no packets queued, bail.
1452 	 */
1453 	if (cur_tx == NULL) {
1454 		return;
1455 	}
1456 	/*
1457 	 * Place the request for the upload interrupt in the last descriptor
1458 	 * in the chain. This way, if we're chaining several packets at once,
1459 	 * we'll only get an interrupt once for the whole chain rather than
1460 	 * once for each packet.
1461 	 */
1462 	MY_TXCTL(cur_tx) |= MY_TXIC;
1463 	cur_tx->my_ptr->my_frag[0].my_ctl |= MY_TXIC;
1464 	sc->my_cdata.my_tx_tail = cur_tx;
1465 	if (sc->my_cdata.my_tx_head == NULL)
1466 		sc->my_cdata.my_tx_head = start_tx;
1467 	MY_TXOWN(start_tx) = MY_OWNByNIC;
1468 	CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF);	/* tx polling demand */
1469 
1470 	/*
1471 	 * Set a timeout in case the chip goes out to lunch.
1472 	 */
1473 	sc->my_timer = 5;
1474 	return;
1475 }
1476 
1477 static void
1478 my_init(void *xsc)
1479 {
1480 	struct my_softc *sc = xsc;
1481 
1482 	MY_LOCK(sc);
1483 	my_init_locked(sc);
1484 	MY_UNLOCK(sc);
1485 }
1486 
1487 static void
1488 my_init_locked(struct my_softc *sc)
1489 {
1490 	struct ifnet   *ifp = sc->my_ifp;
1491 	u_int16_t       phy_bmcr = 0;
1492 
1493 	MY_LOCK_ASSERT(sc);
1494 	if (sc->my_autoneg) {
1495 		return;
1496 	}
1497 	if (sc->my_pinfo != NULL)
1498 		phy_bmcr = my_phy_readreg(sc, PHY_BMCR);
1499 	/*
1500 	 * Cancel pending I/O and free all RX/TX buffers.
1501 	 */
1502 	my_stop(sc);
1503 	my_reset(sc);
1504 
1505 	/*
1506 	 * Set cache alignment and burst length.
1507 	 */
1508 #if 0				/* 89/9/1 modify,  */
1509 	CSR_WRITE_4(sc, MY_BCR, MY_RPBLE512);
1510 	CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF);
1511 #endif
1512 	CSR_WRITE_4(sc, MY_BCR, MY_PBL8);
1513 	CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF | MY_RBLEN | MY_RPBLE512);
1514 	/*
1515 	 * 89/12/29 add, for mtd891,
1516 	 */
1517 	if (sc->my_info->my_did == MTD891ID) {
1518 		MY_SETBIT(sc, MY_BCR, MY_PROG);
1519 		MY_SETBIT(sc, MY_TCRRCR, MY_Enhanced);
1520 	}
1521 	my_setcfg(sc, phy_bmcr);
1522 	/* Init circular RX list. */
1523 	if (my_list_rx_init(sc) == ENOBUFS) {
1524 		device_printf(sc->my_dev, "init failed: no memory for rx buffers\n");
1525 		my_stop(sc);
1526 		return;
1527 	}
1528 	/* Init TX descriptors. */
1529 	my_list_tx_init(sc);
1530 
1531 	/* If we want promiscuous mode, set the allframes bit. */
1532 	if (ifp->if_flags & IFF_PROMISC)
1533 		MY_SETBIT(sc, MY_TCRRCR, MY_PROM);
1534 	else
1535 		MY_CLRBIT(sc, MY_TCRRCR, MY_PROM);
1536 
1537 	/*
1538 	 * Set capture broadcast bit to capture broadcast frames.
1539 	 */
1540 	if (ifp->if_flags & IFF_BROADCAST)
1541 		MY_SETBIT(sc, MY_TCRRCR, MY_AB);
1542 	else
1543 		MY_CLRBIT(sc, MY_TCRRCR, MY_AB);
1544 
1545 	/*
1546 	 * Program the multicast filter, if necessary.
1547 	 */
1548 	my_setmulti(sc);
1549 
1550 	/*
1551 	 * Load the address of the RX list.
1552 	 */
1553 	MY_CLRBIT(sc, MY_TCRRCR, MY_RE);
1554 	CSR_WRITE_4(sc, MY_RXLBA, vtophys(&sc->my_ldata->my_rx_list[0]));
1555 
1556 	/*
1557 	 * Enable interrupts.
1558 	 */
1559 	CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
1560 	CSR_WRITE_4(sc, MY_ISR, 0xFFFFFFFF);
1561 
1562 	/* Enable receiver and transmitter. */
1563 	MY_SETBIT(sc, MY_TCRRCR, MY_RE);
1564 	MY_CLRBIT(sc, MY_TCRRCR, MY_TE);
1565 	CSR_WRITE_4(sc, MY_TXLBA, vtophys(&sc->my_ldata->my_tx_list[0]));
1566 	MY_SETBIT(sc, MY_TCRRCR, MY_TE);
1567 
1568 	/* Restore state of BMCR */
1569 	if (sc->my_pinfo != NULL)
1570 		my_phy_writereg(sc, PHY_BMCR, phy_bmcr);
1571 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1572 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1573 
1574 	callout_reset(&sc->my_watchdog, hz, my_watchdog, sc);
1575 	return;
1576 }
1577 
1578 /*
1579  * Set media options.
1580  */
1581 
1582 static int
1583 my_ifmedia_upd(struct ifnet * ifp)
1584 {
1585 	struct my_softc *sc;
1586 	struct ifmedia *ifm;
1587 
1588 	sc = ifp->if_softc;
1589 	MY_LOCK(sc);
1590 	ifm = &sc->ifmedia;
1591 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
1592 		MY_UNLOCK(sc);
1593 		return (EINVAL);
1594 	}
1595 	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
1596 		my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
1597 	else
1598 		my_setmode_mii(sc, ifm->ifm_media);
1599 	MY_UNLOCK(sc);
1600 	return (0);
1601 }
1602 
1603 /*
1604  * Report current media status.
1605  */
1606 
1607 static void
1608 my_ifmedia_sts(struct ifnet * ifp, struct ifmediareq * ifmr)
1609 {
1610 	struct my_softc *sc;
1611 	u_int16_t advert = 0, ability = 0;
1612 
1613 	sc = ifp->if_softc;
1614 	MY_LOCK(sc);
1615 	ifmr->ifm_active = IFM_ETHER;
1616 	if (!(my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
1617 #if 0				/* this version did not support 1000M, */
1618 		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_1000)
1619 			ifmr->ifm_active = IFM_ETHER | IFM_1000TX;
1620 #endif
1621 		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
1622 			ifmr->ifm_active = IFM_ETHER | IFM_100_TX;
1623 		else
1624 			ifmr->ifm_active = IFM_ETHER | IFM_10_T;
1625 		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
1626 			ifmr->ifm_active |= IFM_FDX;
1627 		else
1628 			ifmr->ifm_active |= IFM_HDX;
1629 
1630 		MY_UNLOCK(sc);
1631 		return;
1632 	}
1633 	ability = my_phy_readreg(sc, PHY_LPAR);
1634 	advert = my_phy_readreg(sc, PHY_ANAR);
1635 
1636 #if 0				/* this version did not support 1000M, */
1637 	if (sc->my_pinfo->my_vid == MarvellPHYID0) {
1638 		ability2 = my_phy_readreg(sc, PHY_1000SR);
1639 		if (ability2 & PHY_1000SR_1000BTXFULL) {
1640 			advert = 0;
1641 			ability = 0;
1642 	  		ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
1643 	  	} else if (ability2 & PHY_1000SR_1000BTXHALF) {
1644 			advert = 0;
1645 			ability = 0;
1646 			ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_HDX;
1647 		}
1648 	}
1649 #endif
1650 	if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4)
1651 		ifmr->ifm_active = IFM_ETHER | IFM_100_T4;
1652 	else if (advert & PHY_ANAR_100BTXFULL && ability & PHY_ANAR_100BTXFULL)
1653 		ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1654 	else if (advert & PHY_ANAR_100BTXHALF && ability & PHY_ANAR_100BTXHALF)
1655 		ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_HDX;
1656 	else if (advert & PHY_ANAR_10BTFULL && ability & PHY_ANAR_10BTFULL)
1657 		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_FDX;
1658 	else if (advert & PHY_ANAR_10BTHALF && ability & PHY_ANAR_10BTHALF)
1659 		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_HDX;
1660 	MY_UNLOCK(sc);
1661 	return;
1662 }
1663 
1664 static int
1665 my_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
1666 {
1667 	struct my_softc *sc = ifp->if_softc;
1668 	struct ifreq   *ifr = (struct ifreq *) data;
1669 	int             error;
1670 
1671 	switch (command) {
1672 	case SIOCSIFFLAGS:
1673 		MY_LOCK(sc);
1674 		if (ifp->if_flags & IFF_UP)
1675 			my_init_locked(sc);
1676 		else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1677 			my_stop(sc);
1678 		MY_UNLOCK(sc);
1679 		error = 0;
1680 		break;
1681 	case SIOCADDMULTI:
1682 	case SIOCDELMULTI:
1683 		MY_LOCK(sc);
1684 		my_setmulti(sc);
1685 		MY_UNLOCK(sc);
1686 		error = 0;
1687 		break;
1688 	case SIOCGIFMEDIA:
1689 	case SIOCSIFMEDIA:
1690 		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
1691 		break;
1692 	default:
1693 		error = ether_ioctl(ifp, command, data);
1694 		break;
1695 	}
1696 	return (error);
1697 }
1698 
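/*
 * Per-second watchdog callout: my_timer is armed by the transmit path and
 * counted down here; if it reaches zero the chip is reset and reinitialized.
 */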
1699 static void
1700 my_watchdog(void *arg)
1701 {
1702 	struct my_softc *sc;
1703 	struct ifnet *ifp;
1704 
1705 	sc = arg;
1706 	MY_LOCK_ASSERT(sc);
1707 	callout_reset(&sc->my_watchdog, hz, my_watchdog, sc);
1708 	if (sc->my_timer == 0 || --sc->my_timer > 0)
1709 		return;
1710 
1711 	ifp = sc->my_ifp;
1712 	ifp->if_oerrors++;
1713 	if_printf(ifp, "watchdog timeout\n");
1714 	if (!(my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
1715 		if_printf(ifp, "no carrier - transceiver cable problem?\n");
1716 	my_stop(sc);
1717 	my_reset(sc);
1718 	my_init_locked(sc);
1719 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1720 		my_start_locked(ifp);
1721 }
1722 
1723 
1724 /*
1725  * Stop the adapter and free any mbufs allocated to the RX and TX lists.
1726  */
1727 static void
1728 my_stop(struct my_softc * sc)
1729 {
1730 	register int    i;
1731 	struct ifnet   *ifp;
1732 
1733 	MY_LOCK_ASSERT(sc);
1734 	ifp = sc->my_ifp;
1735 
1736 	callout_stop(&sc->my_autoneg_timer);
1737 	callout_stop(&sc->my_watchdog);
1738 
1739 	MY_CLRBIT(sc, MY_TCRRCR, (MY_RE | MY_TE));
1740 	CSR_WRITE_4(sc, MY_IMR, 0x00000000);
1741 	CSR_WRITE_4(sc, MY_TXLBA, 0x00000000);
1742 	CSR_WRITE_4(sc, MY_RXLBA, 0x00000000);
1743 
1744 	/*
1745 	 * Free data in the RX lists.
1746 	 */
1747 	for (i = 0; i < MY_RX_LIST_CNT; i++) {
1748 		if (sc->my_cdata.my_rx_chain[i].my_mbuf != NULL) {
1749 			m_freem(sc->my_cdata.my_rx_chain[i].my_mbuf);
1750 			sc->my_cdata.my_rx_chain[i].my_mbuf = NULL;
1751 		}
1752 	}
1753 	bzero((char *)&sc->my_ldata->my_rx_list,
1754 	    sizeof(sc->my_ldata->my_rx_list));
1755 	/*
1756 	 * Free the TX list buffers.
1757 	 */
1758 	for (i = 0; i < MY_TX_LIST_CNT; i++) {
1759 		if (sc->my_cdata.my_tx_chain[i].my_mbuf != NULL) {
1760 			m_freem(sc->my_cdata.my_tx_chain[i].my_mbuf);
1761 			sc->my_cdata.my_tx_chain[i].my_mbuf = NULL;
1762 		}
1763 	}
1764 	bzero((char *)&sc->my_ldata->my_tx_list,
1765 	    sizeof(sc->my_ldata->my_tx_list));
1766 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1767 	return;
1768 }
1769 
1770 /*
1771  * Stop all chip I/O so that the kernel's probe routines don't get confused
1772  * by errant DMAs when rebooting.
1773  */
1774 static int
1775 my_shutdown(device_t dev)
1776 {
1777 	struct my_softc *sc;
1778 
1779 	sc = device_get_softc(dev);
1780 	MY_LOCK(sc);
1781 	my_stop(sc);
1782 	MY_UNLOCK(sc);
1783 	return 0;
1784 }
1785