xref: /freebsd/sys/dev/my/if_my.c (revision e3466a89fd9c3d0be2f831d42a5b5cf65cb0fd53)
1 /*-
2  * Written by: yen_cw@myson.com.tw
3  * Copyright (c) 2002 Myson Technology Inc.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions, and the following disclaimer,
11  *    without modification, immediately at the beginning of the file.
12  * 2. The name of the author may not be used to endorse or promote products
13  *    derived from this software without specific prior written permission.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
19  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * Myson fast ethernet PCI NIC driver, available at: http://www.myson.com.tw/
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/sockio.h>
36 #include <sys/mbuf.h>
37 #include <sys/malloc.h>
38 #include <sys/kernel.h>
39 #include <sys/socket.h>
40 #include <sys/queue.h>
41 #include <sys/types.h>
42 #include <sys/module.h>
43 #include <sys/lock.h>
44 #include <sys/mutex.h>
45 
46 #define NBPFILTER	1
47 
48 #include <net/if.h>
49 #include <net/if_arp.h>
50 #include <net/ethernet.h>
51 #include <net/if_media.h>
52 #include <net/if_types.h>
53 #include <net/if_dl.h>
54 #include <net/bpf.h>
55 
56 #include <vm/vm.h>		/* for vtophys */
57 #include <vm/pmap.h>		/* for vtophys */
58 #include <machine/bus.h>
59 #include <machine/resource.h>
60 #include <sys/bus.h>
61 #include <sys/rman.h>
62 
63 #include <dev/pci/pcireg.h>
64 #include <dev/pci/pcivar.h>
65 
66 /*
67  * #define MY_USEIOSPACE
68  */
69 
70 static int      MY_USEIOSPACE = 1;
71 
72 #ifdef MY_USEIOSPACE
73 #define MY_RES                  SYS_RES_IOPORT
74 #define MY_RID                  MY_PCI_LOIO
75 #else
76 #define MY_RES                  SYS_RES_MEMORY
77 #define MY_RID                  MY_PCI_LOMEM
78 #endif
79 
80 
81 #include <dev/my/if_myreg.h>
82 
83 #ifndef lint
84 static          const char rcsid[] =
85 "$Id: if_my.c,v 1.16 2003/04/15 06:37:25 mdodd Exp $";
86 #endif
87 
88 /*
89  * Various supported device vendors/types and their names.
90  */
91 static struct my_type *my_info_tmp;
92 static struct my_type my_devs[] = {
93 	{MYSONVENDORID, MTD800ID, "Myson MTD80X Based Fast Ethernet Card"},
94 	{MYSONVENDORID, MTD803ID, "Myson MTD80X Based Fast Ethernet Card"},
95 	{MYSONVENDORID, MTD891ID, "Myson MTD89X Based Giga Ethernet Card"},
96 	{0, 0, NULL}
97 };
98 
99 /*
100  * Various supported PHY vendors/types and their names. Note that this driver
101  * will work with pretty much any MII-compliant PHY, so failure to positively
102  * identify the chip is not a fatal error.
103  */
104 static struct my_type my_phys[] = {
105 	{MysonPHYID0, MysonPHYID0, "<MYSON MTD981>"},
106 	{SeeqPHYID0, SeeqPHYID0, "<SEEQ 80225>"},
107 	{AhdocPHYID0, AhdocPHYID0, "<AHDOC 101>"},
108 	{MarvellPHYID0, MarvellPHYID0, "<MARVELL 88E1000>"},
109 	{LevelOnePHYID0, LevelOnePHYID0, "<LevelOne LXT1000>"},
110 	{0, 0, "<MII-compliant physical interface>"}
111 };
112 
113 static int      my_probe(device_t);
114 static int      my_attach(device_t);
115 static int      my_detach(device_t);
116 static int      my_newbuf(struct my_softc *, struct my_chain_onefrag *);
117 static int      my_encap(struct my_softc *, struct my_chain *, struct mbuf *);
118 static void     my_rxeof(struct my_softc *);
119 static void     my_txeof(struct my_softc *);
120 static void     my_txeoc(struct my_softc *);
121 static void     my_intr(void *);
122 static void     my_start(struct ifnet *);
123 static void     my_start_locked(struct ifnet *);
124 static int      my_ioctl(struct ifnet *, u_long, caddr_t);
125 static void     my_init(void *);
126 static void     my_init_locked(struct my_softc *);
127 static void     my_stop(struct my_softc *);
128 static void     my_autoneg_timeout(void *);
129 static void     my_watchdog(void *);
130 static int      my_shutdown(device_t);
131 static int      my_ifmedia_upd(struct ifnet *);
132 static void     my_ifmedia_sts(struct ifnet *, struct ifmediareq *);
133 static u_int16_t my_phy_readreg(struct my_softc *, int);
134 static void     my_phy_writereg(struct my_softc *, int, int);
135 static void     my_autoneg_xmit(struct my_softc *);
136 static void     my_autoneg_mii(struct my_softc *, int, int);
137 static void     my_setmode_mii(struct my_softc *, int);
138 static void     my_getmode_mii(struct my_softc *);
139 static void     my_setcfg(struct my_softc *, int);
140 static void     my_setmulti(struct my_softc *);
141 static void     my_reset(struct my_softc *);
142 static int      my_list_rx_init(struct my_softc *);
143 static int      my_list_tx_init(struct my_softc *);
144 static long     my_send_cmd_to_phy(struct my_softc *, int, int);
145 
146 #define MY_SETBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
147 #define MY_CLRBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))
148 
149 static device_method_t my_methods[] = {
150 	/* Device interface */
151 	DEVMETHOD(device_probe, my_probe),
152 	DEVMETHOD(device_attach, my_attach),
153 	DEVMETHOD(device_detach, my_detach),
154 	DEVMETHOD(device_shutdown, my_shutdown),
155 
156 	DEVMETHOD_END
157 };
158 
159 static driver_t my_driver = {
160 	"my",
161 	my_methods,
162 	sizeof(struct my_softc)
163 };
164 
165 static devclass_t my_devclass;
166 
167 DRIVER_MODULE(my, pci, my_driver, my_devclass, 0, 0);
168 MODULE_DEPEND(my, pci, 1, 1, 1);
169 MODULE_DEPEND(my, ether, 1, 1, 1);
170 
171 static long
172 my_send_cmd_to_phy(struct my_softc * sc, int opcode, int regad)
173 {
174 	long            miir;
175 	int             i;
176 	int             mask, data;
177 
178 	MY_LOCK_ASSERT(sc);
179 
180 	/* enable MII output */
181 	miir = CSR_READ_4(sc, MY_MANAGEMENT);
182 	miir &= 0xfffffff0;
183 
184 	miir |= MY_MASK_MIIR_MII_WRITE + MY_MASK_MIIR_MII_MDO;
185 
186 	/* send 32 1's preamble */
187 	for (i = 0; i < 32; i++) {
188 		/* low MDC; MDO is already high (miir) */
189 		miir &= ~MY_MASK_MIIR_MII_MDC;
190 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
191 
192 		/* high MDC */
193 		miir |= MY_MASK_MIIR_MII_MDC;
194 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
195 	}
196 
197 	/* calculate ST+OP+PHYAD+REGAD+TA */
198 	data = opcode | (sc->my_phy_addr << 7) | (regad << 2);
199 
200 	/* shift the command out, MSB first */
201 	mask = 0x8000;
202 	while (mask) {
203 		/* low MDC, prepare MDO */
204 		miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
205 		if (mask & data)
206 			miir |= MY_MASK_MIIR_MII_MDO;
207 
208 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
209 		/* high MDC */
210 		miir |= MY_MASK_MIIR_MII_MDC;
211 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
212 		DELAY(30);
213 
214 		/* next */
215 		mask >>= 1;
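		/*
		 * For a read, stop driving MDIO before the turnaround bits
		 * so the PHY can take over the data line.
		 */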
216 		if (mask == 0x2 && opcode == MY_OP_READ)
217 			miir &= ~MY_MASK_MIIR_MII_WRITE;
218 	}
219 
220 	return miir;
221 }
222 
223 
224 static u_int16_t
225 my_phy_readreg(struct my_softc * sc, int reg)
226 {
227 	long            miir;
228 	int             mask, data;
229 
230 	MY_LOCK_ASSERT(sc);
231 
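	/* The MTD803's internal PHY registers are mapped into CSR space. */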
232 	if (sc->my_info->my_did == MTD803ID)
233 		data = CSR_READ_2(sc, MY_PHYBASE + reg * 2);
234 	else {
235 		miir = my_send_cmd_to_phy(sc, MY_OP_READ, reg);
236 
237 		/* read data */
238 		mask = 0x8000;
239 		data = 0;
240 		while (mask) {
241 			/* low MDC */
242 			miir &= ~MY_MASK_MIIR_MII_MDC;
243 			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
244 
245 			/* read MDI */
246 			miir = CSR_READ_4(sc, MY_MANAGEMENT);
247 			if (miir & MY_MASK_MIIR_MII_MDI)
248 				data |= mask;
249 
250 			/* high MDC, and wait */
251 			miir |= MY_MASK_MIIR_MII_MDC;
252 			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
253 			DELAY(30);
254 
255 			/* next */
256 			mask >>= 1;
257 		}
258 
259 		/* low MDC */
260 		miir &= ~MY_MASK_MIIR_MII_MDC;
261 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
262 	}
263 
264 	return (u_int16_t) data;
265 }
266 
267 
268 static void
269 my_phy_writereg(struct my_softc * sc, int reg, int data)
270 {
271 	long            miir;
272 	int             mask;
273 
274 	MY_LOCK_ASSERT(sc);
275 
276 	if (sc->my_info->my_did == MTD803ID)
277 		CSR_WRITE_2(sc, MY_PHYBASE + reg * 2, data);
278 	else {
279 		miir = my_send_cmd_to_phy(sc, MY_OP_WRITE, reg);
280 
281 		/* write data */
282 		mask = 0x8000;
283 		while (mask) {
284 			/* low MDC, prepare MDO */
285 			miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
286 			if (mask & data)
287 				miir |= MY_MASK_MIIR_MII_MDO;
288 			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
289 			DELAY(1);
290 
291 			/* high MDC */
292 			miir |= MY_MASK_MIIR_MII_MDC;
293 			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
294 			DELAY(1);
295 
296 			/* next */
297 			mask >>= 1;
298 		}
299 
300 		/* low MDC */
301 		miir &= ~MY_MASK_MIIR_MII_MDC;
302 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
303 	}
304 	return;
305 }
306 
307 
308 /*
309  * Program the 64-bit multicast hash filter.
310  */
311 static void
312 my_setmulti(struct my_softc * sc)
313 {
314 	struct ifnet   *ifp;
315 	int             h = 0;
316 	u_int32_t       hashes[2] = {0, 0};
317 	struct ifmultiaddr *ifma;
318 	u_int32_t       rxfilt;
319 	int             mcnt = 0;
320 
321 	MY_LOCK_ASSERT(sc);
322 
323 	ifp = sc->my_ifp;
324 
325 	rxfilt = CSR_READ_4(sc, MY_TCRRCR);
326 
327 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
328 		rxfilt |= MY_AM;
329 		CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
330 		CSR_WRITE_4(sc, MY_MAR0, 0xFFFFFFFF);
331 		CSR_WRITE_4(sc, MY_MAR1, 0xFFFFFFFF);
332 
333 		return;
334 	}
335 	/* first, zot all the existing hash bits */
336 	CSR_WRITE_4(sc, MY_MAR0, 0);
337 	CSR_WRITE_4(sc, MY_MAR1, 0);
338 
339 	/* now program new ones */
340 	if_maddr_rlock(ifp);
341 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
342 		if (ifma->ifma_addr->sa_family != AF_LINK)
343 			continue;
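		/* Top 6 bits of the inverted CRC pick one of 64 filter bits. */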
344 		h = ~ether_crc32_be(LLADDR((struct sockaddr_dl *)
345 		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
346 		if (h < 32)
347 			hashes[0] |= (1 << h);
348 		else
349 			hashes[1] |= (1 << (h - 32));
350 		mcnt++;
351 	}
352 	if_maddr_runlock(ifp);
353 
354 	if (mcnt)
355 		rxfilt |= MY_AM;
356 	else
357 		rxfilt &= ~MY_AM;
358 	CSR_WRITE_4(sc, MY_MAR0, hashes[0]);
359 	CSR_WRITE_4(sc, MY_MAR1, hashes[1]);
360 	CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
361 	return;
362 }
363 
364 /*
365  * Initiate an autonegotiation session.
366  */
367 static void
368 my_autoneg_xmit(struct my_softc * sc)
369 {
370 	u_int16_t       phy_sts = 0;
371 
372 	MY_LOCK_ASSERT(sc);
373 
374 	my_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
375 	DELAY(500);
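	/* Wait for the PHY reset bit to self-clear. */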
376 	while (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_RESET);
377 
378 	phy_sts = my_phy_readreg(sc, PHY_BMCR);
379 	phy_sts |= PHY_BMCR_AUTONEGENBL | PHY_BMCR_AUTONEGRSTR;
380 	my_phy_writereg(sc, PHY_BMCR, phy_sts);
381 
382 	return;
383 }
384 
385 static void
386 my_autoneg_timeout(void *arg)
387 {
388 	struct my_softc *sc;
389 
390 	sc = arg;
391 	MY_LOCK_ASSERT(sc);
392 	my_autoneg_mii(sc, MY_FLAG_DELAYTIMEO, 1);
393 }
394 
395 /*
396  * Invoke autonegotiation on a PHY.
397  */
398 static void
399 my_autoneg_mii(struct my_softc * sc, int flag, int verbose)
400 {
401 	u_int16_t       phy_sts = 0, media, advert, ability;
402 	u_int16_t       ability2 = 0;
403 	struct ifnet   *ifp;
404 	struct ifmedia *ifm;
405 
406 	MY_LOCK_ASSERT(sc);
407 
408 	ifm = &sc->ifmedia;
409 	ifp = sc->my_ifp;
410 
411 	ifm->ifm_media = IFM_ETHER | IFM_AUTO;
412 
413 #ifndef FORCE_AUTONEG_TFOUR
414 	/*
415 	 * First, see if autoneg is supported. If not, there's no point in
416 	 * continuing.
417 	 */
418 	phy_sts = my_phy_readreg(sc, PHY_BMSR);
419 	if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
420 		if (verbose)
421 			device_printf(sc->my_dev,
422 			    "autonegotiation not supported\n");
423 		ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
424 		return;
425 	}
426 #endif
427 	switch (flag) {
428 	case MY_FLAG_FORCEDELAY:
429 		/*
430 		 * XXX Never use this option anywhere but in the probe
431 		 * routine: making the kernel stop dead in its tracks for
432 		 * five whole seconds after we've gone multi-user is really
433 		 * bad manners.
434 		 */
435 		my_autoneg_xmit(sc);
436 		DELAY(5000000);
437 		break;
438 	case MY_FLAG_SCHEDDELAY:
439 		/*
440 		 * Wait for the transmitter to go idle before starting an
441 		 * autoneg session, otherwise my_start() may clobber our
442 		 * timeout, and we don't want to allow transmission during an
443 		 * autoneg session since that can screw it up.
444 		 */
445 		if (sc->my_cdata.my_tx_head != NULL) {
446 			sc->my_want_auto = 1;
447 			MY_UNLOCK(sc);
448 			return;
449 		}
450 		my_autoneg_xmit(sc);
451 		callout_reset(&sc->my_autoneg_timer, hz * 5, my_autoneg_timeout,
452 		    sc);
453 		sc->my_autoneg = 1;
454 		sc->my_want_auto = 0;
455 		return;
456 	case MY_FLAG_DELAYTIMEO:
457 		callout_stop(&sc->my_autoneg_timer);
458 		sc->my_autoneg = 0;
459 		break;
460 	default:
461 		device_printf(sc->my_dev, "invalid autoneg flag: %d\n", flag);
462 		return;
463 	}
464 
465 	if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
466 		if (verbose)
467 			device_printf(sc->my_dev, "autoneg complete, ");
468 		phy_sts = my_phy_readreg(sc, PHY_BMSR);
469 	} else {
470 		if (verbose)
471 			device_printf(sc->my_dev, "autoneg not complete, ");
472 	}
473 
474 	media = my_phy_readreg(sc, PHY_BMCR);
475 
476 	/* Link is good. Report modes and set duplex mode. */
477 	if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
478 		if (verbose)
479 			device_printf(sc->my_dev, "link status good. ");
480 		advert = my_phy_readreg(sc, PHY_ANAR);
481 		ability = my_phy_readreg(sc, PHY_LPAR);
482 		if ((sc->my_pinfo->my_vid == MarvellPHYID0) ||
483 		    (sc->my_pinfo->my_vid == LevelOnePHYID0)) {
484 			ability2 = my_phy_readreg(sc, PHY_1000SR);
485 			if (ability2 & PHY_1000SR_1000BTXFULL) {
486 				advert = 0;
487 				ability = 0;
488 				/*
489 				 * this version did not support 1000M,
490 				 * ifm->ifm_media =
491 				 * IFM_ETHER|IFM_1000_T|IFM_FDX;
492 				 */
493 				ifm->ifm_media =
494 				    IFM_ETHER | IFM_100_TX | IFM_FDX;
495 				media &= ~PHY_BMCR_SPEEDSEL;
496 				media |= PHY_BMCR_1000;
497 				media |= PHY_BMCR_DUPLEX;
498 				printf("(full-duplex, 1000Mbps)\n");
499 			} else if (ability2 & PHY_1000SR_1000BTXHALF) {
500 				advert = 0;
501 				ability = 0;
502 				/*
503 				 * this version did not support 1000M,
504 				 * ifm->ifm_media = IFM_ETHER|IFM_1000_T;
505 				 */
506 				ifm->ifm_media = IFM_ETHER | IFM_100_TX;
507 				media &= ~PHY_BMCR_SPEEDSEL;
508 				media &= ~PHY_BMCR_DUPLEX;
509 				media |= PHY_BMCR_1000;
510 				printf("(half-duplex, 1000Mbps)\n");
511 			}
512 		}
513 		if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
514 			ifm->ifm_media = IFM_ETHER | IFM_100_T4;
515 			media |= PHY_BMCR_SPEEDSEL;
516 			media &= ~PHY_BMCR_DUPLEX;
517 			printf("(100baseT4)\n");
518 		} else if (advert & PHY_ANAR_100BTXFULL &&
519 			   ability & PHY_ANAR_100BTXFULL) {
520 			ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
521 			media |= PHY_BMCR_SPEEDSEL;
522 			media |= PHY_BMCR_DUPLEX;
523 			printf("(full-duplex, 100Mbps)\n");
524 		} else if (advert & PHY_ANAR_100BTXHALF &&
525 			   ability & PHY_ANAR_100BTXHALF) {
526 			ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
527 			media |= PHY_BMCR_SPEEDSEL;
528 			media &= ~PHY_BMCR_DUPLEX;
529 			printf("(half-duplex, 100Mbps)\n");
530 		} else if (advert & PHY_ANAR_10BTFULL &&
531 			   ability & PHY_ANAR_10BTFULL) {
532 			ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
533 			media &= ~PHY_BMCR_SPEEDSEL;
534 			media |= PHY_BMCR_DUPLEX;
535 			printf("(full-duplex, 10Mbps)\n");
536 		} else if (advert) {
537 			ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
538 			media &= ~PHY_BMCR_SPEEDSEL;
539 			media &= ~PHY_BMCR_DUPLEX;
540 			printf("(half-duplex, 10Mbps)\n");
541 		}
542 		media &= ~PHY_BMCR_AUTONEGENBL;
543 
544 		/* Set ASIC's duplex mode to match the PHY. */
545 		my_phy_writereg(sc, PHY_BMCR, media);
546 		my_setcfg(sc, media);
547 	} else {
548 		if (verbose)
549 			device_printf(sc->my_dev, "no carrier\n");
550 	}
551 
552 	my_init_locked(sc);
553 	if (sc->my_tx_pend) {
554 		sc->my_autoneg = 0;
555 		sc->my_tx_pend = 0;
556 		my_start_locked(ifp);
557 	}
558 	return;
559 }
560 
561 /*
562  * Query the PHY's capabilities.
563  */
564 static void
565 my_getmode_mii(struct my_softc * sc)
566 {
567 	u_int16_t       bmsr;
568 	struct ifnet   *ifp;
569 
570 	MY_LOCK_ASSERT(sc);
571 	ifp = sc->my_ifp;
572 	bmsr = my_phy_readreg(sc, PHY_BMSR);
573 	if (bootverbose)
574 		device_printf(sc->my_dev, "PHY status word: %x\n", bmsr);
575 
576 	/* fallback */
577 	sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
578 
579 	if (bmsr & PHY_BMSR_10BTHALF) {
580 		if (bootverbose)
581 			device_printf(sc->my_dev,
582 			    "10Mbps half-duplex mode supported\n");
583 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX,
584 		    0, NULL);
585 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
586 	}
587 	if (bmsr & PHY_BMSR_10BTFULL) {
588 		if (bootverbose)
589 			device_printf(sc->my_dev,
590 			    "10Mbps full-duplex mode supported\n");
591 
592 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
593 		    0, NULL);
594 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
595 	}
596 	if (bmsr & PHY_BMSR_100BTXHALF) {
597 		if (bootverbose)
598 			device_printf(sc->my_dev,
599 			    "100Mbps half-duplex mode supported\n");
600 		ifp->if_baudrate = 100000000;
601 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
602 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX,
603 			    0, NULL);
604 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
605 	}
606 	if (bmsr & PHY_BMSR_100BTXFULL) {
607 		if (bootverbose)
608 			device_printf(sc->my_dev,
609 			    "100Mbps full-duplex mode supported\n");
610 		ifp->if_baudrate = 100000000;
611 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
612 		    0, NULL);
613 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
614 	}
615 	/* Some also support 100BaseT4. */
616 	if (bmsr & PHY_BMSR_100BT4) {
617 		if (bootverbose)
618 			device_printf(sc->my_dev, "100baseT4 mode supported\n");
619 		ifp->if_baudrate = 100000000;
620 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_T4, 0, NULL);
621 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_T4;
622 #ifdef FORCE_AUTONEG_TFOUR
623 		if (bootverbose)
624 			device_printf(sc->my_dev,
625 			    "forcing on autoneg support for BT4\n");
626 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
627 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
628 #endif
629 	}
630 #if 0				/* this version did not support 1000M, */
631 	if (sc->my_pinfo->my_vid == MarvellPHYID0) {
632 		if (bootverbose)
633 			device_printf(sc->my_dev,
634 			    "1000Mbps half-duplex mode supported\n");
635 
636 		ifp->if_baudrate = 1000000000;
637 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
638 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_HDX,
639 		    0, NULL);
640 		if (bootverbose)
641 			device_printf(sc->my_dev,
642 			    "1000Mbps full-duplex mode supported\n");
643 		ifp->if_baudrate = 1000000000;
644 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
645 		    0, NULL);
646 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_1000_T | IFM_FDX;
647 	}
648 #endif
649 	if (bmsr & PHY_BMSR_CANAUTONEG) {
650 		if (bootverbose)
651 			device_printf(sc->my_dev, "autoneg supported\n");
652 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
653 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
654 	}
655 	return;
656 }
657 
658 /*
659  * Set speed and duplex mode.
660  */
661 static void
662 my_setmode_mii(struct my_softc * sc, int media)
663 {
664 	u_int16_t       bmcr;
665 	struct ifnet   *ifp;
666 
667 	MY_LOCK_ASSERT(sc);
668 	ifp = sc->my_ifp;
669 	/*
670 	 * If an autoneg session is in progress, stop it.
671 	 */
672 	if (sc->my_autoneg) {
673 		device_printf(sc->my_dev, "canceling autoneg session\n");
674 		callout_stop(&sc->my_autoneg_timer);
675 		sc->my_autoneg = sc->my_want_auto = 0;
676 		bmcr = my_phy_readreg(sc, PHY_BMCR);
677 		bmcr &= ~PHY_BMCR_AUTONEGENBL;
678 		my_phy_writereg(sc, PHY_BMCR, bmcr);
679 	}
680 	device_printf(sc->my_dev, "selecting MII, ");
681 	bmcr = my_phy_readreg(sc, PHY_BMCR);
682 	bmcr &= ~(PHY_BMCR_AUTONEGENBL | PHY_BMCR_SPEEDSEL | PHY_BMCR_1000 |
683 		  PHY_BMCR_DUPLEX | PHY_BMCR_LOOPBK);
684 
685 #if 0				/* this version did not support 1000M, */
686 	if (IFM_SUBTYPE(media) == IFM_1000_T) {
687 		printf("1000Mbps/T4, half-duplex\n");
688 		bmcr &= ~PHY_BMCR_SPEEDSEL;
689 		bmcr &= ~PHY_BMCR_DUPLEX;
690 		bmcr |= PHY_BMCR_1000;
691 	}
692 #endif
693 	if (IFM_SUBTYPE(media) == IFM_100_T4) {
694 		printf("100Mbps/T4, half-duplex\n");
695 		bmcr |= PHY_BMCR_SPEEDSEL;
696 		bmcr &= ~PHY_BMCR_DUPLEX;
697 	}
698 	if (IFM_SUBTYPE(media) == IFM_100_TX) {
699 		printf("100Mbps, ");
700 		bmcr |= PHY_BMCR_SPEEDSEL;
701 	}
702 	if (IFM_SUBTYPE(media) == IFM_10_T) {
703 		printf("10Mbps, ");
704 		bmcr &= ~PHY_BMCR_SPEEDSEL;
705 	}
706 	if ((media & IFM_GMASK) == IFM_FDX) {
707 		printf("full duplex\n");
708 		bmcr |= PHY_BMCR_DUPLEX;
709 	} else {
710 		printf("half duplex\n");
711 		bmcr &= ~PHY_BMCR_DUPLEX;
712 	}
713 	my_phy_writereg(sc, PHY_BMCR, bmcr);
714 	my_setcfg(sc, bmcr);
715 	return;
716 }
717 
718 /*
719  * The Myson manual states that in order to fiddle with the 'full-duplex' and
720  * '100Mbps' bits in the netconfig register, we first have to put the
721  * transmit and/or receive logic in the idle state.
722  */
723 static void
724 my_setcfg(struct my_softc * sc, int bmcr)
725 {
726 	int             i, restart = 0;
727 
728 	MY_LOCK_ASSERT(sc);
729 	if (CSR_READ_4(sc, MY_TCRRCR) & (MY_TE | MY_RE)) {
730 		restart = 1;
731 		MY_CLRBIT(sc, MY_TCRRCR, (MY_TE | MY_RE));
732 		for (i = 0; i < MY_TIMEOUT; i++) {
733 			DELAY(10);
734 			if (!(CSR_READ_4(sc, MY_TCRRCR) &
735 			    (MY_TXRUN | MY_RXRUN)))
736 				break;
737 		}
738 		if (i == MY_TIMEOUT)
739 			device_printf(sc->my_dev,
740 			    "failed to force tx and rx to idle\n");
741 	}
742 	MY_CLRBIT(sc, MY_TCRRCR, MY_PS1000);
743 	MY_CLRBIT(sc, MY_TCRRCR, MY_PS10);
744 	if (bmcr & PHY_BMCR_1000)
745 		MY_SETBIT(sc, MY_TCRRCR, MY_PS1000);
746 	else if (!(bmcr & PHY_BMCR_SPEEDSEL))
747 		MY_SETBIT(sc, MY_TCRRCR, MY_PS10);
748 	if (bmcr & PHY_BMCR_DUPLEX)
749 		MY_SETBIT(sc, MY_TCRRCR, MY_FD);
750 	else
751 		MY_CLRBIT(sc, MY_TCRRCR, MY_FD);
752 	if (restart)
753 		MY_SETBIT(sc, MY_TCRRCR, MY_TE | MY_RE);
754 	return;
755 }
756 
757 static void
758 my_reset(struct my_softc * sc)
759 {
760 	register int    i;
761 
762 	MY_LOCK_ASSERT(sc);
763 	MY_SETBIT(sc, MY_BCR, MY_SWR);
764 	for (i = 0; i < MY_TIMEOUT; i++) {
765 		DELAY(10);
766 		if (!(CSR_READ_4(sc, MY_BCR) & MY_SWR))
767 			break;
768 	}
769 	if (i == MY_TIMEOUT)
770 		device_printf(sc->my_dev, "reset never completed!\n");
771 
772 	/* Wait a little while for the chip to get its brains in order. */
773 	DELAY(1000);
774 	return;
775 }
776 
777 /*
778  * Probe for a Myson chip. Check the PCI vendor and device IDs against our
779  * list and return a device name if we find a match.
780  */
781 static int
782 my_probe(device_t dev)
783 {
784 	struct my_type *t;
785 
786 	t = my_devs;
787 	while (t->my_name != NULL) {
788 		if ((pci_get_vendor(dev) == t->my_vid) &&
789 		    (pci_get_device(dev) == t->my_did)) {
790 			device_set_desc(dev, t->my_name);
791 			my_info_tmp = t;
792 			return (BUS_PROBE_DEFAULT);
793 		}
794 		t++;
795 	}
796 	return (ENXIO);
797 }
798 
799 /*
800  * Attach the interface. Allocate softc structures, do ifmedia setup and
801  * ethernet/BPF attach.
802  */
803 static int
804 my_attach(device_t dev)
805 {
806 	int             i;
807 	u_char          eaddr[ETHER_ADDR_LEN];
808 	u_int32_t       iobase;
809 	struct my_softc *sc;
810 	struct ifnet   *ifp;
811 	int             media = IFM_ETHER | IFM_100_TX | IFM_FDX;
812 	unsigned int    round;
813 	caddr_t         roundptr;
814 	struct my_type *p;
815 	u_int16_t       phy_vid, phy_did, phy_sts = 0;
816 	int             rid, error = 0;
817 
818 	sc = device_get_softc(dev);
819 	sc->my_dev = dev;
820 	mtx_init(&sc->my_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
821 	    MTX_DEF);
822 	callout_init_mtx(&sc->my_autoneg_timer, &sc->my_mtx, 0);
823 	callout_init_mtx(&sc->my_watchdog, &sc->my_mtx, 0);
824 
825 	/*
826 	 * Map control/status registers.
827 	 */
828 	pci_enable_busmaster(dev);
829 
830 	if (my_info_tmp->my_did == MTD800ID) {
831 		iobase = pci_read_config(dev, MY_PCI_LOIO, 4);
832 		if (iobase & 0x300)
833 			MY_USEIOSPACE = 0;
834 	}
835 
836 	rid = MY_RID;
837 	sc->my_res = bus_alloc_resource_any(dev, MY_RES, &rid, RF_ACTIVE);
838 
839 	if (sc->my_res == NULL) {
840 		device_printf(dev, "couldn't map ports/memory\n");
841 		error = ENXIO;
842 		goto destroy_mutex;
843 	}
844 	sc->my_btag = rman_get_bustag(sc->my_res);
845 	sc->my_bhandle = rman_get_bushandle(sc->my_res);
846 
847 	rid = 0;
848 	sc->my_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
849 					    RF_SHAREABLE | RF_ACTIVE);
850 
851 	if (sc->my_irq == NULL) {
852 		device_printf(dev, "couldn't map interrupt\n");
853 		error = ENXIO;
854 		goto release_io;
855 	}
856 
857 	sc->my_info = my_info_tmp;
858 
859 	/* Reset the adapter. */
860 	MY_LOCK(sc);
861 	my_reset(sc);
862 	MY_UNLOCK(sc);
863 
864 	/*
865 	 * Get station address
866 	 */
867 	for (i = 0; i < ETHER_ADDR_LEN; ++i)
868 		eaddr[i] = CSR_READ_1(sc, MY_PAR0 + i);
869 
870 	sc->my_ldata_ptr = malloc(sizeof(struct my_list_data) + 8,
871 				  M_DEVBUF, M_NOWAIT);
872 	if (sc->my_ldata_ptr == NULL) {
873 		device_printf(dev, "no memory for list buffers!\n");
874 		error = ENXIO;
875 		goto release_irq;
876 	}
877 	sc->my_ldata = (struct my_list_data *) sc->my_ldata_ptr;
878 	round = (uintptr_t)sc->my_ldata_ptr & 0xF;
879 	roundptr = sc->my_ldata_ptr;
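	/* Align the descriptor list on an 8-byte boundary. */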
880 	for (i = 0; i < 8; i++) {
881 		if (round % 8) {
882 			round++;
883 			roundptr++;
884 		} else
885 			break;
886 	}
887 	sc->my_ldata = (struct my_list_data *) roundptr;
888 	bzero(sc->my_ldata, sizeof(struct my_list_data));
889 
890 	ifp = sc->my_ifp = if_alloc(IFT_ETHER);
891 	if (ifp == NULL) {
892 		device_printf(dev, "can not if_alloc()\n");
893 		error = ENOSPC;
894 		goto free_ldata;
895 	}
896 	ifp->if_softc = sc;
897 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
898 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
899 	ifp->if_ioctl = my_ioctl;
900 	ifp->if_start = my_start;
901 	ifp->if_init = my_init;
902 	ifp->if_baudrate = 10000000;
903 	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
904 	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
905 	IFQ_SET_READY(&ifp->if_snd);
906 
907 	if (sc->my_info->my_did == MTD803ID)
908 		sc->my_pinfo = my_phys;
909 	else {
910 		if (bootverbose)
911 			device_printf(dev, "probing for a PHY\n");
912 		MY_LOCK(sc);
913 		for (i = MY_PHYADDR_MIN; i < MY_PHYADDR_MAX + 1; i++) {
914 			if (bootverbose)
915 				device_printf(dev, "checking address: %d\n", i);
916 			sc->my_phy_addr = i;
917 			phy_sts = my_phy_readreg(sc, PHY_BMSR);
918 			if ((phy_sts != 0) && (phy_sts != 0xffff))
919 				break;
920 			else
921 				phy_sts = 0;
922 		}
923 		if (phy_sts) {
924 			phy_vid = my_phy_readreg(sc, PHY_VENID);
925 			phy_did = my_phy_readreg(sc, PHY_DEVID);
926 			if (bootverbose) {
927 				device_printf(dev, "found PHY at address %d, ",
928 				    sc->my_phy_addr);
929 				printf("vendor id: %x device id: %x\n",
930 				    phy_vid, phy_did);
931 			}
932 			p = my_phys;
933 			while (p->my_vid) {
934 				if (phy_vid == p->my_vid) {
935 					sc->my_pinfo = p;
936 					break;
937 				}
938 				p++;
939 			}
940 			if (sc->my_pinfo == NULL)
941 				sc->my_pinfo = &my_phys[PHY_UNKNOWN];
942 			if (bootverbose)
943 				device_printf(dev, "PHY type: %s\n",
944 				       sc->my_pinfo->my_name);
945 		} else {
946 			MY_UNLOCK(sc);
947 			device_printf(dev, "MII without any phy!\n");
948 			error = ENXIO;
949 			goto free_if;
950 		}
951 		MY_UNLOCK(sc);
952 	}
953 
954 	/* Do ifmedia setup. */
955 	ifmedia_init(&sc->ifmedia, 0, my_ifmedia_upd, my_ifmedia_sts);
956 	MY_LOCK(sc);
957 	my_getmode_mii(sc);
958 	my_autoneg_mii(sc, MY_FLAG_FORCEDELAY, 1);
959 	media = sc->ifmedia.ifm_media;
960 	my_stop(sc);
961 	MY_UNLOCK(sc);
962 	ifmedia_set(&sc->ifmedia, media);
963 
964 	ether_ifattach(ifp, eaddr);
965 
966 	error = bus_setup_intr(dev, sc->my_irq, INTR_TYPE_NET | INTR_MPSAFE,
967 			       NULL, my_intr, sc, &sc->my_intrhand);
968 
969 	if (error) {
970 		device_printf(dev, "couldn't set up irq\n");
971 		goto detach_if;
972 	}
973 
974 	return (0);
975 
976 detach_if:
977 	ether_ifdetach(ifp);
978 free_if:
979 	if_free(ifp);
980 free_ldata:
981 	free(sc->my_ldata_ptr, M_DEVBUF);
982 release_irq:
983 	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
984 release_io:
985 	bus_release_resource(dev, MY_RES, MY_RID, sc->my_res);
986 destroy_mutex:
987 	mtx_destroy(&sc->my_mtx);
988 	return (error);
989 }
990 
991 static int
992 my_detach(device_t dev)
993 {
994 	struct my_softc *sc;
995 	struct ifnet   *ifp;
996 
997 	sc = device_get_softc(dev);
998 	ifp = sc->my_ifp;
999 	ether_ifdetach(ifp);
1000 	MY_LOCK(sc);
1001 	my_stop(sc);
1002 	MY_UNLOCK(sc);
1003 	bus_teardown_intr(dev, sc->my_irq, sc->my_intrhand);
1004 	callout_drain(&sc->my_watchdog);
1005 	callout_drain(&sc->my_autoneg_timer);
1006 
1007 	if_free(ifp);
1008 	free(sc->my_ldata_ptr, M_DEVBUF);
1009 
1010 	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
1011 	bus_release_resource(dev, MY_RES, MY_RID, sc->my_res);
1012 	mtx_destroy(&sc->my_mtx);
1013 	return (0);
1014 }
1015 
1016 
1017 /*
1018  * Initialize the transmit descriptors.
1019  */
1020 static int
1021 my_list_tx_init(struct my_softc * sc)
1022 {
1023 	struct my_chain_data *cd;
1024 	struct my_list_data *ld;
1025 	int             i;
1026 
1027 	MY_LOCK_ASSERT(sc);
1028 	cd = &sc->my_cdata;
1029 	ld = sc->my_ldata;
1030 	for (i = 0; i < MY_TX_LIST_CNT; i++) {
1031 		cd->my_tx_chain[i].my_ptr = &ld->my_tx_list[i];
1032 		if (i == (MY_TX_LIST_CNT - 1))
1033 			cd->my_tx_chain[i].my_nextdesc = &cd->my_tx_chain[0];
1034 		else
1035 			cd->my_tx_chain[i].my_nextdesc =
1036 			    &cd->my_tx_chain[i + 1];
1037 	}
1038 	cd->my_tx_free = &cd->my_tx_chain[0];
1039 	cd->my_tx_tail = cd->my_tx_head = NULL;
1040 	return (0);
1041 }
1042 
1043 /*
1044  * Initialize the RX descriptors and allocate mbufs for them. Note that we
1045  * arrange the descriptors in a closed ring, so that the last descriptor
1046  * points back to the first.
1047  */
1048 static int
1049 my_list_rx_init(struct my_softc * sc)
1050 {
1051 	struct my_chain_data *cd;
1052 	struct my_list_data *ld;
1053 	int             i;
1054 
1055 	MY_LOCK_ASSERT(sc);
1056 	cd = &sc->my_cdata;
1057 	ld = sc->my_ldata;
1058 	for (i = 0; i < MY_RX_LIST_CNT; i++) {
1059 		cd->my_rx_chain[i].my_ptr =
1060 		    (struct my_desc *) & ld->my_rx_list[i];
1061 		if (my_newbuf(sc, &cd->my_rx_chain[i]) == ENOBUFS) {
1062 			MY_UNLOCK(sc);
1063 			return (ENOBUFS);
1064 		}
1065 		if (i == (MY_RX_LIST_CNT - 1)) {
1066 			cd->my_rx_chain[i].my_nextdesc = &cd->my_rx_chain[0];
1067 			ld->my_rx_list[i].my_next = vtophys(&ld->my_rx_list[0]);
1068 		} else {
1069 			cd->my_rx_chain[i].my_nextdesc =
1070 			    &cd->my_rx_chain[i + 1];
1071 			ld->my_rx_list[i].my_next =
1072 			    vtophys(&ld->my_rx_list[i + 1]);
1073 		}
1074 	}
1075 	cd->my_rx_head = &cd->my_rx_chain[0];
1076 	return (0);
1077 }
1078 
1079 /*
1080  * Initialize an RX descriptor and attach an MBUF cluster.
1081  */
1082 static int
1083 my_newbuf(struct my_softc * sc, struct my_chain_onefrag * c)
1084 {
1085 	struct mbuf    *m_new = NULL;
1086 
1087 	MY_LOCK_ASSERT(sc);
1088 	MGETHDR(m_new, M_NOWAIT, MT_DATA);
1089 	if (m_new == NULL) {
1090 		device_printf(sc->my_dev,
1091 		    "no memory for rx list -- packet dropped!\n");
1092 		return (ENOBUFS);
1093 	}
1094 	MCLGET(m_new, M_NOWAIT);
1095 	if (!(m_new->m_flags & M_EXT)) {
1096 		device_printf(sc->my_dev,
1097 		    "no memory for rx list -- packet dropped!\n");
1098 		m_freem(m_new);
1099 		return (ENOBUFS);
1100 	}
1101 	c->my_mbuf = m_new;
1102 	c->my_ptr->my_data = vtophys(mtod(m_new, caddr_t));
1103 	c->my_ptr->my_ctl = (MCLBYTES - 1) << MY_RBSShift;
1104 	c->my_ptr->my_status = MY_OWNByNIC;
1105 	return (0);
1106 }
1107 
1108 /*
1109  * A frame has been uploaded: pass the resulting mbuf chain up to the higher
1110  * level protocols.
1111  */
1112 static void
1113 my_rxeof(struct my_softc * sc)
1114 {
1115 	struct ether_header *eh;
1116 	struct mbuf    *m;
1117 	struct ifnet   *ifp;
1118 	struct my_chain_onefrag *cur_rx;
1119 	int             total_len = 0;
1120 	u_int32_t       rxstat;
1121 
1122 	MY_LOCK_ASSERT(sc);
1123 	ifp = sc->my_ifp;
1124 	while (!((rxstat = sc->my_cdata.my_rx_head->my_ptr->my_status)
1125 	    & MY_OWNByNIC)) {
1126 		cur_rx = sc->my_cdata.my_rx_head;
1127 		sc->my_cdata.my_rx_head = cur_rx->my_nextdesc;
1128 
1129 		if (rxstat & MY_ES) {	/* error summary: give up this rx pkt */
1130 			ifp->if_ierrors++;
1131 			cur_rx->my_ptr->my_status = MY_OWNByNIC;
1132 			continue;
1133 		}
1134 		/* No errors; receive the packet. */
1135 		total_len = (rxstat & MY_FLNGMASK) >> MY_FLNGShift;
1136 		total_len -= ETHER_CRC_LEN;
1137 
1138 		if (total_len < MINCLSIZE) {
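		/*
		 * Copy short frames into a fresh mbuf with m_devget() so the
		 * receive buffer stays in the ring; larger frames are handed
		 * up whole and replaced with a new cluster via my_newbuf().
		 */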
1139 			m = m_devget(mtod(cur_rx->my_mbuf, char *),
1140 			    total_len, 0, ifp, NULL);
1141 			cur_rx->my_ptr->my_status = MY_OWNByNIC;
1142 			if (m == NULL) {
1143 				ifp->if_ierrors++;
1144 				continue;
1145 			}
1146 		} else {
1147 			m = cur_rx->my_mbuf;
1148 			/*
1149 			 * Try to conjure up a new mbuf cluster. If that
1150 			 * fails, it means we have an out of memory condition
1151 			 * and should leave the buffer in place and continue.
1152 			 * This will result in a lost packet, but there's
1153 			 * little else we can do in this situation.
1154 			 */
1155 			if (my_newbuf(sc, cur_rx) == ENOBUFS) {
1156 				ifp->if_ierrors++;
1157 				cur_rx->my_ptr->my_status = MY_OWNByNIC;
1158 				continue;
1159 			}
1160 			m->m_pkthdr.rcvif = ifp;
1161 			m->m_pkthdr.len = m->m_len = total_len;
1162 		}
1163 		ifp->if_ipackets++;
1164 		eh = mtod(m, struct ether_header *);
1165 #if NBPFILTER > 0
1166 		/*
1167 		 * Handle BPF listeners. Let the BPF user see the packet, but
1168 		 * don't pass it up to the ether_input() layer unless it's a
1169 		 * broadcast packet, multicast packet, matches our ethernet
1170 		 * address or the interface is in promiscuous mode.
1171 		 */
1172 		if (bpf_peers_present(ifp->if_bpf)) {
1173 			bpf_mtap(ifp->if_bpf, m);
1174 			if (ifp->if_flags & IFF_PROMISC &&
1175 			    (bcmp(eh->ether_dhost, IF_LLADDR(sc->my_ifp),
1176 				ETHER_ADDR_LEN) &&
1177 			     (eh->ether_dhost[0] & 1) == 0)) {
1178 				m_freem(m);
1179 				continue;
1180 			}
1181 		}
1182 #endif
1183 		MY_UNLOCK(sc);
1184 		(*ifp->if_input)(ifp, m);
1185 		MY_LOCK(sc);
1186 	}
1187 	return;
1188 }
1189 
1190 
1191 /*
1192  * A frame was downloaded to the chip. It's safe for us to clean up the list
1193  * buffers.
1194  */
1195 static void
1196 my_txeof(struct my_softc * sc)
1197 {
1198 	struct my_chain *cur_tx;
1199 	struct ifnet   *ifp;
1200 
1201 	MY_LOCK_ASSERT(sc);
1202 	ifp = sc->my_ifp;
1203 	/* Clear the timeout timer. */
1204 	sc->my_timer = 0;
1205 	if (sc->my_cdata.my_tx_head == NULL) {
1206 		return;
1207 	}
1208 	/*
1209 	 * Go through our tx list and free mbufs for those frames that have
1210 	 * been transmitted.
1211 	 */
1212 	while (sc->my_cdata.my_tx_head->my_mbuf != NULL) {
1213 		u_int32_t       txstat;
1214 
1215 		cur_tx = sc->my_cdata.my_tx_head;
1216 		txstat = MY_TXSTATUS(cur_tx);
1217 		if ((txstat & MY_OWNByNIC) || txstat == MY_UNSENT)
1218 			break;
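		/*
		 * In non-enhanced mode each descriptor carries its own error
		 * and collision counts; the enhanced MTD891 reports
		 * collisions through the TSR register after the loop.
		 */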
1219 		if (!(CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced)) {
1220 			if (txstat & MY_TXERR) {
1221 				ifp->if_oerrors++;
1222 				if (txstat & MY_EC) /* excessive collision */
1223 					ifp->if_collisions++;
1224 				if (txstat & MY_LC)	/* late collision */
1225 					ifp->if_collisions++;
1226 			}
1227 			ifp->if_collisions += (txstat & MY_NCRMASK) >>
1228 			    MY_NCRShift;
1229 		}
1230 		ifp->if_opackets++;
1231 		m_freem(cur_tx->my_mbuf);
1232 		cur_tx->my_mbuf = NULL;
1233 		if (sc->my_cdata.my_tx_head == sc->my_cdata.my_tx_tail) {
1234 			sc->my_cdata.my_tx_head = NULL;
1235 			sc->my_cdata.my_tx_tail = NULL;
1236 			break;
1237 		}
1238 		sc->my_cdata.my_tx_head = cur_tx->my_nextdesc;
1239 	}
1240 	if (CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced) {
1241 		ifp->if_collisions += (CSR_READ_4(sc, MY_TSR) & MY_NCRMask);
1242 	}
1243 	return;
1244 }
1245 
1246 /*
1247  * TX 'end of channel' interrupt handler.
1248  */
1249 static void
1250 my_txeoc(struct my_softc * sc)
1251 {
1252 	struct ifnet   *ifp;
1253 
1254 	MY_LOCK_ASSERT(sc);
1255 	ifp = sc->my_ifp;
1256 	sc->my_timer = 0;
1257 	if (sc->my_cdata.my_tx_head == NULL) {
1258 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1259 		sc->my_cdata.my_tx_tail = NULL;
1260 		if (sc->my_want_auto)
1261 			my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
1262 	} else {
1263 		if (MY_TXOWN(sc->my_cdata.my_tx_head) == MY_UNSENT) {
1264 			MY_TXOWN(sc->my_cdata.my_tx_head) = MY_OWNByNIC;
1265 			sc->my_timer = 5;
1266 			CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF);
1267 		}
1268 	}
1269 	return;
1270 }
1271 
1272 static void
1273 my_intr(void *arg)
1274 {
1275 	struct my_softc *sc;
1276 	struct ifnet   *ifp;
1277 	u_int32_t       status;
1278 
1279 	sc = arg;
1280 	MY_LOCK(sc);
1281 	ifp = sc->my_ifp;
1282 	if (!(ifp->if_flags & IFF_UP)) {
1283 		MY_UNLOCK(sc);
1284 		return;
1285 	}
1286 	/* Disable interrupts. */
1287 	CSR_WRITE_4(sc, MY_IMR, 0x00000000);
1288 
1289 	for (;;) {
1290 		status = CSR_READ_4(sc, MY_ISR);
1291 		status &= MY_INTRS;
1292 		if (status)
1293 			CSR_WRITE_4(sc, MY_ISR, status);
1294 		else
1295 			break;
1296 
1297 		if (status & MY_RI)	/* receive interrupt */
1298 			my_rxeof(sc);
1299 
1300 		if ((status & MY_RBU) || (status & MY_RxErr)) {
1301 			/* rx buffer unavailable or rx error */
1302 			ifp->if_ierrors++;
1303 #ifdef foo
1304 			my_stop(sc);
1305 			my_reset(sc);
1306 			my_init_locked(sc);
1307 #endif
1308 		}
1309 		if (status & MY_TI)	/* tx interrupt */
1310 			my_txeof(sc);
1311 		if (status & MY_ETI)	/* tx early interrupt */
1312 			my_txeof(sc);
1313 		if (status & MY_TBU)	/* tx buffer unavailable */
1314 			my_txeoc(sc);
1315 
1316 #if 0				/* 90/1/18 delete */
1317 		if (status & MY_FBE) {
1318 			my_reset(sc);
1319 			my_init_locked(sc);
1320 		}
1321 #endif
1322 
1323 	}
1324 
1325 	/* Re-enable interrupts. */
1326 	CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
1327 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1328 		my_start_locked(ifp);
1329 	MY_UNLOCK(sc);
1330 	return;
1331 }
1332 
1333 /*
1334  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1335  * pointers to the fragment pointers.
1336  */
1337 static int
1338 my_encap(struct my_softc * sc, struct my_chain * c, struct mbuf * m_head)
1339 {
1340 	struct my_desc *f = NULL;
1341 	int             total_len;
1342 	struct mbuf    *m, *m_new = NULL;
1343 
1344 	MY_LOCK_ASSERT(sc);
1345 	/* calculate the total tx pkt length */
1346 	total_len = 0;
1347 	for (m = m_head; m != NULL; m = m->m_next)
1348 		total_len += m->m_len;
1349 	/*
1350 	 * Coalesce the mbuf chain into a single mbuf (or a cluster when the
1351 	 * packet is larger than MHLEN) so the frame sits in one physically
1352 	 * contiguous buffer for the descriptor.
1353 	 */
1354 	m = m_head;
1355 	MGETHDR(m_new, M_NOWAIT, MT_DATA);
1356 	if (m_new == NULL) {
1357 		device_printf(sc->my_dev, "no memory for tx list\n");
1358 		return (1);
1359 	}
1360 	if (m_head->m_pkthdr.len > MHLEN) {
1361 		MCLGET(m_new, M_NOWAIT);
1362 		if (!(m_new->m_flags & M_EXT)) {
1363 			m_freem(m_new);
1364 			device_printf(sc->my_dev, "no memory for tx list\n");
1365 			return (1);
1366 		}
1367 	}
1368 	m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t));
1369 	m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1370 	m_freem(m_head);
1371 	m_head = m_new;
1372 	f = &c->my_ptr->my_frag[0];
1373 	f->my_status = 0;
1374 	f->my_data = vtophys(mtod(m_new, caddr_t));
1375 	total_len = m_new->m_len;
1376 	f->my_ctl = MY_TXFD | MY_TXLD | MY_CRCEnable | MY_PADEnable;
1377 	f->my_ctl |= total_len << MY_PKTShift;	/* pkt size */
1378 	f->my_ctl |= total_len;	/* buffer size */
1379 	/* 89/12/29 add, for mtd891 *//* [ 89? ] */
1380 	if (sc->my_info->my_did == MTD891ID)
1381 		f->my_ctl |= MY_ETIControl | MY_RetryTxLC;
1382 	c->my_mbuf = m_head;
1383 	c->my_lastdesc = 0;
1384 	MY_TXNEXT(c) = vtophys(&c->my_nextdesc->my_ptr->my_frag[0]);
1385 	return (0);
1386 }
1387 
1388 /*
1389  * Main transmit routine. Dequeue packets from the send queue and let
1390  * my_encap() copy each one into a contiguous buffer the chip can DMA
1391  * from. The mbuf pointer is kept in the software chain entry, since the
1392  * hardware descriptor only stores physical addresses.
1393  */
1394 static void
1395 my_start(struct ifnet * ifp)
1396 {
1397 	struct my_softc *sc;
1398 
1399 	sc = ifp->if_softc;
1400 	MY_LOCK(sc);
1401 	my_start_locked(ifp);
1402 	MY_UNLOCK(sc);
1403 }
1404 
1405 static void
1406 my_start_locked(struct ifnet * ifp)
1407 {
1408 	struct my_softc *sc;
1409 	struct mbuf    *m_head = NULL;
1410 	struct my_chain *cur_tx = NULL, *start_tx;
1411 
1412 	sc = ifp->if_softc;
1413 	MY_LOCK_ASSERT(sc);
1414 	if (sc->my_autoneg) {
1415 		sc->my_tx_pend = 1;
1416 		return;
1417 	}
1418 	/*
1419 	 * Check for an available queue slot. If there are none, punt.
1420 	 */
1421 	if (sc->my_cdata.my_tx_free->my_mbuf != NULL) {
1422 		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1423 		return;
1424 	}
1425 	start_tx = sc->my_cdata.my_tx_free;
1426 	while (sc->my_cdata.my_tx_free->my_mbuf == NULL) {
1427 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1428 		if (m_head == NULL)
1429 			break;
1430 
1431 		/* Pick a descriptor off the free list. */
1432 		cur_tx = sc->my_cdata.my_tx_free;
1433 		sc->my_cdata.my_tx_free = cur_tx->my_nextdesc;
1434 
1435 		/* Pack the data into the descriptor. */
1436 		my_encap(sc, cur_tx, m_head);
1437 
1438 		if (cur_tx != start_tx)
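		/*
		 * Hand every descriptor except the first over to the NIC
		 * now; the first one is released at the end so the chip
		 * never sees a partially built chain.
		 */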
1439 			MY_TXOWN(cur_tx) = MY_OWNByNIC;
1440 #if NBPFILTER > 0
1441 		/*
1442 		 * If there's a BPF listener, bounce a copy of this frame to
1443 		 * him.
1444 		 */
1445 		BPF_MTAP(ifp, cur_tx->my_mbuf);
1446 #endif
1447 	}
1448 	/*
1449 	 * If there are no packets queued, bail.
1450 	 */
1451 	if (cur_tx == NULL) {
1452 		return;
1453 	}
1454 	/*
1455 	 * Place the request for the upload interrupt in the last descriptor
1456 	 * in the chain. This way, if we're chaining several packets at once,
1457 	 * we'll only get an interrupt once for the whole chain rather than
1458 	 * once for each packet.
1459 	 */
1460 	MY_TXCTL(cur_tx) |= MY_TXIC;
1461 	cur_tx->my_ptr->my_frag[0].my_ctl |= MY_TXIC;
1462 	sc->my_cdata.my_tx_tail = cur_tx;
1463 	if (sc->my_cdata.my_tx_head == NULL)
1464 		sc->my_cdata.my_tx_head = start_tx;
1465 	MY_TXOWN(start_tx) = MY_OWNByNIC;
1466 	CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF);	/* tx polling demand */
1467 
1468 	/*
1469 	 * Set a timeout in case the chip goes out to lunch.
1470 	 */
1471 	sc->my_timer = 5;
1472 	return;
1473 }
1474 
1475 static void
1476 my_init(void *xsc)
1477 {
1478 	struct my_softc *sc = xsc;
1479 
1480 	MY_LOCK(sc);
1481 	my_init_locked(sc);
1482 	MY_UNLOCK(sc);
1483 }
1484 
1485 static void
1486 my_init_locked(struct my_softc *sc)
1487 {
1488 	struct ifnet   *ifp = sc->my_ifp;
1489 	u_int16_t       phy_bmcr = 0;
1490 
1491 	MY_LOCK_ASSERT(sc);
1492 	if (sc->my_autoneg) {
1493 		return;
1494 	}
1495 	if (sc->my_pinfo != NULL)
1496 		phy_bmcr = my_phy_readreg(sc, PHY_BMCR);
1497 	/*
1498 	 * Cancel pending I/O and free all RX/TX buffers.
1499 	 */
1500 	my_stop(sc);
1501 	my_reset(sc);
1502 
1503 	/*
1504 	 * Set cache alignment and burst length.
1505 	 */
1506 #if 0				/* 89/9/1 modify,  */
1507 	CSR_WRITE_4(sc, MY_BCR, MY_RPBLE512);
1508 	CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF);
1509 #endif
1510 	CSR_WRITE_4(sc, MY_BCR, MY_PBL8);
1511 	CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF | MY_RBLEN | MY_RPBLE512);
1512 	/*
1513 	 * 89/12/29 add, for mtd891,
1514 	 */
1515 	if (sc->my_info->my_did == MTD891ID) {
1516 		MY_SETBIT(sc, MY_BCR, MY_PROG);
1517 		MY_SETBIT(sc, MY_TCRRCR, MY_Enhanced);
1518 	}
1519 	my_setcfg(sc, phy_bmcr);
1520 	/* Init circular RX list. */
1521 	if (my_list_rx_init(sc) == ENOBUFS) {
1522 		device_printf(sc->my_dev, "init failed: no memory for rx buffers\n");
1523 		my_stop(sc);
1524 		return;
1525 	}
1526 	/* Init TX descriptors. */
1527 	my_list_tx_init(sc);
1528 
1529 	/* If we want promiscuous mode, set the allframes bit. */
1530 	if (ifp->if_flags & IFF_PROMISC)
1531 		MY_SETBIT(sc, MY_TCRRCR, MY_PROM);
1532 	else
1533 		MY_CLRBIT(sc, MY_TCRRCR, MY_PROM);
1534 
1535 	/*
1536 	 * Set capture broadcast bit to capture broadcast frames.
1537 	 */
1538 	if (ifp->if_flags & IFF_BROADCAST)
1539 		MY_SETBIT(sc, MY_TCRRCR, MY_AB);
1540 	else
1541 		MY_CLRBIT(sc, MY_TCRRCR, MY_AB);
1542 
1543 	/*
1544 	 * Program the multicast filter, if necessary.
1545 	 */
1546 	my_setmulti(sc);
1547 
1548 	/*
1549 	 * Load the address of the RX list.
1550 	 */
1551 	MY_CLRBIT(sc, MY_TCRRCR, MY_RE);
1552 	CSR_WRITE_4(sc, MY_RXLBA, vtophys(&sc->my_ldata->my_rx_list[0]));
1553 
1554 	/*
1555 	 * Enable interrupts.
1556 	 */
1557 	CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
1558 	CSR_WRITE_4(sc, MY_ISR, 0xFFFFFFFF);
1559 
1560 	/* Enable receiver and transmitter. */
1561 	MY_SETBIT(sc, MY_TCRRCR, MY_RE);
1562 	MY_CLRBIT(sc, MY_TCRRCR, MY_TE);
1563 	CSR_WRITE_4(sc, MY_TXLBA, vtophys(&sc->my_ldata->my_tx_list[0]));
1564 	MY_SETBIT(sc, MY_TCRRCR, MY_TE);
1565 
1566 	/* Restore state of BMCR */
1567 	if (sc->my_pinfo != NULL)
1568 		my_phy_writereg(sc, PHY_BMCR, phy_bmcr);
1569 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1570 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1571 
1572 	callout_reset(&sc->my_watchdog, hz, my_watchdog, sc);
1573 	return;
1574 }
1575 
1576 /*
1577  * Set media options.
1578  */
1579 
1580 static int
1581 my_ifmedia_upd(struct ifnet * ifp)
1582 {
1583 	struct my_softc *sc;
1584 	struct ifmedia *ifm;
1585 
1586 	sc = ifp->if_softc;
1587 	MY_LOCK(sc);
1588 	ifm = &sc->ifmedia;
1589 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
1590 		MY_UNLOCK(sc);
1591 		return (EINVAL);
1592 	}
1593 	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
1594 		my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
1595 	else
1596 		my_setmode_mii(sc, ifm->ifm_media);
1597 	MY_UNLOCK(sc);
1598 	return (0);
1599 }
1600 
1601 /*
1602  * Report current media status.
1603  */
1604 
1605 static void
1606 my_ifmedia_sts(struct ifnet * ifp, struct ifmediareq * ifmr)
1607 {
1608 	struct my_softc *sc;
1609 	u_int16_t advert = 0, ability = 0;
1610 
1611 	sc = ifp->if_softc;
1612 	MY_LOCK(sc);
1613 	ifmr->ifm_active = IFM_ETHER;
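	/* Autoneg disabled: report the forced mode straight from the BMCR. */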
1614 	if (!(my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
1615 #if 0				/* this version did not support 1000M, */
1616 		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_1000)
1617 			ifmr->ifm_active = IFM_ETHER | IFM_1000TX;
1618 #endif
1619 		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
1620 			ifmr->ifm_active = IFM_ETHER | IFM_100_TX;
1621 		else
1622 			ifmr->ifm_active = IFM_ETHER | IFM_10_T;
1623 		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
1624 			ifmr->ifm_active |= IFM_FDX;
1625 		else
1626 			ifmr->ifm_active |= IFM_HDX;
1627 
1628 		MY_UNLOCK(sc);
1629 		return;
1630 	}
1631 	ability = my_phy_readreg(sc, PHY_LPAR);
1632 	advert = my_phy_readreg(sc, PHY_ANAR);
1633 
1634 #if 0				/* this version did not support 1000M, */
1635 	if (sc->my_pinfo->my_vid == MarvellPHYID0) {
1636 		ability2 = my_phy_readreg(sc, PHY_1000SR);
1637 		if (ability2 & PHY_1000SR_1000BTXFULL) {
1638 			advert = 0;
1639 			ability = 0;
1640 	  		ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
1641 		} else if (ability2 & PHY_1000SR_1000BTXHALF) {
1642 			advert = 0;
1643 			ability = 0;
1644 			ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_HDX;
1645 		}
1646 	}
1647 #endif
1648 	if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4)
1649 		ifmr->ifm_active = IFM_ETHER | IFM_100_T4;
1650 	else if (advert & PHY_ANAR_100BTXFULL && ability & PHY_ANAR_100BTXFULL)
1651 		ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1652 	else if (advert & PHY_ANAR_100BTXHALF && ability & PHY_ANAR_100BTXHALF)
1653 		ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_HDX;
1654 	else if (advert & PHY_ANAR_10BTFULL && ability & PHY_ANAR_10BTFULL)
1655 		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_FDX;
1656 	else if (advert & PHY_ANAR_10BTHALF && ability & PHY_ANAR_10BTHALF)
1657 		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_HDX;
1658 	MY_UNLOCK(sc);
1659 	return;
1660 }
1661 
1662 static int
1663 my_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
1664 {
1665 	struct my_softc *sc = ifp->if_softc;
1666 	struct ifreq   *ifr = (struct ifreq *) data;
1667 	int             error;
1668 
1669 	switch (command) {
1670 	case SIOCSIFFLAGS:
1671 		MY_LOCK(sc);
1672 		if (ifp->if_flags & IFF_UP)
1673 			my_init_locked(sc);
1674 		else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1675 			my_stop(sc);
1676 		MY_UNLOCK(sc);
1677 		error = 0;
1678 		break;
1679 	case SIOCADDMULTI:
1680 	case SIOCDELMULTI:
1681 		MY_LOCK(sc);
1682 		my_setmulti(sc);
1683 		MY_UNLOCK(sc);
1684 		error = 0;
1685 		break;
1686 	case SIOCGIFMEDIA:
1687 	case SIOCSIFMEDIA:
1688 		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
1689 		break;
1690 	default:
1691 		error = ether_ioctl(ifp, command, data);
1692 		break;
1693 	}
1694 	return (error);
1695 }
1696 
1697 static void
1698 my_watchdog(void *arg)
1699 {
1700 	struct my_softc *sc;
1701 	struct ifnet *ifp;
1702 
1703 	sc = arg;
1704 	MY_LOCK_ASSERT(sc);
1705 	callout_reset(&sc->my_watchdog, hz, my_watchdog, sc);
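	/* my_timer counts down only while a transmission is outstanding. */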
1706 	if (sc->my_timer == 0 || --sc->my_timer > 0)
1707 		return;
1708 
1709 	ifp = sc->my_ifp;
1710 	ifp->if_oerrors++;
1711 	if_printf(ifp, "watchdog timeout\n");
1712 	if (!(my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
1713 		if_printf(ifp, "no carrier - transceiver cable problem?\n");
1714 	my_stop(sc);
1715 	my_reset(sc);
1716 	my_init_locked(sc);
1717 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1718 		my_start_locked(ifp);
1719 }
1720 
1721 
1722 /*
1723  * Stop the adapter and free any mbufs allocated to the RX and TX lists.
1724  */
1725 static void
1726 my_stop(struct my_softc * sc)
1727 {
1728 	register int    i;
1729 	struct ifnet   *ifp;
1730 
1731 	MY_LOCK_ASSERT(sc);
1732 	ifp = sc->my_ifp;
1733 
1734 	callout_stop(&sc->my_autoneg_timer);
1735 	callout_stop(&sc->my_watchdog);
1736 
1737 	MY_CLRBIT(sc, MY_TCRRCR, (MY_RE | MY_TE));
1738 	CSR_WRITE_4(sc, MY_IMR, 0x00000000);
1739 	CSR_WRITE_4(sc, MY_TXLBA, 0x00000000);
1740 	CSR_WRITE_4(sc, MY_RXLBA, 0x00000000);
1741 
1742 	/*
1743 	 * Free data in the RX lists.
1744 	 */
1745 	for (i = 0; i < MY_RX_LIST_CNT; i++) {
1746 		if (sc->my_cdata.my_rx_chain[i].my_mbuf != NULL) {
1747 			m_freem(sc->my_cdata.my_rx_chain[i].my_mbuf);
1748 			sc->my_cdata.my_rx_chain[i].my_mbuf = NULL;
1749 		}
1750 	}
1751 	bzero((char *)&sc->my_ldata->my_rx_list,
1752 	    sizeof(sc->my_ldata->my_rx_list));
1753 	/*
1754 	 * Free the TX list buffers.
1755 	 */
1756 	for (i = 0; i < MY_TX_LIST_CNT; i++) {
1757 		if (sc->my_cdata.my_tx_chain[i].my_mbuf != NULL) {
1758 			m_freem(sc->my_cdata.my_tx_chain[i].my_mbuf);
1759 			sc->my_cdata.my_tx_chain[i].my_mbuf = NULL;
1760 		}
1761 	}
1762 	bzero((char *)&sc->my_ldata->my_tx_list,
1763 	    sizeof(sc->my_ldata->my_tx_list));
1764 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1765 	return;
1766 }
1767 
1768 /*
1769  * Stop all chip I/O so that the kernel's probe routines don't get confused
1770  * by errant DMAs when rebooting.
1771  */
1772 static int
1773 my_shutdown(device_t dev)
1774 {
1775 	struct my_softc *sc;
1776 
1777 	sc = device_get_softc(dev);
1778 	MY_LOCK(sc);
1779 	my_stop(sc);
1780 	MY_UNLOCK(sc);
1781 	return 0;
1782 }
1783