xref: /freebsd/sys/dev/my/if_my.c (revision 22cf89c938886d14f5796fc49f9f020c23ea8eaf)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Written by: yen_cw@myson.com.tw
5  * Copyright (c) 2002 Myson Technology Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification, immediately at the beginning of the file.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * Myson fast ethernet PCI NIC driver, available at: http://www.myson.com.tw/
30  */
31 
32 #include <sys/cdefs.h>
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/sockio.h>
36 #include <sys/mbuf.h>
37 #include <sys/malloc.h>
38 #include <sys/kernel.h>
39 #include <sys/socket.h>
40 #include <sys/queue.h>
41 #include <sys/types.h>
42 #include <sys/module.h>
43 #include <sys/lock.h>
44 #include <sys/mutex.h>
45 
46 #define NBPFILTER	1
47 
48 #include <net/if.h>
49 #include <net/if_var.h>
50 #include <net/if_arp.h>
51 #include <net/ethernet.h>
52 #include <net/if_media.h>
53 #include <net/if_types.h>
54 #include <net/if_dl.h>
55 #include <net/bpf.h>
56 
57 #include <vm/vm.h>		/* for vtophys */
58 #include <vm/pmap.h>		/* for vtophys */
59 #include <machine/bus.h>
60 #include <machine/resource.h>
61 #include <sys/bus.h>
62 #include <sys/rman.h>
63 
64 #include <dev/pci/pcireg.h>
65 #include <dev/pci/pcivar.h>
66 
67 /*
68  * #define MY_USEIOSPACE
69  */
70 
71 static int      MY_USEIOSPACE = 1;
72 
73 #ifdef MY_USEIOSPACE
74 #define MY_RES                  SYS_RES_IOPORT
75 #define MY_RID                  MY_PCI_LOIO
76 #else
77 #define MY_RES                  SYS_RES_MEMORY
78 #define MY_RID                  MY_PCI_LOMEM
79 #endif
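/*
 * Note: since the MY_USEIOSPACE macro above is commented out, MY_RES and
 * MY_RID resolve to the memory-space values.  The run-time variable of the
 * same name is cleared in my_attach() for some MTD800 configurations but is
 * never consulted afterwards, so the resource type is fixed at compile time.
 */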
80 
81 #include <dev/my/if_myreg.h>
82 
83 /*
84  * Various supported device vendors/types and their names.
85  */
86 struct my_type *my_info_tmp;
87 static struct my_type my_devs[] = {
88 	{MYSONVENDORID, MTD800ID, "Myson MTD80X Based Fast Ethernet Card"},
89 	{MYSONVENDORID, MTD803ID, "Myson MTD80X Based Fast Ethernet Card"},
90 	{MYSONVENDORID, MTD891ID, "Myson MTD89X Based Giga Ethernet Card"},
91 	{0, 0, NULL}
92 };
93 
94 /*
95  * Various supported PHY vendors/types and their names. Note that this driver
96  * will work with pretty much any MII-compliant PHY, so failure to positively
97  * identify the chip is not a fatal error.
98  */
99 static struct my_type my_phys[] = {
100 	{MysonPHYID0, MysonPHYID0, "<MYSON MTD981>"},
101 	{SeeqPHYID0, SeeqPHYID0, "<SEEQ 80225>"},
102 	{AhdocPHYID0, AhdocPHYID0, "<AHDOC 101>"},
103 	{MarvellPHYID0, MarvellPHYID0, "<MARVELL 88E1000>"},
104 	{LevelOnePHYID0, LevelOnePHYID0, "<LevelOne LXT1000>"},
105 	{0, 0, "<MII-compliant physical interface>"}
106 };
107 
108 static int      my_probe(device_t);
109 static int      my_attach(device_t);
110 static int      my_detach(device_t);
111 static int      my_newbuf(struct my_softc *, struct my_chain_onefrag *);
112 static int      my_encap(struct my_softc *, struct my_chain *, struct mbuf *);
113 static void     my_rxeof(struct my_softc *);
114 static void     my_txeof(struct my_softc *);
115 static void     my_txeoc(struct my_softc *);
116 static void     my_intr(void *);
117 static void     my_start(if_t);
118 static void     my_start_locked(if_t);
119 static int      my_ioctl(if_t, u_long, caddr_t);
120 static void     my_init(void *);
121 static void     my_init_locked(struct my_softc *);
122 static void     my_stop(struct my_softc *);
123 static void     my_autoneg_timeout(void *);
124 static void     my_watchdog(void *);
125 static int      my_shutdown(device_t);
126 static int      my_ifmedia_upd(if_t);
127 static void     my_ifmedia_sts(if_t, struct ifmediareq *);
128 static u_int16_t my_phy_readreg(struct my_softc *, int);
129 static void     my_phy_writereg(struct my_softc *, int, int);
130 static void     my_autoneg_xmit(struct my_softc *);
131 static void     my_autoneg_mii(struct my_softc *, int, int);
132 static void     my_setmode_mii(struct my_softc *, int);
133 static void     my_getmode_mii(struct my_softc *);
134 static void     my_setcfg(struct my_softc *, int);
135 static void     my_setmulti(struct my_softc *);
136 static void     my_reset(struct my_softc *);
137 static int      my_list_rx_init(struct my_softc *);
138 static int      my_list_tx_init(struct my_softc *);
139 static long     my_send_cmd_to_phy(struct my_softc *, int, int);
140 
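/*
 * Read-modify-write helpers for the 32-bit CSRs, e.g.
 * MY_SETBIT(sc, MY_TCRRCR, MY_RE) enables the receiver and
 * MY_CLRBIT(sc, MY_TCRRCR, MY_RE) disables it again.
 */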
141 #define MY_SETBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
142 #define MY_CLRBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))
143 
144 static device_method_t my_methods[] = {
145 	/* Device interface */
146 	DEVMETHOD(device_probe, my_probe),
147 	DEVMETHOD(device_attach, my_attach),
148 	DEVMETHOD(device_detach, my_detach),
149 	DEVMETHOD(device_shutdown, my_shutdown),
150 
151 	DEVMETHOD_END
152 };
153 
154 static driver_t my_driver = {
155 	"my",
156 	my_methods,
157 	sizeof(struct my_softc)
158 };
159 
160 DRIVER_MODULE(my, pci, my_driver, 0, 0);
161 MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, my, my_devs,
162     nitems(my_devs) - 1);
163 MODULE_DEPEND(my, pci, 1, 1, 1);
164 MODULE_DEPEND(my, ether, 1, 1, 1);
165 
166 static long
167 my_send_cmd_to_phy(struct my_softc * sc, int opcode, int regad)
168 {
169 	long            miir;
170 	int             i;
171 	int             mask, data;
172 
173 	MY_LOCK_ASSERT(sc);
174 
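	/*
	 * The MII management interface is bit-banged through the
	 * MY_MANAGEMENT register: software toggles MDC and drives MDO with
	 * a preamble followed by the 16-bit command frame (start bits,
	 * opcode, PHY address, register address, turnaround), most
	 * significant bit first.  For a read, the output driver is released
	 * just before the turnaround bits so the PHY can answer on MDI.
	 */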
175 	/* enable MII output */
176 	miir = CSR_READ_4(sc, MY_MANAGEMENT);
177 	miir &= 0xfffffff0;
178 
179 	miir |= MY_MASK_MIIR_MII_WRITE + MY_MASK_MIIR_MII_MDO;
180 
181 	/* send 32 1's preamble */
182 	for (i = 0; i < 32; i++) {
183 		/* low MDC; MDO is already high (miir) */
184 		miir &= ~MY_MASK_MIIR_MII_MDC;
185 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
186 
187 		/* high MDC */
188 		miir |= MY_MASK_MIIR_MII_MDC;
189 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
190 	}
191 
192 	/* calculate ST+OP+PHYAD+REGAD+TA */
193 	data = opcode | (sc->my_phy_addr << 7) | (regad << 2);
194 
195 	/* shift the frame out */
196 	mask = 0x8000;
197 	while (mask) {
198 		/* low MDC, prepare MDO */
199 		miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
200 		if (mask & data)
201 			miir |= MY_MASK_MIIR_MII_MDO;
202 
203 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
204 		/* high MDC */
205 		miir |= MY_MASK_MIIR_MII_MDC;
206 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
207 		DELAY(30);
208 
209 		/* next */
210 		mask >>= 1;
211 		if (mask == 0x2 && opcode == MY_OP_READ)
212 			miir &= ~MY_MASK_MIIR_MII_WRITE;
213 	}
214 
215 	return miir;
216 }
217 
218 static u_int16_t
219 my_phy_readreg(struct my_softc * sc, int reg)
220 {
221 	long            miir;
222 	int             mask, data;
223 
224 	MY_LOCK_ASSERT(sc);
225 
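	/*
	 * The MTD803 maps its PHY registers directly into the chip's
	 * register space at MY_PHYBASE, so they can be read with a plain
	 * CSR access; other chips go through the serial MII management
	 * interface.
	 */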
226 	if (sc->my_info->my_did == MTD803ID)
227 		data = CSR_READ_2(sc, MY_PHYBASE + reg * 2);
228 	else {
229 		miir = my_send_cmd_to_phy(sc, MY_OP_READ, reg);
230 
231 		/* read data */
232 		mask = 0x8000;
233 		data = 0;
234 		while (mask) {
235 			/* low MDC */
236 			miir &= ~MY_MASK_MIIR_MII_MDC;
237 			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
238 
239 			/* read MDI */
240 			miir = CSR_READ_4(sc, MY_MANAGEMENT);
241 			if (miir & MY_MASK_MIIR_MII_MDI)
242 				data |= mask;
243 
244 			/* high MDC, and wait */
245 			miir |= MY_MASK_MIIR_MII_MDC;
246 			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
247 			DELAY(30);
248 
249 			/* next */
250 			mask >>= 1;
251 		}
252 
253 		/* low MDC */
254 		miir &= ~MY_MASK_MIIR_MII_MDC;
255 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
256 	}
257 
258 	return (u_int16_t) data;
259 }
260 
261 static void
262 my_phy_writereg(struct my_softc * sc, int reg, int data)
263 {
264 	long            miir;
265 	int             mask;
266 
267 	MY_LOCK_ASSERT(sc);
268 
269 	if (sc->my_info->my_did == MTD803ID)
270 		CSR_WRITE_2(sc, MY_PHYBASE + reg * 2, data);
271 	else {
272 		miir = my_send_cmd_to_phy(sc, MY_OP_WRITE, reg);
273 
274 		/* write data */
275 		mask = 0x8000;
276 		while (mask) {
277 			/* low MDC, prepare MDO */
278 			miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
279 			if (mask & data)
280 				miir |= MY_MASK_MIIR_MII_MDO;
281 			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
282 			DELAY(1);
283 
284 			/* high MDC */
285 			miir |= MY_MASK_MIIR_MII_MDC;
286 			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
287 			DELAY(1);
288 
289 			/* next */
290 			mask >>= 1;
291 		}
292 
293 		/* low MDC */
294 		miir &= ~MY_MASK_MIIR_MII_MDC;
295 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
296 	}
297 	return;
298 }
299 
300 static u_int
301 my_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
302 {
303 	uint32_t *hashes = arg;
304 	int h;
305 
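	/*
	 * The filter index is the upper 6 bits of the complemented
	 * big-endian CRC-32 of the address: bits 0-31 select a bit in MAR0
	 * (hashes[0]) and bits 32-63 a bit in MAR1 (hashes[1]).
	 */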
306 	h = ~ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
307 	if (h < 32)
308 		hashes[0] |= (1 << h);
309 	else
310 		hashes[1] |= (1 << (h - 32));
311 
312 	return (1);
313 }
314 /*
315  * Program the 64-bit multicast hash filter.
316  */
317 static void
318 my_setmulti(struct my_softc * sc)
319 {
320 	if_t		ifp;
321 	u_int32_t       hashes[2] = {0, 0};
322 	u_int32_t       rxfilt;
323 
324 	MY_LOCK_ASSERT(sc);
325 
326 	ifp = sc->my_ifp;
327 
328 	rxfilt = CSR_READ_4(sc, MY_TCRRCR);
329 
330 	if (if_getflags(ifp) & IFF_ALLMULTI || if_getflags(ifp) & IFF_PROMISC) {
331 		rxfilt |= MY_AM;
332 		CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
333 		CSR_WRITE_4(sc, MY_MAR0, 0xFFFFFFFF);
334 		CSR_WRITE_4(sc, MY_MAR1, 0xFFFFFFFF);
335 
336 		return;
337 	}
338 	/* first, zot all the existing hash bits */
339 	CSR_WRITE_4(sc, MY_MAR0, 0);
340 	CSR_WRITE_4(sc, MY_MAR1, 0);
341 
342 	/* now program new ones */
343 	if (if_foreach_llmaddr(ifp, my_hash_maddr, hashes) > 0)
344 		rxfilt |= MY_AM;
345 	else
346 		rxfilt &= ~MY_AM;
347 	CSR_WRITE_4(sc, MY_MAR0, hashes[0]);
348 	CSR_WRITE_4(sc, MY_MAR1, hashes[1]);
349 	CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
350 }
351 
352 /*
353  * Initiate an autonegotiation session.
354  */
355 static void
356 my_autoneg_xmit(struct my_softc * sc)
357 {
358 	u_int16_t       phy_sts = 0;
359 
360 	MY_LOCK_ASSERT(sc);
361 
362 	my_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
363 	DELAY(500);
364 	while (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_RESET);
365 
366 	phy_sts = my_phy_readreg(sc, PHY_BMCR);
367 	phy_sts |= PHY_BMCR_AUTONEGENBL | PHY_BMCR_AUTONEGRSTR;
368 	my_phy_writereg(sc, PHY_BMCR, phy_sts);
369 
370 	return;
371 }
372 
373 static void
374 my_autoneg_timeout(void *arg)
375 {
376 	struct my_softc *sc;
377 
378 	sc = arg;
379 	MY_LOCK_ASSERT(sc);
380 	my_autoneg_mii(sc, MY_FLAG_DELAYTIMEO, 1);
381 }
382 
383 /*
384  * Invoke autonegotiation on a PHY.
385  */
386 static void
387 my_autoneg_mii(struct my_softc * sc, int flag, int verbose)
388 {
389 	u_int16_t       phy_sts = 0, media, advert, ability;
390 	u_int16_t       ability2 = 0;
391 	if_t		ifp;
392 	struct ifmedia *ifm;
393 
394 	MY_LOCK_ASSERT(sc);
395 
396 	ifm = &sc->ifmedia;
397 	ifp = sc->my_ifp;
398 
399 	ifm->ifm_media = IFM_ETHER | IFM_AUTO;
400 
401 #ifndef FORCE_AUTONEG_TFOUR
402 	/*
403 	 * First, see if autoneg is supported. If not, there's no point in
404 	 * continuing.
405 	 */
406 	phy_sts = my_phy_readreg(sc, PHY_BMSR);
407 	if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
408 		if (verbose)
409 			device_printf(sc->my_dev,
410 			    "autonegotiation not supported\n");
411 		ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
412 		return;
413 	}
414 #endif
415 	switch (flag) {
416 	case MY_FLAG_FORCEDELAY:
417 		/*
418 		 * XXX Never use this option anywhere but in the probe
419 		 * routine: making the kernel stop dead in its tracks for
420 		 * three whole seconds after we've gone multi-user is really
421 		 * five whole seconds after we've gone multi-user is really
422 		 */
423 		my_autoneg_xmit(sc);
424 		DELAY(5000000);
425 		break;
426 	case MY_FLAG_SCHEDDELAY:
427 		/*
428 		 * Wait for the transmitter to go idle before starting an
429 		 * autoneg session, otherwise my_start() may clobber our
430 		 * timeout, and we don't want to allow transmission during an
431 		 * autoneg session since that can screw it up.
432 		 */
433 		if (sc->my_cdata.my_tx_head != NULL) {
434 			sc->my_want_auto = 1;
435 			MY_UNLOCK(sc);
436 			return;
437 		}
438 		my_autoneg_xmit(sc);
439 		callout_reset(&sc->my_autoneg_timer, hz * 5, my_autoneg_timeout,
440 		    sc);
441 		sc->my_autoneg = 1;
442 		sc->my_want_auto = 0;
443 		return;
444 	case MY_FLAG_DELAYTIMEO:
445 		callout_stop(&sc->my_autoneg_timer);
446 		sc->my_autoneg = 0;
447 		break;
448 	default:
449 		device_printf(sc->my_dev, "invalid autoneg flag: %d\n", flag);
450 		return;
451 	}
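	/*
	 * Only the FORCEDELAY and DELAYTIMEO cases fall through to the
	 * result reporting below; MY_FLAG_SCHEDDELAY either defers the
	 * session until the transmitter drains or arms the timeout callout
	 * and returns.
	 */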
452 
453 	if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
454 		if (verbose)
455 			device_printf(sc->my_dev, "autoneg complete, ");
456 		phy_sts = my_phy_readreg(sc, PHY_BMSR);
457 	} else {
458 		if (verbose)
459 			device_printf(sc->my_dev, "autoneg not complete, ");
460 	}
461 
462 	media = my_phy_readreg(sc, PHY_BMCR);
463 
464 	/* Link is good. Report modes and set duplex mode. */
465 	if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
466 		if (verbose)
467 			device_printf(sc->my_dev, "link status good. ");
468 		advert = my_phy_readreg(sc, PHY_ANAR);
469 		ability = my_phy_readreg(sc, PHY_LPAR);
470 		if ((sc->my_pinfo->my_vid == MarvellPHYID0) ||
471 		    (sc->my_pinfo->my_vid == LevelOnePHYID0)) {
472 			ability2 = my_phy_readreg(sc, PHY_1000SR);
473 			if (ability2 & PHY_1000SR_1000BTXFULL) {
474 				advert = 0;
475 				ability = 0;
476 				/*
477 				 * this version does not support 1000Mbps,
478 				 * ifm->ifm_media =
479 				 * IFM_ETHER|IFM_1000_T|IFM_FDX;
480 				 */
481 				ifm->ifm_media =
482 				    IFM_ETHER | IFM_100_TX | IFM_FDX;
483 				media &= ~PHY_BMCR_SPEEDSEL;
484 				media |= PHY_BMCR_1000;
485 				media |= PHY_BMCR_DUPLEX;
486 				printf("(full-duplex, 1000Mbps)\n");
487 			} else if (ability2 & PHY_1000SR_1000BTXHALF) {
488 				advert = 0;
489 				ability = 0;
490 				/*
491 				 * this version does not support 1000Mbps,
492 				 * ifm->ifm_media = IFM_ETHER|IFM_1000_T;
493 				 */
494 				ifm->ifm_media = IFM_ETHER | IFM_100_TX;
495 				media &= ~PHY_BMCR_SPEEDSEL;
496 				media &= ~PHY_BMCR_DUPLEX;
497 				media |= PHY_BMCR_1000;
498 				printf("(half-duplex, 1000Mbps)\n");
499 			}
500 		}
501 		if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
502 			ifm->ifm_media = IFM_ETHER | IFM_100_T4;
503 			media |= PHY_BMCR_SPEEDSEL;
504 			media &= ~PHY_BMCR_DUPLEX;
505 			printf("(100baseT4)\n");
506 		} else if (advert & PHY_ANAR_100BTXFULL &&
507 			   ability & PHY_ANAR_100BTXFULL) {
508 			ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
509 			media |= PHY_BMCR_SPEEDSEL;
510 			media |= PHY_BMCR_DUPLEX;
511 			printf("(full-duplex, 100Mbps)\n");
512 		} else if (advert & PHY_ANAR_100BTXHALF &&
513 			   ability & PHY_ANAR_100BTXHALF) {
514 			ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
515 			media |= PHY_BMCR_SPEEDSEL;
516 			media &= ~PHY_BMCR_DUPLEX;
517 			printf("(half-duplex, 100Mbps)\n");
518 		} else if (advert & PHY_ANAR_10BTFULL &&
519 			   ability & PHY_ANAR_10BTFULL) {
520 			ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
521 			media &= ~PHY_BMCR_SPEEDSEL;
522 			media |= PHY_BMCR_DUPLEX;
523 			printf("(full-duplex, 10Mbps)\n");
524 		} else if (advert) {
525 			ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
526 			media &= ~PHY_BMCR_SPEEDSEL;
527 			media &= ~PHY_BMCR_DUPLEX;
528 			printf("(half-duplex, 10Mbps)\n");
529 		}
530 		media &= ~PHY_BMCR_AUTONEGENBL;
531 
532 		/* Set ASIC's duplex mode to match the PHY. */
533 		my_phy_writereg(sc, PHY_BMCR, media);
534 		my_setcfg(sc, media);
535 	} else {
536 		if (verbose)
537 			device_printf(sc->my_dev, "no carrier\n");
538 	}
539 
540 	my_init_locked(sc);
541 	if (sc->my_tx_pend) {
542 		sc->my_autoneg = 0;
543 		sc->my_tx_pend = 0;
544 		my_start_locked(ifp);
545 	}
546 	return;
547 }
548 
549 /*
550  * Query the PHY for the media types it supports and register them with ifmedia.
551  */
552 static void
553 my_getmode_mii(struct my_softc * sc)
554 {
555 	u_int16_t       bmsr;
556 	if_t		ifp;
557 
558 	MY_LOCK_ASSERT(sc);
559 	ifp = sc->my_ifp;
560 	bmsr = my_phy_readreg(sc, PHY_BMSR);
561 	if (bootverbose)
562 		device_printf(sc->my_dev, "PHY status word: %x\n", bmsr);
563 
564 	/* fallback */
565 	sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
566 
567 	if (bmsr & PHY_BMSR_10BTHALF) {
568 		if (bootverbose)
569 			device_printf(sc->my_dev,
570 			    "10Mbps half-duplex mode supported\n");
571 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX,
572 		    0, NULL);
573 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
574 	}
575 	if (bmsr & PHY_BMSR_10BTFULL) {
576 		if (bootverbose)
577 			device_printf(sc->my_dev,
578 			    "10Mbps full-duplex mode supported\n");
579 
580 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
581 		    0, NULL);
582 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
583 	}
584 	if (bmsr & PHY_BMSR_100BTXHALF) {
585 		if (bootverbose)
586 			device_printf(sc->my_dev,
587 			    "100Mbps half-duplex mode supported\n");
588 		if_setbaudrate(ifp, 100000000);
589 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
590 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX,
591 			    0, NULL);
592 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
593 	}
594 	if (bmsr & PHY_BMSR_100BTXFULL) {
595 		if (bootverbose)
596 			device_printf(sc->my_dev,
597 			    "100Mbps full-duplex mode supported\n");
598 		if_setbaudrate(ifp, 100000000);
599 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
600 		    0, NULL);
601 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
602 	}
603 	/* Some also support 100BaseT4. */
604 	if (bmsr & PHY_BMSR_100BT4) {
605 		if (bootverbose)
606 			device_printf(sc->my_dev, "100baseT4 mode supported\n");
607 		if_setbaudrate(ifp, 100000000);
608 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_T4, 0, NULL);
609 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_T4;
610 #ifdef FORCE_AUTONEG_TFOUR
611 		if (bootverbose)
612 			device_printf(sc->my_dev,
613 			    "forcing on autoneg support for BT4\n");
614 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
615 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
616 #endif
617 	}
618 #if 0				/* this version does not support 1000Mbps */
619 	if (sc->my_pinfo->my_vid == MarvellPHYID0) {
620 		if (bootverbose)
621 			device_printf(sc->my_dev,
622 			    "1000Mbps half-duplex mode supported\n");
623 
624 		if_setbaudrate(ifp, 1000000000);
625 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
626 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_HDX,
627 		    0, NULL);
628 		if (bootverbose)
629 			device_printf(sc->my_dev,
630 			    "1000Mbps full-duplex mode supported\n");
631 		if_setbaudrate(ifp, 1000000000);
632 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
633 		    0, NULL);
634 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_1000_T | IFM_FDX;
635 	}
636 #endif
637 	if (bmsr & PHY_BMSR_CANAUTONEG) {
638 		if (bootverbose)
639 			device_printf(sc->my_dev, "autoneg supported\n");
640 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
641 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
642 	}
643 	return;
644 }
645 
646 /*
647  * Set speed and duplex mode.
648  */
649 static void
650 my_setmode_mii(struct my_softc * sc, int media)
651 {
652 	u_int16_t       bmcr;
653 
654 	MY_LOCK_ASSERT(sc);
655 	/*
656 	 * If an autoneg session is in progress, stop it.
657 	 */
658 	if (sc->my_autoneg) {
659 		device_printf(sc->my_dev, "canceling autoneg session\n");
660 		callout_stop(&sc->my_autoneg_timer);
661 		sc->my_autoneg = sc->my_want_auto = 0;
662 		bmcr = my_phy_readreg(sc, PHY_BMCR);
663 		bmcr &= ~PHY_BMCR_AUTONEGENBL;
664 		my_phy_writereg(sc, PHY_BMCR, bmcr);
665 	}
666 	device_printf(sc->my_dev, "selecting MII, ");
667 	bmcr = my_phy_readreg(sc, PHY_BMCR);
668 	bmcr &= ~(PHY_BMCR_AUTONEGENBL | PHY_BMCR_SPEEDSEL | PHY_BMCR_1000 |
669 		  PHY_BMCR_DUPLEX | PHY_BMCR_LOOPBK);
670 
671 #if 0				/* this version does not support 1000Mbps */
672 	if (IFM_SUBTYPE(media) == IFM_1000_T) {
673 		printf("1000Mbps/T4, half-duplex\n");
674 		bmcr &= ~PHY_BMCR_SPEEDSEL;
675 		bmcr &= ~PHY_BMCR_DUPLEX;
676 		bmcr |= PHY_BMCR_1000;
677 	}
678 #endif
679 	if (IFM_SUBTYPE(media) == IFM_100_T4) {
680 		printf("100Mbps/T4, half-duplex\n");
681 		bmcr |= PHY_BMCR_SPEEDSEL;
682 		bmcr &= ~PHY_BMCR_DUPLEX;
683 	}
684 	if (IFM_SUBTYPE(media) == IFM_100_TX) {
685 		printf("100Mbps, ");
686 		bmcr |= PHY_BMCR_SPEEDSEL;
687 	}
688 	if (IFM_SUBTYPE(media) == IFM_10_T) {
689 		printf("10Mbps, ");
690 		bmcr &= ~PHY_BMCR_SPEEDSEL;
691 	}
692 	if ((media & IFM_GMASK) == IFM_FDX) {
693 		printf("full duplex\n");
694 		bmcr |= PHY_BMCR_DUPLEX;
695 	} else {
696 		printf("half duplex\n");
697 		bmcr &= ~PHY_BMCR_DUPLEX;
698 	}
699 	my_phy_writereg(sc, PHY_BMCR, bmcr);
700 	my_setcfg(sc, bmcr);
701 	return;
702 }
703 
704 /*
705  * The Myson manual states that in order to fiddle with the 'full-duplex' and
706  * '100Mbps' bits in the netconfig register, we first have to put the
707  * transmit and/or receive logic in the idle state.
708  */
709 static void
710 my_setcfg(struct my_softc * sc, int bmcr)
711 {
712 	int             i, restart = 0;
713 
714 	MY_LOCK_ASSERT(sc);
715 	if (CSR_READ_4(sc, MY_TCRRCR) & (MY_TE | MY_RE)) {
716 		restart = 1;
717 		MY_CLRBIT(sc, MY_TCRRCR, (MY_TE | MY_RE));
718 		for (i = 0; i < MY_TIMEOUT; i++) {
719 			DELAY(10);
720 			if (!(CSR_READ_4(sc, MY_TCRRCR) &
721 			    (MY_TXRUN | MY_RXRUN)))
722 				break;
723 		}
724 		if (i == MY_TIMEOUT)
725 			device_printf(sc->my_dev,
726 			    "failed to force tx and rx to idle\n");
727 	}
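	/*
	 * Re-program the port speed to match the PHY.  Judging by the bit
	 * names in if_myreg.h, MY_PS1000 selects the gigabit port speed,
	 * MY_PS10 the 10Mbps speed (neither bit set meaning 100Mbps), and
	 * MY_FD mirrors the PHY duplex setting.
	 */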
728 	MY_CLRBIT(sc, MY_TCRRCR, MY_PS1000);
729 	MY_CLRBIT(sc, MY_TCRRCR, MY_PS10);
730 	if (bmcr & PHY_BMCR_1000)
731 		MY_SETBIT(sc, MY_TCRRCR, MY_PS1000);
732 	else if (!(bmcr & PHY_BMCR_SPEEDSEL))
733 		MY_SETBIT(sc, MY_TCRRCR, MY_PS10);
734 	if (bmcr & PHY_BMCR_DUPLEX)
735 		MY_SETBIT(sc, MY_TCRRCR, MY_FD);
736 	else
737 		MY_CLRBIT(sc, MY_TCRRCR, MY_FD);
738 	if (restart)
739 		MY_SETBIT(sc, MY_TCRRCR, MY_TE | MY_RE);
740 	return;
741 }
742 
743 static void
744 my_reset(struct my_softc * sc)
745 {
746 	int    i;
747 
748 	MY_LOCK_ASSERT(sc);
749 	MY_SETBIT(sc, MY_BCR, MY_SWR);
750 	for (i = 0; i < MY_TIMEOUT; i++) {
751 		DELAY(10);
752 		if (!(CSR_READ_4(sc, MY_BCR) & MY_SWR))
753 			break;
754 	}
755 	if (i == MY_TIMEOUT)
756 		device_printf(sc->my_dev, "reset never completed!\n");
757 
758 	/* Wait a little while for the chip to get its brains in order. */
759 	DELAY(1000);
760 	return;
761 }
762 
763 /*
764  * Probe for a Myson chip. Check the PCI vendor and device IDs against our
765  * list and return a device name if we find a match.
766  */
767 static int
768 my_probe(device_t dev)
769 {
770 	struct my_type *t;
771 
772 	t = my_devs;
773 	while (t->my_name != NULL) {
774 		if ((pci_get_vendor(dev) == t->my_vid) &&
775 		    (pci_get_device(dev) == t->my_did)) {
776 			device_set_desc(dev, t->my_name);
777 			my_info_tmp = t;
778 			return (BUS_PROBE_DEFAULT);
779 		}
780 		t++;
781 	}
782 	return (ENXIO);
783 }
784 
785 /*
786  * Attach the interface. Allocate softc structures, do ifmedia setup and
787  * ethernet/BPF attach.
788  */
789 static int
790 my_attach(device_t dev)
791 {
792 	int             i;
793 	u_char          eaddr[ETHER_ADDR_LEN];
794 	u_int32_t       iobase;
795 	struct my_softc *sc;
796 	if_t		ifp;
797 	int             media = IFM_ETHER | IFM_100_TX | IFM_FDX;
798 	unsigned int    round;
799 	caddr_t         roundptr;
800 	struct my_type *p;
801 	u_int16_t       phy_vid, phy_did, phy_sts = 0;
802 	int             rid, error = 0;
803 
804 	sc = device_get_softc(dev);
805 	sc->my_dev = dev;
806 	mtx_init(&sc->my_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
807 	    MTX_DEF);
808 	callout_init_mtx(&sc->my_autoneg_timer, &sc->my_mtx, 0);
809 	callout_init_mtx(&sc->my_watchdog, &sc->my_mtx, 0);
810 
811 	/*
812 	 * Map control/status registers.
813 	 */
814 	pci_enable_busmaster(dev);
815 
816 	if (my_info_tmp->my_did == MTD800ID) {
817 		iobase = pci_read_config(dev, MY_PCI_LOIO, 4);
818 		if (iobase & 0x300)
819 			MY_USEIOSPACE = 0;
820 	}
821 
822 	rid = MY_RID;
823 	sc->my_res = bus_alloc_resource_any(dev, MY_RES, &rid, RF_ACTIVE);
824 
825 	if (sc->my_res == NULL) {
826 		device_printf(dev, "couldn't map ports/memory\n");
827 		error = ENXIO;
828 		goto destroy_mutex;
829 	}
830 	sc->my_btag = rman_get_bustag(sc->my_res);
831 	sc->my_bhandle = rman_get_bushandle(sc->my_res);
832 
833 	rid = 0;
834 	sc->my_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
835 					    RF_SHAREABLE | RF_ACTIVE);
836 
837 	if (sc->my_irq == NULL) {
838 		device_printf(dev, "couldn't map interrupt\n");
839 		error = ENXIO;
840 		goto release_io;
841 	}
842 
843 	sc->my_info = my_info_tmp;
844 
845 	/* Reset the adapter. */
846 	MY_LOCK(sc);
847 	my_reset(sc);
848 	MY_UNLOCK(sc);
849 
850 	/*
851 	 * Get station address
852 	 */
853 	for (i = 0; i < ETHER_ADDR_LEN; ++i)
854 		eaddr[i] = CSR_READ_1(sc, MY_PAR0 + i);
855 
856 	sc->my_ldata_ptr = malloc(sizeof(struct my_list_data) + 8,
857 				  M_DEVBUF, M_NOWAIT);
858 	if (sc->my_ldata_ptr == NULL) {
859 		device_printf(dev, "no memory for list buffers!\n");
860 		error = ENXIO;
861 		goto release_irq;
862 	}
863 	sc->my_ldata = (struct my_list_data *) sc->my_ldata_ptr;
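	/*
	 * The loop below bumps the list pointer up to the next 8-byte
	 * boundary (the allocation above reserves 8 spare bytes for this),
	 * presumably to satisfy the descriptor alignment expected by the
	 * DMA engine.
	 */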
864 	round = (uintptr_t)sc->my_ldata_ptr & 0xF;
865 	roundptr = sc->my_ldata_ptr;
866 	for (i = 0; i < 8; i++) {
867 		if (round % 8) {
868 			round++;
869 			roundptr++;
870 		} else
871 			break;
872 	}
873 	sc->my_ldata = (struct my_list_data *) roundptr;
874 	bzero(sc->my_ldata, sizeof(struct my_list_data));
875 
876 	ifp = sc->my_ifp = if_alloc(IFT_ETHER);
877 	if (ifp == NULL) {
878 		device_printf(dev, "cannot if_alloc()\n");
879 		error = ENOSPC;
880 		goto free_ldata;
881 	}
882 	if_setsoftc(ifp, sc);
883 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
884 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
885 	if_setioctlfn(ifp, my_ioctl);
886 	if_setstartfn(ifp, my_start);
887 	if_setinitfn(ifp, my_init);
888 	if_setbaudrate(ifp, 10000000);
889 	if_setsendqlen(ifp, ifqmaxlen);
890 	if_setsendqready(ifp);
891 
892 	if (sc->my_info->my_did == MTD803ID)
893 		sc->my_pinfo = my_phys;
894 	else {
895 		if (bootverbose)
896 			device_printf(dev, "probing for a PHY\n");
897 		MY_LOCK(sc);
898 		for (i = MY_PHYADDR_MIN; i < MY_PHYADDR_MAX + 1; i++) {
899 			if (bootverbose)
900 				device_printf(dev, "checking address: %d\n", i);
901 			sc->my_phy_addr = i;
902 			phy_sts = my_phy_readreg(sc, PHY_BMSR);
903 			if ((phy_sts != 0) && (phy_sts != 0xffff))
904 				break;
905 			else
906 				phy_sts = 0;
907 		}
908 		if (phy_sts) {
909 			phy_vid = my_phy_readreg(sc, PHY_VENID);
910 			phy_did = my_phy_readreg(sc, PHY_DEVID);
911 			if (bootverbose) {
912 				device_printf(dev, "found PHY at address %d, ",
913 				    sc->my_phy_addr);
914 				printf("vendor id: %x device id: %x\n",
915 				    phy_vid, phy_did);
916 			}
917 			p = my_phys;
918 			while (p->my_vid) {
919 				if (phy_vid == p->my_vid) {
920 					sc->my_pinfo = p;
921 					break;
922 				}
923 				p++;
924 			}
925 			if (sc->my_pinfo == NULL)
926 				sc->my_pinfo = &my_phys[PHY_UNKNOWN];
927 			if (bootverbose)
928 				device_printf(dev, "PHY type: %s\n",
929 				       sc->my_pinfo->my_name);
930 		} else {
931 			MY_UNLOCK(sc);
932 			device_printf(dev, "MII without any PHY!\n");
933 			error = ENXIO;
934 			goto free_if;
935 		}
936 		MY_UNLOCK(sc);
937 	}
938 
939 	/* Do ifmedia setup. */
940 	ifmedia_init(&sc->ifmedia, 0, my_ifmedia_upd, my_ifmedia_sts);
941 	MY_LOCK(sc);
942 	my_getmode_mii(sc);
943 	my_autoneg_mii(sc, MY_FLAG_FORCEDELAY, 1);
944 	media = sc->ifmedia.ifm_media;
945 	my_stop(sc);
946 	MY_UNLOCK(sc);
947 	ifmedia_set(&sc->ifmedia, media);
948 
949 	ether_ifattach(ifp, eaddr);
950 
951 	error = bus_setup_intr(dev, sc->my_irq, INTR_TYPE_NET | INTR_MPSAFE,
952 			       NULL, my_intr, sc, &sc->my_intrhand);
953 
954 	if (error) {
955 		device_printf(dev, "couldn't set up irq\n");
956 		goto detach_if;
957 	}
958 
959 	return (0);
960 
961 detach_if:
962 	ether_ifdetach(ifp);
963 free_if:
964 	if_free(ifp);
965 free_ldata:
966 	free(sc->my_ldata_ptr, M_DEVBUF);
967 release_irq:
968 	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
969 release_io:
970 	bus_release_resource(dev, MY_RES, MY_RID, sc->my_res);
971 destroy_mutex:
972 	mtx_destroy(&sc->my_mtx);
973 	return (error);
974 }
975 
976 static int
977 my_detach(device_t dev)
978 {
979 	struct my_softc *sc;
980 	if_t		ifp;
981 
982 	sc = device_get_softc(dev);
983 	ifp = sc->my_ifp;
984 	ether_ifdetach(ifp);
985 	MY_LOCK(sc);
986 	my_stop(sc);
987 	MY_UNLOCK(sc);
988 	bus_teardown_intr(dev, sc->my_irq, sc->my_intrhand);
989 	callout_drain(&sc->my_watchdog);
990 	callout_drain(&sc->my_autoneg_timer);
991 
992 	if_free(ifp);
993 	free(sc->my_ldata_ptr, M_DEVBUF);
994 
995 	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
996 	bus_release_resource(dev, MY_RES, MY_RID, sc->my_res);
997 	mtx_destroy(&sc->my_mtx);
998 	return (0);
999 }
1000 
1001 /*
1002  * Initialize the transmit descriptors.
1003  */
1004 static int
1005 my_list_tx_init(struct my_softc * sc)
1006 {
1007 	struct my_chain_data *cd;
1008 	struct my_list_data *ld;
1009 	int             i;
1010 
1011 	MY_LOCK_ASSERT(sc);
1012 	cd = &sc->my_cdata;
1013 	ld = sc->my_ldata;
1014 	for (i = 0; i < MY_TX_LIST_CNT; i++) {
1015 		cd->my_tx_chain[i].my_ptr = &ld->my_tx_list[i];
1016 		if (i == (MY_TX_LIST_CNT - 1))
1017 			cd->my_tx_chain[i].my_nextdesc = &cd->my_tx_chain[0];
1018 		else
1019 			cd->my_tx_chain[i].my_nextdesc =
1020 			    &cd->my_tx_chain[i + 1];
1021 	}
1022 	cd->my_tx_free = &cd->my_tx_chain[0];
1023 	cd->my_tx_tail = cd->my_tx_head = NULL;
1024 	return (0);
1025 }
1026 
1027 /*
1028  * Initialize the RX descriptors and allocate mbufs for them. Note that we
1029  * arrange the descriptors in a closed ring, so that the last descriptor
1030  * points back to the first.
1031  */
1032 static int
1033 my_list_rx_init(struct my_softc * sc)
1034 {
1035 	struct my_chain_data *cd;
1036 	struct my_list_data *ld;
1037 	int             i;
1038 
1039 	MY_LOCK_ASSERT(sc);
1040 	cd = &sc->my_cdata;
1041 	ld = sc->my_ldata;
1042 	for (i = 0; i < MY_RX_LIST_CNT; i++) {
1043 		cd->my_rx_chain[i].my_ptr =
1044 		    (struct my_desc *) & ld->my_rx_list[i];
1045 		if (my_newbuf(sc, &cd->my_rx_chain[i]) == ENOBUFS) {
1046 			MY_UNLOCK(sc);
1047 			return (ENOBUFS);
1048 		}
1049 		if (i == (MY_RX_LIST_CNT - 1)) {
1050 			cd->my_rx_chain[i].my_nextdesc = &cd->my_rx_chain[0];
1051 			ld->my_rx_list[i].my_next = vtophys(&ld->my_rx_list[0]);
1052 		} else {
1053 			cd->my_rx_chain[i].my_nextdesc =
1054 			    &cd->my_rx_chain[i + 1];
1055 			ld->my_rx_list[i].my_next =
1056 			    vtophys(&ld->my_rx_list[i + 1]);
1057 		}
1058 	}
1059 	cd->my_rx_head = &cd->my_rx_chain[0];
1060 	return (0);
1061 }
1062 
1063 /*
1064  * Initialize an RX descriptor and attach an MBUF cluster.
1065  */
1066 static int
1067 my_newbuf(struct my_softc * sc, struct my_chain_onefrag * c)
1068 {
1069 	struct mbuf    *m_new = NULL;
1070 
1071 	MY_LOCK_ASSERT(sc);
1072 	MGETHDR(m_new, M_NOWAIT, MT_DATA);
1073 	if (m_new == NULL) {
1074 		device_printf(sc->my_dev,
1075 		    "no memory for rx list -- packet dropped!\n");
1076 		return (ENOBUFS);
1077 	}
1078 	if (!(MCLGET(m_new, M_NOWAIT))) {
1079 		device_printf(sc->my_dev,
1080 		    "no memory for rx list -- packet dropped!\n");
1081 		m_freem(m_new);
1082 		return (ENOBUFS);
1083 	}
1084 	c->my_mbuf = m_new;
1085 	c->my_ptr->my_data = vtophys(mtod(m_new, caddr_t));
1086 	c->my_ptr->my_ctl = (MCLBYTES - 1) << MY_RBSShift;
1087 	c->my_ptr->my_status = MY_OWNByNIC;
1088 	return (0);
1089 }
1090 
1091 /*
1092  * A frame has been uploaded: pass the resulting mbuf chain up to the higher
1093  * level protocols.
1094  */
1095 static void
1096 my_rxeof(struct my_softc * sc)
1097 {
1098 	struct ether_header *eh;
1099 	struct mbuf    *m;
1100 	if_t		ifp;
1101 	struct my_chain_onefrag *cur_rx;
1102 	int             total_len = 0;
1103 	u_int32_t       rxstat;
1104 
1105 	MY_LOCK_ASSERT(sc);
1106 	ifp = sc->my_ifp;
1107 	while (!((rxstat = sc->my_cdata.my_rx_head->my_ptr->my_status)
1108 	    & MY_OWNByNIC)) {
1109 		cur_rx = sc->my_cdata.my_rx_head;
1110 		sc->my_cdata.my_rx_head = cur_rx->my_nextdesc;
1111 
1112 		if (rxstat & MY_ES) {	/* error summary: give up this rx pkt */
1113 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1114 			cur_rx->my_ptr->my_status = MY_OWNByNIC;
1115 			continue;
1116 		}
1117 		/* No errors; receive the packet. */
1118 		total_len = (rxstat & MY_FLNGMASK) >> MY_FLNGShift;
1119 		total_len -= ETHER_CRC_LEN;
1120 
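		/*
		 * Frames shorter than MINCLSIZE are copied into a fresh
		 * mbuf chain with m_devget() so the receive cluster can be
		 * recycled immediately; larger frames hand the cluster up
		 * the stack and my_newbuf() attaches a replacement.
		 */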
1121 		if (total_len < MINCLSIZE) {
1122 			m = m_devget(mtod(cur_rx->my_mbuf, char *),
1123 			    total_len, 0, ifp, NULL);
1124 			cur_rx->my_ptr->my_status = MY_OWNByNIC;
1125 			if (m == NULL) {
1126 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1127 				continue;
1128 			}
1129 		} else {
1130 			m = cur_rx->my_mbuf;
1131 			/*
1132 			 * Try to conjure up a new mbuf cluster. If that
1133 			 * fails, it means we have an out of memory condition
1134 			 * and should leave the buffer in place and continue.
1135 			 * This will result in a lost packet, but there's
1136 			 * little else we can do in this situation.
1137 			 */
1138 			if (my_newbuf(sc, cur_rx) == ENOBUFS) {
1139 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1140 				cur_rx->my_ptr->my_status = MY_OWNByNIC;
1141 				continue;
1142 			}
1143 			m->m_pkthdr.rcvif = ifp;
1144 			m->m_pkthdr.len = m->m_len = total_len;
1145 		}
1146 		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1147 		eh = mtod(m, struct ether_header *);
1148 #if NBPFILTER > 0
1149 		/*
1150 		 * Handle BPF listeners. Let the BPF user see the packet, but
1151 		 * don't pass it up to the ether_input() layer unless it's a
1152 		 * broadcast packet, multicast packet, matches our ethernet
1153 		 * address or the interface is in promiscuous mode.
1154 		 */
1155 		if (bpf_peers_present(if_getbpf(ifp))) {
1156 			bpf_mtap_if(ifp, m);
1157 			if (if_getflags(ifp) & IFF_PROMISC &&
1158 			    (bcmp(eh->ether_dhost, if_getlladdr(sc->my_ifp),
1159 				ETHER_ADDR_LEN) &&
1160 			     (eh->ether_dhost[0] & 1) == 0)) {
1161 				m_freem(m);
1162 				continue;
1163 			}
1164 		}
1165 #endif
1166 		MY_UNLOCK(sc);
1167 		if_input(ifp, m);
1168 		MY_LOCK(sc);
1169 	}
1170 	return;
1171 }
1172 
1173 /*
1174  * A frame was downloaded to the chip. It's safe for us to clean up the list
1175  * buffers.
1176  */
1177 static void
1178 my_txeof(struct my_softc * sc)
1179 {
1180 	struct my_chain *cur_tx;
1181 	if_t		ifp;
1182 
1183 	MY_LOCK_ASSERT(sc);
1184 	ifp = sc->my_ifp;
1185 	/* Clear the timeout timer. */
1186 	sc->my_timer = 0;
1187 	if (sc->my_cdata.my_tx_head == NULL) {
1188 		return;
1189 	}
1190 	/*
1191 	 * Go through our tx list and free mbufs for those frames that have
1192 	 * been transmitted.
1193 	 */
1194 	while (sc->my_cdata.my_tx_head->my_mbuf != NULL) {
1195 		u_int32_t       txstat;
1196 
1197 		cur_tx = sc->my_cdata.my_tx_head;
1198 		txstat = MY_TXSTATUS(cur_tx);
1199 		if ((txstat & MY_OWNByNIC) || txstat == MY_UNSENT)
1200 			break;
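		/*
		 * In non-enhanced mode the per-descriptor status word
		 * carries the error and collision counts; in enhanced mode
		 * (MTD891) the collision count is read from the TSR
		 * register after this loop instead.
		 */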
1201 		if (!(CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced)) {
1202 			if (txstat & MY_TXERR) {
1203 				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1204 				if (txstat & MY_EC) /* excessive collision */
1205 					if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
1206 				if (txstat & MY_LC)	/* late collision */
1207 					if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
1208 			}
1209 			if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
1210 			    (txstat & MY_NCRMASK) >> MY_NCRShift);
1211 		}
1212 		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1213 		m_freem(cur_tx->my_mbuf);
1214 		cur_tx->my_mbuf = NULL;
1215 		if (sc->my_cdata.my_tx_head == sc->my_cdata.my_tx_tail) {
1216 			sc->my_cdata.my_tx_head = NULL;
1217 			sc->my_cdata.my_tx_tail = NULL;
1218 			break;
1219 		}
1220 		sc->my_cdata.my_tx_head = cur_tx->my_nextdesc;
1221 	}
1222 	if (CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced) {
1223 		if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (CSR_READ_4(sc, MY_TSR) & MY_NCRMask));
1224 	}
1225 	return;
1226 }
1227 
1228 /*
1229  * TX 'end of channel' interrupt handler.
1230  */
1231 static void
1232 my_txeoc(struct my_softc * sc)
1233 {
1234 	if_t		ifp;
1235 
1236 	MY_LOCK_ASSERT(sc);
1237 	ifp = sc->my_ifp;
1238 	sc->my_timer = 0;
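	/*
	 * If the TX list has drained, clear OACTIVE and start any deferred
	 * autonegotiation session; otherwise, if the head descriptor was
	 * queued but never handed to the chip (MY_UNSENT), give it to the
	 * NIC now and issue a transmit poll demand.
	 */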
1239 	if (sc->my_cdata.my_tx_head == NULL) {
1240 		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1241 		sc->my_cdata.my_tx_tail = NULL;
1242 		if (sc->my_want_auto)
1243 			my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
1244 	} else {
1245 		if (MY_TXOWN(sc->my_cdata.my_tx_head) == MY_UNSENT) {
1246 			MY_TXOWN(sc->my_cdata.my_tx_head) = MY_OWNByNIC;
1247 			sc->my_timer = 5;
1248 			CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF);
1249 		}
1250 	}
1251 	return;
1252 }
1253 
1254 static void
1255 my_intr(void *arg)
1256 {
1257 	struct my_softc *sc;
1258 	if_t		ifp;
1259 	u_int32_t       status;
1260 
1261 	sc = arg;
1262 	MY_LOCK(sc);
1263 	ifp = sc->my_ifp;
1264 	if (!(if_getflags(ifp) & IFF_UP)) {
1265 		MY_UNLOCK(sc);
1266 		return;
1267 	}
1268 	/* Disable interrupts. */
1269 	CSR_WRITE_4(sc, MY_IMR, 0x00000000);
1270 
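	/*
	 * Pending interrupt causes are read from the ISR and written back
	 * to acknowledge them; keep servicing until no enabled source
	 * remains asserted, then re-enable the IMR below.
	 */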
1271 	for (;;) {
1272 		status = CSR_READ_4(sc, MY_ISR);
1273 		status &= MY_INTRS;
1274 		if (status)
1275 			CSR_WRITE_4(sc, MY_ISR, status);
1276 		else
1277 			break;
1278 
1279 		if (status & MY_RI)	/* receive interrupt */
1280 			my_rxeof(sc);
1281 
1282 		if ((status & MY_RBU) || (status & MY_RxErr)) {
1283 			/* rx buffer unavailable or rx error */
1284 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1285 #ifdef foo
1286 			my_stop(sc);
1287 			my_reset(sc);
1288 			my_init_locked(sc);
1289 #endif
1290 		}
1291 		if (status & MY_TI)	/* tx interrupt */
1292 			my_txeof(sc);
1293 		if (status & MY_ETI)	/* tx early interrupt */
1294 			my_txeof(sc);
1295 		if (status & MY_TBU)	/* tx buffer unavailable */
1296 			my_txeoc(sc);
1297 
1298 #if 0				/* 90/1/18 delete */
1299 		if (status & MY_FBE) {
1300 			my_reset(sc);
1301 			my_init_locked(sc);
1302 		}
1303 #endif
1304 	}
1305 
1306 	/* Re-enable interrupts. */
1307 	CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
1308 	if (!if_sendq_empty(ifp))
1309 		my_start_locked(ifp);
1310 	MY_UNLOCK(sc);
1311 	return;
1312 }
1313 
1314 /*
1315  * Encapsulate a packet for transmission.  The mbuf chain is coalesced into
1316  * a single buffer, so only the first fragment pointer of the descriptor is used.
1317  */
1318 static int
1319 my_encap(struct my_softc * sc, struct my_chain * c, struct mbuf * m_head)
1320 {
1321 	struct my_desc *f = NULL;
1322 	int             total_len;
1323 	struct mbuf    *m, *m_new = NULL;
1324 
1325 	MY_LOCK_ASSERT(sc);
1326 	/* calculate the total tx pkt length */
1327 	total_len = 0;
1328 	for (m = m_head; m != NULL; m = m->m_next)
1329 		total_len += m->m_len;
1330 	/*
1331 	 * Start packing the mbufs in this chain into the fragment pointers.
1332 	 * Stop when we run out of fragments or hit the end of the mbuf
1333 	 * chain.
1334 	 */
1335 	m = m_head;
1336 	MGETHDR(m_new, M_NOWAIT, MT_DATA);
1337 	if (m_new == NULL) {
1338 		device_printf(sc->my_dev, "no memory for tx list\n");
1339 		return (1);
1340 	}
1341 	if (m_head->m_pkthdr.len > MHLEN) {
1342 		if (!(MCLGET(m_new, M_NOWAIT))) {
1343 			m_freem(m_new);
1344 			device_printf(sc->my_dev, "no memory for tx list\n");
1345 			return (1);
1346 		}
1347 	}
1348 	m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t));
1349 	m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1350 	m_freem(m_head);
1351 	m_head = m_new;
1352 	f = &c->my_ptr->my_frag[0];
1353 	f->my_status = 0;
1354 	f->my_data = vtophys(mtod(m_new, caddr_t));
1355 	total_len = m_new->m_len;
1356 	f->my_ctl = MY_TXFD | MY_TXLD | MY_CRCEnable | MY_PADEnable;
1357 	f->my_ctl |= total_len << MY_PKTShift;	/* pkt size */
1358 	f->my_ctl |= total_len;	/* buffer size */
1359 	/* 89/12/29 add, for mtd891 *//* [ 89? ] */
1360 	if (sc->my_info->my_did == MTD891ID)
1361 		f->my_ctl |= MY_ETIControl | MY_RetryTxLC;
1362 	c->my_mbuf = m_head;
1363 	c->my_lastdesc = 0;
1364 	MY_TXNEXT(c) = vtophys(&c->my_nextdesc->my_ptr->my_frag[0]);
1365 	return (0);
1366 }
1367 
1368 /*
1369  * Main transmit routine.  Each packet is handed to my_encap(), which copies
1370  * it into a driver-owned mbuf, so the fragment pointers in the transmit list
1371  * (physical addresses) never reference the caller's mbufs; the copy is kept
1372  * in the descriptor chain entry so it can be freed after transmission.
1373  */
1374 static void
1375 my_start(if_t ifp)
1376 {
1377 	struct my_softc *sc;
1378 
1379 	sc = if_getsoftc(ifp);
1380 	MY_LOCK(sc);
1381 	my_start_locked(ifp);
1382 	MY_UNLOCK(sc);
1383 }
1384 
1385 static void
1386 my_start_locked(if_t ifp)
1387 {
1388 	struct my_softc *sc;
1389 	struct mbuf    *m_head = NULL;
1390 	struct my_chain *cur_tx = NULL, *start_tx;
1391 
1392 	sc = if_getsoftc(ifp);
1393 	MY_LOCK_ASSERT(sc);
1394 	if (sc->my_autoneg) {
1395 		sc->my_tx_pend = 1;
1396 		return;
1397 	}
1398 	/*
1399 	 * Check for an available queue slot. If there are none, punt.
1400 	 */
1401 	if (sc->my_cdata.my_tx_free->my_mbuf != NULL) {
1402 		if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1403 		return;
1404 	}
1405 	start_tx = sc->my_cdata.my_tx_free;
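	/*
	 * Ownership of the first descriptor (start_tx) is withheld until
	 * the whole chain has been queued, so the chip cannot start on a
	 * partially built chain; it is handed over at the bottom together
	 * with the transmit poll demand.
	 */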
1406 	while (sc->my_cdata.my_tx_free->my_mbuf == NULL) {
1407 		m_head = if_dequeue(ifp);
1408 		if (m_head == NULL)
1409 			break;
1410 
1411 		/* Pick a descriptor off the free list. */
1412 		cur_tx = sc->my_cdata.my_tx_free;
1413 		sc->my_cdata.my_tx_free = cur_tx->my_nextdesc;
1414 
1415 		/* Pack the data into the descriptor. */
1416 		my_encap(sc, cur_tx, m_head);
1417 
1418 		if (cur_tx != start_tx)
1419 			MY_TXOWN(cur_tx) = MY_OWNByNIC;
1420 #if NBPFILTER > 0
1421 		/*
1422 		 * If there's a BPF listener, bounce a copy of this frame to
1423 		 * him.
1424 		 */
1425 		BPF_MTAP(ifp, cur_tx->my_mbuf);
1426 #endif
1427 	}
1428 	/*
1429 	 * If there are no packets queued, bail.
1430 	 */
1431 	if (cur_tx == NULL) {
1432 		return;
1433 	}
1434 	/*
1435 	 * Place the request for the upload interrupt in the last descriptor
1436 	 * in the chain. This way, if we're chaining several packets at once,
1437 	 * we'll only get an interrupt once for the whole chain rather than
1438 	 * once for each packet.
1439 	 */
1440 	MY_TXCTL(cur_tx) |= MY_TXIC;
1441 	cur_tx->my_ptr->my_frag[0].my_ctl |= MY_TXIC;
1442 	sc->my_cdata.my_tx_tail = cur_tx;
1443 	if (sc->my_cdata.my_tx_head == NULL)
1444 		sc->my_cdata.my_tx_head = start_tx;
1445 	MY_TXOWN(start_tx) = MY_OWNByNIC;
1446 	CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF);	/* tx polling demand */
1447 
1448 	/*
1449 	 * Set a timeout in case the chip goes out to lunch.
1450 	 */
1451 	sc->my_timer = 5;
1452 	return;
1453 }
1454 
1455 static void
1456 my_init(void *xsc)
1457 {
1458 	struct my_softc *sc = xsc;
1459 
1460 	MY_LOCK(sc);
1461 	my_init_locked(sc);
1462 	MY_UNLOCK(sc);
1463 }
1464 
1465 static void
1466 my_init_locked(struct my_softc *sc)
1467 {
1468 	if_t		ifp = sc->my_ifp;
1469 	u_int16_t       phy_bmcr = 0;
1470 
1471 	MY_LOCK_ASSERT(sc);
1472 	if (sc->my_autoneg) {
1473 		return;
1474 	}
1475 	if (sc->my_pinfo != NULL)
1476 		phy_bmcr = my_phy_readreg(sc, PHY_BMCR);
1477 	/*
1478 	 * Cancel pending I/O and free all RX/TX buffers.
1479 	 */
1480 	my_stop(sc);
1481 	my_reset(sc);
1482 
1483 	/*
1484 	 * Set cache alignment and burst length.
1485 	 */
1486 #if 0				/* 89/9/1 modify,  */
1487 	CSR_WRITE_4(sc, MY_BCR, MY_RPBLE512);
1488 	CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF);
1489 #endif
1490 	CSR_WRITE_4(sc, MY_BCR, MY_PBL8);
1491 	CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF | MY_RBLEN | MY_RPBLE512);
1492 	/*
1493 	 * 89/12/29 add, for mtd891,
1494 	 */
1495 	if (sc->my_info->my_did == MTD891ID) {
1496 		MY_SETBIT(sc, MY_BCR, MY_PROG);
1497 		MY_SETBIT(sc, MY_TCRRCR, MY_Enhanced);
1498 	}
1499 	my_setcfg(sc, phy_bmcr);
1500 	/* Init circular RX list. */
1501 	if (my_list_rx_init(sc) == ENOBUFS) {
1502 		device_printf(sc->my_dev, "init failed: no memory for rx buffers\n");
1503 		my_stop(sc);
1504 		return;
1505 	}
1506 	/* Init TX descriptors. */
1507 	my_list_tx_init(sc);
1508 
1509 	/* If we want promiscuous mode, set the allframes bit. */
1510 	if (if_getflags(ifp) & IFF_PROMISC)
1511 		MY_SETBIT(sc, MY_TCRRCR, MY_PROM);
1512 	else
1513 		MY_CLRBIT(sc, MY_TCRRCR, MY_PROM);
1514 
1515 	/*
1516 	 * Set capture broadcast bit to capture broadcast frames.
1517 	 */
1518 	if (if_getflags(ifp) & IFF_BROADCAST)
1519 		MY_SETBIT(sc, MY_TCRRCR, MY_AB);
1520 	else
1521 		MY_CLRBIT(sc, MY_TCRRCR, MY_AB);
1522 
1523 	/*
1524 	 * Program the multicast filter, if necessary.
1525 	 */
1526 	my_setmulti(sc);
1527 
1528 	/*
1529 	 * Load the address of the RX list.
1530 	 */
1531 	MY_CLRBIT(sc, MY_TCRRCR, MY_RE);
1532 	CSR_WRITE_4(sc, MY_RXLBA, vtophys(&sc->my_ldata->my_rx_list[0]));
1533 
1534 	/*
1535 	 * Enable interrupts.
1536 	 */
1537 	CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
1538 	CSR_WRITE_4(sc, MY_ISR, 0xFFFFFFFF);
1539 
1540 	/* Enable receiver and transmitter. */
1541 	MY_SETBIT(sc, MY_TCRRCR, MY_RE);
1542 	MY_CLRBIT(sc, MY_TCRRCR, MY_TE);
1543 	CSR_WRITE_4(sc, MY_TXLBA, vtophys(&sc->my_ldata->my_tx_list[0]));
1544 	MY_SETBIT(sc, MY_TCRRCR, MY_TE);
1545 
1546 	/* Restore state of BMCR */
1547 	if (sc->my_pinfo != NULL)
1548 		my_phy_writereg(sc, PHY_BMCR, phy_bmcr);
1549 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
1550 	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1551 
1552 	callout_reset(&sc->my_watchdog, hz, my_watchdog, sc);
1553 	return;
1554 }
1555 
1556 /*
1557  * Set media options.
1558  */
1559 
1560 static int
1561 my_ifmedia_upd(if_t ifp)
1562 {
1563 	struct my_softc *sc;
1564 	struct ifmedia *ifm;
1565 
1566 	sc = if_getsoftc(ifp);
1567 	MY_LOCK(sc);
1568 	ifm = &sc->ifmedia;
1569 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
1570 		MY_UNLOCK(sc);
1571 		return (EINVAL);
1572 	}
1573 	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
1574 		my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
1575 	else
1576 		my_setmode_mii(sc, ifm->ifm_media);
1577 	MY_UNLOCK(sc);
1578 	return (0);
1579 }
1580 
1581 /*
1582  * Report current media status.
1583  */
1584 
1585 static void
1586 my_ifmedia_sts(if_t ifp, struct ifmediareq * ifmr)
1587 {
1588 	struct my_softc *sc;
1589 	u_int16_t advert = 0, ability = 0;
1590 
1591 	sc = if_getsoftc(ifp);
1592 	MY_LOCK(sc);
1593 	ifmr->ifm_active = IFM_ETHER;
1594 	if (!(my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
1595 #if 0				/* this version does not support 1000Mbps */
1596 		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_1000)
1597 			ifmr->ifm_active = IFM_ETHER | IFM_1000_T;
1598 #endif
1599 		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
1600 			ifmr->ifm_active = IFM_ETHER | IFM_100_TX;
1601 		else
1602 			ifmr->ifm_active = IFM_ETHER | IFM_10_T;
1603 		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
1604 			ifmr->ifm_active |= IFM_FDX;
1605 		else
1606 			ifmr->ifm_active |= IFM_HDX;
1607 
1608 		MY_UNLOCK(sc);
1609 		return;
1610 	}
1611 	ability = my_phy_readreg(sc, PHY_LPAR);
1612 	advert = my_phy_readreg(sc, PHY_ANAR);
1613 
1614 #if 0				/* this version does not support 1000Mbps */
1615 	if (sc->my_pinfo->my_vid == MarvellPHYID0) {
1616 		ability2 = my_phy_readreg(sc, PHY_1000SR);
1617 		if (ability2 & PHY_1000SR_1000BTXFULL) {
1618 			advert = 0;
1619 			ability = 0;
1620 			ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
1621 		} else if (ability2 & PHY_1000SR_1000BTXHALF) {
1622 			advert = 0;
1623 			ability = 0;
1624 			ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_HDX;
1625 		}
1626 	}
1627 #endif
1628 	if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4)
1629 		ifmr->ifm_active = IFM_ETHER | IFM_100_T4;
1630 	else if (advert & PHY_ANAR_100BTXFULL && ability & PHY_ANAR_100BTXFULL)
1631 		ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1632 	else if (advert & PHY_ANAR_100BTXHALF && ability & PHY_ANAR_100BTXHALF)
1633 		ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_HDX;
1634 	else if (advert & PHY_ANAR_10BTFULL && ability & PHY_ANAR_10BTFULL)
1635 		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_FDX;
1636 	else if (advert & PHY_ANAR_10BTHALF && ability & PHY_ANAR_10BTHALF)
1637 		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_HDX;
1638 	MY_UNLOCK(sc);
1639 	return;
1640 }
1641 
1642 static int
1643 my_ioctl(if_t ifp, u_long command, caddr_t data)
1644 {
1645 	struct my_softc *sc = if_getsoftc(ifp);
1646 	struct ifreq   *ifr = (struct ifreq *) data;
1647 	int             error;
1648 
1649 	switch (command) {
1650 	case SIOCSIFFLAGS:
1651 		MY_LOCK(sc);
1652 		if (if_getflags(ifp) & IFF_UP)
1653 			my_init_locked(sc);
1654 		else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1655 			my_stop(sc);
1656 		MY_UNLOCK(sc);
1657 		error = 0;
1658 		break;
1659 	case SIOCADDMULTI:
1660 	case SIOCDELMULTI:
1661 		MY_LOCK(sc);
1662 		my_setmulti(sc);
1663 		MY_UNLOCK(sc);
1664 		error = 0;
1665 		break;
1666 	case SIOCGIFMEDIA:
1667 	case SIOCSIFMEDIA:
1668 		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
1669 		break;
1670 	default:
1671 		error = ether_ioctl(ifp, command, data);
1672 		break;
1673 	}
1674 	return (error);
1675 }
1676 
1677 static void
1678 my_watchdog(void *arg)
1679 {
1680 	struct my_softc *sc;
1681 	if_t		ifp;
1682 
1683 	sc = arg;
1684 	MY_LOCK_ASSERT(sc);
1685 	callout_reset(&sc->my_watchdog, hz, my_watchdog, sc);
1686 	if (sc->my_timer == 0 || --sc->my_timer > 0)
1687 		return;
1688 
1689 	ifp = sc->my_ifp;
1690 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1691 	if_printf(ifp, "watchdog timeout\n");
1692 	if (!(my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
1693 		if_printf(ifp, "no carrier - transceiver cable problem?\n");
1694 	my_stop(sc);
1695 	my_reset(sc);
1696 	my_init_locked(sc);
1697 	if (!if_sendq_empty(ifp))
1698 		my_start_locked(ifp);
1699 }
1700 
1701 /*
1702  * Stop the adapter and free any mbufs allocated to the RX and TX lists.
1703  */
1704 static void
1705 my_stop(struct my_softc * sc)
1706 {
1707 	int    i;
1708 	if_t   ifp;
1709 
1710 	MY_LOCK_ASSERT(sc);
1711 	ifp = sc->my_ifp;
1712 
1713 	callout_stop(&sc->my_autoneg_timer);
1714 	callout_stop(&sc->my_watchdog);
1715 
1716 	MY_CLRBIT(sc, MY_TCRRCR, (MY_RE | MY_TE));
1717 	CSR_WRITE_4(sc, MY_IMR, 0x00000000);
1718 	CSR_WRITE_4(sc, MY_TXLBA, 0x00000000);
1719 	CSR_WRITE_4(sc, MY_RXLBA, 0x00000000);
1720 
1721 	/*
1722 	 * Free data in the RX lists.
1723 	 */
1724 	for (i = 0; i < MY_RX_LIST_CNT; i++) {
1725 		if (sc->my_cdata.my_rx_chain[i].my_mbuf != NULL) {
1726 			m_freem(sc->my_cdata.my_rx_chain[i].my_mbuf);
1727 			sc->my_cdata.my_rx_chain[i].my_mbuf = NULL;
1728 		}
1729 	}
1730 	bzero((char *)&sc->my_ldata->my_rx_list,
1731 	    sizeof(sc->my_ldata->my_rx_list));
1732 	/*
1733 	 * Free the TX list buffers.
1734 	 */
1735 	for (i = 0; i < MY_TX_LIST_CNT; i++) {
1736 		if (sc->my_cdata.my_tx_chain[i].my_mbuf != NULL) {
1737 			m_freem(sc->my_cdata.my_tx_chain[i].my_mbuf);
1738 			sc->my_cdata.my_tx_chain[i].my_mbuf = NULL;
1739 		}
1740 	}
1741 	bzero((char *)&sc->my_ldata->my_tx_list,
1742 	    sizeof(sc->my_ldata->my_tx_list));
1743 	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
1744 	return;
1745 }
1746 
1747 /*
1748  * Stop all chip I/O so that the kernel's probe routines don't get confused
1749  * by errant DMAs when rebooting.
1750  */
1751 static int
1752 my_shutdown(device_t dev)
1753 {
1754 	struct my_softc *sc;
1755 
1756 	sc = device_get_softc(dev);
1757 	MY_LOCK(sc);
1758 	my_stop(sc);
1759 	MY_UNLOCK(sc);
1760 	return (0);
1761 }
1762