xref: /freebsd/sys/dev/my/if_my.c (revision a7623790fb345e6dc986dfd31df0ace115e6f2e4)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Written by: yen_cw@myson.com.tw
5  * Copyright (c) 2002 Myson Technology Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification, immediately at the beginning of the file.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * Myson fast ethernet PCI NIC driver, available at: http://www.myson.com.tw/
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/sockio.h>
38 #include <sys/mbuf.h>
39 #include <sys/malloc.h>
40 #include <sys/kernel.h>
41 #include <sys/socket.h>
42 #include <sys/queue.h>
43 #include <sys/types.h>
44 #include <sys/module.h>
45 #include <sys/lock.h>
46 #include <sys/mutex.h>
47 
48 #define NBPFILTER	1
49 
50 #include <net/if.h>
51 #include <net/if_var.h>
52 #include <net/if_arp.h>
53 #include <net/ethernet.h>
54 #include <net/if_media.h>
55 #include <net/if_types.h>
56 #include <net/if_dl.h>
57 #include <net/bpf.h>
58 
59 #include <vm/vm.h>		/* for vtophys */
60 #include <vm/pmap.h>		/* for vtophys */
61 #include <machine/bus.h>
62 #include <machine/resource.h>
63 #include <sys/bus.h>
64 #include <sys/rman.h>
65 
66 #include <dev/pci/pcireg.h>
67 #include <dev/pci/pcivar.h>
68 
69 /*
70  * #define MY_USEIOSPACE
71  */
72 
73 static int      MY_USEIOSPACE = 1;
74 
75 #ifdef MY_USEIOSPACE
76 #define MY_RES                  SYS_RES_IOPORT
77 #define MY_RID                  MY_PCI_LOIO
78 #else
79 #define MY_RES                  SYS_RES_MEMORY
80 #define MY_RID                  MY_PCI_LOMEM
81 #endif
82 
83 #include <dev/my/if_myreg.h>
84 
85 /*
86  * Various supported device vendors/types and their names.
87  */
88 struct my_type *my_info_tmp;
89 static struct my_type my_devs[] = {
90 	{MYSONVENDORID, MTD800ID, "Myson MTD80X Based Fast Ethernet Card"},
91 	{MYSONVENDORID, MTD803ID, "Myson MTD80X Based Fast Ethernet Card"},
92 	{MYSONVENDORID, MTD891ID, "Myson MTD89X Based Gigabit Ethernet Card"},
93 	{0, 0, NULL}
94 };
95 
96 /*
97  * Various supported PHY vendors/types and their names. Note that this driver
98  * will work with pretty much any MII-compliant PHY, so failure to positively
99  * identify the chip is not a fatal error.
100  */
101 static struct my_type my_phys[] = {
102 	{MysonPHYID0, MysonPHYID0, "<MYSON MTD981>"},
103 	{SeeqPHYID0, SeeqPHYID0, "<SEEQ 80225>"},
104 	{AhdocPHYID0, AhdocPHYID0, "<AHDOC 101>"},
105 	{MarvellPHYID0, MarvellPHYID0, "<MARVELL 88E1000>"},
106 	{LevelOnePHYID0, LevelOnePHYID0, "<LevelOne LXT1000>"},
107 	{0, 0, "<MII-compliant physical interface>"}
108 };
109 
110 static int      my_probe(device_t);
111 static int      my_attach(device_t);
112 static int      my_detach(device_t);
113 static int      my_newbuf(struct my_softc *, struct my_chain_onefrag *);
114 static int      my_encap(struct my_softc *, struct my_chain *, struct mbuf *);
115 static void     my_rxeof(struct my_softc *);
116 static void     my_txeof(struct my_softc *);
117 static void     my_txeoc(struct my_softc *);
118 static void     my_intr(void *);
119 static void     my_start(struct ifnet *);
120 static void     my_start_locked(struct ifnet *);
121 static int      my_ioctl(struct ifnet *, u_long, caddr_t);
122 static void     my_init(void *);
123 static void     my_init_locked(struct my_softc *);
124 static void     my_stop(struct my_softc *);
125 static void     my_autoneg_timeout(void *);
126 static void     my_watchdog(void *);
127 static int      my_shutdown(device_t);
128 static int      my_ifmedia_upd(struct ifnet *);
129 static void     my_ifmedia_sts(struct ifnet *, struct ifmediareq *);
130 static u_int16_t my_phy_readreg(struct my_softc *, int);
131 static void     my_phy_writereg(struct my_softc *, int, int);
132 static void     my_autoneg_xmit(struct my_softc *);
133 static void     my_autoneg_mii(struct my_softc *, int, int);
134 static void     my_setmode_mii(struct my_softc *, int);
135 static void     my_getmode_mii(struct my_softc *);
136 static void     my_setcfg(struct my_softc *, int);
137 static void     my_setmulti(struct my_softc *);
138 static void     my_reset(struct my_softc *);
139 static int      my_list_rx_init(struct my_softc *);
140 static int      my_list_tx_init(struct my_softc *);
141 static long     my_send_cmd_to_phy(struct my_softc *, int, int);
142 
143 #define MY_SETBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
144 #define MY_CLRBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))
145 
146 static device_method_t my_methods[] = {
147 	/* Device interface */
148 	DEVMETHOD(device_probe, my_probe),
149 	DEVMETHOD(device_attach, my_attach),
150 	DEVMETHOD(device_detach, my_detach),
151 	DEVMETHOD(device_shutdown, my_shutdown),
152 
153 	DEVMETHOD_END
154 };
155 
156 static driver_t my_driver = {
157 	"my",
158 	my_methods,
159 	sizeof(struct my_softc)
160 };
161 
162 static devclass_t my_devclass;
163 
164 DRIVER_MODULE(my, pci, my_driver, my_devclass, 0, 0);
165 MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, my, my_devs,
166     nitems(my_devs) - 1);
167 MODULE_DEPEND(my, pci, 1, 1, 1);
168 MODULE_DEPEND(my, ether, 1, 1, 1);
169 
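/*
 * my_send_cmd_to_phy() bit-bangs the header of an MII management frame out
 * through the MY_MANAGEMENT register: a preamble of 32 '1' bits, followed by
 * a 16-bit command word holding the start/opcode bits, the PHY address and
 * the register address, shifted out MSB first on MDO while MDC is toggled by
 * hand.  For a read, the output driver is released just before the turnaround
 * bits so the PHY can answer on MDI.  The working register value is returned
 * so the caller can continue clocking data bits in or out.
 */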
170 static long
171 my_send_cmd_to_phy(struct my_softc * sc, int opcode, int regad)
172 {
173 	long            miir;
174 	int             i;
175 	int             mask, data;
176 
177 	MY_LOCK_ASSERT(sc);
178 
179 	/* enable MII output */
180 	miir = CSR_READ_4(sc, MY_MANAGEMENT);
181 	miir &= 0xfffffff0;
182 
183 	miir |= MY_MASK_MIIR_MII_WRITE + MY_MASK_MIIR_MII_MDO;
184 
185 	/* send 32 1's preamble */
186 	for (i = 0; i < 32; i++) {
187 		/* low MDC; MDO is already high (miir) */
188 		miir &= ~MY_MASK_MIIR_MII_MDC;
189 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
190 
191 		/* high MDC */
192 		miir |= MY_MASK_MIIR_MII_MDC;
193 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
194 	}
195 
196 	/* calculate ST+OP+PHYAD+REGAD+TA */
197 	data = opcode | (sc->my_phy_addr << 7) | (regad << 2);
198 
199 	/* shift the command word out, MSB first */
200 	mask = 0x8000;
201 	while (mask) {
202 		/* low MDC, prepare MDO */
203 		miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
204 		if (mask & data)
205 			miir |= MY_MASK_MIIR_MII_MDO;
206 
207 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
208 		/* high MDC */
209 		miir |= MY_MASK_MIIR_MII_MDC;
210 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
211 		DELAY(30);
212 
213 		/* next */
214 		mask >>= 1;
215 		if (mask == 0x2 && opcode == MY_OP_READ)
216 			miir &= ~MY_MASK_MIIR_MII_WRITE;
217 	}
218 
219 	return miir;
220 }
221 
222 static u_int16_t
223 my_phy_readreg(struct my_softc * sc, int reg)
224 {
225 	long            miir;
226 	int             mask, data;
227 
228 	MY_LOCK_ASSERT(sc);
229 
230 	if (sc->my_info->my_did == MTD803ID)
231 		data = CSR_READ_2(sc, MY_PHYBASE + reg * 2);
232 	else {
233 		miir = my_send_cmd_to_phy(sc, MY_OP_READ, reg);
234 
235 		/* read data */
236 		mask = 0x8000;
237 		data = 0;
238 		while (mask) {
239 			/* low MDC */
240 			miir &= ~MY_MASK_MIIR_MII_MDC;
241 			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
242 
243 			/* read MDI */
244 			miir = CSR_READ_4(sc, MY_MANAGEMENT);
245 			if (miir & MY_MASK_MIIR_MII_MDI)
246 				data |= mask;
247 
248 			/* high MDC, and wait */
249 			miir |= MY_MASK_MIIR_MII_MDC;
250 			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
251 			DELAY(30);
252 
253 			/* next */
254 			mask >>= 1;
255 		}
256 
257 		/* low MDC */
258 		miir &= ~MY_MASK_MIIR_MII_MDC;
259 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
260 	}
261 
262 	return (u_int16_t) data;
263 }
264 
265 static void
266 my_phy_writereg(struct my_softc * sc, int reg, int data)
267 {
268 	long            miir;
269 	int             mask;
270 
271 	MY_LOCK_ASSERT(sc);
272 
273 	if (sc->my_info->my_did == MTD803ID)
274 		CSR_WRITE_2(sc, MY_PHYBASE + reg * 2, data);
275 	else {
276 		miir = my_send_cmd_to_phy(sc, MY_OP_WRITE, reg);
277 
278 		/* write data */
279 		mask = 0x8000;
280 		while (mask) {
281 			/* low MDC, prepare MDO */
282 			miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
283 			if (mask & data)
284 				miir |= MY_MASK_MIIR_MII_MDO;
285 			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
286 			DELAY(1);
287 
288 			/* high MDC */
289 			miir |= MY_MASK_MIIR_MII_MDC;
290 			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
291 			DELAY(1);
292 
293 			/* next */
294 			mask >>= 1;
295 		}
296 
297 		/* low MDC */
298 		miir &= ~MY_MASK_MIIR_MII_MDC;
299 		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
300 	}
301 	return;
302 }
303 
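/*
 * if_foreach_llmaddr() callback: hash one link-level multicast address.
 * The upper 6 bits of the inverted big-endian CRC32 of the address pick one
 * of 64 filter bits, which is set in the MAR0/MAR1 image passed in via 'arg'.
 */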
304 static u_int
305 my_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
306 {
307 	uint32_t *hashes = arg;
308 	int h;
309 
310 	h = ~ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
311 	if (h < 32)
312 		hashes[0] |= (1 << h);
313 	else
314 		hashes[1] |= (1 << (h - 32));
315 
316 	return (1);
317 }
318 /*
319  * Program the 64-bit multicast hash filter.
320  */
321 static void
322 my_setmulti(struct my_softc * sc)
323 {
324 	struct ifnet   *ifp;
325 	u_int32_t       hashes[2] = {0, 0};
326 	u_int32_t       rxfilt;
327 
328 	MY_LOCK_ASSERT(sc);
329 
330 	ifp = sc->my_ifp;
331 
332 	rxfilt = CSR_READ_4(sc, MY_TCRRCR);
333 
334 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
335 		rxfilt |= MY_AM;
336 		CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
337 		CSR_WRITE_4(sc, MY_MAR0, 0xFFFFFFFF);
338 		CSR_WRITE_4(sc, MY_MAR1, 0xFFFFFFFF);
339 
340 		return;
341 	}
342 	/* first, zot all the existing hash bits */
343 	CSR_WRITE_4(sc, MY_MAR0, 0);
344 	CSR_WRITE_4(sc, MY_MAR1, 0);
345 
346 	/* now program new ones */
347 	if (if_foreach_llmaddr(ifp, my_hash_maddr, hashes) > 0)
348 		rxfilt |= MY_AM;
349 	else
350 		rxfilt &= ~MY_AM;
351 	CSR_WRITE_4(sc, MY_MAR0, hashes[0]);
352 	CSR_WRITE_4(sc, MY_MAR1, hashes[1]);
353 	CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
354 }
355 
356 /*
357  * Initiate an autonegotiation session.
358  */
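/*
 * The PHY is reset through BMCR, we spin until the reset bit self-clears,
 * and then the autoneg-enable and autoneg-restart bits are set to kick off
 * a fresh negotiation cycle.
 */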
359 static void
360 my_autoneg_xmit(struct my_softc * sc)
361 {
362 	u_int16_t       phy_sts = 0;
363 
364 	MY_LOCK_ASSERT(sc);
365 
366 	my_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
367 	DELAY(500);
368 	while (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_RESET);
369 
370 	phy_sts = my_phy_readreg(sc, PHY_BMCR);
371 	phy_sts |= PHY_BMCR_AUTONEGENBL | PHY_BMCR_AUTONEGRSTR;
372 	my_phy_writereg(sc, PHY_BMCR, phy_sts);
373 
374 	return;
375 }
376 
377 static void
378 my_autoneg_timeout(void *arg)
379 {
380 	struct my_softc *sc;
381 
382 	sc = arg;
383 	MY_LOCK_ASSERT(sc);
384 	my_autoneg_mii(sc, MY_FLAG_DELAYTIMEO, 1);
385 }
386 
387 /*
388  * Invoke autonegotiation on a PHY.
389  */
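/*
 * 'flag' selects how the session is run: MY_FLAG_FORCEDELAY busy-waits for
 * the result (only used at attach time), MY_FLAG_SCHEDDELAY starts the
 * session and arms my_autoneg_timer to pick up the result later (deferring
 * if a transmit is still in flight), and MY_FLAG_DELAYTIMEO is used by the
 * timer callback to finish the session and program the negotiated mode.
 */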
390 static void
391 my_autoneg_mii(struct my_softc * sc, int flag, int verbose)
392 {
393 	u_int16_t       phy_sts = 0, media, advert, ability;
394 	u_int16_t       ability2 = 0;
395 	struct ifnet   *ifp;
396 	struct ifmedia *ifm;
397 
398 	MY_LOCK_ASSERT(sc);
399 
400 	ifm = &sc->ifmedia;
401 	ifp = sc->my_ifp;
402 
403 	ifm->ifm_media = IFM_ETHER | IFM_AUTO;
404 
405 #ifndef FORCE_AUTONEG_TFOUR
406 	/*
407 	 * First, see if autoneg is supported. If not, there's no point in
408 	 * continuing.
409 	 */
410 	phy_sts = my_phy_readreg(sc, PHY_BMSR);
411 	if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
412 		if (verbose)
413 			device_printf(sc->my_dev,
414 			    "autonegotiation not supported\n");
415 		ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
416 		return;
417 	}
418 #endif
419 	switch (flag) {
420 	case MY_FLAG_FORCEDELAY:
421 		/*
422 		 * XXX Never use this option anywhere but in the probe
423 		 * routine: making the kernel stop dead in its tracks for
424 		 * five whole seconds after we've gone multi-user is really
425 		 * bad manners.
426 		 */
427 		my_autoneg_xmit(sc);
428 		DELAY(5000000);
429 		break;
430 	case MY_FLAG_SCHEDDELAY:
431 		/*
432 		 * Wait for the transmitter to go idle before starting an
433 		 * autoneg session, otherwise my_start() may clobber our
434 		 * timeout, and we don't want to allow transmission during an
435 		 * autoneg session since that can screw it up.
436 		 */
437 		if (sc->my_cdata.my_tx_head != NULL) {
438 			sc->my_want_auto = 1;
439 			/* Leave the lock held; the caller drops it. */
440 			return;
441 		}
442 		my_autoneg_xmit(sc);
443 		callout_reset(&sc->my_autoneg_timer, hz * 5, my_autoneg_timeout,
444 		    sc);
445 		sc->my_autoneg = 1;
446 		sc->my_want_auto = 0;
447 		return;
448 	case MY_FLAG_DELAYTIMEO:
449 		callout_stop(&sc->my_autoneg_timer);
450 		sc->my_autoneg = 0;
451 		break;
452 	default:
453 		device_printf(sc->my_dev, "invalid autoneg flag: %d\n", flag);
454 		return;
455 	}
456 
457 	if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
458 		if (verbose)
459 			device_printf(sc->my_dev, "autoneg complete, ");
460 		phy_sts = my_phy_readreg(sc, PHY_BMSR);
461 	} else {
462 		if (verbose)
463 			device_printf(sc->my_dev, "autoneg not complete, ");
464 	}
465 
466 	media = my_phy_readreg(sc, PHY_BMCR);
467 
468 	/* Link is good. Report modes and set duplex mode. */
469 	if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
470 		if (verbose)
471 			device_printf(sc->my_dev, "link status good. ");
472 		advert = my_phy_readreg(sc, PHY_ANAR);
473 		ability = my_phy_readreg(sc, PHY_LPAR);
474 		if ((sc->my_pinfo->my_vid == MarvellPHYID0) ||
475 		    (sc->my_pinfo->my_vid == LevelOnePHYID0)) {
476 			ability2 = my_phy_readreg(sc, PHY_1000SR);
477 			if (ability2 & PHY_1000SR_1000BTXFULL) {
478 				advert = 0;
479 				ability = 0;
480 				/*
481 				 * this version did not support 1000M,
482 				 * ifm->ifm_media =
483 				 * IFM_ETHER|IFM_1000_T|IFM_FDX;
484 				 */
485 				ifm->ifm_media =
486 				    IFM_ETHER | IFM_100_TX | IFM_FDX;
487 				media &= ~PHY_BMCR_SPEEDSEL;
488 				media |= PHY_BMCR_1000;
489 				media |= PHY_BMCR_DUPLEX;
490 				printf("(full-duplex, 1000Mbps)\n");
491 			} else if (ability2 & PHY_1000SR_1000BTXHALF) {
492 				advert = 0;
493 				ability = 0;
494 				/*
495 				 * this version did not support 1000M,
496 				 * ifm->ifm_media = IFM_ETHER|IFM_1000_T;
497 				 */
498 				ifm->ifm_media = IFM_ETHER | IFM_100_TX;
499 				media &= ~PHY_BMCR_SPEEDSEL;
500 				media &= ~PHY_BMCR_DUPLEX;
501 				media |= PHY_BMCR_1000;
502 				printf("(half-duplex, 1000Mbps)\n");
503 			}
504 		}
505 		if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
506 			ifm->ifm_media = IFM_ETHER | IFM_100_T4;
507 			media |= PHY_BMCR_SPEEDSEL;
508 			media &= ~PHY_BMCR_DUPLEX;
509 			printf("(100baseT4)\n");
510 		} else if (advert & PHY_ANAR_100BTXFULL &&
511 			   ability & PHY_ANAR_100BTXFULL) {
512 			ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
513 			media |= PHY_BMCR_SPEEDSEL;
514 			media |= PHY_BMCR_DUPLEX;
515 			printf("(full-duplex, 100Mbps)\n");
516 		} else if (advert & PHY_ANAR_100BTXHALF &&
517 			   ability & PHY_ANAR_100BTXHALF) {
518 			ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
519 			media |= PHY_BMCR_SPEEDSEL;
520 			media &= ~PHY_BMCR_DUPLEX;
521 			printf("(half-duplex, 100Mbps)\n");
522 		} else if (advert & PHY_ANAR_10BTFULL &&
523 			   ability & PHY_ANAR_10BTFULL) {
524 			ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
525 			media &= ~PHY_BMCR_SPEEDSEL;
526 			media |= PHY_BMCR_DUPLEX;
527 			printf("(full-duplex, 10Mbps)\n");
528 		} else if (advert) {
529 			ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
530 			media &= ~PHY_BMCR_SPEEDSEL;
531 			media &= ~PHY_BMCR_DUPLEX;
532 			printf("(half-duplex, 10Mbps)\n");
533 		}
534 		media &= ~PHY_BMCR_AUTONEGENBL;
535 
536 		/* Set ASIC's duplex mode to match the PHY. */
537 		my_phy_writereg(sc, PHY_BMCR, media);
538 		my_setcfg(sc, media);
539 	} else {
540 		if (verbose)
541 			device_printf(sc->my_dev, "no carrier\n");
542 	}
543 
544 	my_init_locked(sc);
545 	if (sc->my_tx_pend) {
546 		sc->my_autoneg = 0;
547 		sc->my_tx_pend = 0;
548 		my_start_locked(ifp);
549 	}
550 	return;
551 }
552 
553 /*
554  * Query the PHY for the media types it supports.
555  */
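/*
 * Each capability bit found in BMSR adds a matching ifmedia entry; the last
 * capability probed is left as the default media, with autonegotiation
 * preferred whenever the PHY reports support for it.
 */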
556 static void
557 my_getmode_mii(struct my_softc * sc)
558 {
559 	u_int16_t       bmsr;
560 	struct ifnet   *ifp;
561 
562 	MY_LOCK_ASSERT(sc);
563 	ifp = sc->my_ifp;
564 	bmsr = my_phy_readreg(sc, PHY_BMSR);
565 	if (bootverbose)
566 		device_printf(sc->my_dev, "PHY status word: %x\n", bmsr);
567 
568 	/* fallback */
569 	sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
570 
571 	if (bmsr & PHY_BMSR_10BTHALF) {
572 		if (bootverbose)
573 			device_printf(sc->my_dev,
574 			    "10Mbps half-duplex mode supported\n");
575 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX,
576 		    0, NULL);
577 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
578 	}
579 	if (bmsr & PHY_BMSR_10BTFULL) {
580 		if (bootverbose)
581 			device_printf(sc->my_dev,
582 			    "10Mbps full-duplex mode supported\n");
583 
584 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
585 		    0, NULL);
586 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
587 	}
588 	if (bmsr & PHY_BMSR_100BTXHALF) {
589 		if (bootverbose)
590 			device_printf(sc->my_dev,
591 			    "100Mbps half-duplex mode supported\n");
592 		ifp->if_baudrate = 100000000;
593 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
594 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX,
595 			    0, NULL);
596 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
597 	}
598 	if (bmsr & PHY_BMSR_100BTXFULL) {
599 		if (bootverbose)
600 			device_printf(sc->my_dev,
601 			    "100Mbps full-duplex mode supported\n");
602 		ifp->if_baudrate = 100000000;
603 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
604 		    0, NULL);
605 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
606 	}
607 	/* Some also support 100BaseT4. */
608 	if (bmsr & PHY_BMSR_100BT4) {
609 		if (bootverbose)
610 			device_printf(sc->my_dev, "100baseT4 mode supported\n");
611 		ifp->if_baudrate = 100000000;
612 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_T4, 0, NULL);
613 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_T4;
614 #ifdef FORCE_AUTONEG_TFOUR
615 		if (bootverbose)
616 			device_printf(sc->my_dev,
617 			    "forcing on autoneg support for BT4\n");
618 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
619 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
620 #endif
621 	}
622 #if 0				/* this version did not support 1000M, */
623 	if (sc->my_pinfo->my_vid == MarvellPHYID0) {
624 		if (bootverbose)
625 			device_printf(sc->my_dev,
626 			    "1000Mbps half-duplex mode supported\n");
627 
628 		ifp->if_baudrate = 1000000000;
629 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
630 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_HDX,
631 		    0, NULL);
632 		if (bootverbose)
633 			device_printf(sc->my_dev,
634 			    "1000Mbps full-duplex mode supported\n");
635 		ifp->if_baudrate = 1000000000;
636 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
637 		    0, NULL);
638 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_1000_T | IFM_FDX;
639 	}
640 #endif
641 	if (bmsr & PHY_BMSR_CANAUTONEG) {
642 		if (bootverbose)
643 			device_printf(sc->my_dev, "autoneg supported\n");
644 		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
645 		sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
646 	}
647 	return;
648 }
649 
650 /*
651  * Set speed and duplex mode.
652  */
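/*
 * Any autonegotiation session in progress is cancelled first; the requested
 * ifmedia word is then translated into BMCR speed/duplex bits and the MAC
 * is reprogrammed to match via my_setcfg().
 */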
653 static void
654 my_setmode_mii(struct my_softc * sc, int media)
655 {
656 	u_int16_t       bmcr;
657 
658 	MY_LOCK_ASSERT(sc);
659 	/*
660 	 * If an autoneg session is in progress, stop it.
661 	 */
662 	if (sc->my_autoneg) {
663 		device_printf(sc->my_dev, "canceling autoneg session\n");
664 		callout_stop(&sc->my_autoneg_timer);
665 		sc->my_autoneg = sc->my_want_auto = 0;
666 		bmcr = my_phy_readreg(sc, PHY_BMCR);
667 		bmcr &= ~PHY_BMCR_AUTONEGENBL;
668 		my_phy_writereg(sc, PHY_BMCR, bmcr);
669 	}
670 	device_printf(sc->my_dev, "selecting MII, ");
671 	bmcr = my_phy_readreg(sc, PHY_BMCR);
672 	bmcr &= ~(PHY_BMCR_AUTONEGENBL | PHY_BMCR_SPEEDSEL | PHY_BMCR_1000 |
673 		  PHY_BMCR_DUPLEX | PHY_BMCR_LOOPBK);
674 
675 #if 0				/* this version did not support 1000M, */
676 	if (IFM_SUBTYPE(media) == IFM_1000_T) {
677 		printf("1000Mbps/T4, half-duplex\n");
678 		bmcr &= ~PHY_BMCR_SPEEDSEL;
679 		bmcr &= ~PHY_BMCR_DUPLEX;
680 		bmcr |= PHY_BMCR_1000;
681 	}
682 #endif
683 	if (IFM_SUBTYPE(media) == IFM_100_T4) {
684 		printf("100Mbps/T4, half-duplex\n");
685 		bmcr |= PHY_BMCR_SPEEDSEL;
686 		bmcr &= ~PHY_BMCR_DUPLEX;
687 	}
688 	if (IFM_SUBTYPE(media) == IFM_100_TX) {
689 		printf("100Mbps, ");
690 		bmcr |= PHY_BMCR_SPEEDSEL;
691 	}
692 	if (IFM_SUBTYPE(media) == IFM_10_T) {
693 		printf("10Mbps, ");
694 		bmcr &= ~PHY_BMCR_SPEEDSEL;
695 	}
696 	if ((media & IFM_GMASK) == IFM_FDX) {
697 		printf("full duplex\n");
698 		bmcr |= PHY_BMCR_DUPLEX;
699 	} else {
700 		printf("half duplex\n");
701 		bmcr &= ~PHY_BMCR_DUPLEX;
702 	}
703 	my_phy_writereg(sc, PHY_BMCR, bmcr);
704 	my_setcfg(sc, bmcr);
705 	return;
706 }
707 
708 /*
709  * The Myson manual states that in order to fiddle with the 'full-duplex' and
710  * '100Mbps' bits in the netconfig register, we first have to put the
711  * transmit and/or receive logic in the idle state.
712  */
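/*
 * my_setcfg() therefore clears MY_TE/MY_RE, polls until MY_TXRUN/MY_RXRUN
 * drop (or MY_TIMEOUT expires), rewrites the PS1000/PS10/FD bits from the
 * supplied BMCR value, and finally restarts the transmitter and receiver
 * if they had been running.
 */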
713 static void
714 my_setcfg(struct my_softc * sc, int bmcr)
715 {
716 	int             i, restart = 0;
717 
718 	MY_LOCK_ASSERT(sc);
719 	if (CSR_READ_4(sc, MY_TCRRCR) & (MY_TE | MY_RE)) {
720 		restart = 1;
721 		MY_CLRBIT(sc, MY_TCRRCR, (MY_TE | MY_RE));
722 		for (i = 0; i < MY_TIMEOUT; i++) {
723 			DELAY(10);
724 			if (!(CSR_READ_4(sc, MY_TCRRCR) &
725 			    (MY_TXRUN | MY_RXRUN)))
726 				break;
727 		}
728 		if (i == MY_TIMEOUT)
729 			device_printf(sc->my_dev,
730 			    "failed to force tx and rx to idle\n");
731 	}
732 	MY_CLRBIT(sc, MY_TCRRCR, MY_PS1000);
733 	MY_CLRBIT(sc, MY_TCRRCR, MY_PS10);
734 	if (bmcr & PHY_BMCR_1000)
735 		MY_SETBIT(sc, MY_TCRRCR, MY_PS1000);
736 	else if (!(bmcr & PHY_BMCR_SPEEDSEL))
737 		MY_SETBIT(sc, MY_TCRRCR, MY_PS10);
738 	if (bmcr & PHY_BMCR_DUPLEX)
739 		MY_SETBIT(sc, MY_TCRRCR, MY_FD);
740 	else
741 		MY_CLRBIT(sc, MY_TCRRCR, MY_FD);
742 	if (restart)
743 		MY_SETBIT(sc, MY_TCRRCR, MY_TE | MY_RE);
744 	return;
745 }
746 
747 static void
748 my_reset(struct my_softc * sc)
749 {
750 	int    i;
751 
752 	MY_LOCK_ASSERT(sc);
753 	MY_SETBIT(sc, MY_BCR, MY_SWR);
754 	for (i = 0; i < MY_TIMEOUT; i++) {
755 		DELAY(10);
756 		if (!(CSR_READ_4(sc, MY_BCR) & MY_SWR))
757 			break;
758 	}
759 	if (i == MY_TIMEOUT)
760 		device_printf(sc->my_dev, "reset never completed!\n");
761 
762 	/* Wait a little while for the chip to get its brains in order. */
763 	DELAY(1000);
764 	return;
765 }
766 
767 /*
768  * Probe for a Myson chip. Check the PCI vendor and device IDs against our
769  * list and return a device name if we find a match.
770  */
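/*
 * The matching table entry is also remembered in my_info_tmp so that
 * my_attach() can tell which chip variant it is dealing with.
 */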
771 static int
772 my_probe(device_t dev)
773 {
774 	struct my_type *t;
775 
776 	t = my_devs;
777 	while (t->my_name != NULL) {
778 		if ((pci_get_vendor(dev) == t->my_vid) &&
779 		    (pci_get_device(dev) == t->my_did)) {
780 			device_set_desc(dev, t->my_name);
781 			my_info_tmp = t;
782 			return (BUS_PROBE_DEFAULT);
783 		}
784 		t++;
785 	}
786 	return (ENXIO);
787 }
788 
789 /*
790  * Attach the interface. Allocate softc structures, do ifmedia setup and
791  * ethernet/BPF attach.
792  */
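/*
 * Attach order: enable bus mastering, map the register BAR and the IRQ,
 * reset the chip, read the station address from the MY_PAR0 registers,
 * carve an aligned descriptor area out of a malloc'ed buffer, locate the
 * PHY (except on the MTD803, whose PHY registers are mapped into the
 * chip's own register space), run one forced autonegotiation pass to seed
 * the ifmedia state, and finally call ether_ifattach() and hook up the
 * interrupt handler.
 */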
793 static int
794 my_attach(device_t dev)
795 {
796 	int             i;
797 	u_char          eaddr[ETHER_ADDR_LEN];
798 	u_int32_t       iobase;
799 	struct my_softc *sc;
800 	struct ifnet   *ifp;
801 	int             media = IFM_ETHER | IFM_100_TX | IFM_FDX;
802 	unsigned int    round;
803 	caddr_t         roundptr;
804 	struct my_type *p;
805 	u_int16_t       phy_vid, phy_did, phy_sts = 0;
806 	int             rid, error = 0;
807 
808 	sc = device_get_softc(dev);
809 	sc->my_dev = dev;
810 	mtx_init(&sc->my_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
811 	    MTX_DEF);
812 	callout_init_mtx(&sc->my_autoneg_timer, &sc->my_mtx, 0);
813 	callout_init_mtx(&sc->my_watchdog, &sc->my_mtx, 0);
814 
815 	/*
816 	 * Map control/status registers.
817 	 */
818 	pci_enable_busmaster(dev);
819 
820 	if (my_info_tmp->my_did == MTD800ID) {
821 		iobase = pci_read_config(dev, MY_PCI_LOIO, 4);
822 		if (iobase & 0x300)
823 			MY_USEIOSPACE = 0;
824 	}
825 
826 	rid = MY_RID;
827 	sc->my_res = bus_alloc_resource_any(dev, MY_RES, &rid, RF_ACTIVE);
828 
829 	if (sc->my_res == NULL) {
830 		device_printf(dev, "couldn't map ports/memory\n");
831 		error = ENXIO;
832 		goto destroy_mutex;
833 	}
834 	sc->my_btag = rman_get_bustag(sc->my_res);
835 	sc->my_bhandle = rman_get_bushandle(sc->my_res);
836 
837 	rid = 0;
838 	sc->my_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
839 					    RF_SHAREABLE | RF_ACTIVE);
840 
841 	if (sc->my_irq == NULL) {
842 		device_printf(dev, "couldn't map interrupt\n");
843 		error = ENXIO;
844 		goto release_io;
845 	}
846 
847 	sc->my_info = my_info_tmp;
848 
849 	/* Reset the adapter. */
850 	MY_LOCK(sc);
851 	my_reset(sc);
852 	MY_UNLOCK(sc);
853 
854 	/*
855 	 * Get station address
856 	 */
857 	for (i = 0; i < ETHER_ADDR_LEN; ++i)
858 		eaddr[i] = CSR_READ_1(sc, MY_PAR0 + i);
859 
860 	sc->my_ldata_ptr = malloc(sizeof(struct my_list_data) + 8,
861 				  M_DEVBUF, M_NOWAIT);
862 	if (sc->my_ldata_ptr == NULL) {
863 		device_printf(dev, "no memory for list buffers!\n");
864 		error = ENXIO;
865 		goto release_irq;
866 	}
867 	sc->my_ldata = (struct my_list_data *) sc->my_ldata_ptr;
868 	round = (uintptr_t)sc->my_ldata_ptr & 0xF;
869 	roundptr = sc->my_ldata_ptr;
870 	for (i = 0; i < 8; i++) {
871 		if (round % 8) {
872 			round++;
873 			roundptr++;
874 		} else
875 			break;
876 	}
877 	sc->my_ldata = (struct my_list_data *) roundptr;
878 	bzero(sc->my_ldata, sizeof(struct my_list_data));
879 
880 	ifp = sc->my_ifp = if_alloc(IFT_ETHER);
881 	if (ifp == NULL) {
882 		device_printf(dev, "can not if_alloc()\n");
883 		error = ENOSPC;
884 		goto free_ldata;
885 	}
886 	ifp->if_softc = sc;
887 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
888 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
889 	ifp->if_ioctl = my_ioctl;
890 	ifp->if_start = my_start;
891 	ifp->if_init = my_init;
892 	ifp->if_baudrate = 10000000;
893 	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
894 	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
895 	IFQ_SET_READY(&ifp->if_snd);
896 
897 	if (sc->my_info->my_did == MTD803ID)
898 		sc->my_pinfo = my_phys;
899 	else {
900 		if (bootverbose)
901 			device_printf(dev, "probing for a PHY\n");
902 		MY_LOCK(sc);
903 		for (i = MY_PHYADDR_MIN; i < MY_PHYADDR_MAX + 1; i++) {
904 			if (bootverbose)
905 				device_printf(dev, "checking address: %d\n", i);
906 			sc->my_phy_addr = i;
907 			phy_sts = my_phy_readreg(sc, PHY_BMSR);
908 			if ((phy_sts != 0) && (phy_sts != 0xffff))
909 				break;
910 			else
911 				phy_sts = 0;
912 		}
913 		if (phy_sts) {
914 			phy_vid = my_phy_readreg(sc, PHY_VENID);
915 			phy_did = my_phy_readreg(sc, PHY_DEVID);
916 			if (bootverbose) {
917 				device_printf(dev, "found PHY at address %d, ",
918 				    sc->my_phy_addr);
919 				printf("vendor id: %x device id: %x\n",
920 				    phy_vid, phy_did);
921 			}
922 			p = my_phys;
923 			while (p->my_vid) {
924 				if (phy_vid == p->my_vid) {
925 					sc->my_pinfo = p;
926 					break;
927 				}
928 				p++;
929 			}
930 			if (sc->my_pinfo == NULL)
931 				sc->my_pinfo = &my_phys[PHY_UNKNOWN];
932 			if (bootverbose)
933 				device_printf(dev, "PHY type: %s\n",
934 				       sc->my_pinfo->my_name);
935 		} else {
936 			MY_UNLOCK(sc);
937 			device_printf(dev, "MII without any phy!\n");
938 			error = ENXIO;
939 			goto free_if;
940 		}
941 		MY_UNLOCK(sc);
942 	}
943 
944 	/* Do ifmedia setup. */
945 	ifmedia_init(&sc->ifmedia, 0, my_ifmedia_upd, my_ifmedia_sts);
946 	MY_LOCK(sc);
947 	my_getmode_mii(sc);
948 	my_autoneg_mii(sc, MY_FLAG_FORCEDELAY, 1);
949 	media = sc->ifmedia.ifm_media;
950 	my_stop(sc);
951 	MY_UNLOCK(sc);
952 	ifmedia_set(&sc->ifmedia, media);
953 
954 	ether_ifattach(ifp, eaddr);
955 
956 	error = bus_setup_intr(dev, sc->my_irq, INTR_TYPE_NET | INTR_MPSAFE,
957 			       NULL, my_intr, sc, &sc->my_intrhand);
958 
959 	if (error) {
960 		device_printf(dev, "couldn't set up irq\n");
961 		goto detach_if;
962 	}
963 
964 	return (0);
965 
966 detach_if:
967 	ether_ifdetach(ifp);
968 free_if:
969 	if_free(ifp);
970 free_ldata:
971 	free(sc->my_ldata_ptr, M_DEVBUF);
972 release_irq:
973 	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
974 release_io:
975 	bus_release_resource(dev, MY_RES, MY_RID, sc->my_res);
976 destroy_mutex:
977 	mtx_destroy(&sc->my_mtx);
978 	return (error);
979 }
980 
981 static int
982 my_detach(device_t dev)
983 {
984 	struct my_softc *sc;
985 	struct ifnet   *ifp;
986 
987 	sc = device_get_softc(dev);
988 	ifp = sc->my_ifp;
989 	ether_ifdetach(ifp);
990 	MY_LOCK(sc);
991 	my_stop(sc);
992 	MY_UNLOCK(sc);
993 	bus_teardown_intr(dev, sc->my_irq, sc->my_intrhand);
994 	callout_drain(&sc->my_watchdog);
995 	callout_drain(&sc->my_autoneg_timer);
996 
997 	if_free(ifp);
998 	free(sc->my_ldata_ptr, M_DEVBUF);
999 
1000 	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
1001 	bus_release_resource(dev, MY_RES, MY_RID, sc->my_res);
1002 	mtx_destroy(&sc->my_mtx);
1003 	return (0);
1004 }
1005 
1006 /*
1007  * Initialize the transmit descriptors.
1008  */
1009 static int
1010 my_list_tx_init(struct my_softc * sc)
1011 {
1012 	struct my_chain_data *cd;
1013 	struct my_list_data *ld;
1014 	int             i;
1015 
1016 	MY_LOCK_ASSERT(sc);
1017 	cd = &sc->my_cdata;
1018 	ld = sc->my_ldata;
1019 	for (i = 0; i < MY_TX_LIST_CNT; i++) {
1020 		cd->my_tx_chain[i].my_ptr = &ld->my_tx_list[i];
1021 		if (i == (MY_TX_LIST_CNT - 1))
1022 			cd->my_tx_chain[i].my_nextdesc = &cd->my_tx_chain[0];
1023 		else
1024 			cd->my_tx_chain[i].my_nextdesc =
1025 			    &cd->my_tx_chain[i + 1];
1026 	}
1027 	cd->my_tx_free = &cd->my_tx_chain[0];
1028 	cd->my_tx_tail = cd->my_tx_head = NULL;
1029 	return (0);
1030 }
1031 
1032 /*
1033  * Initialize the RX descriptors and allocate mbufs for them. Note that we
1034  * arrange the descriptors in a closed ring, so that the last descriptor
1035  * points back to the first.
1036  */
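/*
 * The hardware descriptors carry physical addresses obtained with vtophys(),
 * and every slot is seeded with an mbuf cluster and handed to the NIC
 * (MY_OWNByNIC) before the receiver is started.
 */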
1037 static int
1038 my_list_rx_init(struct my_softc * sc)
1039 {
1040 	struct my_chain_data *cd;
1041 	struct my_list_data *ld;
1042 	int             i;
1043 
1044 	MY_LOCK_ASSERT(sc);
1045 	cd = &sc->my_cdata;
1046 	ld = sc->my_ldata;
1047 	for (i = 0; i < MY_RX_LIST_CNT; i++) {
1048 		cd->my_rx_chain[i].my_ptr =
1049 		    (struct my_desc *) & ld->my_rx_list[i];
1050 		if (my_newbuf(sc, &cd->my_rx_chain[i]) == ENOBUFS) {
1051 			MY_UNLOCK(sc);
1052 			return (ENOBUFS);
1053 		}
1054 		if (i == (MY_RX_LIST_CNT - 1)) {
1055 			cd->my_rx_chain[i].my_nextdesc = &cd->my_rx_chain[0];
1056 			ld->my_rx_list[i].my_next = vtophys(&ld->my_rx_list[0]);
1057 		} else {
1058 			cd->my_rx_chain[i].my_nextdesc =
1059 			    &cd->my_rx_chain[i + 1];
1060 			ld->my_rx_list[i].my_next =
1061 			    vtophys(&ld->my_rx_list[i + 1]);
1062 		}
1063 	}
1064 	cd->my_rx_head = &cd->my_rx_chain[0];
1065 	return (0);
1066 }
1067 
1068 /*
1069  * Initialize an RX descriptor and attach an MBUF cluster.
1070  */
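/*
 * A header mbuf plus cluster is allocated with M_NOWAIT; its physical
 * address and buffer size are written into the descriptor and ownership is
 * given back to the NIC.  ENOBUFS is returned if either allocation fails.
 */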
1071 static int
1072 my_newbuf(struct my_softc * sc, struct my_chain_onefrag * c)
1073 {
1074 	struct mbuf    *m_new = NULL;
1075 
1076 	MY_LOCK_ASSERT(sc);
1077 	MGETHDR(m_new, M_NOWAIT, MT_DATA);
1078 	if (m_new == NULL) {
1079 		device_printf(sc->my_dev,
1080 		    "no memory for rx list -- packet dropped!\n");
1081 		return (ENOBUFS);
1082 	}
1083 	if (!(MCLGET(m_new, M_NOWAIT))) {
1084 		device_printf(sc->my_dev,
1085 		    "no memory for rx list -- packet dropped!\n");
1086 		m_freem(m_new);
1087 		return (ENOBUFS);
1088 	}
1089 	c->my_mbuf = m_new;
1090 	c->my_ptr->my_data = vtophys(mtod(m_new, caddr_t));
1091 	c->my_ptr->my_ctl = (MCLBYTES - 1) << MY_RBSShift;
1092 	c->my_ptr->my_status = MY_OWNByNIC;
1093 	return (0);
1094 }
1095 
1096 /*
1097  * A frame has been uploaded: pass the resulting mbuf chain up to the higher
1098  * level protocols.
1099  */
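/*
 * The ring is walked until a descriptor still owned by the NIC is reached.
 * Short frames are copied out with m_devget() so the cluster can be reused
 * at once; larger frames are handed up whole and replaced via my_newbuf(),
 * with the packet dropped (and the old buffer recycled) if no replacement
 * cluster is available.
 */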
1100 static void
1101 my_rxeof(struct my_softc * sc)
1102 {
1103 	struct ether_header *eh;
1104 	struct mbuf    *m;
1105 	struct ifnet   *ifp;
1106 	struct my_chain_onefrag *cur_rx;
1107 	int             total_len = 0;
1108 	u_int32_t       rxstat;
1109 
1110 	MY_LOCK_ASSERT(sc);
1111 	ifp = sc->my_ifp;
1112 	while (!((rxstat = sc->my_cdata.my_rx_head->my_ptr->my_status)
1113 	    & MY_OWNByNIC)) {
1114 		cur_rx = sc->my_cdata.my_rx_head;
1115 		sc->my_cdata.my_rx_head = cur_rx->my_nextdesc;
1116 
1117 		if (rxstat & MY_ES) {	/* error summary: give up this rx pkt */
1118 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1119 			cur_rx->my_ptr->my_status = MY_OWNByNIC;
1120 			continue;
1121 		}
1122 		/* No errors; receive the packet. */
1123 		total_len = (rxstat & MY_FLNGMASK) >> MY_FLNGShift;
1124 		total_len -= ETHER_CRC_LEN;
1125 
1126 		if (total_len < MINCLSIZE) {
1127 			m = m_devget(mtod(cur_rx->my_mbuf, char *),
1128 			    total_len, 0, ifp, NULL);
1129 			cur_rx->my_ptr->my_status = MY_OWNByNIC;
1130 			if (m == NULL) {
1131 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1132 				continue;
1133 			}
1134 		} else {
1135 			m = cur_rx->my_mbuf;
1136 			/*
1137 			 * Try to conjure up a new mbuf cluster. If that
1138 			 * fails, it means we have an out of memory condition
1139 			 * and should leave the buffer in place and continue.
1140 			 * This will result in a lost packet, but there's
1141 			 * little else we can do in this situation.
1142 			 */
1143 			if (my_newbuf(sc, cur_rx) == ENOBUFS) {
1144 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1145 				cur_rx->my_ptr->my_status = MY_OWNByNIC;
1146 				continue;
1147 			}
1148 			m->m_pkthdr.rcvif = ifp;
1149 			m->m_pkthdr.len = m->m_len = total_len;
1150 		}
1151 		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1152 		eh = mtod(m, struct ether_header *);
1153 #if NBPFILTER > 0
1154 		/*
1155 		 * Handle BPF listeners. Let the BPF user see the packet, but
1156 		 * don't pass it up to the ether_input() layer unless it's a
1157 		 * broadcast packet, multicast packet, matches our ethernet
1158 		 * address or the interface is in promiscuous mode.
1159 		 */
1160 		if (bpf_peers_present(ifp->if_bpf)) {
1161 			bpf_mtap(ifp->if_bpf, m);
1162 			if (ifp->if_flags & IFF_PROMISC &&
1163 			    (bcmp(eh->ether_dhost, IF_LLADDR(sc->my_ifp),
1164 				ETHER_ADDR_LEN) &&
1165 			     (eh->ether_dhost[0] & 1) == 0)) {
1166 				m_freem(m);
1167 				continue;
1168 			}
1169 		}
1170 #endif
1171 		MY_UNLOCK(sc);
1172 		(*ifp->if_input)(ifp, m);
1173 		MY_LOCK(sc);
1174 	}
1175 	return;
1176 }
1177 
1178 /*
1179  * A frame was downloaded to the chip. It's safe for us to clean up the list
1180  * buffers.
1181  */
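/*
 * Completed descriptors are reaped from my_tx_head until one still owned by
 * the NIC (or not yet handed over) is found; error and collision counters
 * are updated from the per-descriptor status unless the chip runs in
 * enhanced mode, in which case collisions are read from MY_TSR instead.
 */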
1182 static void
1183 my_txeof(struct my_softc * sc)
1184 {
1185 	struct my_chain *cur_tx;
1186 	struct ifnet   *ifp;
1187 
1188 	MY_LOCK_ASSERT(sc);
1189 	ifp = sc->my_ifp;
1190 	/* Clear the timeout timer. */
1191 	sc->my_timer = 0;
1192 	if (sc->my_cdata.my_tx_head == NULL) {
1193 		return;
1194 	}
1195 	/*
1196 	 * Go through our tx list and free mbufs for those frames that have
1197 	 * been transmitted.
1198 	 */
1199 	while (sc->my_cdata.my_tx_head->my_mbuf != NULL) {
1200 		u_int32_t       txstat;
1201 
1202 		cur_tx = sc->my_cdata.my_tx_head;
1203 		txstat = MY_TXSTATUS(cur_tx);
1204 		if ((txstat & MY_OWNByNIC) || txstat == MY_UNSENT)
1205 			break;
1206 		if (!(CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced)) {
1207 			if (txstat & MY_TXERR) {
1208 				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1209 				if (txstat & MY_EC) /* excessive collision */
1210 					if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
1211 				if (txstat & MY_LC)	/* late collision */
1212 					if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
1213 			}
1214 			if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
1215 			    (txstat & MY_NCRMASK) >> MY_NCRShift);
1216 		}
1217 		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1218 		m_freem(cur_tx->my_mbuf);
1219 		cur_tx->my_mbuf = NULL;
1220 		if (sc->my_cdata.my_tx_head == sc->my_cdata.my_tx_tail) {
1221 			sc->my_cdata.my_tx_head = NULL;
1222 			sc->my_cdata.my_tx_tail = NULL;
1223 			break;
1224 		}
1225 		sc->my_cdata.my_tx_head = cur_tx->my_nextdesc;
1226 	}
1227 	if (CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced) {
1228 		if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (CSR_READ_4(sc, MY_TSR) & MY_NCRMask));
1229 	}
1230 	return;
1231 }
1232 
1233 /*
1234  * TX 'end of channel' interrupt handler.
1235  */
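/*
 * If the transmit list has drained, clear IFF_DRV_OACTIVE and start any
 * autonegotiation that was deferred while traffic was pending; otherwise
 * hand a still-unsent head descriptor to the NIC and issue a new transmit
 * poll demand.
 */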
1236 static void
1237 my_txeoc(struct my_softc * sc)
1238 {
1239 	struct ifnet   *ifp;
1240 
1241 	MY_LOCK_ASSERT(sc);
1242 	ifp = sc->my_ifp;
1243 	sc->my_timer = 0;
1244 	if (sc->my_cdata.my_tx_head == NULL) {
1245 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1246 		sc->my_cdata.my_tx_tail = NULL;
1247 		if (sc->my_want_auto)
1248 			my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
1249 	} else {
1250 		if (MY_TXOWN(sc->my_cdata.my_tx_head) == MY_UNSENT) {
1251 			MY_TXOWN(sc->my_cdata.my_tx_head) = MY_OWNByNIC;
1252 			sc->my_timer = 5;
1253 			CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF);
1254 		}
1255 	}
1256 	return;
1257 }
1258 
1259 static void
1260 my_intr(void *arg)
1261 {
1262 	struct my_softc *sc;
1263 	struct ifnet   *ifp;
1264 	u_int32_t       status;
1265 
1266 	sc = arg;
1267 	MY_LOCK(sc);
1268 	ifp = sc->my_ifp;
1269 	if (!(ifp->if_flags & IFF_UP)) {
1270 		MY_UNLOCK(sc);
1271 		return;
1272 	}
1273 	/* Disable interrupts. */
1274 	CSR_WRITE_4(sc, MY_IMR, 0x00000000);
1275 
1276 	for (;;) {
1277 		status = CSR_READ_4(sc, MY_ISR);
1278 		status &= MY_INTRS;
1279 		if (status)
1280 			CSR_WRITE_4(sc, MY_ISR, status);
1281 		else
1282 			break;
1283 
1284 		if (status & MY_RI)	/* receive interrupt */
1285 			my_rxeof(sc);
1286 
1287 		if ((status & MY_RBU) || (status & MY_RxErr)) {
1288 			/* rx buffer unavailable or rx error */
1289 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1290 #ifdef foo
1291 			my_stop(sc);
1292 			my_reset(sc);
1293 			my_init_locked(sc);
1294 #endif
1295 		}
1296 		if (status & MY_TI)	/* tx interrupt */
1297 			my_txeof(sc);
1298 		if (status & MY_ETI)	/* tx early interrupt */
1299 			my_txeof(sc);
1300 		if (status & MY_TBU)	/* tx buffer unavailable */
1301 			my_txeoc(sc);
1302 
1303 #if 0				/* 90/1/18 delete */
1304 		if (status & MY_FBE) {
1305 			my_reset(sc);
1306 			my_init_locked(sc);
1307 		}
1308 #endif
1309 	}
1310 
1311 	/* Re-enable interrupts. */
1312 	CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
1313 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1314 		my_start_locked(ifp);
1315 	MY_UNLOCK(sc);
1316 	return;
1317 }
1318 
1319 /*
1320  * Encapsulate an mbuf chain in a descriptor: the chain is coalesced into a
1321  * single mbuf (cluster if needed) whose physical address is stored in the
1322  * first fragment pointer.
1323 static int
1324 my_encap(struct my_softc * sc, struct my_chain * c, struct mbuf * m_head)
1325 {
1326 	struct my_desc *f = NULL;
1327 	int             total_len;
1328 	struct mbuf    *m, *m_new = NULL;
1329 
1330 	MY_LOCK_ASSERT(sc);
1331 	/* calculate the total tx pkt length */
1332 	total_len = 0;
1333 	for (m = m_head; m != NULL; m = m->m_next)
1334 		total_len += m->m_len;
1335 	/*
1336 	 * Start packing the mbufs in this chain into the fragment pointers.
1337 	 * Stop when we run out of fragments or hit the end of the mbuf
1338 	 * chain.
1339 	 */
1340 	m = m_head;
1341 	MGETHDR(m_new, M_NOWAIT, MT_DATA);
1342 	if (m_new == NULL) {
1343 		device_printf(sc->my_dev, "no memory for tx list\n");
1344 		return (1);
1345 	}
1346 	if (m_head->m_pkthdr.len > MHLEN) {
1347 		if (!(MCLGET(m_new, M_NOWAIT))) {
1348 			m_freem(m_new);
1349 			device_printf(sc->my_dev, "no memory for tx list\n");
1350 			return (1);
1351 		}
1352 	}
1353 	m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t));
1354 	m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1355 	m_freem(m_head);
1356 	m_head = m_new;
1357 	f = &c->my_ptr->my_frag[0];
1358 	f->my_status = 0;
1359 	f->my_data = vtophys(mtod(m_new, caddr_t));
1360 	total_len = m_new->m_len;
1361 	f->my_ctl = MY_TXFD | MY_TXLD | MY_CRCEnable | MY_PADEnable;
1362 	f->my_ctl |= total_len << MY_PKTShift;	/* pkt size */
1363 	f->my_ctl |= total_len;	/* buffer size */
1364 	/* 89/12/29 add, for mtd891 *//* [ 89? ] */
1365 	if (sc->my_info->my_did == MTD891ID)
1366 		f->my_ctl |= MY_ETIControl | MY_RetryTxLC;
1367 	c->my_mbuf = m_head;
1368 	c->my_lastdesc = 0;
1369 	MY_TXNEXT(c) = vtophys(&c->my_nextdesc->my_ptr->my_frag[0]);
1370 	return (0);
1371 }
1372 
1373 /*
1374  * Main transmit routine.  Packets are pulled off the send queue and handed
1375  * to my_encap(), which copies each one into a contiguous buffer the chip
1376  * can DMA from.  A pointer to each mbuf is kept in the software descriptor,
1377  * since the hardware fragment pointers only hold physical addresses.
1378  */
1379 static void
1380 my_start(struct ifnet * ifp)
1381 {
1382 	struct my_softc *sc;
1383 
1384 	sc = ifp->if_softc;
1385 	MY_LOCK(sc);
1386 	my_start_locked(ifp);
1387 	MY_UNLOCK(sc);
1388 }
1389 
1390 static void
1391 my_start_locked(struct ifnet * ifp)
1392 {
1393 	struct my_softc *sc;
1394 	struct mbuf    *m_head = NULL;
1395 	struct my_chain *cur_tx = NULL, *start_tx;
1396 
1397 	sc = ifp->if_softc;
1398 	MY_LOCK_ASSERT(sc);
1399 	if (sc->my_autoneg) {
1400 		sc->my_tx_pend = 1;
1401 		return;
1402 	}
1403 	/*
1404 	 * Check for an available queue slot. If there are none, punt.
1405 	 */
1406 	if (sc->my_cdata.my_tx_free->my_mbuf != NULL) {
1407 		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1408 		return;
1409 	}
1410 	start_tx = sc->my_cdata.my_tx_free;
1411 	while (sc->my_cdata.my_tx_free->my_mbuf == NULL) {
1412 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1413 		if (m_head == NULL)
1414 			break;
1415 
1416 		/* Pick a descriptor off the free list. */
1417 		cur_tx = sc->my_cdata.my_tx_free;
1418 		sc->my_cdata.my_tx_free = cur_tx->my_nextdesc;
1419 
1420 		/* Pack the data into the descriptor. */
1421 		my_encap(sc, cur_tx, m_head);
1422 
1423 		if (cur_tx != start_tx)
1424 			MY_TXOWN(cur_tx) = MY_OWNByNIC;
1425 #if NBPFILTER > 0
1426 		/*
1427 		 * If there's a BPF listener, bounce a copy of this frame to
1428 		 * him.
1429 		 */
1430 		BPF_MTAP(ifp, cur_tx->my_mbuf);
1431 #endif
1432 	}
1433 	/*
1434 	 * If there are no packets queued, bail.
1435 	 */
1436 	if (cur_tx == NULL) {
1437 		return;
1438 	}
1439 	/*
1440 	 * Place the request for the upload interrupt in the last descriptor
1441 	 * in the chain. This way, if we're chaining several packets at once,
1442 	 * we'll only get an interrupt once for the whole chain rather than
1443 	 * once for each packet.
1444 	 */
1445 	MY_TXCTL(cur_tx) |= MY_TXIC;
1446 	cur_tx->my_ptr->my_frag[0].my_ctl |= MY_TXIC;
1447 	sc->my_cdata.my_tx_tail = cur_tx;
1448 	if (sc->my_cdata.my_tx_head == NULL)
1449 		sc->my_cdata.my_tx_head = start_tx;
1450 	MY_TXOWN(start_tx) = MY_OWNByNIC;
1451 	CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF);	/* tx polling demand */
1452 
1453 	/*
1454 	 * Set a timeout in case the chip goes out to lunch.
1455 	 */
1456 	sc->my_timer = 5;
1457 	return;
1458 }
1459 
1460 static void
1461 my_init(void *xsc)
1462 {
1463 	struct my_softc *sc = xsc;
1464 
1465 	MY_LOCK(sc);
1466 	my_init_locked(sc);
1467 	MY_UNLOCK(sc);
1468 }
1469 
1470 static void
1471 my_init_locked(struct my_softc *sc)
1472 {
1473 	struct ifnet   *ifp = sc->my_ifp;
1474 	u_int16_t       phy_bmcr = 0;
1475 
1476 	MY_LOCK_ASSERT(sc);
1477 	if (sc->my_autoneg) {
1478 		return;
1479 	}
1480 	if (sc->my_pinfo != NULL)
1481 		phy_bmcr = my_phy_readreg(sc, PHY_BMCR);
1482 	/*
1483 	 * Cancel pending I/O and free all RX/TX buffers.
1484 	 */
1485 	my_stop(sc);
1486 	my_reset(sc);
1487 
1488 	/*
1489 	 * Set cache alignment and burst length.
1490 	 */
1491 #if 0				/* 89/9/1 modify,  */
1492 	CSR_WRITE_4(sc, MY_BCR, MY_RPBLE512);
1493 	CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF);
1494 #endif
1495 	CSR_WRITE_4(sc, MY_BCR, MY_PBL8);
1496 	CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF | MY_RBLEN | MY_RPBLE512);
1497 	/*
1498 	 * 89/12/29 add, for mtd891,
1499 	 */
1500 	if (sc->my_info->my_did == MTD891ID) {
1501 		MY_SETBIT(sc, MY_BCR, MY_PROG);
1502 		MY_SETBIT(sc, MY_TCRRCR, MY_Enhanced);
1503 	}
1504 	my_setcfg(sc, phy_bmcr);
1505 	/* Init circular RX list. */
1506 	if (my_list_rx_init(sc) == ENOBUFS) {
1507 		device_printf(sc->my_dev, "init failed: no memory for rx buffers\n");
1508 		my_stop(sc);
1509 		return;
1510 	}
1511 	/* Init TX descriptors. */
1512 	my_list_tx_init(sc);
1513 
1514 	/* If we want promiscuous mode, set the allframes bit. */
1515 	if (ifp->if_flags & IFF_PROMISC)
1516 		MY_SETBIT(sc, MY_TCRRCR, MY_PROM);
1517 	else
1518 		MY_CLRBIT(sc, MY_TCRRCR, MY_PROM);
1519 
1520 	/*
1521 	 * Set capture broadcast bit to capture broadcast frames.
1522 	 */
1523 	if (ifp->if_flags & IFF_BROADCAST)
1524 		MY_SETBIT(sc, MY_TCRRCR, MY_AB);
1525 	else
1526 		MY_CLRBIT(sc, MY_TCRRCR, MY_AB);
1527 
1528 	/*
1529 	 * Program the multicast filter, if necessary.
1530 	 */
1531 	my_setmulti(sc);
1532 
1533 	/*
1534 	 * Load the address of the RX list.
1535 	 */
1536 	MY_CLRBIT(sc, MY_TCRRCR, MY_RE);
1537 	CSR_WRITE_4(sc, MY_RXLBA, vtophys(&sc->my_ldata->my_rx_list[0]));
1538 
1539 	/*
1540 	 * Enable interrupts.
1541 	 */
1542 	CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
1543 	CSR_WRITE_4(sc, MY_ISR, 0xFFFFFFFF);
1544 
1545 	/* Enable receiver and transmitter. */
1546 	MY_SETBIT(sc, MY_TCRRCR, MY_RE);
1547 	MY_CLRBIT(sc, MY_TCRRCR, MY_TE);
1548 	CSR_WRITE_4(sc, MY_TXLBA, vtophys(&sc->my_ldata->my_tx_list[0]));
1549 	MY_SETBIT(sc, MY_TCRRCR, MY_TE);
1550 
1551 	/* Restore state of BMCR */
1552 	if (sc->my_pinfo != NULL)
1553 		my_phy_writereg(sc, PHY_BMCR, phy_bmcr);
1554 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1555 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1556 
1557 	callout_reset(&sc->my_watchdog, hz, my_watchdog, sc);
1558 	return;
1559 }
1560 
1561 /*
1562  * Set media options.
1563  */
1564 
1565 static int
1566 my_ifmedia_upd(struct ifnet * ifp)
1567 {
1568 	struct my_softc *sc;
1569 	struct ifmedia *ifm;
1570 
1571 	sc = ifp->if_softc;
1572 	MY_LOCK(sc);
1573 	ifm = &sc->ifmedia;
1574 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
1575 		MY_UNLOCK(sc);
1576 		return (EINVAL);
1577 	}
1578 	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
1579 		my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
1580 	else
1581 		my_setmode_mii(sc, ifm->ifm_media);
1582 	MY_UNLOCK(sc);
1583 	return (0);
1584 }
1585 
1586 /*
1587  * Report current media status.
1588  */
1589 
1590 static void
1591 my_ifmedia_sts(struct ifnet * ifp, struct ifmediareq * ifmr)
1592 {
1593 	struct my_softc *sc;
1594 	u_int16_t advert = 0, ability = 0;
1595 
1596 	sc = ifp->if_softc;
1597 	MY_LOCK(sc);
1598 	ifmr->ifm_active = IFM_ETHER;
1599 	if (!(my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
1600 #if 0				/* this version did not support 1000M, */
1601 		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_1000)
1602 			ifmr->ifm_active = IFM_ETHER | IFM_1000_T;
1603 #endif
1604 		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
1605 			ifmr->ifm_active = IFM_ETHER | IFM_100_TX;
1606 		else
1607 			ifmr->ifm_active = IFM_ETHER | IFM_10_T;
1608 		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
1609 			ifmr->ifm_active |= IFM_FDX;
1610 		else
1611 			ifmr->ifm_active |= IFM_HDX;
1612 
1613 		MY_UNLOCK(sc);
1614 		return;
1615 	}
1616 	ability = my_phy_readreg(sc, PHY_LPAR);
1617 	advert = my_phy_readreg(sc, PHY_ANAR);
1618 
1619 #if 0				/* this version did not support 1000M, */
1620 	if (sc->my_pinfo->my_vid == MarvellPHYID0) {
1621 		ability2 = my_phy_readreg(sc, PHY_1000SR);
1622 		if (ability2 & PHY_1000SR_1000BTXFULL) {
1623 			advert = 0;
1624 			ability = 0;
1625 			ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
1626 		} else if (ability2 & PHY_1000SR_1000BTXHALF) {
1627 			advert = 0;
1628 			ability = 0;
1629 			ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_HDX;
1630 		}
1631 	}
1632 #endif
1633 	if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4)
1634 		ifmr->ifm_active = IFM_ETHER | IFM_100_T4;
1635 	else if (advert & PHY_ANAR_100BTXFULL && ability & PHY_ANAR_100BTXFULL)
1636 		ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1637 	else if (advert & PHY_ANAR_100BTXHALF && ability & PHY_ANAR_100BTXHALF)
1638 		ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_HDX;
1639 	else if (advert & PHY_ANAR_10BTFULL && ability & PHY_ANAR_10BTFULL)
1640 		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_FDX;
1641 	else if (advert & PHY_ANAR_10BTHALF && ability & PHY_ANAR_10BTHALF)
1642 		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_HDX;
1643 	MY_UNLOCK(sc);
1644 	return;
1645 }
1646 
1647 static int
1648 my_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
1649 {
1650 	struct my_softc *sc = ifp->if_softc;
1651 	struct ifreq   *ifr = (struct ifreq *) data;
1652 	int             error;
1653 
1654 	switch (command) {
1655 	case SIOCSIFFLAGS:
1656 		MY_LOCK(sc);
1657 		if (ifp->if_flags & IFF_UP)
1658 			my_init_locked(sc);
1659 		else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1660 			my_stop(sc);
1661 		MY_UNLOCK(sc);
1662 		error = 0;
1663 		break;
1664 	case SIOCADDMULTI:
1665 	case SIOCDELMULTI:
1666 		MY_LOCK(sc);
1667 		my_setmulti(sc);
1668 		MY_UNLOCK(sc);
1669 		error = 0;
1670 		break;
1671 	case SIOCGIFMEDIA:
1672 	case SIOCSIFMEDIA:
1673 		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
1674 		break;
1675 	default:
1676 		error = ether_ioctl(ifp, command, data);
1677 		break;
1678 	}
1679 	return (error);
1680 }
1681 
1682 static void
1683 my_watchdog(void *arg)
1684 {
1685 	struct my_softc *sc;
1686 	struct ifnet *ifp;
1687 
1688 	sc = arg;
1689 	MY_LOCK_ASSERT(sc);
1690 	callout_reset(&sc->my_watchdog, hz, my_watchdog, sc);
1691 	if (sc->my_timer == 0 || --sc->my_timer > 0)
1692 		return;
1693 
1694 	ifp = sc->my_ifp;
1695 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1696 	if_printf(ifp, "watchdog timeout\n");
1697 	if (!(my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
1698 		if_printf(ifp, "no carrier - transceiver cable problem?\n");
1699 	my_stop(sc);
1700 	my_reset(sc);
1701 	my_init_locked(sc);
1702 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1703 		my_start_locked(ifp);
1704 }
1705 
1706 /*
1707  * Stop the adapter and free any mbufs allocated to the RX and TX lists.
1708  */
1709 static void
1710 my_stop(struct my_softc * sc)
1711 {
1712 	int    i;
1713 	struct ifnet   *ifp;
1714 
1715 	MY_LOCK_ASSERT(sc);
1716 	ifp = sc->my_ifp;
1717 
1718 	callout_stop(&sc->my_autoneg_timer);
1719 	callout_stop(&sc->my_watchdog);
1720 
1721 	MY_CLRBIT(sc, MY_TCRRCR, (MY_RE | MY_TE));
1722 	CSR_WRITE_4(sc, MY_IMR, 0x00000000);
1723 	CSR_WRITE_4(sc, MY_TXLBA, 0x00000000);
1724 	CSR_WRITE_4(sc, MY_RXLBA, 0x00000000);
1725 
1726 	/*
1727 	 * Free data in the RX lists.
1728 	 */
1729 	for (i = 0; i < MY_RX_LIST_CNT; i++) {
1730 		if (sc->my_cdata.my_rx_chain[i].my_mbuf != NULL) {
1731 			m_freem(sc->my_cdata.my_rx_chain[i].my_mbuf);
1732 			sc->my_cdata.my_rx_chain[i].my_mbuf = NULL;
1733 		}
1734 	}
1735 	bzero((char *)&sc->my_ldata->my_rx_list,
1736 	    sizeof(sc->my_ldata->my_rx_list));
1737 	/*
1738 	 * Free the TX list buffers.
1739 	 */
1740 	for (i = 0; i < MY_TX_LIST_CNT; i++) {
1741 		if (sc->my_cdata.my_tx_chain[i].my_mbuf != NULL) {
1742 			m_freem(sc->my_cdata.my_tx_chain[i].my_mbuf);
1743 			sc->my_cdata.my_tx_chain[i].my_mbuf = NULL;
1744 		}
1745 	}
1746 	bzero((char *)&sc->my_ldata->my_tx_list,
1747 	    sizeof(sc->my_ldata->my_tx_list));
1748 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1749 	return;
1750 }
1751 
1752 /*
1753  * Stop all chip I/O so that the kernel's probe routines don't get confused
1754  * by errant DMAs when rebooting.
1755  */
1756 static int
1757 my_shutdown(device_t dev)
1758 {
1759 	struct my_softc *sc;
1760 
1761 	sc = device_get_softc(dev);
1762 	MY_LOCK(sc);
1763 	my_stop(sc);
1764 	MY_UNLOCK(sc);
1765 	return 0;
1766 }
1767