1 /*-
2  * Copyright (c) 1997, 1998, 1999
3  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /*
37  * 3Com 3c90x Etherlink XL PCI NIC driver
38  *
39  * Supports the 3Com "boomerang", "cyclone" and "hurricane" PCI
40  * bus-master chips (3c90x cards and embedded controllers) including
41  * the following:
42  *
43  * 3Com 3c900-TPO	10Mbps/RJ-45
44  * 3Com 3c900-COMBO	10Mbps/RJ-45,AUI,BNC
45  * 3Com 3c905-TX	10/100Mbps/RJ-45
46  * 3Com 3c905-T4	10/100Mbps/RJ-45
47  * 3Com 3c900B-TPO	10Mbps/RJ-45
48  * 3Com 3c900B-COMBO	10Mbps/RJ-45,AUI,BNC
49  * 3Com 3c900B-TPC	10Mbps/RJ-45,BNC
50  * 3Com 3c900B-FL	10Mbps/Fiber-optic
51  * 3Com 3c905B-COMBO	10/100Mbps/RJ-45,AUI,BNC
52  * 3Com 3c905B-TX	10/100Mbps/RJ-45
53  * 3Com 3c905B-FL/FX	10/100Mbps/Fiber-optic
54  * 3Com 3c905C-TX	10/100Mbps/RJ-45 (Tornado ASIC)
55  * 3Com 3c980-TX	10/100Mbps server adapter (Hurricane ASIC)
56  * 3Com 3c980C-TX	10/100Mbps server adapter (Tornado ASIC)
57  * 3Com 3cSOHO100-TX	10/100Mbps/RJ-45 (Hurricane ASIC)
58  * 3Com 3c450-TX	10/100Mbps/RJ-45 (Tornado ASIC)
59  * 3Com 3c555		10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane)
60  * 3Com 3c556		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
61  * 3Com 3c556B		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
62  * 3Com 3c575TX		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
63  * 3Com 3c575B		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
64  * 3Com 3c575C		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
65  * 3Com 3cxfem656	10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
66  * 3Com 3cxfem656b	10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
67  * 3Com 3cxfem656c	10/100Mbps/RJ-45 (Cardbus, Tornado ASIC)
68  * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
69  * Dell on-board 3c920 10/100Mbps/RJ-45
70  * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
71  * Dell Latitude laptop docking station embedded 3c905-TX
72  *
73  * Written by Bill Paul <wpaul@ctr.columbia.edu>
74  * Electrical Engineering Department
75  * Columbia University, New York City
76  */
77 /*
78  * The 3c90x series chips use a bus-master DMA interface for transferring
79  * packets to and from the controller chip. Some of the "vortex" cards
80  * (3c59x) also supported a bus master mode, however for those chips
81  * you could only DMA packets to/from a contiguous memory buffer. For
82  * transmission this would mean copying the contents of the queued mbuf
83  * chain into an mbuf cluster and then DMAing the cluster. This extra
84  * copy would sort of defeat the purpose of the bus master support for
85  * any packet that doesn't fit into a single mbuf.
86  *
87  * By contrast, the 3c90x cards support a fragment-based bus master
88  * mode where mbuf chains can be encapsulated using TX descriptors.
89  * This is similar to other PCI chips such as the Texas Instruments
90  * ThunderLAN and the Intel 82557/82558.
91  *
92  * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
93  * bus master chips because they maintain the old PIO interface for
94  * backwards compatibility, but starting with the 3c905B and the
95  * "cyclone" chips, the compatibility interface has been dropped.
96  * Since using bus master DMA is a big win, we use this driver to
97  * support the PCI "boomerang" chips even though they also work with
98  * the "vortex" driver, in order to obtain better performance.
99  */
100 
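#ifdef notdef
/*
 * Illustrative sketch only (compiled out, not part of the driver):
 * with the fragment-based bus master mode described above, each mbuf
 * in a chain can be described in place by one TX descriptor fragment,
 * so no copy into a contiguous cluster is needed. This hypothetical
 * helper merely counts the fragments a chain would consume.
 */
static int
xl_count_frags(struct mbuf *m_head)
{
	struct mbuf		*m;
	int			nfrags = 0;

	for (m = m_head; m != NULL; m = m->m_next)
		if (m->m_len > 0)
			nfrags++;
	return (nfrags);
}
#endif
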
101 #ifdef HAVE_KERNEL_OPTION_HEADERS
102 #include "opt_device_polling.h"
103 #endif
104 
105 #include <sys/param.h>
106 #include <sys/systm.h>
107 #include <sys/sockio.h>
108 #include <sys/endian.h>
109 #include <sys/mbuf.h>
110 #include <sys/kernel.h>
111 #include <sys/module.h>
112 #include <sys/socket.h>
113 #include <sys/taskqueue.h>
114 
115 #include <net/if.h>
116 #include <net/if_arp.h>
117 #include <net/ethernet.h>
118 #include <net/if_dl.h>
119 #include <net/if_media.h>
120 #include <net/if_types.h>
121 
122 #include <net/bpf.h>
123 
124 #include <machine/bus.h>
125 #include <machine/resource.h>
126 #include <sys/bus.h>
127 #include <sys/rman.h>
128 
129 #include <dev/mii/mii.h>
130 #include <dev/mii/miivar.h>
131 
132 #include <dev/pci/pcireg.h>
133 #include <dev/pci/pcivar.h>
134 
135 MODULE_DEPEND(xl, pci, 1, 1, 1);
136 MODULE_DEPEND(xl, ether, 1, 1, 1);
137 MODULE_DEPEND(xl, miibus, 1, 1, 1);
138 
139 /* "device miibus" required.  See GENERIC if you get errors here. */
140 #include "miibus_if.h"
141 
142 #include <dev/xl/if_xlreg.h>
143 
144 /*
145  * TX Checksumming is disabled by default for two reasons:
146  * - TX Checksumming will occasionally produce corrupt packets
147  * - TX Checksumming seems to reduce performance
148  *
149  * Only 905B/C cards were reported to have this problem; it is possible
150  * that later chips _may_ be immune.
151  */
152 #define	XL905B_TXCSUM_BROKEN	1
153 
154 #ifdef XL905B_TXCSUM_BROKEN
155 #define XL905B_CSUM_FEATURES	0
156 #else
157 #define XL905B_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
158 #endif
159 
160 /*
161  * Various supported device vendors/types and their names.
162  */
163 static const struct xl_type xl_devs[] = {
164 	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT,
165 		"3Com 3c900-TPO Etherlink XL" },
166 	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT_COMBO,
167 		"3Com 3c900-COMBO Etherlink XL" },
168 	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_10_100BT,
169 		"3Com 3c905-TX Fast Etherlink XL" },
170 	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_100BT4,
171 		"3Com 3c905-T4 Fast Etherlink XL" },
172 	{ TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT,
173 		"3Com 3c900B-TPO Etherlink XL" },
174 	{ TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_COMBO,
175 		"3Com 3c900B-COMBO Etherlink XL" },
176 	{ TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_TPC,
177 		"3Com 3c900B-TPC Etherlink XL" },
178 	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10FL,
179 		"3Com 3c900B-FL Etherlink XL" },
180 	{ TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT,
181 		"3Com 3c905B-TX Fast Etherlink XL" },
182 	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT4,
183 		"3Com 3c905B-T4 Fast Etherlink XL" },
184 	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10_100FX,
185 		"3Com 3c905B-FX/SC Fast Etherlink XL" },
186 	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10_100_COMBO,
187 		"3Com 3c905B-COMBO Fast Etherlink XL" },
188 	{ TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT,
189 		"3Com 3c905C-TX Fast Etherlink XL" },
190 	{ TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B,
191 		"3Com 3c920B-EMB Integrated Fast Etherlink XL" },
192 	{ TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B_WNM,
193 		"3Com 3c920B-EMB-WNM Integrated Fast Etherlink XL" },
194 	{ TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT_SERV,
195 		"3Com 3c980 Fast Etherlink XL" },
196 	{ TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_SERV,
197 		"3Com 3c980C Fast Etherlink XL" },
198 	{ TC_VENDORID, TC_DEVICEID_HURRICANE_SOHO100TX,
199 		"3Com 3cSOHO100-TX OfficeConnect" },
200 	{ TC_VENDORID, TC_DEVICEID_TORNADO_HOMECONNECT,
201 		"3Com 3c450-TX HomeConnect" },
202 	{ TC_VENDORID, TC_DEVICEID_HURRICANE_555,
203 		"3Com 3c555 Fast Etherlink XL" },
204 	{ TC_VENDORID, TC_DEVICEID_HURRICANE_556,
205 		"3Com 3c556 Fast Etherlink XL" },
206 	{ TC_VENDORID, TC_DEVICEID_HURRICANE_556B,
207 		"3Com 3c556B Fast Etherlink XL" },
208 	{ TC_VENDORID, TC_DEVICEID_HURRICANE_575A,
209 		"3Com 3c575TX Fast Etherlink XL" },
210 	{ TC_VENDORID, TC_DEVICEID_HURRICANE_575B,
211 		"3Com 3c575B Fast Etherlink XL" },
212 	{ TC_VENDORID, TC_DEVICEID_HURRICANE_575C,
213 		"3Com 3c575C Fast Etherlink XL" },
214 	{ TC_VENDORID, TC_DEVICEID_HURRICANE_656,
215 		"3Com 3c656 Fast Etherlink XL" },
216 	{ TC_VENDORID, TC_DEVICEID_HURRICANE_656B,
217 		"3Com 3c656B Fast Etherlink XL" },
218 	{ TC_VENDORID, TC_DEVICEID_TORNADO_656C,
219 		"3Com 3c656C Fast Etherlink XL" },
220 	{ 0, 0, NULL }
221 };
222 
223 static int xl_probe(device_t);
224 static int xl_attach(device_t);
225 static int xl_detach(device_t);
226 
227 static int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *);
228 static void xl_stats_update(void *);
229 static void xl_stats_update_locked(struct xl_softc *);
230 static int xl_encap(struct xl_softc *, struct xl_chain *, struct mbuf **);
231 static void xl_rxeof(struct xl_softc *);
232 static void xl_rxeof_task(void *, int);
233 static int xl_rx_resync(struct xl_softc *);
234 static void xl_txeof(struct xl_softc *);
235 static void xl_txeof_90xB(struct xl_softc *);
236 static void xl_txeoc(struct xl_softc *);
237 static void xl_intr(void *);
238 static void xl_start(struct ifnet *);
239 static void xl_start_locked(struct ifnet *);
240 static void xl_start_90xB_locked(struct ifnet *);
241 static int xl_ioctl(struct ifnet *, u_long, caddr_t);
242 static void xl_init(void *);
243 static void xl_init_locked(struct xl_softc *);
244 static void xl_stop(struct xl_softc *);
245 static int xl_watchdog(struct xl_softc *);
246 static int xl_shutdown(device_t);
247 static int xl_suspend(device_t);
248 static int xl_resume(device_t);
249 
250 #ifdef DEVICE_POLLING
251 static void xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
252 static void xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count);
253 #endif
254 
255 static int xl_ifmedia_upd(struct ifnet *);
256 static void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
257 
258 static int xl_eeprom_wait(struct xl_softc *);
259 static int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int);
260 static void xl_mii_sync(struct xl_softc *);
261 static void xl_mii_send(struct xl_softc *, u_int32_t, int);
262 static int xl_mii_readreg(struct xl_softc *, struct xl_mii_frame *);
263 static int xl_mii_writereg(struct xl_softc *, struct xl_mii_frame *);
264 
265 static void xl_setcfg(struct xl_softc *);
266 static void xl_setmode(struct xl_softc *, int);
267 static void xl_setmulti(struct xl_softc *);
268 static void xl_setmulti_hash(struct xl_softc *);
269 static void xl_reset(struct xl_softc *);
270 static int xl_list_rx_init(struct xl_softc *);
271 static int xl_list_tx_init(struct xl_softc *);
272 static int xl_list_tx_init_90xB(struct xl_softc *);
273 static void xl_wait(struct xl_softc *);
274 static void xl_mediacheck(struct xl_softc *);
275 static void xl_choose_media(struct xl_softc *sc, int *media);
276 static void xl_choose_xcvr(struct xl_softc *, int);
277 static void xl_dma_map_addr(void *, bus_dma_segment_t *, int, int);
278 #ifdef notdef
279 static void xl_testpacket(struct xl_softc *);
280 #endif
281 
282 static int xl_miibus_readreg(device_t, int, int);
283 static int xl_miibus_writereg(device_t, int, int, int);
284 static void xl_miibus_statchg(device_t);
285 static void xl_miibus_mediainit(device_t);
286 
287 static device_method_t xl_methods[] = {
288 	/* Device interface */
289 	DEVMETHOD(device_probe,		xl_probe),
290 	DEVMETHOD(device_attach,	xl_attach),
291 	DEVMETHOD(device_detach,	xl_detach),
292 	DEVMETHOD(device_shutdown,	xl_shutdown),
293 	DEVMETHOD(device_suspend,	xl_suspend),
294 	DEVMETHOD(device_resume,	xl_resume),
295 
296 	/* bus interface */
297 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
298 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
299 
300 	/* MII interface */
301 	DEVMETHOD(miibus_readreg,	xl_miibus_readreg),
302 	DEVMETHOD(miibus_writereg,	xl_miibus_writereg),
303 	DEVMETHOD(miibus_statchg,	xl_miibus_statchg),
304 	DEVMETHOD(miibus_mediainit,	xl_miibus_mediainit),
305 
306 	{ 0, 0 }
307 };
308 
309 static driver_t xl_driver = {
310 	"xl",
311 	xl_methods,
312 	sizeof(struct xl_softc)
313 };
314 
315 static devclass_t xl_devclass;
316 
317 DRIVER_MODULE(xl, pci, xl_driver, xl_devclass, 0, 0);
318 DRIVER_MODULE(miibus, xl, miibus_driver, miibus_devclass, 0, 0);
319 
320 static void
321 xl_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
322 {
323 	u_int32_t *paddr;
324 
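	/*
	 * All tags loaded through this callback in this driver are
	 * created with a single segment, so only the first segment's
	 * address is kept; the callers check the bus_dmamap_load()
	 * return value for errors.
	 */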
325 	paddr = arg;
326 	*paddr = segs->ds_addr;
327 }
328 
329 /*
330  * Murphy's law says that it's possible the chip can wedge and
331  * the 'command in progress' bit may never clear. Hence, we wait
332  * only a finite amount of time to avoid getting caught in an
333  * infinite loop. Normally this delay routine would be a macro,
334  * but it isn't called during normal operation so we can afford
335  * to make it a function.
336  */
337 static void
338 xl_wait(struct xl_softc *sc)
339 {
340 	register int		i;
341 
342 	for (i = 0; i < XL_TIMEOUT; i++) {
343 		if ((CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY) == 0)
344 			break;
345 	}
346 
347 	if (i == XL_TIMEOUT)
348 		device_printf(sc->xl_dev, "command never completed!\n");
349 }
350 
351 /*
352  * MII access routines are provided for adapters with external
353  * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
354  * autoneg logic that's faked up to look like a PHY (3c905B-TX).
355  * Note: if you don't perform the MDIO operations just right,
356  * it's possible to end up with code that works correctly with
357  * some chips/CPUs/processor speeds/bus speeds/etc but not
358  * with others.
359  */
360 #define MII_SET(x)					\
361 	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
362 		CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x))
363 
364 #define MII_CLR(x)					\
365 	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
366 		CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x))
367 
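#ifdef notdef
/*
 * Illustrative sketch only (compiled out, not part of the driver):
 * one MDIO bit is shifted out by presenting the data bit and strobing
 * the clock low-to-high, exactly as the loop in xl_mii_send() does.
 */
static void
xl_mii_putbit(struct xl_softc *sc, int bit)
{
	if (bit)
		MII_SET(XL_MII_DATA);
	else
		MII_CLR(XL_MII_DATA);
	MII_CLR(XL_MII_CLK);
	MII_SET(XL_MII_CLK);
}
#endif
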
368 /*
369  * Sync the PHYs by setting data bit and strobing the clock 32 times.
370  */
371 static void
372 xl_mii_sync(struct xl_softc *sc)
373 {
374 	register int		i;
375 
376 	XL_SEL_WIN(4);
377 	MII_SET(XL_MII_DIR|XL_MII_DATA);
378 
379 	for (i = 0; i < 32; i++) {
380 		MII_SET(XL_MII_CLK);
381 		MII_SET(XL_MII_DATA);
382 		MII_SET(XL_MII_DATA);
383 		MII_CLR(XL_MII_CLK);
384 		MII_SET(XL_MII_DATA);
385 		MII_SET(XL_MII_DATA);
386 	}
387 }
388 
389 /*
390  * Clock a series of bits through the MII.
391  */
392 static void
393 xl_mii_send(struct xl_softc *sc, u_int32_t bits, int cnt)
394 {
395 	int			i;
396 
397 	XL_SEL_WIN(4);
398 	MII_CLR(XL_MII_CLK);
399 
400 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
401 		if (bits & i) {
402 			MII_SET(XL_MII_DATA);
403 		} else {
404 			MII_CLR(XL_MII_DATA);
405 		}
406 		MII_CLR(XL_MII_CLK);
407 		MII_SET(XL_MII_CLK);
408 	}
409 }
410 
411 /*
412  * Read a PHY register through the MII.
413  */
414 static int
415 xl_mii_readreg(struct xl_softc *sc, struct xl_mii_frame *frame)
416 {
417 	int			i, ack;
418 
419 	/* Set up frame for RX. */
420 	frame->mii_stdelim = XL_MII_STARTDELIM;
421 	frame->mii_opcode = XL_MII_READOP;
422 	frame->mii_turnaround = 0;
423 	frame->mii_data = 0;
424 
425 	/* Select register window 4. */
426 	XL_SEL_WIN(4);
427 
428 	CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);
429 	/* Turn on data xmit. */
430 	MII_SET(XL_MII_DIR);
431 
432 	xl_mii_sync(sc);
433 
434 	/* Send command/address info. */
435 	xl_mii_send(sc, frame->mii_stdelim, 2);
436 	xl_mii_send(sc, frame->mii_opcode, 2);
437 	xl_mii_send(sc, frame->mii_phyaddr, 5);
438 	xl_mii_send(sc, frame->mii_regaddr, 5);
439 
440 	/* Idle bit */
441 	MII_CLR((XL_MII_CLK|XL_MII_DATA));
442 	MII_SET(XL_MII_CLK);
443 
444 	/* Turn off xmit. */
445 	MII_CLR(XL_MII_DIR);
446 
447 	/* Check for ack */
448 	MII_CLR(XL_MII_CLK);
449 	ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;
450 	MII_SET(XL_MII_CLK);
451 
452 	/*
453 	 * Now try reading data bits. If the ack failed, we still
454 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
455 	 */
456 	if (ack) {
457 		for (i = 0; i < 16; i++) {
458 			MII_CLR(XL_MII_CLK);
459 			MII_SET(XL_MII_CLK);
460 		}
461 		goto fail;
462 	}
463 
464 	for (i = 0x8000; i; i >>= 1) {
465 		MII_CLR(XL_MII_CLK);
466 		if (!ack) {
467 			if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
468 				frame->mii_data |= i;
469 		}
470 		MII_SET(XL_MII_CLK);
471 	}
472 
473 fail:
474 	MII_CLR(XL_MII_CLK);
475 	MII_SET(XL_MII_CLK);
476 
477 	return (ack ? 1 : 0);
478 }
479 
480 /*
481  * Write to a PHY register through the MII.
482  */
483 static int
484 xl_mii_writereg(struct xl_softc *sc, struct xl_mii_frame *frame)
485 {
486 
487 	/* Set up frame for TX. */
488 	frame->mii_stdelim = XL_MII_STARTDELIM;
489 	frame->mii_opcode = XL_MII_WRITEOP;
490 	frame->mii_turnaround = XL_MII_TURNAROUND;
491 
492 	/* Select register window 4. */
493 	XL_SEL_WIN(4);
494 
495 	/* Turn on data output. */
496 	MII_SET(XL_MII_DIR);
497 
498 	xl_mii_sync(sc);
499 
500 	xl_mii_send(sc, frame->mii_stdelim, 2);
501 	xl_mii_send(sc, frame->mii_opcode, 2);
502 	xl_mii_send(sc, frame->mii_phyaddr, 5);
503 	xl_mii_send(sc, frame->mii_regaddr, 5);
504 	xl_mii_send(sc, frame->mii_turnaround, 2);
505 	xl_mii_send(sc, frame->mii_data, 16);
506 
507 	/* Idle bit. */
508 	MII_SET(XL_MII_CLK);
509 	MII_CLR(XL_MII_CLK);
510 
511 	/* Turn off xmit. */
512 	MII_CLR(XL_MII_DIR);
513 
514 	return (0);
515 }
516 
517 static int
518 xl_miibus_readreg(device_t dev, int phy, int reg)
519 {
520 	struct xl_softc		*sc;
521 	struct xl_mii_frame	frame;
522 
523 	sc = device_get_softc(dev);
524 
525 	/*
526 	 * Pretend that PHYs are only available at MII address 24.
527 	 * This is to guard against problems with certain 3Com ASIC
528 	 * revisions that incorrectly map the internal transceiver
529 	 * control registers at all MII addresses. This can cause
530 	 * the miibus code to attach the same PHY several times over.
531 	 */
532 	if ((sc->xl_flags & XL_FLAG_PHYOK) == 0 && phy != 24)
533 		return (0);
534 
535 	bzero((char *)&frame, sizeof(frame));
536 	frame.mii_phyaddr = phy;
537 	frame.mii_regaddr = reg;
538 
539 	xl_mii_readreg(sc, &frame);
540 
541 	return (frame.mii_data);
542 }
543 
544 static int
545 xl_miibus_writereg(device_t dev, int phy, int reg, int data)
546 {
547 	struct xl_softc		*sc;
548 	struct xl_mii_frame	frame;
549 
550 	sc = device_get_softc(dev);
551 
552 	if ((sc->xl_flags & XL_FLAG_PHYOK) == 0 && phy != 24)
553 		return (0);
554 
555 	bzero((char *)&frame, sizeof(frame));
556 	frame.mii_phyaddr = phy;
557 	frame.mii_regaddr = reg;
558 	frame.mii_data = data;
559 
560 	xl_mii_writereg(sc, &frame);
561 
562 	return (0);
563 }
564 
565 static void
566 xl_miibus_statchg(device_t dev)
567 {
568 	struct xl_softc		*sc;
569 	struct mii_data		*mii;
570 
571 	sc = device_get_softc(dev);
572 	mii = device_get_softc(sc->xl_miibus);
573 
574 	xl_setcfg(sc);
575 
576 	/* Set ASIC's duplex mode to match the PHY. */
577 	XL_SEL_WIN(3);
578 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
579 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
580 	else
581 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
582 		    (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
583 }
584 
585 /*
586  * Special support for the 3c905B-COMBO. This card has 10/100 support
587  * plus BNC and AUI ports. This means we will have both an miibus attached
588  * and some non-MII media settings. In order to allow this, we have to
589  * add the extra media to the miibus's ifmedia struct, but we can't do
590  * that during xl_attach() because the miibus hasn't been attached yet.
591  * So instead, we wait until the miibus probe/attach is done, at which
592  * point we will get a callback telling us that it's safe to add our
593  * extra media.
594  */
595 static void
596 xl_miibus_mediainit(device_t dev)
597 {
598 	struct xl_softc		*sc;
599 	struct mii_data		*mii;
600 	struct ifmedia		*ifm;
601 
602 	sc = device_get_softc(dev);
603 	mii = device_get_softc(sc->xl_miibus);
604 	ifm = &mii->mii_media;
605 
606 	if (sc->xl_media & (XL_MEDIAOPT_AUI | XL_MEDIAOPT_10FL)) {
607 		/*
608 		 * Check for a 10baseFL board in disguise.
609 		 */
610 		if (sc->xl_type == XL_TYPE_905B &&
611 		    sc->xl_media == XL_MEDIAOPT_10FL) {
612 			if (bootverbose)
613 				device_printf(sc->xl_dev, "found 10baseFL\n");
614 			ifmedia_add(ifm, IFM_ETHER | IFM_10_FL, 0, NULL);
615 			ifmedia_add(ifm, IFM_ETHER | IFM_10_FL|IFM_HDX, 0,
616 			    NULL);
617 			if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
618 				ifmedia_add(ifm,
619 				    IFM_ETHER | IFM_10_FL | IFM_FDX, 0, NULL);
620 		} else {
621 			if (bootverbose)
622 				device_printf(sc->xl_dev, "found AUI\n");
623 			ifmedia_add(ifm, IFM_ETHER | IFM_10_5, 0, NULL);
624 		}
625 	}
626 
627 	if (sc->xl_media & XL_MEDIAOPT_BNC) {
628 		if (bootverbose)
629 			device_printf(sc->xl_dev, "found BNC\n");
630 		ifmedia_add(ifm, IFM_ETHER | IFM_10_2, 0, NULL);
631 	}
632 }
633 
634 /*
635  * The EEPROM is slow: give it time to come ready after issuing
636  * it a command.
637  */
638 static int
639 xl_eeprom_wait(struct xl_softc *sc)
640 {
641 	int			i;
642 
643 	for (i = 0; i < 100; i++) {
644 		if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
645 			DELAY(162);
646 		else
647 			break;
648 	}
649 
650 	if (i == 100) {
651 		device_printf(sc->xl_dev, "eeprom failed to come ready\n");
652 		return (1);
653 	}
654 
655 	return (0);
656 }
657 
658 /*
659  * Read a sequence of words from the EEPROM. Note that ethernet address
660  * data is stored in the EEPROM in network byte order.
661  */
662 static int
663 xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap)
664 {
665 	int			err = 0, i;
666 	u_int16_t		word = 0, *ptr;
667 
668 #define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F))
669 #define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F)
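	/*
	 * For example, EEPROM_5BIT_OFFSET(0x42) =
	 * ((0x42 << 2) & 0x7F00) | (0x42 & 0x003F) = 0x0100 | 0x0002
	 * = 0x0102.
	 */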
670 	/*
671 	 * XXX: WARNING! DANGER!
672 	 * It's easy to accidentally overwrite the rom content!
673 	 * Note: the 3c575 uses 8bit EEPROM offsets.
674 	 */
675 	XL_SEL_WIN(0);
676 
677 	if (xl_eeprom_wait(sc))
678 		return (1);
679 
680 	if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
681 		off += 0x30;
682 
683 	for (i = 0; i < cnt; i++) {
684 		if (sc->xl_flags & XL_FLAG_8BITROM)
685 			CSR_WRITE_2(sc, XL_W0_EE_CMD,
686 			    XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i));
687 		else
688 			CSR_WRITE_2(sc, XL_W0_EE_CMD,
689 			    XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
690 		err = xl_eeprom_wait(sc);
691 		if (err)
692 			break;
693 		word = CSR_READ_2(sc, XL_W0_EE_DATA);
694 		ptr = (u_int16_t *)(dest + (i * 2));
695 		if (swap)
696 			*ptr = ntohs(word);
697 		else
698 			*ptr = word;
699 	}
700 
701 	return (err ? 1 : 0);
702 }
703 
704 /*
705  * NICs older than the 3c905B have only one multicast option, which
706  * is to enable reception of all multicast frames.
707  */
708 static void
709 xl_setmulti(struct xl_softc *sc)
710 {
711 	struct ifnet		*ifp = sc->xl_ifp;
712 	struct ifmultiaddr	*ifma;
713 	u_int8_t		rxfilt;
714 	int			mcnt = 0;
715 
716 	XL_LOCK_ASSERT(sc);
717 
718 	XL_SEL_WIN(5);
719 	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
720 
721 	if (ifp->if_flags & IFF_ALLMULTI) {
722 		rxfilt |= XL_RXFILTER_ALLMULTI;
723 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
724 		return;
725 	}
726 
727 	IF_ADDR_LOCK(ifp);
728 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
729 		mcnt++;
730 	IF_ADDR_UNLOCK(ifp);
731 
732 	if (mcnt)
733 		rxfilt |= XL_RXFILTER_ALLMULTI;
734 	else
735 		rxfilt &= ~XL_RXFILTER_ALLMULTI;
736 
737 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
738 }
739 
740 /*
741  * 3c905B adapters have a hash filter that we can program.
742  */
743 static void
744 xl_setmulti_hash(struct xl_softc *sc)
745 {
746 	struct ifnet		*ifp = sc->xl_ifp;
747 	int			h = 0, i;
748 	struct ifmultiaddr	*ifma;
749 	u_int8_t		rxfilt;
750 	int			mcnt = 0;
751 
752 	XL_LOCK_ASSERT(sc);
753 
754 	XL_SEL_WIN(5);
755 	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
756 
757 	if (ifp->if_flags & IFF_ALLMULTI) {
758 		rxfilt |= XL_RXFILTER_ALLMULTI;
759 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
760 		return;
761 	} else
762 		rxfilt &= ~XL_RXFILTER_ALLMULTI;
763 
764 	/* first, zot all the existing hash bits */
765 	for (i = 0; i < XL_HASHFILT_SIZE; i++)
766 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i);
767 
768 	/* now program new ones */
769 	IF_ADDR_LOCK(ifp);
770 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
771 		if (ifma->ifma_addr->sa_family != AF_LINK)
772 			continue;
773 		/*
774 		 * Note: the 3c905B currently only supports a 64-bit hash
775 		 * table, which means we really only need 6 bits, but the
776 		 * manual indicates that future chip revisions will have a
777 		 * 256-bit hash table, hence the routine is set up to
778 		 * calculate 8 bits of position info in case we need it some
779 		 * day.
780 		 * Note II, The Sequel: _CURRENT_ versions of the 3c905B have
781 		 * a 256 bit hash table. This means we have to use all 8 bits
782 		 * regardless. On older cards, the upper 2 bits will be
783 		 * ignored. Grrrr....
784 		 */
785 		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
786 		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
787 		CSR_WRITE_2(sc, XL_COMMAND,
788 		    h | XL_CMD_RX_SET_HASH | XL_HASH_SET);
789 		mcnt++;
790 	}
791 	IF_ADDR_UNLOCK(ifp);
792 
793 	if (mcnt)
794 		rxfilt |= XL_RXFILTER_MULTIHASH;
795 	else
796 		rxfilt &= ~XL_RXFILTER_MULTIHASH;
797 
798 	CSR_WRITE_2(sc, XL_COMMAND, rxfilt | XL_CMD_RX_SET_FILT);
799 }
800 
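#ifdef notdef
/*
 * Illustrative sketch only (compiled out, not part of the driver):
 * a single receive hash filter bit is programmed with the same
 * command encoding used by the loop in xl_setmulti_hash() above.
 */
static void
xl_set_hash_bit(struct xl_softc *sc, u_int8_t h)
{
	CSR_WRITE_2(sc, XL_COMMAND, h | XL_CMD_RX_SET_HASH | XL_HASH_SET);
}
#endif
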
801 #ifdef notdef
802 static void
803 xl_testpacket(struct xl_softc *sc)
804 {
805 	struct mbuf		*m;
806 	struct ifnet		*ifp = sc->xl_ifp;
807 
808 	MGETHDR(m, M_DONTWAIT, MT_DATA);
809 
810 	if (m == NULL)
811 		return;
812 
813 	bcopy(IF_LLADDR(sc->xl_ifp),
814 		mtod(m, struct ether_header *)->ether_dhost, ETHER_ADDR_LEN);
815 	bcopy(IF_LLADDR(sc->xl_ifp),
816 		mtod(m, struct ether_header *)->ether_shost, ETHER_ADDR_LEN);
817 	mtod(m, struct ether_header *)->ether_type = htons(3);
818 	mtod(m, unsigned char *)[14] = 0;
819 	mtod(m, unsigned char *)[15] = 0;
820 	mtod(m, unsigned char *)[16] = 0xE3;
821 	m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3;
822 	IFQ_ENQUEUE(&ifp->if_snd, m);
823 	xl_start(ifp);
824 }
825 #endif
826 
827 static void
828 xl_setcfg(struct xl_softc *sc)
829 {
830 	u_int32_t		icfg;
831 
832 	/*XL_LOCK_ASSERT(sc);*/
833 
834 	XL_SEL_WIN(3);
835 	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
836 	icfg &= ~XL_ICFG_CONNECTOR_MASK;
837 	if (sc->xl_media & XL_MEDIAOPT_MII ||
838 		sc->xl_media & XL_MEDIAOPT_BT4)
839 		icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
840 	if (sc->xl_media & XL_MEDIAOPT_BTX)
841 		icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);
842 
843 	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
844 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
845 }
846 
847 static void
848 xl_setmode(struct xl_softc *sc, int media)
849 {
850 	u_int32_t		icfg;
851 	u_int16_t		mediastat;
852 	char			*pmsg = "", *dmsg = "";
853 
854 	XL_LOCK_ASSERT(sc);
855 
856 	XL_SEL_WIN(4);
857 	mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
858 	XL_SEL_WIN(3);
859 	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
860 
861 	if (sc->xl_media & XL_MEDIAOPT_BT) {
862 		if (IFM_SUBTYPE(media) == IFM_10_T) {
863 			pmsg = "10baseT transceiver";
864 			sc->xl_xcvr = XL_XCVR_10BT;
865 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
866 			icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
867 			mediastat |= XL_MEDIASTAT_LINKBEAT |
868 			    XL_MEDIASTAT_JABGUARD;
869 			mediastat &= ~XL_MEDIASTAT_SQEENB;
870 		}
871 	}
872 
873 	if (sc->xl_media & XL_MEDIAOPT_BFX) {
874 		if (IFM_SUBTYPE(media) == IFM_100_FX) {
875 			pmsg = "100baseFX port";
876 			sc->xl_xcvr = XL_XCVR_100BFX;
877 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
878 			icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
879 			mediastat |= XL_MEDIASTAT_LINKBEAT;
880 			mediastat &= ~XL_MEDIASTAT_SQEENB;
881 		}
882 	}
883 
884 	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
885 		if (IFM_SUBTYPE(media) == IFM_10_5) {
886 			pmsg = "AUI port";
887 			sc->xl_xcvr = XL_XCVR_AUI;
888 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
889 			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
890 			mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
891 			    XL_MEDIASTAT_JABGUARD);
892 			mediastat |= XL_MEDIASTAT_SQEENB;
893 		}
894 		if (IFM_SUBTYPE(media) == IFM_10_FL) {
895 			pmsg = "10baseFL transceiver";
896 			sc->xl_xcvr = XL_XCVR_AUI;
897 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
898 			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
899 			mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
900 			    XL_MEDIASTAT_JABGUARD);
901 			mediastat |= XL_MEDIASTAT_SQEENB;
902 		}
903 	}
904 
905 	if (sc->xl_media & XL_MEDIAOPT_BNC) {
906 		if (IFM_SUBTYPE(media) == IFM_10_2) {
907 			pmsg = "BNC port";
908 			sc->xl_xcvr = XL_XCVR_COAX;
909 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
910 			icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
911 			mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
912 			    XL_MEDIASTAT_JABGUARD | XL_MEDIASTAT_SQEENB);
913 		}
914 	}
915 
916 	if ((media & IFM_GMASK) == IFM_FDX ||
917 			IFM_SUBTYPE(media) == IFM_100_FX) {
918 		dmsg = "full";
919 		XL_SEL_WIN(3);
920 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
921 	} else {
922 		dmsg = "half";
923 		XL_SEL_WIN(3);
924 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
925 			(CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
926 	}
927 
928 	if (IFM_SUBTYPE(media) == IFM_10_2)
929 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
930 	else
931 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
932 
933 	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
934 	XL_SEL_WIN(4);
935 	CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
936 
937 	DELAY(800);
938 	XL_SEL_WIN(7);
939 
940 	device_printf(sc->xl_dev, "selecting %s, %s duplex\n", pmsg, dmsg);
941 }
942 
943 static void
944 xl_reset(struct xl_softc *sc)
945 {
946 	register int		i;
947 
948 	XL_LOCK_ASSERT(sc);
949 
950 	XL_SEL_WIN(0);
951 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
952 	    ((sc->xl_flags & XL_FLAG_WEIRDRESET) ?
953 	     XL_RESETOPT_DISADVFD:0));
954 
955 	/*
956 	 * If we're using memory mapped register mode, pause briefly
957 	 * after issuing the reset command before trying to access any
958 	 * other registers. With my 3c575C cardbus card, failing to do
959 	 * this results in the system locking up while trying to poll
960 	 * the command busy bit in the status register.
961 	 */
962 	if (sc->xl_flags & XL_FLAG_USE_MMIO)
963 		DELAY(100000);
964 
965 	for (i = 0; i < XL_TIMEOUT; i++) {
966 		DELAY(10);
967 		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
968 			break;
969 	}
970 
971 	if (i == XL_TIMEOUT)
972 		device_printf(sc->xl_dev, "reset didn't complete\n");
973 
974 	/* Reset TX and RX. */
975 	/*
976 	 * Note: the RX reset takes an absurd amount of time on newer
977 	 * versions of the Tornado chips such as those on the 3c905CX
978 	 * and newer 3c908C cards. We wait an extra amount of time so
979 	 * that xl_wait() doesn't complain and annoy the users.
980 	 */
981 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
982 	DELAY(100000);
983 	xl_wait(sc);
984 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
985 	xl_wait(sc);
986 
987 	if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR ||
988 	    sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
989 		XL_SEL_WIN(2);
990 		CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS,
991 		    CSR_READ_2(sc, XL_W2_RESET_OPTIONS) |
992 		    ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR) ?
993 		    XL_RESETOPT_INVERT_LED : 0) |
994 		    ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR) ?
995 		    XL_RESETOPT_INVERT_MII : 0));
996 	}
997 
998 	/* Wait a little while for the chip to get its brains in order. */
999 	DELAY(100000);
1000 }
1001 
1002 /*
1003  * Probe for a 3Com Etherlink XL chip. Check the PCI vendor and device
1004  * IDs against our list and return a device name if we find a match.
1005  */
1006 static int
1007 xl_probe(device_t dev)
1008 {
1009 	const struct xl_type	*t;
1010 
1011 	t = xl_devs;
1012 
1013 	while (t->xl_name != NULL) {
1014 		if ((pci_get_vendor(dev) == t->xl_vid) &&
1015 		    (pci_get_device(dev) == t->xl_did)) {
1016 			device_set_desc(dev, t->xl_name);
1017 			return (BUS_PROBE_DEFAULT);
1018 		}
1019 		t++;
1020 	}
1021 
1022 	return (ENXIO);
1023 }
1024 
1025 /*
1026  * This routine is a kludge to work around possible hardware faults
1027  * or manufacturing defects that can cause the media options register
1028  * (or reset options register, as it's called for the first generation
1029  * 3c90x adapters) to return an incorrect result. I have encountered
1030  * one Dell Latitude laptop docking station with an integrated 3c905-TX
1031  * which doesn't have any of the 'mediaopt' bits set. This screws up
1032  * the attach routine pretty badly because it doesn't know what media
1033  * to look for. If we find ourselves in this predicament, this routine
1034  * will try to guess the media options values and warn the user of a
1035  * possible manufacturing defect with his adapter/system/whatever.
1036  */
1037 static void
1038 xl_mediacheck(struct xl_softc *sc)
1039 {
1040 
1041 	/*
1042 	 * If some of the media options bits are set, assume they are
1043 	 * correct. If not, try to figure it out down below.
1044 	 * XXX I should check for 10baseFL, but I don't have an adapter
1045 	 * to test with.
1046 	 */
1047 	if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
1048 		/*
1049 		 * Check the XCVR value. If it's not in the normal range
1050 		 * of values, we need to fake it up here.
1051 		 */
1052 		if (sc->xl_xcvr <= XL_XCVR_AUTO)
1053 			return;
1054 		else {
1055 			device_printf(sc->xl_dev,
1056 			    "bogus xcvr value in EEPROM (%x)\n", sc->xl_xcvr);
1057 			device_printf(sc->xl_dev,
1058 			    "choosing new default based on card type\n");
1059 		}
1060 	} else {
1061 		if (sc->xl_type == XL_TYPE_905B &&
1062 		    sc->xl_media & XL_MEDIAOPT_10FL)
1063 			return;
1064 		device_printf(sc->xl_dev,
1065 "WARNING: no media options bits set in the media options register!!\n");
1066 		device_printf(sc->xl_dev,
1067 "this could be a manufacturing defect in your adapter or system\n");
1068 		device_printf(sc->xl_dev,
1069 "attempting to guess media type; you should probably consult your vendor\n");
1070 	}
1071 
1072 	xl_choose_xcvr(sc, 1);
1073 }
1074 
1075 static void
1076 xl_choose_xcvr(struct xl_softc *sc, int verbose)
1077 {
1078 	u_int16_t		devid;
1079 
1080 	/*
1081 	 * Read the device ID from the EEPROM.
1082 	 * This is what's loaded into the PCI device ID register, so it has
1083  * to be correct; otherwise we wouldn't have gotten this far.
1084 	 */
1085 	xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);
1086 
1087 	switch (devid) {
1088 	case TC_DEVICEID_BOOMERANG_10BT:	/* 3c900-TPO */
1089 	case TC_DEVICEID_KRAKATOA_10BT:		/* 3c900B-TPO */
1090 		sc->xl_media = XL_MEDIAOPT_BT;
1091 		sc->xl_xcvr = XL_XCVR_10BT;
1092 		if (verbose)
1093 			device_printf(sc->xl_dev,
1094 			    "guessing 10BaseT transceiver\n");
1095 		break;
1096 	case TC_DEVICEID_BOOMERANG_10BT_COMBO:	/* 3c900-COMBO */
1097 	case TC_DEVICEID_KRAKATOA_10BT_COMBO:	/* 3c900B-COMBO */
1098 		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
1099 		sc->xl_xcvr = XL_XCVR_10BT;
1100 		if (verbose)
1101 			device_printf(sc->xl_dev,
1102 			    "guessing COMBO (AUI/BNC/TP)\n");
1103 		break;
1104 	case TC_DEVICEID_KRAKATOA_10BT_TPC:	/* 3c900B-TPC */
1105 		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
1106 		sc->xl_xcvr = XL_XCVR_10BT;
1107 		if (verbose)
1108 			device_printf(sc->xl_dev, "guessing TPC (BNC/TP)\n");
1109 		break;
1110 	case TC_DEVICEID_CYCLONE_10FL:		/* 3c900B-FL */
1111 		sc->xl_media = XL_MEDIAOPT_10FL;
1112 		sc->xl_xcvr = XL_XCVR_AUI;
1113 		if (verbose)
1114 			device_printf(sc->xl_dev, "guessing 10baseFL\n");
1115 		break;
1116 	case TC_DEVICEID_BOOMERANG_10_100BT:	/* 3c905-TX */
1117 	case TC_DEVICEID_HURRICANE_555:		/* 3c555 */
1118 	case TC_DEVICEID_HURRICANE_556:		/* 3c556 */
1119 	case TC_DEVICEID_HURRICANE_556B:	/* 3c556B */
1120 	case TC_DEVICEID_HURRICANE_575A:	/* 3c575TX */
1121 	case TC_DEVICEID_HURRICANE_575B:	/* 3c575B */
1122 	case TC_DEVICEID_HURRICANE_575C:	/* 3c575C */
1123 	case TC_DEVICEID_HURRICANE_656:		/* 3c656 */
1124 	case TC_DEVICEID_HURRICANE_656B:	/* 3c656B */
1125 	case TC_DEVICEID_TORNADO_656C:		/* 3c656C */
1126 	case TC_DEVICEID_TORNADO_10_100BT_920B:	/* 3c920B-EMB */
1127 	case TC_DEVICEID_TORNADO_10_100BT_920B_WNM:	/* 3c920B-EMB-WNM */
1128 		sc->xl_media = XL_MEDIAOPT_MII;
1129 		sc->xl_xcvr = XL_XCVR_MII;
1130 		if (verbose)
1131 			device_printf(sc->xl_dev, "guessing MII\n");
1132 		break;
1133 	case TC_DEVICEID_BOOMERANG_100BT4:	/* 3c905-T4 */
1134 	case TC_DEVICEID_CYCLONE_10_100BT4:	/* 3c905B-T4 */
1135 		sc->xl_media = XL_MEDIAOPT_BT4;
1136 		sc->xl_xcvr = XL_XCVR_MII;
1137 		if (verbose)
1138 			device_printf(sc->xl_dev, "guessing 100baseT4/MII\n");
1139 		break;
1140 	case TC_DEVICEID_HURRICANE_10_100BT:	/* 3c905B-TX */
1141 	case TC_DEVICEID_HURRICANE_10_100BT_SERV: /* 3c980-TX */
1142 	case TC_DEVICEID_TORNADO_10_100BT_SERV:	/* 3c980C-TX */
1143 	case TC_DEVICEID_HURRICANE_SOHO100TX:	/* 3cSOHO100-TX */
1144 	case TC_DEVICEID_TORNADO_10_100BT:	/* 3c905C-TX */
1145 	case TC_DEVICEID_TORNADO_HOMECONNECT:	/* 3c450-TX */
1146 		sc->xl_media = XL_MEDIAOPT_BTX;
1147 		sc->xl_xcvr = XL_XCVR_AUTO;
1148 		if (verbose)
1149 			device_printf(sc->xl_dev, "guessing 10/100 internal\n");
1150 		break;
1151 	case TC_DEVICEID_CYCLONE_10_100_COMBO:	/* 3c905B-COMBO */
1152 		sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
1153 		sc->xl_xcvr = XL_XCVR_AUTO;
1154 		if (verbose)
1155 			device_printf(sc->xl_dev,
1156 			    "guessing 10/100 plus BNC/AUI\n");
1157 		break;
1158 	default:
1159 		device_printf(sc->xl_dev,
1160 		    "unknown device ID: %x -- defaulting to 10baseT\n", devid);
1161 		sc->xl_media = XL_MEDIAOPT_BT;
1162 		break;
1163 	}
1164 }
1165 
1166 /*
1167  * Attach the interface. Allocate softc structures, do ifmedia
1168  * setup and ethernet/BPF attach.
1169  */
1170 static int
1171 xl_attach(device_t dev)
1172 {
1173 	u_char			eaddr[ETHER_ADDR_LEN];
1174 	u_int16_t		xcvr[2];
1175 	struct xl_softc		*sc;
1176 	struct ifnet		*ifp;
1177 	int			media;
1178 	int			unit, error = 0, rid, res;
1179 	uint16_t		did;
1180 
1181 	sc = device_get_softc(dev);
1182 	sc->xl_dev = dev;
1183 
1184 	unit = device_get_unit(dev);
1185 
1186 	mtx_init(&sc->xl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1187 	    MTX_DEF);
1188 	ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);
1189 
1190 	did = pci_get_device(dev);
1191 
1192 	sc->xl_flags = 0;
1193 	if (did == TC_DEVICEID_HURRICANE_555)
1194 		sc->xl_flags |= XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_PHYOK;
1195 	if (did == TC_DEVICEID_HURRICANE_556 ||
1196 	    did == TC_DEVICEID_HURRICANE_556B)
1197 		sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK |
1198 		    XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_WEIRDRESET |
1199 		    XL_FLAG_INVERT_LED_PWR | XL_FLAG_INVERT_MII_PWR;
1200 	if (did == TC_DEVICEID_HURRICANE_555 ||
1201 	    did == TC_DEVICEID_HURRICANE_556)
1202 		sc->xl_flags |= XL_FLAG_8BITROM;
1203 	if (did == TC_DEVICEID_HURRICANE_556B)
1204 		sc->xl_flags |= XL_FLAG_NO_XCVR_PWR;
1205 
1206 	if (did == TC_DEVICEID_HURRICANE_575B ||
1207 	    did == TC_DEVICEID_HURRICANE_575C ||
1208 	    did == TC_DEVICEID_HURRICANE_656B ||
1209 	    did == TC_DEVICEID_TORNADO_656C)
1210 		sc->xl_flags |= XL_FLAG_FUNCREG;
1211 	if (did == TC_DEVICEID_HURRICANE_575A ||
1212 	    did == TC_DEVICEID_HURRICANE_575B ||
1213 	    did == TC_DEVICEID_HURRICANE_575C ||
1214 	    did == TC_DEVICEID_HURRICANE_656B ||
1215 	    did == TC_DEVICEID_TORNADO_656C)
1216 		sc->xl_flags |= XL_FLAG_PHYOK | XL_FLAG_EEPROM_OFFSET_30 |
1217 		  XL_FLAG_8BITROM;
1218 	if (did == TC_DEVICEID_HURRICANE_656)
1219 		sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK;
1220 	if (did == TC_DEVICEID_HURRICANE_575B)
1221 		sc->xl_flags |= XL_FLAG_INVERT_LED_PWR;
1222 	if (did == TC_DEVICEID_HURRICANE_575C)
1223 		sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
1224 	if (did == TC_DEVICEID_TORNADO_656C)
1225 		sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
1226 	if (did == TC_DEVICEID_HURRICANE_656 ||
1227 	    did == TC_DEVICEID_HURRICANE_656B)
1228 		sc->xl_flags |= XL_FLAG_INVERT_MII_PWR |
1229 		    XL_FLAG_INVERT_LED_PWR;
1230 	if (did == TC_DEVICEID_TORNADO_10_100BT_920B ||
1231 	    did == TC_DEVICEID_TORNADO_10_100BT_920B_WNM)
1232 		sc->xl_flags |= XL_FLAG_PHYOK;
1233 
1234 	switch (did) {
1235 	case TC_DEVICEID_BOOMERANG_10_100BT:	/* 3c905-TX */
1236 	case TC_DEVICEID_HURRICANE_575A:
1237 	case TC_DEVICEID_HURRICANE_575B:
1238 	case TC_DEVICEID_HURRICANE_575C:
1239 		sc->xl_flags |= XL_FLAG_NO_MMIO;
1240 		break;
1241 	default:
1242 		break;
1243 	}
1244 
1245 	/*
1246 	 * Map control/status registers.
1247 	 */
1248 	pci_enable_busmaster(dev);
1249 
1250 	if ((sc->xl_flags & XL_FLAG_NO_MMIO) == 0) {
1251 		rid = XL_PCI_LOMEM;
1252 		res = SYS_RES_MEMORY;
1253 
1254 		sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE);
1255 	}
1256 
1257 	if (sc->xl_res != NULL) {
1258 		sc->xl_flags |= XL_FLAG_USE_MMIO;
1259 		if (bootverbose)
1260 			device_printf(dev, "using memory mapped I/O\n");
1261 	} else {
1262 		rid = XL_PCI_LOIO;
1263 		res = SYS_RES_IOPORT;
1264 		sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE);
1265 		if (sc->xl_res == NULL) {
1266 			device_printf(dev, "couldn't map ports/memory\n");
1267 			error = ENXIO;
1268 			goto fail;
1269 		}
1270 		if (bootverbose)
1271 			device_printf(dev, "using port I/O\n");
1272 	}
1273 
1274 	sc->xl_btag = rman_get_bustag(sc->xl_res);
1275 	sc->xl_bhandle = rman_get_bushandle(sc->xl_res);
1276 
1277 	if (sc->xl_flags & XL_FLAG_FUNCREG) {
1278 		rid = XL_PCI_FUNCMEM;
1279 		sc->xl_fres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1280 		    RF_ACTIVE);
1281 
1282 		if (sc->xl_fres == NULL) {
1283 			device_printf(dev, "couldn't map funcreg memory\n");
1284 			error = ENXIO;
1285 			goto fail;
1286 		}
1287 
1288 		sc->xl_ftag = rman_get_bustag(sc->xl_fres);
1289 		sc->xl_fhandle = rman_get_bushandle(sc->xl_fres);
1290 	}
1291 
1292 	/* Allocate interrupt */
1293 	rid = 0;
1294 	sc->xl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1295 	    RF_SHAREABLE | RF_ACTIVE);
1296 	if (sc->xl_irq == NULL) {
1297 		device_printf(dev, "couldn't map interrupt\n");
1298 		error = ENXIO;
1299 		goto fail;
1300 	}
1301 
1302 	/* Initialize interface name. */
1303 	ifp = sc->xl_ifp = if_alloc(IFT_ETHER);
1304 	if (ifp == NULL) {
1305 		device_printf(dev, "can not if_alloc()\n");
1306 		error = ENOSPC;
1307 		goto fail;
1308 	}
1309 	ifp->if_softc = sc;
1310 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1311 
1312 	/* Reset the adapter. */
1313 	XL_LOCK(sc);
1314 	xl_reset(sc);
1315 	XL_UNLOCK(sc);
1316 
1317 	/*
1318 	 * Get station address from the EEPROM.
1319 	 */
1320 	if (xl_read_eeprom(sc, (caddr_t)&eaddr, XL_EE_OEM_ADR0, 3, 1)) {
1321 		device_printf(dev, "failed to read station address\n");
1322 		error = ENXIO;
1323 		goto fail;
1324 	}
1325 
1326 	callout_init_mtx(&sc->xl_stat_callout, &sc->xl_mtx, 0);
1327 	TASK_INIT(&sc->xl_task, 0, xl_rxeof_task, sc);
1328 
1329 	/*
1330 	 * Now allocate a tag for the DMA descriptor lists and a chunk
1331 	 * of DMA-able memory based on the tag.  Also obtain the DMA
1332 	 * addresses of the RX and TX ring, which we'll need later.
1333 	 * All of our lists are allocated as a contiguous block
1334 	 * of memory.
1335 	 */
1336 	error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
1337 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1338 	    XL_RX_LIST_SZ, 1, XL_RX_LIST_SZ, 0, NULL, NULL,
1339 	    &sc->xl_ldata.xl_rx_tag);
1340 	if (error) {
1341 		device_printf(dev, "failed to allocate rx dma tag\n");
1342 		goto fail;
1343 	}
1344 
1345 	error = bus_dmamem_alloc(sc->xl_ldata.xl_rx_tag,
1346 	    (void **)&sc->xl_ldata.xl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1347 	    &sc->xl_ldata.xl_rx_dmamap);
1348 	if (error) {
1349 		device_printf(dev, "no memory for rx list buffers!\n");
1350 		bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
1351 		sc->xl_ldata.xl_rx_tag = NULL;
1352 		goto fail;
1353 	}
1354 
1355 	error = bus_dmamap_load(sc->xl_ldata.xl_rx_tag,
1356 	    sc->xl_ldata.xl_rx_dmamap, sc->xl_ldata.xl_rx_list,
1357 	    XL_RX_LIST_SZ, xl_dma_map_addr,
1358 	    &sc->xl_ldata.xl_rx_dmaaddr, BUS_DMA_NOWAIT);
1359 	if (error) {
1360 		device_printf(dev, "cannot get dma address of the rx ring!\n");
1361 		bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
1362 		    sc->xl_ldata.xl_rx_dmamap);
1363 		bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
1364 		sc->xl_ldata.xl_rx_tag = NULL;
1365 		goto fail;
1366 	}
1367 
1368 	error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
1369 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1370 	    XL_TX_LIST_SZ, 1, XL_TX_LIST_SZ, 0, NULL, NULL,
1371 	    &sc->xl_ldata.xl_tx_tag);
1372 	if (error) {
1373 		device_printf(dev, "failed to allocate tx dma tag\n");
1374 		goto fail;
1375 	}
1376 
1377 	error = bus_dmamem_alloc(sc->xl_ldata.xl_tx_tag,
1378 	    (void **)&sc->xl_ldata.xl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1379 	    &sc->xl_ldata.xl_tx_dmamap);
1380 	if (error) {
1381 		device_printf(dev, "no memory for list buffers!\n");
1382 		bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
1383 		sc->xl_ldata.xl_tx_tag = NULL;
1384 		goto fail;
1385 	}
1386 
1387 	error = bus_dmamap_load(sc->xl_ldata.xl_tx_tag,
1388 	    sc->xl_ldata.xl_tx_dmamap, sc->xl_ldata.xl_tx_list,
1389 	    XL_TX_LIST_SZ, xl_dma_map_addr,
1390 	    &sc->xl_ldata.xl_tx_dmaaddr, BUS_DMA_NOWAIT);
1391 	if (error) {
1392 		device_printf(dev, "cannot get dma address of the tx ring!\n");
1393 		bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
1394 		    sc->xl_ldata.xl_tx_dmamap);
1395 		bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
1396 		sc->xl_ldata.xl_tx_tag = NULL;
1397 		goto fail;
1398 	}
1399 
1400 	/*
1401 	 * Allocate a DMA tag for the mapping of mbufs.
1402 	 */
1403 	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
1404 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1405 	    MCLBYTES * XL_MAXFRAGS, XL_MAXFRAGS, MCLBYTES, 0, NULL,
1406 	    NULL, &sc->xl_mtag);
1407 	if (error) {
1408 		device_printf(dev, "failed to allocate mbuf dma tag\n");
1409 		goto fail;
1410 	}
1411 
1412 	/* We need a spare DMA map for the RX ring. */
1413 	error = bus_dmamap_create(sc->xl_mtag, 0, &sc->xl_tmpmap);
1414 	if (error)
1415 		goto fail;
1416 
1417 	/*
1418 	 * Figure out the card type. 3c905B adapters have the
1419 	 * 'supportsNoTxLength' bit set in the capabilities
1420 	 * word in the EEPROM.
1421 	 * Note: my 3c575C cardbus card lies. It returns a value
1422 	 * of 0x1578 for its capabilities word, which is somewhat
1423 	 * nonsensical. Another way to distinguish a 3c90x chip
1424 	 * from a 3c90xB/C chip is to check for the 'supportsLargePackets'
1425  * bit. This will only be set for 3c90x boomerang chips.
1426 	 */
1427 	xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
1428 	if (sc->xl_caps & XL_CAPS_NO_TXLENGTH ||
1429 	    !(sc->xl_caps & XL_CAPS_LARGE_PKTS))
1430 		sc->xl_type = XL_TYPE_905B;
1431 	else
1432 		sc->xl_type = XL_TYPE_90X;
1433 
1434 	/* Set the TX start threshold for best performance. */
1435 	sc->xl_tx_thresh = XL_MIN_FRAMELEN;
1436 
1437 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1438 	ifp->if_ioctl = xl_ioctl;
1439 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1440 	if (sc->xl_type == XL_TYPE_905B) {
1441 		ifp->if_hwassist = XL905B_CSUM_FEATURES;
1442 #ifdef XL905B_TXCSUM_BROKEN
1443 		ifp->if_capabilities |= IFCAP_RXCSUM;
1444 #else
1445 		ifp->if_capabilities |= IFCAP_HWCSUM;
1446 #endif
1447 	}
1448 	ifp->if_capenable = ifp->if_capabilities;
1449 #ifdef DEVICE_POLLING
1450 	ifp->if_capabilities |= IFCAP_POLLING;
1451 #endif
1452 	ifp->if_start = xl_start;
1453 	ifp->if_init = xl_init;
1454 	IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1);
1455 	ifp->if_snd.ifq_drv_maxlen = XL_TX_LIST_CNT - 1;
1456 	IFQ_SET_READY(&ifp->if_snd);
1457 
1458 	/*
1459 	 * Now we have to see what sort of media we have.
1460  * This includes probing for an MII interface and a
1461 	 * possible PHY.
1462 	 */
1463 	XL_SEL_WIN(3);
1464 	sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);
1465 	if (bootverbose)
1466 		device_printf(dev, "media options word: %x\n", sc->xl_media);
1467 
1468 	xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0);
1469 	sc->xl_xcvr = xcvr[0] | xcvr[1] << 16;
1470 	sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
1471 	sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;
1472 
1473 	xl_mediacheck(sc);
1474 
1475 	if (sc->xl_media & XL_MEDIAOPT_MII ||
1476 	    sc->xl_media & XL_MEDIAOPT_BTX ||
1477 	    sc->xl_media & XL_MEDIAOPT_BT4) {
1478 		if (bootverbose)
1479 			device_printf(dev, "found MII/AUTO\n");
1480 		xl_setcfg(sc);
1481 		if (mii_phy_probe(dev, &sc->xl_miibus,
1482 		    xl_ifmedia_upd, xl_ifmedia_sts)) {
1483 			device_printf(dev, "no PHY found!\n");
1484 			error = ENXIO;
1485 			goto fail;
1486 		}
1487 		goto done;
1488 	}
1489 
1490 	/*
1491 	 * Sanity check. If the user has selected "auto" and this isn't
1492 	 * a 10/100 card of some kind, we need to force the transceiver
1493 	 * type to something sane.
1494 	 */
1495 	if (sc->xl_xcvr == XL_XCVR_AUTO)
1496 		xl_choose_xcvr(sc, bootverbose);
1497 
1498 	/*
1499 	 * Do ifmedia setup.
1500 	 */
1501 	if (sc->xl_media & XL_MEDIAOPT_BT) {
1502 		if (bootverbose)
1503 			device_printf(dev, "found 10baseT\n");
1504 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
1505 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
1506 		if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
1507 			ifmedia_add(&sc->ifmedia,
1508 			    IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
1509 	}
1510 
1511 	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
1512 		/*
1513 		 * Check for a 10baseFL board in disguise.
1514 		 */
1515 		if (sc->xl_type == XL_TYPE_905B &&
1516 		    sc->xl_media == XL_MEDIAOPT_10FL) {
1517 			if (bootverbose)
1518 				device_printf(dev, "found 10baseFL\n");
1519 			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL, 0, NULL);
1520 			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL|IFM_HDX,
1521 			    0, NULL);
1522 			if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
1523 				ifmedia_add(&sc->ifmedia,
1524 				    IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
1525 		} else {
1526 			if (bootverbose)
1527 				device_printf(dev, "found AUI\n");
1528 			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
1529 		}
1530 	}
1531 
1532 	if (sc->xl_media & XL_MEDIAOPT_BNC) {
1533 		if (bootverbose)
1534 			device_printf(dev, "found BNC\n");
1535 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL);
1536 	}
1537 
1538 	if (sc->xl_media & XL_MEDIAOPT_BFX) {
1539 		if (bootverbose)
1540 			device_printf(dev, "found 100baseFX\n");
1541 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_FX, 0, NULL);
1542 	}
1543 
1544 	media = IFM_ETHER|IFM_100_TX|IFM_FDX;
1545 	xl_choose_media(sc, &media);
1546 
1547 	if (sc->xl_miibus == NULL)
1548 		ifmedia_set(&sc->ifmedia, media);
1549 
1550 done:
1551 	if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) {
1552 		XL_SEL_WIN(0);
1553 		CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS);
1554 	}
1555 
1556 	/*
1557 	 * Call MI attach routine.
1558 	 */
1559 	ether_ifattach(ifp, eaddr);
1560 
1561 	error = bus_setup_intr(dev, sc->xl_irq, INTR_TYPE_NET | INTR_MPSAFE,
1562 	    NULL, xl_intr, sc, &sc->xl_intrhand);
1563 	if (error) {
1564 		device_printf(dev, "couldn't set up irq\n");
1565 		ether_ifdetach(ifp);
1566 		goto fail;
1567 	}
1568 
1569 fail:
1570 	if (error)
1571 		xl_detach(dev);
1572 
1573 	return (error);
1574 }
1575 
1576 /*
1577  * Choose a default media.
1578  * XXX This is a leaf function only called by xl_attach() and
1579  *     acquires/releases the non-recursible driver mutex to
1580  *     satisfy lock assertions.
1581  */
1582 static void
1583 xl_choose_media(struct xl_softc *sc, int *media)
1584 {
1585 
1586 	XL_LOCK(sc);
1587 
1588 	switch (sc->xl_xcvr) {
1589 	case XL_XCVR_10BT:
1590 		*media = IFM_ETHER|IFM_10_T;
1591 		xl_setmode(sc, *media);
1592 		break;
1593 	case XL_XCVR_AUI:
1594 		if (sc->xl_type == XL_TYPE_905B &&
1595 		    sc->xl_media == XL_MEDIAOPT_10FL) {
1596 			*media = IFM_ETHER|IFM_10_FL;
1597 			xl_setmode(sc, *media);
1598 		} else {
1599 			*media = IFM_ETHER|IFM_10_5;
1600 			xl_setmode(sc, *media);
1601 		}
1602 		break;
1603 	case XL_XCVR_COAX:
1604 		*media = IFM_ETHER|IFM_10_2;
1605 		xl_setmode(sc, *media);
1606 		break;
1607 	case XL_XCVR_AUTO:
1608 	case XL_XCVR_100BTX:
1609 	case XL_XCVR_MII:
1610 		/* Chosen by miibus */
1611 		break;
1612 	case XL_XCVR_100BFX:
1613 		*media = IFM_ETHER|IFM_100_FX;
1614 		break;
1615 	default:
1616 		device_printf(sc->xl_dev, "unknown XCVR type: %d\n",
1617 		    sc->xl_xcvr);
1618 		/*
1619 		 * This will probably be wrong, but it prevents
1620 		 * the ifmedia code from panicking.
1621 		 */
1622 		*media = IFM_ETHER|IFM_10_T;
1623 		break;
1624 	}
1625 
1626 	XL_UNLOCK(sc);
1627 }
1628 
1629 /*
1630  * Shutdown hardware and free up resources. This can be called any
1631  * time after the mutex has been initialized. It is called in both
1632  * the error case in attach and the normal detach case so it needs
1633  * to be careful about only freeing resources that have actually been
1634  * allocated.
1635  */
1636 static int
1637 xl_detach(device_t dev)
1638 {
1639 	struct xl_softc		*sc;
1640 	struct ifnet		*ifp;
1641 	int			rid, res;
1642 
1643 	sc = device_get_softc(dev);
1644 	ifp = sc->xl_ifp;
1645 
1646 	KASSERT(mtx_initialized(&sc->xl_mtx), ("xl mutex not initialized"));
1647 
1648 #ifdef DEVICE_POLLING
1649 	if (ifp && ifp->if_capenable & IFCAP_POLLING)
1650 		ether_poll_deregister(ifp);
1651 #endif
1652 
1653 	if (sc->xl_flags & XL_FLAG_USE_MMIO) {
1654 		rid = XL_PCI_LOMEM;
1655 		res = SYS_RES_MEMORY;
1656 	} else {
1657 		rid = XL_PCI_LOIO;
1658 		res = SYS_RES_IOPORT;
1659 	}
1660 
1661 	/* These should only be active if attach succeeded */
1662 	if (device_is_attached(dev)) {
1663 		XL_LOCK(sc);
1664 		xl_reset(sc);
1665 		xl_stop(sc);
1666 		XL_UNLOCK(sc);
1667 		taskqueue_drain(taskqueue_swi, &sc->xl_task);
1668 		callout_drain(&sc->xl_stat_callout);
1669 		ether_ifdetach(ifp);
1670 	}
1671 	if (sc->xl_miibus)
1672 		device_delete_child(dev, sc->xl_miibus);
1673 	bus_generic_detach(dev);
1674 	ifmedia_removeall(&sc->ifmedia);
1675 
1676 	if (sc->xl_intrhand)
1677 		bus_teardown_intr(dev, sc->xl_irq, sc->xl_intrhand);
1678 	if (sc->xl_irq)
1679 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->xl_irq);
1680 	if (sc->xl_fres != NULL)
1681 		bus_release_resource(dev, SYS_RES_MEMORY,
1682 		    XL_PCI_FUNCMEM, sc->xl_fres);
1683 	if (sc->xl_res)
1684 		bus_release_resource(dev, res, rid, sc->xl_res);
1685 
1686 	if (ifp)
1687 		if_free(ifp);
1688 
1689 	if (sc->xl_mtag) {
1690 		bus_dmamap_destroy(sc->xl_mtag, sc->xl_tmpmap);
1691 		bus_dma_tag_destroy(sc->xl_mtag);
1692 	}
1693 	if (sc->xl_ldata.xl_rx_tag) {
1694 		bus_dmamap_unload(sc->xl_ldata.xl_rx_tag,
1695 		    sc->xl_ldata.xl_rx_dmamap);
1696 		bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
1697 		    sc->xl_ldata.xl_rx_dmamap);
1698 		bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
1699 	}
1700 	if (sc->xl_ldata.xl_tx_tag) {
1701 		bus_dmamap_unload(sc->xl_ldata.xl_tx_tag,
1702 		    sc->xl_ldata.xl_tx_dmamap);
1703 		bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
1704 		    sc->xl_ldata.xl_tx_dmamap);
1705 		bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
1706 	}
1707 
1708 	mtx_destroy(&sc->xl_mtx);
1709 
1710 	return (0);
1711 }
1712 
1713 /*
1714  * Initialize the transmit descriptors.
1715  */
1716 static int
1717 xl_list_tx_init(struct xl_softc *sc)
1718 {
1719 	struct xl_chain_data	*cd;
1720 	struct xl_list_data	*ld;
1721 	int			error, i;
1722 
1723 	XL_LOCK_ASSERT(sc);
1724 
1725 	cd = &sc->xl_cdata;
1726 	ld = &sc->xl_ldata;
1727 	for (i = 0; i < XL_TX_LIST_CNT; i++) {
1728 		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
1729 		error = bus_dmamap_create(sc->xl_mtag, 0,
1730 		    &cd->xl_tx_chain[i].xl_map);
1731 		if (error)
1732 			return (error);
1733 		cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
1734 		    i * sizeof(struct xl_list);
1735 		if (i == (XL_TX_LIST_CNT - 1))
1736 			cd->xl_tx_chain[i].xl_next = NULL;
1737 		else
1738 			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
1739 	}
1740 
1741 	cd->xl_tx_free = &cd->xl_tx_chain[0];
1742 	cd->xl_tx_tail = cd->xl_tx_head = NULL;
1743 
1744 	bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
1745 	return (0);
1746 }
1747 
1748 /*
1749  * Initialize the transmit descriptors for the 90xB chips (circular ring).
1750  */
1751 static int
1752 xl_list_tx_init_90xB(struct xl_softc *sc)
1753 {
1754 	struct xl_chain_data	*cd;
1755 	struct xl_list_data	*ld;
1756 	int			error, i;
1757 
1758 	XL_LOCK_ASSERT(sc);
1759 
1760 	cd = &sc->xl_cdata;
1761 	ld = &sc->xl_ldata;
1762 	for (i = 0; i < XL_TX_LIST_CNT; i++) {
1763 		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
1764 		error = bus_dmamap_create(sc->xl_mtag, 0,
1765 		    &cd->xl_tx_chain[i].xl_map);
1766 		if (error)
1767 			return (error);
1768 		cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
1769 		    i * sizeof(struct xl_list);
1770 		if (i == (XL_TX_LIST_CNT - 1))
1771 			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[0];
1772 		else
1773 			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
1774 		if (i == 0)
1775 			cd->xl_tx_chain[i].xl_prev =
1776 			    &cd->xl_tx_chain[XL_TX_LIST_CNT - 1];
1777 		else
1778 			cd->xl_tx_chain[i].xl_prev =
1779 			    &cd->xl_tx_chain[i - 1];
1780 	}
1781 
1782 	bzero(ld->xl_tx_list, XL_TX_LIST_SZ);
1783 	ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY);
1784 
1785 	cd->xl_tx_prod = 1;
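	/*
	 * Descriptor 0 is pre-marked "empty" above and serves as the
	 * initial dummy entry, so both the producer and consumer
	 * indexes start at 1.
	 */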
1786 	cd->xl_tx_cons = 1;
1787 	cd->xl_tx_cnt = 0;
1788 
1789 	bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
1790 	return (0);
1791 }
1792 
1793 /*
1794  * Initialize the RX descriptors and allocate mbufs for them. Note that
1795  * we arrange the descriptors in a closed ring, so that the last descriptor
1796  * points back to the first.
1797  */
1798 static int
1799 xl_list_rx_init(struct xl_softc *sc)
1800 {
1801 	struct xl_chain_data	*cd;
1802 	struct xl_list_data	*ld;
1803 	int			error, i, next;
1804 	u_int32_t		nextptr;
1805 
1806 	XL_LOCK_ASSERT(sc);
1807 
1808 	cd = &sc->xl_cdata;
1809 	ld = &sc->xl_ldata;
1810 
1811 	for (i = 0; i < XL_RX_LIST_CNT; i++) {
1812 		cd->xl_rx_chain[i].xl_ptr = &ld->xl_rx_list[i];
1813 		error = bus_dmamap_create(sc->xl_mtag, 0,
1814 		    &cd->xl_rx_chain[i].xl_map);
1815 		if (error)
1816 			return (error);
1817 		error = xl_newbuf(sc, &cd->xl_rx_chain[i]);
1818 		if (error)
1819 			return (error);
1820 		if (i == (XL_RX_LIST_CNT - 1))
1821 			next = 0;
1822 		else
1823 			next = i + 1;
1824 		nextptr = ld->xl_rx_dmaaddr +
1825 		    next * sizeof(struct xl_list_onefrag);
1826 		cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[next];
1827 		ld->xl_rx_list[i].xl_next = htole32(nextptr);
1828 	}
1829 
1830 	bus_dmamap_sync(ld->xl_rx_tag, ld->xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
1831 	cd->xl_rx_head = &cd->xl_rx_chain[0];
1832 
1833 	return (0);
1834 }
1835 
1836 /*
1837  * Initialize an RX descriptor and attach an MBUF cluster.
1838  * If we fail to do so, we need to leave the old mbuf and
1839  * the old DMA map untouched so that it can be reused.
1840  */
1841 static int
1842 xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c)
1843 {
1844 	struct mbuf		*m_new = NULL;
1845 	bus_dmamap_t		map;
1846 	bus_dma_segment_t	segs[1];
1847 	int			error, nseg;
1848 
1849 	XL_LOCK_ASSERT(sc);
1850 
1851 	m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1852 	if (m_new == NULL)
1853 		return (ENOBUFS);
1854 
1855 	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1856 
1857 	/* Force longword alignment for packet payload. */
1858 	m_adj(m_new, ETHER_ALIGN);
1859 
1860 	error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, sc->xl_tmpmap, m_new,
1861 	    segs, &nseg, BUS_DMA_NOWAIT);
1862 	if (error) {
1863 		m_freem(m_new);
1864 		device_printf(sc->xl_dev, "can't map mbuf (error %d)\n",
1865 		    error);
1866 		return (error);
1867 	}
1868 	KASSERT(nseg == 1,
1869 	    ("%s: too many DMA segments (%d)", __func__, nseg));
1870 
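	/*
	 * The new mbuf was loaded into the spare map (xl_tmpmap), so
	 * swap it with the descriptor's old map; the old map becomes
	 * the new spare. A load failure above therefore never disturbs
	 * the mbuf/map pair already attached to the descriptor.
	 */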
1871 	bus_dmamap_unload(sc->xl_mtag, c->xl_map);
1872 	map = c->xl_map;
1873 	c->xl_map = sc->xl_tmpmap;
1874 	sc->xl_tmpmap = map;
1875 	c->xl_mbuf = m_new;
1876 	c->xl_ptr->xl_frag.xl_len = htole32(m_new->m_len | XL_LAST_FRAG);
1877 	c->xl_ptr->xl_status = 0;
1878 	c->xl_ptr->xl_frag.xl_addr = htole32(segs->ds_addr);
1879 	bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREREAD);
1880 	return (0);
1881 }
1882 
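/*
 * Resynchronize the driver's RX ring head with the hardware: scan
 * forward for the first descriptor with a non-zero status word.
 * Returns EAGAIN if the head pointer had fallen behind (the caller
 * should invoke xl_rxeof() again) or 0 if the ring is genuinely idle.
 */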
1883 static int
1884 xl_rx_resync(struct xl_softc *sc)
1885 {
1886 	struct xl_chain_onefrag	*pos;
1887 	int			i;
1888 
1889 	XL_LOCK_ASSERT(sc);
1890 
1891 	pos = sc->xl_cdata.xl_rx_head;
1892 
1893 	for (i = 0; i < XL_RX_LIST_CNT; i++) {
1894 		if (pos->xl_ptr->xl_status)
1895 			break;
1896 		pos = pos->xl_next;
1897 	}
1898 
1899 	if (i == XL_RX_LIST_CNT)
1900 		return (0);
1901 
1902 	sc->xl_cdata.xl_rx_head = pos;
1903 
1904 	return (EAGAIN);
1905 }
1906 
1907 /*
1908  * A frame has been uploaded: pass the resulting mbuf chain up to
1909  * the higher level protocols.
1910  */
1911 static void
1912 xl_rxeof(struct xl_softc *sc)
1913 {
1914 	struct mbuf		*m;
1915 	struct ifnet		*ifp = sc->xl_ifp;
1916 	struct xl_chain_onefrag	*cur_rx;
1917 	int			total_len = 0;
1918 	u_int32_t		rxstat;
1919 
1920 	XL_LOCK_ASSERT(sc);
1921 again:
1922 	bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap,
1923 	    BUS_DMASYNC_POSTREAD);
1924 	while ((rxstat = le32toh(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status))) {
1925 #ifdef DEVICE_POLLING
1926 		if (ifp->if_capenable & IFCAP_POLLING) {
1927 			if (sc->rxcycles <= 0)
1928 				break;
1929 			sc->rxcycles--;
1930 		}
1931 #endif
1932 		cur_rx = sc->xl_cdata.xl_rx_head;
1933 		sc->xl_cdata.xl_rx_head = cur_rx->xl_next;
1934 		total_len = rxstat & XL_RXSTAT_LENMASK;
1935 
1936 		/*
1937 		 * Since we have told the chip to allow large frames,
1938 		 * we need to trap giant frame errors in software. We allow
1939 		 * a little more than the normal frame size to account for
1940 		 * frames with VLAN tags.
1941 		 */
1942 		if (total_len > XL_MAX_FRAMELEN)
1943 			rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE);
1944 
1945 		/*
1946 		 * If an error occurs, update stats, clear the
1947 		 * status word and leave the mbuf cluster in place:
1948 		 * it should simply get re-used next time this descriptor
1949 		 * comes up in the ring.
1950 		 */
1951 		if (rxstat & XL_RXSTAT_UP_ERROR) {
1952 			ifp->if_ierrors++;
1953 			cur_rx->xl_ptr->xl_status = 0;
1954 			bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
1955 			    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
1956 			continue;
1957 		}
1958 
1959 		/*
1960 		 * If the error bit was not set, the upload complete
1961 		 * bit should be set which means we have a valid packet.
1962 		 * If not, something truly strange has happened.
1963 		 */
1964 		if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
1965 			device_printf(sc->xl_dev,
1966 			    "bad receive status -- packet dropped\n");
1967 			ifp->if_ierrors++;
1968 			cur_rx->xl_ptr->xl_status = 0;
1969 			bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
1970 			    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
1971 			continue;
1972 		}
1973 
1974 		/* No errors; receive the packet. */
1975 		bus_dmamap_sync(sc->xl_mtag, cur_rx->xl_map,
1976 		    BUS_DMASYNC_POSTREAD);
1977 		m = cur_rx->xl_mbuf;
1978 
1979 		/*
1980 		 * Try to conjure up a new mbuf cluster. If that
1981 		 * fails, it means we have an out of memory condition and
1982 		 * should leave the buffer in place and continue. This will
1983 		 * result in a lost packet, but there's little else we
1984 		 * can do in this situation.
1985 		 */
1986 		if (xl_newbuf(sc, cur_rx)) {
1987 			ifp->if_ierrors++;
1988 			cur_rx->xl_ptr->xl_status = 0;
1989 			bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
1990 			    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
1991 			continue;
1992 		}
1993 		bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
1994 		    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
1995 
1996 		ifp->if_ipackets++;
1997 		m->m_pkthdr.rcvif = ifp;
1998 		m->m_pkthdr.len = m->m_len = total_len;
1999 
2000 		if (ifp->if_capenable & IFCAP_RXCSUM) {
2001 			/* Do IP checksum checking. */
2002 			if (rxstat & XL_RXSTAT_IPCKOK)
2003 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2004 			if (!(rxstat & XL_RXSTAT_IPCKERR))
2005 				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2006 			if ((rxstat & XL_RXSTAT_TCPCOK &&
2007 			     !(rxstat & XL_RXSTAT_TCPCKERR)) ||
2008 			    (rxstat & XL_RXSTAT_UDPCKOK &&
2009 			     !(rxstat & XL_RXSTAT_UDPCKERR))) {
2010 				m->m_pkthdr.csum_flags |=
2011 					CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
2012 				m->m_pkthdr.csum_data = 0xffff;
2013 			}
2014 		}
2015 
2016 		XL_UNLOCK(sc);
2017 		(*ifp->if_input)(ifp, m);
2018 		XL_LOCK(sc);
2019 
2020 		/*
2021 		 * If we are running from the taskqueue, the interface
2022 		 * might have been stopped while we were passing the last
2023 		 * packet up the network stack.
2024 		 */
2025 		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
2026 			return;
2027 	}
2028 
2029 	/*
2030 	 * Handle the 'end of channel' condition. When the upload
2031 	 * engine hits the end of the RX ring, it will stall. This
2032 	 * is our cue to flush the RX ring, reload the uplist pointer
2033 	 * register and unstall the engine.
2034 	 * XXX This is actually a little goofy. With the ThunderLAN
2035 	 * chip, you get an interrupt when the receiver hits the end
2036 	 * of the receive ring, which tells you exactly when you
2037 	 * need to reload the ring pointer. Here we have to
2038 	 * fake it. I'm mad at myself for not being clever enough
2039 	 * to avoid the use of a goto here.
2040 	 */
2041 	if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
2042 		CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
2043 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
2044 		xl_wait(sc);
2045 		CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
2046 		sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0];
2047 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
2048 		goto again;
2049 	}
2050 }
2051 
2052 /*
2053  * Taskqueue wrapper for xl_rxeof().
2054  */
2055 static void
2056 xl_rxeof_task(void *arg, int pending)
2057 {
2058 	struct xl_softc *sc = (struct xl_softc *)arg;
2059 
2060 	XL_LOCK(sc);
2061 	if (sc->xl_ifp->if_drv_flags & IFF_DRV_RUNNING)
2062 		xl_rxeof(sc);
2063 	XL_UNLOCK(sc);
2064 }
2065 
2066 /*
2067  * A frame was downloaded to the chip. It's safe for us to clean up
2068  * the list buffers.
2069  */
2070 static void
2071 xl_txeof(struct xl_softc *sc)
2072 {
2073 	struct xl_chain		*cur_tx;
2074 	struct ifnet		*ifp = sc->xl_ifp;
2075 
2076 	XL_LOCK_ASSERT(sc);
2077 
2078 	/*
2079 	 * Go through our tx list and free mbufs for those
2080 	 * frames that have been downloaded. Note: the 3c905B
2081 	 * sets a special bit in the status word to let us
2082 	 * know that a frame has been downloaded, but the
2083 	 * original 3c900/3c905 adapters don't do that.
2084 	 * Consequently, we have to use a different test if
2085 	 * xl_type != XL_TYPE_905B.
2086 	 */
2087 	while (sc->xl_cdata.xl_tx_head != NULL) {
2088 		cur_tx = sc->xl_cdata.xl_tx_head;
2089 
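		/*
		 * A non-zero download list pointer means the chip is
		 * still working on this frame, so don't reclaim it yet.
		 */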
2090 		if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
2091 			break;
2092 
2093 		sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
2094 		bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
2095 		    BUS_DMASYNC_POSTWRITE);
2096 		bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
2097 		m_freem(cur_tx->xl_mbuf);
2098 		cur_tx->xl_mbuf = NULL;
2099 		ifp->if_opackets++;
2100 
2101 		cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
2102 		sc->xl_cdata.xl_tx_free = cur_tx;
2103 	}
2104 
2105 	if (sc->xl_cdata.xl_tx_head == NULL) {
2106 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2107 		sc->xl_wdog_timer = 0;
2108 		sc->xl_cdata.xl_tx_tail = NULL;
2109 	} else {
2110 		if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
2111 			!CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
2112 			CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
2113 				sc->xl_cdata.xl_tx_head->xl_phys);
2114 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
2115 		}
2116 	}
2117 }
2118 
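/*
 * 90xB flavor of xl_txeof(). These chips set XL_TXSTAT_DL_COMPLETE in
 * the descriptor status word, so we can walk the ring directly instead
 * of polling the download list pointer.
 */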
2119 static void
2120 xl_txeof_90xB(struct xl_softc *sc)
2121 {
2122 	struct xl_chain		*cur_tx = NULL;
2123 	struct ifnet		*ifp = sc->xl_ifp;
2124 	int			idx;
2125 
2126 	XL_LOCK_ASSERT(sc);
2127 
2128 	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
2129 	    BUS_DMASYNC_POSTREAD);
2130 	idx = sc->xl_cdata.xl_tx_cons;
2131 	while (idx != sc->xl_cdata.xl_tx_prod) {
2132 		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
2133 
2134 		if (!(le32toh(cur_tx->xl_ptr->xl_status) &
2135 		      XL_TXSTAT_DL_COMPLETE))
2136 			break;
2137 
2138 		if (cur_tx->xl_mbuf != NULL) {
2139 			bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
2140 			    BUS_DMASYNC_POSTWRITE);
2141 			bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
2142 			m_freem(cur_tx->xl_mbuf);
2143 			cur_tx->xl_mbuf = NULL;
2144 		}
2145 
2146 		ifp->if_opackets++;
2147 
2148 		sc->xl_cdata.xl_tx_cnt--;
2149 		XL_INC(idx, XL_TX_LIST_CNT);
2150 	}
2151 
2152 	if (sc->xl_cdata.xl_tx_cnt == 0)
2153 		sc->xl_wdog_timer = 0;
2154 	sc->xl_cdata.xl_tx_cons = idx;
2155 
2156 	if (cur_tx != NULL)
2157 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2158 }
2159 
2160 /*
2161  * TX 'end of channel' interrupt handler. Actually, we should
2162  * only get a 'TX complete' interrupt if there's a transmit error,
2163  * so this is really a TX error handler.
2164  */
2165 static void
2166 xl_txeoc(struct xl_softc *sc)
2167 {
2168 	u_int8_t		txstat;
2169 
2170 	XL_LOCK_ASSERT(sc);
2171 
2172 	while ((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
2173 		if (txstat & XL_TXSTATUS_UNDERRUN ||
2174 			txstat & XL_TXSTATUS_JABBER ||
2175 			txstat & XL_TXSTATUS_RECLAIM) {
2176 			device_printf(sc->xl_dev,
2177 			    "transmission error: %x\n", txstat);
2178 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2179 			xl_wait(sc);
2180 			if (sc->xl_type == XL_TYPE_905B) {
2181 				if (sc->xl_cdata.xl_tx_cnt) {
2182 					int			i;
2183 					struct xl_chain		*c;
2184 
2185 					i = sc->xl_cdata.xl_tx_cons;
2186 					c = &sc->xl_cdata.xl_tx_chain[i];
2187 					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
2188 					    c->xl_phys);
2189 					CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
2190 				}
2191 			} else {
2192 				if (sc->xl_cdata.xl_tx_head != NULL)
2193 					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
2194 					    sc->xl_cdata.xl_tx_head->xl_phys);
2195 			}
2196 			/*
2197 			 * Remember to set this for the
2198 			 * first generation 3c90X chips.
2199 			 */
2200 			CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
2201 			if (txstat & XL_TXSTATUS_UNDERRUN &&
2202 			    sc->xl_tx_thresh < XL_PACKET_SIZE) {
2203 				sc->xl_tx_thresh += XL_MIN_FRAMELEN;
2204 				device_printf(sc->xl_dev,
2205 "tx underrun, increasing tx start threshold to %d bytes\n", sc->xl_tx_thresh);
2206 			}
2207 			CSR_WRITE_2(sc, XL_COMMAND,
2208 			    XL_CMD_TX_SET_START|sc->xl_tx_thresh);
2209 			if (sc->xl_type == XL_TYPE_905B) {
2210 				CSR_WRITE_2(sc, XL_COMMAND,
2211 				XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
2212 			}
2213 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
2214 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
2215 		} else {
2216 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
2217 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
2218 		}
2219 		/*
2220 		 * Write an arbitrary byte to the TX_STATUS register
2221 		 * to clear this interrupt/error and advance to the next.
2222 		 */
2223 		CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
2224 	}
2225 }
2226 
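/*
 * Interrupt handler. A status of 0xFFFF most likely means the card is
 * no longer present (e.g. it was ejected), so treat it as "no more
 * interrupts pending" and bail out.
 */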
2227 static void
2228 xl_intr(void *arg)
2229 {
2230 	struct xl_softc		*sc = arg;
2231 	struct ifnet		*ifp = sc->xl_ifp;
2232 	u_int16_t		status;
2233 
2234 	XL_LOCK(sc);
2235 
2236 #ifdef DEVICE_POLLING
2237 	if (ifp->if_capenable & IFCAP_POLLING) {
2238 		XL_UNLOCK(sc);
2239 		return;
2240 	}
2241 #endif
2242 
2243 	while ((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS &&
2244 	    status != 0xFFFF) {
2245 		CSR_WRITE_2(sc, XL_COMMAND,
2246 		    XL_CMD_INTR_ACK|(status & XL_INTRS));
2247 
2248 		if (status & XL_STAT_UP_COMPLETE) {
2249 			int	curpkts;
2250 
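			/*
			 * If the upload-complete interrupt fired but
			 * xl_rxeof() found nothing, our ring head is
			 * probably out of sync with the hardware;
			 * resync and retry until the ring is empty.
			 */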
2251 			curpkts = ifp->if_ipackets;
2252 			xl_rxeof(sc);
2253 			if (curpkts == ifp->if_ipackets) {
2254 				while (xl_rx_resync(sc))
2255 					xl_rxeof(sc);
2256 			}
2257 		}
2258 
2259 		if (status & XL_STAT_DOWN_COMPLETE) {
2260 			if (sc->xl_type == XL_TYPE_905B)
2261 				xl_txeof_90xB(sc);
2262 			else
2263 				xl_txeof(sc);
2264 		}
2265 
2266 		if (status & XL_STAT_TX_COMPLETE) {
2267 			ifp->if_oerrors++;
2268 			xl_txeoc(sc);
2269 		}
2270 
2271 		if (status & XL_STAT_ADFAIL) {
2272 			xl_reset(sc);
2273 			xl_init_locked(sc);
2274 		}
2275 
2276 		if (status & XL_STAT_STATSOFLOW) {
2277 			sc->xl_stats_no_timeout = 1;
2278 			xl_stats_update_locked(sc);
2279 			sc->xl_stats_no_timeout = 0;
2280 		}
2281 	}
2282 
2283 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
2284 		if (sc->xl_type == XL_TYPE_905B)
2285 			xl_start_90xB_locked(ifp);
2286 		else
2287 			xl_start_locked(ifp);
2288 	}
2289 
2290 	XL_UNLOCK(sc);
2291 }
2292 
2293 #ifdef DEVICE_POLLING
2294 static void
2295 xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2296 {
2297 	struct xl_softc *sc = ifp->if_softc;
2298 
2299 	XL_LOCK(sc);
2300 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2301 		xl_poll_locked(ifp, cmd, count);
2302 	XL_UNLOCK(sc);
2303 }
2304 
2305 static void
2306 xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
2307 {
2308 	struct xl_softc *sc = ifp->if_softc;
2309 
2310 	XL_LOCK_ASSERT(sc);
2311 
2312 	sc->rxcycles = count;
2313 	xl_rxeof(sc);
2314 	if (sc->xl_type == XL_TYPE_905B)
2315 		xl_txeof_90xB(sc);
2316 	else
2317 		xl_txeof(sc);
2318 
2319 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
2320 		if (sc->xl_type == XL_TYPE_905B)
2321 			xl_start_90xB_locked(ifp);
2322 		else
2323 			xl_start_locked(ifp);
2324 	}
2325 
2326 	if (cmd == POLL_AND_CHECK_STATUS) {
2327 		u_int16_t status;
2328 
2329 		status = CSR_READ_2(sc, XL_STATUS);
2330 		if (status & XL_INTRS && status != 0xFFFF) {
2331 			CSR_WRITE_2(sc, XL_COMMAND,
2332 			    XL_CMD_INTR_ACK|(status & XL_INTRS));
2333 
2334 			if (status & XL_STAT_TX_COMPLETE) {
2335 				ifp->if_oerrors++;
2336 				xl_txeoc(sc);
2337 			}
2338 
2339 			if (status & XL_STAT_ADFAIL) {
2340 				xl_reset(sc);
2341 				xl_init_locked(sc);
2342 			}
2343 
2344 			if (status & XL_STAT_STATSOFLOW) {
2345 				sc->xl_stats_no_timeout = 1;
2346 				xl_stats_update_locked(sc);
2347 				sc->xl_stats_no_timeout = 0;
2348 			}
2349 		}
2350 	}
2351 }
2352 #endif /* DEVICE_POLLING */
2353 
2354 /*
2355  * XXX: This is an entry point for the callout and is called with the lock held.
2356  */
2357 static void
2358 xl_stats_update(void *xsc)
2359 {
2360 	struct xl_softc *sc = xsc;
2361 
2362 	XL_LOCK_ASSERT(sc);
2363 
2364 	if (xl_watchdog(sc) == EJUSTRETURN)
2365 		return;
2366 
2367 	xl_stats_update_locked(sc);
2368 }
2369 
2370 static void
2371 xl_stats_update_locked(struct xl_softc *sc)
2372 {
2373 	struct ifnet		*ifp = sc->xl_ifp;
2374 	struct xl_stats		xl_stats;
2375 	u_int8_t		*p;
2376 	int			i;
2377 	struct mii_data		*mii = NULL;
2378 
2379 	XL_LOCK_ASSERT(sc);
2380 
2381 	bzero((char *)&xl_stats, sizeof(struct xl_stats));
2382 
2383 	if (sc->xl_miibus != NULL)
2384 		mii = device_get_softc(sc->xl_miibus);
2385 
2386 	p = (u_int8_t *)&xl_stats;
2387 
2388 	/* Read all the stats registers. */
2389 	XL_SEL_WIN(6);
2390 
2391 	for (i = 0; i < 16; i++)
2392 		*p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);
2393 
2394 	ifp->if_ierrors += xl_stats.xl_rx_overrun;
2395 
2396 	ifp->if_collisions += xl_stats.xl_tx_multi_collision +
2397 	    xl_stats.xl_tx_single_collision + xl_stats.xl_tx_late_collision;
2398 
2399 	/*
2400 	 * Boomerang and cyclone chips have an extra stats counter
2401 	 * in window 4 (BadSSD). We have to read this too in order
2402 	 * to clear out all the stats registers and avoid a statsoflow
2403 	 * interrupt.
2404 	 */
2405 	XL_SEL_WIN(4);
2406 	CSR_READ_1(sc, XL_W4_BADSSD);
2407 
2408 	if ((mii != NULL) && (!sc->xl_stats_no_timeout))
2409 		mii_tick(mii);
2410 
2411 	XL_SEL_WIN(7);
2412 
2413 	if (!sc->xl_stats_no_timeout)
2414 		callout_reset(&sc->xl_stat_callout, hz, xl_stats_update, sc);
2415 }
2416 
2417 /*
2418  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
2419  * pointers to the fragment pointers.
2420  */
2421 static int
2422 xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf **m_head)
2423 {
2424 	struct mbuf		*m_new;
2425 	struct ifnet		*ifp = sc->xl_ifp;
2426 	int			error, i, nseg, total_len;
2427 	u_int32_t		status;
2428 
2429 	XL_LOCK_ASSERT(sc);
2430 
2431 	error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, c->xl_map, *m_head,
2432 	    sc->xl_cdata.xl_tx_segs, &nseg, BUS_DMA_NOWAIT);
2433 
2434 	if (error && error != EFBIG) {
2435 		if_printf(ifp, "can't map mbuf (error %d)\n", error);
2436 		return (error);
2437 	}
2438 
2439 	/*
2440 	 * Handle special case: we used up all 63 fragments,
2441 	 * but we have more mbufs left in the chain. Copy the
2442 	 * data into an mbuf cluster. Note that we don't
2443 	 * bother clearing the values in the other fragment
2444 	 * pointers/counters; it wouldn't gain us anything,
2445 	 * and would waste cycles.
2446 	 */
2447 	if (error) {
2448 		m_new = m_collapse(*m_head, M_DONTWAIT, XL_MAXFRAGS);
2449 		if (m_new == NULL) {
2450 			m_freem(*m_head);
2451 			*m_head = NULL;
2452 			return (ENOBUFS);
2453 		}
2454 		*m_head = m_new;
2455 
2456 		error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, c->xl_map,
2457 		    *m_head, sc->xl_cdata.xl_tx_segs, &nseg, BUS_DMA_NOWAIT);
2458 		if (error) {
2459 			m_freem(*m_head);
2460 			*m_head = NULL;
2461 			if_printf(ifp, "can't map mbuf (error %d)\n", error);
2462 			return (error);
2463 		}
2464 	}
2465 
2466 	KASSERT(nseg <= XL_MAXFRAGS,
2467 	    ("%s: too many DMA segments (%d)", __func__, nseg));
2468 	if (nseg == 0) {
2469 		m_freem(*m_head);
2470 		*m_head = NULL;
2471 		return (EIO);
2472 	}
2473 
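	/*
	 * Copy the DMA segment list into the descriptor's fragment
	 * array, then flag the last fragment with XL_LAST_FRAG so the
	 * chip knows where the packet ends.
	 */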
2474 	total_len = 0;
2475 	for (i = 0; i < nseg; i++) {
2476 		KASSERT(sc->xl_cdata.xl_tx_segs[i].ds_len <= MCLBYTES,
2477 		    ("segment size too large"));
2478 		c->xl_ptr->xl_frag[i].xl_addr =
2479 		    htole32(sc->xl_cdata.xl_tx_segs[i].ds_addr);
2480 		c->xl_ptr->xl_frag[i].xl_len =
2481 		    htole32(sc->xl_cdata.xl_tx_segs[i].ds_len);
2482 		total_len += sc->xl_cdata.xl_tx_segs[i].ds_len;
2483 	}
2484 	c->xl_ptr->xl_frag[nseg - 1].xl_len =
2485 	    htole32(sc->xl_cdata.xl_tx_segs[nseg - 1].ds_len | XL_LAST_FRAG);
2486 	c->xl_ptr->xl_status = htole32(total_len);
2487 	c->xl_ptr->xl_next = 0;
2488 
2489 	if (sc->xl_type == XL_TYPE_905B) {
2490 		status = XL_TXSTAT_RND_DEFEAT;
2491 
2492 #ifndef XL905B_TXCSUM_BROKEN
2493 		if ((*m_head)->m_pkthdr.csum_flags) {
2494 			if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
2495 				status |= XL_TXSTAT_IPCKSUM;
2496 			if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
2497 				status |= XL_TXSTAT_TCPCKSUM;
2498 			if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
2499 				status |= XL_TXSTAT_UDPCKSUM;
2500 		}
2500 		}
2501 #endif
2502 		c->xl_ptr->xl_status = htole32(status);
2503 	}
2504 
2505 	c->xl_mbuf = *m_head;
2506 	bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREWRITE);
2507 	return (0);
2508 }
2509 
2510 /*
2511  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2512  * to the mbuf data regions directly in the transmit lists. We also save a
2513  * copy of the pointers since the transmit list fragment pointers are
2514  * physical addresses.
2515  */
2516 
2517 static void
2518 xl_start(struct ifnet *ifp)
2519 {
2520 	struct xl_softc		*sc = ifp->if_softc;
2521 
2522 	XL_LOCK(sc);
2523 
2524 	if (sc->xl_type == XL_TYPE_905B)
2525 		xl_start_90xB_locked(ifp);
2526 	else
2527 		xl_start_locked(ifp);
2528 
2529 	XL_UNLOCK(sc);
2530 }
2531 
2532 static void
2533 xl_start_locked(struct ifnet *ifp)
2534 {
2535 	struct xl_softc		*sc = ifp->if_softc;
2536 	struct mbuf		*m_head = NULL;
2537 	struct xl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;
2538 	u_int32_t		status;
2539 	int			error;
2540 
2541 	XL_LOCK_ASSERT(sc);
2542 
2543 	/*
2544 	 * Check for an available queue slot. If there are none,
2545 	 * punt.
2546 	 */
2547 	if (sc->xl_cdata.xl_tx_free == NULL) {
2548 		xl_txeoc(sc);
2549 		xl_txeof(sc);
2550 		if (sc->xl_cdata.xl_tx_free == NULL) {
2551 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2552 			return;
2553 		}
2554 	}
2555 
2556 	start_tx = sc->xl_cdata.xl_tx_free;
2557 
2558 	for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2559 	    sc->xl_cdata.xl_tx_free != NULL;) {
2560 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2561 		if (m_head == NULL)
2562 			break;
2563 
2564 		/* Pick a descriptor off the free list. */
2565 		cur_tx = sc->xl_cdata.xl_tx_free;
2566 
2567 		/* Pack the data into the descriptor. */
2568 		error = xl_encap(sc, cur_tx, &m_head);
2569 		if (error) {
2570 			if (m_head == NULL)
2571 				break;
2572 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2573 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2574 			break;
2575 		}
2576 
2577 		sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
2578 		cur_tx->xl_next = NULL;
2579 
2580 		/* Chain it together. */
2581 		if (prev != NULL) {
2582 			prev->xl_next = cur_tx;
2583 			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
2584 		}
2585 		prev = cur_tx;
2586 
2587 		/*
2588 		 * If there's a BPF listener, bounce a copy of this frame
2589 		 * to him.
2590 		 */
2591 		BPF_MTAP(ifp, cur_tx->xl_mbuf);
2592 	}
2593 
2594 	/*
2595 	 * If there are no packets queued, bail.
2596 	 */
2597 	if (cur_tx == NULL)
2598 		return;
2599 
2600 	/*
2601 	 * Place the request for the download-complete interrupt
2602 	 * in the last descriptor in the chain. This way, if
2603 	 * we're chaining several packets at once, we'll only
2604 	 * get an interrupt once for the whole chain rather than
2605 	 * once for each packet.
2606 	 */
2607 	cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) |
2608 	    XL_TXSTAT_DL_INTR);
2609 	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
2610 	    BUS_DMASYNC_PREWRITE);
2611 
2612 	/*
2613 	 * Queue the packets. If the TX channel is clear, update
2614 	 * the downlist pointer register.
2615 	 */
2616 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
2617 	xl_wait(sc);
2618 
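	/*
	 * If frames are already queued, append the new chain to the
	 * existing tail and clear the old tail's interrupt request so
	 * that the whole batch still generates only one interrupt.
	 */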
2619 	if (sc->xl_cdata.xl_tx_head != NULL) {
2620 		sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
2621 		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
2622 		    htole32(start_tx->xl_phys);
2623 		status = sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status;
2624 		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status =
2625 		    htole32(le32toh(status) & ~XL_TXSTAT_DL_INTR);
2626 		sc->xl_cdata.xl_tx_tail = cur_tx;
2627 	} else {
2628 		sc->xl_cdata.xl_tx_head = start_tx;
2629 		sc->xl_cdata.xl_tx_tail = cur_tx;
2630 	}
2631 	if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
2632 		CSR_WRITE_4(sc, XL_DOWNLIST_PTR, start_tx->xl_phys);
2633 
2634 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
2635 
2636 	XL_SEL_WIN(7);
2637 
2638 	/*
2639 	 * Set a timeout in case the chip goes out to lunch.
2640 	 */
2641 	sc->xl_wdog_timer = 5;
2642 
2643 	/*
2644 	 * XXX Under certain conditions, usually on slower machines
2645 	 * where interrupts may be dropped, it's possible for the
2646 	 * adapter to chew up all the buffers in the receive ring
2647 	 * and stall, without us being able to do anything about it.
2648 	 * To guard against this, we need to make a pass over the
2649 	 * RX queue to make sure there aren't any packets pending.
2650 	 * Doing it here means we can flush the receive ring at the
2651 	 * same time the chip is DMAing the transmit descriptors we
2652 	 * just gave it.
2653 	 *
2654 	 * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
2655 	 * nature of their chips in all their marketing literature;
2656 	 * we may as well take advantage of it. :)
2657 	 */
2658 	taskqueue_enqueue(taskqueue_swi, &sc->xl_task);
2659 }
2660 
2661 static void
2662 xl_start_90xB_locked(struct ifnet *ifp)
2663 {
2664 	struct xl_softc		*sc = ifp->if_softc;
2665 	struct mbuf		*m_head = NULL;
2666 	struct xl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;
2667 	int			error, idx;
2668 
2669 	XL_LOCK_ASSERT(sc);
2670 
2671 	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
2672 		return;
2673 
2674 	idx = sc->xl_cdata.xl_tx_prod;
2675 	start_tx = &sc->xl_cdata.xl_tx_chain[idx];
2676 
2677 	for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2678 	    sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL;) {
2679 		if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
2680 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2681 			break;
2682 		}
2683 
2684 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2685 		if (m_head == NULL)
2686 			break;
2687 
2688 		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
2689 
2690 		/* Pack the data into the descriptor. */
2691 		error = xl_encap(sc, cur_tx, &m_head);
2692 		if (error) {
2693 			if (m_head == NULL)
2694 				break;
2695 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2696 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2697 			break;
2698 		}
2699 
2700 		/* Chain it together. */
2701 		if (prev != NULL)
2702 			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
2703 		prev = cur_tx;
2704 
2705 		/*
2706 		 * If there's a BPF listener, bounce a copy of this frame
2707 		 * to him.
2708 		 */
2709 		BPF_MTAP(ifp, cur_tx->xl_mbuf);
2710 
2711 		XL_INC(idx, XL_TX_LIST_CNT);
2712 		sc->xl_cdata.xl_tx_cnt++;
2713 	}
2714 
2715 	/*
2716 	 * If there are no packets queued, bail.
2717 	 */
2718 	if (cur_tx == NULL)
2719 		return;
2720 
2721 	/*
2722 	 * Place the request for the download-complete interrupt
2723 	 * in the last descriptor in the chain. This way, if
2724 	 * we're chaining several packets at once, we'll only
2725 	 * get an interrupt once for the whole chain rather than
2726 	 * once for each packet.
2727 	 */
2728 	cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) |
2729 	    XL_TXSTAT_DL_INTR);
2730 	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
2731 	    BUS_DMASYNC_PREWRITE);
2732 
2733 	/* Start transmission */
2734 	sc->xl_cdata.xl_tx_prod = idx;
2735 	start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys);
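	/*
	 * No explicit doorbell write is needed here: for 90xB chips
	 * xl_init_locked() programs XL_DOWN_POLL, so the chip polls the
	 * ring and will pick up the newly linked descriptor on its own.
	 */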
2736 
2737 	/*
2738 	 * Set a timeout in case the chip goes out to lunch.
2739 	 */
2740 	sc->xl_wdog_timer = 5;
2741 }
2742 
2743 static void
2744 xl_init(void *xsc)
2745 {
2746 	struct xl_softc		*sc = xsc;
2747 
2748 	XL_LOCK(sc);
2749 	xl_init_locked(sc);
2750 	XL_UNLOCK(sc);
2751 }
2752 
2753 static void
2754 xl_init_locked(struct xl_softc *sc)
2755 {
2756 	struct ifnet		*ifp = sc->xl_ifp;
2757 	int			error, i;
2758 	u_int16_t		rxfilt = 0;
2759 	struct mii_data		*mii = NULL;
2760 
2761 	XL_LOCK_ASSERT(sc);
2762 
2763 	/*
2764 	 * Cancel pending I/O and free all RX/TX buffers.
2765 	 */
2766 	xl_stop(sc);
2767 
2768 	if (sc->xl_miibus == NULL) {
2769 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
2770 		xl_wait(sc);
2771 	}
2772 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2773 	xl_wait(sc);
2774 	DELAY(10000);
2775 
2776 	if (sc->xl_miibus != NULL)
2777 		mii = device_get_softc(sc->xl_miibus);
2778 
2779 	/* Init our MAC address */
2780 	XL_SEL_WIN(2);
2781 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
2782 		CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
2783 				IF_LLADDR(sc->xl_ifp)[i]);
2784 	}
2785 
2786 	/* Clear the station mask. */
2787 	for (i = 0; i < 3; i++)
2788 		CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
2789 #ifdef notdef
2790 	/* Reset TX and RX. */
2791 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
2792 	xl_wait(sc);
2793 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2794 	xl_wait(sc);
2795 #endif
2796 	/* Init circular RX list. */
2797 	error = xl_list_rx_init(sc);
2798 	if (error) {
2799 		device_printf(sc->xl_dev, "initialization of the rx ring failed (%d)\n",
2800 		    error);
2801 		xl_stop(sc);
2802 		return;
2803 	}
2804 
2805 	/* Init TX descriptors. */
2806 	if (sc->xl_type == XL_TYPE_905B)
2807 		error = xl_list_tx_init_90xB(sc);
2808 	else
2809 		error = xl_list_tx_init(sc);
2810 	if (error) {
2811 		device_printf(sc->xl_dev, "initialization of the tx ring failed (%d)\n",
2812 		    error);
2813 		xl_stop(sc);
2814 		return;
2815 	}
2816 
2817 	/*
2818 	 * Set the TX freethresh value.
2819 	 * Note that this has no effect on 3c905B "cyclone"
2820 	 * cards but is required for 3c900/3c905 "boomerang"
2821 	 * cards in order to enable the download engine.
2822 	 */
2823 	CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
2824 
2825 	/* Set the TX start threshold for best performance. */
2826 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);
2827 
2828 	/*
2829 	 * If this is a 3c905B, also set the tx reclaim threshold.
2830 	 * This helps cut down on the number of tx reclaim errors
2831 	 * that could happen on a busy network. The chip multiplies
2832 	 * the register value by 16 to obtain the actual threshold
2833 	 * in bytes, so we divide by 16 when setting the value here.
2834 	 * The existing threshold value can be examined by reading
2835 	 * the register at offset 9 in window 5.
2836 	 */
2837 	if (sc->xl_type == XL_TYPE_905B) {
2838 		CSR_WRITE_2(sc, XL_COMMAND,
2839 		    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
2840 	}
2841 
2842 	/* Set RX filter bits. */
2843 	XL_SEL_WIN(5);
2844 	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
2845 
2846 	/* Set the individual bit to receive frames for this host only. */
2847 	rxfilt |= XL_RXFILTER_INDIVIDUAL;
2848 
2849 	/* If we want promiscuous mode, set the allframes bit. */
2850 	if (ifp->if_flags & IFF_PROMISC) {
2851 		rxfilt |= XL_RXFILTER_ALLFRAMES;
2852 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2853 	} else {
2854 		rxfilt &= ~XL_RXFILTER_ALLFRAMES;
2855 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2856 	}
2857 
2858 	/*
2859 	 * Set the capture-broadcast bit to receive broadcast frames.
2860 	 */
2861 	if (ifp->if_flags & IFF_BROADCAST) {
2862 		rxfilt |= XL_RXFILTER_BROADCAST;
2863 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2864 	} else {
2865 		rxfilt &= ~XL_RXFILTER_BROADCAST;
2866 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2867 	}
2868 
2869 	/*
2870 	 * Program the multicast filter, if necessary.
2871 	 */
2872 	if (sc->xl_type == XL_TYPE_905B)
2873 		xl_setmulti_hash(sc);
2874 	else
2875 		xl_setmulti(sc);
2876 
2877 	/*
2878 	 * Load the address of the RX list. We have to
2879 	 * stall the upload engine before we can manipulate
2880 	 * the uplist pointer register, then unstall it when
2881 	 * we're finished. We also have to wait for the
2882 	 * stall command to complete before proceeding.
2883 	 * Note that we have to do this after any RX resets
2884 	 * have completed since the uplist register is cleared
2885 	 * by a reset.
2886 	 */
2887 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
2888 	xl_wait(sc);
2889 	CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
2890 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
2891 	xl_wait(sc);
2892 
2893 	if (sc->xl_type == XL_TYPE_905B) {
2894 		/* Set polling interval */
2895 		CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
2896 		/* Load the address of the TX list */
2897 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
2898 		xl_wait(sc);
2899 		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
2900 		    sc->xl_cdata.xl_tx_chain[0].xl_phys);
2901 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
2902 		xl_wait(sc);
2903 	}
2904 
2905 	/*
2906 	 * If the coax transceiver is on, make sure to enable
2907 	 * the DC-DC converter.
2908 	 */
2909 	XL_SEL_WIN(3);
2910 	if (sc->xl_xcvr == XL_XCVR_COAX)
2911 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
2912 	else
2913 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
2914 
2915 	/*
2916 	 * Increase the packet size to allow reception of 802.1Q or ISL packets.
2917 	 * For the 3c90x chip, set the 'allow large packets' bit in the MAC
2918 	 * control register. For 3c90xB/C chips, use the RX packet size
2919 	 * register.
2920 	 */
2921 
2922 	if (sc->xl_type == XL_TYPE_905B)
2923 		CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE);
2924 	else {
2925 		u_int8_t macctl;
2926 		macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
2927 		macctl |= XL_MACCTRL_ALLOW_LARGE_PACK;
2928 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
2929 	}
2930 
2931 	/* Clear out the stats counters. */
2932 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
2933 	sc->xl_stats_no_timeout = 1;
2934 	xl_stats_update_locked(sc);
2935 	sc->xl_stats_no_timeout = 0;
2936 	XL_SEL_WIN(4);
2937 	CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
2938 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);
2939 
2940 	/*
2941 	 * Enable interrupts.
2942 	 */
2943 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
2944 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
2945 #ifdef DEVICE_POLLING
2946 	/* Disable interrupts if we are polling. */
2947 	if (ifp->if_capenable & IFCAP_POLLING)
2948 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
2949 	else
2950 #endif
2951 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
2952 	if (sc->xl_flags & XL_FLAG_FUNCREG)
2953 	    bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);
2954 
2955 	/* Set the RX early threshold */
2956 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >> 2));
2957 	CSR_WRITE_2(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);
2958 
2959 	/* Enable receiver and transmitter. */
2960 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
2961 	xl_wait(sc);
2962 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
2963 	xl_wait(sc);
2964 
2965 	/* XXX Downcall to miibus. */
2966 	if (mii != NULL)
2967 		mii_mediachg(mii);
2968 
2969 	/* Select window 7 for normal operations. */
2970 	XL_SEL_WIN(7);
2971 
2972 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2973 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2974 
2975 	sc->xl_wdog_timer = 0;
2976 	callout_reset(&sc->xl_stat_callout, hz, xl_stats_update, sc);
2977 }
2978 
2979 /*
2980  * Set media options.
2981  */
2982 static int
2983 xl_ifmedia_upd(struct ifnet *ifp)
2984 {
2985 	struct xl_softc		*sc = ifp->if_softc;
2986 	struct ifmedia		*ifm = NULL;
2987 	struct mii_data		*mii = NULL;
2988 
2989 	XL_LOCK(sc);
2990 
2991 	if (sc->xl_miibus != NULL)
2992 		mii = device_get_softc(sc->xl_miibus);
2993 	if (mii == NULL)
2994 		ifm = &sc->ifmedia;
2995 	else
2996 		ifm = &mii->mii_media;
2997 
2998 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2999 	case IFM_100_FX:
3000 	case IFM_10_FL:
3001 	case IFM_10_2:
3002 	case IFM_10_5:
3003 		xl_setmode(sc, ifm->ifm_media);
3004 		XL_UNLOCK(sc);
3005 		return (0);
3006 	}
3007 
3008 	if (sc->xl_media & XL_MEDIAOPT_MII ||
3009 	    sc->xl_media & XL_MEDIAOPT_BTX ||
3010 	    sc->xl_media & XL_MEDIAOPT_BT4) {
3011 		xl_init_locked(sc);
3012 	} else {
3013 		xl_setmode(sc, ifm->ifm_media);
3014 	}
3015 
3016 	XL_UNLOCK(sc);
3017 
3018 	return (0);
3019 }
3020 
3021 /*
3022  * Report current media status.
3023  */
3024 static void
3025 xl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3026 {
3027 	struct xl_softc		*sc = ifp->if_softc;
3028 	u_int32_t		icfg;
3029 	u_int16_t		status = 0;
3030 	struct mii_data		*mii = NULL;
3031 
3032 	XL_LOCK(sc);
3033 
3034 	if (sc->xl_miibus != NULL)
3035 		mii = device_get_softc(sc->xl_miibus);
3036 
3037 	XL_SEL_WIN(4);
3038 	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
3039 
3040 	XL_SEL_WIN(3);
3041 	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
3042 	icfg >>= XL_ICFG_CONNECTOR_BITS;
3043 
3044 	ifmr->ifm_active = IFM_ETHER;
3045 	ifmr->ifm_status = IFM_AVALID;
3046 
3047 	if ((status & XL_MEDIASTAT_CARRIER) == 0)
3048 		ifmr->ifm_status |= IFM_ACTIVE;
3049 
3050 	switch (icfg) {
3051 	case XL_XCVR_10BT:
3052 		ifmr->ifm_active = IFM_ETHER|IFM_10_T;
3053 		if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
3054 			ifmr->ifm_active |= IFM_FDX;
3055 		else
3056 			ifmr->ifm_active |= IFM_HDX;
3057 		break;
3058 	case XL_XCVR_AUI:
3059 		if (sc->xl_type == XL_TYPE_905B &&
3060 		    sc->xl_media == XL_MEDIAOPT_10FL) {
3061 			ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
3062 			if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
3063 				ifmr->ifm_active |= IFM_FDX;
3064 			else
3065 				ifmr->ifm_active |= IFM_HDX;
3066 		} else
3067 			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
3068 		break;
3069 	case XL_XCVR_COAX:
3070 		ifmr->ifm_active = IFM_ETHER|IFM_10_2;
3071 		break;
3072 	/*
3073 	 * XXX MII and BTX/AUTO should be separate cases.
3074 	 */
3075 
3076 	case XL_XCVR_100BTX:
3077 	case XL_XCVR_AUTO:
3078 	case XL_XCVR_MII:
3079 		if (mii != NULL) {
3080 			mii_pollstat(mii);
3081 			ifmr->ifm_active = mii->mii_media_active;
3082 			ifmr->ifm_status = mii->mii_media_status;
3083 		}
3084 		break;
3085 	case XL_XCVR_100BFX:
3086 		ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
3087 		break;
3088 	default:
3089 		if_printf(ifp, "unknown XCVR type: %d\n", icfg);
3090 		break;
3091 	}
3092 
3093 	XL_UNLOCK(sc);
3094 }
3095 
3096 static int
3097 xl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3098 {
3099 	struct xl_softc		*sc = ifp->if_softc;
3100 	struct ifreq		*ifr = (struct ifreq *) data;
3101 	int			error = 0;
3102 	struct mii_data		*mii = NULL;
3103 	u_int8_t		rxfilt;
3104 
3105 	switch (command) {
3106 	case SIOCSIFFLAGS:
3107 		XL_LOCK(sc);
3108 
3109 		XL_SEL_WIN(5);
3110 		rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
3111 		if (ifp->if_flags & IFF_UP) {
3112 			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3113 			    ifp->if_flags & IFF_PROMISC &&
3114 			    !(sc->xl_if_flags & IFF_PROMISC)) {
3115 				rxfilt |= XL_RXFILTER_ALLFRAMES;
3116 				CSR_WRITE_2(sc, XL_COMMAND,
3117 				    XL_CMD_RX_SET_FILT|rxfilt);
3118 				XL_SEL_WIN(7);
3119 			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3120 			    !(ifp->if_flags & IFF_PROMISC) &&
3121 			    sc->xl_if_flags & IFF_PROMISC) {
3122 				rxfilt &= ~XL_RXFILTER_ALLFRAMES;
3123 				CSR_WRITE_2(sc, XL_COMMAND,
3124 				    XL_CMD_RX_SET_FILT|rxfilt);
3125 				XL_SEL_WIN(7);
3126 			} else {
3127 				if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
3128 					xl_init_locked(sc);
3129 			}
3130 		} else {
3131 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3132 				xl_stop(sc);
3133 		}
3134 		sc->xl_if_flags = ifp->if_flags;
3135 		XL_UNLOCK(sc);
3136 		error = 0;
3137 		break;
3138 	case SIOCADDMULTI:
3139 	case SIOCDELMULTI:
3140 		/* XXX Downcall from if_addmulti() possibly with locks held. */
3141 		XL_LOCK(sc);
3142 		if (sc->xl_type == XL_TYPE_905B)
3143 			xl_setmulti_hash(sc);
3144 		else
3145 			xl_setmulti(sc);
3146 		XL_UNLOCK(sc);
3147 		error = 0;
3148 		break;
3149 	case SIOCGIFMEDIA:
3150 	case SIOCSIFMEDIA:
3151 		if (sc->xl_miibus != NULL)
3152 			mii = device_get_softc(sc->xl_miibus);
3153 		if (mii == NULL)
3154 			error = ifmedia_ioctl(ifp, ifr,
3155 			    &sc->ifmedia, command);
3156 		else
3157 			error = ifmedia_ioctl(ifp, ifr,
3158 			    &mii->mii_media, command);
3159 		break;
3160 	case SIOCSIFCAP:
3161 #ifdef DEVICE_POLLING
3162 		if (ifr->ifr_reqcap & IFCAP_POLLING &&
3163 		    !(ifp->if_capenable & IFCAP_POLLING)) {
3164 			error = ether_poll_register(xl_poll, ifp);
3165 			if (error)
3166 				return (error);
3167 			XL_LOCK(sc);
3168 			/* Disable interrupts */
3169 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
3170 			ifp->if_capenable |= IFCAP_POLLING;
3171 			XL_UNLOCK(sc);
3172 			return (error);
3173 		}
3174 		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
3175 		    ifp->if_capenable & IFCAP_POLLING) {
3176 			error = ether_poll_deregister(ifp);
3177 			/* Enable interrupts. */
3178 			XL_LOCK(sc);
3179 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
3180 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
3181 			if (sc->xl_flags & XL_FLAG_FUNCREG)
3182 				bus_space_write_4(sc->xl_ftag, sc->xl_fhandle,
3183 				    4, 0x8000);
3184 			ifp->if_capenable &= ~IFCAP_POLLING;
3185 			XL_UNLOCK(sc);
3186 			return (error);
3187 		}
3188 #endif /* DEVICE_POLLING */
3189 		XL_LOCK(sc);
3190 		ifp->if_capenable = ifr->ifr_reqcap;
3191 		if (ifp->if_capenable & IFCAP_TXCSUM)
3192 			ifp->if_hwassist = XL905B_CSUM_FEATURES;
3193 		else
3194 			ifp->if_hwassist = 0;
3195 		XL_UNLOCK(sc);
3196 		break;
3197 	default:
3198 		error = ether_ioctl(ifp, command, data);
3199 		break;
3200 	}
3201 
3202 	return (error);
3203 }
3204 
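/*
 * Per-second watchdog, invoked from the stats callout. On a transmit
 * timeout we reset and reinitialize the chip; EJUSTRETURN tells the
 * caller that xl_init_locked() has already rescheduled the callout.
 */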
3205 static int
3206 xl_watchdog(struct xl_softc *sc)
3207 {
3208 	struct ifnet		*ifp = sc->xl_ifp;
3209 	u_int16_t		status = 0;
3210 
3211 	XL_LOCK_ASSERT(sc);
3212 
3213 	if (sc->xl_wdog_timer == 0 || --sc->xl_wdog_timer != 0)
3214 		return (0);
3215 
3216 	ifp->if_oerrors++;
3217 	XL_SEL_WIN(4);
3218 	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
3219 	device_printf(sc->xl_dev, "watchdog timeout\n");
3220 
3221 	if (status & XL_MEDIASTAT_CARRIER)
3222 		device_printf(sc->xl_dev,
3223 		    "no carrier - transceiver cable problem?\n");
3224 
3225 	xl_txeoc(sc);
3226 	xl_txeof(sc);
3227 	xl_rxeof(sc);
3228 	xl_reset(sc);
3229 	xl_init_locked(sc);
3230 
3231 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
3232 		if (sc->xl_type == XL_TYPE_905B)
3233 			xl_start_90xB_locked(ifp);
3234 		else
3235 			xl_start_locked(ifp);
3236 	}
3237 
3238 	return (EJUSTRETURN);
3239 }
3240 
3241 /*
3242  * Stop the adapter and free any mbufs allocated to the
3243  * RX and TX lists.
3244  */
3245 static void
3246 xl_stop(struct xl_softc *sc)
3247 {
3248 	int			i;
3249 	struct ifnet		*ifp = sc->xl_ifp;
3250 
3251 	XL_LOCK_ASSERT(sc);
3252 
3253 	sc->xl_wdog_timer = 0;
3254 
3255 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
3256 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
3257 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
3258 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
3259 	xl_wait(sc);
3260 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
3261 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
3262 	DELAY(800);
3263 
3264 #ifdef foo
3265 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
3266 	xl_wait(sc);
3267 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
3268 	xl_wait(sc);
3269 #endif
3270 
3271 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
3272 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
3273 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
3274 	if (sc->xl_flags & XL_FLAG_FUNCREG)
3275 		bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);
3276 
3277 	/* Stop the stats updater. */
3278 	callout_stop(&sc->xl_stat_callout);
3279 
3280 	/*
3281 	 * Free data in the RX lists.
3282 	 */
3283 	for (i = 0; i < XL_RX_LIST_CNT; i++) {
3284 		if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
3285 			bus_dmamap_unload(sc->xl_mtag,
3286 			    sc->xl_cdata.xl_rx_chain[i].xl_map);
3287 			bus_dmamap_destroy(sc->xl_mtag,
3288 			    sc->xl_cdata.xl_rx_chain[i].xl_map);
3289 			m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
3290 			sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
3291 		}
3292 	}
3293 	if (sc->xl_ldata.xl_rx_list != NULL)
3294 		bzero(sc->xl_ldata.xl_rx_list, XL_RX_LIST_SZ);
3295 	/*
3296 	 * Free the TX list buffers.
3297 	 */
3298 	for (i = 0; i < XL_TX_LIST_CNT; i++) {
3299 		if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
3300 			bus_dmamap_unload(sc->xl_mtag,
3301 			    sc->xl_cdata.xl_tx_chain[i].xl_map);
3302 			bus_dmamap_destroy(sc->xl_mtag,
3303 			    sc->xl_cdata.xl_tx_chain[i].xl_map);
3304 			m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
3305 			sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
3306 		}
3307 	}
3308 	if (sc->xl_ldata.xl_tx_list != NULL)
3309 		bzero(sc->xl_ldata.xl_tx_list, XL_TX_LIST_SZ);
3310 
3311 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3312 }
3313 
3314 /*
3315  * Stop all chip I/O so that the kernel's probe routines don't
3316  * get confused by errant DMAs when rebooting.
3317  */
3318 static int
3319 xl_shutdown(device_t dev)
3320 {
3321 	struct xl_softc		*sc;
3322 
3323 	sc = device_get_softc(dev);
3324 
3325 	XL_LOCK(sc);
3326 	xl_reset(sc);
3327 	xl_stop(sc);
3328 	XL_UNLOCK(sc);
3329 
3330 	return (0);
3331 }
3332 
3333 static int
3334 xl_suspend(device_t dev)
3335 {
3336 	struct xl_softc		*sc;
3337 
3338 	sc = device_get_softc(dev);
3339 
3340 	XL_LOCK(sc);
3341 	xl_stop(sc);
3342 	XL_UNLOCK(sc);
3343 
3344 	return (0);
3345 }
3346 
3347 static int
3348 xl_resume(device_t dev)
3349 {
3350 	struct xl_softc		*sc;
3351 	struct ifnet		*ifp;
3352 
3353 	sc = device_get_softc(dev);
3354 	ifp = sc->xl_ifp;
3355 
3356 	XL_LOCK(sc);
3357 
3358 	xl_reset(sc);
3359 	if (ifp->if_flags & IFF_UP)
3360 		xl_init_locked(sc);
3361 
3362 	XL_UNLOCK(sc);
3363 
3364 	return (0);
3365 }
3366