xref: /freebsd/sys/dev/dc/if_dc.c (revision 0bf56da32d83fbd3b5db8d6c72cd1e7cc26fbc66)
1 /*-
2  * SPDX-License-Identifier: BSD-4-Clause
3  *
4  * Copyright (c) 1997, 1998, 1999
5  *	Bill Paul <wpaul@ee.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 /*
39  * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143
40  * series chips and several workalikes including the following:
41  *
42  * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com)
43  * Macronix/Lite-On 82c115 PNIC II (www.macronix.com)
44  * Lite-On 82c168/82c169 PNIC (www.litecom.com)
45  * ASIX Electronics AX88140A (www.asix.com.tw)
46  * ASIX Electronics AX88141 (www.asix.com.tw)
47  * ADMtek AL981 (www.admtek.com.tw)
48  * ADMtek AN983 (www.admtek.com.tw)
49  * ADMtek CardBus AN985 (www.admtek.com.tw)
50  * Netgear FA511 (www.netgear.com) Appears to be rebadged ADMTek CardBus AN985
51  * Davicom DM9100, DM9102, DM9102A (www.davicom8.com)
52  * Accton EN1217 (www.accton.com)
53  * Xircom X3201 (www.xircom.com)
54  * Abocom FE2500
55  * Conexant LANfinity (www.conexant.com)
56  * 3Com OfficeConnect 10/100B 3CSOHO100B (www.3com.com)
57  *
58  * Datasheets for the 21143 are available at developer.intel.com.
59  * Datasheets for the clone parts can be found at their respective sites.
60  * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.)
61  * The PNIC II is essentially a Macronix 98715A chip; the only difference
62  * worth noting is that its multicast hash table is only 128 bits wide
63  * instead of 512.
64  *
65  * Written by Bill Paul <wpaul@ee.columbia.edu>
66  * Electrical Engineering Department
67  * Columbia University, New York City
68  */
69 /*
70  * The Intel 21143 is the successor to the DEC 21140. It is basically
71  * the same as the 21140 but with a few new features. The 21143 supports
72  * three kinds of media attachments:
73  *
74  * o MII port, for 10Mbps and 100Mbps support and NWAY
75  *   autonegotiation provided by an external PHY.
76  * o SYM port, for symbol mode 100Mbps support.
77  * o 10baseT port.
78  * o AUI/BNC port.
79  *
80  * The 100Mbps SYM port and 10baseT port can be used together in
81  * combination with the internal NWAY support to create a 10/100
82  * autosensing configuration.
83  *
84  * Note that not all tulip workalikes are handled in this driver: we only
85  * deal with those which are relatively well behaved. The Winbond is
86  * handled separately due to its different register offsets and the
87  * special handling needed for its various bugs. The PNIC is handled
88  * here, but I'm not thrilled about it.
89  *
90  * All of the workalike chips use some form of MII transceiver support
91  * with the exception of the Macronix chips, which also have a SYM port.
92  * The ASIX AX88140A is also documented to have a SYM port, but all
93  * the cards I've seen use an MII transceiver, probably because the
94  * AX88140A doesn't support internal NWAY.
95  */
96 
97 #ifdef HAVE_KERNEL_OPTION_HEADERS
98 #include "opt_device_polling.h"
99 #endif
100 
101 #include <sys/param.h>
102 #include <sys/endian.h>
103 #include <sys/systm.h>
104 #include <sys/sockio.h>
105 #include <sys/mbuf.h>
106 #include <sys/malloc.h>
107 #include <sys/kernel.h>
108 #include <sys/module.h>
109 #include <sys/socket.h>
110 
111 #include <net/if.h>
112 #include <net/if_var.h>
113 #include <net/if_arp.h>
114 #include <net/ethernet.h>
115 #include <net/if_dl.h>
116 #include <net/if_media.h>
117 #include <net/if_types.h>
118 #include <net/if_vlan_var.h>
119 
120 #include <net/bpf.h>
121 
122 #include <machine/bus.h>
123 #include <machine/resource.h>
124 #include <sys/bus.h>
125 #include <sys/rman.h>
126 
127 #include <dev/mii/mii.h>
128 #include <dev/mii/mii_bitbang.h>
129 #include <dev/mii/miivar.h>
130 
131 #include <dev/pci/pcireg.h>
132 #include <dev/pci/pcivar.h>
133 
134 #define	DC_USEIOSPACE
135 
136 #include <dev/dc/if_dcreg.h>
137 
138 MODULE_DEPEND(dc, pci, 1, 1, 1);
139 MODULE_DEPEND(dc, ether, 1, 1, 1);
140 MODULE_DEPEND(dc, miibus, 1, 1, 1);
141 
142 /*
143  * "device miibus" is required in kernel config.  See GENERIC if you get
144  * errors here.
145  */
146 #include "miibus_if.h"
147 
148 /*
149  * Various supported device vendors/types and their names.
150  */
/*
 * Device match table.  Entries with a non-zero revision field (e.g. the
 * DM9102A and AX88141) are listed before the generic entry for the same
 * vendor/device pair; the table ends with an all-zero sentinel.
 */
static const struct dc_type dc_devs[] = {
	{ DC_DEVID(DC_VENDORID_DEC, DC_DEVICEID_21143), 0,
		"Intel 21143 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009), 0,
		"Davicom DM9009 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100), 0,
		"Davicom DM9100 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102), DC_REVISION_DM9102A,
		"Davicom DM9102A 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102), 0,
		"Davicom DM9102 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AL981), 0,
		"ADMtek AL981 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AN983), 0,
		"ADMtek AN983 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AN985), 0,
		"ADMtek AN985 CardBus 10/100BaseTX or clone" },
	{ DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9511), 0,
		"ADMtek ADM9511 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9513), 0,
		"ADMtek ADM9513 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ASIX, DC_DEVICEID_AX88140A), DC_REVISION_88141,
		"ASIX AX88141 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ASIX, DC_DEVICEID_AX88140A), 0,
		"ASIX AX88140A 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98713), DC_REVISION_98713A,
		"Macronix 98713A 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98713), 0,
		"Macronix 98713 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_CP, DC_DEVICEID_98713_CP), DC_REVISION_98713A,
		"Compex RL100-TX 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_CP, DC_DEVICEID_98713_CP), 0,
		"Compex RL100-TX 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5), DC_REVISION_98725,
		"Macronix 98725 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5), DC_REVISION_98715AEC_C,
		"Macronix 98715AEC-C 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5), 0,
		"Macronix 98715/98715A 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98727), 0,
		"Macronix 98727/98732 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C115), 0,
		"LC82C115 PNIC II 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168), DC_REVISION_82C169,
		"82c169 PNIC 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168), 0,
		"82c168 PNIC 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN1217), 0,
		"Accton EN1217 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN2242), 0,
		"Accton EN2242 MiniPCI 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_XIRCOM, DC_DEVICEID_X3201), 0,
		"Xircom X3201 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_DLINK, DC_DEVICEID_DRP32TXD), 0,
		"Neteasy DRP-32TXD Cardbus 10/100" },
	{ DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500), 0,
		"Abocom FE2500 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500MX), 0,
		"Abocom FE2500MX 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112), 0,
		"Conexant LANfinity MiniPCI 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_HAWKING, DC_DEVICEID_HAWKING_PN672TX), 0,
		"Hawking CB102 CardBus 10/100" },
	{ DC_DEVID(DC_VENDORID_PLANEX, DC_DEVICEID_FNW3602T), 0,
		"PlaneX FNW-3602-T CardBus 10/100" },
	{ DC_DEVID(DC_VENDORID_3COM, DC_DEVICEID_3CSOHOB), 0,
		"3Com OfficeConnect 10/100B" },
	{ DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN120), 0,
		"Microsoft MN-120 CardBus 10/100" },
	{ DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130), 0,
		"Microsoft MN-130 10/100" },
	{ DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB08), 0,
		"Linksys PCMPC200 CardBus 10/100" },
	{ DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB09), 0,
		"Linksys PCMPC200 CardBus 10/100" },
	{ DC_DEVID(DC_VENDORID_ULI, DC_DEVICEID_M5261), 0,
		"ULi M5261 FastEthernet" },
	{ DC_DEVID(DC_VENDORID_ULI, DC_DEVICEID_M5263), 0,
		"ULi M5263 FastEthernet" },
	{ 0, 0, NULL }		/* terminating sentinel */
};
232 
/* Newbus device interface. */
static int dc_probe(device_t);
static int dc_attach(device_t);
static int dc_detach(device_t);
static int dc_suspend(device_t);
static int dc_resume(device_t);
static const struct dc_type *dc_devtype(device_t);
/* RX/TX datapath. */
static void dc_discard_rxbuf(struct dc_softc *, int);
static int dc_newbuf(struct dc_softc *, int);
static int dc_encap(struct dc_softc *, struct mbuf **);
static void dc_pnic_rx_bug_war(struct dc_softc *, int);
static int dc_rx_resync(struct dc_softc *);
static int dc_rxeof(struct dc_softc *);
static void dc_txeof(struct dc_softc *);
static void dc_tick(void *);
static void dc_tx_underrun(struct dc_softc *);
static void dc_intr(void *);
static void dc_start(struct ifnet *);
static void dc_start_locked(struct ifnet *);
static int dc_ioctl(struct ifnet *, u_long, caddr_t);
static void dc_init(void *);
static void dc_init_locked(struct dc_softc *);
static void dc_stop(struct dc_softc *);
static void dc_watchdog(void *);
static int dc_shutdown(device_t);
static int dc_ifmedia_upd(struct ifnet *);
static int dc_ifmedia_upd_locked(struct dc_softc *);
static void dc_ifmedia_sts(struct ifnet *, struct ifmediareq *);

/* DMA resource management. */
static int dc_dma_alloc(struct dc_softc *);
static void dc_dma_free(struct dc_softc *);
static void dc_dma_map_addr(void *, bus_dma_segment_t *, int, int);

/* Serial EEPROM bit-bang access. */
static void dc_delay(struct dc_softc *);
static void dc_eeprom_idle(struct dc_softc *);
static void dc_eeprom_putbyte(struct dc_softc *, int);
static void dc_eeprom_getword(struct dc_softc *, int, uint16_t *);
static void dc_eeprom_getword_pnic(struct dc_softc *, int, uint16_t *);
static void dc_eeprom_getword_xircom(struct dc_softc *, int, uint16_t *);
static void dc_eeprom_width(struct dc_softc *);
static void dc_read_eeprom(struct dc_softc *, caddr_t, int, int, int);

/* MII bus methods. */
static int dc_miibus_readreg(device_t, int, int);
static int dc_miibus_writereg(device_t, int, int, int);
static void dc_miibus_statchg(device_t);
static void dc_miibus_mediainit(device_t);

/* Media configuration and RX filter programming. */
static void dc_setcfg(struct dc_softc *, int);
static void dc_netcfg_wait(struct dc_softc *);
static uint32_t dc_mchash_le(struct dc_softc *, const uint8_t *);
static uint32_t dc_mchash_be(const uint8_t *);
static void dc_setfilt_21143(struct dc_softc *);
static void dc_setfilt_asix(struct dc_softc *);
static void dc_setfilt_admtek(struct dc_softc *);
static void dc_setfilt_uli(struct dc_softc *);
static void dc_setfilt_xircom(struct dc_softc *);

static void dc_setfilt(struct dc_softc *);

static void dc_reset(struct dc_softc *);
static int dc_list_rx_init(struct dc_softc *);
static int dc_list_tx_init(struct dc_softc *);

/* SROM/EEPROM contents parsing (21143 media info leaves). */
static int dc_read_srom(struct dc_softc *, int);
static int dc_parse_21143_srom(struct dc_softc *);
static int dc_decode_leaf_sia(struct dc_softc *, struct dc_eblock_sia *);
static int dc_decode_leaf_mii(struct dc_softc *, struct dc_eblock_mii *);
static int dc_decode_leaf_sym(struct dc_softc *, struct dc_eblock_sym *);
static void dc_apply_fixup(struct dc_softc *, int);
static int dc_check_multiport(struct dc_softc *);

/*
 * MII bit-bang glue
 */
static uint32_t dc_mii_bitbang_read(device_t);
static void dc_mii_bitbang_write(device_t, uint32_t);

static const struct mii_bitbang_ops dc_mii_bitbang_ops = {
	dc_mii_bitbang_read,
	dc_mii_bitbang_write,
	{
		DC_SIO_MII_DATAOUT,	/* MII_BIT_MDO */
		DC_SIO_MII_DATAIN,	/* MII_BIT_MDI */
		DC_SIO_MII_CLK,		/* MII_BIT_MDC */
		0,			/* MII_BIT_DIR_HOST_PHY */
		DC_SIO_MII_DIR,		/* MII_BIT_DIR_PHY_HOST */
	}
};
320 
/*
 * Select I/O-space or memory-space register access depending on
 * whether DC_USEIOSPACE is defined above.
 */
#ifdef DC_USEIOSPACE
#define	DC_RES			SYS_RES_IOPORT
#define	DC_RID			DC_PCI_CFBIO
#else
#define	DC_RES			SYS_RES_MEMORY
#define	DC_RID			DC_PCI_CFBMA
#endif

static device_method_t dc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		dc_probe),
	DEVMETHOD(device_attach,	dc_attach),
	DEVMETHOD(device_detach,	dc_detach),
	DEVMETHOD(device_suspend,	dc_suspend),
	DEVMETHOD(device_resume,	dc_resume),
	DEVMETHOD(device_shutdown,	dc_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	dc_miibus_readreg),
	DEVMETHOD(miibus_writereg,	dc_miibus_writereg),
	DEVMETHOD(miibus_statchg,	dc_miibus_statchg),
	DEVMETHOD(miibus_mediainit,	dc_miibus_mediainit),

	DEVMETHOD_END
};

static driver_t dc_driver = {
	"dc",
	dc_methods,
	sizeof(struct dc_softc)
};

static devclass_t dc_devclass;

DRIVER_MODULE_ORDERED(dc, pci, dc_driver, dc_devclass, NULL, NULL,
    SI_ORDER_ANY);
MODULE_PNP_INFO("W32:vendor/device;U8:revision;D:#", pci, dc, dc_devs,
    nitems(dc_devs) - 1);
DRIVER_MODULE(miibus, dc, miibus_driver, miibus_devclass, NULL, NULL);

/* Read-modify-write helpers for setting/clearing bits in a CSR. */
#define	DC_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))

#define	DC_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

/* Shorthand for the serial I/O register; these assume a local 'sc'. */
#define	SIO_SET(x)	DC_SETBIT(sc, DC_SIO, (x))
#define	SIO_CLR(x)	DC_CLRBIT(sc, DC_SIO, (x))
369 
370 static void
371 dc_delay(struct dc_softc *sc)
372 {
373 	int idx;
374 
375 	for (idx = (300 / 33) + 1; idx > 0; idx--)
376 		CSR_READ_4(sc, DC_BUSCTL);
377 }
378 
/*
 * Probe the width (number of address bits) of the attached serial
 * EEPROM and record it in sc->dc_romwidth.  A read opcode is clocked
 * out followed by zero address bits; the clock count at which the
 * EEPROM drives DATAOUT low gives the address width.
 */
static void
dc_eeprom_width(struct dc_softc *sc)
{
	int i;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Clock out the 3-bit READ opcode (6 == 0b110), MSB first. */
	for (i = 3; i--;) {
		if (6 & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/*
	 * Clock in zero address bits until DATAOUT reads back low;
	 * the number of clocks taken is the EEPROM's address width.
	 */
	for (i = 1; i <= 12; i++) {
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) {
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
			dc_delay(sc);
			break;
		}
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	/* Fall back to a 6-bit width if the probe result looks bogus. */
	if (i < 4 || i > 12)
		sc->dc_romwidth = 6;
	else
		sc->dc_romwidth = i;

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);
}
442 
/*
 * Drive the serial EEPROM back to its idle state: select the EEPROM
 * and raise chip select, toggle the clock 25 times, then drop chip
 * select and clear the SIO register entirely.
 */
static void
dc_eeprom_idle(struct dc_softc *sc)
{
	int i;

	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Clock out any state left in the EEPROM's shift register. */
	for (i = 0; i < 25; i++) {
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);
	CSR_WRITE_4(sc, DC_SIO, 0x00000000);
}
470 
471 /*
472  * Send a read command and address to the EEPROM, check for ACK.
473  */
static void
dc_eeprom_putbyte(struct dc_softc *sc, int addr)
{
	int d, i;

	/* Extract the 3-bit READ opcode and shift it out MSB first. */
	d = DC_EECMD_READ >> 6;
	for (i = 3; i--; ) {
		if (d & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/*
	 * Feed in each bit and strobe the clock.  The number of address
	 * bits was determined earlier by dc_eeprom_width().
	 */
	for (i = sc->dc_romwidth; i--;) {
		if (addr & (1 << i)) {
			SIO_SET(DC_SIO_EE_DATAIN);
		} else {
			SIO_CLR(DC_SIO_EE_DATAIN);
		}
		dc_delay(sc);
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}
}
508 
509 /*
510  * Read a word of data stored in the EEPROM at address 'addr.'
511  * The PNIC 82c168/82c169 has its own non-standard way to read
512  * the EEPROM.
513  */
static void
dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, uint16_t *dest)
{
	int i;
	uint32_t r;

	/* Kick off the read via the PNIC's dedicated EEPROM register. */
	CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ | addr);

	/*
	 * Poll for completion for up to DC_TIMEOUT iterations; the low
	 * 16 bits hold the data once the busy bit clears.  On timeout
	 * *dest is left unmodified.
	 */
	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(1);
		r = CSR_READ_4(sc, DC_SIO);
		if (!(r & DC_PN_SIOCTL_BUSY)) {
			*dest = (uint16_t)(r & 0xFFFF);
			return;
		}
	}
}
531 
532 /*
533  * Read a word of data stored in the EEPROM at address 'addr.'
534  * The Xircom X3201 has its own non-standard way to read
535  * the EEPROM, too.
536  */
static void
dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, uint16_t *dest)
{

	SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);

	/*
	 * The X3201's ROM is byte addressed: read the low and high
	 * bytes of the word with two separate accesses.
	 */
	addr *= 2;
	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
	*dest = (uint16_t)CSR_READ_4(sc, DC_SIO) & 0xff;
	addr += 1;
	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
	*dest |= ((uint16_t)CSR_READ_4(sc, DC_SIO) & 0xff) << 8;

	SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);
}
552 
553 /*
554  * Read a word of data stored in the EEPROM at address 'addr.'
555  */
/*
 * Standard bit-bang path: clock the read command and address out over
 * the SIO register, then shift the 16 data bits back in, MSB first.
 */
static void
dc_eeprom_getword(struct dc_softc *sc, int addr, uint16_t *dest)
{
	int i;
	uint16_t word = 0;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO,  DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	dc_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM, most significant bit first.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)
			word |= i;
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	*dest = word;
}
598 
599 /*
600  * Read a sequence of words from the EEPROM.
601  */
602 static void
603 dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt, int be)
604 {
605 	int i;
606 	uint16_t word = 0, *ptr;
607 
608 	for (i = 0; i < cnt; i++) {
609 		if (DC_IS_PNIC(sc))
610 			dc_eeprom_getword_pnic(sc, off + i, &word);
611 		else if (DC_IS_XIRCOM(sc))
612 			dc_eeprom_getword_xircom(sc, off + i, &word);
613 		else
614 			dc_eeprom_getword(sc, off + i, &word);
615 		ptr = (uint16_t *)(dest + (i * 2));
616 		if (be)
617 			*ptr = be16toh(word);
618 		else
619 			*ptr = le16toh(word);
620 	}
621 }
622 
623 /*
624  * Write the MII serial port for the MII bit-bang module.
625  */
static void
dc_mii_bitbang_write(device_t dev, uint32_t val)
{
	struct dc_softc *sc;

	sc = device_get_softc(dev);

	CSR_WRITE_4(sc, DC_SIO, val);
	/* Flush the write before the next MII bit is toggled. */
	CSR_BARRIER_4(sc, DC_SIO,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}
637 
638 /*
639  * Read the MII serial port for the MII bit-bang module.
640  */
static uint32_t
dc_mii_bitbang_read(device_t dev)
{
	struct dc_softc *sc;
	uint32_t val;

	sc = device_get_softc(dev);

	val = CSR_READ_4(sc, DC_SIO);
	/* Order the read against subsequent MII pin manipulation. */
	CSR_BARRIER_4(sc, DC_SIO,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	return (val);
}
655 
/*
 * Read a PHY register.  Several chip families need special handling:
 * when not in MII mode a PHY is faked at address MII_NPHY - 1 so the
 * probe code finds something; the PNIC and ULi M5263 have dedicated
 * MII access registers; the Comet (ADMtek AL981) maps the standard
 * MII registers onto its own CSRs; and the 98713 must have PORTSEL
 * cleared around bit-bang access.
 */
static int
dc_miibus_readreg(device_t dev, int phy, int reg)
{
	struct dc_softc *sc;
	int i, rval, phy_reg = 0;

	sc = device_get_softc(dev);

	if (sc->dc_pmode != DC_PMODE_MII) {
		if (phy == (MII_NPHY - 1)) {
			switch (reg) {
			case MII_BMSR:
			/*
			 * Fake something to make the probe
			 * code think there's a PHY here.
			 */
				return (BMSR_MEDIAMASK);
			case MII_PHYIDR1:
				if (DC_IS_PNIC(sc))
					return (DC_VENDORID_LO);
				return (DC_VENDORID_DEC);
			case MII_PHYIDR2:
				if (DC_IS_PNIC(sc))
					return (DC_DEVICEID_82C168);
				return (DC_DEVICEID_21143);
			default:
				return (0);
			}
		} else
			return (0);
	}

	/* PNIC: start the read and poll its MII register until not busy. */
	if (DC_IS_PNIC(sc)) {
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ |
		    (phy << 23) | (reg << 18));
		for (i = 0; i < DC_TIMEOUT; i++) {
			DELAY(1);
			rval = CSR_READ_4(sc, DC_PN_MII);
			if (!(rval & DC_PN_MII_BUSY)) {
				rval &= 0xFFFF;
				/* All-ones means no PHY answered. */
				return (rval == 0xFFFF ? 0 : rval);
			}
		}
		return (0);
	}

	/* ULi M5263: MII access goes through the ROM register. */
	if (sc->dc_type == DC_TYPE_ULI_M5263) {
		CSR_WRITE_4(sc, DC_ROM,
		    ((phy << DC_ULI_PHY_ADDR_SHIFT) & DC_ULI_PHY_ADDR_MASK) |
		    ((reg << DC_ULI_PHY_REG_SHIFT) & DC_ULI_PHY_REG_MASK) |
		    DC_ULI_PHY_OP_READ);
		for (i = 0; i < DC_TIMEOUT; i++) {
			DELAY(1);
			rval = CSR_READ_4(sc, DC_ROM);
			if ((rval & DC_ULI_PHY_OP_DONE) != 0) {
				return (rval & DC_ULI_PHY_DATA_MASK);
			}
		}
		if (i == DC_TIMEOUT)
			device_printf(dev, "phy read timed out\n");
		return (0);
	}

	/* Comet: translate the MII register number to the CSR that maps it. */
	if (DC_IS_COMET(sc)) {
		switch (reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			device_printf(dev, "phy_read: bad phy register %x\n",
			    reg);
			return (0);
		}

		rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF;
		if (rval == 0xFFFF)
			return (0);
		return (rval);
	}

	/* 98713: PORTSEL must be cleared while bit-banging the MII. */
	if (sc->dc_type == DC_TYPE_98713) {
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	rval = mii_bitbang_readreg(dev, &dc_mii_bitbang_ops, phy, reg);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);

	return (rval);
}
764 
765 static int
766 dc_miibus_writereg(device_t dev, int phy, int reg, int data)
767 {
768 	struct dc_softc *sc;
769 	int i, phy_reg = 0;
770 
771 	sc = device_get_softc(dev);
772 
773 	if (DC_IS_PNIC(sc)) {
774 		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE |
775 		    (phy << 23) | (reg << 10) | data);
776 		for (i = 0; i < DC_TIMEOUT; i++) {
777 			if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY))
778 				break;
779 		}
780 		return (0);
781 	}
782 
783 	if (sc->dc_type == DC_TYPE_ULI_M5263) {
784 		CSR_WRITE_4(sc, DC_ROM,
785 		    ((phy << DC_ULI_PHY_ADDR_SHIFT) & DC_ULI_PHY_ADDR_MASK) |
786 		    ((reg << DC_ULI_PHY_REG_SHIFT) & DC_ULI_PHY_REG_MASK) |
787 		    ((data << DC_ULI_PHY_DATA_SHIFT) & DC_ULI_PHY_DATA_MASK) |
788 		    DC_ULI_PHY_OP_WRITE);
789 		DELAY(1);
790 		return (0);
791 	}
792 
793 	if (DC_IS_COMET(sc)) {
794 		switch (reg) {
795 		case MII_BMCR:
796 			phy_reg = DC_AL_BMCR;
797 			break;
798 		case MII_BMSR:
799 			phy_reg = DC_AL_BMSR;
800 			break;
801 		case MII_PHYIDR1:
802 			phy_reg = DC_AL_VENID;
803 			break;
804 		case MII_PHYIDR2:
805 			phy_reg = DC_AL_DEVID;
806 			break;
807 		case MII_ANAR:
808 			phy_reg = DC_AL_ANAR;
809 			break;
810 		case MII_ANLPAR:
811 			phy_reg = DC_AL_LPAR;
812 			break;
813 		case MII_ANER:
814 			phy_reg = DC_AL_ANER;
815 			break;
816 		default:
817 			device_printf(dev, "phy_write: bad phy register %x\n",
818 			    reg);
819 			return (0);
820 			break;
821 		}
822 
823 		CSR_WRITE_4(sc, phy_reg, data);
824 		return (0);
825 	}
826 
827 	if (sc->dc_type == DC_TYPE_98713) {
828 		phy_reg = CSR_READ_4(sc, DC_NETCFG);
829 		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
830 	}
831 	mii_bitbang_writereg(dev, &dc_mii_bitbang_ops, phy, reg, data);
832 	if (sc->dc_type == DC_TYPE_98713)
833 		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);
834 
835 	return (0);
836 }
837 
/*
 * MII status-change callback: push the negotiated media settings to
 * the chip and update the cached link state.
 */
static void
dc_miibus_statchg(device_t dev)
{
	struct dc_softc *sc;
	struct ifnet *ifp;
	struct mii_data *mii;
	struct ifmedia *ifm;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->dc_miibus);
	ifp = sc->dc_ifp;
	/* Nothing to do until the interface is up and running. */
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	ifm = &mii->mii_media;
	/*
	 * Davicom HomePNA media is configured straight from the selected
	 * media word; for all other chips except ADMtek, program the
	 * currently active media.
	 */
	if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
		dc_setcfg(sc, ifm->ifm_media);
		return;
	} else if (!DC_IS_ADMTEK(sc))
		dc_setcfg(sc, mii->mii_media_active);

	/* Link is considered up only for valid, active 10/100 media. */
	sc->dc_link = 0;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->dc_link = 1;
			break;
		}
	}
}
872 
873 /*
874  * Special support for DM9102A cards with HomePNA PHYs. Note:
875  * with the Davicom DM9102A/DM9801 eval board that I have, it seems
876  * to be impossible to talk to the management interface of the DM9801
877  * PHY (its MDIO pin is not connected to anything). Consequently,
878  * the driver has to just 'know' about the additional mode and deal
879  * with it itself. *sigh*
880  */
881 static void
882 dc_miibus_mediainit(device_t dev)
883 {
884 	struct dc_softc *sc;
885 	struct mii_data *mii;
886 	struct ifmedia *ifm;
887 	int rev;
888 
889 	rev = pci_get_revid(dev);
890 
891 	sc = device_get_softc(dev);
892 	mii = device_get_softc(sc->dc_miibus);
893 	ifm = &mii->mii_media;
894 
895 	if (DC_IS_DAVICOM(sc) && rev >= DC_REVISION_DM9102A)
896 		ifmedia_add(ifm, IFM_ETHER | IFM_HPNA_1, 0, NULL);
897 }
898 
/* Hash index widths (in bits) for the supported multicast table sizes. */
#define	DC_BITS_512	9
#define	DC_BITS_128	7
#define	DC_BITS_64	6
902 
903 static uint32_t
904 dc_mchash_le(struct dc_softc *sc, const uint8_t *addr)
905 {
906 	uint32_t crc;
907 
908 	/* Compute CRC for the address value. */
909 	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
910 
911 	/*
912 	 * The hash table on the PNIC II and the MX98715AEC-C/D/E
913 	 * chips is only 128 bits wide.
914 	 */
915 	if (sc->dc_flags & DC_128BIT_HASH)
916 		return (crc & ((1 << DC_BITS_128) - 1));
917 
918 	/* The hash table on the MX98715BEC is only 64 bits wide. */
919 	if (sc->dc_flags & DC_64BIT_HASH)
920 		return (crc & ((1 << DC_BITS_64) - 1));
921 
922 	/* Xircom's hash filtering table is different (read: weird) */
923 	/* Xircom uses the LEAST significant bits */
924 	if (DC_IS_XIRCOM(sc)) {
925 		if ((crc & 0x180) == 0x180)
926 			return ((crc & 0x0F) + (crc & 0x70) * 3 + (14 << 4));
927 		else
928 			return ((crc & 0x1F) + ((crc >> 1) & 0xF0) * 3 +
929 			    (12 << 4));
930 	}
931 
932 	return (crc & ((1 << DC_BITS_512) - 1));
933 }
934 
935 /*
936  * Calculate CRC of a multicast group address, return the lower 6 bits.
937  */
938 static uint32_t
939 dc_mchash_be(const uint8_t *addr)
940 {
941 	uint32_t crc;
942 
943 	/* Compute CRC for the address value. */
944 	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
945 
946 	/* Return the filter bit position. */
947 	return ((crc >> 26) & 0x0000003F);
948 }
949 
950 /*
951  * 21143-style RX filter setup routine. Filter programming is done by
952  * downloading a special setup frame into the TX engine. 21143, Macronix,
953  * PNIC, PNIC II and Davicom chips are programmed this way.
954  *
955  * We always program the chip using 'hash perfect' mode, i.e. one perfect
956  * address (our node address) and a 512-bit hash filter for multicast
957  * frames. We also sneak the broadcast address into the hash filter since
958  * we need that too.
959  */
960 static u_int
961 dc_hash_maddr_21143(void *arg, struct sockaddr_dl *sdl, u_int cnt)
962 {
963 	struct dc_softc *sc = arg;
964 	uint32_t h;
965 
966 	h = dc_mchash_le(sc, LLADDR(sdl));
967 	sc->dc_cdata.dc_sbuf[h >> 4] |= htole32(1 << (h & 0xF));
968 
969 	return (1);
970 }
971 
972 static void
973 dc_setfilt_21143(struct dc_softc *sc)
974 {
975 	uint16_t eaddr[(ETHER_ADDR_LEN+1)/2];
976 	struct dc_desc *sframe;
977 	uint32_t h, *sp;
978 	struct ifnet *ifp;
979 	int i;
980 
981 	ifp = sc->dc_ifp;
982 
983 	i = sc->dc_cdata.dc_tx_prod;
984 	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
985 	sc->dc_cdata.dc_tx_cnt++;
986 	sframe = &sc->dc_ldata.dc_tx_list[i];
987 	sp = sc->dc_cdata.dc_sbuf;
988 	bzero(sp, DC_SFRAME_LEN);
989 
990 	sframe->dc_data = htole32(DC_ADDR_LO(sc->dc_saddr));
991 	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
992 	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);
993 
994 	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf;
995 
996 	/* If we want promiscuous mode, set the allframes bit. */
997 	if (ifp->if_flags & IFF_PROMISC)
998 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
999 	else
1000 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
1001 
1002 	if (ifp->if_flags & IFF_ALLMULTI)
1003 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1004 	else
1005 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1006 
1007 	if_foreach_llmaddr(ifp, dc_hash_maddr_21143, sp);
1008 
1009 	if (ifp->if_flags & IFF_BROADCAST) {
1010 		h = dc_mchash_le(sc, ifp->if_broadcastaddr);
1011 		sp[h >> 4] |= htole32(1 << (h & 0xF));
1012 	}
1013 
1014 	/* Set our MAC address. */
1015 	bcopy(IF_LLADDR(sc->dc_ifp), eaddr, ETHER_ADDR_LEN);
1016 	sp[39] = DC_SP_MAC(eaddr[0]);
1017 	sp[40] = DC_SP_MAC(eaddr[1]);
1018 	sp[41] = DC_SP_MAC(eaddr[2]);
1019 
1020 	sframe->dc_status = htole32(DC_TXSTAT_OWN);
1021 	bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap, BUS_DMASYNC_PREREAD |
1022 	    BUS_DMASYNC_PREWRITE);
1023 	bus_dmamap_sync(sc->dc_stag, sc->dc_smap, BUS_DMASYNC_PREWRITE);
1024 	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
1025 
1026 	/*
1027 	 * The PNIC takes an exceedingly long time to process its
1028 	 * setup frame; wait 10ms after posting the setup frame
1029 	 * before proceeding, just so it has time to swallow its
1030 	 * medicine.
1031 	 */
1032 	DELAY(10000);
1033 
1034 	sc->dc_wdog_timer = 5;
1035 }
1036 
1037 static u_int
1038 dc_hash_maddr_admtek_be(void *arg, struct sockaddr_dl *sdl, u_int cnt)
1039 {
1040 	uint32_t *hashes = arg;
1041 	int h = 0;
1042 
1043 	h = dc_mchash_be(LLADDR(sdl));
1044 	if (h < 32)
1045 		hashes[0] |= (1 << h);
1046 	else
1047 		hashes[1] |= (1 << (h - 32));
1048 
1049 	return (1);
1050 }
1051 
/*
 * Context for dc_hash_maddr_admtek_le(): the softc is needed by
 * dc_mchash_le() and the two 32-bit words accumulate the 64-bit
 * multicast hash table to be written to DC_AL_MAR0/MAR1.
 */
struct dc_hash_maddr_admtek_le_ctx {
	struct dc_softc *sc;
	uint32_t hashes[2];
};
1056 
1057 static u_int
1058 dc_hash_maddr_admtek_le(void *arg, struct sockaddr_dl *sdl, u_int cnt)
1059 {
1060 	struct dc_hash_maddr_admtek_le_ctx *ctx = arg;
1061 	int h = 0;
1062 
1063 	h = dc_mchash_le(ctx->sc, LLADDR(sdl));
1064 	if (h < 32)
1065 		ctx->hashes[0] |= (1 << h);
1066 	else
1067 		ctx->hashes[1] |= (1 << (h - 32));
1068 
1069 	return (1);
1070 }
1071 
1072 static void
1073 dc_setfilt_admtek(struct dc_softc *sc)
1074 {
1075 	uint8_t eaddr[ETHER_ADDR_LEN];
1076 	struct ifnet *ifp;
1077 	struct dc_hash_maddr_admtek_le_ctx ctx = { sc, { 0, 0 }};
1078 
1079 	ifp = sc->dc_ifp;
1080 
1081 	/* Init our MAC address. */
1082 	bcopy(IF_LLADDR(sc->dc_ifp), eaddr, ETHER_ADDR_LEN);
1083 	CSR_WRITE_4(sc, DC_AL_PAR0, eaddr[3] << 24 | eaddr[2] << 16 |
1084 	    eaddr[1] << 8 | eaddr[0]);
1085 	CSR_WRITE_4(sc, DC_AL_PAR1, eaddr[5] << 8 | eaddr[4]);
1086 
1087 	/* If we want promiscuous mode, set the allframes bit. */
1088 	if (ifp->if_flags & IFF_PROMISC)
1089 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
1090 	else
1091 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
1092 
1093 	if (ifp->if_flags & IFF_ALLMULTI)
1094 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1095 	else
1096 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1097 
1098 	/* First, zot all the existing hash bits. */
1099 	CSR_WRITE_4(sc, DC_AL_MAR0, 0);
1100 	CSR_WRITE_4(sc, DC_AL_MAR1, 0);
1101 
1102 	/*
1103 	 * If we're already in promisc or allmulti mode, we
1104 	 * don't have to bother programming the multicast filter.
1105 	 */
1106 	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI))
1107 		return;
1108 
1109 	/* Now program new ones. */
1110 	if (DC_IS_CENTAUR(sc))
1111 		if_foreach_llmaddr(ifp, dc_hash_maddr_admtek_le, &ctx);
1112 	else
1113 		if_foreach_llmaddr(ifp, dc_hash_maddr_admtek_be, &ctx.hashes);
1114 
1115 	CSR_WRITE_4(sc, DC_AL_MAR0, ctx.hashes[0]);
1116 	CSR_WRITE_4(sc, DC_AL_MAR1, ctx.hashes[1]);
1117 }
1118 
/*
 * ASIX-style RX filter setup.  The ASIX chips use an indirect
 * register pair: DC_AX_FILTIDX selects a perfect-filter or hash
 * word, then DC_AX_FILTDATA carries the value, so every programming
 * step below is an index write followed by a data write.
 */
static void
dc_setfilt_asix(struct dc_softc *sc)
{
	uint32_t eaddr[(ETHER_ADDR_LEN+3)/4];
	struct ifnet *ifp;
	uint32_t hashes[2] = { 0, 0 };

	ifp = sc->dc_ifp;

	/* Init our MAC address (two perfect-filter words). */
	bcopy(IF_LLADDR(sc->dc_ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, eaddr[0]);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, eaddr[1]);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/*
	 * The ASIX chip has a special bit to enable reception
	 * of broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI))
		return;

	/* now program new ones (big-endian CRC hash, 64 bits total) */
	if_foreach_llmaddr(ifp, dc_hash_maddr_admtek_be, hashes);

	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]);
}
1176 
1177 static u_int
1178 dc_hash_maddr_uli(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
1179 {
1180 	uint32_t **sp = arg;
1181 	uint8_t *ma;
1182 
1183 	if (mcnt == DC_ULI_FILTER_NPERF)
1184 		return (0);
1185 	ma = LLADDR(sdl);
1186 	*(*sp)++ = DC_SP_MAC(ma[1] << 8 | ma[0]);
1187 	*(*sp)++ = DC_SP_MAC(ma[3] << 8 | ma[2]);
1188 	*(*sp)++ = DC_SP_MAC(ma[5] << 8 | ma[4]);
1189 
1190 	return (1);
1191 }
1192 
/*
 * ULi-style RX filter setup.  Uses a 'perfect filter' setup frame:
 * the station address, the broadcast address, and up to
 * DC_ULI_FILTER_NPERF multicast addresses are stored as explicit
 * entries; if there are more multicast addresses than slots, the
 * chip is put into allmulti mode instead.
 */
static void
dc_setfilt_uli(struct dc_softc *sc)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	struct ifnet *ifp;
	struct dc_desc *sframe;
	uint32_t filter, *sp;
	int i, mcnt;

	ifp = sc->dc_ifp;

	/* Claim the next TX descriptor for the setup frame. */
	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata.dc_tx_list[i];
	sp = sc->dc_cdata.dc_sbuf;
	bzero(sp, DC_SFRAME_LEN);

	sframe->dc_data = htole32(DC_ADDR_LO(sc->dc_saddr));
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_PERFECT | DC_TXCTL_FINT);

	/* Mark the slot so the TX completion path can recognize it. */
	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf;

	/* Set station address. */
	bcopy(IF_LLADDR(sc->dc_ifp), eaddr, ETHER_ADDR_LEN);
	*sp++ = DC_SP_MAC(eaddr[1] << 8 | eaddr[0]);
	*sp++ = DC_SP_MAC(eaddr[3] << 8 | eaddr[2]);
	*sp++ = DC_SP_MAC(eaddr[5] << 8 | eaddr[4]);

	/* Set broadcast address. */
	*sp++ = DC_SP_MAC(0xFFFF);
	*sp++ = DC_SP_MAC(0xFFFF);
	*sp++ = DC_SP_MAC(0xFFFF);

	/* Extract current filter configuration. */
	filter = CSR_READ_4(sc, DC_NETCFG);
	filter &= ~(DC_NETCFG_RX_PROMISC | DC_NETCFG_RX_ALLMULTI);

	/* Now build perfect filters; sp advances as entries are added. */
	mcnt = if_foreach_llmaddr(ifp, dc_hash_maddr_uli, &sp);

	/* Out of slots: fall back to allmulti; otherwise pad the
	 * remaining slots with the (harmless) broadcast address. */
	if (mcnt == DC_ULI_FILTER_NPERF)
		filter |= DC_NETCFG_RX_ALLMULTI;
	else
		for (; mcnt < DC_ULI_FILTER_NPERF; mcnt++) {
			*sp++ = DC_SP_MAC(0xFFFF);
			*sp++ = DC_SP_MAC(0xFFFF);
			*sp++ = DC_SP_MAC(0xFFFF);
		}

	/*
	 * Update NETCFG with TX/RX stopped, then restore the original
	 * TX/RX enable bits if they were set, so the filter mode bits
	 * are never changed while the engines are running.
	 */
	if (filter & (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON))
		CSR_WRITE_4(sc, DC_NETCFG,
		    filter & ~(DC_NETCFG_TX_ON | DC_NETCFG_RX_ON));
	if (ifp->if_flags & IFF_PROMISC)
		filter |= DC_NETCFG_RX_PROMISC | DC_NETCFG_RX_ALLMULTI;
	if (ifp->if_flags & IFF_ALLMULTI)
		filter |= DC_NETCFG_RX_ALLMULTI;
	CSR_WRITE_4(sc, DC_NETCFG,
	    filter & ~(DC_NETCFG_TX_ON | DC_NETCFG_RX_ON));
	if (filter & (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON))
		CSR_WRITE_4(sc, DC_NETCFG, filter);

	/* Hand the descriptor to the chip and kick the TX engine. */
	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dc_stag, sc->dc_smap, BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * Wait some time...
	 */
	DELAY(1000);

	sc->dc_wdog_timer = 5;
}
1269 
1270 static u_int
1271 dc_hash_maddr_xircom(void *arg, struct sockaddr_dl *sdl, u_int cnt)
1272 {
1273 	struct dc_softc *sc = arg;
1274 	uint32_t h;
1275 
1276 	h = dc_mchash_le(sc, LLADDR(sdl));
1277 	sc->dc_cdata.dc_sbuf[h >> 4] |= htole32(1 << (h & 0xF));
1278 	return (1);
1279 }
1280 
1281 static void
1282 dc_setfilt_xircom(struct dc_softc *sc)
1283 {
1284 	uint16_t eaddr[(ETHER_ADDR_LEN+1)/2];
1285 	struct ifnet *ifp;
1286 	struct dc_desc *sframe;
1287 	uint32_t h, *sp;
1288 	int i;
1289 
1290 	ifp = sc->dc_ifp;
1291 	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON));
1292 
1293 	i = sc->dc_cdata.dc_tx_prod;
1294 	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
1295 	sc->dc_cdata.dc_tx_cnt++;
1296 	sframe = &sc->dc_ldata.dc_tx_list[i];
1297 	sp = sc->dc_cdata.dc_sbuf;
1298 	bzero(sp, DC_SFRAME_LEN);
1299 
1300 	sframe->dc_data = htole32(DC_ADDR_LO(sc->dc_saddr));
1301 	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
1302 	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);
1303 
1304 	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf;
1305 
1306 	/* If we want promiscuous mode, set the allframes bit. */
1307 	if (ifp->if_flags & IFF_PROMISC)
1308 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
1309 	else
1310 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
1311 
1312 	if (ifp->if_flags & IFF_ALLMULTI)
1313 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1314 	else
1315 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1316 
1317 	if_foreach_llmaddr(ifp, dc_hash_maddr_xircom, &sp);
1318 
1319 	if (ifp->if_flags & IFF_BROADCAST) {
1320 		h = dc_mchash_le(sc, ifp->if_broadcastaddr);
1321 		sp[h >> 4] |= htole32(1 << (h & 0xF));
1322 	}
1323 
1324 	/* Set our MAC address. */
1325 	bcopy(IF_LLADDR(sc->dc_ifp), eaddr, ETHER_ADDR_LEN);
1326 	sp[0] = DC_SP_MAC(eaddr[0]);
1327 	sp[1] = DC_SP_MAC(eaddr[1]);
1328 	sp[2] = DC_SP_MAC(eaddr[2]);
1329 
1330 	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
1331 	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
1332 	sframe->dc_status = htole32(DC_TXSTAT_OWN);
1333 	bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap, BUS_DMASYNC_PREREAD |
1334 	    BUS_DMASYNC_PREWRITE);
1335 	bus_dmamap_sync(sc->dc_stag, sc->dc_smap, BUS_DMASYNC_PREWRITE);
1336 	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
1337 
1338 	/*
1339 	 * Wait some time...
1340 	 */
1341 	DELAY(1000);
1342 
1343 	sc->dc_wdog_timer = 5;
1344 }
1345 
/*
 * Dispatch RX filter programming to the routine matching this chip
 * family.  The DC_IS_* predicates presumably identify at most one
 * family per adapter; the tests are kept as independent ifs rather
 * than a chain, matching the original structure.
 */
static void
dc_setfilt(struct dc_softc *sc)
{

	/* 21143 and compatibles use the hash-perfect setup frame. */
	if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) ||
	    DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc))
		dc_setfilt_21143(sc);

	if (DC_IS_ASIX(sc))
		dc_setfilt_asix(sc);

	if (DC_IS_ADMTEK(sc))
		dc_setfilt_admtek(sc);

	if (DC_IS_ULI(sc))
		dc_setfilt_uli(sc);

	if (DC_IS_XIRCOM(sc))
		dc_setfilt_xircom(sc);
}
1366 
1367 static void
1368 dc_netcfg_wait(struct dc_softc *sc)
1369 {
1370 	uint32_t isr;
1371 	int i;
1372 
1373 	for (i = 0; i < DC_TIMEOUT; i++) {
1374 		isr = CSR_READ_4(sc, DC_ISR);
1375 		if (isr & DC_ISR_TX_IDLE &&
1376 		    ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
1377 		    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT))
1378 			break;
1379 		DELAY(10);
1380 	}
1381 	if (i == DC_TIMEOUT && bus_child_present(sc->dc_dev)) {
1382 		if (!(isr & DC_ISR_TX_IDLE) && !DC_IS_ASIX(sc))
1383 			device_printf(sc->dc_dev,
1384 			    "%s: failed to force tx to idle state\n", __func__);
1385 		if (!((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
1386 		    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) &&
1387 		    !DC_HAS_BROKEN_RXSTATE(sc))
1388 			device_printf(sc->dc_dev,
1389 			    "%s: failed to force rx to idle state\n", __func__);
1390 	}
1391 }
1392 
1393 /*
1394  * In order to fiddle with the 'full-duplex' and '100Mbps' bits in
1395  * the netconfig register, we first have to put the transmit and/or
1396  * receive logic in the idle state.
1397  */
/*
 * Program NETCFG (and related registers) for the given ifmedia word.
 * If the TX/RX engines are running, they are stopped first via
 * dc_netcfg_wait() and restarted at the end, since the duplex and
 * speed bits must not be changed while the engines are active.
 */
static void
dc_setcfg(struct dc_softc *sc, int media)
{
	int restart = 0, watchdogreg;

	if (IFM_SUBTYPE(media) == IFM_NONE)
		return;

	/* Stop the engines before touching speed/duplex bits. */
	if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON)) {
		restart = 1;
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON));
		dc_netcfg_wait(sc);
	}

	/* 100baseTX setup. */
	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			if (DC_IS_INTEL(sc)) {
				/* There's a write enable bit here that reads as 1. */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
			    DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
				    DC_NETCFG_SCRAMBLER));
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
		} else {
			/* Symbol (non-MII) mode. */
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
		}
	}

	/* 10baseT setup. */
	if (IFM_SUBTYPE(media) == IFM_10_T) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			/* There's a write enable bit here that reads as 1. */
			if (DC_IS_INTEL(sc)) {
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
			    DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc)) {
				/*
				 * Reset the SIA and reprogram CSR14 for the
				 * chosen duplex, then disable autoneg.
				 */
				DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
				if ((media & IFM_GMASK) == IFM_FDX)
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
				else
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
				DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL,
				    DC_TCTL_AUTONEGENBL);
				DELAY(20000);
			}
		}
	}

	/*
	 * If this is a Davicom DM9102A card with a DM9801 HomePNA
	 * PHY and we want HomePNA mode, set the portsel bit to turn
	 * on the external MII port.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(media) == IFM_HPNA_1) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			sc->dc_link = 1;
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
		}
	}

	/* Full/half duplex bit, plus the PNIC's NWAY duplex shadow. */
	if ((media & IFM_GMASK) == IFM_FDX) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	} else {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	}

	/* Restart the engines if we stopped them above. */
	if (restart)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON | DC_NETCFG_RX_ON);
}
1516 
/*
 * Issue a software reset and wait for the chip to clear the reset
 * bit, then zero the interrupt mask, bus control, and network
 * config registers.
 */
static void
dc_reset(struct dc_softc *sc)
{
	int i;

	DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);

	/* The reset bit self-clears when the reset completes. */
	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
			break;
	}

	/*
	 * These chips don't self-clear the reset bit; clear it by
	 * hand and zero 'i' so the timeout warning below is
	 * suppressed for them.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_CONEXANT(sc) ||
	    DC_IS_XIRCOM(sc) || DC_IS_INTEL(sc) || DC_IS_ULI(sc)) {
		DELAY(10000);
		DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
		i = 0;
	}

	if (i == DC_TIMEOUT)
		device_printf(sc->dc_dev, "reset never completed!\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
	CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);

	/*
	 * Bring the SIA out of reset. In some cases, it looks
	 * like failing to unreset the SIA soon enough gets it
	 * into a state where it will never come out of reset
	 * until we reset the whole chip again.
	 */
	if (DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
		CSR_WRITE_4(sc, DC_10BTCTRL, 0xFFFFFFFF);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}
}
1559 
1560 static const struct dc_type *
1561 dc_devtype(device_t dev)
1562 {
1563 	const struct dc_type *t;
1564 	uint32_t devid;
1565 	uint8_t rev;
1566 
1567 	t = dc_devs;
1568 	devid = pci_get_devid(dev);
1569 	rev = pci_get_revid(dev);
1570 
1571 	while (t->dc_name != NULL) {
1572 		if (devid == t->dc_devid && rev >= t->dc_minrev)
1573 			return (t);
1574 		t++;
1575 	}
1576 
1577 	return (NULL);
1578 }
1579 
1580 /*
1581  * Probe for a 21143 or clone chip. Check the PCI vendor and device
1582  * IDs against our list and return a device name if we find a match.
1583  * We do a little bit of extra work to identify the exact type of
1584  * chip. The MX98713 and MX98713A have the same PCI vendor/device ID,
1585  * but different revision IDs. The same is true for 98715/98715A
1586  * chips and the 98725, as well as the ASIX and ADMtek chips. In some
1587  * cases, the exact chip revision affects driver behavior.
1588  */
1589 static int
1590 dc_probe(device_t dev)
1591 {
1592 	const struct dc_type *t;
1593 
1594 	t = dc_devtype(dev);
1595 
1596 	if (t != NULL) {
1597 		device_set_desc(dev, t->dc_name);
1598 		return (BUS_PROBE_DEFAULT);
1599 	}
1600 
1601 	return (ENXIO);
1602 }
1603 
1604 static void
1605 dc_apply_fixup(struct dc_softc *sc, int media)
1606 {
1607 	struct dc_mediainfo *m;
1608 	uint8_t *p;
1609 	int i;
1610 	uint32_t reg;
1611 
1612 	m = sc->dc_mi;
1613 
1614 	while (m != NULL) {
1615 		if (m->dc_media == media)
1616 			break;
1617 		m = m->dc_next;
1618 	}
1619 
1620 	if (m == NULL)
1621 		return;
1622 
1623 	for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
1624 		reg = (p[0] | (p[1] << 8)) << 16;
1625 		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1626 	}
1627 
1628 	for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
1629 		reg = (p[0] | (p[1] << 8)) << 16;
1630 		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1631 	}
1632 }
1633 
1634 static int
1635 dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l)
1636 {
1637 	struct dc_mediainfo *m;
1638 
1639 	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO);
1640 	if (m == NULL) {
1641 		device_printf(sc->dc_dev, "Could not allocate mediainfo\n");
1642 		return (ENOMEM);
1643 	}
1644 	switch (l->dc_sia_code & ~DC_SIA_CODE_EXT) {
1645 	case DC_SIA_CODE_10BT:
1646 		m->dc_media = IFM_10_T;
1647 		break;
1648 	case DC_SIA_CODE_10BT_FDX:
1649 		m->dc_media = IFM_10_T | IFM_FDX;
1650 		break;
1651 	case DC_SIA_CODE_10B2:
1652 		m->dc_media = IFM_10_2;
1653 		break;
1654 	case DC_SIA_CODE_10B5:
1655 		m->dc_media = IFM_10_5;
1656 		break;
1657 	default:
1658 		break;
1659 	}
1660 
1661 	/*
1662 	 * We need to ignore CSR13, CSR14, CSR15 for SIA mode.
1663 	 * Things apparently already work for cards that do
1664 	 * supply Media Specific Data.
1665 	 */
1666 	if (l->dc_sia_code & DC_SIA_CODE_EXT) {
1667 		m->dc_gp_len = 2;
1668 		m->dc_gp_ptr =
1669 		(uint8_t *)&l->dc_un.dc_sia_ext.dc_sia_gpio_ctl;
1670 	} else {
1671 		m->dc_gp_len = 2;
1672 		m->dc_gp_ptr =
1673 		(uint8_t *)&l->dc_un.dc_sia_noext.dc_sia_gpio_ctl;
1674 	}
1675 
1676 	m->dc_next = sc->dc_mi;
1677 	sc->dc_mi = m;
1678 
1679 	sc->dc_pmode = DC_PMODE_SIA;
1680 	return (0);
1681 }
1682 
1683 static int
1684 dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l)
1685 {
1686 	struct dc_mediainfo *m;
1687 
1688 	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO);
1689 	if (m == NULL) {
1690 		device_printf(sc->dc_dev, "Could not allocate mediainfo\n");
1691 		return (ENOMEM);
1692 	}
1693 	if (l->dc_sym_code == DC_SYM_CODE_100BT)
1694 		m->dc_media = IFM_100_TX;
1695 
1696 	if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX)
1697 		m->dc_media = IFM_100_TX | IFM_FDX;
1698 
1699 	m->dc_gp_len = 2;
1700 	m->dc_gp_ptr = (uint8_t *)&l->dc_sym_gpio_ctl;
1701 
1702 	m->dc_next = sc->dc_mi;
1703 	sc->dc_mi = m;
1704 
1705 	sc->dc_pmode = DC_PMODE_SYM;
1706 	return (0);
1707 }
1708 
1709 static int
1710 dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l)
1711 {
1712 	struct dc_mediainfo *m;
1713 	uint8_t *p;
1714 
1715 	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO);
1716 	if (m == NULL) {
1717 		device_printf(sc->dc_dev, "Could not allocate mediainfo\n");
1718 		return (ENOMEM);
1719 	}
1720 	/* We abuse IFM_AUTO to represent MII. */
1721 	m->dc_media = IFM_AUTO;
1722 	m->dc_gp_len = l->dc_gpr_len;
1723 
1724 	p = (uint8_t *)l;
1725 	p += sizeof(struct dc_eblock_mii);
1726 	m->dc_gp_ptr = p;
1727 	p += 2 * l->dc_gpr_len;
1728 	m->dc_reset_len = *p;
1729 	p++;
1730 	m->dc_reset_ptr = p;
1731 
1732 	m->dc_next = sc->dc_mi;
1733 	sc->dc_mi = m;
1734 	return (0);
1735 }
1736 
1737 static int
1738 dc_read_srom(struct dc_softc *sc, int bits)
1739 {
1740 	int size;
1741 
1742 	size = DC_ROM_SIZE(bits);
1743 	sc->dc_srom = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
1744 	if (sc->dc_srom == NULL) {
1745 		device_printf(sc->dc_dev, "Could not allocate SROM buffer\n");
1746 		return (ENOMEM);
1747 	}
1748 	dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0);
1749 	return (0);
1750 }
1751 
/*
 * Walk the 21143 SROM media leaf for this adapter and build the
 * media info list.  Two passes are made over the same block list:
 * the first only counts MII blocks, the second decodes blocks,
 * skipping SIA/SYM entries whenever an MII block exists.
 * Returns the last decode error (0 on success).
 */
static int
dc_parse_21143_srom(struct dc_softc *sc)
{
	struct dc_leaf_hdr *lhdr;
	struct dc_eblock_hdr *hdr;
	int error, have_mii, i, loff;
	char *ptr;

	have_mii = 0;
	/* Byte 27 of the SROM holds the offset of the leaf. */
	loff = sc->dc_srom[27];
	lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);

	/* Media blocks start right after the (packed) leaf header. */
	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	/*
	 * Look if we got a MII media block.
	 */
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		if (hdr->dc_type == DC_EBLOCK_MII)
		    have_mii++;

		/* Advance by the block length (low 7 bits) + header byte. */
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}

	/*
	 * Do the same thing again. Only use SIA and SYM media
	 * blocks if no MII media block is available.
	 */
	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	error = 0;
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		switch (hdr->dc_type) {
		case DC_EBLOCK_MII:
			error = dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr);
			break;
		case DC_EBLOCK_SIA:
			if (! have_mii)
				error = dc_decode_leaf_sia(sc,
				    (struct dc_eblock_sia *)hdr);
			break;
		case DC_EBLOCK_SYM:
			if (! have_mii)
				error = dc_decode_leaf_sym(sc,
				    (struct dc_eblock_sym *)hdr);
			break;
		default:
			/* Don't care. Yet. */
			break;
		}
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}
	return (error);
}
1810 
1811 static void
1812 dc_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1813 {
1814 	bus_addr_t *paddr;
1815 
1816 	KASSERT(nseg == 1,
1817 	    ("%s: wrong number of segments (%d)", __func__, nseg));
1818 	paddr = arg;
1819 	*paddr = segs->ds_addr;
1820 }
1821 
/*
 * Create all DMA tags, memory, and maps used by the driver: parent
 * tag, TX/RX descriptor lists, the multicast setup frame, and
 * per-buffer mbuf maps (plus one spare RX map).  On any failure the
 * partially built state is left for dc_dma_free() to tear down;
 * returns 0 on success or the busdma error.
 */
static int
dc_dma_alloc(struct dc_softc *sc)
{
	int error, i;

	/* Parent tag: 32-bit DMA space, no alignment constraint. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
	    NULL, NULL, &sc->dc_ptag);
	if (error) {
		device_printf(sc->dc_dev,
		    "failed to allocate parent DMA tag\n");
		goto fail;
	}

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(sc->dc_ptag, DC_LIST_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, DC_RX_LIST_SZ, 1,
	    DC_RX_LIST_SZ, 0, NULL, NULL, &sc->dc_rx_ltag);
	if (error) {
		device_printf(sc->dc_dev, "failed to create RX list DMA tag\n");
		goto fail;
	}

	error = bus_dma_tag_create(sc->dc_ptag, DC_LIST_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, DC_TX_LIST_SZ, 1,
	    DC_TX_LIST_SZ, 0, NULL, NULL, &sc->dc_tx_ltag);
	if (error) {
		device_printf(sc->dc_dev, "failed to create TX list DMA tag\n");
		goto fail;
	}

	/* RX descriptor list: allocate, then load to learn its bus address. */
	error = bus_dmamem_alloc(sc->dc_rx_ltag,
	    (void **)&sc->dc_ldata.dc_rx_list, BUS_DMA_NOWAIT |
	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->dc_rx_lmap);
	if (error) {
		device_printf(sc->dc_dev,
		    "failed to allocate DMA'able memory for RX list\n");
		goto fail;
	}
	error = bus_dmamap_load(sc->dc_rx_ltag, sc->dc_rx_lmap,
	    sc->dc_ldata.dc_rx_list, DC_RX_LIST_SZ, dc_dma_map_addr,
	    &sc->dc_ldata.dc_rx_list_paddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->dc_dev,
		    "failed to load DMA'able memory for RX list\n");
		goto fail;
	}
	/* TX descriptor list. */
	error = bus_dmamem_alloc(sc->dc_tx_ltag,
	    (void **)&sc->dc_ldata.dc_tx_list, BUS_DMA_NOWAIT |
	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->dc_tx_lmap);
	if (error) {
		device_printf(sc->dc_dev,
		    "failed to allocate DMA'able memory for TX list\n");
		goto fail;
	}
	error = bus_dmamap_load(sc->dc_tx_ltag, sc->dc_tx_lmap,
	    sc->dc_ldata.dc_tx_list, DC_TX_LIST_SZ, dc_dma_map_addr,
	    &sc->dc_ldata.dc_tx_list_paddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->dc_dev,
		    "cannot load DMA'able memory for TX list\n");
		goto fail;
	}

	/*
	 * Allocate a busdma tag and DMA safe memory for the multicast
	 * setup frame.
	 */
	error = bus_dma_tag_create(sc->dc_ptag, DC_LIST_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    DC_SFRAME_LEN + DC_MIN_FRAMELEN, 1, DC_SFRAME_LEN + DC_MIN_FRAMELEN,
	    0, NULL, NULL, &sc->dc_stag);
	if (error) {
		device_printf(sc->dc_dev,
		    "failed to create DMA tag for setup frame\n");
		goto fail;
	}
	error = bus_dmamem_alloc(sc->dc_stag, (void **)&sc->dc_cdata.dc_sbuf,
	    BUS_DMA_NOWAIT, &sc->dc_smap);
	if (error) {
		device_printf(sc->dc_dev,
		    "failed to allocate DMA'able memory for setup frame\n");
		goto fail;
	}
	error = bus_dmamap_load(sc->dc_stag, sc->dc_smap, sc->dc_cdata.dc_sbuf,
	    DC_SFRAME_LEN, dc_dma_map_addr, &sc->dc_saddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->dc_dev,
		    "cannot load DMA'able memory for setup frame\n");
		goto fail;
	}

	/* Allocate a busdma tag for RX mbufs. */
	error = bus_dma_tag_create(sc->dc_ptag, DC_RXBUF_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->dc_rx_mtag);
	if (error) {
		device_printf(sc->dc_dev, "failed to create RX mbuf tag\n");
		goto fail;
	}

	/* Allocate a busdma tag for TX mbufs (up to DC_MAXFRAGS segments). */
	error = bus_dma_tag_create(sc->dc_ptag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * DC_MAXFRAGS, DC_MAXFRAGS, MCLBYTES,
	    0, NULL, NULL, &sc->dc_tx_mtag);
	if (error) {
		device_printf(sc->dc_dev, "failed to create TX mbuf tag\n");
		goto fail;
	}

	/* Create the TX/RX busdma maps. */
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->dc_tx_mtag, 0,
		    &sc->dc_cdata.dc_tx_map[i]);
		if (error) {
			device_printf(sc->dc_dev,
			    "failed to create TX mbuf dmamap\n");
			goto fail;
		}
	}
	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->dc_rx_mtag, 0,
		    &sc->dc_cdata.dc_rx_map[i]);
		if (error) {
			device_printf(sc->dc_dev,
			    "failed to create RX mbuf dmamap\n");
			goto fail;
		}
	}
	/* Spare map used when swapping buffers in the RX path. */
	error = bus_dmamap_create(sc->dc_rx_mtag, 0, &sc->dc_sparemap);
	if (error) {
		device_printf(sc->dc_dev,
		    "failed to create spare RX mbuf dmamap\n");
		goto fail;
	}

fail:
	return (error);
}
1965 
1966 static void
1967 dc_dma_free(struct dc_softc *sc)
1968 {
1969 	int i;
1970 
1971 	/* RX buffers. */
1972 	if (sc->dc_rx_mtag != NULL) {
1973 		for (i = 0; i < DC_RX_LIST_CNT; i++) {
1974 			if (sc->dc_cdata.dc_rx_map[i] != NULL)
1975 				bus_dmamap_destroy(sc->dc_rx_mtag,
1976 				    sc->dc_cdata.dc_rx_map[i]);
1977 		}
1978 		if (sc->dc_sparemap != NULL)
1979 			bus_dmamap_destroy(sc->dc_rx_mtag, sc->dc_sparemap);
1980 		bus_dma_tag_destroy(sc->dc_rx_mtag);
1981 	}
1982 
1983 	/* TX buffers. */
1984 	if (sc->dc_rx_mtag != NULL) {
1985 		for (i = 0; i < DC_TX_LIST_CNT; i++) {
1986 			if (sc->dc_cdata.dc_tx_map[i] != NULL)
1987 				bus_dmamap_destroy(sc->dc_tx_mtag,
1988 				    sc->dc_cdata.dc_tx_map[i]);
1989 		}
1990 		bus_dma_tag_destroy(sc->dc_tx_mtag);
1991 	}
1992 
1993 	/* RX descriptor list. */
1994 	if (sc->dc_rx_ltag) {
1995 		if (sc->dc_ldata.dc_rx_list_paddr != 0)
1996 			bus_dmamap_unload(sc->dc_rx_ltag, sc->dc_rx_lmap);
1997 		if (sc->dc_ldata.dc_rx_list != NULL)
1998 			bus_dmamem_free(sc->dc_rx_ltag, sc->dc_ldata.dc_rx_list,
1999 			    sc->dc_rx_lmap);
2000 		bus_dma_tag_destroy(sc->dc_rx_ltag);
2001 	}
2002 
2003 	/* TX descriptor list. */
2004 	if (sc->dc_tx_ltag) {
2005 		if (sc->dc_ldata.dc_tx_list_paddr != 0)
2006 			bus_dmamap_unload(sc->dc_tx_ltag, sc->dc_tx_lmap);
2007 		if (sc->dc_ldata.dc_tx_list != NULL)
2008 			bus_dmamem_free(sc->dc_tx_ltag, sc->dc_ldata.dc_tx_list,
2009 			    sc->dc_tx_lmap);
2010 		bus_dma_tag_destroy(sc->dc_tx_ltag);
2011 	}
2012 
2013 	/* multicast setup frame. */
2014 	if (sc->dc_stag) {
2015 		if (sc->dc_saddr != 0)
2016 			bus_dmamap_unload(sc->dc_stag, sc->dc_smap);
2017 		if (sc->dc_cdata.dc_sbuf != NULL)
2018 			bus_dmamem_free(sc->dc_stag, sc->dc_cdata.dc_sbuf,
2019 			    sc->dc_smap);
2020 		bus_dma_tag_destroy(sc->dc_stag);
2021 	}
2022 }
2023 
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 *
 * Returns 0 on success or an errno on failure.  Once the mutex is
 * initialized, every failure path sets 'error' and jumps to 'fail',
 * where dc_detach() releases whatever was acquired so far.
 */
static int
dc_attach(device_t dev)
{
	/* Station address buffer, padded to whole 32-bit words. */
	uint32_t eaddr[(ETHER_ADDR_LEN+3)/4];
	uint32_t command;
	struct dc_softc *sc;
	struct ifnet *ifp;
	struct dc_mediainfo *m;
	uint32_t reg, revision;
	uint16_t *srom;
	int error, mac_offset, n, phy, rid, tmp;
	uint8_t *mac;

	sc = device_get_softc(dev);
	sc->dc_dev = dev;

	mtx_init(&sc->dc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = DC_RID;
	sc->dc_res = bus_alloc_resource_any(dev, DC_RES, &rid, RF_ACTIVE);

	if (sc->dc_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->dc_btag = rman_get_bustag(sc->dc_res);
	sc->dc_bhandle = rman_get_bushandle(sc->dc_res);

	/* Allocate interrupt. */
	rid = 0;
	sc->dc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->dc_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Need this info to decide on a chip type. */
	sc->dc_info = dc_devtype(dev);
	revision = pci_get_revid(dev);

	error = 0;
	/* Get the eeprom width, but PNIC and XIRCOM have diff eeprom */
	if (sc->dc_info->dc_devid !=
	    DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168) &&
	    sc->dc_info->dc_devid !=
	    DC_DEVID(DC_VENDORID_XIRCOM, DC_DEVICEID_X3201))
		dc_eeprom_width(sc);

	/*
	 * Per-chip setup: select the chip type, TX/RX workaround flags
	 * and default port mode, and read the SROM where the chip needs
	 * it parsed later.
	 */
	switch (sc->dc_info->dc_devid) {
	case DC_DEVID(DC_VENDORID_DEC, DC_DEVICEID_21143):
		sc->dc_type = DC_TYPE_21143;
		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		/* Save EEPROM contents so we can parse them later. */
		error = dc_read_srom(sc, sc->dc_romwidth);
		if (error != 0)
			goto fail;
		break;
	case DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009):
	case DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100):
	case DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102):
		sc->dc_type = DC_TYPE_DM9102;
		sc->dc_flags |= DC_TX_COALESCE | DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_REDUCED_MII_POLL | DC_TX_STORENFWD;
		sc->dc_flags |= DC_TX_ALIGN;
		sc->dc_pmode = DC_PMODE_MII;

		/* Increase the latency timer value. */
		pci_write_config(dev, PCIR_LATTIMER, 0x80, 1);
		break;
	case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AL981):
		sc->dc_type = DC_TYPE_AL981;
		sc->dc_flags |= DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_TX_ADMTEK_WAR;
		sc->dc_pmode = DC_PMODE_MII;
		error = dc_read_srom(sc, sc->dc_romwidth);
		if (error != 0)
			goto fail;
		break;
	case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AN983):
	case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AN985):
	case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9511):
	case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9513):
	case DC_DEVID(DC_VENDORID_DLINK, DC_DEVICEID_DRP32TXD):
	case DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500):
	case DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500MX):
	case DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN2242):
	case DC_DEVID(DC_VENDORID_HAWKING, DC_DEVICEID_HAWKING_PN672TX):
	case DC_DEVID(DC_VENDORID_PLANEX, DC_DEVICEID_FNW3602T):
	case DC_DEVID(DC_VENDORID_3COM, DC_DEVICEID_3CSOHOB):
	case DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN120):
	case DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130):
	case DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB08):
	case DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB09):
		sc->dc_type = DC_TYPE_AN983;
		sc->dc_flags |= DC_64BIT_HASH;
		sc->dc_flags |= DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_TX_ADMTEK_WAR;
		sc->dc_pmode = DC_PMODE_MII;
		/* Don't read SROM for - auto-loaded on reset */
		break;
	case DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98713):
	case DC_DEVID(DC_VENDORID_CP, DC_DEVICEID_98713_CP):
		if (revision < DC_REVISION_98713A) {
			sc->dc_type = DC_TYPE_98713;
		}
		if (revision >= DC_REVISION_98713A) {
			sc->dc_type = DC_TYPE_98713A;
			sc->dc_flags |= DC_21143_NWAY;
		}
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
		break;
	case DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5):
	case DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN1217):
		/*
		 * Macronix MX98715AEC-C/D/E parts have only a
		 * 128-bit hash table. We need to deal with these
		 * in the same manner as the PNIC II so that we
		 * get the right number of bits out of the
		 * CRC routine.
		 */
		if (revision >= DC_REVISION_98715AEC_C &&
		    revision < DC_REVISION_98725)
			sc->dc_flags |= DC_128BIT_HASH;
		sc->dc_type = DC_TYPE_987x5;
		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY;
		break;
	case DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98727):
		sc->dc_type = DC_TYPE_987x5;
		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY;
		break;
	case DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C115):
		sc->dc_type = DC_TYPE_PNICII;
		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR | DC_128BIT_HASH;
		sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY;
		break;
	case DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168):
		sc->dc_type = DC_TYPE_PNIC;
		sc->dc_flags |= DC_TX_STORENFWD | DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_PNIC_RX_BUG_WAR;
		/* Scratch buffer used by dc_pnic_rx_bug_war(). */
		sc->dc_pnic_rx_buf = malloc(DC_RXLEN * 5, M_DEVBUF, M_NOWAIT);
		if (sc->dc_pnic_rx_buf == NULL) {
			device_printf(sc->dc_dev,
			    "Could not allocate PNIC RX buffer\n");
			error = ENOMEM;
			goto fail;
		}
		if (revision < DC_REVISION_82C169)
			sc->dc_pmode = DC_PMODE_SYM;
		break;
	case DC_DEVID(DC_VENDORID_ASIX, DC_DEVICEID_AX88140A):
		sc->dc_type = DC_TYPE_ASIX;
		sc->dc_flags |= DC_TX_USE_TX_INTR | DC_TX_INTR_FIRSTFRAG;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_pmode = DC_PMODE_MII;
		break;
	case DC_DEVID(DC_VENDORID_XIRCOM, DC_DEVICEID_X3201):
		sc->dc_type = DC_TYPE_XIRCOM;
		sc->dc_flags |= DC_TX_INTR_ALWAYS | DC_TX_COALESCE |
				DC_TX_ALIGN;
		/*
		 * We don't actually need to coalesce, but we're doing
		 * it to obtain a double word aligned buffer.
		 * The DC_TX_COALESCE flag is required.
		 */
		sc->dc_pmode = DC_PMODE_MII;
		break;
	case DC_DEVID(DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112):
		sc->dc_type = DC_TYPE_CONEXANT;
		sc->dc_flags |= DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_pmode = DC_PMODE_MII;
		error = dc_read_srom(sc, sc->dc_romwidth);
		if (error != 0)
			goto fail;
		break;
	case DC_DEVID(DC_VENDORID_ULI, DC_DEVICEID_M5261):
	case DC_DEVID(DC_VENDORID_ULI, DC_DEVICEID_M5263):
		if (sc->dc_info->dc_devid ==
		    DC_DEVID(DC_VENDORID_ULI, DC_DEVICEID_M5261))
			sc->dc_type = DC_TYPE_ULI_M5261;
		else
			sc->dc_type = DC_TYPE_ULI_M5263;
		/* TX buffers should be aligned on 4 byte boundary. */
		sc->dc_flags |= DC_TX_INTR_ALWAYS | DC_TX_COALESCE |
		    DC_TX_ALIGN;
		sc->dc_pmode = DC_PMODE_MII;
		error = dc_read_srom(sc, sc->dc_romwidth);
		if (error != 0)
			goto fail;
		break;
	default:
		/* Probe matched but no handler: continue with defaults. */
		device_printf(dev, "unknown device: %x\n",
		    sc->dc_info->dc_devid);
		break;
	}

	/* Save the cache line size. */
	if (DC_IS_DAVICOM(sc))
		sc->dc_cachesize = 0;
	else
		sc->dc_cachesize = pci_get_cachelnsz(dev);

	/* Reset the adapter. */
	dc_reset(sc);

	/* Take 21143 out of snooze mode */
	if (DC_IS_INTEL(sc) || DC_IS_XIRCOM(sc)) {
		command = pci_read_config(dev, DC_PCI_CFDD, 4);
		command &= ~(DC_CFDD_SNOOZE_MODE | DC_CFDD_SLEEP_MODE);
		pci_write_config(dev, DC_PCI_CFDD, command, 4);
	}

	/*
	 * Try to learn something about the supported media.
	 * We know that ASIX and ADMtek and Davicom devices
	 * will *always* be using MII media, so that's a no-brainer.
	 * The tricky ones are the Macronix/PNIC II and the
	 * Intel 21143.
	 */
	if (DC_IS_INTEL(sc)) {
		error = dc_parse_21143_srom(sc);
		if (error != 0)
			goto fail;
	} else if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
		if (sc->dc_type == DC_TYPE_98713)
			sc->dc_pmode = DC_PMODE_MII;
		else
			sc->dc_pmode = DC_PMODE_SYM;
	} else if (!sc->dc_pmode)
		sc->dc_pmode = DC_PMODE_MII;

	/*
	 * Get station address from the EEPROM.
	 */
	switch(sc->dc_type) {
	case DC_TYPE_98713:
	case DC_TYPE_98713A:
	case DC_TYPE_987x5:
	case DC_TYPE_PNICII:
		/* The EEPROM stores the offset of the MAC indirectly. */
		dc_read_eeprom(sc, (caddr_t)&mac_offset,
		    (DC_EE_NODEADDR_OFFSET / 2), 1, 0);
		dc_read_eeprom(sc, (caddr_t)&eaddr, (mac_offset / 2), 3, 0);
		break;
	case DC_TYPE_PNIC:
		dc_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 1);
		break;
	case DC_TYPE_DM9102:
		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
		break;
	case DC_TYPE_21143:
	case DC_TYPE_ASIX:
		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
		break;
	case DC_TYPE_AL981:
	case DC_TYPE_AN983:
		/* Address is auto-loaded into the PAR registers on reset. */
		reg = CSR_READ_4(sc, DC_AL_PAR0);
		mac = (uint8_t *)&eaddr[0];
		mac[0] = (reg >> 0) & 0xff;
		mac[1] = (reg >> 8) & 0xff;
		mac[2] = (reg >> 16) & 0xff;
		mac[3] = (reg >> 24) & 0xff;
		reg = CSR_READ_4(sc, DC_AL_PAR1);
		mac[4] = (reg >> 0) & 0xff;
		mac[5] = (reg >> 8) & 0xff;
		break;
	case DC_TYPE_CONEXANT:
		bcopy(sc->dc_srom + DC_CONEXANT_EE_NODEADDR, &eaddr,
		    ETHER_ADDR_LEN);
		break;
	case DC_TYPE_XIRCOM:
		/* The MAC comes from the CIS. */
		mac = pci_get_ether(dev);
		if (!mac) {
			device_printf(dev, "No station address in CIS!\n");
			error = ENXIO;
			goto fail;
		}
		bcopy(mac, eaddr, ETHER_ADDR_LEN);
		break;
	case DC_TYPE_ULI_M5261:
	case DC_TYPE_ULI_M5263:
		srom = (uint16_t *)sc->dc_srom;
		if (srom == NULL || *srom == 0xFFFF || *srom == 0) {
			/*
			 * No valid SROM present, read station address
			 * from ID Table.
			 */
			device_printf(dev,
			    "Reading station address from ID Table.\n");
			CSR_WRITE_4(sc, DC_BUSCTL, 0x10000);
			CSR_WRITE_4(sc, DC_SIARESET, 0x01C0);
			CSR_WRITE_4(sc, DC_10BTCTRL, 0x0000);
			CSR_WRITE_4(sc, DC_10BTCTRL, 0x0010);
			CSR_WRITE_4(sc, DC_10BTCTRL, 0x0000);
			CSR_WRITE_4(sc, DC_SIARESET, 0x0000);
			CSR_WRITE_4(sc, DC_SIARESET, 0x01B0);
			mac = (uint8_t *)eaddr;
			for (n = 0; n < ETHER_ADDR_LEN; n++)
				mac[n] = (uint8_t)CSR_READ_4(sc, DC_10BTCTRL);
			CSR_WRITE_4(sc, DC_SIARESET, 0x0000);
			CSR_WRITE_4(sc, DC_BUSCTL, 0x0000);
			DELAY(10);
		} else
			dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3,
			    0);
		break;
	default:
		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
		break;
	}

	bcopy(eaddr, sc->dc_eaddr, sizeof(eaddr));
	/*
	 * If we still have invalid station address, see whether we can
	 * find station address for chip 0.  Some multi-port controllers
	 * just store station address for chip 0 if they have a shared
	 * SROM.
	 */
	if ((sc->dc_eaddr[0] == 0 && (sc->dc_eaddr[1] & ~0xffff) == 0) ||
	    (sc->dc_eaddr[0] == 0xffffffff &&
	    (sc->dc_eaddr[1] & 0xffff) == 0xffff)) {
		error = dc_check_multiport(sc);
		if (error == 0) {
			bcopy(sc->dc_eaddr, eaddr, sizeof(eaddr));
			/* Extract media information. */
			if (DC_IS_INTEL(sc) && sc->dc_srom != NULL) {
				/* Drop media info parsed from our own SROM. */
				while (sc->dc_mi != NULL) {
					m = sc->dc_mi->dc_next;
					free(sc->dc_mi, M_DEVBUF);
					sc->dc_mi = m;
				}
				error = dc_parse_21143_srom(sc);
				if (error != 0)
					goto fail;
			}
		} else if (error == ENOMEM)
			goto fail;
		else
			error = 0;
	}

	if ((error = dc_dma_alloc(sc)) != 0)
		goto fail;

	ifp = sc->dc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dc_ioctl;
	ifp->if_start = dc_start;
	ifp->if_init = dc_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, DC_TX_LIST_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = DC_TX_LIST_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Do MII setup. If this is a 21143, check for a PHY on the
	 * MII bus after applying any necessary fixups to twiddle the
	 * GPIO bits. If we don't end up finding a PHY, restore the
	 * old selection (SIA only or SIA/SYM) and attach the dcphy
	 * driver instead.
	 */
	tmp = 0;
	if (DC_IS_INTEL(sc)) {
		dc_apply_fixup(sc, IFM_AUTO);
		tmp = sc->dc_pmode;
		sc->dc_pmode = DC_PMODE_MII;
	}

	/*
	 * Setup General Purpose port mode and data so the tulip can talk
	 * to the MII.  This needs to be done before mii_attach so that
	 * we can actually see them.
	 */
	if (DC_IS_XIRCOM(sc)) {
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
	}

	phy = MII_PHY_ANY;
	/*
	 * Note: both the AL981 and AN983 have internal PHYs, however the
	 * AL981 provides direct access to the PHY registers while the AN983
	 * uses a serial MII interface. The AN983's MII interface is also
	 * buggy in that you can read from any MII address (0 to 31), but
	 * only address 1 behaves normally. To deal with both cases, we
	 * pretend that the PHY is at MII address 1.
	 */
	if (DC_IS_ADMTEK(sc))
		phy = DC_ADMTEK_PHYADDR;

	/*
	 * Note: the ukphy probes of the RS7112 report a PHY at MII address
	 * 0 (possibly HomePNA?) and 1 (ethernet) so we only respond to the
	 * correct one.
	 */
	if (DC_IS_CONEXANT(sc))
		phy = DC_CONEXANT_PHYADDR;

	error = mii_attach(dev, &sc->dc_miibus, ifp, dc_ifmedia_upd,
	    dc_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);

	if (error && DC_IS_INTEL(sc)) {
		/* No MII PHY found: fall back to SIA/SYM and dcphy. */
		sc->dc_pmode = tmp;
		if (sc->dc_pmode != DC_PMODE_SIA)
			sc->dc_pmode = DC_PMODE_SYM;
		sc->dc_flags |= DC_21143_NWAY;
		/*
		 * For non-MII cards, we need to have the 21143
		 * drive the LEDs. Except there are some systems
		 * like the NEC VersaPro NoteBook PC which have no
		 * LEDs, and twiddling these bits has adverse effects
		 * on them. (I.e. you suddenly can't get a link.)
		 */
		if (!(pci_get_subvendor(dev) == 0x1033 &&
		    pci_get_subdevice(dev) == 0x8028))
			sc->dc_flags |= DC_TULIP_LEDS;
		error = mii_attach(dev, &sc->dc_miibus, ifp, dc_ifmedia_upd,
		    dc_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY,
		    MII_OFFSET_ANY, 0);
	}

	if (error) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	if (DC_IS_ADMTEK(sc)) {
		/*
		 * Set automatic TX underrun recovery for the ADMtek chips
		 */
		DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR);
	}

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	callout_init_mtx(&sc->dc_stat_ch, &sc->dc_mtx, 0);
	callout_init_mtx(&sc->dc_wdog_ch, &sc->dc_mtx, 0);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, (caddr_t)eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->dc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dc_intr, sc, &sc->dc_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	/* dc_detach() copes with a partially initialized softc. */
	if (error)
		dc_detach(dev);
	return (error);
}
2519 
/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 *
 * Always returns 0.
 */
static int
dc_detach(device_t dev)
{
	struct dc_softc *sc;
	struct ifnet *ifp;
	struct dc_mediainfo *m;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->dc_mtx), ("dc mutex not initialized"));

	ifp = sc->dc_ifp;

#ifdef DEVICE_POLLING
	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		DC_LOCK(sc);
		dc_stop(sc);
		DC_UNLOCK(sc);
		/* Wait for any in-flight callout handlers to finish. */
		callout_drain(&sc->dc_stat_ch);
		callout_drain(&sc->dc_wdog_ch);
		ether_ifdetach(ifp);
	}
	/* Tear down child devices (miibus) before bus resources. */
	if (sc->dc_miibus)
		device_delete_child(dev, sc->dc_miibus);
	bus_generic_detach(dev);

	if (sc->dc_intrhand)
		bus_teardown_intr(dev, sc->dc_irq, sc->dc_intrhand);
	if (sc->dc_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq);
	if (sc->dc_res)
		bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res);

	if (ifp != NULL)
		if_free(ifp);

	dc_dma_free(sc);

	/* free(9) is a no-op on NULL, so unconditional frees are safe. */
	free(sc->dc_pnic_rx_buf, M_DEVBUF);

	/* Release the media info list built from the SROM. */
	while (sc->dc_mi != NULL) {
		m = sc->dc_mi->dc_next;
		free(sc->dc_mi, M_DEVBUF);
		sc->dc_mi = m;
	}
	free(sc->dc_srom, M_DEVBUF);

	mtx_destroy(&sc->dc_mtx);

	return (0);
}
2582 
2583 /*
2584  * Initialize the transmit descriptors.
2585  */
2586 static int
2587 dc_list_tx_init(struct dc_softc *sc)
2588 {
2589 	struct dc_chain_data *cd;
2590 	struct dc_list_data *ld;
2591 	int i, nexti;
2592 
2593 	cd = &sc->dc_cdata;
2594 	ld = &sc->dc_ldata;
2595 	for (i = 0; i < DC_TX_LIST_CNT; i++) {
2596 		if (i == DC_TX_LIST_CNT - 1)
2597 			nexti = 0;
2598 		else
2599 			nexti = i + 1;
2600 		ld->dc_tx_list[i].dc_status = 0;
2601 		ld->dc_tx_list[i].dc_ctl = 0;
2602 		ld->dc_tx_list[i].dc_data = 0;
2603 		ld->dc_tx_list[i].dc_next = htole32(DC_TXDESC(sc, nexti));
2604 		cd->dc_tx_chain[i] = NULL;
2605 	}
2606 
2607 	cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0;
2608 	cd->dc_tx_pkts = 0;
2609 	bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap,
2610 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2611 	return (0);
2612 }
2613 
2614 /*
2615  * Initialize the RX descriptors and allocate mbufs for them. Note that
2616  * we arrange the descriptors in a closed ring, so that the last descriptor
2617  * points back to the first.
2618  */
2619 static int
2620 dc_list_rx_init(struct dc_softc *sc)
2621 {
2622 	struct dc_chain_data *cd;
2623 	struct dc_list_data *ld;
2624 	int i, nexti;
2625 
2626 	cd = &sc->dc_cdata;
2627 	ld = &sc->dc_ldata;
2628 
2629 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
2630 		if (dc_newbuf(sc, i) != 0)
2631 			return (ENOBUFS);
2632 		if (i == DC_RX_LIST_CNT - 1)
2633 			nexti = 0;
2634 		else
2635 			nexti = i + 1;
2636 		ld->dc_rx_list[i].dc_next = htole32(DC_RXDESC(sc, nexti));
2637 	}
2638 
2639 	cd->dc_rx_prod = 0;
2640 	bus_dmamap_sync(sc->dc_rx_ltag, sc->dc_rx_lmap,
2641 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2642 	return (0);
2643 }
2644 
2645 /*
2646  * Initialize an RX descriptor and attach an MBUF cluster.
2647  */
2648 static int
2649 dc_newbuf(struct dc_softc *sc, int i)
2650 {
2651 	struct mbuf *m;
2652 	bus_dmamap_t map;
2653 	bus_dma_segment_t segs[1];
2654 	int error, nseg;
2655 
2656 	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2657 	if (m == NULL)
2658 		return (ENOBUFS);
2659 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2660 	m_adj(m, sizeof(u_int64_t));
2661 
2662 	/*
2663 	 * If this is a PNIC chip, zero the buffer. This is part
2664 	 * of the workaround for the receive bug in the 82c168 and
2665 	 * 82c169 chips.
2666 	 */
2667 	if (sc->dc_flags & DC_PNIC_RX_BUG_WAR)
2668 		bzero(mtod(m, char *), m->m_len);
2669 
2670 	error = bus_dmamap_load_mbuf_sg(sc->dc_rx_mtag, sc->dc_sparemap,
2671 	    m, segs, &nseg, 0);
2672 	if (error) {
2673 		m_freem(m);
2674 		return (error);
2675 	}
2676 	KASSERT(nseg == 1, ("%s: wrong number of segments (%d)", __func__,
2677 	    nseg));
2678 	if (sc->dc_cdata.dc_rx_chain[i] != NULL)
2679 		bus_dmamap_unload(sc->dc_rx_mtag, sc->dc_cdata.dc_rx_map[i]);
2680 
2681 	map = sc->dc_cdata.dc_rx_map[i];
2682 	sc->dc_cdata.dc_rx_map[i] = sc->dc_sparemap;
2683 	sc->dc_sparemap = map;
2684 	sc->dc_cdata.dc_rx_chain[i] = m;
2685 	bus_dmamap_sync(sc->dc_rx_mtag, sc->dc_cdata.dc_rx_map[i],
2686 	    BUS_DMASYNC_PREREAD);
2687 
2688 	sc->dc_ldata.dc_rx_list[i].dc_ctl = htole32(DC_RXCTL_RLINK | DC_RXLEN);
2689 	sc->dc_ldata.dc_rx_list[i].dc_data =
2690 	    htole32(DC_ADDR_LO(segs[0].ds_addr));
2691 	sc->dc_ldata.dc_rx_list[i].dc_status = htole32(DC_RXSTAT_OWN);
2692 	bus_dmamap_sync(sc->dc_rx_ltag, sc->dc_rx_lmap,
2693 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2694 	return (0);
2695 }
2696 
2697 /*
2698  * Grrrrr.
2699  * The PNIC chip has a terrible bug in it that manifests itself during
 * periods of heavy activity. The exact mode of failure is difficult to
2701  * pinpoint: sometimes it only happens in promiscuous mode, sometimes it
2702  * will happen on slow machines. The bug is that sometimes instead of
2703  * uploading one complete frame during reception, it uploads what looks
2704  * like the entire contents of its FIFO memory. The frame we want is at
2705  * the end of the whole mess, but we never know exactly how much data has
2706  * been uploaded, so salvaging the frame is hard.
2707  *
2708  * There is only one way to do it reliably, and it's disgusting.
2709  * Here's what we know:
2710  *
2711  * - We know there will always be somewhere between one and three extra
2712  *   descriptors uploaded.
2713  *
2714  * - We know the desired received frame will always be at the end of the
2715  *   total data upload.
2716  *
2717  * - We know the size of the desired received frame because it will be
2718  *   provided in the length field of the status word in the last descriptor.
2719  *
2720  * Here's what we do:
2721  *
2722  * - When we allocate buffers for the receive ring, we bzero() them.
2723  *   This means that we know that the buffer contents should be all
2724  *   zeros, except for data uploaded by the chip.
2725  *
2726  * - We also force the PNIC chip to upload frames that include the
2727  *   ethernet CRC at the end.
2728  *
2729  * - We gather all of the bogus frame data into a single buffer.
2730  *
2731  * - We then position a pointer at the end of this buffer and scan
2732  *   backwards until we encounter the first non-zero byte of data.
2733  *   This is the end of the received frame. We know we will encounter
2734  *   some data at the end of the frame because the CRC will always be
2735  *   there, so even if the sender transmits a packet of all zeros,
2736  *   we won't be fooled.
2737  *
2738  * - We know the size of the actual received frame, so we subtract
2739  *   that value from the current pointer location. This brings us
2740  *   to the start of the actual received packet.
2741  *
2742  * - We copy this into an mbuf and pass it on, along with the actual
2743  *   frame length.
2744  *
2745  * The performance hit is tremendous, but it beats dropping frames all
2746  * the time.
2747  */
2748 
#define	DC_WHOLEFRAME	(DC_RXSTAT_FIRSTFRAG | DC_RXSTAT_LASTFRAG)
/*
 * Salvage a frame from a bogus multi-descriptor upload (see the block
 * comment above).  Starting at the descriptor saved in
 * dc_pnic_rx_bug_save, concatenate the contents of every involved RX
 * buffer into dc_pnic_rx_buf, locate the real frame at the end of that
 * data, and copy it back into the final mbuf while faking the status
 * word to look like a normal single-descriptor reception.
 */
static void
dc_pnic_rx_bug_war(struct dc_softc *sc, int idx)
{
	struct dc_desc *cur_rx;
	struct dc_desc *c = NULL;
	struct mbuf *m = NULL;
	unsigned char *ptr;
	int i, total_len;
	uint32_t rxstat = 0;

	i = sc->dc_pnic_rx_bug_save;
	cur_rx = &sc->dc_ldata.dc_rx_list[idx];
	ptr = sc->dc_pnic_rx_buf;
	/* The scratch buffer must be zeroed for the backwards scan below. */
	bzero(ptr, DC_RXLEN * 5);

	/* Copy all the bytes from the bogus buffers. */
	while (1) {
		c = &sc->dc_ldata.dc_rx_list[i];
		rxstat = le32toh(c->dc_status);
		m = sc->dc_cdata.dc_rx_chain[i];
		bcopy(mtod(m, char *), ptr, DC_RXLEN);
		ptr += DC_RXLEN;
		/* If this is the last buffer, break out. */
		if (i == idx || rxstat & DC_RXSTAT_LASTFRAG)
			break;
		/* Intermediate buffer: re-zero it and recycle the slot. */
		dc_discard_rxbuf(sc, i);
		DC_INC(i, DC_RX_LIST_CNT);
	}

	/* Find the length of the actual receive frame. */
	total_len = DC_RXBYTES(rxstat);

	/* Scan backwards until we hit a non-zero byte. */
	while (*ptr == 0x00)
		ptr--;

	/* Round off. */
	if ((uintptr_t)(ptr) & 0x3)
		ptr -= 1;

	/* Now find the start of the frame. */
	ptr -= total_len;
	if (ptr < sc->dc_pnic_rx_buf)
		ptr = sc->dc_pnic_rx_buf;

	/*
	 * Now copy the salvaged frame to the last mbuf and fake up
	 * the status word to make it look like a successful
	 * frame reception.
	 */
	bcopy(ptr, mtod(m, char *), total_len);
	cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG);
}
2803 
2804 /*
2805  * This routine searches the RX ring for dirty descriptors in the
2806  * event that the rxeof routine falls out of sync with the chip's
2807  * current descriptor pointer. This may happen sometimes as a result
2808  * of a "no RX buffer available" condition that happens when the chip
2809  * consumes all of the RX buffers before the driver has a chance to
2810  * process the RX ring. This routine may need to be called more than
2811  * once to bring the driver back in sync with the chip, however we
2812  * should still be getting RX DONE interrupts to drive the search
2813  * for new packets in the RX ring, so we should catch up eventually.
2814  */
2815 static int
2816 dc_rx_resync(struct dc_softc *sc)
2817 {
2818 	struct dc_desc *cur_rx;
2819 	int i, pos;
2820 
2821 	pos = sc->dc_cdata.dc_rx_prod;
2822 
2823 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
2824 		cur_rx = &sc->dc_ldata.dc_rx_list[pos];
2825 		if (!(le32toh(cur_rx->dc_status) & DC_RXSTAT_OWN))
2826 			break;
2827 		DC_INC(pos, DC_RX_LIST_CNT);
2828 	}
2829 
2830 	/* If the ring really is empty, then just return. */
2831 	if (i == DC_RX_LIST_CNT)
2832 		return (0);
2833 
2834 	/* We've fallen behing the chip: catch it. */
2835 	sc->dc_cdata.dc_rx_prod = pos;
2836 
2837 	return (EAGAIN);
2838 }
2839 
2840 static void
2841 dc_discard_rxbuf(struct dc_softc *sc, int i)
2842 {
2843 	struct mbuf *m;
2844 
2845 	if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
2846 		m = sc->dc_cdata.dc_rx_chain[i];
2847 		bzero(mtod(m, char *), m->m_len);
2848 	}
2849 
2850 	sc->dc_ldata.dc_rx_list[i].dc_ctl = htole32(DC_RXCTL_RLINK | DC_RXLEN);
2851 	sc->dc_ldata.dc_rx_list[i].dc_status = htole32(DC_RXSTAT_OWN);
2852 	bus_dmamap_sync(sc->dc_rx_ltag, sc->dc_rx_lmap, BUS_DMASYNC_PREREAD |
2853 	    BUS_DMASYNC_PREWRITE);
2854 }
2855 
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 *
 * Returns the number of descriptors processed (used by the polling
 * code to account for work done).  Called with the softc lock held;
 * the lock is dropped around if_input().
 */
static int
dc_rxeof(struct dc_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct dc_desc *cur_rx;
	int i, total_len, rx_npkts;
	uint32_t rxstat;

	DC_LOCK_ASSERT(sc);

	ifp = sc->dc_ifp;
	rx_npkts = 0;

	bus_dmamap_sync(sc->dc_rx_ltag, sc->dc_rx_lmap, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	for (i = sc->dc_cdata.dc_rx_prod;
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
	    DC_INC(i, DC_RX_LIST_CNT)) {
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			/* Respect the per-call polling budget. */
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		cur_rx = &sc->dc_ldata.dc_rx_list[i];
		rxstat = le32toh(cur_rx->dc_status);
		/* Stop at the first descriptor still owned by the chip. */
		if ((rxstat & DC_RXSTAT_OWN) != 0)
			break;
		m = sc->dc_cdata.dc_rx_chain[i];
		bus_dmamap_sync(sc->dc_rx_mtag, sc->dc_cdata.dc_rx_map[i],
		    BUS_DMASYNC_POSTREAD);
		total_len = DC_RXBYTES(rxstat);
		rx_npkts++;

		if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
			/*
			 * Partial frame: remember where it started, keep
			 * consuming descriptors until the last fragment,
			 * then let the workaround reassemble the frame.
			 */
			if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) {
				if (rxstat & DC_RXSTAT_FIRSTFRAG)
					sc->dc_pnic_rx_bug_save = i;
				if ((rxstat & DC_RXSTAT_LASTFRAG) == 0)
					continue;
				dc_pnic_rx_bug_war(sc, i);
				rxstat = le32toh(cur_rx->dc_status);
				total_len = DC_RXBYTES(rxstat);
			}
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.  However, don't report long
		 * frames as errors since they could be vlans.
		 */
		if ((rxstat & DC_RXSTAT_RXERR)) {
			if (!(rxstat & DC_RXSTAT_GIANT) ||
			    (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE |
				       DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN |
				       DC_RXSTAT_RUNT   | DC_RXSTAT_DE))) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				if (rxstat & DC_RXSTAT_COLLSEEN)
					if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
				dc_discard_rxbuf(sc, i);
				if (rxstat & DC_RXSTAT_CRCERR)
					continue;
				else {
					/* Non-CRC error: reinitialize the chip. */
					ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
					dc_init_locked(sc);
					return (rx_npkts);
				}
			}
		}

		/* No errors; receive the packet. */
		total_len -= ETHER_CRC_LEN;
#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * On architectures without alignment problems we try to
		 * allocate a new buffer for the receive ring, and pass up
		 * the one where the packet is already, saving the expensive
		 * copy done in m_devget().
		 * If we are on an architecture with alignment problems, or
		 * if the allocation fails, then use m_devget and leave the
		 * existing buffer in the receive ring.
		 */
		if (dc_newbuf(sc, i) != 0) {
			dc_discard_rxbuf(sc, i);
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			continue;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
#else
		{
			struct mbuf *m0;

			m0 = m_devget(mtod(m, char *), total_len,
				ETHER_ALIGN, ifp, NULL);
			dc_discard_rxbuf(sc, i);
			if (m0 == NULL) {
				if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
				continue;
			}
			m = m0;
		}
#endif

		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		/* Drop the driver lock while handing the packet to the stack. */
		DC_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		DC_LOCK(sc);
	}

	sc->dc_cdata.dc_rx_prod = i;
	return (rx_npkts);
}
2977 
2978 /*
2979  * A frame was downloaded to the chip. It's safe for us to clean up
2980  * the list buffers.
2981  */
static void
dc_txeof(struct dc_softc *sc)
{
	struct dc_desc *cur_tx;
	struct ifnet *ifp;
	int idx, setup;
	uint32_t ctl, txstat;

	/* Nothing queued, nothing to reclaim. */
	if (sc->dc_cdata.dc_tx_cnt == 0)
		return;

	ifp = sc->dc_ifp;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	setup = 0;
	for (idx = sc->dc_cdata.dc_tx_cons; idx != sc->dc_cdata.dc_tx_prod;
	    DC_INC(idx, DC_TX_LIST_CNT), sc->dc_cdata.dc_tx_cnt--) {
		cur_tx = &sc->dc_ldata.dc_tx_list[idx];
		txstat = le32toh(cur_tx->dc_status);
		ctl = le32toh(cur_tx->dc_ctl);

		/* Descriptor still owned by the chip; stop reclaiming here. */
		if (txstat & DC_TXSTAT_OWN)
			break;

		/*
		 * Only the last fragment of a frame (or a setup frame)
		 * carries an mbuf pointer; skip intermediate fragments.
		 */
		if (sc->dc_cdata.dc_tx_chain[idx] == NULL)
			continue;

		if (ctl & DC_TXCTL_SETUP) {
			/* This descriptor carried an RX filter setup frame. */
			cur_tx->dc_ctl = htole32(ctl & ~DC_TXCTL_SETUP);
			setup++;
			bus_dmamap_sync(sc->dc_stag, sc->dc_smap,
			    BUS_DMASYNC_POSTWRITE);
			/*
			 * Yes, the PNIC is so brain damaged
			 * that it will sometimes generate a TX
			 * underrun error while DMAing the RX
			 * filter setup frame. If we detect this,
			 * we have to send the setup frame again,
			 * or else the filter won't be programmed
			 * correctly.
			 */
			if (DC_IS_PNIC(sc)) {
				if (txstat & DC_TXSTAT_ERRSUM)
					dc_setfilt(sc);
			}
			sc->dc_cdata.dc_tx_chain[idx] = NULL;
			continue;
		}

		if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) {
			/*
			 * XXX: Why does my Xircom taunt me so?
			 * For some reason it likes setting the CARRLOST flag
			 * even when the carrier is there. wtf?!?
			 * Who knows, but Conexant chips have the
			 * same problem. Maybe they took lessons
			 * from Xircom.
			 */
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM |
			    DC_TXSTAT_NOCARRIER)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		} else {
			/* Ignore spurious CARRLOST-only error summaries. */
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM |
			    DC_TXSTAT_NOCARRIER | DC_TXSTAT_CARRLOST)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		}

		if (txstat & DC_TXSTAT_ERRSUM) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			if (txstat & DC_TXSTAT_EXCESSCOLL)
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
			if (txstat & DC_TXSTAT_LATECOLL)
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
			/*
			 * Any TX error other than an underrun requires a
			 * full reinit; underruns are handled separately by
			 * dc_tx_underrun() from the interrupt path.
			 */
			if (!(txstat & DC_TXSTAT_UNDERRUN)) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				dc_init_locked(sc);
				return;
			}
		} else
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & DC_TXSTAT_COLLCNT) >> 3);

		/* Frame done: unload its DMA map and free the mbuf chain. */
		bus_dmamap_sync(sc->dc_tx_mtag, sc->dc_cdata.dc_tx_map[idx],
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->dc_tx_mtag, sc->dc_cdata.dc_tx_map[idx]);
		m_freem(sc->dc_cdata.dc_tx_chain[idx]);
		sc->dc_cdata.dc_tx_chain[idx] = NULL;
	}
	sc->dc_cdata.dc_tx_cons = idx;

	/* Reopen the queue if enough descriptors were freed up. */
	if (sc->dc_cdata.dc_tx_cnt <= DC_TX_LIST_CNT - DC_TX_LIST_RSVD) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (sc->dc_cdata.dc_tx_cnt == 0)
			sc->dc_wdog_timer = 0;
	}
	/*
	 * If we cleared any SETUP bits above, push the modified
	 * descriptors back out to the chip.
	 */
	if (setup > 0)
		bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
3090 
/*
 * Periodic housekeeping: reclaim TX descriptors on chips that batch
 * TX interrupts, poll the PHY/NWAY link state, and kick the transmit
 * queue once a link is up.  Runs under the softc lock via callout.
 */
static void
dc_tick(void *xsc)
{
	struct dc_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t r;

	sc = xsc;
	DC_LOCK_ASSERT(sc);
	ifp = sc->dc_ifp;
	mii = device_get_softc(sc->dc_miibus);

	/*
	 * Reclaim transmitted frames for controllers that do
	 * not generate TX completion interrupt for every frame.
	 */
	if (sc->dc_flags & DC_TX_USE_TX_INTR)
		dc_txeof(sc);

	if (sc->dc_flags & DC_REDUCED_MII_POLL) {
		if (sc->dc_flags & DC_21143_NWAY) {
			/*
			 * 21143-style NWAY: compare the SIA link status
			 * bits against the media we believe is active and
			 * force a renegotiation on mismatch.
			 */
			r = CSR_READ_4(sc, DC_10BTSTAT);
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_100_TX && (r & DC_TSTAT_LS100)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_10_T && (r & DC_TSTAT_LS10)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (sc->dc_link == 0)
				mii_tick(mii);
		} else {
			/*
			 * For NICs which never report DC_RXSTATE_WAIT, we
			 * have to bite the bullet...
			 */
			if ((DC_HAS_BROKEN_RXSTATE(sc) || (CSR_READ_4(sc,
			    DC_ISR) & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) &&
			    sc->dc_cdata.dc_tx_cnt == 0)
				mii_tick(mii);
		}
	} else
		mii_tick(mii);

	/*
	 * When the init routine completes, we expect to be able to send
	 * packets right away, and in fact the network code will send a
	 * gratuitous ARP the moment the init routine marks the interface
	 * as running. However, even though the MAC may have been initialized,
	 * there may be a delay of a few seconds before the PHY completes
	 * autonegotiation and the link is brought up. Any transmissions
	 * made during that delay will be lost. Dealing with this is tricky:
	 * we can't just pause in the init routine while waiting for the
	 * PHY to come ready since that would bring the whole system to
	 * a screeching halt for several seconds.
	 *
	 * What we do here is prevent the TX start routine from sending
	 * any packets until a link has been established. After the
	 * interface has been initialized, the tick routine will poll
	 * the state of the PHY until the IFM_ACTIVE flag is set. Until
	 * that time, packets will stay in the send queue, and once the
	 * link comes up, they will be flushed out to the wire.
	 */
	if (sc->dc_link != 0 && !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		dc_start_locked(ifp);

	/* Poll faster (10 Hz) while NWAY autonegotiation is in progress. */
	if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link)
		callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc);
	else
		callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc);
}
3166 
3167 /*
3168  * A transmit underrun has occurred.  Back off the transmit threshold,
3169  * or switch to store and forward mode if we have to.
3170  */
static void
dc_tx_underrun(struct dc_softc *sc)
{
	uint32_t netcfg, isr;
	int i, reinit;

	reinit = 0;
	netcfg = CSR_READ_4(sc, DC_NETCFG);
	/* Message is completed by one of the printf()s below. */
	device_printf(sc->dc_dev, "TX underrun -- ");
	if ((sc->dc_flags & DC_TX_STORENFWD) == 0) {
		if (sc->dc_txthresh + DC_TXTHRESH_INC > DC_TXTHRESH_MAX) {
			/* Threshold exhausted; fall back to store-and-forward. */
			printf("using store and forward mode\n");
			netcfg |= DC_NETCFG_STORENFWD;
		} else {
			printf("increasing TX threshold\n");
			sc->dc_txthresh += DC_TXTHRESH_INC;
			netcfg &= ~DC_NETCFG_TX_THRESH;
			netcfg |= sc->dc_txthresh;
		}

		if (DC_IS_INTEL(sc)) {
			/*
			 * The real 21143 requires that the transmitter be idle
			 * in order to change the transmit threshold or store
			 * and forward state.
			 */
			CSR_WRITE_4(sc, DC_NETCFG, netcfg & ~DC_NETCFG_TX_ON);

			for (i = 0; i < DC_TIMEOUT; i++) {
				isr = CSR_READ_4(sc, DC_ISR);
				if (isr & DC_ISR_TX_IDLE)
					break;
				DELAY(10);
			}
			if (i == DC_TIMEOUT) {
				device_printf(sc->dc_dev,
				    "%s: failed to force tx to idle state\n",
				    __func__);
				reinit++;
			}
		}
	} else {
		/* Already in store-and-forward: only a full reset can help. */
		printf("resetting\n");
		reinit++;
	}

	if (reinit == 0) {
		/* Apply the new config; Intel parts need TX re-enabled. */
		CSR_WRITE_4(sc, DC_NETCFG, netcfg);
		if (DC_IS_INTEL(sc))
			CSR_WRITE_4(sc, DC_NETCFG, netcfg | DC_NETCFG_TX_ON);
	} else {
		sc->dc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		dc_init_locked(sc);
	}
}
3226 
3227 #ifdef DEVICE_POLLING
3228 static poll_handler_t dc_poll;
3229 
/*
 * DEVICE_POLLING handler: service RX/TX without interrupts.  With
 * POLL_AND_CHECK_STATUS we also consume the ISR and handle the error
 * conditions dc_intr() would normally catch.  Returns the number of
 * packets received.
 */
static int
dc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct dc_softc *sc = ifp->if_softc;
	int rx_npkts = 0;

	DC_LOCK(sc);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		DC_UNLOCK(sc);
		return (rx_npkts);
	}

	/* Budget for dc_rxeof(); it stops after this many packets. */
	sc->rxcycles = count;
	rx_npkts = dc_rxeof(sc);
	dc_txeof(sc);
	if (!IFQ_IS_EMPTY(&ifp->if_snd) &&
	    !(ifp->if_drv_flags & IFF_DRV_OACTIVE))
		dc_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		uint32_t	status;

		status = CSR_READ_4(sc, DC_ISR);
		status &= (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF |
			DC_ISR_TX_NOBUF | DC_ISR_TX_IDLE | DC_ISR_TX_UNDERRUN |
			DC_ISR_BUS_ERR);
		if (!status) {
			DC_UNLOCK(sc);
			return (rx_npkts);
		}
		/* ack what we have */
		CSR_WRITE_4(sc, DC_ISR, status);

		if (status & (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF)) {
			/* Fold the chip's missed-frame counters into ierrors. */
			uint32_t r = CSR_READ_4(sc, DC_FRAMESDISCARDED);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, (r & 0xffff) + ((r >> 17) & 0x7ff));

			if (dc_rx_resync(sc))
				dc_rxeof(sc);
		}
		/* restart transmit unit if necessary */
		if (status & DC_ISR_TX_IDLE && sc->dc_cdata.dc_tx_cnt)
			CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

		if (status & DC_ISR_TX_UNDERRUN)
			dc_tx_underrun(sc);

		if (status & DC_ISR_BUS_ERR) {
			if_printf(ifp, "%s: bus error\n", __func__);
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			dc_init_locked(sc);
		}
	}
	DC_UNLOCK(sc);
	return (rx_npkts);
}
3287 #endif /* DEVICE_POLLING */
3288 
/*
 * Interrupt handler.  Masks chip interrupts, then loops (bounded to 16
 * iterations) acknowledging and servicing RX, TX, underrun and bus-error
 * conditions until the ISR goes quiet, and finally re-enables interrupts.
 */
static void
dc_intr(void *arg)
{
	struct dc_softc *sc;
	struct ifnet *ifp;
	uint32_t r, status;
	int n;

	sc = arg;

	/* Ignore interrupts that arrive while we are suspended. */
	if (sc->suspended)
		return;

	DC_LOCK(sc);
	status = CSR_READ_4(sc, DC_ISR);
	/* 0xFFFFFFFF means the card is likely gone (e.g. CardBus eject). */
	if (status == 0xFFFFFFFF || (status & DC_INTRS) == 0) {
		DC_UNLOCK(sc);
		return;
	}
	ifp = sc->dc_ifp;
#ifdef DEVICE_POLLING
	/* In polling mode all of this work is done by dc_poll(). */
	if (ifp->if_capenable & IFCAP_POLLING) {
		DC_UNLOCK(sc);
		return;
	}
#endif
	/* Disable interrupts. */
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);

	for (n = 16; n > 0; n--) {
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		/* Ack interrupts. */
		CSR_WRITE_4(sc, DC_ISR, status);

		if (status & DC_ISR_RX_OK) {
			/* No packets consumed: try to resync the RX ring. */
			if (dc_rxeof(sc) == 0) {
				while (dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		if (status & (DC_ISR_TX_OK | DC_ISR_TX_NOBUF))
			dc_txeof(sc);

		if (status & DC_ISR_TX_IDLE) {
			/* TX engine stopped with work pending: restart it. */
			dc_txeof(sc);
			if (sc->dc_cdata.dc_tx_cnt) {
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
				CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
			}
		}

		if (status & DC_ISR_TX_UNDERRUN)
			dc_tx_underrun(sc);

		if ((status & DC_ISR_RX_WATDOGTIMEO)
		    || (status & DC_ISR_RX_NOBUF)) {
			/* Fold the chip's missed-frame counters into ierrors. */
			r = CSR_READ_4(sc, DC_FRAMESDISCARDED);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, (r & 0xffff) + ((r >> 17) & 0x7ff));
			if (dc_rxeof(sc) == 0) {
				while (dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			dc_start_locked(ifp);

		if (status & DC_ISR_BUS_ERR) {
			/* Fatal: reinit the chip and bail out immediately. */
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			dc_init_locked(sc);
			DC_UNLOCK(sc);
			return;
		}
		status = CSR_READ_4(sc, DC_ISR);
		if (status == 0xFFFFFFFF || (status & DC_INTRS) == 0)
			break;
	}

	/* Re-enable interrupts. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		CSR_WRITE_4(sc, DC_IMR, DC_INTRS);

	DC_UNLOCK(sc);
}
3375 
3376 /*
3377  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
3378  * pointers to the fragment pointers.
3379  */
static int
dc_encap(struct dc_softc *sc, struct mbuf **m_head)
{
	bus_dma_segment_t segs[DC_MAXFRAGS];
	bus_dmamap_t map;
	struct dc_desc *f;
	struct mbuf *m;
	int cur, defragged, error, first, frag, i, idx, nseg;

	m = NULL;
	defragged = 0;
	/*
	 * Chips flagged DC_TX_COALESCE can only transmit from a single
	 * contiguous buffer (optionally aligned), so squash the chain
	 * down to one mbuf up front.
	 */
	if (sc->dc_flags & DC_TX_COALESCE &&
	    ((*m_head)->m_next != NULL || sc->dc_flags & DC_TX_ALIGN)) {
		m = m_defrag(*m_head, M_NOWAIT);
		defragged = 1;
	} else {
		/*
		 * Count the number of frags in this chain to see if we
		 * need to m_collapse.  Since the descriptor list is shared
		 * by all packets, we'll m_collapse long chains so that they
		 * do not use up the entire list, even if they would fit.
		 */
		i = 0;
		for (m = *m_head; m != NULL; m = m->m_next)
			i++;
		if (i > DC_TX_LIST_CNT / 4 ||
		    DC_TX_LIST_CNT - i + sc->dc_cdata.dc_tx_cnt <=
		    DC_TX_LIST_RSVD) {
			m = m_collapse(*m_head, M_NOWAIT, DC_MAXFRAGS);
			defragged = 1;
		}
	}
	if (defragged != 0) {
		/* m_defrag()/m_collapse() failure frees nothing; do it here. */
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
	}

	idx = sc->dc_cdata.dc_tx_prod;
	error = bus_dmamap_load_mbuf_sg(sc->dc_tx_mtag,
	    sc->dc_cdata.dc_tx_map[idx], *m_head, segs, &nseg, 0);
	if (error == EFBIG) {
		/* Too many segments: collapse once (if not done) and retry. */
		if (defragged != 0 || (m = m_collapse(*m_head, M_NOWAIT,
		    DC_MAXFRAGS)) == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (defragged != 0 ? error : ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->dc_tx_mtag,
		    sc->dc_cdata.dc_tx_map[idx], *m_head, segs, &nseg, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	KASSERT(nseg <= DC_MAXFRAGS,
	    ("%s: wrong number of segments (%d)", __func__, nseg));
	if (nseg == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check descriptor overruns. */
	if (sc->dc_cdata.dc_tx_cnt + nseg > DC_TX_LIST_CNT - DC_TX_LIST_RSVD) {
		bus_dmamap_unload(sc->dc_tx_mtag, sc->dc_cdata.dc_tx_map[idx]);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->dc_tx_mtag, sc->dc_cdata.dc_tx_map[idx],
	    BUS_DMASYNC_PREWRITE);

	first = cur = frag = sc->dc_cdata.dc_tx_prod;
	for (i = 0; i < nseg; i++) {
		/*
		 * ADMtek workaround: don't let a frame wrap around the end
		 * of the descriptor ring unless it starts exactly at the
		 * batch boundary recorded in dc_tx_first.
		 */
		if ((sc->dc_flags & DC_TX_ADMTEK_WAR) &&
		    (frag == (DC_TX_LIST_CNT - 1)) &&
		    (first != sc->dc_cdata.dc_tx_first)) {
			bus_dmamap_unload(sc->dc_tx_mtag,
			    sc->dc_cdata.dc_tx_map[first]);
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}

		f = &sc->dc_ldata.dc_tx_list[frag];
		f->dc_ctl = htole32(DC_TXCTL_TLINK | segs[i].ds_len);
		if (i == 0) {
			/* Hold OWN on the first descriptor until the end. */
			f->dc_status = 0;
			f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG);
		} else
			f->dc_status = htole32(DC_TXSTAT_OWN);
		f->dc_data = htole32(DC_ADDR_LO(segs[i].ds_addr));
		cur = frag;
		DC_INC(frag, DC_TX_LIST_CNT);
	}

	sc->dc_cdata.dc_tx_prod = frag;
	sc->dc_cdata.dc_tx_cnt += nseg;
	/* The mbuf pointer lives on the last descriptor of the frame. */
	sc->dc_cdata.dc_tx_chain[cur] = *m_head;
	sc->dc_ldata.dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG);
	if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG)
		sc->dc_ldata.dc_tx_list[first].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	if (sc->dc_flags & DC_TX_INTR_ALWAYS)
		sc->dc_ldata.dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_FINT);
	/* Batched-interrupt chips get a TX interrupt every 8 packets. */
	if (sc->dc_flags & DC_TX_USE_TX_INTR &&
	    ++sc->dc_cdata.dc_tx_pkts >= 8) {
		sc->dc_cdata.dc_tx_pkts = 0;
		sc->dc_ldata.dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_FINT);
	}
	/* Hand the whole frame to the chip in one shot. */
	sc->dc_ldata.dc_tx_list[first].dc_status = htole32(DC_TXSTAT_OWN);

	bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Swap the last and the first dmamaps to ensure the map for
	 * this transmission is placed at the last descriptor.
	 */
	map = sc->dc_cdata.dc_tx_map[cur];
	sc->dc_cdata.dc_tx_map[cur] = sc->dc_cdata.dc_tx_map[first];
	sc->dc_cdata.dc_tx_map[first] = map;

	return (0);
}
3510 
3511 static void
3512 dc_start(struct ifnet *ifp)
3513 {
3514 	struct dc_softc *sc;
3515 
3516 	sc = ifp->if_softc;
3517 	DC_LOCK(sc);
3518 	dc_start_locked(ifp);
3519 	DC_UNLOCK(sc);
3520 }
3521 
3522 /*
3523  * Main transmit routine
3524  * To avoid having to do mbuf copies, we put pointers to the mbuf data
3525  * regions directly in the transmit lists.  We also save a copy of the
3526  * pointers since the transmit list fragment pointers are physical
3527  * addresses.
3528  */
static void
dc_start_locked(struct ifnet *ifp)
{
	struct dc_softc *sc;
	struct mbuf *m_head;
	int queued;

	sc = ifp->if_softc;

	DC_LOCK_ASSERT(sc);

	/* Don't transmit unless running, not flow-blocked, and link is up. */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->dc_link == 0)
		return;

	/* Remember where this batch starts (used by the ADMtek WAR). */
	sc->dc_cdata.dc_tx_first = sc->dc_cdata.dc_tx_prod;

	for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		/*
		 * If there's no way we can send any packets, return now.
		 */
		if (sc->dc_cdata.dc_tx_cnt > DC_TX_LIST_CNT - DC_TX_LIST_RSVD) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (dc_encap(sc, &m_head)) {
			/* NULL m_head means dc_encap() already freed it. */
			if (m_head == NULL)
				break;
			/* Otherwise requeue the packet and stall the queue. */
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		queued++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (queued > 0) {
		/* Transmit */
		if (!(sc->dc_flags & DC_TX_POLL))
			CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		sc->dc_wdog_timer = 5;
	}
}
3585 
/* ifnet if_init entry point: serialize and run the locked init. */
static void
dc_init(void *xsc)
{
	struct dc_softc *sc;

	sc = xsc;
	DC_LOCK(sc);
	dc_init_locked(sc);
	DC_UNLOCK(sc);
}
3595 
/*
 * Bring the chip up: reset it, program bus/cache/threshold settings and
 * chip-specific quirks, set up the RX/TX descriptor rings, program the
 * RX filter, enable the receiver/transmitter and start the callouts.
 * Must be called with the softc lock held; a no-op if already running.
 */
static void
dc_init_locked(struct dc_softc *sc)
{
	struct ifnet *ifp = sc->dc_ifp;
	struct mii_data *mii;
	struct ifmedia *ifm;

	DC_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	mii = device_get_softc(sc->dc_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	dc_stop(sc);
	dc_reset(sc);
	if (DC_IS_INTEL(sc)) {
		/* Reapply the 21143 SROM media fixups lost by the reset. */
		ifm = &mii->mii_media;
		dc_apply_fixup(sc, ifm->ifm_media);
	}

	/*
	 * Set cache alignment and burst length.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc) || DC_IS_ULI(sc))
		CSR_WRITE_4(sc, DC_BUSCTL, 0);
	else
		CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME | DC_BUSCTL_MRLE);
	/*
	 * Evenly share the bus between receive and transmit process.
	 */
	if (DC_IS_INTEL(sc))
		DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION);
	if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA);
	} else {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG);
	}
	if (sc->dc_flags & DC_TX_POLL)
		DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1);
	switch(sc->dc_cachesize) {
	case 32:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG);
		break;
	case 16:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG);
		break;
	case 8:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG);
		break;
	case 0:
	default:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE);
		break;
	}

	/* Select store-and-forward or cut-through with a TX threshold. */
	if (sc->dc_flags & DC_TX_STORENFWD)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
	else {
		if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
			DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
		}
	}

	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC);
	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF);

	if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
		/*
		 * The app notes for the 98713 and 98715A say that
		 * in order to have the chips operate properly, a magic
		 * number must be written to CSR16. Macronix does not
		 * document the meaning of these bits so there's no way
		 * to know exactly what they do. The 98713 has a magic
		 * number all its own; the rest all use a different one.
		 */
		DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000);
		if (sc->dc_type == DC_TYPE_98713)
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713);
		else
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715);
	}

	if (DC_IS_XIRCOM(sc)) {
		/*
		 * setup General Purpose Port mode and data so the tulip
		 * can talk to the MII.
		 */
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
			   DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
			   DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
	}

	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
	DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN);

	/* Init circular RX list. */
	if (dc_list_rx_init(sc) == ENOBUFS) {
		device_printf(sc->dc_dev,
		    "initialization failed: no memory for rx buffers\n");
		dc_stop(sc);
		return;
	}

	/*
	 * Init TX descriptors.
	 */
	dc_list_tx_init(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, DC_RXADDR, DC_RXDESC(sc, 0));
	CSR_WRITE_4(sc, DC_TXADDR, DC_TXDESC(sc, 0));

	/*
	 * Enable interrupts.
	 */
#ifdef DEVICE_POLLING
	/*
	 * ... but only if we are not polling, and make sure they are off in
	 * the case of polling. Some cards (e.g. fxp) turn interrupts on
	 * after a reset.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	else
#endif
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
	CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF);

	/* Initialize TX jabber and RX watchdog timer. */
	if (DC_IS_ULI(sc))
		CSR_WRITE_4(sc, DC_WATCHDOG, DC_WDOG_JABBERCLK |
		    DC_WDOG_HOSTUNJAB);

	/* Enable transmitter. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);

	/*
	 * If this is an Intel 21143 and we're not using the
	 * MII port, program the LED control pins so we get
	 * link and activity indications.
	 */
	if (sc->dc_flags & DC_TULIP_LEDS) {
		CSR_WRITE_4(sc, DC_WATCHDOG,
		    DC_WDOG_CTLWREN | DC_WDOG_LINK | DC_WDOG_ACTIVITY);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	/*
	 * Load the RX/multicast filter. We do this sort of late
	 * because the filter programming scheme on the 21143 and
	 * some clones requires DMAing a setup frame via the TX
	 * engine, and we need the transmitter enabled for that.
	 */
	dc_setfilt(sc);

	/* Enable receiver. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	dc_ifmedia_upd_locked(sc);

	/* Clear missed frames and overflow counter. */
	CSR_READ_4(sc, DC_FRAMESDISCARDED);

	/* Don't start the ticker if this is a homePNA link. */
	if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1)
		sc->dc_link = 1;
	else {
		/* Poll faster while 21143 NWAY negotiation is under way. */
		if (sc->dc_flags & DC_21143_NWAY)
			callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc);
		else
			callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc);
	}

	sc->dc_wdog_timer = 0;
	callout_reset(&sc->dc_wdog_ch, hz, dc_watchdog, sc);
}
3788 
3789 /*
3790  * Set media options.
3791  */
3792 static int
3793 dc_ifmedia_upd(struct ifnet *ifp)
3794 {
3795 	struct dc_softc *sc;
3796 	int error;
3797 
3798 	sc = ifp->if_softc;
3799 	DC_LOCK(sc);
3800 	error = dc_ifmedia_upd_locked(sc);
3801 	DC_UNLOCK(sc);
3802 	return (error);
3803 }
3804 
3805 static int
3806 dc_ifmedia_upd_locked(struct dc_softc *sc)
3807 {
3808 	struct mii_data *mii;
3809 	struct ifmedia *ifm;
3810 	int error;
3811 
3812 	DC_LOCK_ASSERT(sc);
3813 
3814 	sc->dc_link = 0;
3815 	mii = device_get_softc(sc->dc_miibus);
3816 	error = mii_mediachg(mii);
3817 	if (error == 0) {
3818 		ifm = &mii->mii_media;
3819 		if (DC_IS_INTEL(sc))
3820 			dc_setcfg(sc, ifm->ifm_media);
3821 		else if (DC_IS_DAVICOM(sc) &&
3822 		    IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1)
3823 			dc_setcfg(sc, ifm->ifm_media);
3824 	}
3825 
3826 	return (error);
3827 }
3828 
3829 /*
3830  * Report current media status.
3831  */
3832 static void
3833 dc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3834 {
3835 	struct dc_softc *sc;
3836 	struct mii_data *mii;
3837 	struct ifmedia *ifm;
3838 
3839 	sc = ifp->if_softc;
3840 	mii = device_get_softc(sc->dc_miibus);
3841 	DC_LOCK(sc);
3842 	mii_pollstat(mii);
3843 	ifm = &mii->mii_media;
3844 	if (DC_IS_DAVICOM(sc)) {
3845 		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
3846 			ifmr->ifm_active = ifm->ifm_media;
3847 			ifmr->ifm_status = 0;
3848 			DC_UNLOCK(sc);
3849 			return;
3850 		}
3851 	}
3852 	ifmr->ifm_active = mii->mii_media_active;
3853 	ifmr->ifm_status = mii->mii_media_status;
3854 	DC_UNLOCK(sc);
3855 }
3856 
/*
 * Handle socket ioctls: interface flags, multicast filter updates,
 * media selection, and DEVICE_POLLING capability toggling.  Everything
 * else is delegated to ether_ioctl().
 */
static int
dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct dc_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		DC_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/* Only PROMISC/ALLMULTI changes need a refilter. */
			int need_setfilt = (ifp->if_flags ^ sc->dc_if_flags) &
				(IFF_PROMISC | IFF_ALLMULTI);

			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if (need_setfilt)
					dc_setfilt(sc);
			} else {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				dc_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				dc_stop(sc);
		}
		sc->dc_if_flags = ifp->if_flags;
		DC_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		DC_LOCK(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			dc_setfilt(sc);
		DC_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->dc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
#ifdef DEVICE_POLLING
		/* Entering polling mode: register and mask interrupts. */
		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(ifp->if_capenable & IFCAP_POLLING)) {
			error = ether_poll_register(dc_poll, ifp);
			if (error)
				return(error);
			DC_LOCK(sc);
			/* Disable interrupts */
			CSR_WRITE_4(sc, DC_IMR, 0x00000000);
			ifp->if_capenable |= IFCAP_POLLING;
			DC_UNLOCK(sc);
			return (error);
		}
		/* Leaving polling mode: deregister and unmask interrupts. */
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    ifp->if_capenable & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			DC_LOCK(sc);
			CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
			ifp->if_capenable &= ~IFCAP_POLLING;
			DC_UNLOCK(sc);
			return (error);
		}
#endif /* DEVICE_POLLING */
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
3931 
3932 static void
3933 dc_watchdog(void *xsc)
3934 {
3935 	struct dc_softc *sc = xsc;
3936 	struct ifnet *ifp;
3937 
3938 	DC_LOCK_ASSERT(sc);
3939 
3940 	if (sc->dc_wdog_timer == 0 || --sc->dc_wdog_timer != 0) {
3941 		callout_reset(&sc->dc_wdog_ch, hz, dc_watchdog, sc);
3942 		return;
3943 	}
3944 
3945 	ifp = sc->dc_ifp;
3946 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
3947 	device_printf(sc->dc_dev, "watchdog timeout\n");
3948 
3949 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3950 	dc_init_locked(sc);
3951 
3952 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3953 		dc_start_locked(ifp);
3954 }
3955 
3956 /*
3957  * Stop the adapter and free any mbufs allocated to the
3958  * RX and TX lists.
3959  */
static void
dc_stop(struct dc_softc *sc)
{
	struct ifnet *ifp;
	struct dc_list_data *ld;
	struct dc_chain_data *cd;
	int i;
	uint32_t ctl, netcfg;

	DC_LOCK_ASSERT(sc);

	ifp = sc->dc_ifp;
	ld = &sc->dc_ldata;
	cd = &sc->dc_cdata;

	/* Stop the periodic tick and watchdog callouts first. */
	callout_stop(&sc->dc_stat_ch);
	callout_stop(&sc->dc_wdog_ch);
	sc->dc_wdog_timer = 0;
	sc->dc_link = 0;

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* Ask the chip to stop its RX/TX engines and mask interrupts. */
	netcfg = CSR_READ_4(sc, DC_NETCFG);
	if (netcfg & (DC_NETCFG_RX_ON | DC_NETCFG_TX_ON))
		CSR_WRITE_4(sc, DC_NETCFG,
		   netcfg & ~(DC_NETCFG_RX_ON | DC_NETCFG_TX_ON));
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	/* Wait the completion of TX/RX SM. */
	if (netcfg & (DC_NETCFG_RX_ON | DC_NETCFG_TX_ON))
		dc_netcfg_wait(sc);

	CSR_WRITE_4(sc, DC_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, DC_RXADDR, 0x00000000);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		if (cd->dc_rx_chain[i] != NULL) {
			bus_dmamap_sync(sc->dc_rx_mtag,
			    cd->dc_rx_map[i], BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->dc_rx_mtag,
			    cd->dc_rx_map[i]);
			m_freem(cd->dc_rx_chain[i]);
			cd->dc_rx_chain[i] = NULL;
		}
	}
	bzero(ld->dc_rx_list, DC_RX_LIST_SZ);
	bus_dmamap_sync(sc->dc_rx_ltag, sc->dc_rx_lmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		if (cd->dc_tx_chain[i] != NULL) {
			ctl = le32toh(ld->dc_tx_list[i].dc_ctl);
			if (ctl & DC_TXCTL_SETUP) {
				/* Setup frames use the dedicated setup map. */
				bus_dmamap_sync(sc->dc_stag, sc->dc_smap,
				    BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->dc_tx_mtag,
				    cd->dc_tx_map[i], BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->dc_tx_mtag,
				    cd->dc_tx_map[i]);
				m_freem(cd->dc_tx_chain[i]);
			}
			cd->dc_tx_chain[i] = NULL;
		}
	}
	bzero(ld->dc_tx_list, DC_TX_LIST_SZ);
	bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
4034 
4035 /*
4036  * Device suspend routine.  Stop the interface and save some PCI
4037  * settings in case the BIOS doesn't restore them properly on
4038  * resume.
4039  */
4040 static int
4041 dc_suspend(device_t dev)
4042 {
4043 	struct dc_softc *sc;
4044 
4045 	sc = device_get_softc(dev);
4046 	DC_LOCK(sc);
4047 	dc_stop(sc);
4048 	sc->suspended = 1;
4049 	DC_UNLOCK(sc);
4050 
4051 	return (0);
4052 }
4053 
4054 /*
4055  * Device resume routine.  Restore some PCI settings in case the BIOS
4056  * doesn't, re-enable busmastering, and restart the interface if
4057  * appropriate.
4058  */
4059 static int
4060 dc_resume(device_t dev)
4061 {
4062 	struct dc_softc *sc;
4063 	struct ifnet *ifp;
4064 
4065 	sc = device_get_softc(dev);
4066 	ifp = sc->dc_ifp;
4067 
4068 	/* reinitialize interface if necessary */
4069 	DC_LOCK(sc);
4070 	if (ifp->if_flags & IFF_UP)
4071 		dc_init_locked(sc);
4072 
4073 	sc->suspended = 0;
4074 	DC_UNLOCK(sc);
4075 
4076 	return (0);
4077 }
4078 
4079 /*
4080  * Stop all chip I/O so that the kernel's probe routines don't
4081  * get confused by errant DMAs when rebooting.
4082  */
4083 static int
4084 dc_shutdown(device_t dev)
4085 {
4086 	struct dc_softc *sc;
4087 
4088 	sc = device_get_softc(dev);
4089 
4090 	DC_LOCK(sc);
4091 	dc_stop(sc);
4092 	DC_UNLOCK(sc);
4093 
4094 	return (0);
4095 }
4096 
4097 static int
4098 dc_check_multiport(struct dc_softc *sc)
4099 {
4100 	struct dc_softc *dsc;
4101 	devclass_t dc;
4102 	device_t child;
4103 	uint8_t *eaddr;
4104 	int unit;
4105 
4106 	dc = devclass_find("dc");
4107 	for (unit = 0; unit < devclass_get_maxunit(dc); unit++) {
4108 		child = devclass_get_device(dc, unit);
4109 		if (child == NULL)
4110 			continue;
4111 		if (child == sc->dc_dev)
4112 			continue;
4113 		if (device_get_parent(child) != device_get_parent(sc->dc_dev))
4114 			continue;
4115 		if (unit > device_get_unit(sc->dc_dev))
4116 			continue;
4117 		if (device_is_attached(child) == 0)
4118 			continue;
4119 		dsc = device_get_softc(child);
4120 		device_printf(sc->dc_dev,
4121 		    "Using station address of %s as base\n",
4122 		    device_get_nameunit(child));
4123 		bcopy(dsc->dc_eaddr, sc->dc_eaddr, ETHER_ADDR_LEN);
4124 		eaddr = (uint8_t *)sc->dc_eaddr;
4125 		eaddr[5]++;
4126 		/* Prepare SROM to parse again. */
4127 		if (DC_IS_INTEL(sc) && dsc->dc_srom != NULL &&
4128 		    sc->dc_romwidth != 0) {
4129 			free(sc->dc_srom, M_DEVBUF);
4130 			sc->dc_romwidth = dsc->dc_romwidth;
4131 			sc->dc_srom = malloc(DC_ROM_SIZE(sc->dc_romwidth),
4132 			    M_DEVBUF, M_NOWAIT);
4133 			if (sc->dc_srom == NULL) {
4134 				device_printf(sc->dc_dev,
4135 				    "Could not allocate SROM buffer\n");
4136 				return (ENOMEM);
4137 			}
4138 			bcopy(dsc->dc_srom, sc->dc_srom,
4139 			    DC_ROM_SIZE(sc->dc_romwidth));
4140 		}
4141 		return (0);
4142 	}
4143 	return (ENOENT);
4144 }
4145