xref: /freebsd/sys/dev/fxp/if_fxp.c (revision 683697624683d36244876eba620c7a98f419f843)
1 /*-
2  * Copyright (c) 1995, David Greenman
3  * Copyright (c) 2001 Jonathan Lemon <jlemon@freebsd.org>
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice unmodified, this list of conditions, and the following
11  *    disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  */
29 
30 /*
31  * Intel EtherExpress Pro/100B PCI Fast Ethernet driver
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/endian.h>
40 #include <sys/mbuf.h>
41 		/* #include <sys/mutex.h> */
42 #include <sys/kernel.h>
43 #include <sys/socket.h>
44 #include <sys/sysctl.h>
45 
46 #include <net/if.h>
47 #include <net/if_dl.h>
48 #include <net/if_media.h>
49 
50 #include <net/bpf.h>
51 #include <sys/sockio.h>
52 #include <sys/bus.h>
53 #include <machine/bus.h>
54 #include <sys/rman.h>
55 #include <machine/resource.h>
56 
57 #include <net/ethernet.h>
58 #include <net/if_arp.h>
59 
60 #include <machine/clock.h>	/* for DELAY */
61 
62 #include <net/if_types.h>
63 #include <net/if_vlan_var.h>
64 
65 #ifdef FXP_IP_CSUM_WAR
66 #include <netinet/in.h>
67 #include <netinet/in_systm.h>
68 #include <netinet/ip.h>
69 #include <machine/in_cksum.h>
70 #endif
71 
72 #include <pci/pcivar.h>
73 #include <pci/pcireg.h>		/* for PCIM_CMD_xxx */
74 
75 #include <dev/mii/mii.h>
76 #include <dev/mii/miivar.h>
77 
78 #include <dev/fxp/if_fxpreg.h>
79 #include <dev/fxp/if_fxpvar.h>
80 #include <dev/fxp/rcvbundl.h>
81 
82 MODULE_DEPEND(fxp, miibus, 1, 1, 1);
83 #include "miibus_if.h"
84 
85 /*
86  * NOTE!  On the Alpha, we have an alignment constraint.  The
87  * card DMAs the packet immediately following the RFA.  However,
88  * the first thing in the packet is a 14-byte Ethernet header.
89  * This means that the packet is misaligned.  To compensate,
90  * we actually offset the RFA 2 bytes into the cluster.  This
91  * alignes the packet after the Ethernet header at a 32-bit
92  * boundary.  HOWEVER!  This means that the RFA is misaligned!
93  */
94 #define	RFA_ALIGNMENT_FUDGE	2
95 
96 /*
97  * Set initial transmit threshold at 64 (512 bytes). This is
98  * increased by 64 (512 bytes) at a time, to maximum of 192
99  * (1536 bytes), if an underrun occurs.
100  */
101 static int tx_threshold = 64;
102 
103 /*
104  * The configuration byte map has several undefined fields which
105  * must be one or must be zero.  Set up a template for these bits
106  * only, (assuming a 82557 chip) leaving the actual configuration
107  * to fxp_init.
108  *
109  * See struct fxp_cb_config for the bit definitions.
110  */
111 static u_char fxp_cb_config_template[] = {
112 	0x0, 0x0,		/* cb_status */
113 	0x0, 0x0,		/* cb_command */
114 	0x0, 0x0, 0x0, 0x0,	/* link_addr */
115 	0x0,	/*  0 */
116 	0x0,	/*  1 */
117 	0x0,	/*  2 */
118 	0x0,	/*  3 */
119 	0x0,	/*  4 */
120 	0x0,	/*  5 */
121 	0x32,	/*  6 */
122 	0x0,	/*  7 */
123 	0x0,	/*  8 */
124 	0x0,	/*  9 */
125 	0x6,	/* 10 */
126 	0x0,	/* 11 */
127 	0x0,	/* 12 */
128 	0x0,	/* 13 */
129 	0xf2,	/* 14 */
130 	0x48,	/* 15 */
131 	0x0,	/* 16 */
132 	0x40,	/* 17 */
133 	0xf0,	/* 18 */
134 	0x0,	/* 19 */
135 	0x3f,	/* 20 */
136 	0x5	/* 21 */
137 };
138 
/*
 * Mapping of a PCI device ID to a printable adapter name, used by
 * fxp_probe() to claim and describe supported Intel parts.
 */
struct fxp_ident {
	u_int16_t	devid;		/* PCI device ID */
	char 		*name;		/* human-readable description */
};
143 
144 /*
145  * Claim various Intel PCI device identifiers for this driver.  The
146  * sub-vendor and sub-device field are extensively used to identify
147  * particular variants, but we don't currently differentiate between
148  * them.
149  */
150 static struct fxp_ident fxp_ident_table[] = {
151     { 0x1029,		"Intel 82559 PCI/CardBus Pro/100" },
152     { 0x1030,		"Intel 82559 Pro/100 Ethernet" },
153     { 0x1031,		"Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
154     { 0x1032,		"Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
155     { 0x1033,		"Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
156     { 0x1034,		"Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
157     { 0x1035,		"Intel 82801CAM (ICH3) Pro/100 Ethernet" },
158     { 0x1036,		"Intel 82801CAM (ICH3) Pro/100 Ethernet" },
159     { 0x1037,		"Intel 82801CAM (ICH3) Pro/100 Ethernet" },
160     { 0x1038,		"Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
161     { 0x1039,		"Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
162     { 0x103A,		"Intel 82801DB (ICH4) Pro/100 Ethernet" },
163     { 0x103B,		"Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
164     { 0x103C,		"Intel 82801DB (ICH4) Pro/100 Ethernet" },
165     { 0x103D,		"Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
166     { 0x103E,		"Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
167     { 0x1059,		"Intel 82551QM Pro/100 M Mobile Connection" },
168     { 0x1209,		"Intel 82559ER Embedded 10/100 Ethernet" },
169     { 0x1229,		"Intel 82557/8/9 EtherExpress Pro/100(B) Ethernet" },
170     { 0x2449,		"Intel 82801BA/CAM (ICH2/3) Pro/100 Ethernet" },
171     { 0,		NULL },
172 };
173 
174 #ifdef FXP_IP_CSUM_WAR
175 #define FXP_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
176 #else
177 #define FXP_CSUM_FEATURES    (CSUM_TCP | CSUM_UDP)
178 #endif
179 
180 static int		fxp_probe(device_t dev);
181 static int		fxp_attach(device_t dev);
182 static int		fxp_detach(device_t dev);
183 static int		fxp_shutdown(device_t dev);
184 static int		fxp_suspend(device_t dev);
185 static int		fxp_resume(device_t dev);
186 
187 static void		fxp_intr(void *xsc);
188 static void 		fxp_init(void *xsc);
189 static void 		fxp_tick(void *xsc);
190 static void		fxp_powerstate_d0(device_t dev);
191 static void 		fxp_start(struct ifnet *ifp);
192 static void		fxp_stop(struct fxp_softc *sc);
193 static void 		fxp_release(struct fxp_softc *sc);
194 static int		fxp_ioctl(struct ifnet *ifp, u_long command,
195 			    caddr_t data);
196 static void 		fxp_watchdog(struct ifnet *ifp);
197 static int		fxp_add_rfabuf(struct fxp_softc *sc,
198     			    struct fxp_rx *rxp);
199 static int		fxp_mc_addrs(struct fxp_softc *sc);
200 static void		fxp_mc_setup(struct fxp_softc *sc);
201 static u_int16_t	fxp_eeprom_getword(struct fxp_softc *sc, int offset,
202 			    int autosize);
203 static void 		fxp_eeprom_putword(struct fxp_softc *sc, int offset,
204 			    u_int16_t data);
205 static void		fxp_autosize_eeprom(struct fxp_softc *sc);
206 static void		fxp_read_eeprom(struct fxp_softc *sc, u_short *data,
207 			    int offset, int words);
208 static void		fxp_write_eeprom(struct fxp_softc *sc, u_short *data,
209 			    int offset, int words);
210 static int		fxp_ifmedia_upd(struct ifnet *ifp);
211 static void		fxp_ifmedia_sts(struct ifnet *ifp,
212 			    struct ifmediareq *ifmr);
213 static int		fxp_serial_ifmedia_upd(struct ifnet *ifp);
214 static void		fxp_serial_ifmedia_sts(struct ifnet *ifp,
215 			    struct ifmediareq *ifmr);
216 static volatile int	fxp_miibus_readreg(device_t dev, int phy, int reg);
217 static void		fxp_miibus_writereg(device_t dev, int phy, int reg,
218 			    int value);
219 static void		fxp_load_ucode(struct fxp_softc *sc);
220 static int		sysctl_int_range(SYSCTL_HANDLER_ARGS,
221 			    int low, int high);
222 static int		sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS);
223 static int		sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS);
224 static __inline void 	fxp_scb_wait(struct fxp_softc *sc);
225 static __inline void	fxp_scb_cmd(struct fxp_softc *sc, int cmd);
226 static __inline void	fxp_dma_wait(struct fxp_softc *sc,
227     			    volatile u_int16_t *status, bus_dma_tag_t dmat,
228 			    bus_dmamap_t map);
229 
/*
 * Newbus method dispatch table: device lifecycle entry points plus the
 * MII register-access methods used by the attached miibus child.
 */
static device_method_t fxp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		fxp_probe),
	DEVMETHOD(device_attach,	fxp_attach),
	DEVMETHOD(device_detach,	fxp_detach),
	DEVMETHOD(device_shutdown,	fxp_shutdown),
	DEVMETHOD(device_suspend,	fxp_suspend),
	DEVMETHOD(device_resume,	fxp_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	fxp_miibus_readreg),
	DEVMETHOD(miibus_writereg,	fxp_miibus_writereg),

	{ 0, 0 }	/* table terminator */
};
245 
246 static driver_t fxp_driver = {
247 	"fxp",
248 	fxp_methods,
249 	sizeof(struct fxp_softc),
250 };
251 
252 static devclass_t fxp_devclass;
253 
254 DRIVER_MODULE(if_fxp, pci, fxp_driver, fxp_devclass, 0, 0);
255 DRIVER_MODULE(if_fxp, cardbus, fxp_driver, fxp_devclass, 0, 0);
256 DRIVER_MODULE(miibus, fxp, miibus_driver, miibus_devclass, 0, 0);
257 
258 static int fxp_rnr;
259 SYSCTL_INT(_hw, OID_AUTO, fxp_rnr, CTLFLAG_RW, &fxp_rnr, 0, "fxp rnr events");
260 
261 /*
262  * Wait for the previous command to be accepted (but not necessarily
263  * completed).
264  */
265 static __inline void
266 fxp_scb_wait(struct fxp_softc *sc)
267 {
268 	int i = 10000;
269 
270 	while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i)
271 		DELAY(2);
272 	if (i == 0)
273 		device_printf(sc->dev, "SCB timeout: 0x%x 0x%x 0x%x 0x%x\n",
274 		    CSR_READ_1(sc, FXP_CSR_SCB_COMMAND),
275 		    CSR_READ_1(sc, FXP_CSR_SCB_STATACK),
276 		    CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS),
277 		    CSR_READ_2(sc, FXP_CSR_FLOWCONTROL));
278 }
279 
280 static __inline void
281 fxp_scb_cmd(struct fxp_softc *sc, int cmd)
282 {
283 
284 	if (cmd == FXP_SCB_COMMAND_CU_RESUME && sc->cu_resume_bug) {
285 		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_CB_COMMAND_NOP);
286 		fxp_scb_wait(sc);
287 	}
288 	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, cmd);
289 }
290 
291 static __inline void
292 fxp_dma_wait(struct fxp_softc *sc, volatile u_int16_t *status,
293     bus_dma_tag_t dmat, bus_dmamap_t map)
294 {
295 	int i = 10000;
296 
297 	bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
298 	while (!(le16toh(*status) & FXP_CB_STATUS_C) && --i) {
299 		DELAY(2);
300 		bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
301 	}
302 	if (i == 0)
303 		device_printf(sc->dev, "DMA timeout\n");
304 }
305 
306 /*
307  * Return identification string if this is device is ours.
308  */
309 static int
310 fxp_probe(device_t dev)
311 {
312 	u_int16_t devid;
313 	struct fxp_ident *ident;
314 
315 	if (pci_get_vendor(dev) == FXP_VENDORID_INTEL) {
316 		devid = pci_get_device(dev);
317 		for (ident = fxp_ident_table; ident->name != NULL; ident++) {
318 			if (ident->devid == devid) {
319 				device_set_desc(dev, ident->name);
320 				return (0);
321 			}
322 		}
323 	}
324 	return (ENXIO);
325 }
326 
/*
 * If the device is not in the D0 power state, save key PCI config
 * registers, force the chip to D0, and restore them (a D3->D0
 * transition can reset BAR and interrupt-line configuration).
 * No-op on FreeBSD versions without the pci powerstate API.
 */
static void
fxp_powerstate_d0(device_t dev)
{
#if __FreeBSD_version >= 430002
	u_int32_t iobase, membase, irq;

	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		/* Save important PCI config data. */
		iobase = pci_read_config(dev, FXP_PCI_IOBA, 4);
		membase = pci_read_config(dev, FXP_PCI_MMBA, 4);
		/*
		 * NOTE(review): 4-byte access at PCIR_INTLINE also covers
		 * the adjacent INTPIN/MIN_GNT/MAX_LAT bytes -- presumably
		 * intentional so they are restored too; confirm.
		 */
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, FXP_PCI_IOBA, iobase, 4);
		pci_write_config(dev, FXP_PCI_MMBA, membase, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif
}
352 
353 static void
354 fxp_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
355 {
356 	u_int32_t *addr;
357 
358 	if (error)
359 		return;
360 
361 	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
362 	addr = arg;
363 	*addr = segs->ds_addr;
364 }
365 
366 static int
367 fxp_attach(device_t dev)
368 {
369 	int error = 0;
370 	struct fxp_softc *sc = device_get_softc(dev);
371 	struct ifnet *ifp;
372 	struct fxp_rx *rxp;
373 	u_int32_t val;
374 	u_int16_t data, myea[ETHER_ADDR_LEN / 2];
375 	int i, rid, m1, m2, prefer_iomap, maxtxseg;
376 	int s;
377 
378 	bzero(sc, sizeof(*sc));
379 	sc->dev = dev;
380 	callout_handle_init(&sc->stat_ch);
381 	sysctl_ctx_init(&sc->sysctl_ctx);
382 	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
383 	    MTX_DEF | MTX_RECURSE);
384 
385 	s = splimp();
386 
387 	/*
388 	 * Enable bus mastering. Enable memory space too, in case
389 	 * BIOS/Prom forgot about it.
390 	 */
391 	val = pci_read_config(dev, PCIR_COMMAND, 2);
392 	val |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
393 	pci_write_config(dev, PCIR_COMMAND, val, 2);
394 	val = pci_read_config(dev, PCIR_COMMAND, 2);
395 
396 	fxp_powerstate_d0(dev);
397 
398 	/*
399 	 * Figure out which we should try first - memory mapping or i/o mapping?
400 	 * We default to memory mapping. Then we accept an override from the
401 	 * command line. Then we check to see which one is enabled.
402 	 */
403 	m1 = PCIM_CMD_MEMEN;
404 	m2 = PCIM_CMD_PORTEN;
405 	prefer_iomap = 0;
406 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
407 	    "prefer_iomap", &prefer_iomap) == 0 && prefer_iomap != 0) {
408 		m1 = PCIM_CMD_PORTEN;
409 		m2 = PCIM_CMD_MEMEN;
410 	}
411 
412 	if (val & m1) {
413 		sc->rtp =
414 		    (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
415 		sc->rgd = (m1 == PCIM_CMD_MEMEN)? FXP_PCI_MMBA : FXP_PCI_IOBA;
416 		sc->mem = bus_alloc_resource(dev, sc->rtp, &sc->rgd,
417 	                                     0, ~0, 1, RF_ACTIVE);
418 	}
419 	if (sc->mem == NULL && (val & m2)) {
420 		sc->rtp =
421 		    (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
422 		sc->rgd = (m2 == PCIM_CMD_MEMEN)? FXP_PCI_MMBA : FXP_PCI_IOBA;
423 		sc->mem = bus_alloc_resource(dev, sc->rtp, &sc->rgd,
424                                             0, ~0, 1, RF_ACTIVE);
425 	}
426 
427 	if (!sc->mem) {
428 		device_printf(dev, "could not map device registers\n");
429 		error = ENXIO;
430 		goto fail;
431         }
432 	if (bootverbose) {
433 		device_printf(dev, "using %s space register mapping\n",
434 		   sc->rtp == SYS_RES_MEMORY? "memory" : "I/O");
435 	}
436 
437 	sc->sc_st = rman_get_bustag(sc->mem);
438 	sc->sc_sh = rman_get_bushandle(sc->mem);
439 
440 	/*
441 	 * Allocate our interrupt.
442 	 */
443 	rid = 0;
444 	sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
445 				 RF_SHAREABLE | RF_ACTIVE);
446 	if (sc->irq == NULL) {
447 		device_printf(dev, "could not map interrupt\n");
448 		error = ENXIO;
449 		goto fail;
450 	}
451 
452 	/*
453 	 * Reset to a stable state.
454 	 */
455 	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
456 	DELAY(10);
457 
458 	/*
459 	 * Find out how large of an SEEPROM we have.
460 	 */
461 	fxp_autosize_eeprom(sc);
462 
463 	/*
464 	 * Determine whether we must use the 503 serial interface.
465 	 */
466 	fxp_read_eeprom(sc, &data, 6, 1);
467 	if ((data & FXP_PHY_DEVICE_MASK) != 0 &&
468 	    (data & FXP_PHY_SERIAL_ONLY))
469 		sc->flags |= FXP_FLAG_SERIAL_MEDIA;
470 
471 	/*
472 	 * Create the sysctl tree
473 	 */
474 	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
475 	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
476 	    device_get_nameunit(dev), CTLFLAG_RD, 0, "");
477 	if (sc->sysctl_tree == NULL)
478 		goto fail;
479 	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
480 	    OID_AUTO, "int_delay", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_PRISON,
481 	    &sc->tunable_int_delay, 0, sysctl_hw_fxp_int_delay, "I",
482 	    "FXP driver receive interrupt microcode bundling delay");
483 	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
484 	    OID_AUTO, "bundle_max", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_PRISON,
485 	    &sc->tunable_bundle_max, 0, sysctl_hw_fxp_bundle_max, "I",
486 	    "FXP driver receive interrupt microcode bundle size limit");
487 
488 	/*
489 	 * Pull in device tunables.
490 	 */
491 	sc->tunable_int_delay = TUNABLE_INT_DELAY;
492 	sc->tunable_bundle_max = TUNABLE_BUNDLE_MAX;
493 	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
494 	    "int_delay", &sc->tunable_int_delay);
495 	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
496 	    "bundle_max", &sc->tunable_bundle_max);
497 
498 	/*
499 	 * Find out the chip revision; lump all 82557 revs together.
500 	 */
501 	fxp_read_eeprom(sc, &data, 5, 1);
502 	if ((data >> 8) == 1)
503 		sc->revision = FXP_REV_82557;
504 	else
505 		sc->revision = pci_get_revid(dev);
506 
507 	/*
508 	 * Enable workarounds for certain chip revision deficiencies.
509 	 *
510 	 * Systems based on the ICH2/ICH2-M chip from Intel, and possibly
511 	 * some systems based a normal 82559 design, have a defect where
512 	 * the chip can cause a PCI protocol violation if it receives
513 	 * a CU_RESUME command when it is entering the IDLE state.  The
514 	 * workaround is to disable Dynamic Standby Mode, so the chip never
515 	 * deasserts CLKRUN#, and always remains in an active state.
516 	 *
517 	 * See Intel 82801BA/82801BAM Specification Update, Errata #30.
518 	 */
519 	i = pci_get_device(dev);
520 	if (i == 0x2449 || (i > 0x1030 && i < 0x1039) ||
521 	    sc->revision >= FXP_REV_82559_A0) {
522 		fxp_read_eeprom(sc, &data, 10, 1);
523 		if (data & 0x02) {			/* STB enable */
524 			u_int16_t cksum;
525 			int i;
526 
527 			device_printf(dev,
528 			    "Disabling dynamic standby mode in EEPROM\n");
529 			data &= ~0x02;
530 			fxp_write_eeprom(sc, &data, 10, 1);
531 			device_printf(dev, "New EEPROM ID: 0x%x\n", data);
532 			cksum = 0;
533 			for (i = 0; i < (1 << sc->eeprom_size) - 1; i++) {
534 				fxp_read_eeprom(sc, &data, i, 1);
535 				cksum += data;
536 			}
537 			i = (1 << sc->eeprom_size) - 1;
538 			cksum = 0xBABA - cksum;
539 			fxp_read_eeprom(sc, &data, i, 1);
540 			fxp_write_eeprom(sc, &cksum, i, 1);
541 			device_printf(dev,
542 			    "EEPROM checksum @ 0x%x: 0x%x -> 0x%x\n",
543 			    i, data, cksum);
544 #if 1
545 			/*
546 			 * If the user elects to continue, try the software
547 			 * workaround, as it is better than nothing.
548 			 */
549 			sc->flags |= FXP_FLAG_CU_RESUME_BUG;
550 #endif
551 		}
552 	}
553 
554 	/*
555 	 * If we are not a 82557 chip, we can enable extended features.
556 	 */
557 	if (sc->revision != FXP_REV_82557) {
558 		/*
559 		 * If MWI is enabled in the PCI configuration, and there
560 		 * is a valid cacheline size (8 or 16 dwords), then tell
561 		 * the board to turn on MWI.
562 		 */
563 		if (val & PCIM_CMD_MWRICEN &&
564 		    pci_read_config(dev, PCIR_CACHELNSZ, 1) != 0)
565 			sc->flags |= FXP_FLAG_MWI_ENABLE;
566 
567 		/* turn on the extended TxCB feature */
568 		sc->flags |= FXP_FLAG_EXT_TXCB;
569 
570 		/* enable reception of long frames for VLAN */
571 		sc->flags |= FXP_FLAG_LONG_PKT_EN;
572 	}
573 
574 	/*
575 	 * Enable use of extended RFDs and TCBs for 82550
576 	 * and later chips. Note: we need extended TXCB support
577 	 * too, but that's already enabled by the code above.
578 	 * Be careful to do this only on the right devices.
579 	 */
580 
581 	if (sc->revision == FXP_REV_82550 || sc->revision == FXP_REV_82550_C) {
582 		sc->rfa_size = sizeof (struct fxp_rfa);
583 		sc->tx_cmd = FXP_CB_COMMAND_IPCBXMIT;
584 		sc->flags |= FXP_FLAG_EXT_RFA;
585 	} else {
586 		sc->rfa_size = sizeof (struct fxp_rfa) - FXP_RFAX_LEN;
587 		sc->tx_cmd = FXP_CB_COMMAND_XMIT;
588 	}
589 
590 	/*
591 	 * Allocate DMA tags and DMA safe memory.
592 	 */
593 	maxtxseg = sc->flags & FXP_FLAG_EXT_RFA ? FXP_NTXSEG - 1 : FXP_NTXSEG;
594 	error = bus_dma_tag_create(NULL, 2, 0, BUS_SPACE_MAXADDR_32BIT,
595 	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * maxtxseg,
596 	    maxtxseg, MCLBYTES, 0, &sc->fxp_mtag);
597 	if (error) {
598 		device_printf(dev, "could not allocate dma tag\n");
599 		goto fail;
600 	}
601 
602 	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
603 	    BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct fxp_stats), 1,
604 	    sizeof(struct fxp_stats), 0, &sc->fxp_stag);
605 	if (error) {
606 		device_printf(dev, "could not allocate dma tag\n");
607 		goto fail;
608 	}
609 
610 	error = bus_dmamem_alloc(sc->fxp_stag, (void **)&sc->fxp_stats,
611 	    BUS_DMA_NOWAIT, &sc->fxp_smap);
612 	if (error)
613 		goto failmem;
614 	error = bus_dmamap_load(sc->fxp_stag, sc->fxp_smap, sc->fxp_stats,
615 	    sizeof(struct fxp_stats), fxp_dma_map_addr, &sc->stats_addr, 0);
616 	if (error) {
617 		device_printf(dev, "could not map the stats buffer\n");
618 		goto fail;
619 	}
620 	bzero(sc->fxp_stats, sizeof(struct fxp_stats));
621 
622 	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
623 	    BUS_SPACE_MAXADDR, NULL, NULL, FXP_TXCB_SZ, 1,
624 	    FXP_TXCB_SZ, 0, &sc->cbl_tag);
625 	if (error) {
626 		device_printf(dev, "could not allocate dma tag\n");
627 		goto fail;
628 	}
629 
630 	error = bus_dmamem_alloc(sc->cbl_tag, (void **)&sc->fxp_desc.cbl_list,
631 	    BUS_DMA_NOWAIT, &sc->cbl_map);
632 	if (error)
633 		goto failmem;
634 	bzero(sc->fxp_desc.cbl_list, FXP_TXCB_SZ);
635 
636 	error = bus_dmamap_load(sc->cbl_tag, sc->cbl_map,
637 	    sc->fxp_desc.cbl_list, FXP_TXCB_SZ, fxp_dma_map_addr,
638 	    &sc->fxp_desc.cbl_addr, 0);
639 	if (error) {
640 		device_printf(dev, "could not map DMA memory\n");
641 		goto fail;
642 	}
643 
644 	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
645 	    BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct fxp_cb_mcs), 1,
646 	    sizeof(struct fxp_cb_mcs), 0, &sc->mcs_tag);
647 	if (error) {
648 		device_printf(dev, "could not allocate dma tag\n");
649 		goto fail;
650 	}
651 
652 	error = bus_dmamem_alloc(sc->mcs_tag, (void **)&sc->mcsp,
653 	    BUS_DMA_NOWAIT, &sc->mcs_map);
654 	if (error)
655 		goto failmem;
656 	error = bus_dmamap_load(sc->mcs_tag, sc->mcs_map, sc->mcsp,
657 	    sizeof(struct fxp_cb_mcs), fxp_dma_map_addr, &sc->mcs_addr, 0);
658 	if (error) {
659 		device_printf(dev, "can't map the multicast setup command\n");
660 		goto fail;
661 	}
662 
663 	/*
664 	 * Pre-allocate the TX DMA maps.
665 	 */
666 	for (i = 0; i < FXP_NTXCB; i++) {
667 		error = bus_dmamap_create(sc->fxp_mtag, 0,
668 		    &sc->fxp_desc.tx_list[i].tx_map);
669 		if (error) {
670 			device_printf(dev, "can't create DMA map for TX\n");
671 			goto fail;
672 		}
673 	}
674 	error = bus_dmamap_create(sc->fxp_mtag, 0, &sc->spare_map);
675 	if (error) {
676 		device_printf(dev, "can't create spare DMA map\n");
677 		goto fail;
678 	}
679 
680 	/*
681 	 * Pre-allocate our receive buffers.
682 	 */
683 	sc->fxp_desc.rx_head = sc->fxp_desc.rx_tail = NULL;
684 	for (i = 0; i < FXP_NRFABUFS; i++) {
685 		rxp = &sc->fxp_desc.rx_list[i];
686 		error = bus_dmamap_create(sc->fxp_mtag, 0, &rxp->rx_map);
687 		if (error) {
688 			device_printf(dev, "can't create DMA map for RX\n");
689 			goto fail;
690 		}
691 		if (fxp_add_rfabuf(sc, rxp) != 0)
692 			goto failmem;
693 	}
694 
695 	/*
696 	 * Read MAC address.
697 	 */
698 	fxp_read_eeprom(sc, myea, 0, 3);
699 	sc->arpcom.ac_enaddr[0] = myea[0] & 0xff;
700 	sc->arpcom.ac_enaddr[1] = myea[0] >> 8;
701 	sc->arpcom.ac_enaddr[2] = myea[1] & 0xff;
702 	sc->arpcom.ac_enaddr[3] = myea[1] >> 8;
703 	sc->arpcom.ac_enaddr[4] = myea[2] & 0xff;
704 	sc->arpcom.ac_enaddr[5] = myea[2] >> 8;
705 	device_printf(dev, "Ethernet address %6D%s\n",
706 	    sc->arpcom.ac_enaddr, ":",
707 	    sc->flags & FXP_FLAG_SERIAL_MEDIA ? ", 10Mbps" : "");
708 	if (bootverbose) {
709 		device_printf(dev, "PCI IDs: %04x %04x %04x %04x %04x\n",
710 		    pci_get_vendor(dev), pci_get_device(dev),
711 		    pci_get_subvendor(dev), pci_get_subdevice(dev),
712 		    pci_get_revid(dev));
713 		fxp_read_eeprom(sc, &data, 10, 1);
714 		device_printf(dev, "Dynamic Standby mode is %s\n",
715 		    data & 0x02 ? "enabled" : "disabled");
716 	}
717 
718 	/*
719 	 * If this is only a 10Mbps device, then there is no MII, and
720 	 * the PHY will use a serial interface instead.
721 	 *
722 	 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
723 	 * doesn't have a programming interface of any sort.  The
724 	 * media is sensed automatically based on how the link partner
725 	 * is configured.  This is, in essence, manual configuration.
726 	 */
727 	if (sc->flags & FXP_FLAG_SERIAL_MEDIA) {
728 		ifmedia_init(&sc->sc_media, 0, fxp_serial_ifmedia_upd,
729 		    fxp_serial_ifmedia_sts);
730 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
731 		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
732 	} else {
733 		if (mii_phy_probe(dev, &sc->miibus, fxp_ifmedia_upd,
734 		    fxp_ifmedia_sts)) {
735 	                device_printf(dev, "MII without any PHY!\n");
736 			error = ENXIO;
737 			goto fail;
738 		}
739 	}
740 
741 	ifp = &sc->arpcom.ac_if;
742 	ifp->if_unit = device_get_unit(dev);
743 	ifp->if_name = "fxp";
744 	ifp->if_output = ether_output;
745 	ifp->if_baudrate = 100000000;
746 	ifp->if_init = fxp_init;
747 	ifp->if_softc = sc;
748 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
749 	ifp->if_ioctl = fxp_ioctl;
750 	ifp->if_start = fxp_start;
751 	ifp->if_watchdog = fxp_watchdog;
752 
753 	/* Enable checksum offload for 82550 or better chips */
754 
755 	if (sc->flags & FXP_FLAG_EXT_RFA) {
756 		ifp->if_hwassist = FXP_CSUM_FEATURES;
757 		ifp->if_capabilities = IFCAP_HWCSUM;
758 		ifp->if_capenable = ifp->if_capabilities;
759 	}
760 
761 	/*
762 	 * Tell the upper layer(s) we support long frames.
763 	 */
764 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
765 	ifp->if_capabilities |= IFCAP_VLAN_MTU;
766 
767 	/*
768 	 * Let the system queue as many packets as we have available
769 	 * TX descriptors.
770 	 */
771 	ifp->if_snd.ifq_maxlen = FXP_NTXCB - 1;
772 
773 	/*
774 	 * Attach the interface.
775 	 */
776 	ether_ifattach(ifp, sc->arpcom.ac_enaddr);
777 
778 	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET,
779 	    fxp_intr, sc, &sc->ih);
780 	if (error) {
781 		device_printf(dev, "could not setup irq\n");
782 		goto fail;
783 	}
784 
785 	splx(s);
786 	return (0);
787 
788 failmem:
789 	device_printf(dev, "Failed to malloc memory\n");
790 	error = ENOMEM;
791 fail:
792 	splx(s);
793 	fxp_release(sc);
794 	return (error);
795 }
796 
797 /*
798  * release all resources
799  */
800 static void
801 fxp_release(struct fxp_softc *sc)
802 {
803 	struct fxp_rx *rxp;
804 	struct fxp_tx *txp;
805 	int i;
806 
807 	for (i = 0; i < FXP_NRFABUFS; i++) {
808 		rxp = &sc->fxp_desc.rx_list[i];
809 		if (rxp->rx_mbuf != NULL) {
810 			bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
811 			    BUS_DMASYNC_POSTREAD);
812 			bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
813 			m_freem(rxp->rx_mbuf);
814 		}
815 		bus_dmamap_destroy(sc->fxp_mtag, rxp->rx_map);
816 	}
817 	bus_dmamap_destroy(sc->fxp_mtag, sc->spare_map);
818 
819 	for (i = 0; i < FXP_NTXCB; i++) {
820 		txp = &sc->fxp_desc.tx_list[i];
821 		if (txp->tx_mbuf != NULL) {
822 			bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
823 			    BUS_DMASYNC_POSTWRITE);
824 			bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
825 			m_freem(txp->tx_mbuf);
826 		}
827 		bus_dmamap_destroy(sc->fxp_mtag, txp->tx_map);
828 	}
829 
830 	bus_generic_detach(sc->dev);
831 	if (sc->miibus)
832 		device_delete_child(sc->dev, sc->miibus);
833 
834 	if (sc->fxp_desc.cbl_list) {
835 		bus_dmamap_unload(sc->cbl_tag, sc->cbl_map);
836 		bus_dmamem_free(sc->cbl_tag, sc->fxp_desc.cbl_list,
837 		    sc->cbl_map);
838 	}
839 	if (sc->fxp_stats) {
840 		bus_dmamap_unload(sc->fxp_stag, sc->fxp_smap);
841 		bus_dmamem_free(sc->fxp_stag, sc->fxp_stats, sc->fxp_smap);
842 	}
843 	if (sc->mcsp) {
844 		bus_dmamap_unload(sc->mcs_tag, sc->mcs_map);
845 		bus_dmamem_free(sc->mcs_tag, sc->mcsp, sc->mcs_map);
846 	}
847 	if (sc->ih)
848 		bus_teardown_intr(sc->dev, sc->irq, sc->ih);
849 	if (sc->irq)
850 		bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq);
851 	if (sc->mem)
852 		bus_release_resource(sc->dev, sc->rtp, sc->rgd, sc->mem);
853 	if (sc->fxp_mtag)
854 		bus_dma_tag_destroy(sc->fxp_mtag);
855 	if (sc->fxp_stag)
856 		bus_dma_tag_destroy(sc->fxp_stag);
857 	if (sc->cbl_tag)
858 		bus_dma_tag_destroy(sc->cbl_tag);
859 	if (sc->mcs_tag)
860 		bus_dma_tag_destroy(sc->mcs_tag);
861 
862         sysctl_ctx_free(&sc->sysctl_ctx);
863 
864 	mtx_destroy(&sc->sc_mtx);
865 }
866 
867 /*
868  * Detach interface.
869  */
870 static int
871 fxp_detach(device_t dev)
872 {
873 	struct fxp_softc *sc = device_get_softc(dev);
874 	int s;
875 
876 	/* disable interrupts */
877 	CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
878 
879 	s = splimp();
880 
881 	/*
882 	 * Stop DMA and drop transmit queue.
883 	 */
884 	fxp_stop(sc);
885 
886 	/*
887 	 * Close down routes etc.
888 	 */
889 	ether_ifdetach(&sc->arpcom.ac_if);
890 
891 	/*
892 	 * Free all media structures.
893 	 */
894 	ifmedia_removeall(&sc->sc_media);
895 
896 	splx(s);
897 
898 	/* Release our allocated resources. */
899 	fxp_release(sc);
900 
901 	return (0);
902 }
903 
904 /*
905  * Device shutdown routine. Called at system shutdown after sync. The
906  * main purpose of this routine is to shut off receiver DMA so that
907  * kernel memory doesn't get clobbered during warmboot.
908  */
909 static int
910 fxp_shutdown(device_t dev)
911 {
912 	/*
913 	 * Make sure that DMA is disabled prior to reboot. Not doing
914 	 * do could allow DMA to corrupt kernel memory during the
915 	 * reboot before the driver initializes.
916 	 */
917 	fxp_stop((struct fxp_softc *) device_get_softc(dev));
918 	return (0);
919 }
920 
921 /*
922  * Device suspend routine.  Stop the interface and save some PCI
923  * settings in case the BIOS doesn't restore them properly on
924  * resume.
925  */
926 static int
927 fxp_suspend(device_t dev)
928 {
929 	struct fxp_softc *sc = device_get_softc(dev);
930 	int i, s;
931 
932 	s = splimp();
933 
934 	fxp_stop(sc);
935 
936 	for (i = 0; i < 5; i++)
937 		sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
938 	sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
939 	sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
940 	sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
941 	sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
942 
943 	sc->suspended = 1;
944 
945 	splx(s);
946 	return (0);
947 }
948 
949 /*
950  * Device resume routine.  Restore some PCI settings in case the BIOS
951  * doesn't, re-enable busmastering, and restart the interface if
952  * appropriate.
953  */
954 static int
955 fxp_resume(device_t dev)
956 {
957 	struct fxp_softc *sc = device_get_softc(dev);
958 	struct ifnet *ifp = &sc->sc_if;
959 	u_int16_t pci_command;
960 	int i, s;
961 
962 	s = splimp();
963 
964 	fxp_powerstate_d0(dev);
965 
966 	/* better way to do this? */
967 	for (i = 0; i < 5; i++)
968 		pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
969 	pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
970 	pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
971 	pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
972 	pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);
973 
974 	/* reenable busmastering */
975 	pci_command = pci_read_config(dev, PCIR_COMMAND, 2);
976 	pci_command |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
977 	pci_write_config(dev, PCIR_COMMAND, pci_command, 2);
978 
979 	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
980 	DELAY(10);
981 
982 	/* reinitialize interface if necessary */
983 	if (ifp->if_flags & IFF_UP)
984 		fxp_init(sc);
985 
986 	sc->suspended = 0;
987 
988 	splx(s);
989 	return (0);
990 }
991 
992 static void
993 fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length)
994 {
995 	u_int16_t reg;
996 	int x;
997 
998 	/*
999 	 * Shift in data.
1000 	 */
1001 	for (x = 1 << (length - 1); x; x >>= 1) {
1002 		if (data & x)
1003 			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
1004 		else
1005 			reg = FXP_EEPROM_EECS;
1006 		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
1007 		DELAY(1);
1008 		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
1009 		DELAY(1);
1010 		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
1011 		DELAY(1);
1012 	}
1013 }
1014 
1015 /*
1016  * Read from the serial EEPROM. Basically, you manually shift in
1017  * the read opcode (one bit at a time) and then shift in the address,
1018  * and then you shift out the data (all of this one bit at a time).
1019  * The word size is 16 bits, so you have to provide the address for
1020  * every 16 bits of data.
1021  */
/*
 * Read one 16-bit word from the serial EEPROM at word address
 * 'offset'.  When 'autosize' is non-zero, the address phase is also
 * used to discover the EEPROM's address width: the device drives
 * data-out to zero as soon as it has received a complete address,
 * and the number of bits shifted so far at that point is recorded
 * in sc->eeprom_size.
 */
static u_int16_t
fxp_eeprom_getword(struct fxp_softc *sc, int offset, int autosize)
{
	u_int16_t reg, data;
	int x;

	/* Assert chip select. */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	/*
	 * Shift in read opcode.
	 */
	fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3);
	/*
	 * Shift in address, msb first, counting the bits shifted so
	 * far in 'data' for the autosize case.
	 */
	data = 0;
	for (x = 1 << (sc->eeprom_size - 1); x; x >>= 1) {
		if (offset & x)
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		else
			reg = FXP_EEPROM_EECS;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		/* Sample data-out: goes to 0 once a full address is in. */
		reg = CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO;
		data++;
		if (autosize && reg == 0) {
			sc->eeprom_size = data;
			break;
		}
	}
	/*
	 * Shift out data, msb first, one bit per clock pulse.
	 */
	data = 0;
	reg = FXP_EEPROM_EECS;
	for (x = 1 << 15; x; x >>= 1) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
			data |= x;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
	}
	/* Deassert chip select. */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);

	return (data);
}
1073 
/*
 * Write the 16-bit word 'data' to the serial EEPROM at word address
 * 'offset'.  The sequence is: erase/write enable, the write command
 * itself, a poll loop waiting for the EEPROM to signal completion on
 * data-out, then erase/write disable.
 */
static void
fxp_eeprom_putword(struct fxp_softc *sc, int offset, u_int16_t data)
{
	int i;

	/*
	 * Erase/write enable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Shift in write opcode, address, data.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3);
	fxp_eeprom_shiftin(sc, offset, sc->eeprom_size);
	fxp_eeprom_shiftin(sc, data, 16);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Wait for EEPROM to finish up; it raises data-out when the
	 * internal write cycle completes (bounded at 1000 * 50us).
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	DELAY(1);
	for (i = 0; i < 1000; i++) {
		if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
			break;
		DELAY(50);
	}
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Erase/write disable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0, sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
}
1117 
1118 /*
1119  * From NetBSD:
1120  *
1121  * Figure out EEPROM size.
1122  *
1123  * 559's can have either 64-word or 256-word EEPROMs, the 558
1124  * datasheet only talks about 64-word EEPROMs, and the 557 datasheet
 * talks about the existence of 16 to 256 word EEPROMs.
1126  *
1127  * The only known sizes are 64 and 256, where the 256 version is used
1128  * by CardBus cards to store CIS information.
1129  *
1130  * The address is shifted in msb-to-lsb, and after the last
1131  * address-bit the EEPROM is supposed to output a `dummy zero' bit,
1132  * after which follows the actual data. We try to detect this zero, by
1133  * probing the data-out bit in the EEPROM control register just after
1134  * having shifted in a bit. If the bit is zero, we assume we've
1135  * shifted enough address bits. The data-out should be tri-state,
1136  * before this, which should translate to a logical one.
1137  */
static void
fxp_autosize_eeprom(struct fxp_softc *sc)
{

	/* guess maximum size of 256 words (8 address bits) */
	sc->eeprom_size = 8;

	/*
	 * autosize: the dummy read lets fxp_eeprom_getword() detect
	 * the real address width and update sc->eeprom_size.
	 */
	(void) fxp_eeprom_getword(sc, 0, 1);
}
1148 
1149 static void
1150 fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
1151 {
1152 	int i;
1153 
1154 	for (i = 0; i < words; i++)
1155 		data[i] = fxp_eeprom_getword(sc, offset + i, 0);
1156 }
1157 
1158 static void
1159 fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
1160 {
1161 	int i;
1162 
1163 	for (i = 0; i < words; i++)
1164 		fxp_eeprom_putword(sc, offset + i, data[i]);
1165 }
1166 
/*
 * bus_dmamap_load_mbuf() callback: copy the DMA segment list for a
 * transmit mbuf chain into the TBD array of the next free TxCB.
 * 'arg' is the softc; 'error' is non-zero if the load failed, in
 * which case nothing is written.
 */
static void
fxp_dma_map_txbuf(void *arg, bus_dma_segment_t *segs, int nseg,
    bus_size_t mapsize, int error)
{
	struct fxp_softc *sc;
	struct fxp_cb_tx *txp;
	int i;

	if (error)
		return;

	KASSERT(nseg <= FXP_NTXSEG, ("too many DMA segments"));

	sc = arg;
	/* The descriptor being filled is the one after tx_last. */
	txp = sc->fxp_desc.tx_last->tx_next->tx_cb;
	for (i = 0; i < nseg; i++) {
		KASSERT(segs[i].ds_len <= MCLBYTES, ("segment size too large"));
		/*
		 * If this is an 82550/82551, then we're using extended
		 * TxCBs _and_ we're using checksum offload. This means
		 * that the TxCB is really an IPCB. One major difference
		 * between the two is that with plain extended TxCBs,
		 * the bottom half of the TxCB contains two entries from
		 * the TBD array, whereas IPCBs contain just one entry:
		 * one entry (8 bytes) has been sacrificed for the TCP/IP
		 * checksum offload control bits. So to make things work
		 * right, we have to start filling in the TBD array
		 * starting from a different place depending on whether
		 * the chip is an 82550/82551 or not.
		 */
		if (sc->flags & FXP_FLAG_EXT_RFA) {
			txp->tbd[i + 1].tb_addr = htole32(segs[i].ds_addr);
			txp->tbd[i + 1].tb_size = htole32(segs[i].ds_len);
		} else {
			txp->tbd[i].tb_addr = htole32(segs[i].ds_addr);
			txp->tbd[i].tb_size = htole32(segs[i].ds_len);
		}
	}
	txp->tbd_number = nseg;
}
1207 
1208 /*
1209  * Start packet transmission on the interface.
1210  */
static void
fxp_start(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct fxp_tx *txp;
	struct mbuf *mb_head;
	int error;

	/*
	 * See if we need to suspend xmit until the multicast filter
	 * has been reprogrammed (which can only be done at the head
	 * of the command chain).
	 */
	if (sc->need_mcsetup) {
		return;
	}

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add
	 *       a NOP command when needed.
	 */
	while (ifp->if_snd.ifq_head != NULL && sc->tx_queued < FXP_NTXCB - 1) {

		/*
		 * Grab a packet to transmit.
		 */
		IF_DEQUEUE(&ifp->if_snd, mb_head);

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->fxp_desc.tx_last->tx_next;

		/*
		 * Deal with TCP/IP checksum offload. Note that
		 * in order for TCP checksum offload to work,
		 * the pseudo header checksum must have already
		 * been computed and stored in the checksum field
		 * in the TCP header. The stack should have
		 * already done this for us.
		 */

		if (mb_head->m_pkthdr.csum_flags) {
			if (mb_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
				txp->tx_cb->ipcb_ip_activation_high =
				    FXP_IPCB_HARDWAREPARSING_ENABLE;
				txp->tx_cb->ipcb_ip_schedule =
				    FXP_IPCB_TCPUDP_CHECKSUM_ENABLE;
				if (mb_head->m_pkthdr.csum_flags & CSUM_TCP)
					txp->tx_cb->ipcb_ip_schedule |=
					    FXP_IPCB_TCP_PACKET;
			}
#ifdef FXP_IP_CSUM_WAR
		/*
		 * XXX The 82550 chip appears to have trouble
		 * dealing with IP header checksums in very small
		 * datagrams, namely fragments from 1 to 3 bytes
		 * in size. For example, say you want to transmit
		 * a UDP packet of 1473 bytes. The packet will be
		 * fragmented over two IP datagrams, the latter
		 * containing only one byte of data. The 82550 will
		 * botch the header checksum on the 1-byte fragment.
		 * As long as the datagram contains 4 or more bytes
		 * of data, you're ok.
		 *
		 * The following code attempts to work around this
		 * problem: if the datagram is less than 38 bytes
		 * in size (14 bytes ether header, 20 bytes IP header,
		 * plus 4 bytes of data), we punt and compute the IP
		 * header checksum by hand. This workaround doesn't
		 * work very well, however, since it can be fooled
		 * by things like VLAN tags and IP options that make
		 * the header sizes/offsets vary.
		 */

			if (mb_head->m_pkthdr.csum_flags & CSUM_IP) {
				if (mb_head->m_pkthdr.len < 38) {
					struct ip *ip;
					mb_head->m_data += ETHER_HDR_LEN;
					ip = mtod(mb_head, struct ip *);
					ip->ip_sum = in_cksum(mb_head,
					    ip->ip_hl << 2);
					mb_head->m_data -= ETHER_HDR_LEN;
				} else {
					txp->tx_cb->ipcb_ip_activation_high =
					    FXP_IPCB_HARDWAREPARSING_ENABLE;
					txp->tx_cb->ipcb_ip_schedule |=
					    FXP_IPCB_IP_CHECKSUM_ENABLE;
				}
			}
#endif
		}

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
		error = bus_dmamap_load_mbuf(sc->fxp_mtag, txp->tx_map,
		    mb_head, fxp_dma_map_txbuf, sc, 0);

		if (error && error != EFBIG) {
			device_printf(sc->dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(mb_head);
			break;
		}

		if (error) {
			struct mbuf *mn;

			/*
			 * We ran out of segments (EFBIG). We have to
			 * recopy this mbuf chain first into a single
			 * contiguous buffer. Bail out if we can't get
			 * the new buffers.
			 */
			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				break;
			}
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					break;
				}
			}
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, caddr_t));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			/* Retry the DMA load with the compacted chain. */
			error = bus_dmamap_load_mbuf(sc->fxp_mtag, txp->tx_map,
			    mb_head, fxp_dma_map_txbuf, sc, 0);
			if (error) {
				device_printf(sc->dev,
				    "can't map mbuf (error %d)\n", error);
				m_freem(mb_head);
				break;
			}
		}

		bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
		    BUS_DMASYNC_PREWRITE);

		txp->tx_mbuf = mb_head;
		txp->tx_cb->cb_status = 0;
		txp->tx_cb->byte_count = 0;
		/*
		 * Request a completion interrupt (FXP_CB_COMMAND_I)
		 * only every FXP_CXINT_THRESH packets, to reduce
		 * interrupt load.
		 */
		if (sc->tx_queued != FXP_CXINT_THRESH - 1) {
			txp->tx_cb->cb_command =
			    htole16(sc->tx_cmd | FXP_CB_COMMAND_SF |
			    FXP_CB_COMMAND_S);
		} else {
			txp->tx_cb->cb_command =
			    htole16(sc->tx_cmd | FXP_CB_COMMAND_SF |
			    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
			/*
			 * Set a 5 second timer just in case we don't hear
			 * from the card again.
			 */
			ifp->if_timer = 5;
		}
		txp->tx_cb->tx_threshold = tx_threshold;

		/*
		 * Advance the end of list forward by clearing the
		 * suspend (S) bit in the previous last TxCB.
		 */

#ifdef __alpha__
		/*
		 * On platforms which can't access memory in 16-bit
		 * granularities, we must prevent the card from DMA'ing
		 * up the status while we update the command field.
		 * This could cause us to overwrite the completion status.
		 * XXX This is probably bogus and we're _not_ looking
		 * for atomicity here.
		 */
		atomic_clear_16(&sc->fxp_desc.tx_last->tx_cb->cb_command,
		    htole16(FXP_CB_COMMAND_S));
#else
		sc->fxp_desc.tx_last->tx_cb->cb_command &=
		    htole16(~FXP_CB_COMMAND_S);
#endif /*__alpha__*/
		sc->fxp_desc.tx_last = txp;

		/*
		 * Advance the beginning of the list forward if there are
		 * no other packets queued (when nothing is queued, tx_first
		 * sits on the last TxCB that was sent out).
		 */
		if (sc->tx_queued == 0)
			sc->fxp_desc.tx_first = txp;

		sc->tx_queued++;

		/*
		 * Pass packet to bpf if there is a listener.
		 */
		BPF_MTAP(ifp, mb_head);
	}
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);

	/*
	 * We're finished. If we added to the list, issue a RESUME to get DMA
	 * going again if suspended.
	 */
	if (txp != NULL) {
		fxp_scb_wait(sc);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
	}
}
1428 
1429 static void fxp_intr_body(struct fxp_softc *sc, u_int8_t statack, int count);
1430 
1431 #ifdef DEVICE_POLLING
1432 static poll_handler_t fxp_poll;
1433 
/*
 * Device polling handler: services the interface without interrupts.
 * 'count' bounds the amount of receive work done per call; -1 (from
 * fxp_intr()) means unbounded.
 */
static void
fxp_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct fxp_softc *sc = ifp->if_softc;
	u_int8_t statack;

	if (cmd == POLL_DEREGISTER) {	/* final call, enable interrupts */
		CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
		return;
	}
	/* Status bits we always service when polling. */
	statack = FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA |
	    FXP_SCB_STATACK_FR;
	if (cmd == POLL_AND_CHECK_STATUS) {
		u_int8_t tmp;

		tmp = CSR_READ_1(sc, FXP_CSR_SCB_STATACK);
		/* 0xff likely means the card was ejected; 0 means idle. */
		if (tmp == 0xff || tmp == 0)
			return; /* nothing to do */
		tmp &= ~statack;
		/* ack what we can */
		if (tmp != 0)
			CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, tmp);
		statack |= tmp;
	}
	fxp_intr_body(sc, statack, count);
}
1460 #endif /* DEVICE_POLLING */
1461 
1462 /*
1463  * Process interface interrupts.
1464  */
static void
fxp_intr(void *xsc)
{
	struct fxp_softc *sc = xsc;
	u_int8_t statack;

#ifdef DEVICE_POLLING
	struct ifnet *ifp = &sc->sc_if;

	/* If polling is active, the poll handler does all the work. */
	if (ifp->if_flags & IFF_POLLING)
		return;
	if (ether_poll_register(fxp_poll, ifp)) {
		/* disable interrupts */
		CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
		fxp_poll(ifp, 0, 1);
		return;
	}
#endif

	/* Ignore interrupts arriving while the device is suspended. */
	if (sc->suspended) {
		return;
	}

	while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
		/*
		 * It should not be possible to have all bits set; the
		 * FXP_SCB_INTR_SWI bit always returns 0 on a read.  If
		 * all bits are set, this may indicate that the card has
		 * been physically ejected, so ignore it.
		 */
		if (statack == 0xff)
			return;

		/*
		 * First ACK all the interrupts in this pass.
		 */
		CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
		fxp_intr_body(sc, statack, -1);
	}
}
1505 
/*
 * Reap completed transmit descriptors: walk from tx_first while the
 * chip has marked the TxCB complete (C bit), unmapping and freeing
 * the associated mbufs and decrementing tx_queued.
 */
static void
fxp_txeof(struct fxp_softc *sc)
{
	struct fxp_tx *txp;

	/* Pull the latest status words written by the chip. */
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREREAD);
	for (txp = sc->fxp_desc.tx_first; sc->tx_queued &&
	    (le16toh(txp->tx_cb->cb_status) & FXP_CB_STATUS_C) != 0;
	    txp = txp->tx_next) {
		if (txp->tx_mbuf != NULL) {
			bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
			m_freem(txp->tx_mbuf);
			txp->tx_mbuf = NULL;
			/* clear this to reset csum offload bits */
			txp->tx_cb->tbd[0].tb_addr = 0;
		}
		sc->tx_queued--;
	}
	sc->fxp_desc.tx_first = txp;
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
}
1529 
/*
 * Common interrupt/poll service routine.  'statack' holds the SCB
 * status bits already acknowledged; 'count' limits the number of
 * receive frames processed (-1 for unlimited, used from fxp_intr()).
 */
static void
fxp_intr_body(struct fxp_softc *sc, u_int8_t statack, int count)
{
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	struct fxp_rx *rxp;
	struct fxp_rfa *rfa;
	int rnr = (statack & FXP_SCB_STATACK_RNR) ? 1 : 0;

	if (rnr)
		fxp_rnr++;
#ifdef DEVICE_POLLING
	/* Pick up a deferred RNR condition if `count' ran out last time. */
	if (sc->flags & FXP_FLAG_DEFERRED_RNR) {
		sc->flags &= ~FXP_FLAG_DEFERRED_RNR;
		rnr = 1;
	}
#endif

	/*
	 * Free any finished transmit mbuf chains.
	 *
	 * Handle the CNA event like a CXTNO event. It used to
	 * be that this event (control unit not ready) was not
	 * encountered, but it is now with the SMPng modifications.
	 * The exact sequence of events that occur when the interface
	 * is brought up are different now, and if this event
	 * goes unhandled, the configuration/rxfilter setup sequence
	 * can stall for several seconds. The result is that no
	 * packets go out onto the wire for about 5 to 10 seconds
	 * after the interface is ifconfig'ed for the first time.
	 */
	if (statack & (FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA)) {
		fxp_txeof(sc);

		ifp->if_timer = 0;
		if (sc->tx_queued == 0) {
			/* Run a pending multicast filter reprogram. */
			if (sc->need_mcsetup)
				fxp_mc_setup(sc);
		}
		/*
		 * Try to start more packets transmitting.
		 */
		if (ifp->if_snd.ifq_head != NULL)
			fxp_start(ifp);
	}

	/*
	 * Just return if nothing happened on the receive side.
	 */
	if (!rnr && (statack & FXP_SCB_STATACK_FR) == 0)
		return;

	/*
	 * Process receiver interrupts. If a no-resource (RNR)
	 * condition exists, get whatever packets we can and
	 * re-start the receiver.
	 *
	 * When using polling, we do not process the list to completion,
	 * so when we get an RNR interrupt we must defer the restart
	 * until we hit the last buffer with the C bit set.
	 * If we run out of cycles and rfa_headm has the C bit set,
	 * record the pending RNR in the FXP_FLAG_DEFERRED_RNR flag so
	 * that the info will be used in the subsequent polling cycle.
	 */
	for (;;) {
		rxp = sc->fxp_desc.rx_head;
		m = rxp->rx_mbuf;
		/* The RFA lives at the front of the mbuf cluster. */
		rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
		    RFA_ALIGNMENT_FUDGE);
		bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
		    BUS_DMASYNC_POSTREAD);

#ifdef DEVICE_POLLING /* loop at most count times if count >=0 */
		if (count >= 0 && count-- == 0) {
			if (rnr) {
				/* Defer RNR processing until the next time. */
				sc->flags |= FXP_FLAG_DEFERRED_RNR;
				rnr = 0;
			}
			break;
		}
#endif /* DEVICE_POLLING */

		/* Stop at the first descriptor the chip hasn't completed. */
		if ((le16toh(rfa->rfa_status) & FXP_RFA_STATUS_C) == 0)
			break;

		/*
		 * Advance head forward.
		 */
		sc->fxp_desc.rx_head = rxp->rx_next;

		/*
		 * Add a new buffer to the receive chain.
		 * If this fails, the old buffer is recycled
		 * instead.
		 */
		if (fxp_add_rfabuf(sc, rxp) == 0) {
			int total_len;

			/*
			 * Fetch packet length (the top 2 bits of
			 * actual_size are flags set by the controller
			 * upon completion), and drop the packet in case
			 * of bogus length or CRC errors.
			 */
			total_len = le16toh(rfa->actual_size) & 0x3fff;
			if (total_len < sizeof(struct ether_header) ||
			    total_len > MCLBYTES - RFA_ALIGNMENT_FUDGE -
				sc->rfa_size ||
			    le16toh(rfa->rfa_status) & FXP_RFA_STATUS_CRC) {
				m_freem(m);
				continue;
			}

                        /* Do IP checksum checking. */
			if (le16toh(rfa->rfa_status) & FXP_RFA_STATUS_PARSE) {
				if (rfa->rfax_csum_sts &
				    FXP_RFDX_CS_IP_CSUM_BIT_VALID)
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_CHECKED;
				if (rfa->rfax_csum_sts &
				    FXP_RFDX_CS_IP_CSUM_VALID)
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_VALID;
				if ((rfa->rfax_csum_sts &
				    FXP_RFDX_CS_TCPUDP_CSUM_BIT_VALID) &&
				    (rfa->rfax_csum_sts &
				    FXP_RFDX_CS_TCPUDP_CSUM_VALID)) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			m->m_pkthdr.len = m->m_len = total_len;
			m->m_pkthdr.rcvif = ifp;

			/* Hand the packet up the stack. */
			(*ifp->if_input)(ifp, m);
		}
	}
	if (rnr) {
		/* Restart the receiver at the current list head. */
		fxp_scb_wait(sc);
		CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
		    sc->fxp_desc.rx_head->rx_addr);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
	}
}
1678 
1679 /*
1680  * Update packet in/out/collision statistics. The i82557 doesn't
1681  * allow you to access these counters without doing a fairly
1682  * expensive DMA to get _all_ of the statistics it maintains, so
1683  * we do this operation here only once per second. The statistics
1684  * counters in the kernel are updated from the previous dump-stats
1685  * DMA and then a new dump-stats DMA is started. The on-chip
1686  * counters are zeroed when the DMA completes. If we can't start
1687  * the DMA immediately, we don't wait - we just prepare to read
1688  * them again next time.
1689  */
static void
fxp_tick(void *xsc)
{
	struct fxp_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_stats *sp = sc->fxp_stats;
	int s;

	/* Pick up the statistics the chip DMA'ed since the last tick. */
	bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_POSTREAD);
	ifp->if_opackets += le32toh(sp->tx_good);
	ifp->if_collisions += le32toh(sp->tx_total_collisions);
	if (sp->rx_good) {
		ifp->if_ipackets += le32toh(sp->rx_good);
		sc->rx_idle_secs = 0;
	} else {
		/*
		 * Receiver's been idle for another second.
		 */
		sc->rx_idle_secs++;
	}
	ifp->if_ierrors +=
	    le32toh(sp->rx_crc_errors) +
	    le32toh(sp->rx_alignment_errors) +
	    le32toh(sp->rx_rnr_errors) +
	    le32toh(sp->rx_overrun_errors);
	/*
	 * If any transmit underruns occurred, bump up the transmit
	 * threshold by another 512 bytes (64 * 8).
	 */
	if (sp->tx_underruns) {
		ifp->if_oerrors += le32toh(sp->tx_underruns);
		if (tx_threshold < 192)
			tx_threshold += 64;
	}
	s = splimp();
	/*
	 * Release any xmit buffers that have completed DMA. This isn't
	 * strictly necessary to do here, but it's advantageous for mbufs
	 * with external storage to be released in a timely manner rather
	 * than being deferred for a potentially long time. This limits
	 * the delay to a maximum of one second.
	 */
	fxp_txeof(sc);

	/*
	 * If we haven't received any packets in FXP_MAC_RX_IDLE seconds,
	 * then assume the receiver has locked up and attempt to clear
	 * the condition by reprogramming the multicast filter. This is
	 * a work-around for a bug in the 82557 where the receiver locks
	 * up if it gets certain types of garbage in the synchronization
	 * bits prior to the packet header. This bug is supposed to only
	 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
	 * mode as well (perhaps due to a 10/100 speed transition).
	 */
	if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
		sc->rx_idle_secs = 0;
		fxp_mc_setup(sc);
	}
	/*
	 * If there is no pending command, start another stats
	 * dump. Otherwise punt for now.
	 */
	if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
		/*
		 * Start another stats dump.
		 */
		bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap,
		    BUS_DMASYNC_PREREAD);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET);
	} else {
		/*
		 * A previous command is still waiting to be accepted.
		 * Just zero our copy of the stats and wait for the
		 * next timer event to update them.
		 */
		sp->tx_good = 0;
		sp->tx_underruns = 0;
		sp->tx_total_collisions = 0;

		sp->rx_good = 0;
		sp->rx_crc_errors = 0;
		sp->rx_alignment_errors = 0;
		sp->rx_rnr_errors = 0;
		sp->rx_overrun_errors = 0;
	}
	/* Let the PHY driver do its periodic link maintenance. */
	if (sc->miibus != NULL)
		mii_tick(device_get_softc(sc->miibus));
	splx(s);
	/*
	 * Schedule another timeout one second from now.
	 */
	sc->stat_ch = timeout(fxp_tick, sc, hz);
}
1783 
1784 /*
1785  * Stop the interface. Cancels the statistics updater and resets
1786  * the interface.
1787  */
static void
fxp_stop(struct fxp_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_tx *txp;
	int i;

	/* Mark the interface down and cancel the watchdog. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

#ifdef DEVICE_POLLING
	ether_poll_deregister(ifp);
#endif
	/*
	 * Cancel stats updater.
	 */
	untimeout(fxp_tick, sc, sc->stat_ch);

	/*
	 * Issue software reset, which also unloads the microcode.
	 */
	sc->flags &= ~FXP_FLAG_UCODE;
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
	DELAY(50);

	/*
	 * Release any xmit buffers.
	 */
	txp = sc->fxp_desc.tx_list;
	if (txp != NULL) {
		for (i = 0; i < FXP_NTXCB; i++) {
			if (txp[i].tx_mbuf != NULL) {
				bus_dmamap_sync(sc->fxp_mtag, txp[i].tx_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->fxp_mtag, txp[i].tx_map);
				m_freem(txp[i].tx_mbuf);
				txp[i].tx_mbuf = NULL;
				/* clear this to reset csum offload bits */
				txp[i].tx_cb->tbd[0].tb_addr = 0;
			}
		}
	}
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
	sc->tx_queued = 0;
}
1833 
1834 /*
1835  * Watchdog/transmission transmit timeout handler. Called when a
1836  * transmission is started on the interface, but no interrupt is
1837  * received before the timeout. This usually indicates that the
1838  * card has wedged for some reason.
1839  */
static void
fxp_watchdog(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;

	device_printf(sc->dev, "device timeout\n");
	ifp->if_oerrors++;

	/* Reinitialize the interface to recover the wedged hardware. */
	fxp_init(sc);
}
1850 
1851 static void
1852 fxp_init(void *xsc)
1853 {
1854 	struct fxp_softc *sc = xsc;
1855 	struct ifnet *ifp = &sc->sc_if;
1856 	struct fxp_cb_config *cbp;
1857 	struct fxp_cb_ias *cb_ias;
1858 	struct fxp_cb_tx *tcbp;
1859 	struct fxp_tx *txp;
1860 	struct fxp_cb_mcs *mcsp;
1861 	int i, prm, s;
1862 
1863 	s = splimp();
1864 	/*
1865 	 * Cancel any pending I/O
1866 	 */
1867 	fxp_stop(sc);
1868 
1869 	prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;
1870 
1871 	/*
1872 	 * Initialize base of CBL and RFA memory. Loading with zero
1873 	 * sets it up for regular linear addressing.
1874 	 */
1875 	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
1876 	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE);
1877 
1878 	fxp_scb_wait(sc);
1879 	fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE);
1880 
1881 	/*
1882 	 * Initialize base of dump-stats buffer.
1883 	 */
1884 	fxp_scb_wait(sc);
1885 	bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_PREREAD);
1886 	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->stats_addr);
1887 	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR);
1888 
1889 	/*
1890 	 * Attempt to load microcode if requested.
1891 	 */
1892 	if (ifp->if_flags & IFF_LINK0 && (sc->flags & FXP_FLAG_UCODE) == 0)
1893 		fxp_load_ucode(sc);
1894 
1895 	/*
1896 	 * Initialize the multicast address list.
1897 	 */
1898 	if (fxp_mc_addrs(sc)) {
1899 		mcsp = sc->mcsp;
1900 		mcsp->cb_status = 0;
1901 		mcsp->cb_command =
1902 		    htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL);
1903 		mcsp->link_addr = 0xffffffff;
1904 		/*
1905 	 	 * Start the multicast setup command.
1906 		 */
1907 		fxp_scb_wait(sc);
1908 		bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREWRITE);
1909 		CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr);
1910 		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
1911 		/* ...and wait for it to complete. */
1912 		fxp_dma_wait(sc, &mcsp->cb_status, sc->mcs_tag, sc->mcs_map);
1913 		bus_dmamap_sync(sc->mcs_tag, sc->mcs_map,
1914 		    BUS_DMASYNC_POSTWRITE);
1915 	}
1916 
1917 	/*
1918 	 * We temporarily use memory that contains the TxCB list to
1919 	 * construct the config CB. The TxCB list memory is rebuilt
1920 	 * later.
1921 	 */
1922 	cbp = (struct fxp_cb_config *)sc->fxp_desc.cbl_list;
1923 
1924 	/*
1925 	 * This bcopy is kind of disgusting, but there are a bunch of must be
1926 	 * zero and must be one bits in this structure and this is the easiest
1927 	 * way to initialize them all to proper values.
1928 	 */
1929 	bcopy(fxp_cb_config_template, cbp, sizeof(fxp_cb_config_template));
1930 
1931 	cbp->cb_status =	0;
1932 	cbp->cb_command =	htole16(FXP_CB_COMMAND_CONFIG |
1933 	    FXP_CB_COMMAND_EL);
1934 	cbp->link_addr =	0xffffffff;	/* (no) next command */
1935 	cbp->byte_count =	sc->flags & FXP_FLAG_EXT_RFA ? 32 : 22;
1936 	cbp->rx_fifo_limit =	8;	/* rx fifo threshold (32 bytes) */
1937 	cbp->tx_fifo_limit =	0;	/* tx fifo threshold (0 bytes) */
1938 	cbp->adaptive_ifs =	0;	/* (no) adaptive interframe spacing */
1939 	cbp->mwi_enable =	sc->flags & FXP_FLAG_MWI_ENABLE ? 1 : 0;
1940 	cbp->type_enable =	0;	/* actually reserved */
1941 	cbp->read_align_en =	sc->flags & FXP_FLAG_READ_ALIGN ? 1 : 0;
1942 	cbp->end_wr_on_cl =	sc->flags & FXP_FLAG_WRITE_ALIGN ? 1 : 0;
1943 	cbp->rx_dma_bytecount =	0;	/* (no) rx DMA max */
1944 	cbp->tx_dma_bytecount =	0;	/* (no) tx DMA max */
1945 	cbp->dma_mbce =		0;	/* (disable) dma max counters */
1946 	cbp->late_scb =		0;	/* (don't) defer SCB update */
1947 	cbp->direct_dma_dis =	1;	/* disable direct rcv dma mode */
1948 	cbp->tno_int_or_tco_en =0;	/* (disable) tx not okay interrupt */
1949 	cbp->ci_int =		1;	/* interrupt on CU idle */
1950 	cbp->ext_txcb_dis = 	sc->flags & FXP_FLAG_EXT_TXCB ? 0 : 1;
1951 	cbp->ext_stats_dis = 	1;	/* disable extended counters */
1952 	cbp->keep_overrun_rx = 	0;	/* don't pass overrun frames to host */
1953 	cbp->save_bf =		sc->revision == FXP_REV_82557 ? 1 : prm;
1954 	cbp->disc_short_rx =	!prm;	/* discard short packets */
1955 	cbp->underrun_retry =	1;	/* retry mode (once) on DMA underrun */
1956 	cbp->two_frames =	0;	/* do not limit FIFO to 2 frames */
1957 	cbp->dyn_tbd =		0;	/* (no) dynamic TBD mode */
1958 	cbp->ext_rfa =		sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
1959 	cbp->mediatype =	sc->flags & FXP_FLAG_SERIAL_MEDIA ? 0 : 1;
1960 	cbp->csma_dis =		0;	/* (don't) disable link */
1961 	cbp->tcp_udp_cksum =	0;	/* (don't) enable checksum */
1962 	cbp->vlan_tco =		0;	/* (don't) enable vlan wakeup */
1963 	cbp->link_wake_en =	0;	/* (don't) assert PME# on link change */
1964 	cbp->arp_wake_en =	0;	/* (don't) assert PME# on arp */
1965 	cbp->mc_wake_en =	0;	/* (don't) enable PME# on mcmatch */
1966 	cbp->nsai =		1;	/* (don't) disable source addr insert */
1967 	cbp->preamble_length =	2;	/* (7 byte) preamble */
1968 	cbp->loopback =		0;	/* (don't) loopback */
1969 	cbp->linear_priority =	0;	/* (normal CSMA/CD operation) */
1970 	cbp->linear_pri_mode =	0;	/* (wait after xmit only) */
1971 	cbp->interfrm_spacing =	6;	/* (96 bits of) interframe spacing */
1972 	cbp->promiscuous =	prm;	/* promiscuous mode */
1973 	cbp->bcast_disable =	0;	/* (don't) disable broadcasts */
1974 	cbp->wait_after_win =	0;	/* (don't) enable modified backoff alg*/
1975 	cbp->ignore_ul =	0;	/* consider U/L bit in IA matching */
1976 	cbp->crc16_en =		0;	/* (don't) enable crc-16 algorithm */
1977 	cbp->crscdt =		sc->flags & FXP_FLAG_SERIAL_MEDIA ? 1 : 0;
1978 
1979 	cbp->stripping =	!prm;	/* truncate rx packet to byte count */
1980 	cbp->padding =		1;	/* (do) pad short tx packets */
1981 	cbp->rcv_crc_xfer =	0;	/* (don't) xfer CRC to host */
1982 	cbp->long_rx_en =	sc->flags & FXP_FLAG_LONG_PKT_EN ? 1 : 0;
1983 	cbp->ia_wake_en =	0;	/* (don't) wake up on address match */
1984 	cbp->magic_pkt_dis =	0;	/* (don't) disable magic packet */
1985 					/* must set wake_en in PMCSR also */
1986 	cbp->force_fdx =	0;	/* (don't) force full duplex */
1987 	cbp->fdx_pin_en =	1;	/* (enable) FDX# pin */
1988 	cbp->multi_ia =		0;	/* (don't) accept multiple IAs */
1989 	cbp->mc_all =		sc->flags & FXP_FLAG_ALL_MCAST ? 1 : 0;
1990 	cbp->gamla_rx =		sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
1991 
1992 	if (sc->revision == FXP_REV_82557) {
1993 		/*
1994 		 * The 82557 has no hardware flow control, the values
1995 		 * below are the defaults for the chip.
1996 		 */
1997 		cbp->fc_delay_lsb =	0;
1998 		cbp->fc_delay_msb =	0x40;
1999 		cbp->pri_fc_thresh =	3;
2000 		cbp->tx_fc_dis =	0;
2001 		cbp->rx_fc_restop =	0;
2002 		cbp->rx_fc_restart =	0;
2003 		cbp->fc_filter =	0;
2004 		cbp->pri_fc_loc =	1;
2005 	} else {
2006 		cbp->fc_delay_lsb =	0x1f;
2007 		cbp->fc_delay_msb =	0x01;
2008 		cbp->pri_fc_thresh =	3;
2009 		cbp->tx_fc_dis =	0;	/* enable transmit FC */
2010 		cbp->rx_fc_restop =	1;	/* enable FC restop frames */
2011 		cbp->rx_fc_restart =	1;	/* enable FC restart frames */
2012 		cbp->fc_filter =	!prm;	/* drop FC frames to host */
2013 		cbp->pri_fc_loc =	1;	/* FC pri location (byte31) */
2014 	}
2015 
2016 	/*
2017 	 * Start the config command/DMA.
2018 	 */
2019 	fxp_scb_wait(sc);
2020 	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2021 	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
2022 	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2023 	/* ...and wait for it to complete. */
2024 	fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map);
2025 	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
2026 
2027 	/*
2028 	 * Now initialize the station address. Temporarily use the TxCB
2029 	 * memory area like we did above for the config CB.
2030 	 */
2031 	cb_ias = (struct fxp_cb_ias *)sc->fxp_desc.cbl_list;
2032 	cb_ias->cb_status = 0;
2033 	cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL);
2034 	cb_ias->link_addr = 0xffffffff;
2035 	bcopy(sc->arpcom.ac_enaddr, cb_ias->macaddr,
2036 	    sizeof(sc->arpcom.ac_enaddr));
2037 
2038 	/*
2039 	 * Start the IAS (Individual Address Setup) command/DMA.
2040 	 */
2041 	fxp_scb_wait(sc);
2042 	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2043 	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2044 	/* ...and wait for it to complete. */
2045 	fxp_dma_wait(sc, &cb_ias->cb_status, sc->cbl_tag, sc->cbl_map);
2046 	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
2047 
2048 	/*
2049 	 * Initialize transmit control block (TxCB) list.
2050 	 */
2051 	txp = sc->fxp_desc.tx_list;
2052 	tcbp = sc->fxp_desc.cbl_list;
2053 	bzero(tcbp, FXP_TXCB_SZ);
2054 	for (i = 0; i < FXP_NTXCB; i++) {
2055 		txp[i].tx_cb = tcbp + i;
2056 		txp[i].tx_mbuf = NULL;
2057 		tcbp[i].cb_status = htole16(FXP_CB_STATUS_C | FXP_CB_STATUS_OK);
2058 		tcbp[i].cb_command = htole16(FXP_CB_COMMAND_NOP);
2059 		tcbp[i].link_addr = htole32(sc->fxp_desc.cbl_addr +
2060 		    (((i + 1) & FXP_TXCB_MASK) * sizeof(struct fxp_cb_tx)));
2061 		if (sc->flags & FXP_FLAG_EXT_TXCB)
2062 			tcbp[i].tbd_array_addr =
2063 			    htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[2]));
2064 		else
2065 			tcbp[i].tbd_array_addr =
2066 			    htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[0]));
2067 		txp[i].tx_next = &txp[(i + 1) & FXP_TXCB_MASK];
2068 	}
2069 	/*
2070 	 * Set the suspend flag on the first TxCB and start the control
2071 	 * unit. It will execute the NOP and then suspend.
2072 	 */
2073 	tcbp->cb_command = htole16(FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S);
2074 	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2075 	sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp;
2076 	sc->tx_queued = 1;
2077 
2078 	fxp_scb_wait(sc);
2079 	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2080 
2081 	/*
2082 	 * Initialize receiver buffer area - RFA.
2083 	 */
2084 	fxp_scb_wait(sc);
2085 	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.rx_head->rx_addr);
2086 	fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
2087 
2088 	/*
2089 	 * Set current media.
2090 	 */
2091 	if (sc->miibus != NULL)
2092 		mii_mediachg(device_get_softc(sc->miibus));
2093 
2094 	ifp->if_flags |= IFF_RUNNING;
2095 	ifp->if_flags &= ~IFF_OACTIVE;
2096 
2097 	/*
2098 	 * Enable interrupts.
2099 	 */
2100 #ifdef DEVICE_POLLING
2101 	/*
2102 	 * ... but only do that if we are not polling. And because (presumably)
2103 	 * the default is interrupts on, we need to disable them explicitly!
2104 	 */
2105 	if ( ifp->if_flags & IFF_POLLING )
2106 		CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
2107 	else
2108 #endif /* DEVICE_POLLING */
2109 	CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
2110 	splx(s);
2111 
2112 	/*
2113 	 * Start stats updater.
2114 	 */
2115 	sc->stat_ch = timeout(fxp_tick, sc, hz);
2116 }
2117 
/*
 * Media-change handler for serial (non-MII) media.  There is no PHY to
 * program, so this simply reports success.
 */
static int
fxp_serial_ifmedia_upd(struct ifnet *ifp)
{

	return (0);
}
2124 
/*
 * Media-status handler for serial (non-MII) media.  With no PHY to
 * query, always report manually-selected Ethernet media.
 */
static void
fxp_serial_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{

	ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
}
2131 
2132 /*
2133  * Change media according to request.
2134  */
2135 static int
2136 fxp_ifmedia_upd(struct ifnet *ifp)
2137 {
2138 	struct fxp_softc *sc = ifp->if_softc;
2139 	struct mii_data *mii;
2140 
2141 	mii = device_get_softc(sc->miibus);
2142 	mii_mediachg(mii);
2143 	return (0);
2144 }
2145 
2146 /*
2147  * Notify the world which media we're using.
2148  */
2149 static void
2150 fxp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2151 {
2152 	struct fxp_softc *sc = ifp->if_softc;
2153 	struct mii_data *mii;
2154 
2155 	mii = device_get_softc(sc->miibus);
2156 	mii_pollstat(mii);
2157 	ifmr->ifm_active = mii->mii_media_active;
2158 	ifmr->ifm_status = mii->mii_media_status;
2159 
2160 	if (ifmr->ifm_status & IFM_10_T && sc->flags & FXP_FLAG_CU_RESUME_BUG)
2161 		sc->cu_resume_bug = 1;
2162 	else
2163 		sc->cu_resume_bug = 0;
2164 }
2165 
2166 /*
2167  * Add a buffer to the end of the RFA buffer list.
2168  * Return 0 if successful, 1 for failure. A failure results in
2169  * adding the 'oldm' (if non-NULL) on to the end of the list -
2170  * tossing out its old contents and recycling it.
2171  * The RFA struct is stuck at the beginning of mbuf cluster and the
2172  * data pointer is fixed up to point just past it.
2173  */
static int
fxp_add_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
{
	struct mbuf *m;
	struct fxp_rfa *rfa, *p_rfa;
	struct fxp_rx *p_rx;
	bus_dmamap_t tmp_map;
	int error;

	/* Allocate a new cluster mbuf; bail without touching rxp on failure. */
	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	/*
	 * Move the data pointer up so that the incoming data packet
	 * will be 32-bit aligned.
	 */
	m->m_data += RFA_ALIGNMENT_FUDGE;

	/*
	 * Get a pointer to the base of the mbuf cluster and move
	 * data start past it.
	 */
	rfa = mtod(m, struct fxp_rfa *);
	m->m_data += sc->rfa_size;
	rfa->size = htole16(MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE);

	/*
	 * Initialize the rest of the RFA.  Note that since the RFA
	 * is misaligned, we cannot store values directly.  Instead,
	 * we use an optimized, inline copy.
	 */

	rfa->rfa_status = 0;
	/* EL marks this descriptor as the (new) end of the RFA list. */
	rfa->rfa_control = htole16(FXP_RFA_CONTROL_EL);
	rfa->actual_size = 0;

	/* le32enc() does byte stores, safe for the misaligned RFA fields. */
	le32enc(&rfa->link_addr, 0xffffffff);
	le32enc(&rfa->rbd_addr, 0xffffffff);

	/* Map the RFA into DMA memory. */
	error = bus_dmamap_load(sc->fxp_mtag, sc->spare_map, rfa,
	    MCLBYTES - RFA_ALIGNMENT_FUDGE, fxp_dma_map_addr,
	    &rxp->rx_addr, 0);
	if (error) {
		m_freem(m);
		return (error);
	}

	/*
	 * The load went into the spare map so the old mapping stayed valid
	 * until success was assured; now unload the old mapping and swap
	 * the spare map into this rx slot.
	 */
	bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
	tmp_map = sc->spare_map;
	sc->spare_map = rxp->rx_map;
	rxp->rx_map = tmp_map;
	rxp->rx_mbuf = m;

	bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map, BUS_DMASYNC_PREWRITE);

	/*
	 * If there are other buffers already on the list, attach this
	 * one to the end by fixing up the tail to point to this one.
	 */
	if (sc->fxp_desc.rx_head != NULL) {
		p_rx = sc->fxp_desc.rx_tail;
		p_rfa = (struct fxp_rfa *)
		    (p_rx->rx_mbuf->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE);
		p_rx->rx_next = rxp;
		/* Link the old tail to us and clear its EL bit. */
		le32enc(&p_rfa->link_addr, rxp->rx_addr);
		p_rfa->rfa_control = 0;
		bus_dmamap_sync(sc->fxp_mtag, p_rx->rx_map,
		    BUS_DMASYNC_PREWRITE);
	} else {
		rxp->rx_next = NULL;
		sc->fxp_desc.rx_head = rxp;
	}
	sc->fxp_desc.rx_tail = rxp;
	return (0);
}
2251 
2252 static volatile int
2253 fxp_miibus_readreg(device_t dev, int phy, int reg)
2254 {
2255 	struct fxp_softc *sc = device_get_softc(dev);
2256 	int count = 10000;
2257 	int value;
2258 
2259 	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
2260 	    (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));
2261 
2262 	while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
2263 	    && count--)
2264 		DELAY(10);
2265 
2266 	if (count <= 0)
2267 		device_printf(dev, "fxp_miibus_readreg: timed out\n");
2268 
2269 	return (value & 0xffff);
2270 }
2271 
2272 static void
2273 fxp_miibus_writereg(device_t dev, int phy, int reg, int value)
2274 {
2275 	struct fxp_softc *sc = device_get_softc(dev);
2276 	int count = 10000;
2277 
2278 	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
2279 	    (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
2280 	    (value & 0xffff));
2281 
2282 	while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
2283 	    count--)
2284 		DELAY(10);
2285 
2286 	if (count <= 0)
2287 		device_printf(dev, "fxp_miibus_writereg: timed out\n");
2288 }
2289 
2290 static int
2291 fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2292 {
2293 	struct fxp_softc *sc = ifp->if_softc;
2294 	struct ifreq *ifr = (struct ifreq *)data;
2295 	struct mii_data *mii;
2296 	int s, error = 0;
2297 
2298 	s = splimp();
2299 
2300 	switch (command) {
2301 	case SIOCSIFFLAGS:
2302 		if (ifp->if_flags & IFF_ALLMULTI)
2303 			sc->flags |= FXP_FLAG_ALL_MCAST;
2304 		else
2305 			sc->flags &= ~FXP_FLAG_ALL_MCAST;
2306 
2307 		/*
2308 		 * If interface is marked up and not running, then start it.
2309 		 * If it is marked down and running, stop it.
2310 		 * XXX If it's up then re-initialize it. This is so flags
2311 		 * such as IFF_PROMISC are handled.
2312 		 */
2313 		if (ifp->if_flags & IFF_UP) {
2314 			fxp_init(sc);
2315 		} else {
2316 			if (ifp->if_flags & IFF_RUNNING)
2317 				fxp_stop(sc);
2318 		}
2319 		break;
2320 
2321 	case SIOCADDMULTI:
2322 	case SIOCDELMULTI:
2323 		if (ifp->if_flags & IFF_ALLMULTI)
2324 			sc->flags |= FXP_FLAG_ALL_MCAST;
2325 		else
2326 			sc->flags &= ~FXP_FLAG_ALL_MCAST;
2327 		/*
2328 		 * Multicast list has changed; set the hardware filter
2329 		 * accordingly.
2330 		 */
2331 		if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0)
2332 			fxp_mc_setup(sc);
2333 		/*
2334 		 * fxp_mc_setup() can set FXP_FLAG_ALL_MCAST, so check it
2335 		 * again rather than else {}.
2336 		 */
2337 		if (sc->flags & FXP_FLAG_ALL_MCAST)
2338 			fxp_init(sc);
2339 		error = 0;
2340 		break;
2341 
2342 	case SIOCSIFMEDIA:
2343 	case SIOCGIFMEDIA:
2344 		if (sc->miibus != NULL) {
2345 			mii = device_get_softc(sc->miibus);
2346                         error = ifmedia_ioctl(ifp, ifr,
2347                             &mii->mii_media, command);
2348 		} else {
2349                         error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
2350 		}
2351 		break;
2352 
2353 	default:
2354 		error = ether_ioctl(ifp, command, data);
2355 	}
2356 	splx(s);
2357 	return (error);
2358 }
2359 
2360 /*
2361  * Fill in the multicast address list and return number of entries.
2362  */
2363 static int
2364 fxp_mc_addrs(struct fxp_softc *sc)
2365 {
2366 	struct fxp_cb_mcs *mcsp = sc->mcsp;
2367 	struct ifnet *ifp = &sc->sc_if;
2368 	struct ifmultiaddr *ifma;
2369 	int nmcasts;
2370 
2371 	nmcasts = 0;
2372 	if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0) {
2373 #if __FreeBSD_version < 500000
2374 		LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2375 #else
2376 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2377 #endif
2378 			if (ifma->ifma_addr->sa_family != AF_LINK)
2379 				continue;
2380 			if (nmcasts >= MAXMCADDR) {
2381 				sc->flags |= FXP_FLAG_ALL_MCAST;
2382 				nmcasts = 0;
2383 				break;
2384 			}
2385 			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2386 			    &sc->mcsp->mc_addr[nmcasts][0], ETHER_ADDR_LEN);
2387 			nmcasts++;
2388 		}
2389 	}
2390 	mcsp->mc_cnt = htole16(nmcasts * ETHER_ADDR_LEN);
2391 	return (nmcasts);
2392 }
2393 
2394 /*
2395  * Program the multicast filter.
2396  *
2397  * We have an artificial restriction that the multicast setup command
2398  * must be the first command in the chain, so we take steps to ensure
2399  * this. By requiring this, it allows us to keep up the performance of
2400  * the pre-initialized command ring (esp. link pointers) by not actually
2401  * inserting the mcsetup command in the ring - i.e. its link pointer
2402  * points to the TxCB ring, but the mcsetup descriptor itself is not part
2403  * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
2404  * lead into the regular TxCB ring when it completes.
2405  *
2406  * This function must be called at splimp.
2407  */
static void
fxp_mc_setup(struct fxp_softc *sc)
{
	struct fxp_cb_mcs *mcsp = sc->mcsp;
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_tx *txp;
	int count;

	/*
	 * If there are queued commands, we must wait until they are all
	 * completed. If we are already waiting, then add a NOP command
	 * with interrupt option so that we're notified when all commands
	 * have been completed - fxp_start() ensures that no additional
	 * TX commands will be added when need_mcsetup is true.
	 */
	if (sc->tx_queued) {
		/*
		 * need_mcsetup will be true if we are already waiting for the
		 * NOP command to be completed (see below). In this case, bail.
		 */
		if (sc->need_mcsetup)
			return;
		sc->need_mcsetup = 1;

		/*
		 * Add a NOP command with interrupt so that we are notified
		 * when all TX commands have been processed.
		 */
		txp = sc->fxp_desc.tx_last->tx_next;
		txp->tx_mbuf = NULL;
		txp->tx_cb->cb_status = 0;
		txp->tx_cb->cb_command = htole16(FXP_CB_COMMAND_NOP |
		    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
		/*
		 * Advance the end of list forward: clear the suspend bit
		 * on what was the last descriptor so the CU runs into
		 * the NOP just queued.
		 */
		sc->fxp_desc.tx_last->tx_cb->cb_command &=
		    htole16(~FXP_CB_COMMAND_S);
		bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
		sc->fxp_desc.tx_last = txp;
		sc->tx_queued++;
		/*
		 * Issue a resume in case the CU has just suspended.
		 */
		fxp_scb_wait(sc);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
		/*
		 * Set a 5 second timer just in case we don't hear from the
		 * card again.
		 */
		ifp->if_timer = 5;

		return;
	}
	sc->need_mcsetup = 0;

	/*
	 * Initialize multicast setup descriptor.  Its link pointer leads
	 * back into the regular TxCB ring (see the function header comment:
	 * the mcsetup descriptor itself lives outside the ring).
	 */
	mcsp->cb_status = 0;
	mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS |
	    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
	mcsp->link_addr = htole32(sc->fxp_desc.cbl_addr);
	txp = &sc->fxp_desc.mcs_tx;
	txp->tx_mbuf = NULL;
	txp->tx_cb = (struct fxp_cb_tx *)sc->mcsp;
	txp->tx_next = sc->fxp_desc.tx_list;
	(void) fxp_mc_addrs(sc);
	sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp;
	sc->tx_queued = 1;

	/*
	 * Wait until command unit is not active. This should never
	 * be the case when nothing is queued, but make sure anyway.
	 */
	count = 100;
	while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) ==
	    FXP_SCB_CUS_ACTIVE && --count)
		DELAY(10);
	if (count == 0) {
		device_printf(sc->dev, "command queue timeout\n");
		return;
	}

	/*
	 * Start the multicast setup command.
	 */
	fxp_scb_wait(sc);
	bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);

	/* Expect completion within 2 seconds; watchdog fires otherwise. */
	ifp->if_timer = 2;
	return;
}
2503 
/*
 * "CPU saver" receive-bundling microcode images, one per supported chip
 * revision.  The *_RCVBUNDLE_UCODE macros expand to array initializers.
 */
static u_int32_t fxp_ucode_d101a[] = D101_A_RCVBUNDLE_UCODE;
static u_int32_t fxp_ucode_d101b0[] = D101_B0_RCVBUNDLE_UCODE;
static u_int32_t fxp_ucode_d101ma[] = D101M_B_RCVBUNDLE_UCODE;
static u_int32_t fxp_ucode_d101s[] = D101S_RCVBUNDLE_UCODE;
static u_int32_t fxp_ucode_d102[] = D102_B_RCVBUNDLE_UCODE;
static u_int32_t fxp_ucode_d102c[] = D102_C_RCVBUNDLE_UCODE;

/* Expands to the image pointer and its size, for ucode_table entries. */
#define UCODE(x)	x, sizeof(x)

struct ucode {
	u_int32_t	revision;	/* chip revision this image is for */
	u_int32_t	*ucode;		/* microcode image */
	int		length;		/* image size in bytes */
	u_short		int_delay_offset;	/* ucode[] index patched with
						   the int delay (0 = none) */
	u_short		bundle_max_offset;	/* ucode[] index patched with
						   bundle max (0 = none) */
} ucode_table[] = {
	{ FXP_REV_82558_A4, UCODE(fxp_ucode_d101a), D101_CPUSAVER_DWORD, 0 },
	{ FXP_REV_82558_B0, UCODE(fxp_ucode_d101b0), D101_CPUSAVER_DWORD, 0 },
	{ FXP_REV_82559_A0, UCODE(fxp_ucode_d101ma),
	    D101M_CPUSAVER_DWORD, D101M_CPUSAVER_BUNDLE_MAX_DWORD },
	{ FXP_REV_82559S_A, UCODE(fxp_ucode_d101s),
	    D101S_CPUSAVER_DWORD, D101S_CPUSAVER_BUNDLE_MAX_DWORD },
	{ FXP_REV_82550, UCODE(fxp_ucode_d102),
	    D102_B_CPUSAVER_DWORD, D102_B_CPUSAVER_BUNDLE_MAX_DWORD },
	{ FXP_REV_82550_C, UCODE(fxp_ucode_d102c),
	    D102_C_CPUSAVER_DWORD, D102_C_CPUSAVER_BUNDLE_MAX_DWORD },
	{ 0, NULL, 0, 0, 0 }	/* table terminator */
};
2532 
/*
 * Download the revision-specific receive-bundling microcode to the chip
 * and patch in the tunable interrupt-delay and bundle-max parameters.
 * Silently does nothing if no image exists for this chip revision.
 */
static void
fxp_load_ucode(struct fxp_softc *sc)
{
	struct ucode *uc;
	struct fxp_cb_ucode *cbp;

	/* Find a microcode image matching this chip revision. */
	for (uc = ucode_table; uc->ucode != NULL; uc++)
		if (sc->revision == uc->revision)
			break;
	if (uc->ucode == NULL)
		return;
	/* Reuse the TxCB list area to build the ucode command block. */
	cbp = (struct fxp_cb_ucode *)sc->fxp_desc.cbl_list;
	cbp->cb_status = 0;
	cbp->cb_command = htole16(FXP_CB_COMMAND_UCODE | FXP_CB_COMMAND_EL);
	cbp->link_addr = 0xffffffff;    	/* (no) next command */
	memcpy(cbp->ucode, uc->ucode, uc->length);
	/*
	 * Patch the tunables into the image at the table-specified
	 * offsets.  The delay is scaled by 1.5 -- presumably a
	 * microseconds-to-chip-ticks conversion; TODO confirm against
	 * the microcode documentation.
	 */
	if (uc->int_delay_offset)
		*(u_int16_t *)&cbp->ucode[uc->int_delay_offset] =
		    htole16(sc->tunable_int_delay + sc->tunable_int_delay / 2);
	if (uc->bundle_max_offset)
		*(u_int16_t *)&cbp->ucode[uc->bundle_max_offset] =
		    htole16(sc->tunable_bundle_max);
	/*
	 * Download the ucode to the chip.
	 */
	fxp_scb_wait(sc);
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map);
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
	device_printf(sc->dev,
	    "Microcode loaded, int_delay: %d usec  bundle_max: %d\n",
	    sc->tunable_int_delay,
	    uc->bundle_max_offset == 0 ? 0 : sc->tunable_bundle_max);
	sc->flags |= FXP_FLAG_UCODE;
}
2571 
2572 static int
2573 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2574 {
2575 	int error, value;
2576 
2577 	value = *(int *)arg1;
2578 	error = sysctl_handle_int(oidp, &value, 0, req);
2579 	if (error || !req->newptr)
2580 		return (error);
2581 	if (value < low || value > high)
2582 		return (EINVAL);
2583 	*(int *)arg1 = value;
2584 	return (0);
2585 }
2586 
2587 /*
2588  * Interrupt delay is expressed in microseconds, a multiplier is used
2589  * to convert this to the appropriate clock ticks before using.
2590  */
/* Sysctl handler: interrupt delay tunable, limited to 300..3000 usec. */
static int
sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req, 300, 3000));
}
2596 
/* Sysctl handler: receive bundle-max tunable, limited to 1..65535. */
static int
sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req, 1, 0xffff));
}
2602