xref: /freebsd/sys/dev/bge/if_bge.c (revision b3a1f9373a31b644f8a65de1ba35929af3f6a9fe)
1 /*-
2  * Copyright (c) 2001 Wind River Systems
3  * Copyright (c) 1997, 1998, 1999, 2001
4  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 /*
38  * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39  *
40  * The Broadcom BCM5700 is based on technology originally developed by
41  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45  * frames, highly configurable RX filtering, and 16 RX and TX queues
46  * (which, along with RX filter rules, can be used for QOS applications).
47  * Other features, such as TCP segmentation, may be available as part
48  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49  * firmware images can be stored in hardware and need not be compiled
50  * into the driver.
51  *
52  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53  * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54  *
55  * The BCM5701 is a single-chip solution incorporating both the BCM5700
56  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57  * does not support external SSRAM.
58  *
59  * Broadcom also produces a variation of the BCM5700 under the "Altima"
60  * brand name, which is functionally similar but lacks PCI-X support.
61  *
62  * Without external SSRAM, you can have at most 4 TX rings,
63  * and the use of the mini RX ring is disabled. This seems to imply
64  * that these features are simply not available on the BCM5701. As a
65  * result, this driver does not implement any support for the mini RX
66  * ring.
67  */
68 
69 #ifdef HAVE_KERNEL_OPTION_HEADERS
70 #include "opt_device_polling.h"
71 #endif
72 
73 #include <sys/param.h>
74 #include <sys/endian.h>
75 #include <sys/systm.h>
76 #include <sys/sockio.h>
77 #include <sys/mbuf.h>
78 #include <sys/malloc.h>
79 #include <sys/kernel.h>
80 #include <sys/module.h>
81 #include <sys/socket.h>
82 
83 #include <net/if.h>
84 #include <net/if_arp.h>
85 #include <net/ethernet.h>
86 #include <net/if_dl.h>
87 #include <net/if_media.h>
88 
89 #include <net/bpf.h>
90 
91 #include <net/if_types.h>
92 #include <net/if_vlan_var.h>
93 
94 #include <netinet/in_systm.h>
95 #include <netinet/in.h>
96 #include <netinet/ip.h>
97 
98 #include <machine/bus.h>
99 #include <machine/resource.h>
100 #include <sys/bus.h>
101 #include <sys/rman.h>
102 
103 #include <dev/mii/mii.h>
104 #include <dev/mii/miivar.h>
105 #include "miidevs.h"
106 #include <dev/mii/brgphyreg.h>
107 
108 #include <dev/pci/pcireg.h>
109 #include <dev/pci/pcivar.h>
110 
111 #include <dev/bge/if_bgereg.h>
112 
113 #define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
114 #define ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
115 
116 MODULE_DEPEND(bge, pci, 1, 1, 1);
117 MODULE_DEPEND(bge, ether, 1, 1, 1);
118 MODULE_DEPEND(bge, miibus, 1, 1, 1);
119 
120 /* "device miibus" required.  See GENERIC if you get errors here. */
121 #include "miibus_if.h"
122 
123 /*
124  * Various supported device vendors/types and their names. Note: the
125  * spec seems to indicate that the hardware still has Alteon's vendor
126  * ID burned into it, though it will always be overridden by the vendor
127  * ID in the EEPROM. Just to be safe, we cover all possibilities.
128  */
129 #define BGE_DEVDESC_MAX		64	/* Maximum device description length */
130 
131 static struct bge_type bge_devs[] = {
132 	{ ALT_VENDORID,	ALT_DEVICEID_BCM5700,
133 		"Broadcom BCM5700 Gigabit Ethernet" },
134 	{ ALT_VENDORID,	ALT_DEVICEID_BCM5701,
135 		"Broadcom BCM5701 Gigabit Ethernet" },
136 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5700,
137 		"Broadcom BCM5700 Gigabit Ethernet" },
138 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5701,
139 		"Broadcom BCM5701 Gigabit Ethernet" },
140 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5702,
141 		"Broadcom BCM5702 Gigabit Ethernet" },
142 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5702X,
143 		"Broadcom BCM5702X Gigabit Ethernet" },
144 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5703,
145 		"Broadcom BCM5703 Gigabit Ethernet" },
146 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5703X,
147 		"Broadcom BCM5703X Gigabit Ethernet" },
148 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5704C,
149 		"Broadcom BCM5704C Dual Gigabit Ethernet" },
150 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5704S,
151 		"Broadcom BCM5704S Dual Gigabit Ethernet" },
152 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705,
153 		"Broadcom BCM5705 Gigabit Ethernet" },
154 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705K,
155 		"Broadcom BCM5705K Gigabit Ethernet" },
156 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705M,
157 		"Broadcom BCM5705M Gigabit Ethernet" },
158 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT,
159 		"Broadcom BCM5705M Gigabit Ethernet" },
160 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5714C,
161 		"Broadcom BCM5714C Gigabit Ethernet" },
162 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5721,
163 		"Broadcom BCM5721 Gigabit Ethernet" },
164 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5750,
165 		"Broadcom BCM5750 Gigabit Ethernet" },
166 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5750M,
167 		"Broadcom BCM5750M Gigabit Ethernet" },
168 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5751,
169 		"Broadcom BCM5751 Gigabit Ethernet" },
170 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5751M,
171 		"Broadcom BCM5751M Gigabit Ethernet" },
172 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5752,
173 		"Broadcom BCM5752 Gigabit Ethernet" },
174 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5782,
175 		"Broadcom BCM5782 Gigabit Ethernet" },
176 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5788,
177 		"Broadcom BCM5788 Gigabit Ethernet" },
178 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5789,
179 		"Broadcom BCM5789 Gigabit Ethernet" },
180 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5901,
181 		"Broadcom BCM5901 Fast Ethernet" },
182 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2,
183 		"Broadcom BCM5901A2 Fast Ethernet" },
184 	{ SK_VENDORID, SK_DEVICEID_ALTIMA,
185 		"SysKonnect Gigabit Ethernet" },
186 	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000,
187 		"Altima AC1000 Gigabit Ethernet" },
188 	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002,
189 		"Altima AC1002 Gigabit Ethernet" },
190 	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100,
191 		"Altima AC9100 Gigabit Ethernet" },
192 	{ 0, 0, NULL }
193 };
194 
195 static int bge_probe(device_t);
196 static int bge_attach(device_t);
197 static int bge_detach(device_t);
198 static int bge_suspend(device_t);
199 static int bge_resume(device_t);
200 static void bge_release_resources(struct bge_softc *);
201 static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
202 static int bge_dma_alloc(device_t);
203 static void bge_dma_free(struct bge_softc *);
204 
205 static void bge_txeof(struct bge_softc *);
206 static void bge_rxeof(struct bge_softc *);
207 
208 static void bge_tick_locked(struct bge_softc *);
209 static void bge_tick(void *);
210 static void bge_stats_update(struct bge_softc *);
211 static void bge_stats_update_regs(struct bge_softc *);
212 static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);
213 
214 static void bge_intr(void *);
215 static void bge_start_locked(struct ifnet *);
216 static void bge_start(struct ifnet *);
217 static int bge_ioctl(struct ifnet *, u_long, caddr_t);
218 static void bge_init_locked(struct bge_softc *);
219 static void bge_init(void *);
220 static void bge_stop(struct bge_softc *);
221 static void bge_watchdog(struct ifnet *);
222 static void bge_shutdown(device_t);
223 static int bge_ifmedia_upd(struct ifnet *);
224 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
225 
226 static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
227 static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
228 
229 static void bge_setmulti(struct bge_softc *);
230 
231 static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
232 static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
233 static int bge_init_rx_ring_std(struct bge_softc *);
234 static void bge_free_rx_ring_std(struct bge_softc *);
235 static int bge_init_rx_ring_jumbo(struct bge_softc *);
236 static void bge_free_rx_ring_jumbo(struct bge_softc *);
237 static void bge_free_tx_ring(struct bge_softc *);
238 static int bge_init_tx_ring(struct bge_softc *);
239 
240 static int bge_chipinit(struct bge_softc *);
241 static int bge_blockinit(struct bge_softc *);
242 
243 #ifdef notdef
244 static uint8_t bge_vpd_readbyte(struct bge_softc *, int);
245 static void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, int);
246 static void bge_vpd_read(struct bge_softc *);
247 #endif
248 
249 static uint32_t bge_readmem_ind(struct bge_softc *, int);
250 static void bge_writemem_ind(struct bge_softc *, int, int);
251 #ifdef notdef
252 static uint32_t bge_readreg_ind(struct bge_softc *, int);
253 #endif
254 static void bge_writereg_ind(struct bge_softc *, int, int);
255 
256 static int bge_miibus_readreg(device_t, int, int);
257 static int bge_miibus_writereg(device_t, int, int, int);
258 static void bge_miibus_statchg(device_t);
259 #ifdef DEVICE_POLLING
260 static void bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
261 #endif
262 
263 static void bge_reset(struct bge_softc *);
264 static void bge_link_upd(struct bge_softc *);
265 
266 static device_method_t bge_methods[] = {
267 	/* Device interface */
268 	DEVMETHOD(device_probe,		bge_probe),
269 	DEVMETHOD(device_attach,	bge_attach),
270 	DEVMETHOD(device_detach,	bge_detach),
271 	DEVMETHOD(device_shutdown,	bge_shutdown),
272 	DEVMETHOD(device_suspend,	bge_suspend),
273 	DEVMETHOD(device_resume,	bge_resume),
274 
275 	/* bus interface */
276 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
277 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
278 
279 	/* MII interface */
280 	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
281 	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
282 	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),
283 
284 	{ 0, 0 }
285 };
286 
287 static driver_t bge_driver = {
288 	"bge",
289 	bge_methods,
290 	sizeof(struct bge_softc)
291 };
292 
293 static devclass_t bge_devclass;
294 
295 DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
296 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
297 
298 static int bge_fake_autoneg = 0;
299 TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
300 
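/*
 * Indirect register/memory access: write the target offset into the
 * PCI memory window base address register, then transfer the 32-bit
 * word through the memory window data register.
 */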
301 static uint32_t
302 bge_readmem_ind(struct bge_softc *sc, int off)
303 {
304 	device_t dev;
305 
306 	dev = sc->bge_dev;
307 
308 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
309 	return (pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
310 }
311 
312 static void
313 bge_writemem_ind(struct bge_softc *sc, int off, int val)
314 {
315 	device_t dev;
316 
317 	dev = sc->bge_dev;
318 
319 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
320 	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
321 }
322 
323 #ifdef notdef
324 static uint32_t
325 bge_readreg_ind(struct bge_softc *sc, int off)
326 {
327 	device_t dev;
328 
329 	dev = sc->bge_dev;
330 
331 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
332 	return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
333 }
334 #endif
335 
336 static void
337 bge_writereg_ind(struct bge_softc *sc, int off, int val)
338 {
339 	device_t dev;
340 
341 	dev = sc->bge_dev;
342 
343 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
344 	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
345 }
346 
347 /*
348  * Map a single buffer address.
349  */
350 
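/*
 * bus_dmamap_load() callback: record the bus address of the single
 * segment in the caller-supplied bge_dmamap_arg.  If the load produced
 * more segments than the caller allowed for, signal failure by zeroing
 * bge_maxsegs.
 */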
351 static void
352 bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
353 {
354 	struct bge_dmamap_arg *ctx;
355 
356 	if (error)
357 		return;
358 
359 	ctx = arg;
360 
361 	if (nseg > ctx->bge_maxsegs) {
362 		ctx->bge_maxsegs = 0;
363 		return;
364 	}
365 
366 	ctx->bge_busaddr = segs->ds_addr;
367 }
368 
369 #ifdef notdef
370 static uint8_t
371 bge_vpd_readbyte(struct bge_softc *sc, int addr)
372 {
373 	int i;
374 	device_t dev;
375 	uint32_t val;
376 
377 	dev = sc->bge_dev;
378 	pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
379 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
380 		DELAY(10);
381 		if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
382 			break;
383 	}
384 
385 	if (i == BGE_TIMEOUT * 10) {
386 		device_printf(sc->bge_dev, "VPD read timed out\n");
387 		return (0);
388 	}
389 
390 	val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);
391 
392 	return ((val >> ((addr % 4) * 8)) & 0xFF);
393 }
394 
395 static void
396 bge_vpd_read_res(struct bge_softc *sc, struct vpd_res *res, int addr)
397 {
398 	int i;
399 	uint8_t *ptr;
400 
401 	ptr = (uint8_t *)res;
402 	for (i = 0; i < sizeof(struct vpd_res); i++)
403 		ptr[i] = bge_vpd_readbyte(sc, i + addr);
404 }
405 
406 static void
407 bge_vpd_read(struct bge_softc *sc)
408 {
409 	struct vpd_res res;
410 	int i, pos = 0;
411 
412 	if (sc->bge_vpd_prodname != NULL)
413 		free(sc->bge_vpd_prodname, M_DEVBUF);
414 	if (sc->bge_vpd_readonly != NULL)
415 		free(sc->bge_vpd_readonly, M_DEVBUF);
416 	sc->bge_vpd_prodname = NULL;
417 	sc->bge_vpd_readonly = NULL;
418 
419 	bge_vpd_read_res(sc, &res, pos);
420 
421 	if (res.vr_id != VPD_RES_ID) {
422 		device_printf(sc->bge_dev,
423 		    "bad VPD resource id: expected %x got %x\n", VPD_RES_ID,
424 		    res.vr_id);
425 		return;
426 	}
427 
428 	pos += sizeof(res);
429 	sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
430 	for (i = 0; i < res.vr_len; i++)
431 		sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
432 	sc->bge_vpd_prodname[i] = '\0';
433 	pos += i;
434 
435 	bge_vpd_read_res(sc, &res, pos);
436 
437 	if (res.vr_id != VPD_RES_READ) {
438 		device_printf(sc->bge_dev,
439 		    "bad VPD resource id: expected %x got %x\n", VPD_RES_READ,
440 		    res.vr_id);
441 		return;
442 	}
443 
444 	pos += sizeof(res);
445 	sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
446 	for (i = 0; i < res.vr_len; i++)
447 		sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
448 }
449 #endif
450 
451 /*
452  * Read a byte of data stored in the EEPROM at address 'addr.' The
453  * BCM570x supports both the traditional bitbang interface and an
454  * auto access interface for reading the EEPROM. We use the auto
455  * access method.
456  */
457 static uint8_t
458 bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
459 {
460 	int i;
461 	uint32_t byte = 0;
462 
463 	/*
464 	 * Enable use of auto EEPROM access so we can avoid
465 	 * having to use the bitbang method.
466 	 */
467 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
468 
469 	/* Reset the EEPROM, load the clock period. */
470 	CSR_WRITE_4(sc, BGE_EE_ADDR,
471 	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
472 	DELAY(20);
473 
474 	/* Issue the read EEPROM command. */
475 	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
476 
477 	/* Wait for completion */
478 	for(i = 0; i < BGE_TIMEOUT * 10; i++) {
479 		DELAY(10);
480 		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
481 			break;
482 	}
483 
484 	if (i == BGE_TIMEOUT * 10) {
485 		device_printf(sc->bge_dev, "EEPROM read timed out\n");
486 		return (1);
487 	}
488 
489 	/* Get result. */
490 	byte = CSR_READ_4(sc, BGE_EE_DATA);
491 
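	/*
	 * The data register holds a full 32-bit word; extract the byte
	 * lane that corresponds to the requested address.
	 */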
492 	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;
493 
494 	return (0);
495 }
496 
497 /*
498  * Read a sequence of bytes from the EEPROM.
499  */
500 static int
501 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
502 {
503 	int i, error = 0;
504 	uint8_t byte = 0;
505 
506 	for (i = 0; i < cnt; i++) {
507 		error = bge_eeprom_getbyte(sc, off + i, &byte);
508 		if (error)
509 			break;
510 		*(dest + i) = byte;
511 	}
512 
513 	return (error ? 1 : 0);
514 }
515 
516 static int
517 bge_miibus_readreg(device_t dev, int phy, int reg)
518 {
519 	struct bge_softc *sc;
520 	uint32_t val, autopoll;
521 	int i;
522 
523 	sc = device_get_softc(dev);
524 
525 	/*
526 	 * Broadcom's own driver always assumes the internal
527 	 * PHY is at GMII address 1. On some chips, the PHY responds
528 	 * to accesses at all addresses, which could cause us to
529  * bogusly attach the PHY 32 times at probe time. Always
530 	 * restricting the lookup to address 1 is simpler than
531 	 * trying to figure out which chips revisions should be
532 	 * special-cased.
533 	 */
534 	if (phy != 1)
535 		return (0);
536 
537 	/* Reading with autopolling on may trigger PCI errors */
538 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
539 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
540 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
541 		DELAY(40);
542 	}
543 
544 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
545 	    BGE_MIPHY(phy)|BGE_MIREG(reg));
546 
547 	for (i = 0; i < BGE_TIMEOUT; i++) {
548 		val = CSR_READ_4(sc, BGE_MI_COMM);
549 		if (!(val & BGE_MICOMM_BUSY))
550 			break;
551 	}
552 
553 	if (i == BGE_TIMEOUT) {
554 		if_printf(sc->bge_ifp, "PHY read timed out\n");
555 		val = 0;
556 		goto done;
557 	}
558 
559 	val = CSR_READ_4(sc, BGE_MI_COMM);
560 
561 done:
562 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
563 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
564 		DELAY(40);
565 	}
566 
567 	if (val & BGE_MICOMM_READFAIL)
568 		return (0);
569 
570 	return (val & 0xFFFF);
571 }
572 
573 static int
574 bge_miibus_writereg(device_t dev, int phy, int reg, int val)
575 {
576 	struct bge_softc *sc;
577 	uint32_t autopoll;
578 	int i;
579 
580 	sc = device_get_softc(dev);
581 
582 	/* Accessing the PHY with autopolling on may trigger PCI errors */
583 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
584 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
585 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
586 		DELAY(40);
587 	}
588 
589 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
590 	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
591 
592 	for (i = 0; i < BGE_TIMEOUT; i++) {
593 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
594 			break;
595 	}
596 
597 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
598 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
599 		DELAY(40);
600 	}
601 
602 	if (i == BGE_TIMEOUT) {
603 		if_printf(sc->bge_ifp, "PHY write timed out\n");
604 		return (0);
605 	}
606 
607 	return (0);
608 }
609 
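/*
 * MII statchg callback: program the MAC's port mode (GMII vs. MII) and
 * half/full duplex setting to match the media the PHY negotiated.
 */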
610 static void
611 bge_miibus_statchg(device_t dev)
612 {
613 	struct bge_softc *sc;
614 	struct mii_data *mii;
615 
616 	sc = device_get_softc(dev);
617 	mii = device_get_softc(sc->bge_miibus);
618 
619 	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
620 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
621 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
622 	else
623 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
624 
625 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
626 		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
627 	else
628 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
629 }
630 
631 /*
632  * Initialize a standard receive ring descriptor.
633  */
634 static int
635 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
636 {
637 	struct mbuf *m_new = NULL;
638 	struct bge_rx_bd *r;
639 	struct bge_dmamap_arg ctx;
640 	int error;
641 
642 	if (m == NULL) {
643 		m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
644 		if (m_new == NULL)
645 			return (ENOBUFS);
646 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
647 	} else {
648 		m_new = m;
649 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
650 		m_new->m_data = m_new->m_ext.ext_buf;
651 	}
652 
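	/*
	 * Offset the payload by ETHER_ALIGN (2) bytes so the IP header
	 * lands on a 32-bit boundary, except on chips flagged with the
	 * RX alignment bug, which require unadjusted receive buffers.
	 */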
653 	if (!sc->bge_rx_alignment_bug)
654 		m_adj(m_new, ETHER_ALIGN);
655 	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
656 	r = &sc->bge_ldata.bge_rx_std_ring[i];
657 	ctx.bge_maxsegs = 1;
658 	ctx.sc = sc;
659 	error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
660 	    sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
661 	    m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
662 	if (error || ctx.bge_maxsegs == 0) {
663 		if (m == NULL) {
664 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
665 			m_freem(m_new);
666 		}
667 		return (ENOMEM);
668 	}
669 	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
670 	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
671 	r->bge_flags = BGE_RXBDFLAG_END;
672 	r->bge_len = m_new->m_len;
673 	r->bge_idx = i;
674 
675 	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
676 	    sc->bge_cdata.bge_rx_std_dmamap[i],
677 	    BUS_DMASYNC_PREREAD);
678 
679 	return (0);
680 }
681 
682 /*
683  * Initialize a jumbo receive ring descriptor. This allocates
684  * a 9K (MJUM9BYTES) mbuf cluster to back the descriptor.
685  */
686 static int
687 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
688 {
689 	bus_dma_segment_t segs[BGE_NSEG_JUMBO];
690 	struct bge_extrx_bd *r;
691 	struct mbuf *m_new = NULL;
692 	int nsegs;
693 	int error;
694 
695 	if (m == NULL) {
696 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
697 		if (m_new == NULL)
698 			return (ENOBUFS);
699 
700 		m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
701 		if (!(m_new->m_flags & M_EXT)) {
702 			m_freem(m_new);
703 			return (ENOBUFS);
704 		}
705 		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
706 	} else {
707 		m_new = m;
708 		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
709 		m_new->m_data = m_new->m_ext.ext_buf;
710 	}
711 
712 	if (!sc->bge_rx_alignment_bug)
713 		m_adj(m_new, ETHER_ALIGN);
714 
715 	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
716 	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
717 	    m_new, segs, &nsegs, BUS_DMA_NOWAIT);
718 	if (error) {
719 		if (m == NULL)
720 			m_freem(m_new);
721 		return (error);
722 	}
723 	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
724 
725 	/*
726 	 * Fill in the extended RX buffer descriptor.
727 	 */
728 	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
729 	r->bge_flags = BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END;
730 	r->bge_idx = i;
731 	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
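	/*
	 * Fill in one address/length pair per DMA segment; the cases
	 * fall through intentionally so a mapping with N segments
	 * initializes slots N-1 down to 0.
	 */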
732 	switch (nsegs) {
733 	case 4:
734 		r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
735 		r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
736 		r->bge_len3 = segs[3].ds_len;
737 	case 3:
738 		r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
739 		r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
740 		r->bge_len2 = segs[2].ds_len;
741 	case 2:
742 		r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
743 		r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
744 		r->bge_len1 = segs[1].ds_len;
745 	case 1:
746 		r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
747 		r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
748 		r->bge_len0 = segs[0].ds_len;
749 		break;
750 	default:
751 		panic("%s: %d segments\n", __func__, nsegs);
752 	}
753 
754 	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
755 	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
756 	    BUS_DMASYNC_PREREAD);
757 
758 	return (0);
759 }
760 
761 /*
762  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
763  * that's 1MB of memory, which is a lot. For now, we fill only the first
764  * 256 ring entries and hope that our CPU is fast enough to keep up with
765  * the NIC.
766  */
767 static int
768 bge_init_rx_ring_std(struct bge_softc *sc)
769 {
770 	int i;
771 
772 	for (i = 0; i < BGE_SSLOTS; i++) {
773 		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
774 			return (ENOBUFS);
775 	}
776 
777 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
778 	    sc->bge_cdata.bge_rx_std_ring_map,
779 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
780 
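	/* Advertise the newly posted buffers via the producer index mailbox. */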
781 	sc->bge_std = i - 1;
782 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
783 
784 	return (0);
785 }
786 
787 static void
788 bge_free_rx_ring_std(struct bge_softc *sc)
789 {
790 	int i;
791 
792 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
793 		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
794 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
795 			    sc->bge_cdata.bge_rx_std_dmamap[i],
796 			    BUS_DMASYNC_POSTREAD);
797 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
798 			    sc->bge_cdata.bge_rx_std_dmamap[i]);
799 			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
800 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
801 		}
802 		bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
803 		    sizeof(struct bge_rx_bd));
804 	}
805 }
806 
807 static int
808 bge_init_rx_ring_jumbo(struct bge_softc *sc)
809 {
810 	struct bge_rcb *rcb;
811 	int i;
812 
813 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
814 		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
815 			return (ENOBUFS);
816 	}
817 
818 	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
819 	    sc->bge_cdata.bge_rx_jumbo_ring_map,
820 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
821 
822 	sc->bge_jumbo = i - 1;
823 
824 	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
825 	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
826 				    BGE_RCB_FLAG_USE_EXT_RX_BD);
827 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
828 
829 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
830 
831 	return (0);
832 }
833 
834 static void
835 bge_free_rx_ring_jumbo(struct bge_softc *sc)
836 {
837 	int i;
838 
839 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
840 		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
841 			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
842 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
843 			    BUS_DMASYNC_POSTREAD);
844 			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
845 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
846 			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
847 			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
848 		}
849 		bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
850 		    sizeof(struct bge_extrx_bd));
851 	}
852 }
853 
854 static void
855 bge_free_tx_ring(struct bge_softc *sc)
856 {
857 	int i;
858 
859 	if (sc->bge_ldata.bge_tx_ring == NULL)
860 		return;
861 
862 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
863 		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
864 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
865 			    sc->bge_cdata.bge_tx_dmamap[i],
866 			    BUS_DMASYNC_POSTWRITE);
867 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
868 			    sc->bge_cdata.bge_tx_dmamap[i]);
869 			m_freem(sc->bge_cdata.bge_tx_chain[i]);
870 			sc->bge_cdata.bge_tx_chain[i] = NULL;
871 		}
872 		bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
873 		    sizeof(struct bge_tx_bd));
874 	}
875 }
876 
877 static int
878 bge_init_tx_ring(struct bge_softc *sc)
879 {
880 	sc->bge_txcnt = 0;
881 	sc->bge_tx_saved_considx = 0;
882 
883 	/* Initialize transmit producer index for host-memory send ring. */
884 	sc->bge_tx_prodidx = 0;
885 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
886 
887 	/* 5700 b2 errata */
888 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
889 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
890 
891 	/* NIC-memory send ring not used; initialize to zero. */
892 	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
893 	/* 5700 b2 errata */
894 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
895 		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
896 
897 	return (0);
898 }
899 
900 static void
901 bge_setmulti(struct bge_softc *sc)
902 {
903 	struct ifnet *ifp;
904 	struct ifmultiaddr *ifma;
905 	uint32_t hashes[4] = { 0, 0, 0, 0 };
906 	int h, i;
907 
908 	BGE_LOCK_ASSERT(sc);
909 
910 	ifp = sc->bge_ifp;
911 
912 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
913 		for (i = 0; i < 4; i++)
914 			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
915 		return;
916 	}
917 
918 	/* First, zot all the existing filters. */
919 	for (i = 0; i < 4; i++)
920 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
921 
922 	/* Now program new ones. */
923 	IF_ADDR_LOCK(ifp);
924 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
925 		if (ifma->ifma_addr->sa_family != AF_LINK)
926 			continue;
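		/*
		 * Hash on the low 7 bits of the little-endian CRC of the
		 * address: bits 6-5 select one of the four 32-bit hash
		 * registers, bits 4-0 select the bit within it.
		 */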
927 		h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
928 		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
929 		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
930 	}
931 	IF_ADDR_UNLOCK(ifp);
932 
933 	for (i = 0; i < 4; i++)
934 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
935 }
936 
937 /*
938  * Do endian, PCI and DMA initialization. Also check the on-board ROM
939  * self-test results.
940  */
941 static int
942 bge_chipinit(struct bge_softc *sc)
943 {
944 	uint32_t dma_rw_ctl;
945 	int i;
946 
947 	/* Set endian type before we access any non-PCI registers. */
948 	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
949 
950 	/*
951 	 * Check the 'ROM failed' bit on the RX CPU to see if
952 	 * self-tests passed.
953 	 */
954 	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
955 		device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
956 		return (ENODEV);
957 	}
958 
959 	/* Clear the MAC control register */
960 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
961 
962 	/*
963 	 * Clear the MAC statistics block in the NIC's
964 	 * internal memory.
965 	 */
966 	for (i = BGE_STATS_BLOCK;
967 	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
968 		BGE_MEMWIN_WRITE(sc, i, 0);
969 
970 	for (i = BGE_STATUS_BLOCK;
971 	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
972 		BGE_MEMWIN_WRITE(sc, i, 0);
973 
974 	/* Set up the PCI DMA control register. */
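	/*
	 * The read/write watermark encoding (and the need for the
	 * one-DMA-at-a-time workaround) depends on whether the chip is
	 * on a PCI Express, conventional PCI or PCI-X bus.
	 */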
975 	if (sc->bge_pcie) {
976 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
977 		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
978 		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
979 	} else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
980 	    BGE_PCISTATE_PCI_BUSMODE) {
981 		/* Conventional PCI bus */
982 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
983 		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
984 		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
985 		    (0x0F);
986 	} else {
987 		/* PCI-X bus */
988 		/*
989 		 * The 5704 uses a different encoding of read/write
990 		 * watermarks.
991 		 */
992 		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
993 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
994 			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
995 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
996 		else
997 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
998 			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
999 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1000 			    (0x0F);
1001 
1002 		/*
1003 		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1004 		 * for hardware bugs.
1005 		 */
1006 		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1007 		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1008 			uint32_t tmp;
1009 
1010 			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1011 			if (tmp == 0x6 || tmp == 0x7)
1012 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1013 		}
1014 	}
1015 
1016 	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1017 	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1018 	    sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1019 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
1020 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1021 	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1022 
1023 	/*
1024 	 * Set up general mode register.
1025 	 */
1026 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1027 	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1028 	    BGE_MODECTL_TX_NO_PHDR_CSUM);
1029 
1030 	/*
1031 	 * Disable memory write invalidate.  Apparently it is not supported
1032 	 * properly by these devices.
1033 	 */
1034 	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1035 
1036 #ifdef __brokenalpha__
1037 	/*
1038 	 * Must ensure that we do not cross an 8K (bytes) boundary
1039 	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
1040 	 * restriction on some ALPHA platforms with early revision
1041 	 * 21174 PCI chipsets, such as the AlphaPC 164lx
1042 	 */
1043 	PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1044 	    BGE_PCI_READ_BNDRY_1024BYTES, 4);
1045 #endif
1046 
1047 	/* Set the timer prescaler (always 66MHz) */
1048 	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1049 
1050 	return (0);
1051 }
1052 
1053 static int
1054 bge_blockinit(struct bge_softc *sc)
1055 {
1056 	struct bge_rcb *rcb;
1057 	bus_size_t vrcb;
1058 	bge_hostaddr taddr;
1059 	int i;
1060 
1061 	/*
1062 	 * Initialize the memory window pointer register so that
1063 	 * we can access the first 32K of internal NIC RAM. This will
1064 	 * allow us to set up the TX send ring RCBs and the RX return
1065 	 * ring RCBs, plus other things which live in NIC memory.
1066 	 */
1067 	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1068 
1069 	/* Note: the BCM5704 has a smaller mbuf space than other chips. */
1070 
1071 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1072 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1073 		/* Configure mbuf memory pool */
1074 		if (sc->bge_extram) {
1075 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1076 			    BGE_EXT_SSRAM);
1077 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1078 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1079 			else
1080 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1081 		} else {
1082 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1083 			    BGE_BUFFPOOL_1);
1084 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1085 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1086 			else
1087 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1088 		}
1089 
1090 		/* Configure DMA resource pool */
1091 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1092 		    BGE_DMA_DESCRIPTORS);
1093 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1094 	}
1095 
1096 	/* Configure mbuf pool watermarks */
1097 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1098 	    sc->bge_asicrev == BGE_ASICREV_BCM5750) {
1099 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1100 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1101 	} else {
1102 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1103 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1104 	}
1105 	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1106 
1107 	/* Configure DMA resource watermarks */
1108 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1109 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1110 
1111 	/* Enable buffer manager */
1112 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1113 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1114 		CSR_WRITE_4(sc, BGE_BMAN_MODE,
1115 		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1116 
1117 		/* Poll for buffer manager start indication */
1118 		for (i = 0; i < BGE_TIMEOUT; i++) {
1119 			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1120 				break;
1121 			DELAY(10);
1122 		}
1123 
1124 		if (i == BGE_TIMEOUT) {
1125 			device_printf(sc->bge_dev,
1126 			    "buffer manager failed to start\n");
1127 			return (ENXIO);
1128 		}
1129 	}
1130 
1131 	/* Enable flow-through queues */
1132 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1133 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1134 
1135 	/* Wait until queue initialization is complete */
1136 	for (i = 0; i < BGE_TIMEOUT; i++) {
1137 		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1138 			break;
1139 		DELAY(10);
1140 	}
1141 
1142 	if (i == BGE_TIMEOUT) {
1143 		device_printf(sc->bge_dev, "flow-through queue init failed\n");
1144 		return (ENXIO);
1145 	}
1146 
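	/*
	 * Each ring is described to the chip by a ring control block
	 * (RCB): a 64-bit host address, a packed maximum length/flags
	 * word and, for rings that live in NIC memory, a NIC address.
	 */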
1147 	/* Initialize the standard RX ring control block */
1148 	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1149 	rcb->bge_hostaddr.bge_addr_lo =
1150 	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1151 	rcb->bge_hostaddr.bge_addr_hi =
1152 	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1153 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1154 	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1155 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1156 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
1157 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1158 	else
1159 		rcb->bge_maxlen_flags =
1160 		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1161 	if (sc->bge_extram)
1162 		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1163 	else
1164 		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1165 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1166 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1167 
1168 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1169 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1170 
1171 	/*
1172 	 * Initialize the jumbo RX ring control block
1173 	 * We set the 'ring disabled' bit in the flags
1174 	 * field until we're actually ready to start
1175 	 * using this ring (i.e. once we set the MTU
1176 	 * high enough to require it).
1177 	 */
1178 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1179 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1180 		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1181 
1182 		rcb->bge_hostaddr.bge_addr_lo =
1183 		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1184 		rcb->bge_hostaddr.bge_addr_hi =
1185 		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1186 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1187 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
1188 		    BUS_DMASYNC_PREREAD);
1189 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1190 		    BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED);
1191 		if (sc->bge_extram)
1192 			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1193 		else
1194 			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1195 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1196 		    rcb->bge_hostaddr.bge_addr_hi);
1197 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1198 		    rcb->bge_hostaddr.bge_addr_lo);
1199 
1200 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1201 		    rcb->bge_maxlen_flags);
1202 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1203 
1204 		/* Set up dummy disabled mini ring RCB */
1205 		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1206 		rcb->bge_maxlen_flags =
1207 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1208 		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1209 		    rcb->bge_maxlen_flags);
1210 	}
1211 
1212 	/*
1213 	 * Set the BD ring replenish thresholds. The recommended
1214 	 * values are 1/8th the number of descriptors allocated to
1215 	 * each ring.
1216 	 */
1217 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1218 	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1219 
1220 	/*
1221 	 * Disable all unused send rings by setting the 'ring disabled'
1222 	 * bit in the flags field of all the TX send ring control blocks.
1223 	 * These are located in NIC memory.
1224 	 */
1225 	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1226 	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1227 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1228 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1229 		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1230 		vrcb += sizeof(struct bge_rcb);
1231 	}
1232 
1233 	/* Configure TX RCB 0 (we use only the first ring) */
1234 	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1235 	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1236 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1237 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1238 	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1239 	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1240 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1241 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1242 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1243 		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1244 
1245 	/* Disable all unused RX return rings */
1246 	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1247 	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1248 		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1249 		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1250 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1251 		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1252 		    BGE_RCB_FLAG_RING_DISABLED));
1253 		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1254 		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1255 		    (i * (sizeof(uint64_t))), 0);
1256 		vrcb += sizeof(struct bge_rcb);
1257 	}
1258 
1259 	/* Initialize RX ring indexes */
1260 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1261 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1262 	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1263 
1264 	/*
1265 	 * Set up RX return ring 0
1266 	 * Note that the NIC address for RX return rings is 0x00000000.
1267 	 * The return rings live entirely within the host, so the
1268 	 * nicaddr field in the RCB isn't used.
1269 	 */
1270 	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1271 	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1272 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1273 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1274 	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1275 	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1276 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1277 
1278 	/* Set random backoff seed for TX */
1279 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1280 	    IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1281 	    IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1282 	    IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1283 	    BGE_TX_BACKOFF_SEED_MASK);
1284 
1285 	/* Set inter-packet gap */
1286 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1287 
1288 	/*
1289 	 * Specify which ring to use for packets that don't match
1290 	 * any RX rules.
1291 	 */
1292 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1293 
1294 	/*
1295 	 * Configure number of RX lists. One interrupt distribution
1296 	 * list, sixteen active lists, one bad frames class.
1297 	 */
1298 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1299 
1300 	/* Initialize RX list placement stats mask. */
1301 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1302 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1303 
1304 	/* Disable host coalescing until we get it set up */
1305 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1306 
1307 	/* Poll to make sure it's shut down. */
1308 	for (i = 0; i < BGE_TIMEOUT; i++) {
1309 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1310 			break;
1311 		DELAY(10);
1312 	}
1313 
1314 	if (i == BGE_TIMEOUT) {
1315 		device_printf(sc->bge_dev,
1316 		    "host coalescing engine failed to idle\n");
1317 		return (ENXIO);
1318 	}
1319 
1320 	/* Set up host coalescing defaults */
1321 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1322 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1323 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1324 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1325 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1326 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1327 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1328 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1329 	}
1330 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1331 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1332 
1333 	/* Set up address of statistics block */
1334 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1335 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1336 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1337 		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1338 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1339 		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1340 		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1341 		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1342 		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1343 	}
1344 
1345 	/* Set up address of status block */
1346 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1347 	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1348 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1349 	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1350 	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1351 	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1352 
1353 	/* Turn on host coalescing state machine */
1354 	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1355 
1356 	/* Turn on RX BD completion state machine and enable attentions */
1357 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
1358 	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1359 
1360 	/* Turn on RX list placement state machine */
1361 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1362 
1363 	/* Turn on RX list selector state machine. */
1364 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1365 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1366 		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1367 
1368 	/* Turn on DMA, clear stats */
1369 	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1370 	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1371 	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1372 	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1373 	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1374 
1375 	/* Set misc. local control, enable interrupts on attentions */
1376 	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1377 
1378 #ifdef notdef
1379 	/* Assert GPIO pins for PHY reset */
1380 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1381 	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1382 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1383 	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1384 #endif
1385 
1386 	/* Turn on DMA completion state machine */
1387 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1388 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1389 		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1390 
1391 	/* Turn on write DMA state machine */
1392 	CSR_WRITE_4(sc, BGE_WDMA_MODE,
1393 	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1394 
1395 	/* Turn on read DMA state machine */
1396 	CSR_WRITE_4(sc, BGE_RDMA_MODE,
1397 	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1398 
1399 	/* Turn on RX data completion state machine */
1400 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1401 
1402 	/* Turn on RX BD initiator state machine */
1403 	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1404 
1405 	/* Turn on RX data and RX BD initiator state machine */
1406 	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1407 
1408 	/* Turn on Mbuf cluster free state machine */
1409 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1410 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1411 		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1412 
1413 	/* Turn on send BD completion state machine */
1414 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1415 
1416 	/* Turn on send data completion state machine */
1417 	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1418 
1419 	/* Turn on send data initiator state machine */
1420 	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1421 
1422 	/* Turn on send BD initiator state machine */
1423 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1424 
1425 	/* Turn on send BD selector state machine */
1426 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1427 
1428 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1429 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1430 	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1431 
1432 	/* ack/clear link change events */
1433 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1434 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1435 	    BGE_MACSTAT_LINK_CHANGED);
1436 	CSR_WRITE_4(sc, BGE_MI_STS, 0);
1437 
1438 	/* Enable PHY auto polling (for MII/GMII only) */
1439 	if (sc->bge_tbi) {
1440 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1441 	} else {
1442 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1443 		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1444 		    sc->bge_chipid != BGE_CHIPID_BCM5700_B1)
1445 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1446 			    BGE_EVTENB_MI_INTERRUPT);
1447 	}
1448 
1449 	/*
1450 	 * Clear any pending link state attention.
1451 	 * Otherwise some link state change events may be lost until attention
1452 	 * is cleared by bge_intr() -> bge_link_upd() sequence.
1453 	 * It's not necessary on newer BCM chips - perhaps enabling link
1454 	 * state change attentions implies clearing pending attention.
1455 	 */
1456 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1457 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1458 	    BGE_MACSTAT_LINK_CHANGED);
1459 
1460 	/* Enable link state change attentions. */
1461 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1462 
1463 	return (0);
1464 }
1465 
1466 /*
1467  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1468  * against our list and return its name if we find a match. Note
1469  * that since the Broadcom controller contains VPD support, we
1470  * can get the device name string from the controller itself instead
1471  * of the compiled-in string. This is a little slow, but it guarantees
1472  * we'll always announce the right product name.
1473  */
1474 static int
1475 bge_probe(device_t dev)
1476 {
1477 	struct bge_type *t;
1478 	struct bge_softc *sc;
1479 	char *descbuf;
1480 
1481 	t = bge_devs;
1482 
1483 	sc = device_get_softc(dev);
1484 	bzero(sc, sizeof(struct bge_softc));
1485 	sc->bge_dev = dev;
1486 
1487 	while(t->bge_name != NULL) {
1488 		if ((pci_get_vendor(dev) == t->bge_vid) &&
1489 		    (pci_get_device(dev) == t->bge_did)) {
1490 #ifdef notdef
1491 			bge_vpd_read(sc);
1492 			device_set_desc(dev, sc->bge_vpd_prodname);
1493 #endif
1494 			descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
1495 			if (descbuf == NULL)
1496 				return (ENOMEM);
1497 			snprintf(descbuf, BGE_DEVDESC_MAX,
1498 			    "%s, ASIC rev. %#04x", t->bge_name,
1499 			    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
1500 			device_set_desc_copy(dev, descbuf);
1501 			if (pci_get_subvendor(dev) == DELL_VENDORID)
1502 				sc->bge_no_3_led = 1;
1503 			free(descbuf, M_TEMP);
1504 			return (0);
1505 		}
1506 		t++;
1507 	}
1508 
1509 	return (ENXIO);
1510 }
1511 
1512 static void
1513 bge_dma_free(struct bge_softc *sc)
1514 {
1515 	int i;
1516 
1517 	/* Destroy DMA maps for RX buffers. */
1518 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1519 		if (sc->bge_cdata.bge_rx_std_dmamap[i])
1520 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1521 			    sc->bge_cdata.bge_rx_std_dmamap[i]);
1522 	}
1523 
1524 	/* Destroy DMA maps for jumbo RX buffers. */
1525 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1526 		if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1527 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1528 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1529 	}
1530 
1531 	/* Destroy DMA maps for TX buffers. */
1532 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1533 		if (sc->bge_cdata.bge_tx_dmamap[i])
1534 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1535 			    sc->bge_cdata.bge_tx_dmamap[i]);
1536 	}
1537 
1538 	if (sc->bge_cdata.bge_mtag)
1539 		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
1540 
1541 
1542 	/* Destroy standard RX ring. */
1543 	if (sc->bge_cdata.bge_rx_std_ring_map)
1544 		bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1545 		    sc->bge_cdata.bge_rx_std_ring_map);
1546 	if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
1547 		bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1548 		    sc->bge_ldata.bge_rx_std_ring,
1549 		    sc->bge_cdata.bge_rx_std_ring_map);
1550 
1551 	if (sc->bge_cdata.bge_rx_std_ring_tag)
1552 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1553 
1554 	/* Destroy jumbo RX ring. */
1555 	if (sc->bge_cdata.bge_rx_jumbo_ring_map)
1556 		bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1557 		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1558 
1559 	if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
1560 	    sc->bge_ldata.bge_rx_jumbo_ring)
1561 		bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1562 		    sc->bge_ldata.bge_rx_jumbo_ring,
1563 		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1564 
1565 	if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1566 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1567 
1568 	/* Destroy RX return ring. */
1569 	if (sc->bge_cdata.bge_rx_return_ring_map)
1570 		bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1571 		    sc->bge_cdata.bge_rx_return_ring_map);
1572 
1573 	if (sc->bge_cdata.bge_rx_return_ring_map &&
1574 	    sc->bge_ldata.bge_rx_return_ring)
1575 		bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1576 		    sc->bge_ldata.bge_rx_return_ring,
1577 		    sc->bge_cdata.bge_rx_return_ring_map);
1578 
1579 	if (sc->bge_cdata.bge_rx_return_ring_tag)
1580 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
1581 
1582 	/* Destroy TX ring. */
1583 	if (sc->bge_cdata.bge_tx_ring_map)
1584 		bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
1585 		    sc->bge_cdata.bge_tx_ring_map);
1586 
1587 	if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
1588 		bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
1589 		    sc->bge_ldata.bge_tx_ring,
1590 		    sc->bge_cdata.bge_tx_ring_map);
1591 
1592 	if (sc->bge_cdata.bge_tx_ring_tag)
1593 		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
1594 
1595 	/* Destroy status block. */
1596 	if (sc->bge_cdata.bge_status_map)
1597 		bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
1598 		    sc->bge_cdata.bge_status_map);
1599 
1600 	if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
1601 		bus_dmamem_free(sc->bge_cdata.bge_status_tag,
1602 		    sc->bge_ldata.bge_status_block,
1603 		    sc->bge_cdata.bge_status_map);
1604 
1605 	if (sc->bge_cdata.bge_status_tag)
1606 		bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1607 
1608 	/* Destroy statistics block. */
1609 	if (sc->bge_cdata.bge_stats_map)
1610 		bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
1611 		    sc->bge_cdata.bge_stats_map);
1612 
1613 	if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
1614 		bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
1615 		    sc->bge_ldata.bge_stats,
1616 		    sc->bge_cdata.bge_stats_map);
1617 
1618 	if (sc->bge_cdata.bge_stats_tag)
1619 		bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1620 
1621 	/* Destroy the parent tag. */
1622 	if (sc->bge_cdata.bge_parent_tag)
1623 		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1624 }
1625 
1626 static int
1627 bge_dma_alloc(device_t dev)
1628 {
1629 	struct bge_dmamap_arg ctx;
1630 	struct bge_softc *sc;
1631 	int i, error;
1632 
1633 	sc = device_get_softc(dev);
1634 
1635 	/*
1636 	 * Allocate the parent bus DMA tag appropriate for PCI.
1637 	 */
1638 	error = bus_dma_tag_create(NULL,	/* parent */
1639 			PAGE_SIZE, 0,		/* alignment, boundary */
1640 			BUS_SPACE_MAXADDR,	/* lowaddr */
1641 			BUS_SPACE_MAXADDR,	/* highaddr */
1642 			NULL, NULL,		/* filter, filterarg */
1643 			MAXBSIZE, BGE_NSEG_NEW,	/* maxsize, nsegments */
1644 			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1645 			0,			/* flags */
1646 			NULL, NULL,		/* lockfunc, lockarg */
1647 			&sc->bge_cdata.bge_parent_tag);
1648 
1649 	if (error != 0) {
1650 		device_printf(sc->bge_dev,
1651 		    "could not allocate parent dma tag\n");
1652 		return (ENOMEM);
1653 	}
1654 
1655 	/*
1656 	 * Create tag for RX mbufs.
1657 	 */
1658 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
1659 	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1660 	    NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
1661 	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
1662 
1663 	if (error) {
1664 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1665 		return (ENOMEM);
1666 	}
1667 
1668 	/* Create DMA maps for RX buffers. */
1669 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1670 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1671 			    &sc->bge_cdata.bge_rx_std_dmamap[i]);
1672 		if (error) {
1673 			device_printf(sc->bge_dev,
1674 			    "can't create DMA map for RX\n");
1675 			return (ENOMEM);
1676 		}
1677 	}
1678 
1679 	/* Create DMA maps for TX buffers. */
1680 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1681 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1682 			    &sc->bge_cdata.bge_tx_dmamap[i]);
1683 		if (error) {
1684 			device_printf(sc->bge_dev,
1685 			    "can't create DMA map for TX\n");
1686 			return (ENOMEM);
1687 		}
1688 	}
1689 
1690 	/* Create tag for standard RX ring. */
1691 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1692 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1693 	    NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1694 	    NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1695 
1696 	if (error) {
1697 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1698 		return (ENOMEM);
1699 	}
1700 
1701 	/* Allocate DMA'able memory for standard RX ring. */
1702 	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1703 	    (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1704 	    &sc->bge_cdata.bge_rx_std_ring_map);
1705 	if (error)
1706 		return (ENOMEM);
1707 
1708 	bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1709 
1710 	/* Load the address of the standard RX ring. */
1711 	ctx.bge_maxsegs = 1;
1712 	ctx.sc = sc;
1713 
1714 	error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
1715 	    sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
1716 	    BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1717 
1718 	if (error)
1719 		return (ENOMEM);
1720 
1721 	sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
1722 
1723 	/* Create tags for jumbo mbufs. */
1724 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1725 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1726 		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1727 		    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1728 		    NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
1729 		    0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
1730 		if (error) {
1731 			device_printf(sc->bge_dev,
1732 			    "could not allocate jumbo dma tag\n");
1733 			return (ENOMEM);
1734 		}
1735 
1736 		/* Create tag for jumbo RX ring. */
1737 		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1738 		    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1739 		    NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
1740 		    NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
1741 
1742 		if (error) {
1743 			device_printf(sc->bge_dev,
1744 			    "could not allocate jumbo ring dma tag\n");
1745 			return (ENOMEM);
1746 		}
1747 
1748 		/* Allocate DMA'able memory for jumbo RX ring. */
1749 		error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1750 		    (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
1751 		    BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1752 		    &sc->bge_cdata.bge_rx_jumbo_ring_map);
1753 		if (error)
1754 			return (ENOMEM);
1755 
1756 		/* Load the address of the jumbo RX ring. */
1757 		ctx.bge_maxsegs = 1;
1758 		ctx.sc = sc;
1759 
1760 		error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1761 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
1762 		    sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
1763 		    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1764 
1765 		if (error)
1766 			return (ENOMEM);
1767 
1768 		sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
1769 
1770 		/* Create DMA maps for jumbo RX buffers. */
1771 		for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1772 			error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
1773 				    0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1774 			if (error) {
1775 				device_printf(sc->bge_dev,
1776 				    "can't create DMA map for jumbo RX\n");
1777 				return (ENOMEM);
1778 			}
1779 		}
1780 
1781 	}
1782 
1783 	/* Create tag for RX return ring. */
1784 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1785 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1786 	    NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
1787 	    NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
1788 
1789 	if (error) {
1790 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1791 		return (ENOMEM);
1792 	}
1793 
1794 	/* Allocate DMA'able memory for RX return ring. */
1795 	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
1796 	    (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
1797 	    &sc->bge_cdata.bge_rx_return_ring_map);
1798 	if (error)
1799 		return (ENOMEM);
1800 
1801 	bzero((char *)sc->bge_ldata.bge_rx_return_ring,
1802 	    BGE_RX_RTN_RING_SZ(sc));
1803 
1804 	/* Load the address of the RX return ring. */
1805 	ctx.bge_maxsegs = 1;
1806 	ctx.sc = sc;
1807 
1808 	error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
1809 	    sc->bge_cdata.bge_rx_return_ring_map,
1810 	    sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
1811 	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1812 
1813 	if (error)
1814 		return (ENOMEM);
1815 
1816 	sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
1817 
1818 	/* Create tag for TX ring. */
1819 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1820 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1821 	    NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
1822 	    &sc->bge_cdata.bge_tx_ring_tag);
1823 
1824 	if (error) {
1825 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1826 		return (ENOMEM);
1827 	}
1828 
1829 	/* Allocate DMA'able memory for TX ring. */
1830 	error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
1831 	    (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
1832 	    &sc->bge_cdata.bge_tx_ring_map);
1833 	if (error)
1834 		return (ENOMEM);
1835 
1836 	bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1837 
1838 	/* Load the address of the TX ring. */
1839 	ctx.bge_maxsegs = 1;
1840 	ctx.sc = sc;
1841 
1842 	error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
1843 	    sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
1844 	    BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1845 
1846 	if (error)
1847 		return (ENOMEM);
1848 
1849 	sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
1850 
1851 	/* Create tag for status block. */
1852 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1853 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1854 	    NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
1855 	    NULL, NULL, &sc->bge_cdata.bge_status_tag);
1856 
1857 	if (error) {
1858 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1859 		return (ENOMEM);
1860 	}
1861 
1862 	/* Allocate DMA'able memory for status block. */
1863 	error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
1864 	    (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
1865 	    &sc->bge_cdata.bge_status_map);
1866 	if (error)
1867 		return (ENOMEM);
1868 
1869 	bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1870 
1871 	/* Load the address of the status block. */
1872 	ctx.sc = sc;
1873 	ctx.bge_maxsegs = 1;
1874 
1875 	error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
1876 	    sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
1877 	    BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1878 
1879 	if (error)
1880 		return (ENOMEM);
1881 
1882 	sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
1883 
1884 	/* Create tag for statistics block. */
1885 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1886 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1887 	    NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
1888 	    &sc->bge_cdata.bge_stats_tag);
1889 
1890 	if (error) {
1891 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1892 		return (ENOMEM);
1893 	}
1894 
1895 	/* Allocate DMA'able memory for statistics block. */
1896 	error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
1897 	    (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
1898 	    &sc->bge_cdata.bge_stats_map);
1899 	if (error)
1900 		return (ENOMEM);
1901 
1902 	bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
1903 
1904 	/* Load the address of the statistics block. */
1905 	ctx.sc = sc;
1906 	ctx.bge_maxsegs = 1;
1907 
1908 	error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
1909 	    sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
1910 	    BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1911 
1912 	if (error)
1913 		return (ENOMEM);
1914 
1915 	sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
1916 
1917 	return (0);
1918 }
1919 
1920 static int
1921 bge_attach(device_t dev)
1922 {
1923 	struct ifnet *ifp;
1924 	struct bge_softc *sc;
1925 	uint32_t hwcfg = 0;
1926 	uint32_t mac_tmp = 0;
1927 	u_char eaddr[6];
1928 	int error = 0, rid;
1929 
1930 	sc = device_get_softc(dev);
1931 	sc->bge_dev = dev;
1932 
1933 	/*
1934 	 * Map control/status registers.
1935 	 */
1936 	pci_enable_busmaster(dev);
1937 
1938 	rid = BGE_PCI_BAR0;
1939 	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1940 	    RF_ACTIVE|PCI_RF_DENSE);
1941 
1942 	if (sc->bge_res == NULL) {
1943 		device_printf(sc->bge_dev, "couldn't map memory\n");
1944 		error = ENXIO;
1945 		goto fail;
1946 	}
1947 
1948 	sc->bge_btag = rman_get_bustag(sc->bge_res);
1949 	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
1950 
1951 	/* Allocate interrupt. */
1952 	rid = 0;
1953 
1954 	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1955 	    RF_SHAREABLE | RF_ACTIVE);
1956 
1957 	if (sc->bge_irq == NULL) {
1958 		device_printf(sc->bge_dev, "couldn't map interrupt\n");
1959 		error = ENXIO;
1960 		goto fail;
1961 	}
1962 
1963 	BGE_LOCK_INIT(sc, device_get_nameunit(dev));
1964 
1965 	/* Save ASIC rev. */
1966 
1967 	sc->bge_chipid =
1968 	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1969 	    BGE_PCIMISCCTL_ASICREV;
1970 	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
1971 	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
1972 
1973 	/*
1974 	 * Treat the 5714 and the 5752 like the 5750 until we have more info
1975 	 * on this chip.
1976 	 */
1977 	if (sc->bge_asicrev == BGE_ASICREV_BCM5714 ||
1978 	    sc->bge_asicrev == BGE_ASICREV_BCM5752)
1979 		sc->bge_asicrev = BGE_ASICREV_BCM5750;
1980 
1981 	/*
1982 	 * XXX: Broadcom Linux driver.  Not in specs or errata.
1983 	 * PCI-Express?
1984 	 */
1985 	if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
1986 		uint32_t v;
1987 
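		/*
		 * Bits 15:8 of the MSI capability register hold the "next
		 * capability" pointer; if it points at the expected PCI
		 * Express capability offset, verify the capability ID there.
		 */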
1988 		v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
1989 		if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
1990 			v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
1991 			if ((v & 0xff) == BGE_PCIE_CAPID)
1992 				sc->bge_pcie = 1;
1993 		}
1994 	}
1995 
1996 	/* Try to reset the chip. */
1997 	bge_reset(sc);
1998 
1999 	if (bge_chipinit(sc)) {
2000 		device_printf(sc->bge_dev, "chip initialization failed\n");
2001 		bge_release_resources(sc);
2002 		error = ENXIO;
2003 		goto fail;
2004 	}
2005 
2006 	/*
2007 	 * Get station address from NIC memory or, failing that, the EEPROM.
2008 	 */
2009 	mac_tmp = bge_readmem_ind(sc, 0x0c14);
2010 	if ((mac_tmp >> 16) == 0x484b) {
2011 		eaddr[0] = (u_char)(mac_tmp >> 8);
2012 		eaddr[1] = (u_char)mac_tmp;
2013 		mac_tmp = bge_readmem_ind(sc, 0x0c18);
2014 		eaddr[2] = (u_char)(mac_tmp >> 24);
2015 		eaddr[3] = (u_char)(mac_tmp >> 16);
2016 		eaddr[4] = (u_char)(mac_tmp >> 8);
2017 		eaddr[5] = (u_char)mac_tmp;
2018 	} else if (bge_read_eeprom(sc, eaddr,
2019 	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2020 		device_printf(sc->bge_dev, "failed to read station address\n");
2021 		bge_release_resources(sc);
2022 		error = ENXIO;
2023 		goto fail;
2024 	}
2025 
2026 	/* 5705 limits RX return ring to 512 entries. */
2027 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2028 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
2029 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2030 	else
2031 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2032 
2033 	if (bge_dma_alloc(dev)) {
2034 		device_printf(sc->bge_dev,
2035 		    "failed to allocate DMA resources\n");
2036 		bge_release_resources(sc);
2037 		error = ENXIO;
2038 		goto fail;
2039 	}
2040 
2041 	/* Set default tuneable values. */
2042 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2043 	sc->bge_rx_coal_ticks = 150;
2044 	sc->bge_tx_coal_ticks = 150;
2045 	sc->bge_rx_max_coal_bds = 64;
2046 	sc->bge_tx_max_coal_bds = 128;
2047 
2048 	/* Set up ifnet structure */
2049 	ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2050 	if (ifp == NULL) {
2051 		device_printf(sc->bge_dev, "failed to if_alloc()\n");
2052 		bge_release_resources(sc);
2053 		error = ENXIO;
2054 		goto fail;
2055 	}
2056 	ifp->if_softc = sc;
2057 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2058 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2059 	ifp->if_ioctl = bge_ioctl;
2060 	ifp->if_start = bge_start;
2061 	ifp->if_watchdog = bge_watchdog;
2062 	ifp->if_init = bge_init;
2063 	ifp->if_mtu = ETHERMTU;
2064 	ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2065 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2066 	IFQ_SET_READY(&ifp->if_snd);
2067 	ifp->if_hwassist = BGE_CSUM_FEATURES;
2068 	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2069 	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM;
2070 	ifp->if_capenable = ifp->if_capabilities;
2071 #ifdef DEVICE_POLLING
2072 	ifp->if_capabilities |= IFCAP_POLLING;
2073 #endif
2074 
2075 	/*
2076 	 * 5700 B0 chips do not support checksumming correctly due
2077 	 * to hardware bugs.
2078 	 */
2079 	if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2080 		ifp->if_capabilities &= ~IFCAP_HWCSUM;
2081 		ifp->if_capenable &= ~IFCAP_HWCSUM;
2082 		ifp->if_hwassist = 0;
2083 	}
2084 
2085 	/*
2086 	 * Figure out what sort of media we have by checking the
2087 	 * hardware config word in the first 32k of NIC internal memory,
2088 	 * or fall back to examining the EEPROM if necessary.
2089 	 * Note: on some BCM5700 cards, this value appears to be unset.
2090 	 * If that's the case, we have to rely on identifying the NIC
2091 	 * by its PCI subsystem ID, as we do below for the SysKonnect
2092 	 * SK-9D41.
2093 	 */
2094 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2095 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2096 	else {
2097 		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2098 		    sizeof(hwcfg))) {
2099 			device_printf(sc->bge_dev, "failed to read EEPROM\n");
2100 			bge_release_resources(sc);
2101 			error = ENXIO;
2102 			goto fail;
2103 		}
2104 		hwcfg = ntohl(hwcfg);
2105 	}
2106 
2107 	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2108 		sc->bge_tbi = 1;
2109 
2110 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
2111 	if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2112 		sc->bge_tbi = 1;
2113 
2114 	if (sc->bge_tbi) {
2115 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2116 		    bge_ifmedia_upd, bge_ifmedia_sts);
2117 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2118 		ifmedia_add(&sc->bge_ifmedia,
2119 		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2120 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2121 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2122 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2123 	} else {
2124 		/*
2125 		 * Do transceiver setup.
2126 		 */
2127 		if (mii_phy_probe(dev, &sc->bge_miibus,
2128 		    bge_ifmedia_upd, bge_ifmedia_sts)) {
2129 			device_printf(sc->bge_dev, "MII without any PHY!\n");
2130 			bge_release_resources(sc);
2131 			error = ENXIO;
2132 			goto fail;
2133 		}
2134 	}
2135 
2136 	/*
2137 	 * When using the BCM5701 in PCI-X mode, data corruption has
2138 	 * been observed in the first few bytes of some received packets.
2139 	 * Aligning the packet buffer in memory eliminates the corruption.
2140 	 * Unfortunately, this misaligns the packet payloads.  On platforms
2141 	 * which do not support unaligned accesses, we will realign the
2142 	 * payloads by copying the received packets.
2143 	 */
2144 	switch (sc->bge_chipid) {
2145 	case BGE_CHIPID_BCM5701_A0:
2146 	case BGE_CHIPID_BCM5701_B0:
2147 	case BGE_CHIPID_BCM5701_B2:
2148 	case BGE_CHIPID_BCM5701_B5:
2149 		/* If in PCI-X mode, work around the alignment bug. */
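		/*
		 * The check below treats "bus-mode bit clear, bus-speed bit
		 * set" in the PCI state register as PCI-X operation.
		 */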
2150 		if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2151 		    (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
2152 		    BGE_PCISTATE_PCI_BUSSPEED)
2153 			sc->bge_rx_alignment_bug = 1;
2154 		break;
2155 	}
2156 
2157 	/*
2158 	 * Call MI attach routine.
2159 	 */
2160 	ether_ifattach(ifp, eaddr);
2161 	callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE);
2162 
2163 	/*
2164 	 * Hookup IRQ last.
2165 	 */
2166 	error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2167 	   bge_intr, sc, &sc->bge_intrhand);
2168 
2169 	if (error) {
2170 		bge_detach(dev);
2171 		device_printf(sc->bge_dev, "couldn't set up irq\n");
2172 	}
2173 
2174 fail:
2175 	return (error);
2176 }
2177 
2178 static int
2179 bge_detach(device_t dev)
2180 {
2181 	struct bge_softc *sc;
2182 	struct ifnet *ifp;
2183 
2184 	sc = device_get_softc(dev);
2185 	ifp = sc->bge_ifp;
2186 
2187 #ifdef DEVICE_POLLING
2188 	if (ifp->if_capenable & IFCAP_POLLING)
2189 		ether_poll_deregister(ifp);
2190 #endif
2191 
2192 	BGE_LOCK(sc);
2193 	bge_stop(sc);
2194 	bge_reset(sc);
2195 	BGE_UNLOCK(sc);
2196 
2197 	ether_ifdetach(ifp);
2198 
2199 	if (sc->bge_tbi) {
2200 		ifmedia_removeall(&sc->bge_ifmedia);
2201 	} else {
2202 		bus_generic_detach(dev);
2203 		device_delete_child(dev, sc->bge_miibus);
2204 	}
2205 
2206 	bge_release_resources(sc);
2207 
2208 	return (0);
2209 }
2210 
2211 static void
2212 bge_release_resources(struct bge_softc *sc)
2213 {
2214 	device_t dev;
2215 
2216 	dev = sc->bge_dev;
2217 
2218 	if (sc->bge_vpd_prodname != NULL)
2219 		free(sc->bge_vpd_prodname, M_DEVBUF);
2220 
2221 	if (sc->bge_vpd_readonly != NULL)
2222 		free(sc->bge_vpd_readonly, M_DEVBUF);
2223 
2224 	if (sc->bge_intrhand != NULL)
2225 		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2226 
2227 	if (sc->bge_irq != NULL)
2228 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2229 
2230 	if (sc->bge_res != NULL)
2231 		bus_release_resource(dev, SYS_RES_MEMORY,
2232 		    BGE_PCI_BAR0, sc->bge_res);
2233 
2234 	if (sc->bge_ifp != NULL)
2235 		if_free(sc->bge_ifp);
2236 
2237 	bge_dma_free(sc);
2238 
2239 	if (mtx_initialized(&sc->bge_mtx))	/* XXX */
2240 		BGE_LOCK_DESTROY(sc);
2241 }
2242 
2243 static void
2244 bge_reset(struct bge_softc *sc)
2245 {
2246 	device_t dev;
2247 	uint32_t cachesize, command, pcistate, reset;
2248 	int i, val = 0;
2249 
2250 	dev = sc->bge_dev;
2251 
2252 	/* Save some important PCI state. */
2253 	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2254 	command = pci_read_config(dev, BGE_PCI_CMD, 4);
2255 	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2256 
2257 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2258 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2259 	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2260 
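	/*
	 * The value written to BGE_MISC_CFG below also sets the 32-bit
	 * timer prescaler field; 65 (0x41) selects a 66 MHz timebase,
	 * matching the value restored after the reset completes.
	 */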
2261 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2262 
2263 	/* XXX: Broadcom Linux driver. */
2264 	if (sc->bge_pcie) {
2265 		if (CSR_READ_4(sc, 0x7e2c) == 0x60)	/* PCIE 1.0 */
2266 			CSR_WRITE_4(sc, 0x7e2c, 0x20);
2267 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2268 			/* Prevent PCIE link training during global reset */
2269 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2270 			reset |= (1<<29);
2271 		}
2272 	}
2273 
2274 	/* Issue global reset */
2275 	bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2276 
2277 	DELAY(1000);
2278 
2279 	/* XXX: Broadcom Linux driver. */
2280 	if (sc->bge_pcie) {
2281 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2282 			uint32_t v;
2283 
2284 			DELAY(500000); /* wait for link training to complete */
2285 			v = pci_read_config(dev, 0xc4, 4);
2286 			pci_write_config(dev, 0xc4, v | (1<<15), 4);
2287 		}
2288 		/* Set PCIE max payload size and clear error status. */
2289 		pci_write_config(dev, 0xd8, 0xf5000, 4);
2290 	}
2291 
2292 	/* Reset some of the PCI state that got zapped by reset. */
2293 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2294 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2295 	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2296 	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2297 	pci_write_config(dev, BGE_PCI_CMD, command, 4);
2298 	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2299 
2300 	/* Enable memory arbiter. */
2301 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2302 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
2303 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2304 
2305 	/*
2306 	 * Prevent PXE restart: write a magic number to the
2307 	 * general communications memory at 0xB50.
2308 	 */
2309 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2310 	/*
2311 	 * Poll the value location we just wrote until
2312 	 * we see the 1's complement of the magic number.
2313 	 * This indicates that the firmware initialization
2314 	 * is complete.
2315 	 */
2316 	for (i = 0; i < BGE_TIMEOUT; i++) {
2317 		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2318 		if (val == ~BGE_MAGIC_NUMBER)
2319 			break;
2320 		DELAY(10);
2321 	}
2322 
2323 	if (i == BGE_TIMEOUT) {
2324 		device_printf(sc->bge_dev, "firmware handshake timed out\n");
2325 		return;
2326 	}
2327 
2328 	/*
2329 	 * XXX Wait for the value of the PCISTATE register to
2330 	 * return to its original pre-reset state. This is a
2331 	 * fairly good indicator of reset completion. If we don't
2332 	 * wait for the reset to fully complete, trying to read
2333 	 * from the device's non-PCI registers may yield garbage
2334 	 * results.
2335 	 */
2336 	for (i = 0; i < BGE_TIMEOUT; i++) {
2337 		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2338 			break;
2339 		DELAY(10);
2340 	}
2341 
2342 	/* Fix up byte swapping. */
2343 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
2344 	    BGE_MODECTL_BYTESWAP_DATA);
2345 
2346 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2347 
2348 	/*
2349 	 * The 5704 in TBI mode apparently needs some special
2350 	 * adjustment to ensure the SERDES drive level is set
2351 	 * to 1.2V.
2352 	 */
2353 	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
2354 		uint32_t serdescfg;
2355 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2356 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
2357 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2358 	}
2359 
2360 	/* XXX: Broadcom Linux driver. */
2361 	if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2362 		uint32_t v;
2363 
2364 		v = CSR_READ_4(sc, 0x7c00);
2365 		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2366 	}
2367 	DELAY(10000);
2368 }
2369 
2370 /*
2371  * Frame reception handling. This is called if there's a frame
2372  * on the receive return list.
2373  *
2374  * Note: we have to be able to handle two possibilities here:
2375  * 1) the frame is from the jumbo receive ring
2376  * 2) the frame is from the standard receive ring
2377  */
2378 
2379 static void
2380 bge_rxeof(struct bge_softc *sc)
2381 {
2382 	struct ifnet *ifp;
2383 	int stdcnt = 0, jumbocnt = 0;
2384 
2385 	BGE_LOCK_ASSERT(sc);
2386 
2387 	/* Nothing to do. */
2388 	if (sc->bge_rx_saved_considx ==
2389 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
2390 		return;
2391 
2392 	ifp = sc->bge_ifp;
2393 
2394 	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2395 	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
2396 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2397 	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
2398 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2399 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2400 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2401 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
2402 		    BUS_DMASYNC_POSTREAD);
2403 	}
2404 
2405 	while (sc->bge_rx_saved_considx !=
2406 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2407 		struct bge_rx_bd	*cur_rx;
2408 		uint32_t		rxidx;
2409 		struct mbuf		*m = NULL;
2410 		uint16_t		vlan_tag = 0;
2411 		int			have_tag = 0;
2412 
2413 #ifdef DEVICE_POLLING
2414 		if (ifp->if_capenable & IFCAP_POLLING) {
2415 			if (sc->rxcycles <= 0)
2416 				break;
2417 			sc->rxcycles--;
2418 		}
2419 #endif
2420 
2421 		cur_rx =
2422 	    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2423 
2424 		rxidx = cur_rx->bge_idx;
2425 		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2426 
2427 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2428 			have_tag = 1;
2429 			vlan_tag = cur_rx->bge_vlan_tag;
2430 		}
2431 
2432 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2433 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2434 			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
2435 			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
2436 			    BUS_DMASYNC_POSTREAD);
2437 			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
2438 			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
2439 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2440 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2441 			jumbocnt++;
2442 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2443 				ifp->if_ierrors++;
2444 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2445 				continue;
2446 			}
2447 			if (bge_newbuf_jumbo(sc,
2448 			    sc->bge_jumbo, NULL) == ENOBUFS) {
2449 				ifp->if_ierrors++;
2450 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2451 				continue;
2452 			}
2453 		} else {
2454 			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2455 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2456 			    sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2457 			    BUS_DMASYNC_POSTREAD);
2458 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2459 			    sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2460 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2461 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2462 			stdcnt++;
2463 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2464 				ifp->if_ierrors++;
2465 				bge_newbuf_std(sc, sc->bge_std, m);
2466 				continue;
2467 			}
2468 			if (bge_newbuf_std(sc, sc->bge_std,
2469 			    NULL) == ENOBUFS) {
2470 				ifp->if_ierrors++;
2471 				bge_newbuf_std(sc, sc->bge_std, m);
2472 				continue;
2473 			}
2474 		}
2475 
2476 		ifp->if_ipackets++;
2477 #ifndef __NO_STRICT_ALIGNMENT
2478 		/*
2479 		 * For architectures with strict alignment we must make sure
2480 		 * the payload is aligned.
2481 		 */
2482 		if (sc->bge_rx_alignment_bug) {
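			/*
			 * Shift the payload up by ETHER_ALIGN (2) bytes so
			 * the IP header lands on a 32-bit boundary; bcopy()
			 * copes with the overlapping regions.
			 */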
2483 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2484 			    cur_rx->bge_len);
2485 			m->m_data += ETHER_ALIGN;
2486 		}
2487 #endif
2488 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2489 		m->m_pkthdr.rcvif = ifp;
2490 
2491 		if (ifp->if_capenable & IFCAP_RXCSUM) {
2492 			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2493 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2494 				if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2495 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2496 			}
2497 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
2498 			    m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
2499 				m->m_pkthdr.csum_data =
2500 				    cur_rx->bge_tcp_udp_csum;
2501 				m->m_pkthdr.csum_flags |=
2502 				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2503 			}
2504 		}
2505 
2506 		/*
2507 		 * If we received a packet with a vlan tag,
2508 		 * attach that information to the packet.
2509 		 */
2510 		if (have_tag) {
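			/*
			 * VLAN_INPUT_TAG() may free the mbuf and set it to
			 * NULL if the tag cannot be attached, hence the
			 * check below.
			 */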
2511 			VLAN_INPUT_TAG(ifp, m, vlan_tag);
2512 			if (m == NULL)
2513 				continue;
2514 		}
2515 
2516 		BGE_UNLOCK(sc);
2517 		(*ifp->if_input)(ifp, m);
2518 		BGE_LOCK(sc);
2519 	}
2520 
2521 	if (stdcnt > 0)
2522 		bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2523 		    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
2524 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2525 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2526 		if (jumbocnt > 0)
2527 			bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2528 			    sc->bge_cdata.bge_rx_jumbo_ring_map,
2529 			    BUS_DMASYNC_PREWRITE);
2530 	}
2531 
2532 	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2533 	if (stdcnt)
2534 		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2535 	if (jumbocnt)
2536 		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2537 }
2538 
2539 static void
2540 bge_txeof(struct bge_softc *sc)
2541 {
2542 	struct bge_tx_bd *cur_tx = NULL;
2543 	struct ifnet *ifp;
2544 
2545 	BGE_LOCK_ASSERT(sc);
2546 
2547 	/* Nothing to do. */
2548 	if (sc->bge_tx_saved_considx ==
2549 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
2550 		return;
2551 
2552 	ifp = sc->bge_ifp;
2553 
2554 	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
2555 	    sc->bge_cdata.bge_tx_ring_map,
2556 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2557 	/*
2558 	 * Go through our tx ring and free mbufs for those
2559 	 * frames that have been sent.
2560 	 */
2561 	while (sc->bge_tx_saved_considx !=
2562 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2563 		uint32_t		idx = 0;
2564 
2565 		idx = sc->bge_tx_saved_considx;
2566 		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
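		/*
		 * Only the last descriptor of a frame carries the END flag,
		 * so this counts completed frames rather than descriptors.
		 */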
2567 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2568 			ifp->if_opackets++;
2569 		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2570 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2571 			    sc->bge_cdata.bge_tx_dmamap[idx],
2572 			    BUS_DMASYNC_POSTWRITE);
2573 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2574 			    sc->bge_cdata.bge_tx_dmamap[idx]);
2575 			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2576 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2577 		}
2578 		sc->bge_txcnt--;
2579 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2580 		ifp->if_timer = 0;
2581 	}
2582 
2583 	if (cur_tx != NULL)
2584 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2585 }
2586 
2587 #ifdef DEVICE_POLLING
2588 static void
2589 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2590 {
2591 	struct bge_softc *sc = ifp->if_softc;
2592 	uint32_t statusword;
2593 
2594 	BGE_LOCK(sc);
2595 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2596 		BGE_UNLOCK(sc);
2597 		return;
2598 	}
2599 
2600 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2601 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2602 
2603 	statusword = atomic_readandclear_32(
2604 	    &sc->bge_ldata.bge_status_block->bge_status);
2605 
2606 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2607 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
2608 
2609 	/* Note link event. It will be processed by POLL_AND_CHECK_STATUS cmd */
2610 	if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
2611 		sc->bge_link_evt++;
2612 
2613 	if (cmd == POLL_AND_CHECK_STATUS)
2614 		if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2615 		    sc->bge_chipid != BGE_CHIPID_BCM5700_B1) ||
2616 		    sc->bge_link_evt || sc->bge_tbi)
2617 			bge_link_upd(sc);
2618 
2619 	sc->rxcycles = count;
2620 	bge_rxeof(sc);
2621 	bge_txeof(sc);
2622 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2623 		bge_start_locked(ifp);
2624 
2625 	BGE_UNLOCK(sc);
2626 }
2627 #endif /* DEVICE_POLLING */
2628 
2629 static void
2630 bge_intr(void *xsc)
2631 {
2632 	struct bge_softc *sc;
2633 	struct ifnet *ifp;
2634 	uint32_t statusword;
2635 
2636 	sc = xsc;
2637 
2638 	BGE_LOCK(sc);
2639 
2640 	ifp = sc->bge_ifp;
2641 
2642 #ifdef DEVICE_POLLING
2643 	if (ifp->if_capenable & IFCAP_POLLING) {
2644 		BGE_UNLOCK(sc);
2645 		return;
2646 	}
2647 #endif
2648 
2649 	/*
2650 	 * Do the mandatory PCI flush as well as get the link status.
2651 	 */
2652 	statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
2653 
2654 	/* Ack interrupt and stop others from occurring. */
2655 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2656 
2657 	/* Make sure the descriptor ring indexes are coherent. */
2658 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2659 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2660 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2661 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
2662 
2663 	if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2664 	    sc->bge_chipid != BGE_CHIPID_BCM5700_B1) ||
2665 	    statusword || sc->bge_link_evt)
2666 		bge_link_upd(sc);
2667 
2668 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2669 		/* Check RX return ring producer/consumer. */
2670 		bge_rxeof(sc);
2671 
2672 		/* Check TX ring producer/consumer. */
2673 		bge_txeof(sc);
2674 	}
2675 
2676 	/* Re-enable interrupts. */
2677 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2678 
2679 	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2680 	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2681 		bge_start_locked(ifp);
2682 
2683 	BGE_UNLOCK(sc);
2684 }
2685 
2686 static void
2687 bge_tick_locked(struct bge_softc *sc)
2688 {
2689 	struct mii_data *mii = NULL;
2690 
2691 	BGE_LOCK_ASSERT(sc);
2692 
2693 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2694 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
2695 		bge_stats_update_regs(sc);
2696 	else
2697 		bge_stats_update(sc);
2698 
2699 	if (!sc->bge_tbi) {
2700 		mii = device_get_softc(sc->bge_miibus);
2701 		mii_tick(mii);
2702 	} else {
2703 		/*
2704 		 * Since auto-polling can't be used in TBI mode, we have to
2705 		 * poll the link status manually. Register a pending link
2706 		 * event here and trigger an interrupt.
2707 		 */
2708 #ifdef DEVICE_POLLING
2709 		/* In polling mode we poll link state in bge_poll(). */
2710 		if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
2711 #endif
2712 		{
2713 		sc->bge_link_evt++;
2714 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
2715 		}
2716 	}
2717 
2718 	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
2719 }
2720 
2721 static void
2722 bge_tick(void *xsc)
2723 {
2724 	struct bge_softc *sc;
2725 
2726 	sc = xsc;
2727 
2728 	BGE_LOCK(sc);
2729 	bge_tick_locked(sc);
2730 	BGE_UNLOCK(sc);
2731 }
2732 
2733 static void
2734 bge_stats_update_regs(struct bge_softc *sc)
2735 {
2736 	struct bge_mac_stats_regs stats;
2737 	struct ifnet *ifp;
2738 	uint32_t *s;
2739 	u_long cnt;	/* current register value */
2740 	int i;
2741 
2742 	ifp = sc->bge_ifp;
2743 
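	/*
	 * Snapshot the on-chip MAC statistics registers into a local
	 * struct, one 32-bit word at a time.
	 */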
2744 	s = (uint32_t *)&stats;
2745 	for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2746 		*s = CSR_READ_4(sc, BGE_RX_STATS + i);
2747 		s++;
2748 	}
2749 
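	/*
	 * The hardware counters are cumulative, so charge only the delta
	 * since the last poll; fall back to the raw value if the counter
	 * appears to have wrapped or been reset.
	 */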
2750 	cnt = stats.dot3StatsSingleCollisionFrames +
2751 	    stats.dot3StatsMultipleCollisionFrames +
2752 	    stats.dot3StatsExcessiveCollisions +
2753 	    stats.dot3StatsLateCollisions;
2754 	ifp->if_collisions += cnt >= sc->bge_tx_collisions ?
2755 	    cnt - sc->bge_tx_collisions : cnt;
2756 	sc->bge_tx_collisions = cnt;
2757 }
2758 
2759 static void
2760 bge_stats_update(struct bge_softc *sc)
2761 {
2762 	struct ifnet *ifp;
2763 	bus_size_t stats;
2764 	u_long cnt;	/* current register value */
2765 
2766 	ifp = sc->bge_ifp;
2767 
2768 	stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2769 
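	/*
	 * READ_STAT() fetches the low 32 bits of a 64-bit statistics
	 * counter from the statistics block in NIC memory, accessed
	 * through the register memory window.
	 */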
2770 #define READ_STAT(sc, stats, stat) \
2771 	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
2772 
2773 	cnt = READ_STAT(sc, stats,
2774 	    txstats.dot3StatsSingleCollisionFrames.bge_addr_lo);
2775 	cnt += READ_STAT(sc, stats,
2776 	    txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo);
2777 	cnt += READ_STAT(sc, stats,
2778 	    txstats.dot3StatsExcessiveCollisions.bge_addr_lo);
2779 	cnt += READ_STAT(sc, stats,
2780 	    txstats.dot3StatsLateCollisions.bge_addr_lo);
2781 	ifp->if_collisions += cnt >= sc->bge_tx_collisions ?
2782 	    cnt - sc->bge_tx_collisions : cnt;
2783 	sc->bge_tx_collisions = cnt;
2784 
2785 	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
2786 	ifp->if_ierrors += cnt >= sc->bge_rx_discards ?
2787 	    cnt - sc->bge_rx_discards : cnt;
2788 	sc->bge_rx_discards = cnt;
2789 
2790 	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
2791 	ifp->if_oerrors += cnt >= sc->bge_tx_discards ?
2792 	    cnt - sc->bge_tx_discards : cnt;
2793 	sc->bge_tx_discards = cnt;
2794 
2795 #undef READ_STAT
2796 }
2797 
2798 /*
2799  * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
2800  * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
2801  * but when such padded frames employ the bge IP/TCP checksum offload,
2802  * the hardware checksum assist gives incorrect results (possibly
2803  * from incorporating its own padding into the UDP/TCP checksum; who knows).
2804  * If we pad such runts with zeros, the onboard checksum comes out correct.
2805  */
2806 static __inline int
2807 bge_cksum_pad(struct mbuf *m)
2808 {
2809 	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
2810 	struct mbuf *last;
2811 
2812 	/* If there's only the packet-header and we can pad there, use it. */
2813 	if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
2814 	    M_TRAILINGSPACE(m) >= padlen) {
2815 		last = m;
2816 	} else {
2817 		/*
2818 		 * Walk packet chain to find last mbuf. We will either
2819 		 * pad there, or append a new mbuf and pad it.
2820 		 */
2821 		for (last = m; last->m_next != NULL; last = last->m_next);
2822 		if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
2823 			/* Allocate new empty mbuf, pad it. Compact later. */
2824 			struct mbuf *n;
2825 
2826 			MGET(n, M_DONTWAIT, MT_DATA);
2827 			if (n == NULL)
2828 				return (ENOBUFS);
2829 			n->m_len = 0;
2830 			last->m_next = n;
2831 			last = n;
2832 		}
2833 	}
2834 
2835 	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
2836 	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
2837 	last->m_len += padlen;
2838 	m->m_pkthdr.len += padlen;
2839 
2840 	return (0);
2841 }
2842 
2843 /*
2844  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2845  * pointers to descriptors.
2846  */
2847 static int
2848 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
2849 {
2850 	bus_dma_segment_t	segs[BGE_NSEG_NEW];
2851 	bus_dmamap_t		map;
2852 	struct bge_tx_bd	*d = NULL;
2853 	struct m_tag		*mtag;
2854 	uint32_t		idx = *txidx;
2855 	uint16_t		csum_flags = 0;
2856 	int			nsegs, i, error;
2857 
2858 	if (m_head->m_pkthdr.csum_flags) {
2859 		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2860 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2861 		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
2862 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2863 			if (m_head->m_pkthdr.len < ETHER_MIN_NOPAD &&
2864 			    bge_cksum_pad(m_head) != 0)
2865 				return (ENOBUFS);
2866 		}
2867 		if (m_head->m_flags & M_LASTFRAG)
2868 			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2869 		else if (m_head->m_flags & M_FRAG)
2870 			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2871 	}
2872 
2873 	mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m_head);
2874 
2875 	map = sc->bge_cdata.bge_tx_dmamap[idx];
2876 	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map,
2877 	    m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2878 	if (error) {
2879 		if (error == EFBIG) {
2880 			struct mbuf *m0;
2881 
2882 			m0 = m_defrag(m_head, M_DONTWAIT);
2883 			if (m0 == NULL)
2884 				return (ENOBUFS);
2885 			m_head = m0;
2886 			error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag,
2887 			    map, m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2888 		}
2889 		if (error)
2890 			return (error);
2891 	}
2892 
2893 	/*
2894 	 * Sanity check: avoid coming within 16 descriptors
2895 	 * of the end of the ring.
2896 	 */
2897 	if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
2898 		bus_dmamap_unload(sc->bge_cdata.bge_mtag, map);
2899 		return (ENOBUFS);
2900 	}
2901 
2902 	bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
2903 
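	/*
	 * Fill one TX descriptor per DMA segment. Every descriptor gets the
	 * same checksum flags; the END flag is added to the last one below,
	 * and the VLAN tag goes into the first.
	 */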
2904 	for (i = 0; ; i++) {
2905 		d = &sc->bge_ldata.bge_tx_ring[idx];
2906 		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
2907 		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
2908 		d->bge_len = segs[i].ds_len;
2909 		d->bge_flags = csum_flags;
2910 		if (i == nsegs - 1)
2911 			break;
2912 		BGE_INC(idx, BGE_TX_RING_CNT);
2913 	}
2914 
2915 	/* Mark the last segment as end of packet... */
2916 	d->bge_flags |= BGE_TXBDFLAG_END;
2917 	/* ... and put VLAN tag into first segment.  */
2918 	d = &sc->bge_ldata.bge_tx_ring[*txidx];
2919 	if (mtag != NULL) {
2920 		d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2921 		d->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
2922 	} else
2923 		d->bge_vlan_tag = 0;
2924 
2925 	/*
2926 	 * Ensure that the map for this transmission
2927 	 * is placed at the array index of the last descriptor
2928 	 * in this chain.
2929 	 */
2930 	sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
2931 	sc->bge_cdata.bge_tx_dmamap[idx] = map;
2932 	sc->bge_cdata.bge_tx_chain[idx] = m_head;
2933 	sc->bge_txcnt += nsegs;
2934 
2935 	BGE_INC(idx, BGE_TX_RING_CNT);
2936 	*txidx = idx;
2937 
2938 	return (0);
2939 }
2940 
2941 /*
2942  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2943  * to the mbuf data regions directly in the transmit descriptors.
2944  */
2945 static void
2946 bge_start_locked(struct ifnet *ifp)
2947 {
2948 	struct bge_softc *sc;
2949 	struct mbuf *m_head = NULL;
2950 	uint32_t prodidx;
2951 	int count = 0;
2952 
2953 	sc = ifp->if_softc;
2954 
2955 	if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2956 		return;
2957 
2958 	prodidx = sc->bge_tx_prodidx;
2959 
2960 	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
2961 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2962 		if (m_head == NULL)
2963 			break;
2964 
2965 		/*
2966 		 * XXX
2967 		 * The code inside the if() block is never reached since we
2968 		 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
2969 		 * requests to checksum TCP/UDP in a fragmented packet.
2970 		 *
2971 		 * XXX
2972 		 * safety overkill.  If this is a fragmented packet chain
2973 		 * with delayed TCP/UDP checksums, then only encapsulate
2974 		 * it if we have enough descriptors to handle the entire
2975 		 * chain at once.
2976 		 * (paranoia -- may not actually be needed)
2977 		 */
2978 		if (m_head->m_flags & M_FIRSTFRAG &&
2979 		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
2980 			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2981 			    m_head->m_pkthdr.csum_data + 16) {
2982 				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2983 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2984 				break;
2985 			}
2986 		}
2987 
2988 		/*
2989 		 * Pack the data into the transmit ring. If we
2990 		 * don't have room, set the OACTIVE flag and wait
2991 		 * for the NIC to drain the ring.
2992 		 */
2993 		if (bge_encap(sc, m_head, &prodidx)) {
2994 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2995 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2996 			break;
2997 		}
2998 		++count;
2999 
3000 		/*
3001 		 * If there's a BPF listener, bounce a copy of this frame
3002 		 * to him.
3003 		 */
3004 		BPF_MTAP(ifp, m_head);
3005 	}
3006 
3007 	if (count == 0)
3008 		/* No packets were dequeued. */
3009 		return;
3010 
3011 	/* Transmit. */
3012 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3013 	/* 5700 b2 errata: BX chips need the producer index written twice. */
3014 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3015 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3016 
3017 	sc->bge_tx_prodidx = prodidx;
3018 
3019 	/*
3020 	 * Set a timeout in case the chip goes out to lunch.
3021 	 */
3022 	ifp->if_timer = 5;
3023 }
3024 
3025 /*
3026  * Main transmit routine entry point: acquire the driver lock and hand the
3027  * work off to bge_start_locked().
3028  */
3029 static void
3030 bge_start(struct ifnet *ifp)
3031 {
3032 	struct bge_softc *sc;
3033 
3034 	sc = ifp->if_softc;
3035 	BGE_LOCK(sc);
3036 	bge_start_locked(ifp);
3037 	BGE_UNLOCK(sc);
3038 }
3039 
3040 static void
3041 bge_init_locked(struct bge_softc *sc)
3042 {
3043 	struct ifnet *ifp;
3044 	uint16_t *m;
3045 
3046 	BGE_LOCK_ASSERT(sc);
3047 
3048 	ifp = sc->bge_ifp;
3049 
3050 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3051 		return;
3052 
3053 	/* Cancel pending I/O and flush buffers. */
3054 	bge_stop(sc);
3055 	bge_reset(sc);
3056 	bge_chipinit(sc);
3057 
3058 	/*
3059 	 * Init the various state machines, ring
3060 	 * control blocks and firmware.
3061 	 */
3062 	if (bge_blockinit(sc)) {
3063 		device_printf(sc->bge_dev, "initialization failure\n");
3064 		return;
3065 	}
3066 
3067 	ifp = sc->bge_ifp;
3068 
3069 	/* Specify MTU. */
3070 	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3071 	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
3072 
3073 	/* Load our MAC address. */
3074 	m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
3075 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3076 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3077 
3078 	/* Enable or disable promiscuous mode as needed. */
3079 	if (ifp->if_flags & IFF_PROMISC) {
3080 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3081 	} else {
3082 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3083 	}
3084 
3085 	/* Program multicast filter. */
3086 	bge_setmulti(sc);
3087 
3088 	/* Init RX ring. */
3089 	bge_init_rx_ring_std(sc);
3090 
3091 	/*
3092 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3093 	 * memory to ensure that the chip has in fact read the first
3094 	 * entry of the ring.
3095 	 */
3096 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3097 		uint32_t		v, i;
3098 		for (i = 0; i < 10; i++) {
3099 			DELAY(20);
3100 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3101 			if (v == (MCLBYTES - ETHER_ALIGN))
3102 				break;
3103 		}
3104 		if (i == 10)
3105 			device_printf(sc->bge_dev,
3106 			    "5705 A0 chip failed to load RX ring\n");
3107 	}
3108 
3109 	/* Init jumbo RX ring. */
3110 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3111 		bge_init_rx_ring_jumbo(sc);
3112 
3113 	/* Init our RX return ring index. */
3114 	sc->bge_rx_saved_considx = 0;
3115 
3116 	/* Init TX ring. */
3117 	bge_init_tx_ring(sc);
3118 
3119 	/* Turn on transmitter. */
3120 	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3121 
3122 	/* Turn on receiver. */
3123 	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3124 
3125 	/* Tell firmware we're alive. */
3126 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3127 
3128 #ifdef DEVICE_POLLING
3129 	/* Disable interrupts if we are polling. */
3130 	if (ifp->if_capenable & IFCAP_POLLING) {
3131 		BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3132 		    BGE_PCIMISCCTL_MASK_PCI_INTR);
3133 		CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3134 		CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3135 		CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3136 	} else
3137 #endif
3138 
3139 	/* Enable host interrupts. */
3140 	{
3141 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3142 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3143 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3144 	}
3145 
3146 	bge_ifmedia_upd(ifp);
3147 
3148 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3149 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3150 
3151 	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3152 }
3153 
3154 static void
3155 bge_init(void *xsc)
3156 {
3157 	struct bge_softc *sc = xsc;
3158 
3159 	BGE_LOCK(sc);
3160 	bge_init_locked(sc);
3161 	BGE_UNLOCK(sc);
3162 }
3163 
3164 /*
3165  * Set media options.
3166  */
3167 static int
3168 bge_ifmedia_upd(struct ifnet *ifp)
3169 {
3170 	struct bge_softc *sc;
3171 	struct mii_data *mii;
3172 	struct ifmedia *ifm;
3173 
3174 	sc = ifp->if_softc;
3175 	ifm = &sc->bge_ifmedia;
3176 
3177 	/* If this is a 1000baseX NIC, enable the TBI port. */
3178 	if (sc->bge_tbi) {
3179 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3180 			return (EINVAL);
3181 		switch (IFM_SUBTYPE(ifm->ifm_media)) {
3182 		case IFM_AUTO:
3183 			/*
3184 			 * The BCM5704 ASIC appears to have a special
3185 			 * mechanism for programming the autoneg
3186 			 * advertisement registers in TBI mode.
3187 			 */
3188 			if (bge_fake_autoneg == 0 &&
3189 			    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3190 				uint32_t sgdig;
3191 				CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3192 				sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3193 				sgdig |= BGE_SGDIGCFG_AUTO|
3194 				    BGE_SGDIGCFG_PAUSE_CAP|
3195 				    BGE_SGDIGCFG_ASYM_PAUSE;
3196 				CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3197 				    sgdig|BGE_SGDIGCFG_SEND);
3198 				DELAY(5);
3199 				CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3200 			}
3201 			break;
3202 		case IFM_1000_SX:
3203 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3204 				BGE_CLRBIT(sc, BGE_MAC_MODE,
3205 				    BGE_MACMODE_HALF_DUPLEX);
3206 			} else {
3207 				BGE_SETBIT(sc, BGE_MAC_MODE,
3208 				    BGE_MACMODE_HALF_DUPLEX);
3209 			}
3210 			break;
3211 		default:
3212 			return (EINVAL);
3213 		}
3214 		return (0);
3215 	}
3216 
3217 	sc->bge_link_evt++;
3218 	mii = device_get_softc(sc->bge_miibus);
3219 	if (mii->mii_instance) {
3220 		struct mii_softc *miisc;
3221 		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3222 		    miisc = LIST_NEXT(miisc, mii_list))
3223 			mii_phy_reset(miisc);
3224 	}
3225 	mii_mediachg(mii);
3226 
3227 	return (0);
3228 }
3229 
3230 /*
3231  * Report current media status.
3232  */
3233 static void
3234 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3235 {
3236 	struct bge_softc *sc;
3237 	struct mii_data *mii;
3238 
3239 	sc = ifp->if_softc;
3240 
3241 	if (sc->bge_tbi) {
3242 		ifmr->ifm_status = IFM_AVALID;
3243 		ifmr->ifm_active = IFM_ETHER;
3244 		if (CSR_READ_4(sc, BGE_MAC_STS) &
3245 		    BGE_MACSTAT_TBI_PCS_SYNCHED)
3246 			ifmr->ifm_status |= IFM_ACTIVE;
3247 		ifmr->ifm_active |= IFM_1000_SX;
3248 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3249 			ifmr->ifm_active |= IFM_HDX;
3250 		else
3251 			ifmr->ifm_active |= IFM_FDX;
3252 		return;
3253 	}
3254 
3255 	mii = device_get_softc(sc->bge_miibus);
3256 	mii_pollstat(mii);
3257 	ifmr->ifm_active = mii->mii_media_active;
3258 	ifmr->ifm_status = mii->mii_media_status;
3259 }
3260 
3261 static int
3262 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3263 {
3264 	struct bge_softc *sc = ifp->if_softc;
3265 	struct ifreq *ifr = (struct ifreq *) data;
3266 	struct mii_data *mii;
3267 	int mask, error = 0;
3268 
3269 	switch (command) {
3270 	case SIOCSIFMTU:
3271 		/* Disallow jumbo frames on 5705/5750. */
3272 		if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
3273 		    sc->bge_asicrev == BGE_ASICREV_BCM5750) &&
3274 		    ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
3275 			error = EINVAL;
3276 		else {
3277 			ifp->if_mtu = ifr->ifr_mtu;
3278 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3279 			bge_init(sc);
3280 		}
3281 		break;
3282 	case SIOCSIFFLAGS:
3283 		BGE_LOCK(sc);
3284 		if (ifp->if_flags & IFF_UP) {
3285 			/*
3286 			 * If only the state of the PROMISC flag changed,
3287 			 * then just use the 'set promisc mode' command
3288 			 * instead of reinitializing the entire NIC. Doing
3289 			 * a full re-init means reloading the firmware and
3290 			 * waiting for it to start up, which may take a
3291 			 * second or two.  Similarly for ALLMULTI.
3292 			 */
3293 			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3294 			    ifp->if_flags & IFF_PROMISC &&
3295 			    !(sc->bge_if_flags & IFF_PROMISC)) {
3296 				BGE_SETBIT(sc, BGE_RX_MODE,
3297 				    BGE_RXMODE_RX_PROMISC);
3298 			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3299 			    !(ifp->if_flags & IFF_PROMISC) &&
3300 			    sc->bge_if_flags & IFF_PROMISC) {
3301 				BGE_CLRBIT(sc, BGE_RX_MODE,
3302 				    BGE_RXMODE_RX_PROMISC);
3303 			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3304 			    (ifp->if_flags ^ sc->bge_if_flags) & IFF_ALLMULTI) {
3305 				bge_setmulti(sc);
3306 			} else
3307 				bge_init_locked(sc);
3308 		} else {
3309 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3310 				bge_stop(sc);
3311 			}
3312 		}
3313 		sc->bge_if_flags = ifp->if_flags;
3314 		BGE_UNLOCK(sc);
3315 		error = 0;
3316 		break;
3317 	case SIOCADDMULTI:
3318 	case SIOCDELMULTI:
3319 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3320 			BGE_LOCK(sc);
3321 			bge_setmulti(sc);
3322 			BGE_UNLOCK(sc);
3323 			error = 0;
3324 		}
3325 		break;
3326 	case SIOCSIFMEDIA:
3327 	case SIOCGIFMEDIA:
3328 		if (sc->bge_tbi) {
3329 			error = ifmedia_ioctl(ifp, ifr,
3330 			    &sc->bge_ifmedia, command);
3331 		} else {
3332 			mii = device_get_softc(sc->bge_miibus);
3333 			error = ifmedia_ioctl(ifp, ifr,
3334 			    &mii->mii_media, command);
3335 		}
3336 		break;
3337 	case SIOCSIFCAP:
3338 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3339 #ifdef DEVICE_POLLING
3340 		if (mask & IFCAP_POLLING) {
3341 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
3342 				error = ether_poll_register(bge_poll, ifp);
3343 				if (error)
3344 					return (error);
3345 				BGE_LOCK(sc);
3346 				BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3347 				    BGE_PCIMISCCTL_MASK_PCI_INTR);
3348 				CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3349 				CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3350 				CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3351 				ifp->if_capenable |= IFCAP_POLLING;
3352 				BGE_UNLOCK(sc);
3353 			} else {
3354 				error = ether_poll_deregister(ifp);
3355 				/* Enable interrupt even in error case */
3356 				BGE_LOCK(sc);
3357 				CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
3358 				CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
3359 				BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
3360 				    BGE_PCIMISCCTL_MASK_PCI_INTR);
3361 				CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3362 				ifp->if_capenable &= ~IFCAP_POLLING;
3363 				BGE_UNLOCK(sc);
3364 			}
3365 		}
3366 #endif
3367 		if (mask & IFCAP_HWCSUM) {
3368 			ifp->if_capenable ^= IFCAP_HWCSUM;
3369 			if (IFCAP_HWCSUM & ifp->if_capenable &&
3370 			    IFCAP_HWCSUM & ifp->if_capabilities)
3371 				ifp->if_hwassist = BGE_CSUM_FEATURES;
3372 			else
3373 				ifp->if_hwassist = 0;
3374 			VLAN_CAPABILITIES(ifp);
3375 		}
3376 		break;
3377 	default:
3378 		error = ether_ioctl(ifp, command, data);
3379 		break;
3380 	}
3381 
3382 	return (error);
3383 }
3384 
3385 static void
3386 bge_watchdog(struct ifnet *ifp)
3387 {
3388 	struct bge_softc *sc;
3389 
3390 	sc = ifp->if_softc;
3391 
3392 	if_printf(ifp, "watchdog timeout -- resetting\n");
3393 
3394 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3395 	bge_init(sc);
3396 
3397 	ifp->if_oerrors++;
3398 }
3399 
3400 /*
3401  * Stop the adapter and free any mbufs allocated to the
3402  * RX and TX lists.
3403  */
3404 static void
3405 bge_stop(struct bge_softc *sc)
3406 {
3407 	struct ifnet *ifp;
3408 	struct ifmedia_entry *ifm;
3409 	struct mii_data *mii = NULL;
3410 	int mtmp, itmp;
3411 
3412 	BGE_LOCK_ASSERT(sc);
3413 
3414 	ifp = sc->bge_ifp;
3415 
3416 	if (!sc->bge_tbi)
3417 		mii = device_get_softc(sc->bge_miibus);
3418 
3419 	callout_stop(&sc->bge_stat_ch);
3420 
3421 	/*
3422 	 * Disable all of the receiver blocks.
3423 	 */
3424 	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3425 	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3426 	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3427 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3428 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3429 		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3430 	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3431 	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3432 	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3433 
3434 	/*
3435 	 * Disable all of the transmit blocks.
3436 	 */
3437 	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3438 	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3439 	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3440 	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3441 	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3442 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3443 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3444 		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3445 	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3446 
3447 	/*
3448 	 * Shut down all of the memory managers and related
3449 	 * state machines.
3450 	 */
3451 	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3452 	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3453 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3454 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3455 		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3456 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3457 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3458 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3459 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
3460 		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3461 		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3462 	}
3463 
3464 	/* Disable host interrupts. */
3465 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3466 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3467 
3468 	/*
3469 	 * Tell firmware we're shutting down.
3470 	 */
3471 	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3472 
3473 	/* Free the RX lists. */
3474 	bge_free_rx_ring_std(sc);
3475 
3476 	/* Free jumbo RX list. */
3477 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3478 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3479 		bge_free_rx_ring_jumbo(sc);
3480 
3481 	/* Free TX buffers. */
3482 	bge_free_tx_ring(sc);
3483 
3484 	/*
3485 	 * Isolate/power down the PHY, but leave the media selection
3486 	 * unchanged so that things will be put back to normal when
3487 	 * we bring the interface back up.
3488 	 */
3489 	if (!sc->bge_tbi) {
3490 		itmp = ifp->if_flags;
3491 		ifp->if_flags |= IFF_UP;
3492 		/*
3493 		 * If we are called from bge_detach(), mii is already NULL.
3494 		 */
3495 		if (mii != NULL) {
3496 			ifm = mii->mii_media.ifm_cur;
3497 			mtmp = ifm->ifm_media;
3498 			ifm->ifm_media = IFM_ETHER|IFM_NONE;
3499 			mii_mediachg(mii);
3500 			ifm->ifm_media = mtmp;
3501 		}
3502 		ifp->if_flags = itmp;
3503 	}
3504 
3505 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3506 
3507 	/*
3508 	 * We can't just call bge_link_upd() here because the chip is almost
3509 	 * stopped, so the bge_link_upd -> bge_tick_locked -> bge_stats_update
3510 	 * sequence may lead to a hardware deadlock. Instead we just clear the
3511 	 * MAC's notion of the link state (the PHY may still report link UP).
3512 	 */
3513 	if (bootverbose && sc->bge_link)
3514 		if_printf(sc->bge_ifp, "link DOWN\n");
3515 	sc->bge_link = 0;
3516 
3517 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3518 }
3519 
3520 /*
3521  * Stop all chip I/O so that the kernel's probe routines don't
3522  * get confused by errant DMAs when rebooting.
3523  */
3524 static void
3525 bge_shutdown(device_t dev)
3526 {
3527 	struct bge_softc *sc;
3528 
3529 	sc = device_get_softc(dev);
3530 
3531 	BGE_LOCK(sc);
3532 	bge_stop(sc);
3533 	bge_reset(sc);
3534 	BGE_UNLOCK(sc);
3535 }
3536 
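/*
 * Device suspend routine: stop the interface so the chip is quiescent
 * while the system sleeps.
 */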
3537 static int
3538 bge_suspend(device_t dev)
3539 {
3540 	struct bge_softc *sc;
3541 
3542 	sc = device_get_softc(dev);
3543 	BGE_LOCK(sc);
3544 	bge_stop(sc);
3545 	BGE_UNLOCK(sc);
3546 
3547 	return (0);
3548 }
3549 
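/*
 * Device resume routine: if the interface was up before the suspend,
 * reinitialize the hardware and restart any pending transmissions.
 */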
3550 static int
3551 bge_resume(device_t dev)
3552 {
3553 	struct bge_softc *sc;
3554 	struct ifnet *ifp;
3555 
3556 	sc = device_get_softc(dev);
3557 	BGE_LOCK(sc);
3558 	ifp = sc->bge_ifp;
3559 	if (ifp->if_flags & IFF_UP) {
3560 		bge_init_locked(sc);
3561 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3562 			bge_start_locked(ifp);
3563 	}
3564 	BGE_UNLOCK(sc);
3565 
3566 	return (0);
3567 }
3568 
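/*
 * Handle a link state change. Depending on the hardware, link changes
 * are detected via MII interrupts (BCM5700 other than B1), the TBI/fiber
 * PCS sync status, or the MI auto-polling status, as described in the
 * comments below.
 */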
3569 static void
3570 bge_link_upd(struct bge_softc *sc)
3571 {
3572 	struct mii_data *mii;
3573 	uint32_t link, status;
3574 
3575 	BGE_LOCK_ASSERT(sc);
3576 
3577 	/* Clear 'pending link event' flag. */
3578 	sc->bge_link_evt = 0;
3579 
3580 	/*
3581 	 * Process link state changes.
3582 	 * Grrr. The link status word in the status block does
3583 	 * not work correctly on the BCM5700 rev AX and BX chips,
3584 	 * according to all available information. Hence, we have
3585 	 * to enable MII interrupts in order to properly obtain
3586 	 * async link changes. Unfortunately, this also means that
3587 	 * we have to read the MAC status register to detect link
3588 	 * changes, thereby adding an additional register access to
3589 	 * the interrupt handler.
3590 	 *
3591 	 * XXX: perhaps the link state detection procedure used for
3592 	 * BGE_CHIPID_BCM5700_B1 can be used for other BCM5700 revisions.
3593 	 */
3594 
3595 	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3596 	    sc->bge_chipid != BGE_CHIPID_BCM5700_B1) {
3597 		status = CSR_READ_4(sc, BGE_MAC_STS);
3598 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
3599 			callout_stop(&sc->bge_stat_ch);
3600 			bge_tick_locked(sc);
3601 
3602 			mii = device_get_softc(sc->bge_miibus);
3603 			if (!sc->bge_link &&
3604 			    mii->mii_media_status & IFM_ACTIVE &&
3605 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3606 				sc->bge_link++;
3607 				if (bootverbose)
3608 					if_printf(sc->bge_ifp, "link UP\n");
3609 			} else if (sc->bge_link &&
3610 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
3611 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3612 				sc->bge_link = 0;
3613 				if (bootverbose)
3614 					if_printf(sc->bge_ifp, "link DOWN\n");
3615 			}
3616 
3617 			/* Clear the interrupt. */
3618 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3619 			    BGE_EVTENB_MI_INTERRUPT);
3620 			bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
3621 			bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
3622 			    BRGPHY_INTRS);
3623 		}
3624 		return;
3625 	}
3626 
3627 	if (sc->bge_tbi) {
3628 		status = CSR_READ_4(sc, BGE_MAC_STS);
3629 		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3630 			if (!sc->bge_link) {
3631 				sc->bge_link++;
3632 				if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
3633 					BGE_CLRBIT(sc, BGE_MAC_MODE,
3634 					    BGE_MACMODE_TBI_SEND_CFGS);
3635 				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3636 				if (bootverbose)
3637 					if_printf(sc->bge_ifp, "link UP\n");
3638 				if_link_state_change(sc->bge_ifp,
3639 				    LINK_STATE_UP);
3640 			}
3641 		} else if (sc->bge_link) {
3642 			sc->bge_link = 0;
3643 			if (bootverbose)
3644 				if_printf(sc->bge_ifp, "link DOWN\n");
3645 			if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
3646 		}
3647 	/* Discard link events for MII/GMII cards if MI auto-polling is disabled. */
3648 	} else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
3649 		/*
3650 		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
3651 		 * bit in the status word always set. Work around this bug by
3652 		 * reading the PHY link status directly.
3653 		 */
3654 		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
3655 
3656 		if (link != sc->bge_link ||
3657 		    sc->bge_asicrev == BGE_ASICREV_BCM5700) {
3658 			callout_stop(&sc->bge_stat_ch);
3659 			bge_tick_locked(sc);
3660 
3661 			mii = device_get_softc(sc->bge_miibus);
3662 			if (!sc->bge_link &&
3663 			    mii->mii_media_status & IFM_ACTIVE &&
3664 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3665 				sc->bge_link++;
3666 				if (bootverbose)
3667 					if_printf(sc->bge_ifp, "link UP\n");
3668 			} else if (sc->bge_link &&
3669 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
3670 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3671 				sc->bge_link = 0;
3672 				if (bootverbose)
3673 					if_printf(sc->bge_ifp, "link DOWN\n");
3674 			}
3675 		}
3676 	}
3677 
3678 	/* Clear the attention. */
3679 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
3680 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
3681 	    BGE_MACSTAT_LINK_CHANGED);
3682 }
3683