1 /*-
2  * Copyright (c) 2001 Wind River Systems
3  * Copyright (c) 1997, 1998, 1999, 2001
4  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 /*
38  * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39  *
40  * The Broadcom BCM5700 is based on technology originally developed by
41  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45  * frames, highly configurable RX filtering, and 16 RX and TX queues
46  * (which, along with RX filter rules, can be used for QOS applications).
47  * Other features, such as TCP segmentation, may be available as part
48  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49  * firmware images can be stored in hardware and need not be compiled
50  * into the driver.
51  *
52  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53  * function on a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54  *
55  * The BCM5701 is a single-chip solution incorporating both the BCM5700
56  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57  * does not support external SSRAM.
58  *
59  * Broadcom also produces a variation of the BCM5700 under the "Altima"
60  * brand name, which is functionally similar but lacks PCI-X support.
61  *
62  * Without external SSRAM, you can only have at most 4 TX rings,
63  * and the use of the mini RX ring is disabled. This seems to imply
64  * that these features are simply not available on the BCM5701. As a
65  * result, this driver does not implement any support for the mini RX
66  * ring.
67  */
68 
69 #include <sys/param.h>
70 #include <sys/endian.h>
71 #include <sys/systm.h>
72 #include <sys/sockio.h>
73 #include <sys/mbuf.h>
74 #include <sys/malloc.h>
75 #include <sys/kernel.h>
76 #include <sys/module.h>
77 #include <sys/socket.h>
78 #include <sys/queue.h>
79 
80 #include <net/if.h>
81 #include <net/if_arp.h>
82 #include <net/ethernet.h>
83 #include <net/if_dl.h>
84 #include <net/if_media.h>
85 
86 #include <net/bpf.h>
87 
88 #include <net/if_types.h>
89 #include <net/if_vlan_var.h>
90 
91 #include <netinet/in_systm.h>
92 #include <netinet/in.h>
93 #include <netinet/ip.h>
94 
95 #include <machine/clock.h>      /* for DELAY */
96 #include <machine/bus_memio.h>
97 #include <machine/bus.h>
98 #include <machine/resource.h>
99 #include <sys/bus.h>
100 #include <sys/rman.h>
101 
102 #include <dev/mii/mii.h>
103 #include <dev/mii/miivar.h>
104 #include "miidevs.h"
105 #include <dev/mii/brgphyreg.h>
106 
107 #include <dev/pci/pcireg.h>
108 #include <dev/pci/pcivar.h>
109 
110 #include <dev/bge/if_bgereg.h>
111 
112 #define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
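/*
 * Checksum work the hardware can offload; bge_attach advertises this
 * mask to the stack (via if_hwassist) so outbound IP/TCP/UDP checksums
 * are left to the NIC.
 */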
113 
114 MODULE_DEPEND(bge, pci, 1, 1, 1);
115 MODULE_DEPEND(bge, ether, 1, 1, 1);
116 MODULE_DEPEND(bge, miibus, 1, 1, 1);
117 
118 /* "controller miibus0" required.  See GENERIC if you get errors here. */
119 #include "miibus_if.h"
120 
121 /*
122  * Various supported device vendors/types and their names. Note: the
123  * spec seems to indicate that the hardware still has Alteon's vendor
124  * ID burned into it, though it will always be overridden by the vendor
125  * ID in the EEPROM. Just to be safe, we cover all possibilities.
126  */
127 #define BGE_DEVDESC_MAX		64	/* Maximum device description length */
128 
129 static struct bge_type bge_devs[] = {
130 	{ ALT_VENDORID,	ALT_DEVICEID_BCM5700,
131 		"Broadcom BCM5700 Gigabit Ethernet" },
132 	{ ALT_VENDORID,	ALT_DEVICEID_BCM5701,
133 		"Broadcom BCM5701 Gigabit Ethernet" },
134 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5700,
135 		"Broadcom BCM5700 Gigabit Ethernet" },
136 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5701,
137 		"Broadcom BCM5701 Gigabit Ethernet" },
138 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5702,
139 		"Broadcom BCM5702 Gigabit Ethernet" },
140 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5702X,
141 		"Broadcom BCM5702X Gigabit Ethernet" },
142 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5703,
143 		"Broadcom BCM5703 Gigabit Ethernet" },
144 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5703X,
145 		"Broadcom BCM5703X Gigabit Ethernet" },
146 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5704C,
147 		"Broadcom BCM5704C Dual Gigabit Ethernet" },
148 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5704S,
149 		"Broadcom BCM5704S Dual Gigabit Ethernet" },
150 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705,
151 		"Broadcom BCM5705 Gigabit Ethernet" },
152 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705K,
153 		"Broadcom BCM5705K Gigabit Ethernet" },
154 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705M,
155 		"Broadcom BCM5705M Gigabit Ethernet" },
156 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT,
157 		"Broadcom BCM5705M Gigabit Ethernet" },
158 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5721,
159 		"Broadcom BCM5721 Gigabit Ethernet" },
160 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5750,
161 		"Broadcom BCM5750 Gigabit Ethernet" },
162 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5750M,
163 		"Broadcom BCM5750M Gigabit Ethernet" },
164 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5751,
165 		"Broadcom BCM5751 Gigabit Ethernet" },
166 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5782,
167 		"Broadcom BCM5782 Gigabit Ethernet" },
168 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5788,
169 		"Broadcom BCM5788 Gigabit Ethernet" },
170 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5901,
171 		"Broadcom BCM5901 Fast Ethernet" },
172 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2,
173 		"Broadcom BCM5901A2 Fast Ethernet" },
174 	{ SK_VENDORID, SK_DEVICEID_ALTIMA,
175 		"SysKonnect Gigabit Ethernet" },
176 	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000,
177 		"Altima AC1000 Gigabit Ethernet" },
178 	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002,
179 		"Altima AC1002 Gigabit Ethernet" },
180 	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100,
181 		"Altima AC9100 Gigabit Ethernet" },
182 	{ 0, 0, NULL }
183 };
184 
185 static int bge_probe		(device_t);
186 static int bge_attach		(device_t);
187 static int bge_detach		(device_t);
188 static void bge_release_resources
189 				(struct bge_softc *);
190 static void bge_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
191 static void bge_dma_map_tx_desc	(void *, bus_dma_segment_t *, int,
192 				    bus_size_t, int);
193 static int bge_dma_alloc	(device_t);
194 static void bge_dma_free	(struct bge_softc *);
195 
196 static void bge_txeof		(struct bge_softc *);
197 static void bge_rxeof		(struct bge_softc *);
198 
199 static void bge_tick_locked	(struct bge_softc *);
200 static void bge_tick		(void *);
201 static void bge_stats_update	(struct bge_softc *);
202 static void bge_stats_update_regs
203 				(struct bge_softc *);
204 static int bge_encap		(struct bge_softc *, struct mbuf *,
205 					u_int32_t *);
206 
207 static void bge_intr		(void *);
208 static void bge_start_locked	(struct ifnet *);
209 static void bge_start		(struct ifnet *);
210 static int bge_ioctl		(struct ifnet *, u_long, caddr_t);
211 static void bge_init_locked	(struct bge_softc *);
212 static void bge_init		(void *);
213 static void bge_stop		(struct bge_softc *);
214 static void bge_watchdog		(struct ifnet *);
215 static void bge_shutdown		(device_t);
216 static int bge_ifmedia_upd	(struct ifnet *);
217 static void bge_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
218 
219 static u_int8_t	bge_eeprom_getbyte	(struct bge_softc *, int, u_int8_t *);
220 static int bge_read_eeprom	(struct bge_softc *, caddr_t, int, int);
221 
222 static void bge_setmulti	(struct bge_softc *);
223 
224 static void bge_handle_events	(struct bge_softc *);
225 static int bge_alloc_jumbo_mem	(struct bge_softc *);
226 static void bge_free_jumbo_mem	(struct bge_softc *);
227 static void *bge_jalloc		(struct bge_softc *);
228 static void bge_jfree		(void *, void *);
229 static int bge_newbuf_std	(struct bge_softc *, int, struct mbuf *);
230 static int bge_newbuf_jumbo	(struct bge_softc *, int, struct mbuf *);
231 static int bge_init_rx_ring_std	(struct bge_softc *);
232 static void bge_free_rx_ring_std	(struct bge_softc *);
233 static int bge_init_rx_ring_jumbo	(struct bge_softc *);
234 static void bge_free_rx_ring_jumbo	(struct bge_softc *);
235 static void bge_free_tx_ring	(struct bge_softc *);
236 static int bge_init_tx_ring	(struct bge_softc *);
237 
238 static int bge_chipinit		(struct bge_softc *);
239 static int bge_blockinit	(struct bge_softc *);
240 
241 #ifdef notdef
242 static u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
243 static void bge_vpd_read_res	(struct bge_softc *, struct vpd_res *, int);
244 static void bge_vpd_read	(struct bge_softc *);
245 #endif
246 
247 static u_int32_t bge_readmem_ind
248 				(struct bge_softc *, int);
249 static void bge_writemem_ind	(struct bge_softc *, int, int);
250 #ifdef notdef
251 static u_int32_t bge_readreg_ind
252 				(struct bge_softc *, int);
253 #endif
254 static void bge_writereg_ind	(struct bge_softc *, int, int);
255 
256 static int bge_miibus_readreg	(device_t, int, int);
257 static int bge_miibus_writereg	(device_t, int, int, int);
258 static void bge_miibus_statchg	(device_t);
259 
260 static void bge_reset		(struct bge_softc *);
261 
262 static device_method_t bge_methods[] = {
263 	/* Device interface */
264 	DEVMETHOD(device_probe,		bge_probe),
265 	DEVMETHOD(device_attach,	bge_attach),
266 	DEVMETHOD(device_detach,	bge_detach),
267 	DEVMETHOD(device_shutdown,	bge_shutdown),
268 
269 	/* bus interface */
270 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
271 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
272 
273 	/* MII interface */
274 	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
275 	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
276 	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),
277 
278 	{ 0, 0 }
279 };
280 
281 static driver_t bge_driver = {
282 	"bge",
283 	bge_methods,
284 	sizeof(struct bge_softc)
285 };
286 
287 static devclass_t bge_devclass;
288 
289 DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
290 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
291 
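/*
 * Indirect access helpers: the chip exposes internal memory and
 * registers through movable windows in PCI config space.  Writing the
 * target offset to the window base register selects the location; the
 * adjacent data register then reads or writes it.
 */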
292 static u_int32_t
293 bge_readmem_ind(sc, off)
294 	struct bge_softc *sc;
295 	int off;
296 {
297 	device_t dev;
298 
299 	dev = sc->bge_dev;
300 
301 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
302 	return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
303 }
304 
305 static void
306 bge_writemem_ind(sc, off, val)
307 	struct bge_softc *sc;
308 	int off, val;
309 {
310 	device_t dev;
311 
312 	dev = sc->bge_dev;
313 
314 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
315 	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
316 
317 	return;
318 }
319 
320 #ifdef notdef
321 static u_int32_t
322 bge_readreg_ind(sc, off)
323 	struct bge_softc *sc;
324 	int off;
325 {
326 	device_t dev;
327 
328 	dev = sc->bge_dev;
329 
330 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
331 	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
332 }
333 #endif
334 
335 static void
336 bge_writereg_ind(sc, off, val)
337 	struct bge_softc *sc;
338 	int off, val;
339 {
340 	device_t dev;
341 
342 	dev = sc->bge_dev;
343 
344 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
345 	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
346 
347 	return;
348 }
349 
350 /*
351  * Map a single buffer address.
352  */
353 
354 static void
355 bge_dma_map_addr(arg, segs, nseg, error)
356 	void *arg;
357 	bus_dma_segment_t *segs;
358 	int nseg;
359 	int error;
360 {
361 	struct bge_dmamap_arg *ctx;
362 
363 	if (error)
364 		return;
365 
366 	ctx = arg;
367 
368 	if (nseg > ctx->bge_maxsegs) {
369 		ctx->bge_maxsegs = 0;
370 		return;
371 	}
372 
373 	ctx->bge_busaddr = segs->ds_addr;
374 
375 	return;
376 }
377 
378 /*
379  * Map an mbuf chain into a TX ring.
380  */
381 
382 static void
383 bge_dma_map_tx_desc(arg, segs, nseg, mapsize, error)
384 	void *arg;
385 	bus_dma_segment_t *segs;
386 	int nseg;
387 	bus_size_t mapsize;
388 	int error;
389 {
390 	struct bge_dmamap_arg *ctx;
391 	struct bge_tx_bd *d = NULL;
392 	int i = 0, idx;
393 
394 	if (error)
395 		return;
396 
397 	ctx = arg;
398 
399 	/* Signal error to caller if there are too many segments */
400 	if (nseg > ctx->bge_maxsegs) {
401 		ctx->bge_maxsegs = 0;
402 		return;
403 	}
404 
405 	idx = ctx->bge_idx;
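	/*
	 * Fill one TX descriptor per DMA segment, wrapping around the
	 * ring with BGE_INC().  Only the last descriptor in the chain
	 * gets BGE_TXBDFLAG_END (set below), which is how the chip
	 * recognizes the end of the frame.
	 */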
406 	while (1) {
407 		d = &ctx->bge_ring[idx];
408 		d->bge_addr.bge_addr_lo =
409 		    htole32(BGE_ADDR_LO(segs[i].ds_addr));
410 		d->bge_addr.bge_addr_hi =
411 		    htole32(BGE_ADDR_HI(segs[i].ds_addr));
412 		d->bge_len = htole16(segs[i].ds_len);
413 		d->bge_flags = htole16(ctx->bge_flags);
414 		i++;
415 		if (i == nseg)
416 			break;
417 		BGE_INC(idx, BGE_TX_RING_CNT);
418 	}
419 
420 	d->bge_flags |= htole16(BGE_TXBDFLAG_END);
421 	ctx->bge_maxsegs = nseg;
422 	ctx->bge_idx = idx;
423 
424 	return;
425 }
426 
427 
428 #ifdef notdef
429 static u_int8_t
430 bge_vpd_readbyte(sc, addr)
431 	struct bge_softc *sc;
432 	int addr;
433 {
434 	int i;
435 	device_t dev;
436 	u_int32_t val;
437 
438 	dev = sc->bge_dev;
439 	pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
440 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
441 		DELAY(10);
442 		if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
443 			break;
444 	}
445 
446 	if (i == BGE_TIMEOUT * 10) {
447 		printf("bge%d: VPD read timed out\n", sc->bge_unit);
448 		return(0);
449 	}
450 
451 	val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);
452 
453 	return((val >> ((addr % 4) * 8)) & 0xFF);
454 }
455 
456 static void
457 bge_vpd_read_res(sc, res, addr)
458 	struct bge_softc *sc;
459 	struct vpd_res *res;
460 	int addr;
461 {
462 	int i;
463 	u_int8_t *ptr;
464 
465 	ptr = (u_int8_t *)res;
466 	for (i = 0; i < sizeof(struct vpd_res); i++)
467 		ptr[i] = bge_vpd_readbyte(sc, i + addr);
468 
469 	return;
470 }
471 
472 static void
473 bge_vpd_read(sc)
474 	struct bge_softc *sc;
475 {
476 	int pos = 0, i;
477 	struct vpd_res res;
478 
479 	if (sc->bge_vpd_prodname != NULL)
480 		free(sc->bge_vpd_prodname, M_DEVBUF);
481 	if (sc->bge_vpd_readonly != NULL)
482 		free(sc->bge_vpd_readonly, M_DEVBUF);
483 	sc->bge_vpd_prodname = NULL;
484 	sc->bge_vpd_readonly = NULL;
485 
486 	bge_vpd_read_res(sc, &res, pos);
487 
488 	if (res.vr_id != VPD_RES_ID) {
489 		printf("bge%d: bad VPD resource id: expected %x got %x\n",
490 			sc->bge_unit, VPD_RES_ID, res.vr_id);
491 		return;
492 	}
493 
494 	pos += sizeof(res);
495 	sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
496 	for (i = 0; i < res.vr_len; i++)
497 		sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
498 	sc->bge_vpd_prodname[i] = '\0';
499 	pos += i;
500 
501 	bge_vpd_read_res(sc, &res, pos);
502 
503 	if (res.vr_id != VPD_RES_READ) {
504 		printf("bge%d: bad VPD resource id: expected %x got %x\n",
505 		    sc->bge_unit, VPD_RES_READ, res.vr_id);
506 		return;
507 	}
508 
509 	pos += sizeof(res);
510 	sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
511 	for (i = 0; i < res.vr_len; i++)
512 		sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
513 
514 	return;
515 }
516 #endif
517 
518 /*
519  * Read a byte of data stored in the EEPROM at address 'addr.' The
520  * BCM570x supports both the traditional bitbang interface and an
521  * auto access interface for reading the EEPROM. We use the auto
522  * access method.
523  */
524 static u_int8_t
525 bge_eeprom_getbyte(sc, addr, dest)
526 	struct bge_softc *sc;
527 	int addr;
528 	u_int8_t *dest;
529 {
530 	int i;
531 	u_int32_t byte = 0;
532 
533 	/*
534 	 * Enable use of auto EEPROM access so we can avoid
535 	 * having to use the bitbang method.
536 	 */
537 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
538 
539 	/* Reset the EEPROM, load the clock period. */
540 	CSR_WRITE_4(sc, BGE_EE_ADDR,
541 	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
542 	DELAY(20);
543 
544 	/* Issue the read EEPROM command. */
545 	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
546 
547 	/* Wait for completion */
548 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
549 		DELAY(10);
550 		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
551 			break;
552 	}
553 
554 	if (i == BGE_TIMEOUT * 10) {
555 		printf("bge%d: eeprom read timed out\n", sc->bge_unit);
556 		return(0);
557 	}
558 
559 	/* Get result. */
560 	byte = CSR_READ_4(sc, BGE_EE_DATA);
561 
562 	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;
563 
564 	return(0);
565 }
566 
567 /*
568  * Read a sequence of bytes from the EEPROM.
569  */
570 static int
571 bge_read_eeprom(sc, dest, off, cnt)
572 	struct bge_softc *sc;
573 	caddr_t dest;
574 	int off;
575 	int cnt;
576 {
577 	int err = 0, i;
578 	u_int8_t byte = 0;
579 
580 	for (i = 0; i < cnt; i++) {
581 		err = bge_eeprom_getbyte(sc, off + i, &byte);
582 		if (err)
583 			break;
584 		*(dest + i) = byte;
585 	}
586 
587 	return(err ? 1 : 0);
588 }
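/*
 * Typical use, as a sketch (bge_attach fetches the station address
 * along these lines; BGE_EE_MAC_OFFSET is the EEPROM location assumed
 * here):
 *
 *	if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
 *	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN))
 *		printf("bge%d: failed to read station address\n",
 *		    sc->bge_unit);
 */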
589 
590 static int
591 bge_miibus_readreg(dev, phy, reg)
592 	device_t dev;
593 	int phy, reg;
594 {
595 	struct bge_softc *sc;
596 	u_int32_t val, autopoll;
597 	int i;
598 
599 	sc = device_get_softc(dev);
600 
601 	/*
602 	 * Broadcom's own driver always assumes the internal
603 	 * PHY is at GMII address 1. On some chips, the PHY responds
604 	 * to accesses at all addresses, which could cause us to
605  * bogusly attach the PHY 32 times at probe time. Always
606 	 * restricting the lookup to address 1 is simpler than
607 	 * trying to figure out which chips revisions should be
608 	 * special-cased.
609 	 */
610 	if (phy != 1)
611 		return(0);
612 
613 	/* Reading with autopolling on may trigger PCI errors */
614 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
615 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
616 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
617 		DELAY(40);
618 	}
619 
620 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
621 	    BGE_MIPHY(phy)|BGE_MIREG(reg));
622 
623 	for (i = 0; i < BGE_TIMEOUT; i++) {
624 		val = CSR_READ_4(sc, BGE_MI_COMM);
625 		if (!(val & BGE_MICOMM_BUSY))
626 			break;
627 	}
628 
629 	if (i == BGE_TIMEOUT) {
630 		printf("bge%d: PHY read timed out\n", sc->bge_unit);
631 		val = 0;
632 		goto done;
633 	}
634 
635 	val = CSR_READ_4(sc, BGE_MI_COMM);
636 
637 done:
638 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
639 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
640 		DELAY(40);
641 	}
642 
643 	if (val & BGE_MICOMM_READFAIL)
644 		return(0);
645 
646 	return(val & 0xFFFF);
647 }
648 
649 static int
650 bge_miibus_writereg(dev, phy, reg, val)
651 	device_t dev;
652 	int phy, reg, val;
653 {
654 	struct bge_softc *sc;
655 	u_int32_t autopoll;
656 	int i;
657 
658 	sc = device_get_softc(dev);
659 
660 	/* Writing with autopolling on may trigger PCI errors */
661 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
662 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
663 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
664 		DELAY(40);
665 	}
666 
667 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
668 	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
669 
670 	for (i = 0; i < BGE_TIMEOUT; i++) {
671 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
672 			break;
673 	}
674 
675 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
676 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
677 		DELAY(40);
678 	}
679 
680 	if (i == BGE_TIMEOUT) {
681 		printf("bge%d: PHY write timed out\n", sc->bge_unit);
682 		return(0);
683 	}
684 
685 	return(0);
686 }
687 
688 static void
689 bge_miibus_statchg(dev)
690 	device_t dev;
691 {
692 	struct bge_softc *sc;
693 	struct mii_data *mii;
694 
695 	sc = device_get_softc(dev);
696 	mii = device_get_softc(sc->bge_miibus);
697 
698 	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
699 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
700 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
701 	} else {
702 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
703 	}
704 
705 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
706 		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
707 	} else {
708 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
709 	}
710 
711 	return;
712 }
713 
714 /*
715  * Handle events that have triggered interrupts.
716  */
717 static void
718 bge_handle_events(sc)
719 	struct bge_softc		*sc;
720 {
721 
722 	return;
723 }
724 
725 /*
726  * Memory management for jumbo frames.
727  */
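/*
 * The pool is one large DMA'able block carved into BGE_JSLOTS buffers
 * of BGE_JLEN bytes.  Free buffers live on bge_jfree_listhead; buffers
 * lent to mbufs move to bge_jinuse_listhead and come back through the
 * bge_jfree() external-free callback when the mbuf is released.
 */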
728 
729 static int
730 bge_alloc_jumbo_mem(sc)
731 	struct bge_softc		*sc;
732 {
733 	caddr_t			ptr;
734 	register int		i, error;
735 	struct bge_jpool_entry   *entry;
736 
737 	/* Create tag for jumbo buffer block */
738 
739 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
740 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
741 	    NULL, BGE_JMEM, 1, BGE_JMEM, 0, NULL, NULL,
742 	    &sc->bge_cdata.bge_jumbo_tag);
743 
744 	if (error) {
745 		printf("bge%d: could not allocate jumbo dma tag\n",
746 		    sc->bge_unit);
747 		return (ENOMEM);
748 	}
749 
750 	/* Allocate DMA'able memory for jumbo buffer block */
751 
752 	error = bus_dmamem_alloc(sc->bge_cdata.bge_jumbo_tag,
753 	    (void **)&sc->bge_ldata.bge_jumbo_buf, BUS_DMA_NOWAIT,
754 	    &sc->bge_cdata.bge_jumbo_map);
755 
756 	if (error)
757 		return (ENOMEM);
758 
759 	SLIST_INIT(&sc->bge_jfree_listhead);
760 	SLIST_INIT(&sc->bge_jinuse_listhead);
761 
762 	/*
763 	 * Now divide it up into 9K pieces and save the addresses
764 	 * in an array.
765 	 */
766 	ptr = sc->bge_ldata.bge_jumbo_buf;
767 	for (i = 0; i < BGE_JSLOTS; i++) {
768 		sc->bge_cdata.bge_jslots[i] = ptr;
769 		ptr += BGE_JLEN;
770 		entry = malloc(sizeof(struct bge_jpool_entry),
771 		    M_DEVBUF, M_NOWAIT);
772 		if (entry == NULL) {
773 			bge_free_jumbo_mem(sc);
774 			sc->bge_ldata.bge_jumbo_buf = NULL;
775 			printf("bge%d: no memory for jumbo "
776 			    "buffer queue!\n", sc->bge_unit);
777 			return(ENOBUFS);
778 		}
779 		entry->slot = i;
780 		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
781 		    entry, jpool_entries);
782 	}
783 
784 	return(0);
785 }
786 
787 static void
788 bge_free_jumbo_mem(sc)
789 	struct bge_softc *sc;
790 {
791 	int i;
792 	struct bge_jpool_entry *entry;
793 
794 	for (i = 0; i < BGE_JSLOTS; i++) {
795 		entry = SLIST_FIRST(&sc->bge_jfree_listhead);
796 		SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
797 		free(entry, M_DEVBUF);
798 	}
799 
800 	/* Destroy jumbo buffer block */
801 
802 	if (sc->bge_ldata.bge_jumbo_buf)
803 		bus_dmamem_free(sc->bge_cdata.bge_jumbo_tag,
804 		    sc->bge_ldata.bge_jumbo_buf,
805 		    sc->bge_cdata.bge_jumbo_map);
806 
807 	if (sc->bge_cdata.bge_jumbo_map)
808 		bus_dmamap_destroy(sc->bge_cdata.bge_jumbo_tag,
809 		    sc->bge_cdata.bge_jumbo_map);
810 
811 	if (sc->bge_cdata.bge_jumbo_tag)
812 		bus_dma_tag_destroy(sc->bge_cdata.bge_jumbo_tag);
813 
814 	return;
815 }
816 
817 /*
818  * Allocate a jumbo buffer.
819  */
820 static void *
821 bge_jalloc(sc)
822 	struct bge_softc		*sc;
823 {
824 	struct bge_jpool_entry   *entry;
825 
826 	entry = SLIST_FIRST(&sc->bge_jfree_listhead);
827 
828 	if (entry == NULL) {
829 		printf("bge%d: no free jumbo buffers\n", sc->bge_unit);
830 		return(NULL);
831 	}
832 
833 	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
834 	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
835 	return(sc->bge_cdata.bge_jslots[entry->slot]);
836 }
837 
838 /*
839  * Release a jumbo buffer.
840  */
841 static void
842 bge_jfree(buf, args)
843 	void *buf;
844 	void *args;
845 {
846 	struct bge_jpool_entry *entry;
847 	struct bge_softc *sc;
848 	int i;
849 
850 	/* Extract the softc struct pointer. */
851 	sc = (struct bge_softc *)args;
852 
853 	if (sc == NULL)
854 		panic("bge_jfree: can't find softc pointer!");
855 
856 	/* calculate the slot this buffer belongs to */
857 
858 	i = ((vm_offset_t)buf
859 	     - (vm_offset_t)sc->bge_ldata.bge_jumbo_buf) / BGE_JLEN;
860 
861 	if ((i < 0) || (i >= BGE_JSLOTS))
862 		panic("bge_jfree: asked to free buffer that we don't manage!");
863 
864 	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
865 	if (entry == NULL)
866 		panic("bge_jfree: buffer not in use!");
867 	entry->slot = i;
868 	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
869 	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
870 
871 	return;
872 }
873 
874 
875 /*
876  * Initialize a standard receive ring descriptor.
877  */
878 static int
879 bge_newbuf_std(sc, i, m)
880 	struct bge_softc	*sc;
881 	int			i;
882 	struct mbuf		*m;
883 {
884 	struct mbuf		*m_new = NULL;
885 	struct bge_rx_bd	*r;
886 	struct bge_dmamap_arg	ctx;
887 	int			error;
888 
889 	if (m == NULL) {
890 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
891 		if (m_new == NULL) {
892 			return(ENOBUFS);
893 		}
894 
895 		MCLGET(m_new, M_DONTWAIT);
896 		if (!(m_new->m_flags & M_EXT)) {
897 			m_freem(m_new);
898 			return(ENOBUFS);
899 		}
900 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
901 	} else {
902 		m_new = m;
903 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
904 		m_new->m_data = m_new->m_ext.ext_buf;
905 	}
906 
907 	if (!sc->bge_rx_alignment_bug)
908 		m_adj(m_new, ETHER_ALIGN);
909 	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
910 	r = &sc->bge_ldata.bge_rx_std_ring[i];
911 	ctx.bge_maxsegs = 1;
912 	ctx.sc = sc;
913 	error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
914 	    sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
915 	    m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
916 	if (error || ctx.bge_maxsegs == 0) {
917 		if (m == NULL)
918 			m_freem(m_new);
919 		return(ENOMEM);
920 	}
921 	r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
922 	r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
923 	r->bge_flags = htole16(BGE_RXBDFLAG_END);
924 	r->bge_len = htole16(m_new->m_len);
925 	r->bge_idx = htole16(i);
926 
927 	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
928 	    sc->bge_cdata.bge_rx_std_dmamap[i],
929 	    BUS_DMASYNC_PREREAD);
930 
931 	return(0);
932 }
933 
934 /*
935  * Initialize a jumbo receive ring descriptor. This allocates
936  * a jumbo buffer from the pool managed internally by the driver.
937  */
938 static int
939 bge_newbuf_jumbo(sc, i, m)
940 	struct bge_softc *sc;
941 	int i;
942 	struct mbuf *m;
943 {
944 	struct mbuf *m_new = NULL;
945 	struct bge_rx_bd *r;
946 	struct bge_dmamap_arg ctx;
947 	int error;
948 
949 	if (m == NULL) {
950 		caddr_t			*buf = NULL;
951 
952 		/* Allocate the mbuf. */
953 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
954 		if (m_new == NULL) {
955 			return(ENOBUFS);
956 		}
957 
958 		/* Allocate the jumbo buffer */
959 		buf = bge_jalloc(sc);
960 		if (buf == NULL) {
961 			m_freem(m_new);
962 			printf("bge%d: jumbo allocation failed "
963 			    "-- packet dropped!\n", sc->bge_unit);
964 			return(ENOBUFS);
965 		}
966 
967 		/* Attach the buffer to the mbuf. */
968 		m_new->m_data = (void *) buf;
969 		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
970 		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, bge_jfree,
971 		    (struct bge_softc *)sc, 0, EXT_NET_DRV);
972 	} else {
973 		m_new = m;
974 		m_new->m_data = m_new->m_ext.ext_buf;
975 		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
976 	}
977 
978 	if (!sc->bge_rx_alignment_bug)
979 		m_adj(m_new, ETHER_ALIGN);
980 	/* Set up the descriptor. */
981 	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
982 	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
983 	ctx.bge_maxsegs = 1;
984 	ctx.sc = sc;
985 	error = bus_dmamap_load(sc->bge_cdata.bge_mtag_jumbo,
986 	    sc->bge_cdata.bge_rx_jumbo_dmamap[i], mtod(m_new, void *),
987 	    m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
988 	if (error || ctx.bge_maxsegs == 0) {
989 		if (m == NULL)
990 			m_freem(m_new);
991 		return(ENOMEM);
992 	}
993 	r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
994 	r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
995 	r->bge_flags = htole16(BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING);
996 	r->bge_len = htole16(m_new->m_len);
997 	r->bge_idx = htole16(i);
998 
999 	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1000 	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1001 	    BUS_DMASYNC_PREREAD);
1002 
1003 	return(0);
1004 }
1005 
1006 /*
1007  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
1008  * that's 1MB of memory, which is a lot. For now, we fill only the first
1009  * 256 ring entries and hope that our CPU is fast enough to keep up with
1010  * the NIC.
1011  */
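/*
 * (BGE_SSLOTS is that 256-entry prefill; bge_rxeof() replaces each
 * consumed buffer via bge_newbuf_std(), so the fill level is
 * maintained as traffic flows.)
 */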
1012 static int
1013 bge_init_rx_ring_std(sc)
1014 	struct bge_softc *sc;
1015 {
1016 	int i;
1017 
1018 	for (i = 0; i < BGE_SSLOTS; i++) {
1019 		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
1020 			return(ENOBUFS);
1021 	}
1022 
1023 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1024 	    sc->bge_cdata.bge_rx_std_ring_map,
1025 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1026 
1027 	sc->bge_std = i - 1;
1028 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1029 
1030 	return(0);
1031 }
1032 
1033 static void
1034 bge_free_rx_ring_std(sc)
1035 	struct bge_softc *sc;
1036 {
1037 	int i;
1038 
1039 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1040 		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1041 			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1042 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1043 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
1044 			    sc->bge_cdata.bge_rx_std_dmamap[i]);
1045 		}
1046 		bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1047 		    sizeof(struct bge_rx_bd));
1048 	}
1049 
1050 	return;
1051 }
1052 
1053 static int
1054 bge_init_rx_ring_jumbo(sc)
1055 	struct bge_softc *sc;
1056 {
1057 	int i;
1058 	struct bge_rcb *rcb;
1059 
1060 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1061 		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
1062 			return(ENOBUFS);
1063 	}
1064 
1065 	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1066 	    sc->bge_cdata.bge_rx_jumbo_ring_map,
1067 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1068 
1069 	sc->bge_jumbo = i - 1;
1070 
1071 	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1072 	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
1073 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1074 
1075 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1076 
1077 	return(0);
1078 }
1079 
1080 static void
1081 bge_free_rx_ring_jumbo(sc)
1082 	struct bge_softc *sc;
1083 {
1084 	int i;
1085 
1086 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1087 		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1088 			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1089 			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1090 			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1091 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1092 		}
1093 		bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1094 		    sizeof(struct bge_rx_bd));
1095 	}
1096 
1097 	return;
1098 }
1099 
1100 static void
1101 bge_free_tx_ring(sc)
1102 	struct bge_softc *sc;
1103 {
1104 	int i;
1105 
1106 	if (sc->bge_ldata.bge_tx_ring == NULL)
1107 		return;
1108 
1109 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1110 		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1111 			m_freem(sc->bge_cdata.bge_tx_chain[i]);
1112 			sc->bge_cdata.bge_tx_chain[i] = NULL;
1113 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
1114 			    sc->bge_cdata.bge_tx_dmamap[i]);
1115 		}
1116 		bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1117 		    sizeof(struct bge_tx_bd));
1118 	}
1119 
1120 	return;
1121 }
1122 
1123 static int
1124 bge_init_tx_ring(sc)
1125 	struct bge_softc *sc;
1126 {
1127 	sc->bge_txcnt = 0;
1128 	sc->bge_tx_saved_considx = 0;
1129 
1130 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
1131 	/* 5700 b2 errata */
1132 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1133 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
1134 
1135 	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1136 	/* 5700 b2 errata */
1137 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1138 		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1139 
1140 	return(0);
1141 }
1142 
1143 static void
1144 bge_setmulti(sc)
1145 	struct bge_softc *sc;
1146 {
1147 	struct ifnet *ifp;
1148 	struct ifmultiaddr *ifma;
1149 	u_int32_t hashes[4] = { 0, 0, 0, 0 };
1150 	int h, i;
1151 
1152 	BGE_LOCK_ASSERT(sc);
1153 
1154 	ifp = &sc->arpcom.ac_if;
1155 
1156 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1157 		for (i = 0; i < 4; i++)
1158 			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1159 		return;
1160 	}
1161 
1162 	/* First, zot all the existing filters. */
1163 	for (i = 0; i < 4; i++)
1164 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1165 
1166 	/* Now program new ones. */
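	/*
	 * The hardware filter is a 128-bit hash table spread across
	 * the four 32-bit BGE_MAR registers.  The low 7 bits of the
	 * little-endian CRC32 of each address select a bit: bits 6:5
	 * pick the register, bits 4:0 the bit position within it.
	 */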
1167 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1168 		if (ifma->ifma_addr->sa_family != AF_LINK)
1169 			continue;
1170 		h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1171 		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1172 		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1173 	}
1174 
1175 	for (i = 0; i < 4; i++)
1176 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1177 
1178 	return;
1179 }
1180 
1181 /*
1182  * Do endian, PCI and DMA initialization. Also check the on-board ROM
1183  * self-test results.
1184  */
1185 static int
1186 bge_chipinit(sc)
1187 	struct bge_softc *sc;
1188 {
1189 	int			i;
1190 	u_int32_t		dma_rw_ctl;
1191 
1192 	/* Set endianness before we access any non-PCI registers. */
1193 #if BYTE_ORDER == BIG_ENDIAN
1194 	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
1195 	    BGE_BIGENDIAN_INIT, 4);
1196 #else
1197 	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
1198 	    BGE_LITTLEENDIAN_INIT, 4);
1199 #endif
1200 
1201 	/*
1202 	 * Check the 'ROM failed' bit on the RX CPU to see if
1203 	 * self-tests passed.
1204 	 */
1205 	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1206 		printf("bge%d: RX CPU self-diagnostics failed!\n",
1207 		    sc->bge_unit);
1208 		return(ENODEV);
1209 	}
1210 
1211 	/* Clear the MAC control register */
1212 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1213 
1214 	/*
1215 	 * Clear the MAC statistics block in the NIC's
1216 	 * internal memory.
1217 	 */
1218 	for (i = BGE_STATS_BLOCK;
1219 	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1220 		BGE_MEMWIN_WRITE(sc, i, 0);
1221 
1222 	for (i = BGE_STATUS_BLOCK;
1223 	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1224 		BGE_MEMWIN_WRITE(sc, i, 0);
1225 
1226 	/* Set up the PCI DMA control register. */
1227 	if (sc->bge_pcie) {
1228 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1229 		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1230 		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1231 	} else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
1232 	    BGE_PCISTATE_PCI_BUSMODE) {
1233 		/* Conventional PCI bus */
1234 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1235 		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1236 		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1237 		    (0x0F);
1238 	} else {
1239 		/* PCI-X bus */
1240 		/*
1241 		 * The 5704 uses a different encoding of read/write
1242 		 * watermarks.
1243 		 */
1244 		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1245 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1246 			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1247 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1248 		else
1249 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1250 			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1251 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1252 			    (0x0F);
1253 
1254 		/*
1255 		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1256 		 * for hardware bugs.
1257 		 */
1258 		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1259 		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1260 			u_int32_t tmp;
1261 
1262 			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1263 			if (tmp == 0x6 || tmp == 0x7)
1264 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1265 		}
1266 	}
1267 
1268 	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1269 	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1270 	    sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1271 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
1272 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1273 	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1274 
1275 	/*
1276 	 * Set up general mode register.
1277 	 */
1278 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
1279 	    BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1280 	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1281 	    BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);
1282 
1283 	/*
1284 	 * Disable memory write invalidate.  Apparently it is not supported
1285 	 * properly by these devices.
1286 	 */
1287 	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1288 
1289 #ifdef __brokenalpha__
1290 	/*
1291 	 * Must ensure that we do not cross an 8K (byte) boundary
1292 	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
1293 	 * restriction on some ALPHA platforms with early revision
1294 	 * 21174 PCI chipsets, such as the AlphaPC 164lx
1295 	 */
1296 	PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1297 	    BGE_PCI_READ_BNDRY_1024BYTES, 4);
1298 #endif
1299 
1300 	/* Set the timer prescaler (always 66MHz) */
1301 	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1302 
1303 	return(0);
1304 }
1305 
1306 static int
1307 bge_blockinit(sc)
1308 	struct bge_softc *sc;
1309 {
1310 	struct bge_rcb *rcb;
1311 	volatile struct bge_rcb *vrcb;
1312 	int i;
1313 
1314 	/*
1315 	 * Initialize the memory window pointer register so that
1316 	 * we can access the first 32K of internal NIC RAM. This will
1317 	 * allow us to set up the TX send ring RCBs and the RX return
1318 	 * ring RCBs, plus other things which live in NIC memory.
1319 	 */
1320 	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1321 
1322 	/* Note: the BCM5704 has a smaller mbuf space than other chips. */
1323 
1324 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1325 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1326 		/* Configure mbuf memory pool */
1327 		if (sc->bge_extram) {
1328 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1329 			    BGE_EXT_SSRAM);
1330 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1331 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1332 			else
1333 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1334 		} else {
1335 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1336 			    BGE_BUFFPOOL_1);
1337 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1338 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1339 			else
1340 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1341 		}
1342 
1343 		/* Configure DMA resource pool */
1344 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1345 		    BGE_DMA_DESCRIPTORS);
1346 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1347 	}
1348 
1349 	/* Configure mbuf pool watermarks */
1350 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1351 	    sc->bge_asicrev == BGE_ASICREV_BCM5750) {
1352 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1353 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1354 	} else {
1355 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1356 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1357 	}
1358 	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1359 
1360 	/* Configure DMA resource watermarks */
1361 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1362 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1363 
1364 	/* Enable buffer manager */
1365 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1366 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1367 		CSR_WRITE_4(sc, BGE_BMAN_MODE,
1368 		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1369 
1370 		/* Poll for buffer manager start indication */
1371 		for (i = 0; i < BGE_TIMEOUT; i++) {
1372 			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1373 				break;
1374 			DELAY(10);
1375 		}
1376 
1377 		if (i == BGE_TIMEOUT) {
1378 			printf("bge%d: buffer manager failed to start\n",
1379 			    sc->bge_unit);
1380 			return(ENXIO);
1381 		}
1382 	}
1383 
1384 	/* Enable flow-through queues */
1385 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1386 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1387 
1388 	/* Wait until queue initialization is complete */
1389 	for (i = 0; i < BGE_TIMEOUT; i++) {
1390 		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1391 			break;
1392 		DELAY(10);
1393 	}
1394 
1395 	if (i == BGE_TIMEOUT) {
1396 		printf("bge%d: flow-through queue init failed\n",
1397 		    sc->bge_unit);
1398 		return(ENXIO);
1399 	}
1400 
1401 	/* Initialize the standard RX ring control block */
1402 	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1403 	rcb->bge_hostaddr.bge_addr_lo =
1404 	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1405 	rcb->bge_hostaddr.bge_addr_hi =
1406 	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1407 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1408 	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1409 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1410 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
1411 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1412 	else
1413 		rcb->bge_maxlen_flags =
1414 		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1415 	if (sc->bge_extram)
1416 		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1417 	else
1418 		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1419 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1420 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1421 
1422 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1423 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1424 
1425 	/*
1426 	 * Initialize the jumbo RX ring control block
1427 	 * We set the 'ring disabled' bit in the flags
1428 	 * field until we're actually ready to start
1429 	 * using this ring (i.e. once we set the MTU
1430 	 * high enough to require it).
1431 	 */
1432 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1433 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1434 		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1435 
1436 		rcb->bge_hostaddr.bge_addr_lo =
1437 		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1438 		rcb->bge_hostaddr.bge_addr_hi =
1439 		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1440 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1441 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
1442 		    BUS_DMASYNC_PREREAD);
1443 		rcb->bge_maxlen_flags =
1444 		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
1445 		    BGE_RCB_FLAG_RING_DISABLED);
1446 		if (sc->bge_extram)
1447 			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1448 		else
1449 			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1450 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1451 		    rcb->bge_hostaddr.bge_addr_hi);
1452 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1453 		    rcb->bge_hostaddr.bge_addr_lo);
1454 
1455 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1456 		    rcb->bge_maxlen_flags);
1457 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1458 
1459 		/* Set up dummy disabled mini ring RCB */
1460 		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1461 		rcb->bge_maxlen_flags =
1462 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1463 		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1464 		    rcb->bge_maxlen_flags);
1465 	}
1466 
1467 	/*
1468 	 * Set the BD ring replenish thresholds. The recommended
1469 	 * values are 1/8th the number of descriptors allocated to
1470 	 * each ring.
1471 	 */
1472 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1473 	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1474 
1475 	/*
1476 	 * Disable all unused send rings by setting the 'ring disabled'
1477 	 * bit in the flags field of all the TX send ring control blocks.
1478 	 * These are located in NIC memory.
1479 	 */
1480 	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1481 	    BGE_SEND_RING_RCB);
1482 	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1483 		vrcb->bge_maxlen_flags =
1484 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1485 		vrcb->bge_nicaddr = 0;
1486 		vrcb++;
1487 	}
1488 
1489 	/* Configure TX RCB 0 (we use only the first ring) */
1490 	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1491 	    BGE_SEND_RING_RCB);
1492 	vrcb->bge_hostaddr.bge_addr_lo =
1493 	    htole32(BGE_ADDR_LO(sc->bge_ldata.bge_tx_ring_paddr));
1494 	vrcb->bge_hostaddr.bge_addr_hi =
1495 	    htole32(BGE_ADDR_HI(sc->bge_ldata.bge_tx_ring_paddr));
1496 	vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
1497 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1498 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1499 		vrcb->bge_maxlen_flags =
1500 		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);
1501 
1502 	/* Disable all unused RX return rings */
1503 	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1504 	    BGE_RX_RETURN_RING_RCB);
1505 	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1506 		vrcb->bge_hostaddr.bge_addr_hi = 0;
1507 		vrcb->bge_hostaddr.bge_addr_lo = 0;
1508 		vrcb->bge_maxlen_flags =
1509 		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1510 		    BGE_RCB_FLAG_RING_DISABLED);
1511 		vrcb->bge_nicaddr = 0;
1512 		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1513 		    (i * (sizeof(u_int64_t))), 0);
1514 		vrcb++;
1515 	}
1516 
1517 	/* Initialize RX ring indexes */
1518 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1519 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1520 	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1521 
1522 	/*
1523 	 * Set up RX return ring 0
1524 	 * Note that the NIC address for RX return rings is 0x00000000.
1525 	 * The return rings live entirely within the host, so the
1526 	 * nicaddr field in the RCB isn't used.
1527 	 */
1528 	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1529 	    BGE_RX_RETURN_RING_RCB);
1530 	vrcb->bge_hostaddr.bge_addr_lo =
1531 	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_return_ring_paddr);
1532 	vrcb->bge_hostaddr.bge_addr_hi =
1533 	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_return_ring_paddr);
1534 	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
1535 	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
1536 	vrcb->bge_nicaddr = 0x00000000;
1537 	vrcb->bge_maxlen_flags =
1538 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0);
1539 
1540 	/* Set random backoff seed for TX */
1541 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1542 	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1543 	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1544 	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1545 	    BGE_TX_BACKOFF_SEED_MASK);
1546 
1547 	/* Set inter-packet gap */
1548 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1549 
1550 	/*
1551 	 * Specify which ring to use for packets that don't match
1552 	 * any RX rules.
1553 	 */
1554 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1555 
1556 	/*
1557 	 * Configure number of RX lists. One interrupt distribution
1558 	 * list, sixteen active lists, one bad frames class.
1559 	 */
1560 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1561 
1562 	/* Initialize RX list placement stats mask. */
1563 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1564 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1565 
1566 	/* Disable host coalescing until we get it set up */
1567 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1568 
1569 	/* Poll to make sure it's shut down. */
1570 	for (i = 0; i < BGE_TIMEOUT; i++) {
1571 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1572 			break;
1573 		DELAY(10);
1574 	}
1575 
1576 	if (i == BGE_TIMEOUT) {
1577 		printf("bge%d: host coalescing engine failed to idle\n",
1578 		    sc->bge_unit);
1579 		return(ENXIO);
1580 	}
1581 
1582 	/* Set up host coalescing defaults */
1583 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1584 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1585 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1586 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1587 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1588 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1589 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1590 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1591 	}
1592 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1593 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1594 
1595 	/* Set up address of statistics block */
1596 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1597 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1598 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1599 		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1600 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1601 		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1602 		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1603 		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1604 		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1605 	}
1606 
1607 	/* Set up address of status block */
1608 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1609 	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1610 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1611 	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1612 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
1613 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
1614 	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1615 	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1616 
1617 	/* Turn on host coalescing state machine */
1618 	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1619 
1620 	/* Turn on RX BD completion state machine and enable attentions */
1621 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
1622 	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1623 
1624 	/* Turn on RX list placement state machine */
1625 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1626 
1627 	/* Turn on RX list selector state machine. */
1628 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1629 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1630 		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1631 
1632 	/* Turn on DMA, clear stats */
1633 	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1634 	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1635 	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1636 	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1637 	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1638 
1639 	/* Set misc. local control, enable interrupts on attentions */
1640 	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1641 
1642 #ifdef notdef
1643 	/* Assert GPIO pins for PHY reset */
1644 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1645 	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1646 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1647 	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1648 #endif
1649 
1650 	/* Turn on DMA completion state machine */
1651 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1652 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1653 		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1654 
1655 	/* Turn on write DMA state machine */
1656 	CSR_WRITE_4(sc, BGE_WDMA_MODE,
1657 	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1658 
1659 	/* Turn on read DMA state machine */
1660 	CSR_WRITE_4(sc, BGE_RDMA_MODE,
1661 	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1662 
1663 	/* Turn on RX data completion state machine */
1664 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1665 
1666 	/* Turn on RX BD initiator state machine */
1667 	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1668 
1669 	/* Turn on RX data and RX BD initiator state machine */
1670 	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1671 
1672 	/* Turn on Mbuf cluster free state machine */
1673 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1674 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1675 		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1676 
1677 	/* Turn on send BD completion state machine */
1678 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1679 
1680 	/* Turn on send data completion state machine */
1681 	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1682 
1683 	/* Turn on send data initiator state machine */
1684 	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1685 
1686 	/* Turn on send BD initiator state machine */
1687 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1688 
1689 	/* Turn on send BD selector state machine */
1690 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1691 
1692 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1693 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1694 	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1695 
1696 	/* ack/clear link change events */
1697 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1698 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1699 	    BGE_MACSTAT_LINK_CHANGED);
1700 	CSR_WRITE_4(sc, BGE_MI_STS, 0);
1701 
1702 	/* Enable PHY auto polling (for MII/GMII only) */
1703 	if (sc->bge_tbi) {
1704 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1705 	} else {
1706 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1707 		if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
1708 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1709 			    BGE_EVTENB_MI_INTERRUPT);
1710 	}
1711 
1712 	/* Enable link state change attentions. */
1713 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1714 
1715 	return(0);
1716 }
1717 
1718 /*
1719  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1720  * against our list and return its name if we find a match. Note
1721  * that since the Broadcom controller contains VPD support, we
1722  * can get the device name string from the controller itself instead
1723  * of the compiled-in string. This is a little slow, but it guarantees
1724  * we'll always announce the right product name.
1725  */
1726 static int
1727 bge_probe(dev)
1728 	device_t dev;
1729 {
1730 	struct bge_type *t;
1731 	struct bge_softc *sc;
1732 	char *descbuf;
1733 
1734 	t = bge_devs;
1735 
1736 	sc = device_get_softc(dev);
1737 	bzero(sc, sizeof(struct bge_softc));
1738 	sc->bge_unit = device_get_unit(dev);
1739 	sc->bge_dev = dev;
1740 
1741 	while(t->bge_name != NULL) {
1742 		if ((pci_get_vendor(dev) == t->bge_vid) &&
1743 		    (pci_get_device(dev) == t->bge_did)) {
1744 #ifdef notdef
1745 			bge_vpd_read(sc);
1746 			device_set_desc(dev, sc->bge_vpd_prodname);
1747 #endif
1748 			descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
1749 			if (descbuf == NULL)
1750 				return(ENOMEM);
1751 			snprintf(descbuf, BGE_DEVDESC_MAX,
1752 			    "%s, ASIC rev. %#04x", t->bge_name,
1753 			    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
1754 			device_set_desc_copy(dev, descbuf);
1755 			if (pci_get_subvendor(dev) == DELL_VENDORID)
1756 				sc->bge_no_3_led = 1;
1757 			free(descbuf, M_TEMP);
1758 			return(0);
1759 		}
1760 		t++;
1761 	}
1762 
1763 	return(ENXIO);
1764 }
1765 
1766 static void
1767 bge_dma_free(sc)
1768 	struct bge_softc *sc;
1769 {
1770 	int i;
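
	/*
	 * Tear down everything created by bge_dma_alloc(): per-buffer
	 * DMA maps and the mbuf tags first, then each ring's DMA map
	 * and backing memory, and finally the per-ring tags and the
	 * parent tag.  Maps are unloaded before their backing memory
	 * is freed; bus_dmamem_free() also releases the map that
	 * bus_dmamem_alloc() created.
	 */
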
1773 	/* Destroy DMA maps for RX buffers */
1774 
1775 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1776 		if (sc->bge_cdata.bge_rx_std_dmamap[i])
1777 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1778 			    sc->bge_cdata.bge_rx_std_dmamap[i]);
1779 	}
1780 
1781 	/* Destroy DMA maps for jumbo RX buffers */
1782 
1783 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1784 		if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1785 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1786 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1787 	}
1788 
1789 	/* Destroy DMA maps for TX buffers */
1790 
1791 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1792 		if (sc->bge_cdata.bge_tx_dmamap[i])
1793 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1794 			    sc->bge_cdata.bge_tx_dmamap[i]);
1795 	}
1796 
1797 	if (sc->bge_cdata.bge_mtag)
1798 		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
1801 	/* Destroy standard RX ring */
1802 
	if (sc->bge_cdata.bge_rx_std_ring_map)
		bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_cdata.bge_rx_std_ring_map);

	if (sc->bge_ldata.bge_rx_std_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_ldata.bge_rx_std_ring,
		    sc->bge_cdata.bge_rx_std_ring_map);
1814 
1815 	if (sc->bge_cdata.bge_rx_std_ring_tag)
1816 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1817 
1818 	/* Destroy jumbo RX ring */
1819 
	if (sc->bge_cdata.bge_rx_jumbo_ring_map)
		bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map);

	if (sc->bge_ldata.bge_rx_jumbo_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_ldata.bge_rx_jumbo_ring,
		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1831 
1832 	if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1833 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1834 
1835 	/* Destroy RX return ring */
1836 
	if (sc->bge_cdata.bge_rx_return_ring_map)
		bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
		    sc->bge_cdata.bge_rx_return_ring_map);

	if (sc->bge_ldata.bge_rx_return_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
		    sc->bge_ldata.bge_rx_return_ring,
		    sc->bge_cdata.bge_rx_return_ring_map);
1848 
1849 	if (sc->bge_cdata.bge_rx_return_ring_tag)
1850 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
1851 
1852 	/* Destroy TX ring */
1853 
	if (sc->bge_cdata.bge_tx_ring_map)
		bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
		    sc->bge_cdata.bge_tx_ring_map);

	if (sc->bge_ldata.bge_tx_ring)
		bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
		    sc->bge_ldata.bge_tx_ring,
		    sc->bge_cdata.bge_tx_ring_map);
1865 
1866 	if (sc->bge_cdata.bge_tx_ring_tag)
1867 		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
1868 
1869 	/* Destroy status block */
1870 
	if (sc->bge_cdata.bge_status_map)
		bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
		    sc->bge_cdata.bge_status_map);

	if (sc->bge_ldata.bge_status_block)
		bus_dmamem_free(sc->bge_cdata.bge_status_tag,
		    sc->bge_ldata.bge_status_block,
		    sc->bge_cdata.bge_status_map);
1882 
1883 	if (sc->bge_cdata.bge_status_tag)
1884 		bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1885 
1886 	/* Destroy statistics block */
1887 
	if (sc->bge_cdata.bge_stats_map)
		bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
		    sc->bge_cdata.bge_stats_map);

	if (sc->bge_ldata.bge_stats)
		bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
		    sc->bge_ldata.bge_stats,
		    sc->bge_cdata.bge_stats_map);
1899 
1900 	if (sc->bge_cdata.bge_stats_tag)
1901 		bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1902 
1903 	/* Destroy the parent tag */
1904 
1905 	if (sc->bge_cdata.bge_parent_tag)
1906 		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1907 
1908 	return;
1909 }
1910 
1911 static int
1912 bge_dma_alloc(dev)
1913 	device_t dev;
1914 {
1915 	struct bge_softc *sc;
1916 	int nseg, i, error;
1917 	struct bge_dmamap_arg ctx;
1918 
1919 	sc = device_get_softc(dev);
1920 
1921 	/*
1922 	 * Allocate the parent bus DMA tag appropriate for PCI.
1923 	 */
1924 #define BGE_NSEG_NEW 32
1925 	error = bus_dma_tag_create(NULL,	/* parent */
1926 			PAGE_SIZE, 0,		/* alignment, boundary */
1927 			BUS_SPACE_MAXADDR,	/* lowaddr */
1928 			BUS_SPACE_MAXADDR_32BIT,/* highaddr */
1929 			NULL, NULL,		/* filter, filterarg */
1930 			MAXBSIZE, BGE_NSEG_NEW,	/* maxsize, nsegments */
1931 			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1932 			0,			/* flags */
1933 			NULL, NULL,		/* lockfunc, lockarg */
			&sc->bge_cdata.bge_parent_tag);

	if (error) {
		device_printf(dev, "could not allocate parent dma tag\n");
		return (ENOMEM);
	}

1936 	/*
1937 	 * Create tag for RX mbufs.
1938 	 */
1939 	nseg = 32;
1940 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
1941 	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1942 	    NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL,
1943 	    &sc->bge_cdata.bge_mtag);
1944 
1945 	if (error) {
1946 		device_printf(dev, "could not allocate dma tag\n");
1947 		return (ENOMEM);
1948 	}
1949 
1950 	/* Create DMA maps for RX buffers */
1951 
1952 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1953 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1954 			    &sc->bge_cdata.bge_rx_std_dmamap[i]);
1955 		if (error) {
1956 			device_printf(dev, "can't create DMA map for RX\n");
1957 			return(ENOMEM);
1958 		}
1959 	}
1960 
1961 	/* Create DMA maps for TX buffers */
1962 
1963 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1964 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1965 			    &sc->bge_cdata.bge_tx_dmamap[i]);
1966 		if (error) {
			device_printf(dev, "can't create DMA map for TX\n");
1968 			return(ENOMEM);
1969 		}
1970 	}
1971 
1972 	/* Create tag for standard RX ring */
1973 
1974 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1975 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1976 	    NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1977 	    NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1978 
1979 	if (error) {
1980 		device_printf(dev, "could not allocate dma tag\n");
1981 		return (ENOMEM);
1982 	}
1983 
1984 	/* Allocate DMA'able memory for standard RX ring */
1985 
1986 	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1987 	    (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1988 	    &sc->bge_cdata.bge_rx_std_ring_map);
1989 	if (error)
1990 		return (ENOMEM);
1991 
1992 	bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1993 
1994 	/* Load the address of the standard RX ring */
1995 
1996 	ctx.bge_maxsegs = 1;
1997 	ctx.sc = sc;
1998 
1999 	error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
2000 	    sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
2001 	    BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2002 
2003 	if (error)
2004 		return (ENOMEM);
2005 
2006 	sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
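
	/*
	 * Note: with BUS_DMA_NOWAIT the load either completes
	 * immediately or fails outright, so the bus address the
	 * callback deposited in ctx is valid to read back here.
	 */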
2007 
2008 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2009 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2010 
2011 		/*
2012 		 * Create tag for jumbo mbufs.
2013 		 * This is really a bit of a kludge. We allocate a special
2014 		 * jumbo buffer pool which (thanks to the way our DMA
2015 		 * memory allocation works) will consist of contiguous
2016 		 * pages. This means that even though a jumbo buffer might
2017 		 * be larger than a page size, we don't really need to
2018 		 * map it into more than one DMA segment. However, the
2019 		 * default mbuf tag will result in multi-segment mappings,
2020 		 * so we have to create a special jumbo mbuf tag that
2021 		 * lets us get away with mapping the jumbo buffers as
2022 		 * a single segment. I think eventually the driver should
2023 		 * be changed so that it uses ordinary mbufs and cluster
2024 		 * buffers, i.e. jumbo frames can span multiple DMA
2025 		 * descriptors. But that's a project for another day.
2026 		 */
2027 
2028 		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2029 		    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2030 		    NULL, MCLBYTES * nseg, nseg, BGE_JLEN, 0, NULL, NULL,
2031 		    &sc->bge_cdata.bge_mtag_jumbo);
2032 
2033 		if (error) {
2034 			device_printf(dev, "could not allocate dma tag\n");
2035 			return (ENOMEM);
2036 		}
2037 
2038 		/* Create tag for jumbo RX ring */
2039 
2040 		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2041 		    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2042 		    NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
2043 		    NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
2044 
2045 		if (error) {
2046 			device_printf(dev, "could not allocate dma tag\n");
2047 			return (ENOMEM);
2048 		}
2049 
2050 		/* Allocate DMA'able memory for jumbo RX ring */
2051 
2052 		error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2053 		    (void **)&sc->bge_ldata.bge_rx_jumbo_ring, BUS_DMA_NOWAIT,
2054 		    &sc->bge_cdata.bge_rx_jumbo_ring_map);
2055 		if (error)
2056 			return (ENOMEM);
2057 
2058 		bzero((char *)sc->bge_ldata.bge_rx_jumbo_ring,
2059 		    BGE_JUMBO_RX_RING_SZ);
2060 
2061 		/* Load the address of the jumbo RX ring */
2062 
2063 		ctx.bge_maxsegs = 1;
2064 		ctx.sc = sc;
2065 
2066 		error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2067 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
2068 		    sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
2069 		    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2070 
2071 		if (error)
2072 			return (ENOMEM);
2073 
2074 		sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
2075 
2076 		/* Create DMA maps for jumbo RX buffers */
2077 
2078 		for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2079 			error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2080 				    0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2081 			if (error) {
2082 				device_printf(dev,
2083 				    "can't create DMA map for RX\n");
2084 				return(ENOMEM);
2085 			}
2086 		}
2087 
2088 	}
2089 
2090 	/* Create tag for RX return ring */
2091 
2092 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2093 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2094 	    NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
2095 	    NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
2096 
2097 	if (error) {
2098 		device_printf(dev, "could not allocate dma tag\n");
2099 		return (ENOMEM);
2100 	}
2101 
2102 	/* Allocate DMA'able memory for RX return ring */
2103 
2104 	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
2105 	    (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
2106 	    &sc->bge_cdata.bge_rx_return_ring_map);
2107 	if (error)
2108 		return (ENOMEM);
2109 
2110 	bzero((char *)sc->bge_ldata.bge_rx_return_ring,
2111 	    BGE_RX_RTN_RING_SZ(sc));
2112 
2113 	/* Load the address of the RX return ring */
2114 
2115 	ctx.bge_maxsegs = 1;
2116 	ctx.sc = sc;
2117 
2118 	error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
2119 	    sc->bge_cdata.bge_rx_return_ring_map,
2120 	    sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
2121 	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2122 
2123 	if (error)
2124 		return (ENOMEM);
2125 
2126 	sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
2127 
2128 	/* Create tag for TX ring */
2129 
2130 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2131 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2132 	    NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
2133 	    &sc->bge_cdata.bge_tx_ring_tag);
2134 
2135 	if (error) {
2136 		device_printf(dev, "could not allocate dma tag\n");
2137 		return (ENOMEM);
2138 	}
2139 
2140 	/* Allocate DMA'able memory for TX ring */
2141 
2142 	error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
2143 	    (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
2144 	    &sc->bge_cdata.bge_tx_ring_map);
2145 	if (error)
2146 		return (ENOMEM);
2147 
2148 	bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
2149 
2150 	/* Load the address of the TX ring */
2151 
2152 	ctx.bge_maxsegs = 1;
2153 	ctx.sc = sc;
2154 
2155 	error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
2156 	    sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
2157 	    BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2158 
2159 	if (error)
2160 		return (ENOMEM);
2161 
2162 	sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
2163 
2164 	/* Create tag for status block */
2165 
2166 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2167 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2168 	    NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
2169 	    NULL, NULL, &sc->bge_cdata.bge_status_tag);
2170 
2171 	if (error) {
2172 		device_printf(dev, "could not allocate dma tag\n");
2173 		return (ENOMEM);
2174 	}
2175 
2176 	/* Allocate DMA'able memory for status block */
2177 
2178 	error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
2179 	    (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
2180 	    &sc->bge_cdata.bge_status_map);
2181 	if (error)
2182 		return (ENOMEM);
2183 
2184 	bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2185 
2186 	/* Load the address of the status block */
2187 
2188 	ctx.sc = sc;
2189 	ctx.bge_maxsegs = 1;
2190 
2191 	error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2192 	    sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2193 	    BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2194 
2195 	if (error)
2196 		return (ENOMEM);
2197 
2198 	sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2199 
2200 	/* Create tag for statistics block */
2201 
2202 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2203 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2204 	    NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2205 	    &sc->bge_cdata.bge_stats_tag);
2206 
2207 	if (error) {
2208 		device_printf(dev, "could not allocate dma tag\n");
2209 		return (ENOMEM);
2210 	}
2211 
2212 	/* Allocate DMA'able memory for statistics block */
2213 
2214 	error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2215 	    (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2216 	    &sc->bge_cdata.bge_stats_map);
2217 	if (error)
2218 		return (ENOMEM);
2219 
2220 	bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2221 
	/* Load the address of the statistics block */
2223 
2224 	ctx.sc = sc;
2225 	ctx.bge_maxsegs = 1;
2226 
2227 	error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2228 	    sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2229 	    BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2230 
2231 	if (error)
2232 		return (ENOMEM);
2233 
2234 	sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2235 
2236 	return(0);
2237 }
2238 
2239 static int
2240 bge_attach(dev)
2241 	device_t dev;
2242 {
2243 	struct ifnet *ifp;
2244 	struct bge_softc *sc;
2245 	u_int32_t hwcfg = 0;
2246 	u_int32_t mac_addr = 0;
2247 	int unit, error = 0, rid;
2248 
2249 	sc = device_get_softc(dev);
2250 	unit = device_get_unit(dev);
2251 	sc->bge_dev = dev;
2252 	sc->bge_unit = unit;
2253 
2254 	/*
2255 	 * Map control/status registers.
2256 	 */
2257 	pci_enable_busmaster(dev);
2258 
2259 	rid = BGE_PCI_BAR0;
2260 	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2261 	    RF_ACTIVE|PCI_RF_DENSE);
2262 
2263 	if (sc->bge_res == NULL) {
2264 		printf ("bge%d: couldn't map memory\n", unit);
2265 		error = ENXIO;
2266 		goto fail;
2267 	}
2268 
2269 	sc->bge_btag = rman_get_bustag(sc->bge_res);
2270 	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2271 	sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);
2272 
2273 	/* Allocate interrupt */
2274 	rid = 0;
2275 
2276 	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2277 	    RF_SHAREABLE | RF_ACTIVE);
2278 
2279 	if (sc->bge_irq == NULL) {
2280 		printf("bge%d: couldn't map interrupt\n", unit);
2281 		error = ENXIO;
2282 		goto fail;
2283 	}
2284 
2285 	sc->bge_unit = unit;
2286 
2287 	BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2288 
2289 	/* Save ASIC rev. */
2290 
2291 	sc->bge_chipid =
2292 	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2293 	    BGE_PCIMISCCTL_ASICREV;
2294 	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2295 	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2296 
2297 	/*
	 * XXX: Broadcom Linux driver.  Not in specs or errata.
2299 	 * PCI-Express?
2300 	 */
2301 	if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
2302 		u_int32_t v;
2303 
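		/*
		 * Peek at the PCI capability list: bits 8-15 of the
		 * dword read at the MSI capability offset hold the
		 * next-capability pointer.  If it points at the
		 * expected PCI Express capability offset and the
		 * capability ID there matches, flag the device as
		 * PCIE.
		 */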
2304 		v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
2305 		if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
2306 			v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2307 			if ((v & 0xff) == BGE_PCIE_CAPID)
2308 				sc->bge_pcie = 1;
2309 		}
2310 	}
2311 
2312 	/* Try to reset the chip. */
2313 	bge_reset(sc);
2314 
2315 	if (bge_chipinit(sc)) {
2316 		printf("bge%d: chip initialization failed\n", sc->bge_unit);
2317 		bge_release_resources(sc);
2318 		error = ENXIO;
2319 		goto fail;
2320 	}
2321 
	/*
	 * Get the station address.  It normally lives in NIC memory
	 * at offset 0x0c14, prefixed with the 16-bit signature 0x484b
	 * (ASCII "HK"); if the signature is absent, fall back to
	 * reading the address from the EEPROM.
	 */
2325 	mac_addr = bge_readmem_ind(sc, 0x0c14);
2326 	if ((mac_addr >> 16) == 0x484b) {
2327 		sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
2328 		sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
2329 		mac_addr = bge_readmem_ind(sc, 0x0c18);
2330 		sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
2331 		sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
2332 		sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
2333 		sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
2334 	} else if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
2335 	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2336 		printf("bge%d: failed to read station address\n", unit);
2337 		bge_release_resources(sc);
2338 		error = ENXIO;
2339 		goto fail;
2340 	}
2341 
	/* The 5705 and 5750 limit the RX return ring to 512 entries. */
2343 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2344 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
2345 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2346 	else
2347 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2348 
2349 	if (bge_dma_alloc(dev)) {
2350 		printf ("bge%d: failed to allocate DMA resources\n",
2351 		    sc->bge_unit);
2352 		bge_release_resources(sc);
2353 		error = ENXIO;
2354 		goto fail;
2355 	}
2356 
2357 	/*
2358 	 * Try to allocate memory for jumbo buffers.
2359 	 * The 5705 does not appear to support jumbo frames.
2360 	 */
2361 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2362 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2363 		if (bge_alloc_jumbo_mem(sc)) {
2364 			printf("bge%d: jumbo buffer allocation "
2365 			    "failed\n", sc->bge_unit);
2366 			bge_release_resources(sc);
2367 			error = ENXIO;
2368 			goto fail;
2369 		}
2370 	}
2371 
2372 	/* Set default tuneable values. */
2373 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2374 	sc->bge_rx_coal_ticks = 150;
2375 	sc->bge_tx_coal_ticks = 150;
2376 	sc->bge_rx_max_coal_bds = 64;
2377 	sc->bge_tx_max_coal_bds = 128;
2378 
2379 	/* Set up ifnet structure */
2380 	ifp = &sc->arpcom.ac_if;
2381 	ifp->if_softc = sc;
2382 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2383 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2384 	ifp->if_ioctl = bge_ioctl;
2385 	ifp->if_start = bge_start;
2386 	ifp->if_watchdog = bge_watchdog;
2387 	ifp->if_init = bge_init;
2388 	ifp->if_mtu = ETHERMTU;
2389 	ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2390 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2391 	IFQ_SET_READY(&ifp->if_snd);
2392 	ifp->if_hwassist = BGE_CSUM_FEATURES;
2393 	/* NB: the code for RX csum offload is disabled for now */
2394 	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING |
2395 	    IFCAP_VLAN_MTU;
2396 	ifp->if_capenable = ifp->if_capabilities;
2397 
2398 	/*
2399 	 * Figure out what sort of media we have by checking the
2400 	 * hardware config word in the first 32k of NIC internal memory,
2401 	 * or fall back to examining the EEPROM if necessary.
2402 	 * Note: on some BCM5700 cards, this value appears to be unset.
2403 	 * If that's the case, we have to rely on identifying the NIC
2404 	 * by its PCI subsystem ID, as we do below for the SysKonnect
2405 	 * SK-9D41.
2406 	 */
2407 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2408 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2409 	else {
2410 		bge_read_eeprom(sc, (caddr_t)&hwcfg,
2411 				BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
2412 		hwcfg = ntohl(hwcfg);
2413 	}
2414 
2415 	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2416 		sc->bge_tbi = 1;
2417 
2418 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
2419 	if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2420 		sc->bge_tbi = 1;
2421 
2422 	if (sc->bge_tbi) {
2423 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2424 		    bge_ifmedia_upd, bge_ifmedia_sts);
2425 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2426 		ifmedia_add(&sc->bge_ifmedia,
2427 		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2428 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2429 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2430 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2431 	} else {
2432 		/*
2433 		 * Do transceiver setup.
2434 		 */
2435 		if (mii_phy_probe(dev, &sc->bge_miibus,
2436 		    bge_ifmedia_upd, bge_ifmedia_sts)) {
2437 			printf("bge%d: MII without any PHY!\n", sc->bge_unit);
2438 			bge_release_resources(sc);
2439 			bge_free_jumbo_mem(sc);
2440 			error = ENXIO;
2441 			goto fail;
2442 		}
2443 	}
2444 
2445 	/*
2446 	 * When using the BCM5701 in PCI-X mode, data corruption has
2447 	 * been observed in the first few bytes of some received packets.
2448 	 * Aligning the packet buffer in memory eliminates the corruption.
2449 	 * Unfortunately, this misaligns the packet payloads.  On platforms
2450 	 * which do not support unaligned accesses, we will realign the
2451 	 * payloads by copying the received packets.
2452 	 */
2453 	switch (sc->bge_chipid) {
2454 	case BGE_CHIPID_BCM5701_A0:
2455 	case BGE_CHIPID_BCM5701_B0:
2456 	case BGE_CHIPID_BCM5701_B2:
2457 	case BGE_CHIPID_BCM5701_B5:
2458 		/* If in PCI-X mode, work around the alignment bug. */
2459 		if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2460 		    (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
2461 		    BGE_PCISTATE_PCI_BUSSPEED)
2462 			sc->bge_rx_alignment_bug = 1;
2463 		break;
2464 	}
2465 
2466 	/*
2467 	 * Call MI attach routine.
2468 	 */
2469 	ether_ifattach(ifp, sc->arpcom.ac_enaddr);
2470 	callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE);
2471 
2472 	/*
2473 	 * Hookup IRQ last.
2474 	 */
2475 	error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2476 	   bge_intr, sc, &sc->bge_intrhand);
2477 
2478 	if (error) {
2479 		bge_release_resources(sc);
2480 		printf("bge%d: couldn't set up irq\n", unit);
2481 	}
2482 
2483 fail:
2484 	return(error);
2485 }
2486 
2487 static int
2488 bge_detach(dev)
2489 	device_t dev;
2490 {
2491 	struct bge_softc *sc;
2492 	struct ifnet *ifp;
2493 
2494 	sc = device_get_softc(dev);
2495 	ifp = &sc->arpcom.ac_if;
2496 
2497 	BGE_LOCK(sc);
2498 	bge_stop(sc);
2499 	bge_reset(sc);
2500 	BGE_UNLOCK(sc);
2501 
2502 	ether_ifdetach(ifp);
2503 
2504 	if (sc->bge_tbi) {
2505 		ifmedia_removeall(&sc->bge_ifmedia);
2506 	} else {
2507 		bus_generic_detach(dev);
2508 		device_delete_child(dev, sc->bge_miibus);
2509 	}
2510 
2511 	bge_release_resources(sc);
2512 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2513 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
2514 		bge_free_jumbo_mem(sc);
2515 
2516 	return(0);
2517 }
2518 
2519 static void
2520 bge_release_resources(sc)
2521 	struct bge_softc *sc;
2522 {
2523 	device_t dev;
2524 
2525 	dev = sc->bge_dev;
2526 
2527 	if (sc->bge_vpd_prodname != NULL)
2528 		free(sc->bge_vpd_prodname, M_DEVBUF);
2529 
2530 	if (sc->bge_vpd_readonly != NULL)
2531 		free(sc->bge_vpd_readonly, M_DEVBUF);
2532 
2533 	if (sc->bge_intrhand != NULL)
2534 		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2535 
2536 	if (sc->bge_irq != NULL)
2537 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2538 
2539 	if (sc->bge_res != NULL)
2540 		bus_release_resource(dev, SYS_RES_MEMORY,
2541 		    BGE_PCI_BAR0, sc->bge_res);
2542 
2543 	bge_dma_free(sc);
2544 
2545 	if (mtx_initialized(&sc->bge_mtx))	/* XXX */
2546 		BGE_LOCK_DESTROY(sc);
2547 
2548 	return;
2549 }
2550 
2551 static void
2552 bge_reset(sc)
2553 	struct bge_softc *sc;
2554 {
2555 	device_t dev;
2556 	u_int32_t cachesize, command, pcistate, reset;
2557 	int i, val = 0;
2558 
2559 	dev = sc->bge_dev;
2560 
2561 	/* Save some important PCI state. */
2562 	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2563 	command = pci_read_config(dev, BGE_PCI_CMD, 4);
2564 	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2565 
2566 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2567 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2568 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2569 
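	/*
	 * The (65<<1) term below programs the timer prescaler field
	 * of the misc. config register; 65 appears to derive a 1 usec
	 * timer tick from the 66MHz core clock (an assumption -- the
	 * field is not documented in this file).
	 */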
2570 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2571 
2572 	/* XXX: Broadcom Linux driver. */
2573 	if (sc->bge_pcie) {
2574 		if (CSR_READ_4(sc, 0x7e2c) == 0x60)	/* PCIE 1.0 */
2575 			CSR_WRITE_4(sc, 0x7e2c, 0x20);
2576 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2577 			/* Prevent PCIE link training during global reset */
2578 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2579 			reset |= (1<<29);
2580 		}
2581 	}
2582 
2583 	/* Issue global reset */
2584 	bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2585 
2586 	DELAY(1000);
2587 
2588 	/* XXX: Broadcom Linux driver. */
2589 	if (sc->bge_pcie) {
2590 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2591 			uint32_t v;
2592 
2593 			DELAY(500000); /* wait for link training to complete */
2594 			v = pci_read_config(dev, 0xc4, 4);
2595 			pci_write_config(dev, 0xc4, v | (1<<15), 4);
2596 		}
2597 		/* Set PCIE max payload size and clear error status. */
2598 		pci_write_config(dev, 0xd8, 0xf5000, 4);
2599 	}
2600 
2601 	/* Reset some of the PCI state that got zapped by reset */
2602 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2603 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2604 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2605 	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2606 	pci_write_config(dev, BGE_PCI_CMD, command, 4);
2607 	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2608 
2609 	/* Enable memory arbiter. */
2610 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2611 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
2612 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2613 
2614 	/*
2615 	 * Prevent PXE restart: write a magic number to the
2616 	 * general communications memory at 0xB50.
2617 	 */
2618 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2619 	/*
	 * Poll the location we just wrote until
2621 	 * we see the 1's complement of the magic number.
2622 	 * This indicates that the firmware initialization
2623 	 * is complete.
2624 	 */
2625 	for (i = 0; i < BGE_TIMEOUT; i++) {
2626 		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2627 		if (val == ~BGE_MAGIC_NUMBER)
2628 			break;
2629 		DELAY(10);
2630 	}
2631 
2632 	if (i == BGE_TIMEOUT) {
2633 		printf("bge%d: firmware handshake timed out\n", sc->bge_unit);
2634 		return;
2635 	}
2636 
2637 	/*
2638 	 * XXX Wait for the value of the PCISTATE register to
2639 	 * return to its original pre-reset state. This is a
2640 	 * fairly good indicator of reset completion. If we don't
2641 	 * wait for the reset to fully complete, trying to read
2642 	 * from the device's non-PCI registers may yield garbage
2643 	 * results.
2644 	 */
2645 	for (i = 0; i < BGE_TIMEOUT; i++) {
2646 		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2647 			break;
2648 		DELAY(10);
2649 	}
2650 
2651 	/* Fix up byte swapping */
2652 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
2653 	    BGE_MODECTL_BYTESWAP_DATA);
2654 
2655 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2656 
2657 	/*
2658 	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to ensure the SERDES drive level is set
2660 	 * to 1.2V.
2661 	 */
2662 	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
2663 		uint32_t serdescfg;
2664 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2665 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
2666 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2667 	}
2668 
2669 	/* XXX: Broadcom Linux driver. */
2670 	if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2671 		uint32_t v;
2672 
2673 		v = CSR_READ_4(sc, 0x7c00);
2674 		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2675 	}
2676 	DELAY(10000);
2677 
2678 	return;
2679 }
2680 
2681 /*
2682  * Frame reception handling. This is called if there's a frame
2683  * on the receive return list.
2684  *
2685  * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
2687  * 2) the frame is from the standard receive ring
2688  */
2689 
2690 static void
2691 bge_rxeof(sc)
2692 	struct bge_softc *sc;
2693 {
2694 	struct ifnet *ifp;
2695 	int stdcnt = 0, jumbocnt = 0;
2696 
2697 	BGE_LOCK_ASSERT(sc);
2698 
2699 	ifp = &sc->arpcom.ac_if;
2700 
2701 	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2702 	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTWRITE);
2703 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2704 	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
2705 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2706 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2707 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2708 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
2709 		    BUS_DMASYNC_POSTREAD);
2710 	}
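
	/*
	 * The status block carries the hardware's RX return ring
	 * producer index; consume completed descriptors until our
	 * saved consumer index catches up with it.
	 */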
2711 
2712 	while(sc->bge_rx_saved_considx !=
2713 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2714 		struct bge_rx_bd	*cur_rx;
2715 		u_int32_t		rxidx;
2716 		struct ether_header	*eh;
2717 		struct mbuf		*m = NULL;
2718 		u_int16_t		vlan_tag = 0;
2719 		int			have_tag = 0;
2720 
		cur_rx =
		    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2723 
2724 		rxidx = cur_rx->bge_idx;
2725 		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2726 
2727 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2728 			have_tag = 1;
2729 			vlan_tag = cur_rx->bge_vlan_tag;
2730 		}
2731 
2732 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2733 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2734 			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
2735 			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
2736 			    BUS_DMASYNC_POSTREAD);
2737 			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
2738 			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
2739 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2740 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2741 			jumbocnt++;
2742 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2743 				ifp->if_ierrors++;
2744 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2745 				continue;
2746 			}
2747 			if (bge_newbuf_jumbo(sc,
2748 			    sc->bge_jumbo, NULL) == ENOBUFS) {
2749 				ifp->if_ierrors++;
2750 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2751 				continue;
2752 			}
2753 		} else {
2754 			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2755 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2756 			    sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2757 			    BUS_DMASYNC_POSTREAD);
2758 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2759 			    sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2760 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2761 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2762 			stdcnt++;
2763 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2764 				ifp->if_ierrors++;
2765 				bge_newbuf_std(sc, sc->bge_std, m);
2766 				continue;
2767 			}
2768 			if (bge_newbuf_std(sc, sc->bge_std,
2769 			    NULL) == ENOBUFS) {
2770 				ifp->if_ierrors++;
2771 				bge_newbuf_std(sc, sc->bge_std, m);
2772 				continue;
2773 			}
2774 		}
2775 
2776 		ifp->if_ipackets++;
2777 #ifndef __i386__
2778 		/*
2779 		 * The i386 allows unaligned accesses, but for other
2780 		 * platforms we must make sure the payload is aligned.
2781 		 */
2782 		if (sc->bge_rx_alignment_bug) {
2783 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2784 			    cur_rx->bge_len);
2785 			m->m_data += ETHER_ALIGN;
2786 		}
2787 #endif
2788 		eh = mtod(m, struct ether_header *);
2789 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2790 		m->m_pkthdr.rcvif = ifp;
2791 
2792 #if 0 /* currently broken for some packets, possibly related to TCP options */
2793 		if (ifp->if_capenable & IFCAP_RXCSUM) {
2794 			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2795 			if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2796 				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2797 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2798 				m->m_pkthdr.csum_data =
2799 				    cur_rx->bge_tcp_udp_csum;
2800 				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
2801 			}
2802 		}
2803 #endif
2804 
2805 		/*
2806 		 * If we received a packet with a vlan tag,
2807 		 * attach that information to the packet.
2808 		 */
2809 		if (have_tag)
2810 			VLAN_INPUT_TAG(ifp, m, vlan_tag, continue);
2811 
2812 		BGE_UNLOCK(sc);
2813 		(*ifp->if_input)(ifp, m);
2814 		BGE_LOCK(sc);
2815 	}
2816 
2817 	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2818 	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
2819 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2820 	    sc->bge_cdata.bge_rx_std_ring_map,
2821 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_PREWRITE);
2822 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2823 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2824 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2825 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
2826 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2827 	}
2828 
2829 	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2830 	if (stdcnt)
2831 		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2832 	if (jumbocnt)
2833 		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2834 
2835 	return;
2836 }
2837 
2838 static void
2839 bge_txeof(sc)
2840 	struct bge_softc *sc;
2841 {
2842 	struct bge_tx_bd *cur_tx = NULL;
2843 	struct ifnet *ifp;
2844 
2845 	BGE_LOCK_ASSERT(sc);
2846 
2847 	ifp = &sc->arpcom.ac_if;
2848 
2849 	/*
2850 	 * Go through our tx ring and free mbufs for those
2851 	 * frames that have been sent.
2852 	 */
2853 	while (sc->bge_tx_saved_considx !=
2854 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2855 		u_int32_t		idx = 0;
2856 
2857 		idx = sc->bge_tx_saved_considx;
2858 		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2859 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2860 			ifp->if_opackets++;
2861 		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2862 			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2863 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2864 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2865 			    sc->bge_cdata.bge_tx_dmamap[idx]);
2866 		}
2867 		sc->bge_txcnt--;
2868 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2869 		ifp->if_timer = 0;
2870 	}
2871 
2872 	if (cur_tx != NULL)
2873 		ifp->if_flags &= ~IFF_OACTIVE;
2874 
2875 	return;
2876 }
2877 
2878 static void
2879 bge_intr(xsc)
2880 	void *xsc;
2881 {
2882 	struct bge_softc *sc;
2883 	struct ifnet *ifp;
2884 	u_int32_t statusword;
2885 	u_int32_t status, mimode;
2886 
2887 	sc = xsc;
2888 	ifp = &sc->arpcom.ac_if;
2889 
2890 	BGE_LOCK(sc);
2891 
2892 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2893 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTWRITE);
2894 
2895 	statusword =
2896 	    atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
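
	/*
	 * Read and clear the status word in one atomic operation so
	 * that an update DMA'ed by the NIC while we are in the
	 * handler is not lost.
	 */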
2897 
2898 #ifdef notdef
2899 	/* Avoid this for now -- checking this register is expensive. */
2900 	/* Make sure this is really our interrupt. */
2901 	if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2902 		return;
2903 #endif
	/* Ack interrupt and stop others from occurring. */
2905 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2906 
2907 	/*
2908 	 * Process link state changes.
2909 	 * Grrr. The link status word in the status block does
2910 	 * not work correctly on the BCM5700 rev AX and BX chips,
2911 	 * according to all available information. Hence, we have
2912 	 * to enable MII interrupts in order to properly obtain
2913 	 * async link changes. Unfortunately, this also means that
2914 	 * we have to read the MAC status register to detect link
2915 	 * changes, thereby adding an additional register access to
2916 	 * the interrupt handler.
2917 	 */
2918 
2919 	if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
2920 
2921 		status = CSR_READ_4(sc, BGE_MAC_STS);
2922 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
2923 			sc->bge_link = 0;
2924 			callout_stop(&sc->bge_stat_ch);
2925 			bge_tick_locked(sc);
2926 			/* Clear the interrupt */
2927 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2928 			    BGE_EVTENB_MI_INTERRUPT);
2929 			bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
2930 			bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
2931 			    BRGPHY_INTRS);
2932 		}
2933 	} else {
2934 		if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) {
2935 			/*
2936 			 * Sometimes PCS encoding errors are detected in
2937 			 * TBI mode (on fiber NICs), and for some reason
2938 			 * the chip will signal them as link changes.
2939 			 * If we get a link change event, but the 'PCS
2940 			 * encoding error' bit in the MAC status register
2941 			 * is set, don't bother doing a link check.
2942 			 * This avoids spurious "gigabit link up" messages
2943 			 * that sometimes appear on fiber NICs during
2944 			 * periods of heavy traffic. (There should be no
2945 			 * effect on copper NICs.)
2946 			 *
2947 			 * If we do have a copper NIC (bge_tbi == 0) then
2948 			 * check that the AUTOPOLL bit is set before
2949 			 * processing the event as a real link change.
2950 			 * Turning AUTOPOLL on and off in the MII read/write
2951 			 * functions will often trigger a link status
2952 			 * interrupt for no reason.
2953 			 */
2954 			status = CSR_READ_4(sc, BGE_MAC_STS);
2955 			mimode = CSR_READ_4(sc, BGE_MI_MODE);
2956 			if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR|
2957 			    BGE_MACSTAT_MI_COMPLETE)) && (!sc->bge_tbi &&
2958 			    (mimode & BGE_MIMODE_AUTOPOLL))) {
2959 				sc->bge_link = 0;
2960 				callout_stop(&sc->bge_stat_ch);
2961 				bge_tick_locked(sc);
2962 			}
2963 			/* Clear the interrupt */
2964 			CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2965 			    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2966 			    BGE_MACSTAT_LINK_CHANGED);
2967 
2968 			/* Force flush the status block cached by PCI bridge */
2969 			CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
2970 		}
2971 	}
2972 
2973 	if (ifp->if_flags & IFF_RUNNING) {
2974 		/* Check RX return ring producer/consumer */
2975 		bge_rxeof(sc);
2976 
2977 		/* Check TX ring producer/consumer */
2978 		bge_txeof(sc);
2979 	}
2980 
2981 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2982 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
2983 
2984 	bge_handle_events(sc);
2985 
2986 	/* Re-enable interrupts. */
2987 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2988 
2989 	if (ifp->if_flags & IFF_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2990 		bge_start_locked(ifp);
2991 
2992 	BGE_UNLOCK(sc);
2993 
2994 	return;
2995 }
2996 
2997 static void
2998 bge_tick_locked(sc)
2999 	struct bge_softc *sc;
3000 {
3001 	struct mii_data *mii = NULL;
3002 	struct ifmedia *ifm = NULL;
3003 	struct ifnet *ifp;
3004 
3005 	ifp = &sc->arpcom.ac_if;
3006 
3007 	BGE_LOCK_ASSERT(sc);
3008 
3009 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
3010 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
3011 		bge_stats_update_regs(sc);
3012 	else
3013 		bge_stats_update(sc);
3014 	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3015 	if (sc->bge_link)
3016 		return;
3017 
3018 	if (sc->bge_tbi) {
3019 		ifm = &sc->bge_ifmedia;
3020 		if (CSR_READ_4(sc, BGE_MAC_STS) &
3021 		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
3022 			sc->bge_link++;
3023 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
3024 				BGE_CLRBIT(sc, BGE_MAC_MODE,
3025 				    BGE_MACMODE_TBI_SEND_CFGS);
3026 			CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3027 			if (bootverbose)
3028 				printf("bge%d: gigabit link up\n",
3029 				    sc->bge_unit);
3030 			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3031 				bge_start_locked(ifp);
3032 		}
3033 		return;
3034 	}
3035 
3036 	mii = device_get_softc(sc->bge_miibus);
3037 	mii_tick(mii);
3038 
3039 	if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
3040 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3041 		sc->bge_link++;
3042 		if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
3043 		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) &&
3044 		    bootverbose)
3045 			printf("bge%d: gigabit link up\n", sc->bge_unit);
3046 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3047 			bge_start_locked(ifp);
3048 	}
3049 
3050 	return;
3051 }
3052 
3053 static void
3054 bge_tick(xsc)
3055 	void *xsc;
3056 {
3057 	struct bge_softc *sc;
3058 
3059 	sc = xsc;
3060 
3061 	BGE_LOCK(sc);
3062 	bge_tick_locked(sc);
3063 	BGE_UNLOCK(sc);
3064 }
3065 
3066 static void
3067 bge_stats_update_regs(sc)
3068 	struct bge_softc *sc;
3069 {
3070 	struct ifnet *ifp;
3071 	struct bge_mac_stats_regs stats;
3072 	u_int32_t *s;
3073 	int i;
3074 
3075 	ifp = &sc->arpcom.ac_if;
3076 
3077 	s = (u_int32_t *)&stats;
3078 	for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
3079 		*s = CSR_READ_4(sc, BGE_RX_STATS + i);
3080 		s++;
3081 	}
3082 
3083 	ifp->if_collisions +=
3084 	   (stats.dot3StatsSingleCollisionFrames +
3085 	   stats.dot3StatsMultipleCollisionFrames +
3086 	   stats.dot3StatsExcessiveCollisions +
3087 	   stats.dot3StatsLateCollisions) -
3088 	   ifp->if_collisions;
3089 
3090 	return;
3091 }
3092 
3093 static void
3094 bge_stats_update(sc)
3095 	struct bge_softc *sc;
3096 {
3097 	struct ifnet *ifp;
3098 	struct bge_stats *stats;
3099 
3100 	ifp = &sc->arpcom.ac_if;
3101 
3102 	stats = (struct bge_stats *)(sc->bge_vhandle +
3103 	    BGE_MEMWIN_START + BGE_STATS_BLOCK);
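
	/*
	 * Note: the statistics block is read in place through the
	 * memory window that BAR0 maps onto NIC-local memory, not
	 * via host DMA.
	 */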
3104 
3105 	ifp->if_collisions +=
3106 	   (stats->txstats.dot3StatsSingleCollisionFrames.bge_addr_lo +
3107 	   stats->txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo +
3108 	   stats->txstats.dot3StatsExcessiveCollisions.bge_addr_lo +
3109 	   stats->txstats.dot3StatsLateCollisions.bge_addr_lo) -
3110 	   ifp->if_collisions;
3111 
3112 #ifdef notdef
3113 	ifp->if_collisions +=
3114 	   (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
3115 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
3116 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
3117 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
3118 	   ifp->if_collisions;
3119 #endif
3120 
3121 	return;
3122 }
3123 
3124 /*
 * Encapsulate an mbuf chain in the TX ring by coupling the mbuf data
3126  * pointers to descriptors.
3127  */
3128 static int
3129 bge_encap(sc, m_head, txidx)
3130 	struct bge_softc *sc;
3131 	struct mbuf *m_head;
3132 	u_int32_t *txidx;
3133 {
3134 	struct bge_tx_bd	*f = NULL;
3135 	u_int16_t		csum_flags = 0;
3136 	struct m_tag		*mtag;
3137 	struct bge_dmamap_arg	ctx;
3138 	bus_dmamap_t		map;
3139 	int			error;
3142 	if (m_head->m_pkthdr.csum_flags) {
3143 		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
3144 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3145 		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
3146 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3147 		if (m_head->m_flags & M_LASTFRAG)
3148 			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3149 		else if (m_head->m_flags & M_FRAG)
3150 			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3151 	}
3152 
3153 	mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m_head);
3154 
3155 	ctx.sc = sc;
3156 	ctx.bge_idx = *txidx;
3157 	ctx.bge_ring = sc->bge_ldata.bge_tx_ring;
3158 	ctx.bge_flags = csum_flags;
3159 	/*
3160 	 * Sanity check: avoid coming within 16 descriptors
3161 	 * of the end of the ring.
3162 	 */
3163 	ctx.bge_maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - 16;
3164 
3165 	map = sc->bge_cdata.bge_tx_dmamap[*txidx];
3166 	error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
3167 	    m_head, bge_dma_map_tx_desc, &ctx, BUS_DMA_NOWAIT);
3168 
3169 	if (error || ctx.bge_maxsegs == 0 /*||
3170 	    ctx.bge_idx == sc->bge_tx_saved_considx*/)
3171 		return (ENOBUFS);
3172 
3173 	/*
	 * Ensure that the map for this transmission
3175 	 * is placed at the array index of the last descriptor
3176 	 * in this chain.
3177 	 */
3178 	sc->bge_cdata.bge_tx_dmamap[*txidx] =
3179 	    sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx];
3180 	sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx] = map;
3181 	sc->bge_cdata.bge_tx_chain[ctx.bge_idx] = m_head;
3182 	sc->bge_txcnt += ctx.bge_maxsegs;
3183 	f = &sc->bge_ldata.bge_tx_ring[*txidx];
3184 	if (mtag != NULL) {
3185 		f->bge_flags |= htole16(BGE_TXBDFLAG_VLAN_TAG);
3186 		f->bge_vlan_tag = htole16(VLAN_TAG_VALUE(mtag));
3187 	} else {
3188 		f->bge_vlan_tag = 0;
3189 	}
3190 
3191 	BGE_INC(ctx.bge_idx, BGE_TX_RING_CNT);
3192 	*txidx = ctx.bge_idx;
3193 
3194 	return(0);
3195 }
3196 
3197 /*
3198  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3199  * to the mbuf data regions directly in the transmit descriptors.
3200  */
3201 static void
3202 bge_start_locked(ifp)
3203 	struct ifnet *ifp;
3204 {
3205 	struct bge_softc *sc;
3206 	struct mbuf *m_head = NULL;
3207 	u_int32_t prodidx = 0;
3208 	int count = 0;
3209 
3210 	sc = ifp->if_softc;
3211 
3212 	if (!sc->bge_link && IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3213 		return;
3214 
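	/*
	 * Recover the current TX producer index by reading it back
	 * from the host producer mailbox rather than caching it in
	 * the softc.
	 */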
3215 	prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
3216 
3217 	while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3218 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3219 		if (m_head == NULL)
3220 			break;
3221 
3222 		/*
3223 		 * XXX
3224 		 * The code inside the if() block is never reached since we
3225 		 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3226 		 * requests to checksum TCP/UDP in a fragmented packet.
3227 		 *
3228 		 * XXX
3229 		 * safety overkill.  If this is a fragmented packet chain
3230 		 * with delayed TCP/UDP checksums, then only encapsulate
3231 		 * it if we have enough descriptors to handle the entire
3232 		 * chain at once.
3233 		 * (paranoia -- may not actually be needed)
3234 		 */
3235 		if (m_head->m_flags & M_FIRSTFRAG &&
3236 		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3237 			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3238 			    m_head->m_pkthdr.csum_data + 16) {
3239 				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3240 				ifp->if_flags |= IFF_OACTIVE;
3241 				break;
3242 			}
3243 		}
3244 
3245 		/*
3246 		 * Pack the data into the transmit ring. If we
3247 		 * don't have room, set the OACTIVE flag and wait
3248 		 * for the NIC to drain the ring.
3249 		 */
3250 		if (bge_encap(sc, m_head, &prodidx)) {
3251 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3252 			ifp->if_flags |= IFF_OACTIVE;
3253 			break;
3254 		}
3255 		++count;
3256 
3257 		/*
3258 		 * If there's a BPF listener, bounce a copy of this frame
3259 		 * to him.
3260 		 */
3261 		BPF_MTAP(ifp, m_head);
3262 	}
3263 
3264 	if (count == 0) {
3265 		/* no packets were dequeued */
3266 		return;
3267 	}
3268 
3269 	/* Transmit */
3270 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
	/* 5700 b2 errata: the TX mailbox write must be issued twice. */
3272 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3273 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3274 
3275 	/*
3276 	 * Set a timeout in case the chip goes out to lunch.
3277 	 */
3278 	ifp->if_timer = 5;
3279 
3280 	return;
3281 }
3282 
/*
 * Wrapper for bge_start_locked() that acquires and releases the
 * driver lock around the transmit path above.
 */
3287 static void
3288 bge_start(ifp)
3289 	struct ifnet *ifp;
3290 {
3291 	struct bge_softc *sc;
3292 
3293 	sc = ifp->if_softc;
3294 	BGE_LOCK(sc);
3295 	bge_start_locked(ifp);
3296 	BGE_UNLOCK(sc);
3297 }
3298 
3299 static void
3300 bge_init_locked(sc)
3301 	struct bge_softc *sc;
3302 {
3303 	struct ifnet *ifp;
3304 	u_int16_t *m;
3305 
3306 	BGE_LOCK_ASSERT(sc);
3307 
3308 	ifp = &sc->arpcom.ac_if;
3309 
3310 	if (ifp->if_flags & IFF_RUNNING)
3311 		return;
3312 
3313 	/* Cancel pending I/O and flush buffers. */
3314 	bge_stop(sc);
3315 	bge_reset(sc);
3316 	bge_chipinit(sc);
3317 
3318 	/*
3319 	 * Init the various state machines, ring
3320 	 * control blocks and firmware.
3321 	 */
3322 	if (bge_blockinit(sc)) {
3323 		printf("bge%d: initialization failure\n", sc->bge_unit);
3324 		return;
3325 	}
3326 
3327 	ifp = &sc->arpcom.ac_if;
3328 
3329 	/* Specify MTU. */
3330 	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3331 	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
3332 
3333 	/* Load our MAC address. */
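	/*
	 * The station address is split across a register pair: the
	 * first two octets go in BGE_MAC_ADDR1_LO and the remaining
	 * four in BGE_MAC_ADDR1_HI, each 16-bit word swapped to
	 * network byte order.
	 */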
3334 	m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
3335 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3336 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3337 
3338 	/* Enable or disable promiscuous mode as needed. */
3339 	if (ifp->if_flags & IFF_PROMISC) {
3340 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3341 	} else {
3342 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3343 	}
3344 
3345 	/* Program multicast filter. */
3346 	bge_setmulti(sc);
3347 
3348 	/* Init RX ring. */
3349 	bge_init_rx_ring_std(sc);
3350 
3351 	/*
3352 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
	 * memory to ensure that the chip has in fact read the first
3354 	 * entry of the ring.
3355 	 */
3356 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3357 		u_int32_t		v, i;
3358 		for (i = 0; i < 10; i++) {
3359 			DELAY(20);
3360 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3361 			if (v == (MCLBYTES - ETHER_ALIGN))
3362 				break;
3363 		}
3364 		if (i == 10)
3365 			printf ("bge%d: 5705 A0 chip failed to load RX ring\n",
3366 			    sc->bge_unit);
3367 	}
3368 
3369 	/* Init jumbo RX ring. */
3370 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3371 		bge_init_rx_ring_jumbo(sc);
3372 
3373 	/* Init our RX return ring index */
3374 	sc->bge_rx_saved_considx = 0;
3375 
3376 	/* Init TX ring. */
3377 	bge_init_tx_ring(sc);
3378 
3379 	/* Turn on transmitter */
3380 	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3381 
3382 	/* Turn on receiver */
3383 	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3384 
3385 	/* Tell firmware we're alive. */
3386 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3387 
3388 	/* Enable host interrupts. */
3389 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3390 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3391 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3392 
3393 	bge_ifmedia_upd(ifp);
3394 
3395 	ifp->if_flags |= IFF_RUNNING;
3396 	ifp->if_flags &= ~IFF_OACTIVE;
3397 
3398 	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3399 
3400 	return;
3401 }
3402 
3403 static void
3404 bge_init(xsc)
3405 	void *xsc;
3406 {
3407 	struct bge_softc *sc = xsc;
3408 
3409 	BGE_LOCK(sc);
3410 	bge_init_locked(sc);
3411 	BGE_UNLOCK(sc);
3412 
3413 	return;
3414 }
3415 
3416 /*
3417  * Set media options.
3418  */
3419 static int
3420 bge_ifmedia_upd(ifp)
3421 	struct ifnet *ifp;
3422 {
3423 	struct bge_softc *sc;
3424 	struct mii_data *mii;
3425 	struct ifmedia *ifm;
3426 
3427 	sc = ifp->if_softc;
3428 	ifm = &sc->bge_ifmedia;
3429 
3430 	/* If this is a 1000baseX NIC, enable the TBI port. */
3431 	if (sc->bge_tbi) {
3432 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3433 			return(EINVAL);
3434 		switch(IFM_SUBTYPE(ifm->ifm_media)) {
3435 		case IFM_AUTO:
3436 			/*
3437 			 * The BCM5704 ASIC appears to have a special
3438 			 * mechanism for programming the autoneg
3439 			 * advertisement registers in TBI mode.
3440 			 */
3441 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3442 				uint32_t sgdig;
3443 				CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3444 				sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3445 				sgdig |= BGE_SGDIGCFG_AUTO|
3446 				    BGE_SGDIGCFG_PAUSE_CAP|
3447 				    BGE_SGDIGCFG_ASYM_PAUSE;
3448 				CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3449 				    sgdig|BGE_SGDIGCFG_SEND);
3450 				DELAY(5);
3451 				CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3452 			}
3453 			break;
3454 		case IFM_1000_SX:
3455 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3456 				BGE_CLRBIT(sc, BGE_MAC_MODE,
3457 				    BGE_MACMODE_HALF_DUPLEX);
3458 			} else {
3459 				BGE_SETBIT(sc, BGE_MAC_MODE,
3460 				    BGE_MACMODE_HALF_DUPLEX);
3461 			}
3462 			break;
3463 		default:
3464 			return(EINVAL);
3465 		}
3466 		return(0);
3467 	}
3468 
3469 	mii = device_get_softc(sc->bge_miibus);
3470 	sc->bge_link = 0;
3471 	if (mii->mii_instance) {
3472 		struct mii_softc *miisc;
3473 		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3474 		    miisc = LIST_NEXT(miisc, mii_list))
3475 			mii_phy_reset(miisc);
3476 	}
3477 	mii_mediachg(mii);
3478 
3479 	return(0);
3480 }
3481 
3482 /*
3483  * Report current media status.
3484  */
3485 static void
3486 bge_ifmedia_sts(ifp, ifmr)
3487 	struct ifnet *ifp;
3488 	struct ifmediareq *ifmr;
3489 {
3490 	struct bge_softc *sc;
3491 	struct mii_data *mii;
3492 
3493 	sc = ifp->if_softc;
3494 
3495 	if (sc->bge_tbi) {
3496 		ifmr->ifm_status = IFM_AVALID;
3497 		ifmr->ifm_active = IFM_ETHER;
3498 		if (CSR_READ_4(sc, BGE_MAC_STS) &
3499 		    BGE_MACSTAT_TBI_PCS_SYNCHED)
3500 			ifmr->ifm_status |= IFM_ACTIVE;
3501 		ifmr->ifm_active |= IFM_1000_SX;
3502 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3503 			ifmr->ifm_active |= IFM_HDX;
3504 		else
3505 			ifmr->ifm_active |= IFM_FDX;
3506 		return;
3507 	}
3508 
3509 	mii = device_get_softc(sc->bge_miibus);
3510 	mii_pollstat(mii);
3511 	ifmr->ifm_active = mii->mii_media_active;
3512 	ifmr->ifm_status = mii->mii_media_status;
3513 
3514 	return;
3515 }
3516 
3517 static int
3518 bge_ioctl(ifp, command, data)
3519 	struct ifnet *ifp;
3520 	u_long command;
3521 	caddr_t data;
3522 {
3523 	struct bge_softc *sc = ifp->if_softc;
3524 	struct ifreq *ifr = (struct ifreq *) data;
3525 	int mask, error = 0;
3526 	struct mii_data *mii;
3527 
3528 	switch(command) {
3529 	case SIOCSIFMTU:
3530 		/* Disallow jumbo frames on 5705. */
3531 		if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
3532 		      sc->bge_asicrev == BGE_ASICREV_BCM5750) &&
3533 		    ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
3534 			error = EINVAL;
3535 		else {
3536 			ifp->if_mtu = ifr->ifr_mtu;
3537 			ifp->if_flags &= ~IFF_RUNNING;
3538 			bge_init(sc);
3539 		}
3540 		break;
3541 	case SIOCSIFFLAGS:
3542 		BGE_LOCK(sc);
3543 		if (ifp->if_flags & IFF_UP) {
3544 			/*
3545 			 * If only the state of the PROMISC flag changed,
3546 			 * then just use the 'set promisc mode' command
3547 			 * instead of reinitializing the entire NIC. Doing
3548 			 * a full re-init means reloading the firmware and
3549 			 * waiting for it to start up, which may take a
3550 			 * second or two.
3551 			 */
3552 			if (ifp->if_flags & IFF_RUNNING &&
3553 			    ifp->if_flags & IFF_PROMISC &&
3554 			    !(sc->bge_if_flags & IFF_PROMISC)) {
3555 				BGE_SETBIT(sc, BGE_RX_MODE,
3556 				    BGE_RXMODE_RX_PROMISC);
3557 			} else if (ifp->if_flags & IFF_RUNNING &&
3558 			    !(ifp->if_flags & IFF_PROMISC) &&
3559 			    sc->bge_if_flags & IFF_PROMISC) {
3560 				BGE_CLRBIT(sc, BGE_RX_MODE,
3561 				    BGE_RXMODE_RX_PROMISC);
3562 			} else
3563 				bge_init_locked(sc);
3564 		} else {
3565 			if (ifp->if_flags & IFF_RUNNING) {
3566 				bge_stop(sc);
3567 			}
3568 		}
3569 		sc->bge_if_flags = ifp->if_flags;
3570 		BGE_UNLOCK(sc);
3571 		error = 0;
3572 		break;
3573 	case SIOCADDMULTI:
3574 	case SIOCDELMULTI:
3575 		if (ifp->if_flags & IFF_RUNNING) {
3576 			BGE_LOCK(sc);
3577 			bge_setmulti(sc);
3578 			BGE_UNLOCK(sc);
3579 			error = 0;
3580 		}
3581 		break;
3582 	case SIOCSIFMEDIA:
3583 	case SIOCGIFMEDIA:
3584 		if (sc->bge_tbi) {
3585 			error = ifmedia_ioctl(ifp, ifr,
3586 			    &sc->bge_ifmedia, command);
3587 		} else {
3588 			mii = device_get_softc(sc->bge_miibus);
3589 			error = ifmedia_ioctl(ifp, ifr,
3590 			    &mii->mii_media, command);
3591 		}
3592 		break;
3593 	case SIOCSIFCAP:
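		/*
		 * Each bit set in 'mask' marks a capability whose
		 * requested state differs from its currently enabled
		 * state.
		 */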
3594 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3595 		/* NB: the code for RX csum offload is disabled for now */
3596 		if (mask & IFCAP_TXCSUM) {
3597 			ifp->if_capenable ^= IFCAP_TXCSUM;
3598 			if (IFCAP_TXCSUM & ifp->if_capenable)
3599 				ifp->if_hwassist = BGE_CSUM_FEATURES;
3600 			else
3601 				ifp->if_hwassist = 0;
3602 		}
3603 		error = 0;
3604 		break;
3605 	default:
3606 		error = ether_ioctl(ifp, command, data);
3607 		break;
3608 	}
3609 
3610 	return(error);
3611 }
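/*
 * Illustrative sketch (not compiled): toggling the IFCAP_TXCSUM bit that
 * the SIOCSIFCAP case above handles, from userland.  The interface name
 * "bge0" and the helper name toggle_txcsum are assumptions, not part of
 * this driver.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <string.h>
#include <unistd.h>

static int
toggle_txcsum(const char *ifname)
{
	struct ifreq ifr;
	int s;

	s = socket(AF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return (-1);
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
	/* ifr_curcap is the enabled set; flip TXCSUM and push it back. */
	if (ioctl(s, SIOCGIFCAP, &ifr) == 0) {
		ifr.ifr_reqcap = ifr.ifr_curcap ^ IFCAP_TXCSUM;
		(void)ioctl(s, SIOCSIFCAP, &ifr);
	}
	close(s);
	return (0);
}
#endif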
3612 
3613 static void
3614 bge_watchdog(ifp)
3615 	struct ifnet *ifp;
3616 {
3617 	struct bge_softc *sc;
3618 
3619 	sc = ifp->if_softc;
3620 
3621 	printf("bge%d: watchdog timeout -- resetting\n", sc->bge_unit);
3622 
3623 	ifp->if_flags &= ~IFF_RUNNING;
3624 	bge_init(sc);
3625 
3626 	ifp->if_oerrors++;
3627 
3628 	return;
3629 }
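/*
 * Note: this routine only runs when the transmit path has armed
 * ifp->if_timer and no TX completion disarms it before it counts down
 * to zero.  Recovery is simply a full chip reinit, at the cost of any
 * frames still in flight.
 */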
3630 
3631 /*
3632  * Stop the adapter and free any mbufs allocated to the
3633  * RX and TX lists.
3634  */
3635 static void
3636 bge_stop(sc)
3637 	struct bge_softc *sc;
3638 {
3639 	struct ifnet *ifp;
3640 	struct ifmedia_entry *ifm;
3641 	struct mii_data *mii = NULL;
3642 	int mtmp, itmp;
3643 
3644 	BGE_LOCK_ASSERT(sc);
3645 
3646 	ifp = &sc->arpcom.ac_if;
3647 
3648 	if (!sc->bge_tbi)
3649 		mii = device_get_softc(sc->bge_miibus);
3650 
3651 	callout_stop(&sc->bge_stat_ch);
3652 
3653 	/*
3654 	 * Disable all of the receiver blocks
3655 	 */
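	/*
	 * BGE_CLRBIT() is a read-modify-write helper (roughly
	 * CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))), so each
	 * call below clears only the ENABLE bit of the named block and
	 * leaves the rest of the register intact.
	 */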
3656 	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3657 	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3658 	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3659 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3660 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3661 		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3662 	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3663 	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3664 	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3665 
3666 	/*
3667 	 * Disable all of the transmit blocks
3668 	 */
3669 	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3670 	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3671 	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3672 	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3673 	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3674 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3675 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3676 		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3677 	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3678 
3679 	/*
3680 	 * Shut down all of the memory managers and related
3681 	 * state machines.
3682 	 */
3683 	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3684 	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3685 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3686 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3687 		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3688 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3689 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3690 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3691 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
3692 		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3693 		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3694 	}
3695 
3696 	/* Disable host interrupts. */
3697 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3698 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
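	/*
	 * The chip only asserts its interrupt line while interrupt
	 * mailbox 0 reads zero, so the nonzero write above masks
	 * interrupts at the device in addition to the PCI-level mask
	 * bit set just before it.
	 */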
3699 
3700 	/*
3701 	 * Tell firmware we're shutting down.
3702 	 */
3703 	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3704 
3705 	/* Free the RX lists. */
3706 	bge_free_rx_ring_std(sc);
3707 
3708 	/* Free jumbo RX list. */
3709 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3710 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3711 		bge_free_rx_ring_jumbo(sc);
3712 
3713 	/* Free TX buffers. */
3714 	bge_free_tx_ring(sc);
3715 
	/*
	 * Isolate/power down the PHY by temporarily selecting the
	 * IFM_ETHER|IFM_NONE media word, then restore the saved media
	 * selection so that things will be put back to normal when
	 * we bring the interface back up.
	 */
3721 	if (!sc->bge_tbi) {
3722 		itmp = ifp->if_flags;
3723 		ifp->if_flags |= IFF_UP;
3724 		ifm = mii->mii_media.ifm_cur;
3725 		mtmp = ifm->ifm_media;
3726 		ifm->ifm_media = IFM_ETHER|IFM_NONE;
3727 		mii_mediachg(mii);
3728 		ifm->ifm_media = mtmp;
3729 		ifp->if_flags = itmp;
3730 	}
3731 
3732 	sc->bge_link = 0;
3733 
3734 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3735 
3736 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3737 
3738 	return;
3739 }
3740 
3741 /*
3742  * Stop all chip I/O so that the kernel's probe routines don't
3743  * get confused by errant DMAs when rebooting.
3744  */
3745 static void
3746 bge_shutdown(dev)
3747 	device_t dev;
3748 {
3749 	struct bge_softc *sc;
3750 
3751 	sc = device_get_softc(dev);
3752 
3753 	BGE_LOCK(sc);
3754 	bge_stop(sc);
3755 	bge_reset(sc);
3756 	BGE_UNLOCK(sc);
3757 
3758 	return;
3759 }
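/*
 * For reference, bge_shutdown() is reached through the newbus
 * device_shutdown method registered in this driver's method table
 * (declared earlier in this file).  An abridged sketch of that pattern
 * follows; the real table also registers the MII bus methods.
 */
#if 0
static device_method_t bge_methods[] = {
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),
	{ 0, 0 }
};
#endif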
3760