/*-
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <machine/clock.h>      /* for DELAY */
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miidevs.h"
#include <dev/mii/brgphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/bge/if_bgereg.h>

#include "opt_bge.h"

#define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

MODULE_DEPEND(bge, pci, 1, 1, 1);
MODULE_DEPEND(bge, ether, 1, 1, 1);
MODULE_DEPEND(bge, miibus, 1, 1, 1);

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
#define BGE_DEVDESC_MAX		64	/* Maximum device description length */

static struct bge_type bge_devs[] = {
	{ ALT_VENDORID,	ALT_DEVICEID_BCM5700,
		"Broadcom BCM5700 Gigabit Ethernet" },
	{ ALT_VENDORID,	ALT_DEVICEID_BCM5701,
		"Broadcom BCM5701 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5700,
		"Broadcom BCM5700 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5701,
		"Broadcom BCM5701 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5702,
		"Broadcom BCM5702 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5702X,
		"Broadcom BCM5702X Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5703,
		"Broadcom BCM5703 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5703X,
		"Broadcom BCM5703X Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5704C,
		"Broadcom BCM5704C Dual Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5704S,
		"Broadcom BCM5704S Dual Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705,
		"Broadcom BCM5705 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705K,
		"Broadcom BCM5705K Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705M,
		"Broadcom BCM5705M Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT,
		"Broadcom BCM5705M Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5714C,
		"Broadcom BCM5714C Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5721,
		"Broadcom BCM5721 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5750,
		"Broadcom BCM5750 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5750M,
		"Broadcom BCM5750M Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5751,
		"Broadcom BCM5751 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5751M,
		"Broadcom BCM5751M Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5782,
		"Broadcom BCM5782 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5788,
		"Broadcom BCM5788 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5789,
		"Broadcom BCM5789 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5901,
		"Broadcom BCM5901 Fast Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2,
		"Broadcom BCM5901A2 Fast Ethernet" },
	{ SK_VENDORID, SK_DEVICEID_ALTIMA,
		"SysKonnect Gigabit Ethernet" },
	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000,
		"Altima AC1000 Gigabit Ethernet" },
	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002,
		"Altima AC1002 Gigabit Ethernet" },
	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100,
		"Altima AC9100 Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int bge_probe		(device_t);
static int bge_attach		(device_t);
static int bge_detach		(device_t);
static int bge_suspend		(device_t);
static int bge_resume		(device_t);
static void bge_release_resources
				(struct bge_softc *);
static void bge_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
static void bge_dma_map_tx_desc	(void *, bus_dma_segment_t *, int,
				    bus_size_t, int);
static int bge_dma_alloc	(device_t);
static void bge_dma_free	(struct bge_softc *);

static void bge_txeof		(struct bge_softc *);
static void bge_rxeof		(struct bge_softc *);

static void bge_tick_locked	(struct bge_softc *);
static void bge_tick		(void *);
static void bge_stats_update	(struct bge_softc *);
static void bge_stats_update_regs
				(struct bge_softc *);
static int bge_encap		(struct bge_softc *, struct mbuf *,
					u_int32_t *);

static void bge_intr		(void *);
static void bge_start_locked	(struct ifnet *);
static void bge_start		(struct ifnet *);
static int bge_ioctl		(struct ifnet *, u_long, caddr_t);
static void bge_init_locked	(struct bge_softc *);
static void bge_init		(void *);
static void bge_stop		(struct bge_softc *);
static void bge_watchdog	(struct ifnet *);
static void bge_shutdown	(device_t);
static int bge_ifmedia_upd	(struct ifnet *);
static void bge_ifmedia_sts	(struct ifnet *, struct ifmediareq *);

static u_int8_t	bge_eeprom_getbyte	(struct bge_softc *, int, u_int8_t *);
static int bge_read_eeprom	(struct bge_softc *, caddr_t, int, int);

static void bge_setmulti	(struct bge_softc *);

static void bge_handle_events	(struct bge_softc *);
static int bge_alloc_jumbo_mem	(struct bge_softc *);
static void bge_free_jumbo_mem	(struct bge_softc *);
static void *bge_jalloc		(struct bge_softc *);
static void bge_jfree		(void *, void *);
static int bge_newbuf_std	(struct bge_softc *, int, struct mbuf *);
static int bge_newbuf_jumbo	(struct bge_softc *, int, struct mbuf *);
static int bge_init_rx_ring_std	(struct bge_softc *);
static void bge_free_rx_ring_std	(struct bge_softc *);
static int bge_init_rx_ring_jumbo	(struct bge_softc *);
static void bge_free_rx_ring_jumbo	(struct bge_softc *);
static void bge_free_tx_ring	(struct bge_softc *);
static int bge_init_tx_ring	(struct bge_softc *);

static int bge_chipinit		(struct bge_softc *);
static int bge_blockinit	(struct bge_softc *);

#ifdef notdef
static u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
static void bge_vpd_read_res	(struct bge_softc *, struct vpd_res *, int);
static void bge_vpd_read	(struct bge_softc *);
#endif

static u_int32_t bge_readmem_ind
				(struct bge_softc *, int);
static void bge_writemem_ind	(struct bge_softc *, int, int);
#ifdef notdef
static u_int32_t bge_readreg_ind
				(struct bge_softc *, int);
#endif
static void bge_writereg_ind	(struct bge_softc *, int, int);

static int bge_miibus_readreg	(device_t, int, int);
static int bge_miibus_writereg	(device_t, int, int, int);
static void bge_miibus_statchg	(device_t);

static void bge_reset		(struct bge_softc *);

static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),
	DEVMETHOD(device_suspend,	bge_suspend),
	DEVMETHOD(device_resume,	bge_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	{ 0, 0 }
};

static driver_t bge_driver = {
	"bge",
	bge_methods,
	sizeof(struct bge_softc)
};

static devclass_t bge_devclass;

DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);

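/*
 * The chip can be reached indirectly through PCI configuration space:
 * writing BGE_PCI_MEMWIN_BASEADDR aims a window at a region of internal
 * NIC memory (the _reg_ variants aim BGE_PCI_REG_BASEADDR at a register
 * instead), after which the matching DATA register reads or writes
 * through that window.  Since only config cycles are involved, this
 * works before the memory BAR has been mapped.
 */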
static u_int32_t
bge_readmem_ind(sc, off)
	struct bge_softc *sc;
	int off;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
}

static void
bge_writemem_ind(sc, off, val)
	struct bge_softc *sc;
	int off, val;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);

	return;
}

#ifdef notdef
static u_int32_t
bge_readreg_ind(sc, off)
	struct bge_softc *sc;
	int off;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(sc, off, val)
	struct bge_softc *sc;
	int off, val;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);

	return;
}

/*
 * Map a single buffer address.
 */

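/*
 * This is the callback passed to bus_dmamap_load(): busdma invokes it
 * with the resolved segment list once the load completes.  All buffers
 * loaded through it are expected to fit in a single segment, whose bus
 * address is recorded in the bge_dmamap_arg context for the caller.
 */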
static void
bge_dma_map_addr(arg, segs, nseg, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	int error;
{
	struct bge_dmamap_arg *ctx;

	if (error)
		return;

	ctx = arg;

	if (nseg > ctx->bge_maxsegs) {
		ctx->bge_maxsegs = 0;
		return;
	}

	ctx->bge_busaddr = segs->ds_addr;

	return;
}

/*
 * Map an mbuf chain into a TX ring.
 */

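/*
 * Each DMA segment becomes one bge_tx_bd in the ring.  BGE_INC()
 * advances the index modulo BGE_TX_RING_CNT, so a chain may wrap past
 * the end of the ring, and BGE_TXBDFLAG_END is set only on the last
 * descriptor so the chip knows where the frame ends.
 */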
static void
bge_dma_map_tx_desc(arg, segs, nseg, mapsize, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	bus_size_t mapsize;
	int error;
{
	struct bge_dmamap_arg *ctx;
	struct bge_tx_bd *d = NULL;
	int i = 0, idx;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there are too many segments */
	if (nseg > ctx->bge_maxsegs) {
		ctx->bge_maxsegs = 0;
		return;
	}

	idx = ctx->bge_idx;
	while(1) {
		d = &ctx->bge_ring[idx];
		d->bge_addr.bge_addr_lo =
		    htole32(BGE_ADDR_LO(segs[i].ds_addr));
		d->bge_addr.bge_addr_hi =
		    htole32(BGE_ADDR_HI(segs[i].ds_addr));
		d->bge_len = htole16(segs[i].ds_len);
		d->bge_flags = htole16(ctx->bge_flags);
		i++;
		if (i == nseg)
			break;
		BGE_INC(idx, BGE_TX_RING_CNT);
	}

	d->bge_flags |= htole16(BGE_TXBDFLAG_END);
	ctx->bge_maxsegs = nseg;
	ctx->bge_idx = idx;

	return;
}

#ifdef notdef
static u_int8_t
bge_vpd_readbyte(sc, addr)
	struct bge_softc *sc;
	int addr;
{
	int i;
	device_t dev;
	u_int32_t val;

	dev = sc->bge_dev;
	pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("bge%d: VPD read timed out\n", sc->bge_unit);
		return(0);
	}

	val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);

	return((val >> ((addr % 4) * 8)) & 0xFF);
}

static void
bge_vpd_read_res(sc, res, addr)
	struct bge_softc *sc;
	struct vpd_res *res;
	int addr;
{
	int i;
	u_int8_t *ptr;

	ptr = (u_int8_t *)res;
	for (i = 0; i < sizeof(struct vpd_res); i++)
		ptr[i] = bge_vpd_readbyte(sc, i + addr);

	return;
}

static void
bge_vpd_read(sc)
	struct bge_softc *sc;
{
	int pos = 0, i;
	struct vpd_res res;

	if (sc->bge_vpd_prodname != NULL)
		free(sc->bge_vpd_prodname, M_DEVBUF);
	if (sc->bge_vpd_readonly != NULL)
		free(sc->bge_vpd_readonly, M_DEVBUF);
	sc->bge_vpd_prodname = NULL;
	sc->bge_vpd_readonly = NULL;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_ID) {
		printf("bge%d: bad VPD resource id: expected %x got %x\n",
			sc->bge_unit, VPD_RES_ID, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
	sc->bge_vpd_prodname[i] = '\0';
	pos += i;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_READ) {
		printf("bge%d: bad VPD resource id: expected %x got %x\n",
		    sc->bge_unit, VPD_RES_READ, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);

	return;
}
#endif

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static u_int8_t
bge_eeprom_getbyte(sc, addr, dest)
	struct bge_softc *sc;
	int addr;
	u_int8_t *dest;
{
	int i;
	u_int32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for(i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("bge%d: eeprom read timed out\n", sc->bge_unit);
		return(1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

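	/*
	 * The auto access interface always returns a whole 32-bit word;
	 * extract the byte lane that corresponds to the requested
	 * address.
	 */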
	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(sc, dest, off, cnt)
	struct bge_softc *sc;
	caddr_t dest;
	int off;
	int cnt;
{
	int err = 0, i;
	u_int8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}

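#ifdef notdef
/*
 * Usage sketch (not compiled in): this is how the attach code pulls
 * the station address out of the EEPROM.  The BGE_EE_MAC_OFFSET + 2
 * offset matches what bge_attach() uses to skip ahead to the address
 * bytes themselves.
 */
static void
bge_read_eaddr_example(struct bge_softc *sc)
{
	u_char eaddr[ETHER_ADDR_LEN];

	if (bge_read_eeprom(sc, (caddr_t)eaddr, BGE_EE_MAC_OFFSET + 2,
	    ETHER_ADDR_LEN))
		printf("bge%d: failed to read station address\n",
		    sc->bge_unit);
}
#endif
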
static int
bge_miibus_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct bge_softc *sc;
	u_int32_t val, autopoll;
	int i;

	sc = device_get_softc(dev);

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1. On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time. Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return(0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: PHY read timed out\n", sc->bge_unit);
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return(0);

	return(val & 0xFFFF);
}

static int
bge_miibus_writereg(dev, phy, reg, val)
	device_t dev;
	int phy, reg, val;
{
	struct bge_softc *sc;
	u_int32_t autopoll;
	int i;

	sc = device_get_softc(dev);

	/* Writing with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: PHY write timed out\n", sc->bge_unit);
		return(0);
	}

	return(0);
}

static void
bge_miibus_statchg(dev)
	device_t dev;
{
	struct bge_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bge_miibus);

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}

	return;
}

/*
 * Handle events that have triggered interrupts.
 */
static void
bge_handle_events(sc)
	struct bge_softc		*sc;
{

	return;
}

/*
 * Memory management for jumbo frames.
 */

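/*
 * The allocator below carves a single contiguous BGE_JMEM-sized DMA
 * block into BGE_JSLOTS buffers of BGE_JLEN bytes each.  Free buffers
 * are kept on bge_jfree_listhead; buffers lent out to mbufs move to
 * bge_jinuse_listhead until bge_jfree() returns them.
 */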
static int
bge_alloc_jumbo_mem(sc)
	struct bge_softc		*sc;
{
	caddr_t			ptr;
	register int		i, error;
	struct bge_jpool_entry   *entry;

	/* Create tag for jumbo buffer block */

	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, BGE_JMEM, 1, BGE_JMEM, 0, NULL, NULL,
	    &sc->bge_cdata.bge_jumbo_tag);

	if (error) {
		printf("bge%d: could not allocate jumbo dma tag\n",
		    sc->bge_unit);
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for jumbo buffer block */

	error = bus_dmamem_alloc(sc->bge_cdata.bge_jumbo_tag,
	    (void **)&sc->bge_ldata.bge_jumbo_buf, BUS_DMA_NOWAIT,
	    &sc->bge_cdata.bge_jumbo_map);

	if (error)
		return (ENOMEM);

	SLIST_INIT(&sc->bge_jfree_listhead);
	SLIST_INIT(&sc->bge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->bge_ldata.bge_jumbo_buf;
	for (i = 0; i < BGE_JSLOTS; i++) {
		sc->bge_cdata.bge_jslots[i] = ptr;
		ptr += BGE_JLEN;
		entry = malloc(sizeof(struct bge_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			bge_free_jumbo_mem(sc);
			sc->bge_ldata.bge_jumbo_buf = NULL;
			printf("bge%d: no memory for jumbo "
			    "buffer queue!\n", sc->bge_unit);
			return(ENOBUFS);
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
		    entry, jpool_entries);
	}

	return(0);
}

static void
bge_free_jumbo_mem(sc)
	struct bge_softc *sc;
{
	int i;
	struct bge_jpool_entry *entry;

	for (i = 0; i < BGE_JSLOTS; i++) {
		entry = SLIST_FIRST(&sc->bge_jfree_listhead);
		SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
		free(entry, M_DEVBUF);
	}

	/* Destroy jumbo buffer block */

	if (sc->bge_ldata.bge_jumbo_buf)
		bus_dmamem_free(sc->bge_cdata.bge_jumbo_tag,
		    sc->bge_ldata.bge_jumbo_buf,
		    sc->bge_cdata.bge_jumbo_map);

	if (sc->bge_cdata.bge_jumbo_map)
		bus_dmamap_destroy(sc->bge_cdata.bge_jumbo_tag,
		    sc->bge_cdata.bge_jumbo_map);

	if (sc->bge_cdata.bge_jumbo_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_jumbo_tag);

	return;
}

/*
 * Allocate a jumbo buffer.
 */
static void *
bge_jalloc(sc)
	struct bge_softc		*sc;
{
	struct bge_jpool_entry   *entry;

	entry = SLIST_FIRST(&sc->bge_jfree_listhead);

	if (entry == NULL) {
		printf("bge%d: no free jumbo buffers\n", sc->bge_unit);
		return(NULL);
	}

	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
	return(sc->bge_cdata.bge_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
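 * This runs as the external-buffer free routine installed by MEXTADD()
 * in bge_newbuf_jumbo(): it is invoked when the last mbuf reference to
 * a jumbo buffer goes away, and returns the buffer's slot to the free
 * list.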
 */
static void
bge_jfree(buf, args)
	void *buf;
	void *args;
{
	struct bge_jpool_entry *entry;
	struct bge_softc *sc;
	int i;

	/* Extract the softc struct pointer. */
	sc = (struct bge_softc *)args;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	/* Calculate the slot this buffer belongs to. */

	i = ((vm_offset_t)buf
	     - (vm_offset_t)sc->bge_ldata.bge_jumbo_buf) / BGE_JLEN;

	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jfree: asked to free buffer that we don't manage!");

	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
	if (entry == NULL)
		panic("bge_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);

	return;
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(sc, i, m)
	struct bge_softc	*sc;
	int			i;
	struct mbuf		*m;
{
	struct mbuf		*m_new = NULL;
	struct bge_rx_bd	*r;
	struct bge_dmamap_arg	ctx;
	int			error;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(ENOBUFS);
		}

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_ldata.bge_rx_std_ring[i];
	ctx.bge_maxsegs = 1;
	ctx.sc = sc;
	error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
	    sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
	    m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
	if (error || ctx.bge_maxsegs == 0) {
		if (m == NULL)
			m_freem(m_new);
		return(ENOMEM);
	}
	r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
	r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
	r->bge_flags = htole16(BGE_RXBDFLAG_END);
	r->bge_len = htole16(m_new->m_len);
	r->bge_idx = htole16(i);

	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
	    sc->bge_cdata.bge_rx_std_dmamap[i],
	    BUS_DMASYNC_PREREAD);

	return(0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(sc, i, m)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	struct bge_dmamap_arg ctx;
	int error;

	if (m == NULL) {
		caddr_t			*buf = NULL;

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			printf("bge%d: jumbo allocation failed "
			    "-- packet dropped!\n", sc->bge_unit);
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf. */
		m_new->m_data = (void *) buf;
		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, bge_jfree,
		    (struct bge_softc *)sc, 0, EXT_NET_DRV);
	} else {
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}

	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	/* Set up the descriptor. */
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
	ctx.bge_maxsegs = 1;
	ctx.sc = sc;
	error = bus_dmamap_load(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_dmamap[i], mtod(m_new, void *),
	    m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
	if (error || ctx.bge_maxsegs == 0) {
		if (m == NULL)
			m_freem(m_new);
		return(ENOMEM);
	}
	r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
	r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
	r->bge_flags = htole16(BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING);
	r->bge_len = htole16(m_new->m_len);
	r->bge_idx = htole16(i);

	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
	    BUS_DMASYNC_PREREAD);

	return(0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
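/*
 * Note that bge_newbuf_std() only prepares the host-side descriptors;
 * the mailbox write at the bottom of this function is what publishes
 * the new producer index and hands the buffers to the chip.
 */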
static int
bge_init_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->bge_std = i - 1;
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	return(0);
}

static void
bge_free_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i]);
		}
		bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}

	return;
}

static int
bge_init_rx_ring_jumbo(sc)
	struct bge_softc *sc;
{
	int i;
	struct bge_rcb *rcb;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
	    sc->bge_cdata.bge_rx_jumbo_ring_map,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->bge_jumbo = i - 1;

	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return(0);
}

static void
bge_free_rx_ring_jumbo(sc)
	struct bge_softc *sc;
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
		}
		bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
		    sizeof(struct bge_rx_bd));
	}

	return;
}

static void
bge_free_tx_ring(sc)
	struct bge_softc *sc;
{
	int i;

	if (sc->bge_ldata.bge_tx_ring == NULL)
		return;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i]);
		}
		bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
		    sizeof(struct bge_tx_bd));
	}

	return;
}

static int
bge_init_tx_ring(sc)
	struct bge_softc *sc;
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

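	/*
	 * The host and NIC TX producer mailboxes below are each written
	 * twice on 5700 B-step parts; the duplicated write is the
	 * errata workaround (the first mailbox write can apparently be
	 * dropped by those chips).
	 */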
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);

	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return(0);
}

static void
bge_setmulti(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	u_int32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
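	/*
	 * Each address hashes to the low 7 bits of its little-endian
	 * CRC32: bits 6:5 pick one of the four 32-bit BGE_MAR registers,
	 * bits 4:0 pick the bit within that register.
	 */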
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}
	IF_ADDR_UNLOCK(ifp);

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);

	return;
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(sc)
	struct bge_softc *sc;
{
	int			i;
	u_int32_t		dma_rw_ctl;

	/* Set endianness before we access any non-PCI registers. */
#if BYTE_ORDER == BIG_ENDIAN
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
	    BGE_BIGENDIAN_INIT, 4);
#else
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
	    BGE_LITTLEENDIAN_INIT, 4);
#endif

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed.
	 */
	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		printf("bge%d: RX CPU self-diagnostics failed!\n",
		    sc->bge_unit);
		return(ENODEV);
	}

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/* Set up the PCI DMA control register. */
	if (sc->bge_pcie) {
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
	} else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
	    BGE_PCISTATE_PCI_BUSMODE) {
		/* Conventional PCI bus */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
		    (0x0F);
	} else {
		/* PCI-X bus */
		/*
		 * The 5704 uses a different encoding of read/write
		 * watermarks.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		else
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
			    (0x0F);

		/*
		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
		 * for hardware bugs.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			u_int32_t tmp;

			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 0x6 || tmp == 0x7)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
	    BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

#ifdef __brokenalpha__
	/*
	 * Must ensure that we do not cross an 8K (bytes) boundary
	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx.
	 */
	PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
	    BGE_PCI_READ_BNDRY_1024BYTES, 4);
#endif

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return(0);
}

static int
bge_blockinit(sc)
	struct bge_softc *sc;
{
	struct bge_rcb *rcb;
	volatile struct bge_rcb *vrcb;
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Note: the BCM5704 has a smaller mbuf space than other chips. */

	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		/* Configure mbuf memory pool */
		if (sc->bge_extram) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_EXT_SSRAM);
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_BUFFPOOL_1);
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		}

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5750) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
	}
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		CSR_WRITE_4(sc, BGE_BMAN_MODE,
		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

		/* Poll for buffer manager start indication */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
				break;
			DELAY(10);
		}

		if (i == BGE_TIMEOUT) {
			printf("bge%d: buffer manager failed to start\n",
			    sc->bge_unit);
			return(ENXIO);
		}
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: flow-through queue init failed\n",
		    sc->bge_unit);
		return(ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	else
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	if (sc->bge_extram)
		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);

	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/*
	 * Initialize the jumbo RX ring control block.
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
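	 * (bge_init_rx_ring_jumbo() rewrites the flags word to zero to
	 * enable the ring once it has been populated.)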
	 */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;

		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map,
		    BUS_DMASYNC_PREREAD);
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
		    BGE_RCB_FLAG_RING_DISABLED);
		if (sc->bge_extram)
			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);

		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
	}

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_SEND_RING_RCB);
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		vrcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		vrcb->bge_nicaddr = 0;
		vrcb++;
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_SEND_RING_RCB);
	vrcb->bge_hostaddr.bge_addr_lo =
	    htole32(BGE_ADDR_LO(sc->bge_ldata.bge_tx_ring_paddr));
	vrcb->bge_hostaddr.bge_addr_hi =
	    htole32(BGE_ADDR_HI(sc->bge_ldata.bge_tx_ring_paddr));
	vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		vrcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);

	/* Disable all unused RX return rings */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_RX_RETURN_RING_RCB);
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		vrcb->bge_hostaddr.bge_addr_hi = 0;
		vrcb->bge_hostaddr.bge_addr_lo = 0;
		vrcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
		    BGE_RCB_FLAG_RING_DISABLED);
		vrcb->bge_nicaddr = 0;
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(u_int64_t))), 0);
		vrcb++;
	}

	/* Initialize RX ring indexes */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0.
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_RX_RETURN_RING_RCB);
	vrcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_return_ring_paddr);
	vrcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_return_ring_paddr);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
	vrcb->bge_nicaddr = 0x00000000;
	vrcb->bge_maxlen_flags =
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0);

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    IFP2ENADDR(sc->bge_ifp)[0] + IFP2ENADDR(sc->bge_ifp)[1] +
	    IFP2ENADDR(sc->bge_ifp)[2] + IFP2ENADDR(sc->bge_ifp)[3] +
	    IFP2ENADDR(sc->bge_ifp)[4] + IFP2ENADDR(sc->bge_ifp)[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: host coalescing engine failed to idle\n",
		    sc->bge_unit);
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);

	/* Set up address of statistics block */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE,
	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE,
	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* Ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_tbi) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}

/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match. Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string. This is a little slow, but it guarantees
 * we'll always announce the right product name.
 */
static int
bge_probe(dev)
	device_t dev;
{
	struct bge_type *t;
	struct bge_softc *sc;
	char *descbuf;

	t = bge_devs;

	sc = device_get_softc(dev);
	bzero(sc, sizeof(struct bge_softc));
	sc->bge_unit = device_get_unit(dev);
	sc->bge_dev = dev;

	while(t->bge_name != NULL) {
		if ((pci_get_vendor(dev) == t->bge_vid) &&
		    (pci_get_device(dev) == t->bge_did)) {
#ifdef notdef
			bge_vpd_read(sc);
			device_set_desc(dev, sc->bge_vpd_prodname);
#endif
			descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
			if (descbuf == NULL)
				return(ENOMEM);
			snprintf(descbuf, BGE_DEVDESC_MAX,
			    "%s, ASIC rev. %#04x", t->bge_name,
			    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
			device_set_desc_copy(dev, descbuf);
			if (pci_get_subvendor(dev) == DELL_VENDORID)
				sc->bge_no_3_led = 1;
			free(descbuf, M_TEMP);
			return(0);
		}
		t++;
	}

	return(ENXIO);
}

1779 static void
1780 bge_dma_free(sc)
1781 	struct bge_softc *sc;
1782 {
1783 	int i;
1784 
1785 
1786 	/* Destroy DMA maps for RX buffers */
1787 
1788 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1789 		if (sc->bge_cdata.bge_rx_std_dmamap[i])
1790 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1791 			    sc->bge_cdata.bge_rx_std_dmamap[i]);
1792 	}
1793 
1794 	/* Destroy DMA maps for jumbo RX buffers */
1795 
1796 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1797 		if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1798 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1799 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1800 	}
1801 
1802 	/* Destroy DMA maps for TX buffers */
1803 
1804 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1805 		if (sc->bge_cdata.bge_tx_dmamap[i])
1806 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1807 			    sc->bge_cdata.bge_tx_dmamap[i]);
1808 	}
1809 
1810 	if (sc->bge_cdata.bge_mtag)
1811 		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
1812 
1813 
1814 	/* Destroy standard RX ring */
1815 
1816 	if (sc->bge_ldata.bge_rx_std_ring)
1817 		bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1818 		    sc->bge_ldata.bge_rx_std_ring,
1819 		    sc->bge_cdata.bge_rx_std_ring_map);
1820 
1821 	if (sc->bge_cdata.bge_rx_std_ring_map) {
1822 		bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1823 		    sc->bge_cdata.bge_rx_std_ring_map);
1824 		bus_dmamap_destroy(sc->bge_cdata.bge_rx_std_ring_tag,
1825 		    sc->bge_cdata.bge_rx_std_ring_map);
1826 	}
1827 
1828 	if (sc->bge_cdata.bge_rx_std_ring_tag)
1829 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1830 
1831 	/* Destroy jumbo RX ring */
1832 
1833 	if (sc->bge_ldata.bge_rx_jumbo_ring)
1834 		bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1835 		    sc->bge_ldata.bge_rx_jumbo_ring,
1836 		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1837 
1838 	if (sc->bge_cdata.bge_rx_jumbo_ring_map) {
1839 		bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1840 		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1841 		bus_dmamap_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1842 		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1843 	}
1844 
1845 	if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1846 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1847 
1848 	/* Destroy RX return ring */
1849 
1850 	if (sc->bge_ldata.bge_rx_return_ring)
1851 		bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1852 		    sc->bge_ldata.bge_rx_return_ring,
1853 		    sc->bge_cdata.bge_rx_return_ring_map);
1854 
1855 	if (sc->bge_cdata.bge_rx_return_ring_map) {
1856 		bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1857 		    sc->bge_cdata.bge_rx_return_ring_map);
1858 		bus_dmamap_destroy(sc->bge_cdata.bge_rx_return_ring_tag,
1859 		    sc->bge_cdata.bge_rx_return_ring_map);
1860 	}
1861 
1862 	if (sc->bge_cdata.bge_rx_return_ring_tag)
1863 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
1864 
1865 	/* Destroy TX ring */
1866 
1867 	if (sc->bge_ldata.bge_tx_ring)
1868 		bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
1869 		    sc->bge_ldata.bge_tx_ring,
1870 		    sc->bge_cdata.bge_tx_ring_map);
1871 
1872 	if (sc->bge_cdata.bge_tx_ring_map) {
1873 		bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
1874 		    sc->bge_cdata.bge_tx_ring_map);
1875 		bus_dmamap_destroy(sc->bge_cdata.bge_tx_ring_tag,
1876 		    sc->bge_cdata.bge_tx_ring_map);
1877 	}
1878 
1879 	if (sc->bge_cdata.bge_tx_ring_tag)
1880 		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
1881 
1882 	/* Destroy status block */
1883 
1884 	if (sc->bge_ldata.bge_status_block)
1885 		bus_dmamem_free(sc->bge_cdata.bge_status_tag,
1886 		    sc->bge_ldata.bge_status_block,
1887 		    sc->bge_cdata.bge_status_map);
1888 
1889 	if (sc->bge_cdata.bge_status_map) {
1890 		bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
1891 		    sc->bge_cdata.bge_status_map);
1892 		bus_dmamap_destroy(sc->bge_cdata.bge_status_tag,
1893 		    sc->bge_cdata.bge_status_map);
1894 	}
1895 
1896 	if (sc->bge_cdata.bge_status_tag)
1897 		bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1898 
1899 	/* Destroy statistics block */
1900 
1901 	if (sc->bge_ldata.bge_stats)
1902 		bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
1903 		    sc->bge_ldata.bge_stats,
1904 		    sc->bge_cdata.bge_stats_map);
1905 
1906 	if (sc->bge_cdata.bge_stats_map) {
1907 		bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
1908 		    sc->bge_cdata.bge_stats_map);
1909 		bus_dmamap_destroy(sc->bge_cdata.bge_stats_tag,
1910 		    sc->bge_cdata.bge_stats_map);
1911 	}
1912 
1913 	if (sc->bge_cdata.bge_stats_tag)
1914 		bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1915 
1916 	/* Destroy the parent tag */
1917 
1918 	if (sc->bge_cdata.bge_parent_tag)
1919 		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1920 
1921 	return;
1922 }
1923 
1924 static int
1925 bge_dma_alloc(dev)
1926 	device_t dev;
1927 {
1928 	struct bge_softc *sc;
1929 	int nseg, i, error;
1930 	struct bge_dmamap_arg ctx;
1931 
1932 	sc = device_get_softc(dev);
1933 
1934 	/*
1935 	 * Allocate the parent bus DMA tag appropriate for PCI.
1936 	 */
1937 #define BGE_NSEG_NEW 32
1938 	error = bus_dma_tag_create(NULL,	/* parent */
1939 			PAGE_SIZE, 0,		/* alignment, boundary */
1940 			BUS_SPACE_MAXADDR,	/* lowaddr */
1941 			BUS_SPACE_MAXADDR,	/* highaddr */
1942 			NULL, NULL,		/* filter, filterarg */
1943 			MAXBSIZE, BGE_NSEG_NEW,	/* maxsize, nsegments */
1944 			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1945 			0,			/* flags */
1946 			NULL, NULL,		/* lockfunc, lockarg */
1947 			&sc->bge_cdata.bge_parent_tag);
1948 
	if (error) {
		device_printf(dev, "could not allocate parent dma tag\n");
		return (ENOMEM);
	}

1949 	/*
1950 	 * Create tag for RX mbufs.
1951 	 */
1952 	nseg = 32;
1953 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
1954 	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1955 	    NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL,
1956 	    &sc->bge_cdata.bge_mtag);
1957 
1958 	if (error) {
1959 		device_printf(dev, "could not allocate dma tag\n");
1960 		return (ENOMEM);
1961 	}
1962 
1963 	/* Create DMA maps for RX buffers */
1964 
1965 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1966 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1967 			    &sc->bge_cdata.bge_rx_std_dmamap[i]);
1968 		if (error) {
1969 			device_printf(dev, "can't create DMA map for RX\n");
1970 			return(ENOMEM);
1971 		}
1972 	}
1973 
1974 	/* Create DMA maps for TX buffers */
1975 
1976 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1977 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1978 			    &sc->bge_cdata.bge_tx_dmamap[i]);
1979 		if (error) {
1980 			device_printf(dev, "can't create DMA map for TX\n");
1981 			return(ENOMEM);
1982 		}
1983 	}
1984 
1985 	/* Create tag for standard RX ring */
1986 
1987 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1988 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1989 	    NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1990 	    NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1991 
1992 	if (error) {
1993 		device_printf(dev, "could not allocate dma tag\n");
1994 		return (ENOMEM);
1995 	}
1996 
1997 	/* Allocate DMA'able memory for standard RX ring */
1998 
1999 	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
2000 	    (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
2001 	    &sc->bge_cdata.bge_rx_std_ring_map);
2002 	if (error)
2003 		return (ENOMEM);
2004 
2005 	bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
2006 
2007 	/* Load the address of the standard RX ring */
2008 
2009 	ctx.bge_maxsegs = 1;
2010 	ctx.sc = sc;
2011 
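	/*
	 * bus_dmamap_load() hands the resulting segment list to our
	 * bge_dma_map_addr() callback, which records the bus address
	 * in ctx.bge_busaddr for use below.
	 */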
2012 	error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
2013 	    sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
2014 	    BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2015 
2016 	if (error)
2017 		return (ENOMEM);
2018 
2019 	sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
2020 
2021 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2022 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2023 
2024 		/*
2025 		 * Create tag for jumbo mbufs.
2026 		 * This is really a bit of a kludge. We allocate a special
2027 		 * jumbo buffer pool which (thanks to the way our DMA
2028 		 * memory allocation works) will consist of contiguous
2029 		 * pages. This means that even though a jumbo buffer might
2030 		 * be larger than a page size, we don't really need to
2031 		 * map it into more than one DMA segment. However, the
2032 		 * default mbuf tag will result in multi-segment mappings,
2033 		 * so we have to create a special jumbo mbuf tag that
2034 		 * lets us get away with mapping the jumbo buffers as
2035 		 * a single segment. I think eventually the driver should
2036 		 * be changed so that it uses ordinary mbufs and cluster
2037 		 * buffers, i.e. jumbo frames can span multiple DMA
2038 		 * descriptors. But that's a project for another day.
2039 		 */
2040 
2041 		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2042 		    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2043 		    NULL, MCLBYTES * nseg, nseg, BGE_JLEN, 0, NULL, NULL,
2044 		    &sc->bge_cdata.bge_mtag_jumbo);
2045 
2046 		if (error) {
2047 			device_printf(dev, "could not allocate dma tag\n");
2048 			return (ENOMEM);
2049 		}
2050 
2051 		/* Create tag for jumbo RX ring */
2052 
2053 		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2054 		    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2055 		    NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
2056 		    NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
2057 
2058 		if (error) {
2059 			device_printf(dev, "could not allocate dma tag\n");
2060 			return (ENOMEM);
2061 		}
2062 
2063 		/* Allocate DMA'able memory for jumbo RX ring */
2064 
2065 		error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2066 		    (void **)&sc->bge_ldata.bge_rx_jumbo_ring, BUS_DMA_NOWAIT,
2067 		    &sc->bge_cdata.bge_rx_jumbo_ring_map);
2068 		if (error)
2069 			return (ENOMEM);
2070 
2071 		bzero((char *)sc->bge_ldata.bge_rx_jumbo_ring,
2072 		    BGE_JUMBO_RX_RING_SZ);
2073 
2074 		/* Load the address of the jumbo RX ring */
2075 
2076 		ctx.bge_maxsegs = 1;
2077 		ctx.sc = sc;
2078 
2079 		error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2080 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
2081 		    sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
2082 		    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2083 
2084 		if (error)
2085 			return (ENOMEM);
2086 
2087 		sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
2088 
2089 		/* Create DMA maps for jumbo RX buffers */
2090 
2091 		for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2092 			error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
2093 				    0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2094 			if (error) {
2095 				device_printf(dev,
2096 				    "can't create DMA map for RX\n");
2097 				return(ENOMEM);
2098 			}
2099 		}
2100 
2101 	}
2102 
2103 	/* Create tag for RX return ring */
2104 
2105 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2106 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2107 	    NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
2108 	    NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
2109 
2110 	if (error) {
2111 		device_printf(dev, "could not allocate dma tag\n");
2112 		return (ENOMEM);
2113 	}
2114 
2115 	/* Allocate DMA'able memory for RX return ring */
2116 
2117 	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
2118 	    (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
2119 	    &sc->bge_cdata.bge_rx_return_ring_map);
2120 	if (error)
2121 		return (ENOMEM);
2122 
2123 	bzero((char *)sc->bge_ldata.bge_rx_return_ring,
2124 	    BGE_RX_RTN_RING_SZ(sc));
2125 
2126 	/* Load the address of the RX return ring */
2127 
2128 	ctx.bge_maxsegs = 1;
2129 	ctx.sc = sc;
2130 
2131 	error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
2132 	    sc->bge_cdata.bge_rx_return_ring_map,
2133 	    sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
2134 	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2135 
2136 	if (error)
2137 		return (ENOMEM);
2138 
2139 	sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
2140 
2141 	/* Create tag for TX ring */
2142 
2143 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2144 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2145 	    NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
2146 	    &sc->bge_cdata.bge_tx_ring_tag);
2147 
2148 	if (error) {
2149 		device_printf(dev, "could not allocate dma tag\n");
2150 		return (ENOMEM);
2151 	}
2152 
2153 	/* Allocate DMA'able memory for TX ring */
2154 
2155 	error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
2156 	    (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
2157 	    &sc->bge_cdata.bge_tx_ring_map);
2158 	if (error)
2159 		return (ENOMEM);
2160 
2161 	bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
2162 
2163 	/* Load the address of the TX ring */
2164 
2165 	ctx.bge_maxsegs = 1;
2166 	ctx.sc = sc;
2167 
2168 	error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
2169 	    sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
2170 	    BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2171 
2172 	if (error)
2173 		return (ENOMEM);
2174 
2175 	sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
2176 
2177 	/* Create tag for status block */
2178 
2179 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2180 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2181 	    NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
2182 	    NULL, NULL, &sc->bge_cdata.bge_status_tag);
2183 
2184 	if (error) {
2185 		device_printf(dev, "could not allocate dma tag\n");
2186 		return (ENOMEM);
2187 	}
2188 
2189 	/* Allocate DMA'able memory for status block */
2190 
2191 	error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
2192 	    (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
2193 	    &sc->bge_cdata.bge_status_map);
2194 	if (error)
2195 		return (ENOMEM);
2196 
2197 	bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2198 
2199 	/* Load the address of the status block */
2200 
2201 	ctx.sc = sc;
2202 	ctx.bge_maxsegs = 1;
2203 
2204 	error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2205 	    sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2206 	    BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2207 
2208 	if (error)
2209 		return (ENOMEM);
2210 
2211 	sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2212 
2213 	/* Create tag for statistics block */
2214 
2215 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2216 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2217 	    NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2218 	    &sc->bge_cdata.bge_stats_tag);
2219 
2220 	if (error) {
2221 		device_printf(dev, "could not allocate dma tag\n");
2222 		return (ENOMEM);
2223 	}
2224 
2225 	/* Allocate DMA'able memory for statistics block */
2226 
2227 	error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2228 	    (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2229 	    &sc->bge_cdata.bge_stats_map);
2230 	if (error)
2231 		return (ENOMEM);
2232 
2233 	bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2234 
2235 	/* Load the address of the statistics block */
2236 
2237 	ctx.sc = sc;
2238 	ctx.bge_maxsegs = 1;
2239 
2240 	error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2241 	    sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2242 	    BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2243 
2244 	if (error)
2245 		return (ENOMEM);
2246 
2247 	sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2248 
2249 	return(0);
2250 }
2251 
2252 static int
2253 bge_attach(dev)
2254 	device_t dev;
2255 {
2256 	struct ifnet *ifp;
2257 	struct bge_softc *sc;
2258 	u_int32_t hwcfg = 0;
2259 	u_int32_t mac_tmp = 0;
2260 	u_char eaddr[6];
2261 	int unit, error = 0, rid;
2262 
2263 	sc = device_get_softc(dev);
2264 	unit = device_get_unit(dev);
2265 	sc->bge_dev = dev;
2266 	sc->bge_unit = unit;
2267 
2268 	/*
2269 	 * Map control/status registers.
2270 	 */
2271 	pci_enable_busmaster(dev);
2272 
2273 	rid = BGE_PCI_BAR0;
2274 	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2275 	    RF_ACTIVE|PCI_RF_DENSE);
2276 
2277 	if (sc->bge_res == NULL) {
2278 		printf("bge%d: couldn't map memory\n", unit);
2279 		error = ENXIO;
2280 		goto fail;
2281 	}
2282 
2283 	sc->bge_btag = rman_get_bustag(sc->bge_res);
2284 	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2285 	sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);
2286 
2287 	/* Allocate interrupt */
2288 	rid = 0;
2289 
2290 	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2291 	    RF_SHAREABLE | RF_ACTIVE);
2292 
2293 	if (sc->bge_irq == NULL) {
2294 		printf("bge%d: couldn't map interrupt\n", unit);
2295 		error = ENXIO;
2296 		goto fail;
2297 	}
2298 
2299 	sc->bge_unit = unit;
2300 
2301 	BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2302 
2303 	/* Save ASIC rev. */
2304 
2305 	sc->bge_chipid =
2306 	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2307 	    BGE_PCIMISCCTL_ASICREV;
2308 	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2309 	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2310 
2311 	/*
2312 	 * Treat the 5714 like the 5750 until we have more info
2313 	 * on this chip.
2314 	 */
2315 	if (sc->bge_asicrev == BGE_ASICREV_BCM5714)
2316 		sc->bge_asicrev = BGE_ASICREV_BCM5750;
2317 
2318 	/*
2319 	 * XXX: Broadcom Linux driver.  Not in specs or errata.
2320 	 * PCI-Express?
2321 	 */
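	/*
	 * Follow the MSI capability's next-capability pointer: if it
	 * leads to a capability with the PCI Express ID, the chip is
	 * attached via PCIe and needs the PCIe workarounds below.
	 */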
2322 	if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
2323 		u_int32_t v;
2324 
2325 		v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
2326 		if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
2327 			v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2328 			if ((v & 0xff) == BGE_PCIE_CAPID)
2329 				sc->bge_pcie = 1;
2330 		}
2331 	}
2332 
2333 	/* Try to reset the chip. */
2334 	bge_reset(sc);
2335 
2336 	if (bge_chipinit(sc)) {
2337 		printf("bge%d: chip initialization failed\n", sc->bge_unit);
2338 		bge_release_resources(sc);
2339 		error = ENXIO;
2340 		goto fail;
2341 	}
2342 
2343 	/*
2344 	 * Get the station address, trying the NIC's internal memory
	 * first and falling back to the EEPROM if the signature word
	 * is not present.
2345 	 */
2346 	mac_tmp = bge_readmem_ind(sc, 0x0c14);
2347 	if ((mac_tmp >> 16) == 0x484b) {
2348 		eaddr[0] = (u_char)(mac_tmp >> 8);
2349 		eaddr[1] = (u_char)mac_tmp;
2350 		mac_tmp = bge_readmem_ind(sc, 0x0c18);
2351 		eaddr[2] = (u_char)(mac_tmp >> 24);
2352 		eaddr[3] = (u_char)(mac_tmp >> 16);
2353 		eaddr[4] = (u_char)(mac_tmp >> 8);
2354 		eaddr[5] = (u_char)mac_tmp;
2355 	} else if (bge_read_eeprom(sc, eaddr,
2356 	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2357 		printf("bge%d: failed to read station address\n", unit);
2358 		bge_release_resources(sc);
2359 		error = ENXIO;
2360 		goto fail;
2361 	}
2362 
2363 	/* The 5705 and 5750 limit the RX return ring to 512 entries. */
2364 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2365 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
2366 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2367 	else
2368 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2369 
2370 	if (bge_dma_alloc(dev)) {
2371 		printf("bge%d: failed to allocate DMA resources\n",
2372 		    sc->bge_unit);
2373 		bge_release_resources(sc);
2374 		error = ENXIO;
2375 		goto fail;
2376 	}
2377 
2378 	/*
2379 	 * Try to allocate memory for jumbo buffers.
2380 	 * The 5705 and 5750 do not appear to support jumbo frames.
2381 	 */
2382 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2383 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2384 		if (bge_alloc_jumbo_mem(sc)) {
2385 			printf("bge%d: jumbo buffer allocation "
2386 			    "failed\n", sc->bge_unit);
2387 			bge_release_resources(sc);
2388 			error = ENXIO;
2389 			goto fail;
2390 		}
2391 	}
2392 
2393 	/* Set default tuneable values. */
2394 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2395 	sc->bge_rx_coal_ticks = 150;
2396 	sc->bge_tx_coal_ticks = 150;
2397 	sc->bge_rx_max_coal_bds = 64;
2398 	sc->bge_tx_max_coal_bds = 128;
2399 
2400 	/* Set up ifnet structure */
2401 	ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2402 	if (ifp == NULL) {
2403 		printf("bge%d: if_alloc() failed\n", sc->bge_unit);
2404 		bge_release_resources(sc);
2405 		error = ENXIO;
2406 		goto fail;
2407 	}
2408 	ifp->if_softc = sc;
2409 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2410 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2411 	ifp->if_ioctl = bge_ioctl;
2412 	ifp->if_start = bge_start;
2413 	ifp->if_watchdog = bge_watchdog;
2414 	ifp->if_init = bge_init;
2415 	ifp->if_mtu = ETHERMTU;
2416 	ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2417 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2418 	IFQ_SET_READY(&ifp->if_snd);
2419 	ifp->if_hwassist = BGE_CSUM_FEATURES;
2420 	/* NB: the code for RX csum offload is disabled for now */
2421 	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING |
2422 	    IFCAP_VLAN_MTU;
2423 	ifp->if_capenable = ifp->if_capabilities;
2424 
2425 	/*
2426 	 * Figure out what sort of media we have by checking the
2427 	 * hardware config word in the first 32k of NIC internal memory,
2428 	 * or fall back to examining the EEPROM if necessary.
2429 	 * Note: on some BCM5700 cards, this value appears to be unset.
2430 	 * If that's the case, we have to rely on identifying the NIC
2431 	 * by its PCI subsystem ID, as we do below for the SysKonnect
2432 	 * SK-9D41.
2433 	 */
2434 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2435 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2436 	else {
2437 		bge_read_eeprom(sc, (caddr_t)&hwcfg,
2438 				BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
2439 		hwcfg = ntohl(hwcfg);
2440 	}
2441 
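	/* A fiber medium means the MAC uses TBI rather than an MII PHY. */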
2442 	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2443 		sc->bge_tbi = 1;
2444 
2445 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
2446 	if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2447 		sc->bge_tbi = 1;
2448 
2449 	if (sc->bge_tbi) {
2450 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2451 		    bge_ifmedia_upd, bge_ifmedia_sts);
2452 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2453 		ifmedia_add(&sc->bge_ifmedia,
2454 		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2455 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2456 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2457 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2458 	} else {
2459 		/*
2460 		 * Do transceiver setup.
2461 		 */
2462 		if (mii_phy_probe(dev, &sc->bge_miibus,
2463 		    bge_ifmedia_upd, bge_ifmedia_sts)) {
2464 			printf("bge%d: MII without any PHY!\n", sc->bge_unit);
2465 			bge_release_resources(sc);
2466 			if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
			    sc->bge_asicrev != BGE_ASICREV_BCM5750)
				bge_free_jumbo_mem(sc);
2467 			error = ENXIO;
2468 			goto fail;
2469 		}
2470 	}
2471 
2472 	/*
2473 	 * When using the BCM5701 in PCI-X mode, data corruption has
2474 	 * been observed in the first few bytes of some received packets.
2475 	 * Aligning the packet buffer in memory eliminates the corruption.
2476 	 * Unfortunately, this misaligns the packet payloads.  On platforms
2477 	 * which do not support unaligned accesses, we will realign the
2478 	 * payloads by copying the received packets.
2479 	 */
2480 	switch (sc->bge_chipid) {
2481 	case BGE_CHIPID_BCM5701_A0:
2482 	case BGE_CHIPID_BCM5701_B0:
2483 	case BGE_CHIPID_BCM5701_B2:
2484 	case BGE_CHIPID_BCM5701_B5:
2485 		/* If in PCI-X mode, work around the alignment bug. */
2486 		if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2487 		    (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
2488 		    BGE_PCISTATE_PCI_BUSSPEED)
2489 			sc->bge_rx_alignment_bug = 1;
2490 		break;
2491 	}
2492 
2493 	/*
2494 	 * Call MI attach routine.
2495 	 */
2496 	ether_ifattach(ifp, eaddr);
2497 	callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE);
2498 
2499 	/*
2500 	 * Hookup IRQ last.
2501 	 */
2502 	error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2503 	   bge_intr, sc, &sc->bge_intrhand);
2504 
2505 	if (error) {
2506 		bge_detach(dev);
2507 		printf("bge%d: couldn't set up irq\n", unit);
2508 	}
2509 
2510 fail:
2511 	return(error);
2512 }
2513 
2514 static int
2515 bge_detach(dev)
2516 	device_t dev;
2517 {
2518 	struct bge_softc *sc;
2519 	struct ifnet *ifp;
2520 
2521 	sc = device_get_softc(dev);
2522 	ifp = sc->bge_ifp;
2523 
2524 	BGE_LOCK(sc);
2525 	bge_stop(sc);
2526 	bge_reset(sc);
2527 	BGE_UNLOCK(sc);
2528 
2529 	ether_ifdetach(ifp);
2530 
2531 	if (sc->bge_tbi) {
2532 		ifmedia_removeall(&sc->bge_ifmedia);
2533 	} else {
2534 		bus_generic_detach(dev);
2535 		device_delete_child(dev, sc->bge_miibus);
2536 	}
2537 
2538 	bge_release_resources(sc);
2539 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2540 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
2541 		bge_free_jumbo_mem(sc);
2542 
2543 	return(0);
2544 }
2545 
2546 static void
2547 bge_release_resources(sc)
2548 	struct bge_softc *sc;
2549 {
2550 	device_t dev;
2551 
2552 	dev = sc->bge_dev;
2553 
2554 	if (sc->bge_vpd_prodname != NULL)
2555 		free(sc->bge_vpd_prodname, M_DEVBUF);
2556 
2557 	if (sc->bge_vpd_readonly != NULL)
2558 		free(sc->bge_vpd_readonly, M_DEVBUF);
2559 
2560 	if (sc->bge_intrhand != NULL)
2561 		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2562 
2563 	if (sc->bge_irq != NULL)
2564 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2565 
2566 	if (sc->bge_res != NULL)
2567 		bus_release_resource(dev, SYS_RES_MEMORY,
2568 		    BGE_PCI_BAR0, sc->bge_res);
2569 
2570 	if (sc->bge_ifp != NULL)
2571 		if_free(sc->bge_ifp);
2572 
2573 	bge_dma_free(sc);
2574 
2575 	if (mtx_initialized(&sc->bge_mtx))	/* XXX */
2576 		BGE_LOCK_DESTROY(sc);
2577 
2578 	return;
2579 }
2580 
2581 static void
2582 bge_reset(sc)
2583 	struct bge_softc *sc;
2584 {
2585 	device_t dev;
2586 	u_int32_t cachesize, command, pcistate, reset;
2587 	int i, val = 0;
2588 
2589 	dev = sc->bge_dev;
2590 
2591 	/* Save some important PCI state. */
2592 	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2593 	command = pci_read_config(dev, BGE_PCI_CMD, 4);
2594 	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2595 
2596 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2597 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2598 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2599 
2600 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2601 
2602 	/* XXX: Broadcom Linux driver. */
2603 	if (sc->bge_pcie) {
2604 		if (CSR_READ_4(sc, 0x7e2c) == 0x60)	/* PCIE 1.0 */
2605 			CSR_WRITE_4(sc, 0x7e2c, 0x20);
2606 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2607 			/* Prevent PCIE link training during global reset */
2608 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2609 			reset |= (1<<29);
2610 		}
2611 	}
2612 
2613 	/* Issue global reset */
2614 	bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2615 
2616 	DELAY(1000);
2617 
2618 	/* XXX: Broadcom Linux driver. */
2619 	if (sc->bge_pcie) {
2620 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2621 			uint32_t v;
2622 
2623 			DELAY(500000); /* wait for link training to complete */
2624 			v = pci_read_config(dev, 0xc4, 4);
2625 			pci_write_config(dev, 0xc4, v | (1<<15), 4);
2626 		}
2627 		/* Set PCIE max payload size and clear error status. */
2628 		pci_write_config(dev, 0xd8, 0xf5000, 4);
2629 	}
2630 
2631 	/* Reset some of the PCI state that got zapped by reset */
2632 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2633 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2634 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2635 	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2636 	pci_write_config(dev, BGE_PCI_CMD, command, 4);
2637 	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2638 
2639 	/* Enable memory arbiter. */
2640 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2641 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
2642 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2643 
2644 	/*
2645 	 * Prevent PXE restart: write a magic number to the
2646 	 * general communications memory at 0xB50.
2647 	 */
2648 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2649 	/*
2650 	 * Poll the value location we just wrote until
2651 	 * we see the 1's complement of the magic number.
2652 	 * This indicates that the firmware initialization
2653 	 * is complete.
2654 	 */
2655 	for (i = 0; i < BGE_TIMEOUT; i++) {
2656 		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2657 		if (val == ~BGE_MAGIC_NUMBER)
2658 			break;
2659 		DELAY(10);
2660 	}
2661 
2662 	if (i == BGE_TIMEOUT) {
2663 		printf("bge%d: firmware handshake timed out\n", sc->bge_unit);
2664 		return;
2665 	}
2666 
2667 	/*
2668 	 * XXX Wait for the value of the PCISTATE register to
2669 	 * return to its original pre-reset state. This is a
2670 	 * fairly good indicator of reset completion. If we don't
2671 	 * wait for the reset to fully complete, trying to read
2672 	 * from the device's non-PCI registers may yield garbage
2673 	 * results.
2674 	 */
2675 	for (i = 0; i < BGE_TIMEOUT; i++) {
2676 		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2677 			break;
2678 		DELAY(10);
2679 	}
2680 
2681 	/* Fix up byte swapping */
2682 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
2683 	    BGE_MODECTL_BYTESWAP_DATA);
2684 
2685 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2686 
2687 	/*
2688 	 * The 5704 in TBI mode apparently needs some special
2689 	 * adjustment to ensure the SERDES drive level is set
2690 	 * to 1.2V.
2691 	 */
2692 	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
2693 		uint32_t serdescfg;
2694 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2695 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
2696 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2697 	}
2698 
2699 	/* XXX: Broadcom Linux driver. */
2700 	if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2701 		uint32_t v;
2702 
2703 		v = CSR_READ_4(sc, 0x7c00);
2704 		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2705 	}
2706 	DELAY(10000);
2707 
2708 	return;
2709 }
2710 
2711 /*
2712  * Frame reception handling. This is called if there's a frame
2713  * on the receive return list.
2714  *
2715  * Note: we have to be able to handle two possibilities here:
2716  * 1) the frame is from the jumbo receive ring
2717  * 2) the frame is from the standard receive ring
2718  */
2719 
2720 static void
2721 bge_rxeof(sc)
2722 	struct bge_softc *sc;
2723 {
2724 	struct ifnet *ifp;
2725 	int stdcnt = 0, jumbocnt = 0;
2726 
2727 	BGE_LOCK_ASSERT(sc);
2728 
2729 	ifp = sc->bge_ifp;
2730 
2731 	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2732 	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTWRITE);
2733 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2734 	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
2735 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2736 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2737 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2738 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
2739 		    BUS_DMASYNC_POSTREAD);
2740 	}
2741 
2742 	while(sc->bge_rx_saved_considx !=
2743 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2744 		struct bge_rx_bd	*cur_rx;
2745 		u_int32_t		rxidx;
2746 		struct ether_header	*eh;
2747 		struct mbuf		*m = NULL;
2748 		u_int16_t		vlan_tag = 0;
2749 		int			have_tag = 0;
2750 
2751 		cur_rx =
2752 		    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2753 
2754 		rxidx = cur_rx->bge_idx;
2755 		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2756 
2757 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2758 			have_tag = 1;
2759 			vlan_tag = cur_rx->bge_vlan_tag;
2760 		}
2761 
2762 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2763 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2764 			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
2765 			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
2766 			    BUS_DMASYNC_POSTREAD);
2767 			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
2768 			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
2769 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2770 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2771 			jumbocnt++;
2772 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2773 				ifp->if_ierrors++;
2774 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2775 				continue;
2776 			}
2777 			if (bge_newbuf_jumbo(sc,
2778 			    sc->bge_jumbo, NULL) == ENOBUFS) {
2779 				ifp->if_ierrors++;
2780 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2781 				continue;
2782 			}
2783 		} else {
2784 			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2785 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2786 			    sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2787 			    BUS_DMASYNC_POSTREAD);
2788 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2789 			    sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2790 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2791 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2792 			stdcnt++;
2793 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2794 				ifp->if_ierrors++;
2795 				bge_newbuf_std(sc, sc->bge_std, m);
2796 				continue;
2797 			}
2798 			if (bge_newbuf_std(sc, sc->bge_std,
2799 			    NULL) == ENOBUFS) {
2800 				ifp->if_ierrors++;
2801 				bge_newbuf_std(sc, sc->bge_std, m);
2802 				continue;
2803 			}
2804 		}
2805 
2806 		ifp->if_ipackets++;
2807 #ifndef __i386__
2808 		/*
2809 		 * The i386 allows unaligned accesses, but for other
2810 		 * platforms we must make sure the payload is aligned.
2811 		 */
2812 		if (sc->bge_rx_alignment_bug) {
2813 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2814 			    cur_rx->bge_len);
2815 			m->m_data += ETHER_ALIGN;
2816 		}
2817 #endif
2818 		eh = mtod(m, struct ether_header *);
2819 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2820 		m->m_pkthdr.rcvif = ifp;
2821 
2822 #if 0 /* currently broken for some packets, possibly related to TCP options */
2823 		if (ifp->if_capenable & IFCAP_RXCSUM) {
2824 			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2825 			if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2826 				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2827 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2828 				m->m_pkthdr.csum_data =
2829 				    cur_rx->bge_tcp_udp_csum;
2830 				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
2831 			}
2832 		}
2833 #endif
2834 
2835 		/*
2836 		 * If we received a packet with a vlan tag,
2837 		 * attach that information to the packet.
2838 		 */
2839 		if (have_tag)
2840 			VLAN_INPUT_TAG(ifp, m, vlan_tag, continue);
2841 
2842 		BGE_UNLOCK(sc);
2843 		(*ifp->if_input)(ifp, m);
2844 		BGE_LOCK(sc);
2845 	}
2846 
2847 	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2848 	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
2849 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2850 	    sc->bge_cdata.bge_rx_std_ring_map,
2851 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_PREWRITE);
2852 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2853 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2854 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2855 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
2856 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2857 	}
2858 
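	/*
	 * Tell the chip how far we have consumed the return ring and,
	 * if we replenished any buffers, push the new producer indexes.
	 */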
2859 	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2860 	if (stdcnt)
2861 		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2862 	if (jumbocnt)
2863 		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2864 
2865 	return;
2866 }
2867 
2868 static void
2869 bge_txeof(sc)
2870 	struct bge_softc *sc;
2871 {
2872 	struct bge_tx_bd *cur_tx = NULL;
2873 	struct ifnet *ifp;
2874 
2875 	BGE_LOCK_ASSERT(sc);
2876 
2877 	ifp = sc->bge_ifp;
2878 
2879 	/*
2880 	 * Go through our tx ring and free mbufs for those
2881 	 * frames that have been sent.
2882 	 */
2883 	while (sc->bge_tx_saved_considx !=
2884 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2885 		u_int32_t		idx = 0;
2886 
2887 		idx = sc->bge_tx_saved_considx;
2888 		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2889 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2890 			ifp->if_opackets++;
2891 		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2892 			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2893 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2894 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2895 			    sc->bge_cdata.bge_tx_dmamap[idx]);
2896 		}
2897 		sc->bge_txcnt--;
2898 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2899 		ifp->if_timer = 0;
2900 	}
2901 
2902 	if (cur_tx != NULL)
2903 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2904 
2905 	return;
2906 }
2907 
2908 static void
2909 bge_intr(xsc)
2910 	void *xsc;
2911 {
2912 	struct bge_softc *sc;
2913 	struct ifnet *ifp;
2914 	u_int32_t statusword;
2915 	u_int32_t status, mimode;
2916 
2917 	sc = xsc;
2918 	ifp = sc->bge_ifp;
2919 
2920 	BGE_LOCK(sc);
2921 
2922 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2923 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTWRITE);
2924 
2925 	statusword =
2926 	    atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
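	/*
	 * The status word is fetched and cleared atomically so that a
	 * subsequent status block update from the chip is detectable.
	 */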
2927 
2928 #ifdef notdef
2929 	/* Avoid this for now -- checking this register is expensive. */
2930 	/* Make sure this is really our interrupt. */
2931 	if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2932 		return;
2933 #endif
2934 	/* Ack the interrupt and stop others from occurring. */
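	/*
	 * A nonzero write to the IRQ0 mailbox holds off further
	 * interrupts until a zero is written back at the end.
	 */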
2935 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2936 
2937 	/*
2938 	 * Process link state changes.
2939 	 * Grrr. The link status word in the status block does
2940 	 * not work correctly on the BCM5700 rev AX and BX chips,
2941 	 * according to all available information. Hence, we have
2942 	 * to enable MII interrupts in order to properly obtain
2943 	 * async link changes. Unfortunately, this also means that
2944 	 * we have to read the MAC status register to detect link
2945 	 * changes, thereby adding an additional register access to
2946 	 * the interrupt handler.
2947 	 */
2948 
2949 	if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
2950 
2951 		status = CSR_READ_4(sc, BGE_MAC_STS);
2952 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
2953 			sc->bge_link = 0;
2954 			callout_stop(&sc->bge_stat_ch);
2955 			bge_tick_locked(sc);
2956 			/* Clear the interrupt */
2957 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2958 			    BGE_EVTENB_MI_INTERRUPT);
2959 			bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
2960 			bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
2961 			    BRGPHY_INTRS);
2962 		}
2963 	} else {
2964 		if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) {
2965 			/*
2966 			 * Sometimes PCS encoding errors are detected in
2967 			 * TBI mode (on fiber NICs), and for some reason
2968 			 * the chip will signal them as link changes.
2969 			 * If we get a link change event, but the 'PCS
2970 			 * encoding error' bit in the MAC status register
2971 			 * is set, don't bother doing a link check.
2972 			 * This avoids spurious "gigabit link up" messages
2973 			 * that sometimes appear on fiber NICs during
2974 			 * periods of heavy traffic. (There should be no
2975 			 * effect on copper NICs.)
2976 			 *
2977 			 * If we do have a copper NIC (bge_tbi == 0) then
2978 			 * check that the AUTOPOLL bit is set before
2979 			 * processing the event as a real link change.
2980 			 * Turning AUTOPOLL on and off in the MII read/write
2981 			 * functions will often trigger a link status
2982 			 * interrupt for no reason.
2983 			 */
2984 			status = CSR_READ_4(sc, BGE_MAC_STS);
2985 			mimode = CSR_READ_4(sc, BGE_MI_MODE);
2986 			if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR|
2987 			    BGE_MACSTAT_MI_COMPLETE)) && (!sc->bge_tbi &&
2988 			    (mimode & BGE_MIMODE_AUTOPOLL))) {
2989 				sc->bge_link = 0;
2990 				callout_stop(&sc->bge_stat_ch);
2991 				bge_tick_locked(sc);
2992 			}
2993 			/* Clear the interrupt */
2994 			CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2995 			    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2996 			    BGE_MACSTAT_LINK_CHANGED);
2997 
2998 			/* Force flush the status block cached by PCI bridge */
2999 			CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
3000 		}
3001 	}
3002 
3003 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3004 		/* Check RX return ring producer/consumer */
3005 		bge_rxeof(sc);
3006 
3007 		/* Check TX ring producer/consumer */
3008 		bge_txeof(sc);
3009 	}
3010 
3011 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3012 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
3013 
3014 	bge_handle_events(sc);
3015 
3016 	/* Re-enable interrupts. */
3017 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3018 
3019 	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3020 	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3021 		bge_start_locked(ifp);
3022 
3023 	BGE_UNLOCK(sc);
3024 
3025 	return;
3026 }
3027 
3028 static void
3029 bge_tick_locked(sc)
3030 	struct bge_softc *sc;
3031 {
3032 	struct mii_data *mii = NULL;
3033 	struct ifmedia *ifm = NULL;
3034 	struct ifnet *ifp;
3035 
3036 	ifp = sc->bge_ifp;
3037 
3038 	BGE_LOCK_ASSERT(sc);
3039 
3040 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
3041 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
3042 		bge_stats_update_regs(sc);
3043 	else
3044 		bge_stats_update(sc);
3045 	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3046 	if (sc->bge_link)
3047 		return;
3048 
3049 	if (sc->bge_tbi) {
3050 		ifm = &sc->bge_ifmedia;
3051 		if (CSR_READ_4(sc, BGE_MAC_STS) &
3052 		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
3053 			sc->bge_link++;
3054 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
3055 				BGE_CLRBIT(sc, BGE_MAC_MODE,
3056 				    BGE_MACMODE_TBI_SEND_CFGS);
3057 			CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3058 			if (bootverbose)
3059 				printf("bge%d: gigabit link up\n",
3060 				    sc->bge_unit);
3061 			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3062 				bge_start_locked(ifp);
3063 		}
3064 		return;
3065 	}
3066 
3067 	mii = device_get_softc(sc->bge_miibus);
3068 	mii_tick(mii);
3069 
3070 	if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
3071 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3072 		sc->bge_link++;
3073 		if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
3074 		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) &&
3075 		    bootverbose)
3076 			printf("bge%d: gigabit link up\n", sc->bge_unit);
3077 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3078 			bge_start_locked(ifp);
3079 	}
3080 
3081 	return;
3082 }
3083 
3084 static void
3085 bge_tick(xsc)
3086 	void *xsc;
3087 {
3088 	struct bge_softc *sc;
3089 
3090 	sc = xsc;
3091 
3092 	BGE_LOCK(sc);
3093 	bge_tick_locked(sc);
3094 	BGE_UNLOCK(sc);
3095 }
3096 
3097 static void
3098 bge_stats_update_regs(sc)
3099 	struct bge_softc *sc;
3100 {
3101 	struct ifnet *ifp;
3102 	struct bge_mac_stats_regs stats;
3103 	u_int32_t *s;
3104 	int i;
3105 
3106 	ifp = sc->bge_ifp;
3107 
3108 	s = (u_int32_t *)&stats;
3109 	for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
3110 		*s = CSR_READ_4(sc, BGE_RX_STATS + i);
3111 		s++;
3112 	}
3113 
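	/*
	 * The chip's collision counters are cumulative, so this
	 * effectively sets if_collisions to the current hardware total.
	 */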
3114 	ifp->if_collisions +=
3115 	   (stats.dot3StatsSingleCollisionFrames +
3116 	   stats.dot3StatsMultipleCollisionFrames +
3117 	   stats.dot3StatsExcessiveCollisions +
3118 	   stats.dot3StatsLateCollisions) -
3119 	   ifp->if_collisions;
3120 
3121 	return;
3122 }
3123 
3124 static void
3125 bge_stats_update(sc)
3126 	struct bge_softc *sc;
3127 {
3128 	struct ifnet *ifp;
3129 	struct bge_stats *stats;
3130 
3131 	ifp = sc->bge_ifp;
3132 
3133 	stats = (struct bge_stats *)(sc->bge_vhandle +
3134 	    BGE_MEMWIN_START + BGE_STATS_BLOCK);
3135 
3136 	ifp->if_collisions +=
3137 	   (stats->txstats.dot3StatsSingleCollisionFrames.bge_addr_lo +
3138 	   stats->txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo +
3139 	   stats->txstats.dot3StatsExcessiveCollisions.bge_addr_lo +
3140 	   stats->txstats.dot3StatsLateCollisions.bge_addr_lo) -
3141 	   ifp->if_collisions;
3142 
3143 #ifdef notdef
3144 	ifp->if_collisions +=
3145 	   (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
3146 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
3147 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
3148 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
3149 	   ifp->if_collisions;
3150 #endif
3151 
3152 	return;
3153 }
3154 
3155 /*
3156  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3157  * pointers to descriptors.
3158  */
3159 static int
3160 bge_encap(sc, m_head, txidx)
3161 	struct bge_softc *sc;
3162 	struct mbuf *m_head;
3163 	u_int32_t *txidx;
3164 {
3165 	struct bge_tx_bd	*f = NULL;
3166 	u_int16_t		csum_flags = 0;
3167 	struct m_tag		*mtag;
3168 	struct bge_dmamap_arg	ctx;
3169 	bus_dmamap_t		map;
3170 	int			error;
3171 
3172 
3173 	if (m_head->m_pkthdr.csum_flags) {
3174 		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
3175 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3176 		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
3177 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3178 		if (m_head->m_flags & M_LASTFRAG)
3179 			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3180 		else if (m_head->m_flags & M_FRAG)
3181 			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3182 	}
3183 
3184 	mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m_head);
3185 
3186 	ctx.sc = sc;
3187 	ctx.bge_idx = *txidx;
3188 	ctx.bge_ring = sc->bge_ldata.bge_tx_ring;
3189 	ctx.bge_flags = csum_flags;
3190 	/*
3191 	 * Sanity check: avoid coming within 16 descriptors
3192 	 * of the end of the ring.
3193 	 */
3194 	ctx.bge_maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - 16;
3195 
3196 	map = sc->bge_cdata.bge_tx_dmamap[*txidx];
3197 	error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
3198 	    m_head, bge_dma_map_tx_desc, &ctx, BUS_DMA_NOWAIT);
3199 
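	/*
	 * bge_dma_map_tx_desc() writes a TX descriptor for each
	 * segment directly into the ring; on return, ctx.bge_idx is
	 * the index of the last descriptor used and ctx.bge_maxsegs
	 * the number of segments consumed (0 means the chain did
	 * not fit).
	 */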
3200 	if (error || ctx.bge_maxsegs == 0 /*||
3201 	    ctx.bge_idx == sc->bge_tx_saved_considx*/)
3202 		return (ENOBUFS);
3203 
3204 	/*
3205 	 * Ensure that the map for this transmission
3206 	 * is placed at the array index of the last descriptor
3207 	 * in this chain.
3208 	 */
3209 	sc->bge_cdata.bge_tx_dmamap[*txidx] =
3210 	    sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx];
3211 	sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx] = map;
3212 	sc->bge_cdata.bge_tx_chain[ctx.bge_idx] = m_head;
3213 	sc->bge_txcnt += ctx.bge_maxsegs;
3214 	f = &sc->bge_ldata.bge_tx_ring[*txidx];
3215 	if (mtag != NULL) {
3216 		f->bge_flags |= htole16(BGE_TXBDFLAG_VLAN_TAG);
3217 		f->bge_vlan_tag = htole16(VLAN_TAG_VALUE(mtag));
3218 	} else {
3219 		f->bge_vlan_tag = 0;
3220 	}
3221 
3222 	BGE_INC(ctx.bge_idx, BGE_TX_RING_CNT);
3223 	*txidx = ctx.bge_idx;
3224 
3225 	return(0);
3226 }
3227 
3228 /*
3229  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3230  * to the mbuf data regions directly in the transmit descriptors.
3231  */
3232 static void
3233 bge_start_locked(ifp)
3234 	struct ifnet *ifp;
3235 {
3236 	struct bge_softc *sc;
3237 	struct mbuf *m_head = NULL;
3238 	u_int32_t prodidx = 0;
3239 	int count = 0;
3240 
3241 	sc = ifp->if_softc;
3242 
3243 	if (!sc->bge_link && IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3244 		return;
3245 
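	/*
	 * The TX producer index lives in a NIC mailbox register;
	 * read it back to resume filling the ring where we left off.
	 */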
3246 	prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
3247 
3248 	while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3249 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3250 		if (m_head == NULL)
3251 			break;
3252 
3253 		/*
3254 		 * XXX
3255 		 * The code inside the if() block is never reached since we
3256 		 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3257 		 * requests to checksum TCP/UDP in a fragmented packet.
3258 		 *
3259 		 * XXX
3260 		 * safety overkill.  If this is a fragmented packet chain
3261 		 * with delayed TCP/UDP checksums, then only encapsulate
3262 		 * it if we have enough descriptors to handle the entire
3263 		 * chain at once.
3264 		 * (paranoia -- may not actually be needed)
3265 		 */
3266 		if (m_head->m_flags & M_FIRSTFRAG &&
3267 		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3268 			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3269 			    m_head->m_pkthdr.csum_data + 16) {
3270 				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3271 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3272 				break;
3273 			}
3274 		}
3275 
3276 		/*
3277 		 * Pack the data into the transmit ring. If we
3278 		 * don't have room, set the OACTIVE flag and wait
3279 		 * for the NIC to drain the ring.
3280 		 */
3281 		if (bge_encap(sc, m_head, &prodidx)) {
3282 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3283 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3284 			break;
3285 		}
3286 		++count;
3287 
3288 		/*
3289 		 * If there's a BPF listener, bounce a copy of this frame
3290 		 * to him.
3291 		 */
3292 		BPF_MTAP(ifp, m_head);
3293 	}
3294 
3295 	if (count == 0) {
3296 		/* no packets were dequeued */
3297 		return;
3298 	}
3299 
3300 	/* Transmit */
3301 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3302 	/* 5700 b2 errata */
3303 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3304 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3305 
3306 	/*
3307 	 * Set a timeout in case the chip goes out to lunch.
3308 	 */
3309 	ifp->if_timer = 5;
3310 
3311 	return;
3312 }
3313 
3314 /*
3315  * ifnet entry point for transmission: acquire the driver lock
3316  * and hand the work to bge_start_locked().
3317  */
3318 static void
3319 bge_start(ifp)
3320 	struct ifnet *ifp;
3321 {
3322 	struct bge_softc *sc;
3323 
3324 	sc = ifp->if_softc;
3325 	BGE_LOCK(sc);
3326 	bge_start_locked(ifp);
3327 	BGE_UNLOCK(sc);
3328 }
3329 
3330 static void
3331 bge_init_locked(sc)
3332 	struct bge_softc *sc;
3333 {
3334 	struct ifnet *ifp;
3335 	u_int16_t *m;
3336 
3337 	BGE_LOCK_ASSERT(sc);
3338 
3339 	ifp = sc->bge_ifp;
3340 
3341 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3342 		return;
3343 
3344 	/* Cancel pending I/O and flush buffers. */
3345 	bge_stop(sc);
3346 	bge_reset(sc);
3347 	bge_chipinit(sc);
3348 
3349 	/*
3350 	 * Init the various state machines, ring
3351 	 * control blocks and firmware.
3352 	 */
3353 	if (bge_blockinit(sc)) {
3354 		printf("bge%d: initialization failure\n", sc->bge_unit);
3355 		return;
3356 	}
3357 
3358 	ifp = sc->bge_ifp;
3359 
3360 	/* Specify MTU. */
3361 	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3362 	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
3363 
3364 	/* Load our MAC address. */
3365 	m = (u_int16_t *)&IFP2ENADDR(sc->bge_ifp)[0];
3366 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3367 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3368 
3369 	/* Enable or disable promiscuous mode as needed. */
3370 	if (ifp->if_flags & IFF_PROMISC) {
3371 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3372 	} else {
3373 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3374 	}
3375 
3376 	/* Program multicast filter. */
3377 	bge_setmulti(sc);
3378 
3379 	/* Init RX ring. */
3380 	bge_init_rx_ring_std(sc);
3381 
3382 	/*
3383 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3384 	 * memory to insure that the chip has in fact read the first
3385 	 * memory to ensure that the chip has in fact read the first
3386 	 */
3387 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3388 		u_int32_t		v, i;
3389 		for (i = 0; i < 10; i++) {
3390 			DELAY(20);
3391 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3392 			if (v == (MCLBYTES - ETHER_ALIGN))
3393 				break;
3394 		}
3395 		if (i == 10)
3396 			printf("bge%d: 5705 A0 chip failed to load RX ring\n",
3397 			    sc->bge_unit);
3398 	}
3399 
3400 	/* Init jumbo RX ring. */
3401 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3402 		bge_init_rx_ring_jumbo(sc);
3403 
3404 	/* Init our RX return ring index */
3405 	sc->bge_rx_saved_considx = 0;
3406 
3407 	/* Init TX ring. */
3408 	bge_init_tx_ring(sc);
3409 
3410 	/* Turn on transmitter */
3411 	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3412 
3413 	/* Turn on receiver */
3414 	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3415 
3416 	/* Tell firmware we're alive. */
3417 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3418 
3419 	/* Enable host interrupts. */
3420 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3421 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3422 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3423 
3424 	bge_ifmedia_upd(ifp);
3425 
3426 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3427 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3428 
3429 	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3430 
3431 	return;
3432 }
3433 
3434 static void
3435 bge_init(xsc)
3436 	void *xsc;
3437 {
3438 	struct bge_softc *sc = xsc;
3439 
3440 	BGE_LOCK(sc);
3441 	bge_init_locked(sc);
3442 	BGE_UNLOCK(sc);
3443 
3444 	return;
3445 }
3446 
3447 /*
3448  * Set media options.
3449  */
3450 static int
3451 bge_ifmedia_upd(ifp)
3452 	struct ifnet *ifp;
3453 {
3454 	struct bge_softc *sc;
3455 	struct mii_data *mii;
3456 	struct ifmedia *ifm;
3457 
3458 	sc = ifp->if_softc;
3459 	ifm = &sc->bge_ifmedia;
3460 
3461 	/* If this is a 1000baseX NIC, enable the TBI port. */
3462 	if (sc->bge_tbi) {
3463 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3464 			return(EINVAL);
3465 		switch(IFM_SUBTYPE(ifm->ifm_media)) {
3466 		case IFM_AUTO:
3467 #ifndef BGE_FAKE_AUTONEG
3468 			/*
3469 			 * The BCM5704 ASIC appears to have a special
3470 			 * mechanism for programming the autoneg
3471 			 * advertisement registers in TBI mode.
3472 			 */
3473 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3474 				uint32_t sgdig;
3475 				CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3476 				sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3477 				sgdig |= BGE_SGDIGCFG_AUTO|
3478 				    BGE_SGDIGCFG_PAUSE_CAP|
3479 				    BGE_SGDIGCFG_ASYM_PAUSE;
3480 				CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3481 				    sgdig|BGE_SGDIGCFG_SEND);
3482 				DELAY(5);
3483 				CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3484 			}
3485 #endif
3486 			break;
3487 		case IFM_1000_SX:
3488 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3489 				BGE_CLRBIT(sc, BGE_MAC_MODE,
3490 				    BGE_MACMODE_HALF_DUPLEX);
3491 			} else {
3492 				BGE_SETBIT(sc, BGE_MAC_MODE,
3493 				    BGE_MACMODE_HALF_DUPLEX);
3494 			}
3495 			break;
3496 		default:
3497 			return(EINVAL);
3498 		}
3499 		return(0);
3500 	}
3501 
3502 	mii = device_get_softc(sc->bge_miibus);
3503 	sc->bge_link = 0;
3504 	if (mii->mii_instance) {
3505 		struct mii_softc *miisc;
3506 		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3507 		    miisc = LIST_NEXT(miisc, mii_list))
3508 			mii_phy_reset(miisc);
3509 	}
3510 	mii_mediachg(mii);
3511 
3512 	return(0);
3513 }
3514 
3515 /*
3516  * Report current media status.
3517  */
3518 static void
3519 bge_ifmedia_sts(ifp, ifmr)
3520 	struct ifnet *ifp;
3521 	struct ifmediareq *ifmr;
3522 {
3523 	struct bge_softc *sc;
3524 	struct mii_data *mii;
3525 
3526 	sc = ifp->if_softc;
3527 
3528 	if (sc->bge_tbi) {
3529 		ifmr->ifm_status = IFM_AVALID;
3530 		ifmr->ifm_active = IFM_ETHER;
3531 		if (CSR_READ_4(sc, BGE_MAC_STS) &
3532 		    BGE_MACSTAT_TBI_PCS_SYNCHED)
3533 			ifmr->ifm_status |= IFM_ACTIVE;
3534 		ifmr->ifm_active |= IFM_1000_SX;
3535 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3536 			ifmr->ifm_active |= IFM_HDX;
3537 		else
3538 			ifmr->ifm_active |= IFM_FDX;
3539 		return;
3540 	}
3541 
3542 	mii = device_get_softc(sc->bge_miibus);
3543 	mii_pollstat(mii);
3544 	ifmr->ifm_active = mii->mii_media_active;
3545 	ifmr->ifm_status = mii->mii_media_status;
3546 
3547 	return;
3548 }
3549 
3550 static int
3551 bge_ioctl(ifp, command, data)
3552 	struct ifnet *ifp;
3553 	u_long command;
3554 	caddr_t data;
3555 {
3556 	struct bge_softc *sc = ifp->if_softc;
3557 	struct ifreq *ifr = (struct ifreq *) data;
3558 	int mask, error = 0;
3559 	struct mii_data *mii;
3560 
3561 	switch(command) {
3562 	case SIOCSIFMTU:
3563 		/* Disallow jumbo frames on 5705. */
3564 		if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
3565 		      sc->bge_asicrev == BGE_ASICREV_BCM5750) &&
3566 		    ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
3567 			error = EINVAL;
3568 		else {
3569 			ifp->if_mtu = ifr->ifr_mtu;
3570 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3571 			bge_init(sc);
3572 		}
3573 		break;
3574 	case SIOCSIFFLAGS:
3575 		BGE_LOCK(sc);
3576 		if (ifp->if_flags & IFF_UP) {
3577 			/*
3578 			 * If only the state of the PROMISC flag changed,
3579 			 * then just use the 'set promisc mode' command
3580 			 * instead of reinitializing the entire NIC. Doing
3581 			 * a full re-init means reloading the firmware and
3582 			 * waiting for it to start up, which may take a
3583 			 * second or two.
3584 			 */
3585 			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3586 			    ifp->if_flags & IFF_PROMISC &&
3587 			    !(sc->bge_if_flags & IFF_PROMISC)) {
3588 				BGE_SETBIT(sc, BGE_RX_MODE,
3589 				    BGE_RXMODE_RX_PROMISC);
3590 			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3591 			    !(ifp->if_flags & IFF_PROMISC) &&
3592 			    sc->bge_if_flags & IFF_PROMISC) {
3593 				BGE_CLRBIT(sc, BGE_RX_MODE,
3594 				    BGE_RXMODE_RX_PROMISC);
3595 			} else
3596 				bge_init_locked(sc);
3597 		} else {
3598 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3599 				bge_stop(sc);
3600 			}
3601 		}
3602 		sc->bge_if_flags = ifp->if_flags;
3603 		BGE_UNLOCK(sc);
3604 		error = 0;
3605 		break;
3606 	case SIOCADDMULTI:
3607 	case SIOCDELMULTI:
3608 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3609 			BGE_LOCK(sc);
3610 			bge_setmulti(sc);
3611 			BGE_UNLOCK(sc);
3612 			error = 0;
3613 		}
3614 		break;
3615 	case SIOCSIFMEDIA:
3616 	case SIOCGIFMEDIA:
3617 		if (sc->bge_tbi) {
3618 			error = ifmedia_ioctl(ifp, ifr,
3619 			    &sc->bge_ifmedia, command);
3620 		} else {
3621 			mii = device_get_softc(sc->bge_miibus);
3622 			error = ifmedia_ioctl(ifp, ifr,
3623 			    &mii->mii_media, command);
3624 		}
3625 		break;
3626 	case SIOCSIFCAP:
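		/* Figure out which capability bits have been toggled. */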
3627 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		/* NB: the code for RX csum offload is disabled for now. */
3629 		if (mask & IFCAP_TXCSUM) {
3630 			ifp->if_capenable ^= IFCAP_TXCSUM;
3631 			if (IFCAP_TXCSUM & ifp->if_capenable)
3632 				ifp->if_hwassist = BGE_CSUM_FEATURES;
3633 			else
3634 				ifp->if_hwassist = 0;
3635 		}
3636 		error = 0;
3637 		break;
3638 	default:
3639 		error = ether_ioctl(ifp, command, data);
3640 		break;
3641 	}
3642 
3643 	return(error);
3644 }
3645 
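/*
 * The watchdog fires when a transmission has not completed within
 * the timeout period; assume the chip is wedged and reinitialize it.
 */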
3646 static void
3647 bge_watchdog(ifp)
3648 	struct ifnet *ifp;
3649 {
3650 	struct bge_softc *sc;
3651 
3652 	sc = ifp->if_softc;
3653 
3654 	printf("bge%d: watchdog timeout -- resetting\n", sc->bge_unit);
3655 
3656 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3657 	bge_init(sc);
3658 
3659 	ifp->if_oerrors++;
3660 
3661 	return;
3662 }
3663 
3664 /*
3665  * Stop the adapter and free any mbufs allocated to the
3666  * RX and TX lists.
3667  */
3668 static void
3669 bge_stop(sc)
3670 	struct bge_softc *sc;
3671 {
3672 	struct ifnet *ifp;
3673 	struct ifmedia_entry *ifm;
3674 	struct mii_data *mii = NULL;
3675 	int mtmp, itmp;
3676 
3677 	BGE_LOCK_ASSERT(sc);
3678 
3679 	ifp = sc->bge_ifp;
3680 
3681 	if (!sc->bge_tbi)
3682 		mii = device_get_softc(sc->bge_miibus);
3683 
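	/* Cancel the periodic tick callout. */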
3684 	callout_stop(&sc->bge_stat_ch);
3685 
3686 	/*
3687 	 * Disable all of the receiver blocks
3688 	 */
3689 	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3690 	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3691 	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3692 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3693 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3694 		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3695 	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3696 	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3697 	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3698 
3699 	/*
3700 	 * Disable all of the transmit blocks
3701 	 */
3702 	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3703 	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3704 	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3705 	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3706 	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3707 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3708 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3709 		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3710 	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3711 
3712 	/*
3713 	 * Shut down all of the memory managers and related
3714 	 * state machines.
3715 	 */
3716 	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3717 	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3718 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3719 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3720 		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
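	/* Reset the on-chip FTQs by pulsing every bit in the reset register. */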
3721 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3722 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3723 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3724 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
3725 		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3726 		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3727 	}
3728 
3729 	/* Disable host interrupts. */
3730 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
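	/*
	 * A non-zero value in the IRQ0 mailbox should also tell the
	 * chip to hold off further interrupt events.
	 */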
3731 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3732 
3733 	/*
3734 	 * Tell firmware we're shutting down.
3735 	 */
3736 	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3737 
3738 	/* Free the RX lists. */
3739 	bge_free_rx_ring_std(sc);
3740 
3741 	/* Free jumbo RX list. */
3742 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3743 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3744 		bge_free_rx_ring_jumbo(sc);
3745 
3746 	/* Free TX buffers. */
3747 	bge_free_tx_ring(sc);
3748 
3749 	/*
3750 	 * Isolate/power down the PHY, but leave the media selection
3751 	 * unchanged so that things will be put back to normal when
3752 	 * we bring the interface back up.
3753 	 */
3754 	if (!sc->bge_tbi) {
3755 		itmp = ifp->if_flags;
3756 		ifp->if_flags |= IFF_UP;
3757 		/*
3758 		 * If we are called from bge_detach(), mii is already NULL.
3759 		 */
3760 		if (mii != NULL) {
3761 			ifm = mii->mii_media.ifm_cur;
3762 			mtmp = ifm->ifm_media;
3763 			ifm->ifm_media = IFM_ETHER|IFM_NONE;
3764 			mii_mediachg(mii);
3765 			ifm->ifm_media = mtmp;
3766 		}
3767 		ifp->if_flags = itmp;
3768 	}
3769 
3770 	sc->bge_link = 0;
3771 
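	/* Invalidate the saved TX consumer index. */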
3772 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3773 
3774 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3775 
3776 	return;
3777 }
3778 
3779 /*
3780  * Stop all chip I/O so that the kernel's probe routines don't
3781  * get confused by errant DMAs when rebooting.
3782  */
3783 static void
3784 bge_shutdown(dev)
3785 	device_t dev;
3786 {
3787 	struct bge_softc *sc;
3788 
3789 	sc = device_get_softc(dev);
3790 
3791 	BGE_LOCK(sc);
3792 	bge_stop(sc);
3793 	bge_reset(sc);
3794 	BGE_UNLOCK(sc);
3795 
3796 	return;
3797 }
3798 
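/*
 * Device suspend routine: stop the interface.  bge_resume() brings
 * it back up again if it was marked up at suspend time.
 */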
3799 static int
3800 bge_suspend(device_t dev)
3801 {
3802 	struct bge_softc *sc;
3803 
3804 	sc = device_get_softc(dev);
3805 	BGE_LOCK(sc);
3806 	bge_stop(sc);
3807 	BGE_UNLOCK(sc);
3808 
3809 	return (0);
3810 }
3811 
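/*
 * Device resume routine: if the interface was up before the suspend,
 * reinitialize it and kick off any pending transmissions.
 */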
3812 static int
3813 bge_resume(device_t dev)
3814 {
3815 	struct bge_softc *sc;
3816 	struct ifnet *ifp;
3817 
3818 	sc = device_get_softc(dev);
3819 	BGE_LOCK(sc);
3820 	ifp = sc->bge_ifp;
3821 	if (ifp->if_flags & IFF_UP) {
3822 		bge_init_locked(sc);
3823 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3824 			bge_start_locked(ifp);
3825 	}
3826 	BGE_UNLOCK(sc);
3827 
3828 	return (0);
3829 }
3830