xref: /freebsd/sys/dev/bge/if_bge.c (revision 262e143bd46171a6415a5b28af260a5efa2a3db8)
1 /*-
2  * Copyright (c) 2001 Wind River Systems
3  * Copyright (c) 1997, 1998, 1999, 2001
4  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 /*
38  * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39  *
40  * The Broadcom BCM5700 is based on technology originally developed by
41  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45  * frames, highly configurable RX filtering, and 16 RX and TX queues
46  * (which, along with RX filter rules, can be used for QOS applications).
47  * Other features, such as TCP segmentation, may be available as part
48  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49  * firmware images can be stored in hardware and need not be compiled
50  * into the driver.
51  *
52  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53  * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54  *
55  * The BCM5701 is a single-chip solution incorporating both the BCM5700
56  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57  * does not support external SSRAM.
58  *
59  * Broadcom also produces a variation of the BCM5700 under the "Altima"
60  * brand name, which is functionally similar but lacks PCI-X support.
61  *
62  * Without external SSRAM, you can have at most 4 TX rings,
63  * and the use of the mini RX ring is disabled. This seems to imply
64  * that these features are simply not available on the BCM5701. As a
65  * result, this driver does not implement any support for the mini RX
66  * ring.
67  */
68 
69 #ifdef HAVE_KERNEL_OPTION_HEADERS
70 #include "opt_device_polling.h"
71 #endif
72 
73 #include <sys/param.h>
74 #include <sys/endian.h>
75 #include <sys/systm.h>
76 #include <sys/sockio.h>
77 #include <sys/mbuf.h>
78 #include <sys/malloc.h>
79 #include <sys/kernel.h>
80 #include <sys/module.h>
81 #include <sys/socket.h>
82 
83 #include <net/if.h>
84 #include <net/if_arp.h>
85 #include <net/ethernet.h>
86 #include <net/if_dl.h>
87 #include <net/if_media.h>
88 
89 #include <net/bpf.h>
90 
91 #include <net/if_types.h>
92 #include <net/if_vlan_var.h>
93 
94 #include <netinet/in_systm.h>
95 #include <netinet/in.h>
96 #include <netinet/ip.h>
97 
98 #include <machine/clock.h>      /* for DELAY */
99 #include <machine/bus.h>
100 #include <machine/resource.h>
101 #include <sys/bus.h>
102 #include <sys/rman.h>
103 
104 #include <dev/mii/mii.h>
105 #include <dev/mii/miivar.h>
106 #include "miidevs.h"
107 #include <dev/mii/brgphyreg.h>
108 
109 #include <dev/pci/pcireg.h>
110 #include <dev/pci/pcivar.h>
111 
112 #include <dev/bge/if_bgereg.h>
113 
114 #include "opt_bge.h"
115 
116 #define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
117 
118 MODULE_DEPEND(bge, pci, 1, 1, 1);
119 MODULE_DEPEND(bge, ether, 1, 1, 1);
120 MODULE_DEPEND(bge, miibus, 1, 1, 1);
121 
122 /* "device miibus" required.  See GENERIC if you get errors here. */
123 #include "miibus_if.h"
124 
125 /*
126  * Various supported device vendors/types and their names. Note: the
127  * spec seems to indicate that the hardware still has Alteon's vendor
128  * ID burned into it, though it will always be overridden by the vendor
129  * ID in the EEPROM. Just to be safe, we cover all possibilities.
130  */
131 #define BGE_DEVDESC_MAX		64	/* Maximum device description length */
132 
133 static struct bge_type bge_devs[] = {
134 	{ ALT_VENDORID,	ALT_DEVICEID_BCM5700,
135 		"Broadcom BCM5700 Gigabit Ethernet" },
136 	{ ALT_VENDORID,	ALT_DEVICEID_BCM5701,
137 		"Broadcom BCM5701 Gigabit Ethernet" },
138 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5700,
139 		"Broadcom BCM5700 Gigabit Ethernet" },
140 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5701,
141 		"Broadcom BCM5701 Gigabit Ethernet" },
142 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5702,
143 		"Broadcom BCM5702 Gigabit Ethernet" },
144 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5702X,
145 		"Broadcom BCM5702X Gigabit Ethernet" },
146 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5703,
147 		"Broadcom BCM5703 Gigabit Ethernet" },
148 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5703X,
149 		"Broadcom BCM5703X Gigabit Ethernet" },
150 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5704C,
151 		"Broadcom BCM5704C Dual Gigabit Ethernet" },
152 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5704S,
153 		"Broadcom BCM5704S Dual Gigabit Ethernet" },
154 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705,
155 		"Broadcom BCM5705 Gigabit Ethernet" },
156 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705K,
157 		"Broadcom BCM5705K Gigabit Ethernet" },
158 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705M,
159 		"Broadcom BCM5705M Gigabit Ethernet" },
160 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT,
161 		"Broadcom BCM5705M Gigabit Ethernet" },
162 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5714C,
163 		"Broadcom BCM5714C Gigabit Ethernet" },
164 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5721,
165 		"Broadcom BCM5721 Gigabit Ethernet" },
166 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5750,
167 		"Broadcom BCM5750 Gigabit Ethernet" },
168 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5750M,
169 		"Broadcom BCM5750M Gigabit Ethernet" },
170 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5751,
171 		"Broadcom BCM5751 Gigabit Ethernet" },
172 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5751M,
173 		"Broadcom BCM5751M Gigabit Ethernet" },
174 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5752,
175 		"Broadcom BCM5752 Gigabit Ethernet" },
176 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5782,
177 		"Broadcom BCM5782 Gigabit Ethernet" },
178 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5788,
179 		"Broadcom BCM5788 Gigabit Ethernet" },
180 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5789,
181 		"Broadcom BCM5789 Gigabit Ethernet" },
182 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5901,
183 		"Broadcom BCM5901 Fast Ethernet" },
184 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2,
185 		"Broadcom BCM5901A2 Fast Ethernet" },
186 	{ SK_VENDORID, SK_DEVICEID_ALTIMA,
187 		"SysKonnect Gigabit Ethernet" },
188 	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000,
189 		"Altima AC1000 Gigabit Ethernet" },
190 	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002,
191 		"Altima AC1002 Gigabit Ethernet" },
192 	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100,
193 		"Altima AC9100 Gigabit Ethernet" },
194 	{ 0, 0, NULL }
195 };
196 
197 static int bge_probe		(device_t);
198 static int bge_attach		(device_t);
199 static int bge_detach		(device_t);
200 static int bge_suspend		(device_t);
201 static int bge_resume		(device_t);
202 static void bge_release_resources
203 				(struct bge_softc *);
204 static void bge_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
205 static void bge_dma_map_tx_desc	(void *, bus_dma_segment_t *, int,
206 				    bus_size_t, int);
207 static int bge_dma_alloc	(device_t);
208 static void bge_dma_free	(struct bge_softc *);
209 
210 static void bge_txeof		(struct bge_softc *);
211 static void bge_rxeof		(struct bge_softc *);
212 
213 static void bge_tick_locked	(struct bge_softc *);
214 static void bge_tick		(void *);
215 static void bge_stats_update	(struct bge_softc *);
216 static void bge_stats_update_regs
217 				(struct bge_softc *);
218 static int bge_encap		(struct bge_softc *, struct mbuf *,
219 					u_int32_t *);
220 
221 static void bge_intr		(void *);
222 static void bge_start_locked	(struct ifnet *);
223 static void bge_start		(struct ifnet *);
224 static int bge_ioctl		(struct ifnet *, u_long, caddr_t);
225 static void bge_init_locked	(struct bge_softc *);
226 static void bge_init		(void *);
227 static void bge_stop		(struct bge_softc *);
228 static void bge_watchdog		(struct ifnet *);
229 static void bge_shutdown		(device_t);
230 static int bge_ifmedia_upd	(struct ifnet *);
231 static void bge_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
232 
233 static u_int8_t	bge_eeprom_getbyte	(struct bge_softc *, int, u_int8_t *);
234 static int bge_read_eeprom	(struct bge_softc *, caddr_t, int, int);
235 
236 static void bge_setmulti	(struct bge_softc *);
237 
238 static void bge_handle_events	(struct bge_softc *);
239 static int bge_newbuf_std	(struct bge_softc *, int, struct mbuf *);
240 static int bge_newbuf_jumbo	(struct bge_softc *, int, struct mbuf *);
241 static int bge_init_rx_ring_std	(struct bge_softc *);
242 static void bge_free_rx_ring_std	(struct bge_softc *);
243 static int bge_init_rx_ring_jumbo	(struct bge_softc *);
244 static void bge_free_rx_ring_jumbo	(struct bge_softc *);
245 static void bge_free_tx_ring	(struct bge_softc *);
246 static int bge_init_tx_ring	(struct bge_softc *);
247 
248 static int bge_chipinit		(struct bge_softc *);
249 static int bge_blockinit	(struct bge_softc *);
250 
251 #ifdef notdef
252 static u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
253 static void bge_vpd_read_res	(struct bge_softc *, struct vpd_res *, int);
254 static void bge_vpd_read	(struct bge_softc *);
255 #endif
256 
257 static u_int32_t bge_readmem_ind
258 				(struct bge_softc *, int);
259 static void bge_writemem_ind	(struct bge_softc *, int, int);
260 #ifdef notdef
261 static u_int32_t bge_readreg_ind
262 				(struct bge_softc *, int);
263 #endif
264 static void bge_writereg_ind	(struct bge_softc *, int, int);
265 
266 static int bge_miibus_readreg	(device_t, int, int);
267 static int bge_miibus_writereg	(device_t, int, int, int);
268 static void bge_miibus_statchg	(device_t);
269 #ifdef DEVICE_POLLING
270 static void bge_poll		(struct ifnet *ifp, enum poll_cmd cmd,
271 				    int count);
272 static void bge_poll_locked	(struct ifnet *ifp, enum poll_cmd cmd,
273 				    int count);
274 #endif
275 
276 static void bge_reset		(struct bge_softc *);
277 static void bge_link_upd	(struct bge_softc *);
278 
279 static device_method_t bge_methods[] = {
280 	/* Device interface */
281 	DEVMETHOD(device_probe,		bge_probe),
282 	DEVMETHOD(device_attach,	bge_attach),
283 	DEVMETHOD(device_detach,	bge_detach),
284 	DEVMETHOD(device_shutdown,	bge_shutdown),
285 	DEVMETHOD(device_suspend,	bge_suspend),
286 	DEVMETHOD(device_resume,	bge_resume),
287 
288 	/* bus interface */
289 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
290 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
291 
292 	/* MII interface */
293 	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
294 	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
295 	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),
296 
297 	{ 0, 0 }
298 };
299 
300 static driver_t bge_driver = {
301 	"bge",
302 	bge_methods,
303 	sizeof(struct bge_softc)
304 };
305 
306 static devclass_t bge_devclass;
307 
308 DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
309 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
310 
311 static u_int32_t
312 bge_readmem_ind(sc, off)
313 	struct bge_softc *sc;
314 	int off;
315 {
316 	device_t dev;
317 
318 	dev = sc->bge_dev;
319 
320 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
321 	return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
322 }
323 
324 static void
325 bge_writemem_ind(sc, off, val)
326 	struct bge_softc *sc;
327 	int off, val;
328 {
329 	device_t dev;
330 
331 	dev = sc->bge_dev;
332 
333 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
334 	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
335 
336 	return;
337 }
338 
339 #ifdef notdef
340 static u_int32_t
341 bge_readreg_ind(sc, off)
342 	struct bge_softc *sc;
343 	int off;
344 {
345 	device_t dev;
346 
347 	dev = sc->bge_dev;
348 
349 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
350 	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
351 }
352 #endif
353 
354 static void
355 bge_writereg_ind(sc, off, val)
356 	struct bge_softc *sc;
357 	int off, val;
358 {
359 	device_t dev;
360 
361 	dev = sc->bge_dev;
362 
363 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
364 	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
365 
366 	return;
367 }
368 
369 /*
370  * Map a single buffer address.
371  */
372 
373 static void
374 bge_dma_map_addr(arg, segs, nseg, error)
375 	void *arg;
376 	bus_dma_segment_t *segs;
377 	int nseg;
378 	int error;
379 {
380 	struct bge_dmamap_arg *ctx;
381 
382 	if (error)
383 		return;
384 
385 	ctx = arg;
386 
387 	if (nseg > ctx->bge_maxsegs) {
388 		ctx->bge_maxsegs = 0;
389 		return;
390 	}
391 
392 	ctx->bge_busaddr = segs->ds_addr;
393 
394 	return;
395 }
396 
397 /*
398  * Map an mbuf chain into a TX ring.
399  */
400 
401 static void
402 bge_dma_map_tx_desc(arg, segs, nseg, mapsize, error)
403 	void *arg;
404 	bus_dma_segment_t *segs;
405 	int nseg;
406 	bus_size_t mapsize;
407 	int error;
408 {
409 	struct bge_dmamap_arg *ctx;
410 	struct bge_tx_bd *d = NULL;
411 	int i = 0, idx;
412 
413 	if (error)
414 		return;
415 
416 	ctx = arg;
417 
418 	/* Signal error to caller if there are too many segments */
419 	if (nseg > ctx->bge_maxsegs) {
420 		ctx->bge_maxsegs = 0;
421 		return;
422 	}
423 
424 	idx = ctx->bge_idx;
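	/*
	 * Copy each DMA segment into consecutive TX descriptors,
	 * wrapping the ring index as needed; the final descriptor
	 * is tagged with BGE_TXBDFLAG_END below.
	 */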
425 	while(1) {
426 		d = &ctx->bge_ring[idx];
427 		d->bge_addr.bge_addr_lo =
428 		    htole32(BGE_ADDR_LO(segs[i].ds_addr));
429 		d->bge_addr.bge_addr_hi =
430 		    htole32(BGE_ADDR_HI(segs[i].ds_addr));
431 		d->bge_len = htole16(segs[i].ds_len);
432 		d->bge_flags = htole16(ctx->bge_flags);
433 		i++;
434 		if (i == nseg)
435 			break;
436 		BGE_INC(idx, BGE_TX_RING_CNT);
437 	}
438 
439 	d->bge_flags |= htole16(BGE_TXBDFLAG_END);
440 	ctx->bge_maxsegs = nseg;
441 	ctx->bge_idx = idx;
442 
443 	return;
444 }
445 
446 #ifdef notdef
447 static u_int8_t
448 bge_vpd_readbyte(sc, addr)
449 	struct bge_softc *sc;
450 	int addr;
451 {
452 	int i;
453 	device_t dev;
454 	u_int32_t val;
455 
456 	dev = sc->bge_dev;
457 	pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
458 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
459 		DELAY(10);
460 		if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
461 			break;
462 	}
463 
464 	if (i == BGE_TIMEOUT * 10) {
465 		printf("bge%d: VPD read timed out\n", sc->bge_unit);
466 		return(0);
467 	}
468 
469 	val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);
470 
471 	return((val >> ((addr % 4) * 8)) & 0xFF);
472 }
473 
474 static void
475 bge_vpd_read_res(sc, res, addr)
476 	struct bge_softc *sc;
477 	struct vpd_res *res;
478 	int addr;
479 {
480 	int i;
481 	u_int8_t *ptr;
482 
483 	ptr = (u_int8_t *)res;
484 	for (i = 0; i < sizeof(struct vpd_res); i++)
485 		ptr[i] = bge_vpd_readbyte(sc, i + addr);
486 
487 	return;
488 }
489 
490 static void
491 bge_vpd_read(sc)
492 	struct bge_softc *sc;
493 {
494 	int pos = 0, i;
495 	struct vpd_res res;
496 
497 	if (sc->bge_vpd_prodname != NULL)
498 		free(sc->bge_vpd_prodname, M_DEVBUF);
499 	if (sc->bge_vpd_readonly != NULL)
500 		free(sc->bge_vpd_readonly, M_DEVBUF);
501 	sc->bge_vpd_prodname = NULL;
502 	sc->bge_vpd_readonly = NULL;
503 
504 	bge_vpd_read_res(sc, &res, pos);
505 
506 	if (res.vr_id != VPD_RES_ID) {
507 		printf("bge%d: bad VPD resource id: expected %x got %x\n",
508 			sc->bge_unit, VPD_RES_ID, res.vr_id);
509 		return;
510 	}
511 
512 	pos += sizeof(res);
513 	sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
514 	for (i = 0; i < res.vr_len; i++)
515 		sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
516 	sc->bge_vpd_prodname[i] = '\0';
517 	pos += i;
518 
519 	bge_vpd_read_res(sc, &res, pos);
520 
521 	if (res.vr_id != VPD_RES_READ) {
522 		printf("bge%d: bad VPD resource id: expected %x got %x\n",
523 		    sc->bge_unit, VPD_RES_READ, res.vr_id);
524 		return;
525 	}
526 
527 	pos += sizeof(res);
528 	sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
529 	for (i = 0; i < res.vr_len; i++)
530 		sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
531 
532 	return;
533 }
534 #endif
535 
536 /*
537  * Read a byte of data stored in the EEPROM at address 'addr.' The
538  * BCM570x supports both the traditional bitbang interface and an
539  * auto access interface for reading the EEPROM. We use the auto
540  * access method.
541  */
542 static u_int8_t
543 bge_eeprom_getbyte(sc, addr, dest)
544 	struct bge_softc *sc;
545 	int addr;
546 	u_int8_t *dest;
547 {
548 	int i;
549 	u_int32_t byte = 0;
550 
551 	/*
552 	 * Enable use of auto EEPROM access so we can avoid
553 	 * having to use the bitbang method.
554 	 */
555 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
556 
557 	/* Reset the EEPROM, load the clock period. */
558 	CSR_WRITE_4(sc, BGE_EE_ADDR,
559 	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
560 	DELAY(20);
561 
562 	/* Issue the read EEPROM command. */
563 	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
564 
565 	/* Wait for completion */
566 	for(i = 0; i < BGE_TIMEOUT * 10; i++) {
567 		DELAY(10);
568 		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
569 			break;
570 	}
571 
572 	if (i == BGE_TIMEOUT * 10) {
573 		printf("bge%d: eeprom read timed out\n", sc->bge_unit);
574 		return(0);
575 	}
576 
577 	/* Get result. */
578 	byte = CSR_READ_4(sc, BGE_EE_DATA);
579 
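	/*
	 * BGE_EE_DATA returns a full 32-bit word; extract the byte
	 * selected by the low two bits of the address.
	 */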
580 	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;
581 
582 	return(0);
583 }
584 
585 /*
586  * Read a sequence of bytes from the EEPROM.
587  */
588 static int
589 bge_read_eeprom(sc, dest, off, cnt)
590 	struct bge_softc *sc;
591 	caddr_t dest;
592 	int off;
593 	int cnt;
594 {
595 	int err = 0, i;
596 	u_int8_t byte = 0;
597 
598 	for (i = 0; i < cnt; i++) {
599 		err = bge_eeprom_getbyte(sc, off + i, &byte);
600 		if (err)
601 			break;
602 		*(dest + i) = byte;
603 	}
604 
605 	return(err ? 1 : 0);
606 }
607 
608 static int
609 bge_miibus_readreg(dev, phy, reg)
610 	device_t dev;
611 	int phy, reg;
612 {
613 	struct bge_softc *sc;
614 	u_int32_t val, autopoll;
615 	int i;
616 
617 	sc = device_get_softc(dev);
618 
619 	/*
620 	 * Broadcom's own driver always assumes the internal
621 	 * PHY is at GMII address 1. On some chips, the PHY responds
622 	 * to accesses at all addresses, which could cause us to
623  * bogusly attach the PHY 32 times at probe time. Always
624  * restricting the lookup to address 1 is simpler than
625  * trying to figure out which chip revisions should be
626 	 * special-cased.
627 	 */
628 	if (phy != 1)
629 		return(0);
630 
631 	/* Reading with autopolling on may trigger PCI errors */
632 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
633 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
634 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
635 		DELAY(40);
636 	}
637 
638 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
639 	    BGE_MIPHY(phy)|BGE_MIREG(reg));
640 
641 	for (i = 0; i < BGE_TIMEOUT; i++) {
642 		val = CSR_READ_4(sc, BGE_MI_COMM);
643 		if (!(val & BGE_MICOMM_BUSY))
644 			break;
645 	}
646 
647 	if (i == BGE_TIMEOUT) {
648 		printf("bge%d: PHY read timed out\n", sc->bge_unit);
649 		val = 0;
650 		goto done;
651 	}
652 
653 	val = CSR_READ_4(sc, BGE_MI_COMM);
654 
655 done:
656 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
657 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
658 		DELAY(40);
659 	}
660 
661 	if (val & BGE_MICOMM_READFAIL)
662 		return(0);
663 
664 	return(val & 0xFFFF);
665 }
666 
667 static int
668 bge_miibus_writereg(dev, phy, reg, val)
669 	device_t dev;
670 	int phy, reg, val;
671 {
672 	struct bge_softc *sc;
673 	u_int32_t autopoll;
674 	int i;
675 
676 	sc = device_get_softc(dev);
677 
678 	/* Writing with autopolling on may trigger PCI errors */
679 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
680 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
681 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
682 		DELAY(40);
683 	}
684 
685 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
686 	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
687 
688 	for (i = 0; i < BGE_TIMEOUT; i++) {
689 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
690 			break;
691 	}
692 
693 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
694 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
695 		DELAY(40);
696 	}
697 
698 	if (i == BGE_TIMEOUT) {
699 		printf("bge%d: PHY write timed out\n", sc->bge_unit);
700 		return(0);
701 	}
702 
703 	return(0);
704 }
705 
706 static void
707 bge_miibus_statchg(dev)
708 	device_t dev;
709 {
710 	struct bge_softc *sc;
711 	struct mii_data *mii;
712 
713 	sc = device_get_softc(dev);
714 	mii = device_get_softc(sc->bge_miibus);
715 
716 	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
717 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
718 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
719 	} else {
720 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
721 	}
722 
723 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
724 		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
725 	} else {
726 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
727 	}
728 
729 	return;
730 }
731 
732 /*
733  * Handle events that have triggered interrupts.
734  */
735 static void
736 bge_handle_events(sc)
737 	struct bge_softc		*sc;
738 {
739 
740 	return;
741 }
742 
743 /*
744  * Initialize a standard receive ring descriptor.
745  */
746 static int
747 bge_newbuf_std(sc, i, m)
748 	struct bge_softc	*sc;
749 	int			i;
750 	struct mbuf		*m;
751 {
752 	struct mbuf		*m_new = NULL;
753 	struct bge_rx_bd	*r;
754 	struct bge_dmamap_arg	ctx;
755 	int			error;
756 
757 	if (m == NULL) {
758 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
759 		if (m_new == NULL) {
760 			return(ENOBUFS);
761 		}
762 
763 		MCLGET(m_new, M_DONTWAIT);
764 		if (!(m_new->m_flags & M_EXT)) {
765 			m_freem(m_new);
766 			return(ENOBUFS);
767 		}
768 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
769 	} else {
770 		m_new = m;
771 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
772 		m_new->m_data = m_new->m_ext.ext_buf;
773 	}
774 
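	/*
	 * Shift the payload by ETHER_ALIGN (2 bytes) so the IP header
	 * is 32-bit aligned; skip this on chips flagged with the RX
	 * alignment bug.
	 */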
775 	if (!sc->bge_rx_alignment_bug)
776 		m_adj(m_new, ETHER_ALIGN);
777 	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
778 	r = &sc->bge_ldata.bge_rx_std_ring[i];
779 	ctx.bge_maxsegs = 1;
780 	ctx.sc = sc;
781 	error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
782 	    sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
783 	    m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
784 	if (error || ctx.bge_maxsegs == 0) {
785 		if (m == NULL) {
786 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
787 			m_freem(m_new);
788 		}
789 		return(ENOMEM);
790 	}
791 	r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr));
792 	r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr));
793 	r->bge_flags = htole16(BGE_RXBDFLAG_END);
794 	r->bge_len = htole16(m_new->m_len);
795 	r->bge_idx = htole16(i);
796 
797 	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
798 	    sc->bge_cdata.bge_rx_std_dmamap[i],
799 	    BUS_DMASYNC_PREREAD);
800 
801 	return(0);
802 }
803 
804 /*
805  * Initialize a jumbo receive ring descriptor. This allocates
806  * a 9K mbuf cluster to back the jumbo buffer.
807  */
808 static int
809 bge_newbuf_jumbo(sc, i, m)
810 	struct bge_softc *sc;
811 	int i;
812 	struct mbuf *m;
813 {
814 	bus_dma_segment_t segs[BGE_NSEG_JUMBO];
815 	struct bge_extrx_bd *r;
816 	struct mbuf *m_new = NULL;
817 	int nsegs;
818 	int error;
819 
820 	if (m == NULL) {
821 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
822 		if (m_new == NULL)
823 			return(ENOBUFS);
824 
825 		m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
826 		if (!(m_new->m_flags & M_EXT)) {
827 			m_freem(m_new);
828 			return(ENOBUFS);
829 		}
830 		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
831 	} else {
832 		m_new = m;
833 		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
834 		m_new->m_data = m_new->m_ext.ext_buf;
835 	}
836 
837 	if (!sc->bge_rx_alignment_bug)
838 		m_adj(m_new, ETHER_ALIGN);
839 
840 	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
841 	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
842 	    m_new, segs, &nsegs, BUS_DMA_NOWAIT);
843 	if (error) {
844 		if (m == NULL)
845 			m_freem(m_new);
846 		return(error);
847 	}
848 	KASSERT(nsegs == BGE_NSEG_JUMBO, ("%s: %d segments", __func__, nsegs));
849 
850 	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
851 
852 	/*
853 	 * Fill in the extended RX buffer descriptor.
854 	 */
855 	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
856 	r->bge_addr0.bge_addr_lo = htole32(BGE_ADDR_LO(segs[0].ds_addr));
857 	r->bge_addr0.bge_addr_hi = htole32(BGE_ADDR_HI(segs[0].ds_addr));
858 	r->bge_len0 = htole16(segs[0].ds_len);
859 	r->bge_addr1.bge_addr_lo = htole32(BGE_ADDR_LO(segs[1].ds_addr));
860 	r->bge_addr1.bge_addr_hi = htole32(BGE_ADDR_HI(segs[1].ds_addr));
861 	r->bge_len1 = htole16(segs[1].ds_len);
862 	r->bge_addr2.bge_addr_lo = htole32(BGE_ADDR_LO(segs[2].ds_addr));
863 	r->bge_addr2.bge_addr_hi = htole32(BGE_ADDR_HI(segs[2].ds_addr));
864 	r->bge_len2 = htole16(segs[2].ds_len);
865 	r->bge_len3 = htole16(0);
866 	r->bge_flags = htole16(BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END);
867 	r->bge_idx = htole16(i);
868 
869 	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
870 	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
871 	    BUS_DMASYNC_PREREAD);
872 
873 	return (0);
874 }
875 
876 /*
877  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
878  * that's 1MB of memory, which is a lot. For now, we fill only the first
879  * 256 ring entries and hope that our CPU is fast enough to keep up with
880  * the NIC.
881  */
882 static int
883 bge_init_rx_ring_std(sc)
884 	struct bge_softc *sc;
885 {
886 	int i;
887 
888 	for (i = 0; i < BGE_SSLOTS; i++) {
889 		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
890 			return(ENOBUFS);
891 	}
892 
893 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
894 	    sc->bge_cdata.bge_rx_std_ring_map,
895 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
896 
897 	sc->bge_std = i - 1;
898 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
899 
900 	return(0);
901 }
902 
903 static void
904 bge_free_rx_ring_std(sc)
905 	struct bge_softc *sc;
906 {
907 	int i;
908 
909 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
910 		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
911 			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
912 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
913 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
914 			    sc->bge_cdata.bge_rx_std_dmamap[i]);
915 		}
916 		bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
917 		    sizeof(struct bge_rx_bd));
918 	}
919 
920 	return;
921 }
922 
923 static int
924 bge_init_rx_ring_jumbo(sc)
925 	struct bge_softc *sc;
926 {
927 	struct bge_rcb *rcb;
928 	int i;
929 
930 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
931 		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
932 			return(ENOBUFS);
933 	}
934 
935 	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
936 	    sc->bge_cdata.bge_rx_jumbo_ring_map,
937 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
938 
939 	sc->bge_jumbo = i - 1;
940 
941 	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
942 	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
943 				    BGE_RCB_FLAG_USE_EXT_RX_BD);
944 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
945 
946 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
947 
948 	return(0);
949 }
950 
951 static void
952 bge_free_rx_ring_jumbo(sc)
953 	struct bge_softc *sc;
954 {
955 	int i;
956 
957 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
958 		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
959 			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
960 			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
961 			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
962 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
963 		}
964 		bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
965 		    sizeof(struct bge_extrx_bd));
966 	}
967 
968 	return;
969 }
970 
971 static void
972 bge_free_tx_ring(sc)
973 	struct bge_softc *sc;
974 {
975 	int i;
976 
977 	if (sc->bge_ldata.bge_tx_ring == NULL)
978 		return;
979 
980 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
981 		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
982 			m_freem(sc->bge_cdata.bge_tx_chain[i]);
983 			sc->bge_cdata.bge_tx_chain[i] = NULL;
984 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
985 			    sc->bge_cdata.bge_tx_dmamap[i]);
986 		}
987 		bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
988 		    sizeof(struct bge_tx_bd));
989 	}
990 
991 	return;
992 }
993 
994 static int
995 bge_init_tx_ring(sc)
996 	struct bge_softc *sc;
997 {
998 	sc->bge_txcnt = 0;
999 	sc->bge_tx_saved_considx = 0;
1000 
1001 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
1002 	/* 5700 b2 errata */
1003 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1004 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
1005 
1006 	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1007 	/* 5700 b2 errata */
1008 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1009 		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1010 
1011 	return(0);
1012 }
1013 
1014 static void
1015 bge_setmulti(sc)
1016 	struct bge_softc *sc;
1017 {
1018 	struct ifnet *ifp;
1019 	struct ifmultiaddr *ifma;
1020 	u_int32_t hashes[4] = { 0, 0, 0, 0 };
1021 	int h, i;
1022 
1023 	BGE_LOCK_ASSERT(sc);
1024 
1025 	ifp = sc->bge_ifp;
1026 
1027 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1028 		for (i = 0; i < 4; i++)
1029 			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1030 		return;
1031 	}
1032 
1033 	/* First, zot all the existing filters. */
1034 	for (i = 0; i < 4; i++)
1035 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1036 
1037 	/* Now program new ones. */
1038 	IF_ADDR_LOCK(ifp);
1039 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1040 		if (ifma->ifma_addr->sa_family != AF_LINK)
1041 			continue;
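		/*
		 * The low 7 bits of the CRC select one of 128 hash bits:
		 * bits 6:5 pick one of the four BGE_MAR registers and
		 * bits 4:0 the bit within that register.
		 */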
1042 		h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1043 		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1044 		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1045 	}
1046 	IF_ADDR_UNLOCK(ifp);
1047 
1048 	for (i = 0; i < 4; i++)
1049 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1050 
1051 	return;
1052 }
1053 
1054 /*
1055  * Do endian, PCI and DMA initialization. Also check the on-board ROM
1056  * self-test results.
1057  */
1058 static int
1059 bge_chipinit(sc)
1060 	struct bge_softc *sc;
1061 {
1062 	int			i;
1063 	u_int32_t		dma_rw_ctl;
1064 
1065 	/* Set endianness before we access any non-PCI registers. */
1066 #if BYTE_ORDER == BIG_ENDIAN
1067 	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
1068 	    BGE_BIGENDIAN_INIT, 4);
1069 #else
1070 	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
1071 	    BGE_LITTLEENDIAN_INIT, 4);
1072 #endif
1073 
1074 	/*
1075 	 * Check the 'ROM failed' bit on the RX CPU to see if
1076 	 * self-tests passed.
1077 	 */
1078 	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1079 		printf("bge%d: RX CPU self-diagnostics failed!\n",
1080 		    sc->bge_unit);
1081 		return(ENODEV);
1082 	}
1083 
1084 	/* Clear the MAC control register */
1085 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1086 
1087 	/*
1088 	 * Clear the MAC statistics block in the NIC's
1089 	 * internal memory.
1090 	 */
1091 	for (i = BGE_STATS_BLOCK;
1092 	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1093 		BGE_MEMWIN_WRITE(sc, i, 0);
1094 
1095 	for (i = BGE_STATUS_BLOCK;
1096 	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1097 		BGE_MEMWIN_WRITE(sc, i, 0);
1098 
1099 	/* Set up the PCI DMA control register. */
1100 	if (sc->bge_pcie) {
1101 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1102 		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1103 		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1104 	} else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
1105 	    BGE_PCISTATE_PCI_BUSMODE) {
1106 		/* Conventional PCI bus */
1107 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1108 		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1109 		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1110 		    (0x0F);
1111 	} else {
1112 		/* PCI-X bus */
1113 		/*
1114 		 * The 5704 uses a different encoding of read/write
1115 		 * watermarks.
1116 		 */
1117 		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1118 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1119 			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1120 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1121 		else
1122 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1123 			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1124 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1125 			    (0x0F);
1126 
1127 		/*
1128 		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1129 		 * for hardware bugs.
1130 		 */
1131 		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1132 		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1133 			u_int32_t tmp;
1134 
1135 			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1136 			if (tmp == 0x6 || tmp == 0x7)
1137 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1138 		}
1139 	}
1140 
1141 	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1142 	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1143 	    sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1144 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
1145 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1146 	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1147 
1148 	/*
1149 	 * Set up general mode register.
1150 	 */
1151 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
1152 	    BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1153 	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1154 	    BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);
1155 
1156 	/*
1157 	 * Disable memory write invalidate.  Apparently it is not supported
1158 	 * properly by these devices.
1159 	 */
1160 	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1161 
1162 #ifdef __brokenalpha__
1163 	/*
1164 	 * Must ensure that we do not cross an 8K byte boundary
1165 	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
1166 	 * restriction on some ALPHA platforms with early revision
1167 	 * 21174 PCI chipsets, such as the AlphaPC 164lx
1168 	 */
1169 	PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1170 	    BGE_PCI_READ_BNDRY_1024BYTES, 4);
1171 #endif
1172 
1173 	/* Set the timer prescaler (always 66Mhz) */
1174 	/* Set the timer prescaler (always 66MHz) */
1175 
1176 	return(0);
1177 }
1178 
1179 static int
1180 bge_blockinit(sc)
1181 	struct bge_softc *sc;
1182 {
1183 	struct bge_rcb *rcb;
1184 	volatile struct bge_rcb *vrcb;
1185 	int i;
1186 
1187 	/*
1188 	 * Initialize the memory window pointer register so that
1189 	 * we can access the first 32K of internal NIC RAM. This will
1190 	 * allow us to set up the TX send ring RCBs and the RX return
1191 	 * ring RCBs, plus other things which live in NIC memory.
1192 	 */
1193 	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1194 
1195 	/* Note: the BCM5704 has a smaller mbuf space than other chips. */
1196 
1197 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1198 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1199 		/* Configure mbuf memory pool */
1200 		if (sc->bge_extram) {
1201 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1202 			    BGE_EXT_SSRAM);
1203 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1204 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1205 			else
1206 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1207 		} else {
1208 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1209 			    BGE_BUFFPOOL_1);
1210 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1211 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1212 			else
1213 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1214 		}
1215 
1216 		/* Configure DMA resource pool */
1217 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1218 		    BGE_DMA_DESCRIPTORS);
1219 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1220 	}
1221 
1222 	/* Configure mbuf pool watermarks */
1223 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1224 	    sc->bge_asicrev == BGE_ASICREV_BCM5750) {
1225 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1226 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1227 	} else {
1228 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1229 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1230 	}
1231 	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1232 
1233 	/* Configure DMA resource watermarks */
1234 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1235 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1236 
1237 	/* Enable buffer manager */
1238 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1239 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1240 		CSR_WRITE_4(sc, BGE_BMAN_MODE,
1241 		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1242 
1243 		/* Poll for buffer manager start indication */
1244 		for (i = 0; i < BGE_TIMEOUT; i++) {
1245 			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1246 				break;
1247 			DELAY(10);
1248 		}
1249 
1250 		if (i == BGE_TIMEOUT) {
1251 			printf("bge%d: buffer manager failed to start\n",
1252 			    sc->bge_unit);
1253 			return(ENXIO);
1254 		}
1255 	}
1256 
1257 	/* Enable flow-through queues */
1258 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1259 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1260 
1261 	/* Wait until queue initialization is complete */
1262 	for (i = 0; i < BGE_TIMEOUT; i++) {
1263 		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1264 			break;
1265 		DELAY(10);
1266 	}
1267 
1268 	if (i == BGE_TIMEOUT) {
1269 		printf("bge%d: flow-through queue init failed\n",
1270 		    sc->bge_unit);
1271 		return(ENXIO);
1272 	}
1273 
1274 	/* Initialize the standard RX ring control block */
1275 	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1276 	rcb->bge_hostaddr.bge_addr_lo =
1277 	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1278 	rcb->bge_hostaddr.bge_addr_hi =
1279 	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1280 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1281 	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1282 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1283 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
1284 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1285 	else
1286 		rcb->bge_maxlen_flags =
1287 		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1288 	if (sc->bge_extram)
1289 		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1290 	else
1291 		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1292 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1293 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1294 
1295 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1296 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1297 
1298 	/*
1299 	 * Initialize the jumbo RX ring control block.
1300 	 * We set the 'ring disabled' bit in the flags
1301 	 * field until we're actually ready to start
1302 	 * using this ring (i.e. once we set the MTU
1303 	 * high enough to require it).
1304 	 */
1305 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1306 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1307 		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1308 
1309 		rcb->bge_hostaddr.bge_addr_lo =
1310 		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1311 		rcb->bge_hostaddr.bge_addr_hi =
1312 		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1313 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1314 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
1315 		    BUS_DMASYNC_PREREAD);
1316 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1317 		    BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED);
1318 		if (sc->bge_extram)
1319 			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1320 		else
1321 			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1322 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1323 		    rcb->bge_hostaddr.bge_addr_hi);
1324 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1325 		    rcb->bge_hostaddr.bge_addr_lo);
1326 
1327 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1328 		    rcb->bge_maxlen_flags);
1329 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1330 
1331 		/* Set up dummy disabled mini ring RCB */
1332 		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1333 		rcb->bge_maxlen_flags =
1334 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1335 		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1336 		    rcb->bge_maxlen_flags);
1337 	}
1338 
1339 	/*
1340 	 * Set the BD ring replenish thresholds. The recommended
1341 	 * values are 1/8th the number of descriptors allocated to
1342 	 * each ring.
1343 	 */
1344 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1345 	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1346 
1347 	/*
1348 	 * Disable all unused send rings by setting the 'ring disabled'
1349 	 * bit in the flags field of all the TX send ring control blocks.
1350 	 * These are located in NIC memory.
1351 	 */
1352 	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1353 	    BGE_SEND_RING_RCB);
1354 	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1355 		vrcb->bge_maxlen_flags =
1356 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1357 		vrcb->bge_nicaddr = 0;
1358 		vrcb++;
1359 	}
1360 
1361 	/* Configure TX RCB 0 (we use only the first ring) */
1362 	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1363 	    BGE_SEND_RING_RCB);
1364 	vrcb->bge_hostaddr.bge_addr_lo =
1365 	    htole32(BGE_ADDR_LO(sc->bge_ldata.bge_tx_ring_paddr));
1366 	vrcb->bge_hostaddr.bge_addr_hi =
1367 	    htole32(BGE_ADDR_HI(sc->bge_ldata.bge_tx_ring_paddr));
1368 	vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
1369 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1370 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1371 		vrcb->bge_maxlen_flags =
1372 		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);
1373 
1374 	/* Disable all unused RX return rings */
1375 	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1376 	    BGE_RX_RETURN_RING_RCB);
1377 	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1378 		vrcb->bge_hostaddr.bge_addr_hi = 0;
1379 		vrcb->bge_hostaddr.bge_addr_lo = 0;
1380 		vrcb->bge_maxlen_flags =
1381 		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1382 		    BGE_RCB_FLAG_RING_DISABLED);
1383 		vrcb->bge_nicaddr = 0;
1384 		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1385 		    (i * (sizeof(u_int64_t))), 0);
1386 		vrcb++;
1387 	}
1388 
1389 	/* Initialize RX ring indexes */
1390 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1391 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1392 	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1393 
1394 	/*
1395 	 * Set up RX return ring 0.
1396 	 * Note that the NIC address for RX return rings is 0x00000000.
1397 	 * The return rings live entirely within the host, so the
1398 	 * nicaddr field in the RCB isn't used.
1399 	 */
1400 	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1401 	    BGE_RX_RETURN_RING_RCB);
1402 	vrcb->bge_hostaddr.bge_addr_lo =
1403 	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_return_ring_paddr);
1404 	vrcb->bge_hostaddr.bge_addr_hi =
1405 	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_return_ring_paddr);
1406 	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
1407 	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
1408 	vrcb->bge_nicaddr = 0x00000000;
1409 	vrcb->bge_maxlen_flags =
1410 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0);
1411 
1412 	/* Set random backoff seed for TX */
1413 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1414 	    IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1415 	    IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1416 	    IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1417 	    BGE_TX_BACKOFF_SEED_MASK);
1418 
1419 	/* Set inter-packet gap */
1420 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1421 
1422 	/*
1423 	 * Specify which ring to use for packets that don't match
1424 	 * any RX rules.
1425 	 */
1426 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1427 
1428 	/*
1429 	 * Configure number of RX lists. One interrupt distribution
1430 	 * list, sixteen active lists, one bad frames class.
1431 	 */
1432 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1433 
1434 	/* Initialize RX list placement stats mask. */
1435 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1436 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1437 
1438 	/* Disable host coalescing until we get it set up */
1439 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1440 
1441 	/* Poll to make sure it's shut down. */
1442 	for (i = 0; i < BGE_TIMEOUT; i++) {
1443 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1444 			break;
1445 		DELAY(10);
1446 	}
1447 
1448 	if (i == BGE_TIMEOUT) {
1449 		printf("bge%d: host coalescing engine failed to idle\n",
1450 		    sc->bge_unit);
1451 		return(ENXIO);
1452 	}
1453 
1454 	/* Set up host coalescing defaults */
1455 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1456 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1457 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1458 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1459 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1460 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1461 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1462 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1463 	}
1464 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1465 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1466 
1467 	/* Set up address of statistics block */
1468 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1469 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1470 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1471 		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1472 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1473 		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1474 		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1475 		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1476 		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1477 	}
1478 
1479 	/* Set up address of status block */
1480 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1481 	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1482 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1483 	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1484 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
1485 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
1486 	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1487 	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1488 
1489 	/* Turn on host coalescing state machine */
1490 	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1491 
1492 	/* Turn on RX BD completion state machine and enable attentions */
1493 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
1494 	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1495 
1496 	/* Turn on RX list placement state machine */
1497 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1498 
1499 	/* Turn on RX list selector state machine. */
1500 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1501 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1502 		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1503 
1504 	/* Turn on DMA, clear stats */
1505 	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1506 	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1507 	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1508 	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1509 	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1510 
1511 	/* Set misc. local control, enable interrupts on attentions */
1512 	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1513 
1514 #ifdef notdef
1515 	/* Assert GPIO pins for PHY reset */
1516 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1517 	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1518 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1519 	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1520 #endif
1521 
1522 	/* Turn on DMA completion state machine */
1523 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1524 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1525 		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1526 
1527 	/* Turn on write DMA state machine */
1528 	CSR_WRITE_4(sc, BGE_WDMA_MODE,
1529 	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1530 
1531 	/* Turn on read DMA state machine */
1532 	CSR_WRITE_4(sc, BGE_RDMA_MODE,
1533 	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1534 
1535 	/* Turn on RX data completion state machine */
1536 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1537 
1538 	/* Turn on RX BD initiator state machine */
1539 	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1540 
1541 	/* Turn on RX data and RX BD initiator state machine */
1542 	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1543 
1544 	/* Turn on Mbuf cluster free state machine */
1545 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1546 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1547 		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1548 
1549 	/* Turn on send BD completion state machine */
1550 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1551 
1552 	/* Turn on send data completion state machine */
1553 	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1554 
1555 	/* Turn on send data initiator state machine */
1556 	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1557 
1558 	/* Turn on send BD initiator state machine */
1559 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1560 
1561 	/* Turn on send BD selector state machine */
1562 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1563 
1564 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1565 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1566 	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1567 
1568 	/* ack/clear link change events */
1569 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1570 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1571 	    BGE_MACSTAT_LINK_CHANGED);
1572 	CSR_WRITE_4(sc, BGE_MI_STS, 0);
1573 
1574 	/* Enable PHY auto polling (for MII/GMII only) */
1575 	if (sc->bge_tbi) {
1576 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1577 	} else {
1578 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1579 		if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
1580 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1581 			    BGE_EVTENB_MI_INTERRUPT);
1582 	}
1583 
1584 	/* Enable link state change attentions. */
1585 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1586 
1587 	return(0);
1588 }
1589 
1590 /*
1591  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1592  * against our list and return its name if we find a match. Note
1593  * that since the Broadcom controller contains VPD support, we
1594  * can get the device name string from the controller itself instead
1595  * of the compiled-in string. This is a little slow, but it guarantees
1596  * we'll always announce the right product name.
1597  */
1598 static int
1599 bge_probe(dev)
1600 	device_t dev;
1601 {
1602 	struct bge_type *t;
1603 	struct bge_softc *sc;
1604 	char *descbuf;
1605 
1606 	t = bge_devs;
1607 
1608 	sc = device_get_softc(dev);
1609 	bzero(sc, sizeof(struct bge_softc));
1610 	sc->bge_unit = device_get_unit(dev);
1611 	sc->bge_dev = dev;
1612 
1613 	while(t->bge_name != NULL) {
1614 		if ((pci_get_vendor(dev) == t->bge_vid) &&
1615 		    (pci_get_device(dev) == t->bge_did)) {
1616 #ifdef notdef
1617 			bge_vpd_read(sc);
1618 			device_set_desc(dev, sc->bge_vpd_prodname);
1619 #endif
1620 			descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
1621 			if (descbuf == NULL)
1622 				return(ENOMEM);
1623 			snprintf(descbuf, BGE_DEVDESC_MAX,
1624 			    "%s, ASIC rev. %#04x", t->bge_name,
1625 			    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
1626 			device_set_desc_copy(dev, descbuf);
1627 			if (pci_get_subvendor(dev) == DELL_VENDORID)
1628 				sc->bge_no_3_led = 1;
1629 			free(descbuf, M_TEMP);
1630 			return(0);
1631 		}
1632 		t++;
1633 	}
1634 
1635 	return(ENXIO);
1636 }
1637 
1638 static void
1639 bge_dma_free(sc)
1640 	struct bge_softc *sc;
1641 {
1642 	int i;
1643 
1644 
1645 	/* Destroy DMA maps for RX buffers */
1646 
1647 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1648 		if (sc->bge_cdata.bge_rx_std_dmamap[i])
1649 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1650 			    sc->bge_cdata.bge_rx_std_dmamap[i]);
1651 	}
1652 
1653 	/* Destroy DMA maps for jumbo RX buffers */
1654 
1655 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1656 		if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1657 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1658 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1659 	}
1660 
1661 	/* Destroy DMA maps for TX buffers */
1662 
1663 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1664 		if (sc->bge_cdata.bge_tx_dmamap[i])
1665 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1666 			    sc->bge_cdata.bge_tx_dmamap[i]);
1667 	}
1668 
1669 	if (sc->bge_cdata.bge_mtag)
1670 		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
1671 
1672 
1673 	/* Destroy standard RX ring */
1674 
1675 	if (sc->bge_ldata.bge_rx_std_ring)
1676 		bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1677 		    sc->bge_ldata.bge_rx_std_ring,
1678 		    sc->bge_cdata.bge_rx_std_ring_map);
1679 
1680 	if (sc->bge_cdata.bge_rx_std_ring_map) {
1681 		bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1682 		    sc->bge_cdata.bge_rx_std_ring_map);
1683 		bus_dmamap_destroy(sc->bge_cdata.bge_rx_std_ring_tag,
1684 		    sc->bge_cdata.bge_rx_std_ring_map);
1685 	}
1686 
1687 	if (sc->bge_cdata.bge_rx_std_ring_tag)
1688 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1689 
1690 	/* Destroy jumbo RX ring */
1691 
1692 	if (sc->bge_ldata.bge_rx_jumbo_ring)
1693 		bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1694 		    sc->bge_ldata.bge_rx_jumbo_ring,
1695 		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1696 
1697 	if (sc->bge_cdata.bge_rx_jumbo_ring_map) {
1698 		bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1699 		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1700 		bus_dmamap_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1701 		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1702 	}
1703 
1704 	if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1705 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1706 
1707 	/* Destroy RX return ring */
1708 
1709 	if (sc->bge_ldata.bge_rx_return_ring)
1710 		bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1711 		    sc->bge_ldata.bge_rx_return_ring,
1712 		    sc->bge_cdata.bge_rx_return_ring_map);
1713 
1714 	if (sc->bge_cdata.bge_rx_return_ring_map) {
1715 		bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1716 		    sc->bge_cdata.bge_rx_return_ring_map);
1717 		bus_dmamap_destroy(sc->bge_cdata.bge_rx_return_ring_tag,
1718 		    sc->bge_cdata.bge_rx_return_ring_map);
1719 	}
1720 
1721 	if (sc->bge_cdata.bge_rx_return_ring_tag)
1722 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
1723 
1724 	/* Destroy TX ring */
1725 
1726 	if (sc->bge_ldata.bge_tx_ring)
1727 		bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
1728 		    sc->bge_ldata.bge_tx_ring,
1729 		    sc->bge_cdata.bge_tx_ring_map);
1730 
1731 	if (sc->bge_cdata.bge_tx_ring_map) {
1732 		bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
1733 		    sc->bge_cdata.bge_tx_ring_map);
1734 		bus_dmamap_destroy(sc->bge_cdata.bge_tx_ring_tag,
1735 		    sc->bge_cdata.bge_tx_ring_map);
1736 	}
1737 
1738 	if (sc->bge_cdata.bge_tx_ring_tag)
1739 		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
1740 
1741 	/* Destroy status block */
1742 
1743 	if (sc->bge_ldata.bge_status_block)
1744 		bus_dmamem_free(sc->bge_cdata.bge_status_tag,
1745 		    sc->bge_ldata.bge_status_block,
1746 		    sc->bge_cdata.bge_status_map);
1747 
1748 	if (sc->bge_cdata.bge_status_map) {
1749 		bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
1750 		    sc->bge_cdata.bge_status_map);
1751 		bus_dmamap_destroy(sc->bge_cdata.bge_status_tag,
1752 		    sc->bge_cdata.bge_status_map);
1753 	}
1754 
1755 	if (sc->bge_cdata.bge_status_tag)
1756 		bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1757 
1758 	/* Destroy statistics block */
1759 
1760 	if (sc->bge_ldata.bge_stats)
1761 		bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
1762 		    sc->bge_ldata.bge_stats,
1763 		    sc->bge_cdata.bge_stats_map);
1764 
1765 	if (sc->bge_cdata.bge_stats_map) {
1766 		bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
1767 		    sc->bge_cdata.bge_stats_map);
1768 		bus_dmamap_destroy(sc->bge_cdata.bge_stats_tag,
1769 		    sc->bge_cdata.bge_stats_map);
1770 	}
1771 
1772 	if (sc->bge_cdata.bge_stats_tag)
1773 		bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1774 
1775 	/* Destroy the parent tag */
1776 
1777 	if (sc->bge_cdata.bge_parent_tag)
1778 		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1779 
1780 	return;
1781 }
1782 
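/*
 * Allocate the bus DMA resources used by the driver: a parent tag, mbuf
 * tags and per-buffer maps for the RX and TX rings, and tags, memory and
 * maps for the rings, status block and statistics block.  Returns ENOMEM
 * on any failure; bge_attach() then tears everything down again through
 * bge_release_resources().
 */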
1783 static int
1784 bge_dma_alloc(dev)
1785 	device_t dev;
1786 {
1787 	struct bge_softc *sc;
1788 	int i, error;
1789 	struct bge_dmamap_arg ctx;
1790 
1791 	sc = device_get_softc(dev);
1792 
1793 	/*
1794 	 * Allocate the parent bus DMA tag appropriate for PCI.
1795 	 */
1796 	error = bus_dma_tag_create(NULL,	/* parent */
1797 			PAGE_SIZE, 0,		/* alignment, boundary */
1798 			BUS_SPACE_MAXADDR,	/* lowaddr */
1799 			BUS_SPACE_MAXADDR,	/* highaddr */
1800 			NULL, NULL,		/* filter, filterarg */
1801 			MAXBSIZE, BGE_NSEG_NEW,	/* maxsize, nsegments */
1802 			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1803 			0,			/* flags */
1804 			NULL, NULL,		/* lockfunc, lockarg */
1805 			&sc->bge_cdata.bge_parent_tag);

	if (error) {
		device_printf(dev, "could not allocate parent dma tag\n");
		return (ENOMEM);
	}

1807 	/*
1808 	 * Create tag for RX mbufs.
1809 	 */
1810 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
1811 	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1812 	    NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
1813 	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
1814 
1815 	if (error) {
1816 		device_printf(dev, "could not allocate dma tag\n");
1817 		return (ENOMEM);
1818 	}
1819 
1820 	/* Create DMA maps for RX buffers */
1821 
1822 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1823 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1824 			    &sc->bge_cdata.bge_rx_std_dmamap[i]);
1825 		if (error) {
1826 			device_printf(dev, "can't create DMA map for RX\n");
1827 			return(ENOMEM);
1828 		}
1829 	}
1830 
1831 	/* Create DMA maps for TX buffers */
1832 
1833 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1834 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1835 			    &sc->bge_cdata.bge_tx_dmamap[i]);
1836 		if (error) {
1837 			device_printf(dev, "can't create DMA map for TX\n");
1838 			return(ENOMEM);
1839 		}
1840 	}
1841 
1842 	/* Create tag for standard RX ring */
1843 
1844 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1845 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1846 	    NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1847 	    NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1848 
1849 	if (error) {
1850 		device_printf(dev, "could not allocate dma tag\n");
1851 		return (ENOMEM);
1852 	}
1853 
1854 	/* Allocate DMA'able memory for standard RX ring */
1855 
1856 	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1857 	    (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1858 	    &sc->bge_cdata.bge_rx_std_ring_map);
1859 	if (error)
1860 		return (ENOMEM);
1861 
1862 	bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1863 
1864 	/* Load the address of the standard RX ring */
1865 
1866 	ctx.bge_maxsegs = 1;
1867 	ctx.sc = sc;
1868 
1869 	error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
1870 	    sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
1871 	    BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1872 
1873 	if (error)
1874 		return (ENOMEM);
1875 
1876 	sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
1877 
1878 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1879 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1880 
1881 		/*
1882 		 * Create tag for jumbo mbufs.
1883 		 * This is really a bit of a kludge. We allocate a special
1884 		 * jumbo buffer pool which (thanks to the way our DMA
1885 		 * memory allocation works) will consist of contiguous
1886 		 * pages. This means that even though a jumbo buffer might
1887 		 * be larger than a page size, we don't really need to
1888 		 * map it into more than one DMA segment. However, the
1889 		 * default mbuf tag will result in multi-segment mappings,
1890 		 * so we have to create a special jumbo mbuf tag that
1891 		 * lets us get away with mapping the jumbo buffers as
1892 		 * a single segment. I think eventually the driver should
1893 		 * be changed so that it uses ordinary mbufs and cluster
1894 		 * buffers, i.e. jumbo frames can span multiple DMA
1895 		 * descriptors. But that's a project for another day.
1896 		 */
1897 
1898 		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1899 		    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1900 		    NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
1901 		    0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
1902 
1903 		if (error) {
1904 			device_printf(dev, "could not allocate dma tag\n");
1905 			return (ENOMEM);
1906 		}
1907 
1908 		/* Create tag for jumbo RX ring */
1909 		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1910 		    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1911 		    NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
1912 		    NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
1913 
1914 		if (error) {
1915 			device_printf(dev, "could not allocate dma tag\n");
1916 			return (ENOMEM);
1917 		}
1918 
1919 		/* Allocate DMA'able memory for jumbo RX ring */
1920 		error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1921 		    (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
1922 		    BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1923 		    &sc->bge_cdata.bge_rx_jumbo_ring_map);
1924 		if (error)
1925 			return (ENOMEM);
1926 
1927 		/* Load the address of the jumbo RX ring */
1928 		ctx.bge_maxsegs = 1;
1929 		ctx.sc = sc;
1930 
1931 		error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1932 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
1933 		    sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
1934 		    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1935 
1936 		if (error)
1937 			return (ENOMEM);
1938 
1939 		sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
1940 
1941 		/* Create DMA maps for jumbo RX buffers */
1942 
1943 		for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1944 			error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
1945 				    0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1946 			if (error) {
1947 				device_printf(dev,
1948 				    "can't create DMA map for jumbo RX\n");
1949 				return(ENOMEM);
1950 			}
1951 		}
1952 
1953 	}
1954 
1955 	/* Create tag for RX return ring */
1956 
1957 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1958 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1959 	    NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
1960 	    NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
1961 
1962 	if (error) {
1963 		device_printf(dev, "could not allocate dma tag\n");
1964 		return (ENOMEM);
1965 	}
1966 
1967 	/* Allocate DMA'able memory for RX return ring */
1968 
1969 	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
1970 	    (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
1971 	    &sc->bge_cdata.bge_rx_return_ring_map);
1972 	if (error)
1973 		return (ENOMEM);
1974 
1975 	bzero((char *)sc->bge_ldata.bge_rx_return_ring,
1976 	    BGE_RX_RTN_RING_SZ(sc));
1977 
1978 	/* Load the address of the RX return ring */
1979 
1980 	ctx.bge_maxsegs = 1;
1981 	ctx.sc = sc;
1982 
1983 	error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
1984 	    sc->bge_cdata.bge_rx_return_ring_map,
1985 	    sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
1986 	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1987 
1988 	if (error)
1989 		return (ENOMEM);
1990 
1991 	sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
1992 
1993 	/* Create tag for TX ring */
1994 
1995 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1996 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1997 	    NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
1998 	    &sc->bge_cdata.bge_tx_ring_tag);
1999 
2000 	if (error) {
2001 		device_printf(dev, "could not allocate dma tag\n");
2002 		return (ENOMEM);
2003 	}
2004 
2005 	/* Allocate DMA'able memory for TX ring */
2006 
2007 	error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
2008 	    (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
2009 	    &sc->bge_cdata.bge_tx_ring_map);
2010 	if (error)
2011 		return (ENOMEM);
2012 
2013 	bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
2014 
2015 	/* Load the address of the TX ring */
2016 
2017 	ctx.bge_maxsegs = 1;
2018 	ctx.sc = sc;
2019 
2020 	error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
2021 	    sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
2022 	    BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2023 
2024 	if (error)
2025 		return (ENOMEM);
2026 
2027 	sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
2028 
2029 	/* Create tag for status block */
2030 
2031 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2032 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2033 	    NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
2034 	    NULL, NULL, &sc->bge_cdata.bge_status_tag);
2035 
2036 	if (error) {
2037 		device_printf(dev, "could not allocate dma tag\n");
2038 		return (ENOMEM);
2039 	}
2040 
2041 	/* Allocate DMA'able memory for status block */
2042 
2043 	error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
2044 	    (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
2045 	    &sc->bge_cdata.bge_status_map);
2046 	if (error)
2047 		return (ENOMEM);
2048 
2049 	bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2050 
2051 	/* Load the address of the status block */
2052 
2053 	ctx.sc = sc;
2054 	ctx.bge_maxsegs = 1;
2055 
2056 	error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2057 	    sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2058 	    BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2059 
2060 	if (error)
2061 		return (ENOMEM);
2062 
2063 	sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2064 
2065 	/* Create tag for statistics block */
2066 
2067 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2068 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2069 	    NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2070 	    &sc->bge_cdata.bge_stats_tag);
2071 
2072 	if (error) {
2073 		device_printf(dev, "could not allocate dma tag\n");
2074 		return (ENOMEM);
2075 	}
2076 
2077 	/* Allocate DMA'able memory for statistics block */
2078 
2079 	error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2080 	    (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2081 	    &sc->bge_cdata.bge_stats_map);
2082 	if (error)
2083 		return (ENOMEM);
2084 
2085 	bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2086 
2087 	/* Load the address of the statstics block */
2088 	/* Load the address of the statistics block */
2089 	ctx.sc = sc;
2090 	ctx.bge_maxsegs = 1;
2091 
2092 	error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2093 	    sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2094 	    BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2095 
2096 	if (error)
2097 		return (ENOMEM);
2098 
2099 	sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2100 
2101 	return(0);
2102 }
2103 
2104 static int
2105 bge_attach(dev)
2106 	device_t dev;
2107 {
2108 	struct ifnet *ifp;
2109 	struct bge_softc *sc;
2110 	u_int32_t hwcfg = 0;
2111 	u_int32_t mac_tmp = 0;
2112 	u_char eaddr[6];
2113 	int unit, error = 0, rid;
2114 
2115 	sc = device_get_softc(dev);
2116 	unit = device_get_unit(dev);
2117 	sc->bge_dev = dev;
2118 	sc->bge_unit = unit;
2119 
2120 	/*
2121 	 * Map control/status registers.
2122 	 */
2123 	pci_enable_busmaster(dev);
2124 
2125 	rid = BGE_PCI_BAR0;
2126 	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2127 	    RF_ACTIVE|PCI_RF_DENSE);
2128 
2129 	if (sc->bge_res == NULL) {
2130 		printf ("bge%d: couldn't map memory\n", unit);
2131 		error = ENXIO;
2132 		goto fail;
2133 	}
2134 
2135 	sc->bge_btag = rman_get_bustag(sc->bge_res);
2136 	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2137 	sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);
2138 
2139 	/* Allocate interrupt */
2140 	rid = 0;
2141 
2142 	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2143 	    RF_SHAREABLE | RF_ACTIVE);
2144 
2145 	if (sc->bge_irq == NULL) {
2146 		printf("bge%d: couldn't map interrupt\n", unit);
2147 		error = ENXIO;
2148 		goto fail;
2149 	}
2150 
2151 	sc->bge_unit = unit;
2152 
2153 	BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2154 
2155 	/* Save ASIC rev. */
2156 
2157 	sc->bge_chipid =
2158 	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2159 	    BGE_PCIMISCCTL_ASICREV;
2160 	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2161 	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2162 
2163 	/*
2164 	 * Treat the 5714 and the 5752 like the 5750 until we have more info
2165 	 * on this chip.
2166 	 */
2167 	if (sc->bge_asicrev == BGE_ASICREV_BCM5714 ||
2168 	    sc->bge_asicrev == BGE_ASICREV_BCM5752)
2169 		sc->bge_asicrev = BGE_ASICREV_BCM5750;
2170 
2171 	/*
2172 	 * XXX: Broadcom Linux driver.  Not in specs or errata.
2173 	 * PCI-Express?
2174 	 */
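	/*
	 * The check below follows the next-capability pointer stored in the
	 * MSI capability header; if it points at the PCI Express capability
	 * register and the capability ID found there matches, the device is
	 * treated as a PCI Express part.
	 */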
2175 	if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
2176 		u_int32_t v;
2177 
2178 		v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
2179 		if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
2180 			v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2181 			if ((v & 0xff) == BGE_PCIE_CAPID)
2182 				sc->bge_pcie = 1;
2183 		}
2184 	}
2185 
2186 	/* Try to reset the chip. */
2187 	bge_reset(sc);
2188 
2189 	if (bge_chipinit(sc)) {
2190 		printf("bge%d: chip initialization failed\n", sc->bge_unit);
2191 		bge_release_resources(sc);
2192 		error = ENXIO;
2193 		goto fail;
2194 	}
2195 
2196 	/*
2197 	 * Get station address from the EEPROM.
2198 	 */
2199 	mac_tmp = bge_readmem_ind(sc, 0x0c14);
2200 	if ((mac_tmp >> 16) == 0x484b) {
2201 		eaddr[0] = (u_char)(mac_tmp >> 8);
2202 		eaddr[1] = (u_char)mac_tmp;
2203 		mac_tmp = bge_readmem_ind(sc, 0x0c18);
2204 		eaddr[2] = (u_char)(mac_tmp >> 24);
2205 		eaddr[3] = (u_char)(mac_tmp >> 16);
2206 		eaddr[4] = (u_char)(mac_tmp >> 8);
2207 		eaddr[5] = (u_char)mac_tmp;
2208 	} else if (bge_read_eeprom(sc, eaddr,
2209 	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2210 		printf("bge%d: failed to read station address\n", unit);
2211 		bge_release_resources(sc);
2212 		error = ENXIO;
2213 		goto fail;
2214 	}
2215 
2216 	/* The 5705 and 5750 limit the RX return ring to 512 entries. */
2217 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2218 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
2219 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2220 	else
2221 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2222 
2223 	if (bge_dma_alloc(dev)) {
2224 		printf ("bge%d: failed to allocate DMA resources\n",
2225 		    sc->bge_unit);
2226 		bge_release_resources(sc);
2227 		error = ENXIO;
2228 		goto fail;
2229 	}
2230 
2231 	/* Set default tuneable values. */
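	/*
	 * bge_stat_ticks controls how often the chip updates its statistics
	 * block.  The coalescing values bound how long the chip waits and
	 * how many RX/TX buffer descriptors it accumulates before writing a
	 * new status block and raising an interrupt; they are programmed
	 * into the BGE_HCC_* host coalescing registers by bge_blockinit().
	 */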
2232 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2233 	sc->bge_rx_coal_ticks = 150;
2234 	sc->bge_tx_coal_ticks = 150;
2235 	sc->bge_rx_max_coal_bds = 64;
2236 	sc->bge_tx_max_coal_bds = 128;
2237 
2238 	/* Set up ifnet structure */
2239 	ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2240 	if (ifp == NULL) {
2241 		printf("bge%d: failed to if_alloc()\n", sc->bge_unit);
2242 		bge_release_resources(sc);
2243 		error = ENXIO;
2244 		goto fail;
2245 	}
2246 	ifp->if_softc = sc;
2247 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2248 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2249 	ifp->if_ioctl = bge_ioctl;
2250 	ifp->if_start = bge_start;
2251 	ifp->if_watchdog = bge_watchdog;
2252 	ifp->if_init = bge_init;
2253 	ifp->if_mtu = ETHERMTU;
2254 	ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2255 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2256 	IFQ_SET_READY(&ifp->if_snd);
2257 	ifp->if_hwassist = BGE_CSUM_FEATURES;
2258 	/* NB: the code for RX csum offload is disabled for now */
2259 	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING |
2260 	    IFCAP_VLAN_MTU;
2261 	ifp->if_capenable = ifp->if_capabilities;
2262 #ifdef DEVICE_POLLING
2263 	ifp->if_capabilities |= IFCAP_POLLING;
2264 #endif
2265 
2266 	/*
2267 	 * Figure out what sort of media we have by checking the
2268 	 * hardware config word in the first 32k of NIC internal memory,
2269 	 * or fall back to examining the EEPROM if necessary.
2270 	 * Note: on some BCM5700 cards, this value appears to be unset.
2271 	 * If that's the case, we have to rely on identifying the NIC
2272 	 * by its PCI subsystem ID, as we do below for the SysKonnect
2273 	 * SK-9D41.
2274 	 */
2275 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2276 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2277 	else {
2278 		bge_read_eeprom(sc, (caddr_t)&hwcfg,
2279 				BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
2280 		hwcfg = ntohl(hwcfg);
2281 	}
2282 
2283 	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2284 		sc->bge_tbi = 1;
2285 
2286 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
2287 	if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2288 		sc->bge_tbi = 1;
2289 
2290 	if (sc->bge_tbi) {
2291 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2292 		    bge_ifmedia_upd, bge_ifmedia_sts);
2293 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2294 		ifmedia_add(&sc->bge_ifmedia,
2295 		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2296 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2297 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2298 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2299 	} else {
2300 		/*
2301 		 * Do transceiver setup.
2302 		 */
2303 		if (mii_phy_probe(dev, &sc->bge_miibus,
2304 		    bge_ifmedia_upd, bge_ifmedia_sts)) {
2305 			printf("bge%d: MII without any PHY!\n", sc->bge_unit);
2306 			bge_release_resources(sc);
2307 			error = ENXIO;
2308 			goto fail;
2309 		}
2310 	}
2311 
2312 	/*
2313 	 * When using the BCM5701 in PCI-X mode, data corruption has
2314 	 * been observed in the first few bytes of some received packets.
2315 	 * Aligning the packet buffer in memory eliminates the corruption.
2316 	 * Unfortunately, this misaligns the packet payloads.  On platforms
2317 	 * which do not support unaligned accesses, we will realign the
2318 	 * payloads by copying the received packets.
2319 	 */
2320 	switch (sc->bge_chipid) {
2321 	case BGE_CHIPID_BCM5701_A0:
2322 	case BGE_CHIPID_BCM5701_B0:
2323 	case BGE_CHIPID_BCM5701_B2:
2324 	case BGE_CHIPID_BCM5701_B5:
2325 		/* If in PCI-X mode, work around the alignment bug. */
2326 		if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2327 		    (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
2328 		    BGE_PCISTATE_PCI_BUSSPEED)
2329 			sc->bge_rx_alignment_bug = 1;
2330 		break;
2331 	}
2332 
2333 	/*
2334 	 * Call MI attach routine.
2335 	 */
2336 	ether_ifattach(ifp, eaddr);
2337 	callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE);
2338 
2339 	/*
2340 	 * Hookup IRQ last.
2341 	 */
2342 	error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2343 	   bge_intr, sc, &sc->bge_intrhand);
2344 
2345 	if (error) {
2346 		bge_detach(dev);
2347 		printf("bge%d: couldn't set up irq\n", unit);
2348 	}
2349 
2350 fail:
2351 	return(error);
2352 }
2353 
2354 static int
2355 bge_detach(dev)
2356 	device_t dev;
2357 {
2358 	struct bge_softc *sc;
2359 	struct ifnet *ifp;
2360 
2361 	sc = device_get_softc(dev);
2362 	ifp = sc->bge_ifp;
2363 
2364 #ifdef DEVICE_POLLING
2365 	if (ifp->if_capenable & IFCAP_POLLING)
2366 		ether_poll_deregister(ifp);
2367 #endif
2368 
2369 	BGE_LOCK(sc);
2370 	bge_stop(sc);
2371 	bge_reset(sc);
2372 	BGE_UNLOCK(sc);
2373 
2374 	ether_ifdetach(ifp);
2375 
2376 	if (sc->bge_tbi) {
2377 		ifmedia_removeall(&sc->bge_ifmedia);
2378 	} else {
2379 		bus_generic_detach(dev);
2380 		device_delete_child(dev, sc->bge_miibus);
2381 	}
2382 
2383 	bge_release_resources(sc);
2384 
2385 	return(0);
2386 }
2387 
2388 static void
2389 bge_release_resources(sc)
2390 	struct bge_softc *sc;
2391 {
2392 	device_t dev;
2393 
2394 	dev = sc->bge_dev;
2395 
2396 	if (sc->bge_vpd_prodname != NULL)
2397 		free(sc->bge_vpd_prodname, M_DEVBUF);
2398 
2399 	if (sc->bge_vpd_readonly != NULL)
2400 		free(sc->bge_vpd_readonly, M_DEVBUF);
2401 
2402 	if (sc->bge_intrhand != NULL)
2403 		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2404 
2405 	if (sc->bge_irq != NULL)
2406 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2407 
2408 	if (sc->bge_res != NULL)
2409 		bus_release_resource(dev, SYS_RES_MEMORY,
2410 		    BGE_PCI_BAR0, sc->bge_res);
2411 
2412 	if (sc->bge_ifp != NULL)
2413 		if_free(sc->bge_ifp);
2414 
2415 	bge_dma_free(sc);
2416 
2417 	if (mtx_initialized(&sc->bge_mtx))	/* XXX */
2418 		BGE_LOCK_DESTROY(sc);
2419 
2420 	return;
2421 }
2422 
2423 static void
2424 bge_reset(sc)
2425 	struct bge_softc *sc;
2426 {
2427 	device_t dev;
2428 	u_int32_t cachesize, command, pcistate, reset;
2429 	int i, val = 0;
2430 
2431 	dev = sc->bge_dev;
2432 
2433 	/* Save some important PCI state. */
2434 	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2435 	command = pci_read_config(dev, BGE_PCI_CMD, 4);
2436 	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2437 
2438 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2439 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2440 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2441 
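	/*
	 * BGE_MISCCFG_RESET_CORE_CLOCKS requests the core clock reset; the
	 * (65 << 1) field appears to set the core clock timer prescaler so
	 * that a 66 MHz core clock yields a 1 us timer tick.
	 */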
2442 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2443 
2444 	/* XXX: Broadcom Linux driver. */
2445 	if (sc->bge_pcie) {
2446 		if (CSR_READ_4(sc, 0x7e2c) == 0x60)	/* PCIE 1.0 */
2447 			CSR_WRITE_4(sc, 0x7e2c, 0x20);
2448 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2449 			/* Prevent PCIE link training during global reset */
2450 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2451 			reset |= (1<<29);
2452 		}
2453 	}
2454 
2455 	/* Issue global reset */
2456 	bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2457 
2458 	DELAY(1000);
2459 
2460 	/* XXX: Broadcom Linux driver. */
2461 	if (sc->bge_pcie) {
2462 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2463 			uint32_t v;
2464 
2465 			DELAY(500000); /* wait for link training to complete */
2466 			v = pci_read_config(dev, 0xc4, 4);
2467 			pci_write_config(dev, 0xc4, v | (1<<15), 4);
2468 		}
2469 		/* Set PCIE max payload size and clear error status. */
2470 		pci_write_config(dev, 0xd8, 0xf5000, 4);
2471 	}
2472 
2473 	/* Reset some of the PCI state that got zapped by reset */
2474 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2475 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2476 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2477 	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2478 	pci_write_config(dev, BGE_PCI_CMD, command, 4);
2479 	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2480 
2481 	/* Enable memory arbiter. */
2482 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2483 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
2484 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2485 
2486 	/*
2487 	 * Prevent PXE restart: write a magic number to the
2488 	 * general communications memory at 0xB50.
2489 	 */
2490 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2491 	/*
2492 	 * Poll the value location we just wrote until
2493 	 * we see the 1's complement of the magic number.
2494 	 * This indicates that the firmware initialization
2495 	 * is complete.
2496 	 */
2497 	for (i = 0; i < BGE_TIMEOUT; i++) {
2498 		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2499 		if (val == ~BGE_MAGIC_NUMBER)
2500 			break;
2501 		DELAY(10);
2502 	}
2503 
2504 	if (i == BGE_TIMEOUT) {
2505 		printf("bge%d: firmware handshake timed out\n", sc->bge_unit);
2506 		return;
2507 	}
2508 
2509 	/*
2510 	 * XXX Wait for the value of the PCISTATE register to
2511 	 * return to its original pre-reset state. This is a
2512 	 * fairly good indicator of reset completion. If we don't
2513 	 * wait for the reset to fully complete, trying to read
2514 	 * from the device's non-PCI registers may yield garbage
2515 	 * results.
2516 	 */
2517 	for (i = 0; i < BGE_TIMEOUT; i++) {
2518 		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2519 			break;
2520 		DELAY(10);
2521 	}
2522 
2523 	/* Fix up byte swapping */
2524 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
2525 	    BGE_MODECTL_BYTESWAP_DATA);
2526 
2527 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2528 
2529 	/*
2530 	 * The 5704 in TBI mode apparently needs some special
2531 	 * adjustment to ensure the SERDES drive level is set
2532 	 * to 1.2V.
2533 	 */
2534 	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
2535 		uint32_t serdescfg;
2536 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2537 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
2538 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2539 	}
2540 
2541 	/* XXX: Broadcom Linux driver. */
2542 	if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2543 		uint32_t v;
2544 
2545 		v = CSR_READ_4(sc, 0x7c00);
2546 		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2547 	}
2548 	DELAY(10000);
2549 
2550 	return;
2551 }
2552 
2553 /*
2554  * Frame reception handling. This is called if there's a frame
2555  * on the receive return list.
2556  *
2557  * Note: we have to be able to handle two possibilities here:
2558  * 1) the frame is from the jumbo receive ring
2559  * 2) the frame is from the standard receive ring
2560  */
2561 
2562 static void
2563 bge_rxeof(sc)
2564 	struct bge_softc *sc;
2565 {
2566 	struct ifnet *ifp;
2567 	int stdcnt = 0, jumbocnt = 0;
2568 
2569 	BGE_LOCK_ASSERT(sc);
2570 
2571 	ifp = sc->bge_ifp;
2572 
2573 	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2574 	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTWRITE);
2575 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2576 	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
2577 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2578 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2579 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2580 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
2581 		    BUS_DMASYNC_POSTREAD);
2582 	}
2583 
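	/*
	 * Walk the RX return ring from our saved consumer index up to the
	 * producer index reported in the status block, handing each completed
	 * frame to the network stack and replacing its receive buffer.
	 */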
2584 	while(sc->bge_rx_saved_considx !=
2585 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2586 		struct bge_rx_bd	*cur_rx;
2587 		u_int32_t		rxidx;
2588 		struct ether_header	*eh;
2589 		struct mbuf		*m = NULL;
2590 		u_int16_t		vlan_tag = 0;
2591 		int			have_tag = 0;
2592 
2593 #ifdef DEVICE_POLLING
2594 		if (ifp->if_capenable & IFCAP_POLLING) {
2595 			if (sc->rxcycles <= 0)
2596 				break;
2597 			sc->rxcycles--;
2598 		}
2599 #endif
2600 
2601 		cur_rx =
2602 		    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2603 
2604 		rxidx = cur_rx->bge_idx;
2605 		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2606 
2607 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2608 			have_tag = 1;
2609 			vlan_tag = cur_rx->bge_vlan_tag;
2610 		}
2611 
2612 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2613 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2614 			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
2615 			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
2616 			    BUS_DMASYNC_POSTREAD);
2617 			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
2618 			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
2619 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2620 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2621 			jumbocnt++;
2622 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2623 				ifp->if_ierrors++;
2624 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2625 				continue;
2626 			}
2627 			if (bge_newbuf_jumbo(sc,
2628 			    sc->bge_jumbo, NULL) == ENOBUFS) {
2629 				ifp->if_ierrors++;
2630 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2631 				continue;
2632 			}
2633 		} else {
2634 			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2635 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2636 			    sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2637 			    BUS_DMASYNC_POSTREAD);
2638 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2639 			    sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2640 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2641 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2642 			stdcnt++;
2643 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2644 				ifp->if_ierrors++;
2645 				bge_newbuf_std(sc, sc->bge_std, m);
2646 				continue;
2647 			}
2648 			if (bge_newbuf_std(sc, sc->bge_std,
2649 			    NULL) == ENOBUFS) {
2650 				ifp->if_ierrors++;
2651 				bge_newbuf_std(sc, sc->bge_std, m);
2652 				continue;
2653 			}
2654 		}
2655 
2656 		ifp->if_ipackets++;
2657 #ifndef __i386__
2658 		/*
2659 		 * The i386 allows unaligned accesses, but for other
2660 		 * platforms we must make sure the payload is aligned.
2661 		 */
2662 		if (sc->bge_rx_alignment_bug) {
2663 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2664 			    cur_rx->bge_len);
2665 			m->m_data += ETHER_ALIGN;
2666 		}
2667 #endif
2668 		eh = mtod(m, struct ether_header *);
2669 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2670 		m->m_pkthdr.rcvif = ifp;
2671 
2672 #if 0 /* currently broken for some packets, possibly related to TCP options */
2673 		if (ifp->if_capenable & IFCAP_RXCSUM) {
2674 			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2675 			if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2676 				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2677 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2678 				m->m_pkthdr.csum_data =
2679 				    cur_rx->bge_tcp_udp_csum;
2680 				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
2681 			}
2682 		}
2683 #endif
2684 
2685 		/*
2686 		 * If we received a packet with a vlan tag,
2687 		 * attach that information to the packet.
2688 		 */
2689 		if (have_tag)
2690 			VLAN_INPUT_TAG(ifp, m, vlan_tag, continue);
2691 
2692 		BGE_UNLOCK(sc);
2693 		(*ifp->if_input)(ifp, m);
2694 		BGE_LOCK(sc);
2695 	}
2696 
2697 	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2698 	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE);
2699 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2700 	    sc->bge_cdata.bge_rx_std_ring_map,
2701 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_PREWRITE);
2702 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2703 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2704 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2705 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
2706 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2707 	}
2708 
2709 	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2710 	if (stdcnt)
2711 		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2712 	if (jumbocnt)
2713 		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2714 
2715 	return;
2716 }
2717 
2718 static void
2719 bge_txeof(sc)
2720 	struct bge_softc *sc;
2721 {
2722 	struct bge_tx_bd *cur_tx = NULL;
2723 	struct ifnet *ifp;
2724 
2725 	BGE_LOCK_ASSERT(sc);
2726 
2727 	ifp = sc->bge_ifp;
2728 
2729 	/*
2730 	 * Go through our tx ring and free mbufs for those
2731 	 * frames that have been sent.
2732 	 */
2733 	while (sc->bge_tx_saved_considx !=
2734 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2735 		u_int32_t		idx = 0;
2736 
2737 		idx = sc->bge_tx_saved_considx;
2738 		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2739 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2740 			ifp->if_opackets++;
2741 		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2742 			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2743 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2744 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2745 			    sc->bge_cdata.bge_tx_dmamap[idx]);
2746 		}
2747 		sc->bge_txcnt--;
2748 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2749 		ifp->if_timer = 0;
2750 	}
2751 
2752 	if (cur_tx != NULL)
2753 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2754 
2755 	return;
2756 }
2757 
2758 #ifdef DEVICE_POLLING
2759 static void
2760 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2761 {
2762 	struct bge_softc *sc = ifp->if_softc;
2763 
2764 	BGE_LOCK(sc);
2765 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2766 		bge_poll_locked(ifp, cmd, count);
2767 	BGE_UNLOCK(sc);
2768 }
2769 
2770 static void
2771 bge_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
2772 {
2773 	struct bge_softc *sc = ifp->if_softc;
2774 
2775 	BGE_LOCK_ASSERT(sc);
2776 
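	/* Bound the RX work done in this poll by the caller's count. */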
2777 	sc->rxcycles = count;
2778 	bge_rxeof(sc);
2779 	bge_txeof(sc);
2780 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2781 		bge_start_locked(ifp);
2782 
2783 	if (cmd == POLL_AND_CHECK_STATUS) {
2784 		uint32_t statusword;
2785 
2786 		bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2787 		    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTWRITE);
2788 
2789 		statusword = atomic_readandclear_32(
		    &sc->bge_ldata.bge_status_block->bge_status);
2790 
2791 		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
2792 		    statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
2793 			bge_link_upd(sc);
2794 
2795 		bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2796 		    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
2797 	}
2798 }
2799 #endif /* DEVICE_POLLING */
2800 
2801 static void
2802 bge_intr(xsc)
2803 	void *xsc;
2804 {
2805 	struct bge_softc *sc;
2806 	struct ifnet *ifp;
2807 	uint32_t statusword;
2808 
2809 	sc = xsc;
2810 
2811 	BGE_LOCK(sc);
2812 
2813 	ifp = sc->bge_ifp;
2814 
2815 #ifdef DEVICE_POLLING
2816 	if (ifp->if_capenable & IFCAP_POLLING) {
2817 		BGE_UNLOCK(sc);
2818 		return;
2819 	}
2820 #endif
2821 
2822 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2823 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTWRITE);
2824 
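	/*
	 * Atomically fetch and clear the status word.  The chip sets the
	 * updated/link-changed bits each time it DMAs a new status block, so
	 * clearing them here lets the next update be detected.
	 */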
2825 	statusword =
2826 	    atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
2827 
2828 #ifdef notdef
2829 	/* Avoid this for now -- checking this register is expensive. */
2830 	/* Make sure this is really our interrupt. */
2831 	if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2832 		return;
2833 #endif
2834 	/* Ack interrupt and stop others from occurring. */
2835 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2836 
2837 	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
2838 	    statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
2839 		bge_link_upd(sc);
2840 
2841 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2842 		/* Check RX return ring producer/consumer */
2843 		bge_rxeof(sc);
2844 
2845 		/* Check TX ring producer/consumer */
2846 		bge_txeof(sc);
2847 	}
2848 
2849 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2850 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);
2851 
2852 	bge_handle_events(sc);
2853 
2854 	/* Re-enable interrupts. */
2855 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2856 
2857 	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2858 	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2859 		bge_start_locked(ifp);
2860 
2861 	BGE_UNLOCK(sc);
2862 
2863 	return;
2864 }
2865 
2866 static void
2867 bge_tick_locked(sc)
2868 	struct bge_softc *sc;
2869 {
2870 	struct mii_data *mii = NULL;
2871 	struct ifnet *ifp;
2872 
2873 	BGE_LOCK_ASSERT(sc);
2874 
2875 	ifp = sc->bge_ifp;
2876 
2877 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2878 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
2879 		bge_stats_update_regs(sc);
2880 	else
2881 		bge_stats_update(sc);
2882 
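	/*
	 * For TBI (fiber) interfaces the link state is polled here from the
	 * MAC status register rather than being taken from a PHY via mii(4).
	 */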
2883 	if (sc->bge_tbi) {
2884 		if (!sc->bge_link) {
2885 			if (CSR_READ_4(sc, BGE_MAC_STS) &
2886 			    BGE_MACSTAT_TBI_PCS_SYNCHED) {
2887 				sc->bge_link++;
2888 				if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
2889 					BGE_CLRBIT(sc, BGE_MAC_MODE,
2890 					    BGE_MACMODE_TBI_SEND_CFGS);
2891 				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
2892 				if (bootverbose)
2893 					printf("bge%d: gigabit link up\n",
2894 					    sc->bge_unit);
2895 				if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2896 					bge_start_locked(ifp);
2897 			}
2898 		}
2899 	}
2900 	else {
2901 		mii = device_get_softc(sc->bge_miibus);
2902 		mii_tick(mii);
2903 
2904 		if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
2905 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2906 			sc->bge_link++;
2907 			if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
2908 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)&&
2909 			    bootverbose)
2910 				printf("bge%d: gigabit link up\n", sc->bge_unit);
2911 			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2912 				bge_start_locked(ifp);
2913 		}
2914 	}
2915 
2916 	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
2917 }
2918 
2919 static void
2920 bge_tick(xsc)
2921 	void *xsc;
2922 {
2923 	struct bge_softc *sc;
2924 
2925 	sc = xsc;
2926 
2927 	BGE_LOCK(sc);
2928 	bge_tick_locked(sc);
2929 	BGE_UNLOCK(sc);
2930 }
2931 
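/*
 * Gather statistics from the chip's statistics registers.  This path is
 * used for the 5705/5750 class chips (see bge_tick_locked()), which are
 * not handled through the NIC-memory statistics block.
 */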
2932 static void
2933 bge_stats_update_regs(sc)
2934 	struct bge_softc *sc;
2935 {
2936 	struct ifnet *ifp;
2937 	struct bge_mac_stats_regs stats;
2938 	u_int32_t *s;
2939 	int i;
2940 
2941 	ifp = sc->bge_ifp;
2942 
2943 	s = (u_int32_t *)&stats;
2944 	for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2945 		*s = CSR_READ_4(sc, BGE_RX_STATS + i);
2946 		s++;
2947 	}
2948 
2949 	ifp->if_collisions +=
2950 	   (stats.dot3StatsSingleCollisionFrames +
2951 	   stats.dot3StatsMultipleCollisionFrames +
2952 	   stats.dot3StatsExcessiveCollisions +
2953 	   stats.dot3StatsLateCollisions) -
2954 	   ifp->if_collisions;
2955 
2956 	return;
2957 }
2958 
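/*
 * Gather statistics from the statistics block the chip maintains in NIC
 * memory, accessed here through the memory window mapping.
 */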
2959 static void
2960 bge_stats_update(sc)
2961 	struct bge_softc *sc;
2962 {
2963 	struct ifnet *ifp;
2964 	struct bge_stats *stats;
2965 
2966 	ifp = sc->bge_ifp;
2967 
2968 	stats = (struct bge_stats *)(sc->bge_vhandle +
2969 	    BGE_MEMWIN_START + BGE_STATS_BLOCK);
2970 
2971 	ifp->if_collisions +=
2972 	   (stats->txstats.dot3StatsSingleCollisionFrames.bge_addr_lo +
2973 	   stats->txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo +
2974 	   stats->txstats.dot3StatsExcessiveCollisions.bge_addr_lo +
2975 	   stats->txstats.dot3StatsLateCollisions.bge_addr_lo) -
2976 	   ifp->if_collisions;
2977 
2978 #ifdef notdef
2979 	ifp->if_collisions +=
2980 	   (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2981 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2982 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2983 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2984 	   ifp->if_collisions;
2985 #endif
2986 
2987 	return;
2988 }
2989 
2990 /*
2991  * Encapsulate an mbuf chain in the tx ring  by coupling the mbuf data
2992  * pointers to descriptors.
2993  */
2994 static int
2995 bge_encap(sc, m_head, txidx)
2996 	struct bge_softc *sc;
2997 	struct mbuf *m_head;
2998 	u_int32_t *txidx;
2999 {
3000 	struct bge_tx_bd	*f = NULL;
3001 	u_int16_t		csum_flags = 0;
3002 	struct m_tag		*mtag;
3003 	struct bge_dmamap_arg	ctx;
3004 	bus_dmamap_t		map;
3005 	int			error;
3006 
3007 
3009 		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
3010 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3011 		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
3012 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3013 		if (m_head->m_flags & M_LASTFRAG)
3014 			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3015 		else if (m_head->m_flags & M_FRAG)
3016 			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3017 	}
3018 
3019 	mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m_head);
3020 
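	/*
	 * Hand the mbuf chain to bus_dmamap_load_mbuf().  The callback
	 * (bge_dma_map_tx_desc) fills in one TX buffer descriptor per DMA
	 * segment from the ring, flags and starting index passed in ctx,
	 * and the caller below picks up the index of the last descriptor
	 * used from ctx.bge_idx and the descriptor count from
	 * ctx.bge_maxsegs.
	 */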
3021 	ctx.sc = sc;
3022 	ctx.bge_idx = *txidx;
3023 	ctx.bge_ring = sc->bge_ldata.bge_tx_ring;
3024 	ctx.bge_flags = csum_flags;
3025 	/*
3026 	 * Sanity check: avoid coming within 16 descriptors
3027 	 * of the end of the ring.
3028 	 */
3029 	ctx.bge_maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - 16;
3030 
3031 	map = sc->bge_cdata.bge_tx_dmamap[*txidx];
3032 	error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
3033 	    m_head, bge_dma_map_tx_desc, &ctx, BUS_DMA_NOWAIT);
3034 
3035 	if (error || ctx.bge_maxsegs == 0 /*||
3036 	    ctx.bge_idx == sc->bge_tx_saved_considx*/)
3037 		return (ENOBUFS);
3038 
3039 	/*
3040 	 * Insure that the map for this transmission
3041 	 * Ensure that the map for this transmission
3042 	 * in this chain.
3043 	 */
3044 	sc->bge_cdata.bge_tx_dmamap[*txidx] =
3045 	    sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx];
3046 	sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx] = map;
3047 	sc->bge_cdata.bge_tx_chain[ctx.bge_idx] = m_head;
3048 	sc->bge_txcnt += ctx.bge_maxsegs;
3049 	f = &sc->bge_ldata.bge_tx_ring[*txidx];
3050 	if (mtag != NULL) {
3051 		f->bge_flags |= htole16(BGE_TXBDFLAG_VLAN_TAG);
3052 		f->bge_vlan_tag = htole16(VLAN_TAG_VALUE(mtag));
3053 	} else {
3054 		f->bge_vlan_tag = 0;
3055 	}
3056 
3057 	BGE_INC(ctx.bge_idx, BGE_TX_RING_CNT);
3058 	*txidx = ctx.bge_idx;
3059 
3060 	return(0);
3061 }
3062 
3063 /*
3064  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3065  * to the mbuf data regions directly in the transmit descriptors.
3066  */
3067 static void
3068 bge_start_locked(ifp)
3069 	struct ifnet *ifp;
3070 {
3071 	struct bge_softc *sc;
3072 	struct mbuf *m_head = NULL;
3073 	u_int32_t prodidx = 0;
3074 	int count = 0;
3075 
3076 	sc = ifp->if_softc;
3077 
3078 	if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3079 		return;
3080 
3081 	prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
3082 
3083 	while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3084 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3085 		if (m_head == NULL)
3086 			break;
3087 
3088 		/*
3089 		 * XXX
3090 		 * The code inside the if() block is never reached since we
3091 		 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3092 		 * requests to checksum TCP/UDP in a fragmented packet.
3093 		 *
3094 		 * XXX
3095 		 * safety overkill.  If this is a fragmented packet chain
3096 		 * with delayed TCP/UDP checksums, then only encapsulate
3097 		 * it if we have enough descriptors to handle the entire
3098 		 * chain at once.
3099 		 * (paranoia -- may not actually be needed)
3100 		 */
3101 		if (m_head->m_flags & M_FIRSTFRAG &&
3102 		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3103 			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3104 			    m_head->m_pkthdr.csum_data + 16) {
3105 				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3106 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3107 				break;
3108 			}
3109 		}
3110 
3111 		/*
3112 		 * Pack the data into the transmit ring. If we
3113 		 * don't have room, set the OACTIVE flag and wait
3114 		 * for the NIC to drain the ring.
3115 		 */
3116 		if (bge_encap(sc, m_head, &prodidx)) {
3117 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3118 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3119 			break;
3120 		}
3121 		++count;
3122 
3123 		/*
3124 		 * If there's a BPF listener, bounce a copy of this frame
3125 		 * to him.
3126 		 */
3127 		BPF_MTAP(ifp, m_head);
3128 	}
3129 
3130 	if (count == 0) {
3131 		/* no packets were dequeued */
3132 		return;
3133 	}
3134 
3135 	/* Transmit */
3136 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3137 	/* 5700 b2 errata */
3138 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3139 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3140 
3141 	/*
3142 	 * Set a timeout in case the chip goes out to lunch.
3143 	 */
3144 	ifp->if_timer = 5;
3145 
3146 	return;
3147 }
3148 
3149 /*
3150  * if_start entry point.  Acquires the driver lock and calls
3151  * bge_start_locked() to do the actual work.
3152  */
3153 static void
3154 bge_start(ifp)
3155 	struct ifnet *ifp;
3156 {
3157 	struct bge_softc *sc;
3158 
3159 	sc = ifp->if_softc;
3160 	BGE_LOCK(sc);
3161 	bge_start_locked(ifp);
3162 	BGE_UNLOCK(sc);
3163 }
3164 
3165 static void
3166 bge_init_locked(sc)
3167 	struct bge_softc *sc;
3168 {
3169 	struct ifnet *ifp;
3170 	u_int16_t *m;
3171 
3172 	BGE_LOCK_ASSERT(sc);
3173 
3174 	ifp = sc->bge_ifp;
3175 
3176 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3177 		return;
3178 
3179 	/* Cancel pending I/O and flush buffers. */
3180 	bge_stop(sc);
3181 	bge_reset(sc);
3182 	bge_chipinit(sc);
3183 
3184 	/*
3185 	 * Init the various state machines, ring
3186 	 * control blocks and firmware.
3187 	 */
3188 	if (bge_blockinit(sc)) {
3189 		printf("bge%d: initialization failure\n", sc->bge_unit);
3190 		return;
3191 	}
3192 
3193 	ifp = sc->bge_ifp;
3194 
3195 	/* Specify MTU. */
3196 	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3197 	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
3198 
3199 	/* Load our MAC address. */
3200 	m = (u_int16_t *)IF_LLADDR(sc->bge_ifp);
3201 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3202 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3203 
3204 	/* Enable or disable promiscuous mode as needed. */
3205 	if (ifp->if_flags & IFF_PROMISC) {
3206 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3207 	} else {
3208 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3209 	}
3210 
3211 	/* Program multicast filter. */
3212 	bge_setmulti(sc);
3213 
3214 	/* Init RX ring. */
3215 	bge_init_rx_ring_std(sc);
3216 
3217 	/*
3218 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3219 	 * memory to ensure that the chip has in fact read the first
3220 	 * entry of the ring.
3221 	 */
3222 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3223 		u_int32_t		v, i;
3224 		for (i = 0; i < 10; i++) {
3225 			DELAY(20);
3226 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3227 			if (v == (MCLBYTES - ETHER_ALIGN))
3228 				break;
3229 		}
3230 		if (i == 10)
3231 			printf ("bge%d: 5705 A0 chip failed to load RX ring\n",
3232 			    sc->bge_unit);
3233 	}
3234 
3235 	/* Init jumbo RX ring. */
3236 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3237 		bge_init_rx_ring_jumbo(sc);
3238 
3239 	/* Init our RX return ring index */
3240 	sc->bge_rx_saved_considx = 0;
3241 
3242 	/* Init TX ring. */
3243 	bge_init_tx_ring(sc);
3244 
3245 	/* Turn on transmitter */
3246 	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3247 
3248 	/* Turn on receiver */
3249 	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3250 
3251 	/* Tell firmware we're alive. */
3252 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3253 
3254 #ifdef DEVICE_POLLING
3255 	/* Disable interrupts if we are polling. */
3256 	if (ifp->if_capenable & IFCAP_POLLING) {
3257 		BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3258 		    BGE_PCIMISCCTL_MASK_PCI_INTR);
3259 		CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3260 		CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3261 		CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3262 	} else
3263 #endif
3264 
3265 	/* Enable host interrupts. */
3266 	{
3267 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3268 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3269 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3270 	}
3271 
3272 	bge_ifmedia_upd(ifp);
3273 
3274 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3275 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3276 
3277 	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3278 
3279 	return;
3280 }
3281 
3282 static void
3283 bge_init(xsc)
3284 	void *xsc;
3285 {
3286 	struct bge_softc *sc = xsc;
3287 
3288 	BGE_LOCK(sc);
3289 	bge_init_locked(sc);
3290 	BGE_UNLOCK(sc);
3291 
3292 	return;
3293 }
3294 
3295 /*
3296  * Set media options.
3297  */
3298 static int
3299 bge_ifmedia_upd(ifp)
3300 	struct ifnet *ifp;
3301 {
3302 	struct bge_softc *sc;
3303 	struct mii_data *mii;
3304 	struct ifmedia *ifm;
3305 
3306 	sc = ifp->if_softc;
3307 	ifm = &sc->bge_ifmedia;
3308 
3309 	/* If this is a 1000baseX NIC, enable the TBI port. */
3310 	if (sc->bge_tbi) {
3311 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3312 			return(EINVAL);
3313 		switch(IFM_SUBTYPE(ifm->ifm_media)) {
3314 		case IFM_AUTO:
3315 #ifndef BGE_FAKE_AUTONEG
3316 			/*
3317 			 * The BCM5704 ASIC appears to have a special
3318 			 * mechanism for programming the autoneg
3319 			 * advertisement registers in TBI mode.
3320 			 */
3321 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3322 				uint32_t sgdig;
3323 				CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3324 				sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3325 				sgdig |= BGE_SGDIGCFG_AUTO|
3326 				    BGE_SGDIGCFG_PAUSE_CAP|
3327 				    BGE_SGDIGCFG_ASYM_PAUSE;
3328 				CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3329 				    sgdig|BGE_SGDIGCFG_SEND);
3330 				DELAY(5);
3331 				CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3332 			}
3333 #endif
3334 			break;
3335 		case IFM_1000_SX:
3336 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3337 				BGE_CLRBIT(sc, BGE_MAC_MODE,
3338 				    BGE_MACMODE_HALF_DUPLEX);
3339 			} else {
3340 				BGE_SETBIT(sc, BGE_MAC_MODE,
3341 				    BGE_MACMODE_HALF_DUPLEX);
3342 			}
3343 			break;
3344 		default:
3345 			return(EINVAL);
3346 		}
3347 		return(0);
3348 	}
3349 
3350 	mii = device_get_softc(sc->bge_miibus);
3351 	sc->bge_link = 0;
3352 	if (mii->mii_instance) {
3353 		struct mii_softc *miisc;
3354 		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3355 		    miisc = LIST_NEXT(miisc, mii_list))
3356 			mii_phy_reset(miisc);
3357 	}
3358 	mii_mediachg(mii);
3359 
3360 	return(0);
3361 }
3362 
3363 /*
3364  * Report current media status.
3365  */
3366 static void
3367 bge_ifmedia_sts(ifp, ifmr)
3368 	struct ifnet *ifp;
3369 	struct ifmediareq *ifmr;
3370 {
3371 	struct bge_softc *sc;
3372 	struct mii_data *mii;
3373 
3374 	sc = ifp->if_softc;
3375 
3376 	if (sc->bge_tbi) {
3377 		ifmr->ifm_status = IFM_AVALID;
3378 		ifmr->ifm_active = IFM_ETHER;
3379 		if (CSR_READ_4(sc, BGE_MAC_STS) &
3380 		    BGE_MACSTAT_TBI_PCS_SYNCHED)
3381 			ifmr->ifm_status |= IFM_ACTIVE;
3382 		ifmr->ifm_active |= IFM_1000_SX;
3383 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3384 			ifmr->ifm_active |= IFM_HDX;
3385 		else
3386 			ifmr->ifm_active |= IFM_FDX;
3387 		return;
3388 	}
3389 
3390 	mii = device_get_softc(sc->bge_miibus);
3391 	mii_pollstat(mii);
3392 	ifmr->ifm_active = mii->mii_media_active;
3393 	ifmr->ifm_status = mii->mii_media_status;
3394 
3395 	return;
3396 }
3397 
3398 static int
3399 bge_ioctl(ifp, command, data)
3400 	struct ifnet *ifp;
3401 	u_long command;
3402 	caddr_t data;
3403 {
3404 	struct bge_softc *sc = ifp->if_softc;
3405 	struct ifreq *ifr = (struct ifreq *) data;
3406 	int mask, error = 0;
3407 	struct mii_data *mii;
3408 
3409 	switch(command) {
3410 	case SIOCSIFMTU:
3411 		/* Disallow jumbo frames on 5705. */
3412 		if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
3413 		      sc->bge_asicrev == BGE_ASICREV_BCM5750) &&
3414 		    ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
3415 			error = EINVAL;
3416 		else {
3417 			ifp->if_mtu = ifr->ifr_mtu;
3418 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3419 			bge_init(sc);
3420 		}
3421 		break;
3422 	case SIOCSIFFLAGS:
3423 		BGE_LOCK(sc);
3424 		if (ifp->if_flags & IFF_UP) {
3425 			/*
3426 			 * If only the state of the PROMISC flag changed,
3427 			 * then just use the 'set promisc mode' command
3428 			 * instead of reinitializing the entire NIC. Doing
3429 			 * a full re-init means reloading the firmware and
3430 			 * waiting for it to start up, which may take a
3431 			 * second or two.
3432 			 */
3433 			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3434 			    ifp->if_flags & IFF_PROMISC &&
3435 			    !(sc->bge_if_flags & IFF_PROMISC)) {
3436 				BGE_SETBIT(sc, BGE_RX_MODE,
3437 				    BGE_RXMODE_RX_PROMISC);
3438 			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3439 			    !(ifp->if_flags & IFF_PROMISC) &&
3440 			    sc->bge_if_flags & IFF_PROMISC) {
3441 				BGE_CLRBIT(sc, BGE_RX_MODE,
3442 				    BGE_RXMODE_RX_PROMISC);
3443 			} else
3444 				bge_init_locked(sc);
3445 		} else {
3446 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3447 				bge_stop(sc);
3448 			}
3449 		}
3450 		sc->bge_if_flags = ifp->if_flags;
3451 		BGE_UNLOCK(sc);
3452 		error = 0;
3453 		break;
3454 	case SIOCADDMULTI:
3455 	case SIOCDELMULTI:
3456 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3457 			BGE_LOCK(sc);
3458 			bge_setmulti(sc);
3459 			BGE_UNLOCK(sc);
3460 			error = 0;
3461 		}
3462 		break;
3463 	case SIOCSIFMEDIA:
3464 	case SIOCGIFMEDIA:
3465 		if (sc->bge_tbi) {
3466 			error = ifmedia_ioctl(ifp, ifr,
3467 			    &sc->bge_ifmedia, command);
3468 		} else {
3469 			mii = device_get_softc(sc->bge_miibus);
3470 			error = ifmedia_ioctl(ifp, ifr,
3471 			    &mii->mii_media, command);
3472 		}
3473 		break;
3474 	case SIOCSIFCAP:
3475 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3476 #ifdef DEVICE_POLLING
3477 		if (mask & IFCAP_POLLING) {
3478 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
3479 				error = ether_poll_register(bge_poll, ifp);
3480 				if (error)
3481 					return(error);
3482 				BGE_LOCK(sc);
3483 				BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3484 				    BGE_PCIMISCCTL_MASK_PCI_INTR);
3485 				CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3486 				CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3487 				CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3488 				ifp->if_capenable |= IFCAP_POLLING;
3489 				BGE_UNLOCK(sc);
3490 			} else {
3491 				error = ether_poll_deregister(ifp);
3492 				/* Enable interrupt even in error case */
3493 				BGE_LOCK(sc);
3494 				CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
3495 				CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
3496 				BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
3497 				    BGE_PCIMISCCTL_MASK_PCI_INTR);
3498 				CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3499 				ifp->if_capenable &= ~IFCAP_POLLING;
3500 				BGE_UNLOCK(sc);
3501 			}
3502 		}
3503 #endif
3504 		/* NB: the code for RX csum offload is disabled for now */
3505 		if (mask & IFCAP_TXCSUM) {
3506 			ifp->if_capenable ^= IFCAP_TXCSUM;
3507 			if (IFCAP_TXCSUM & ifp->if_capenable)
3508 				ifp->if_hwassist = BGE_CSUM_FEATURES;
3509 			else
3510 				ifp->if_hwassist = 0;
3511 		}
3512 		break;
3513 	default:
3514 		error = ether_ioctl(ifp, command, data);
3515 		break;
3516 	}
3517 
3518 	return(error);
3519 }
3520 
3521 static void
3522 bge_watchdog(ifp)
3523 	struct ifnet *ifp;
3524 {
3525 	struct bge_softc *sc;
3526 
3527 	sc = ifp->if_softc;
3528 
3529 	printf("bge%d: watchdog timeout -- resetting\n", sc->bge_unit);
3530 
3531 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3532 	bge_init(sc);
3533 
3534 	ifp->if_oerrors++;
3535 
3536 	return;
3537 }
3538 
3539 /*
3540  * Stop the adapter and free any mbufs allocated to the
3541  * RX and TX lists.
3542  */
3543 static void
3544 bge_stop(sc)
3545 	struct bge_softc *sc;
3546 {
3547 	struct ifnet *ifp;
3548 	struct ifmedia_entry *ifm;
3549 	struct mii_data *mii = NULL;
3550 	int mtmp, itmp;
3551 
3552 	BGE_LOCK_ASSERT(sc);
3553 
3554 	ifp = sc->bge_ifp;
3555 
3556 	if (!sc->bge_tbi)
3557 		mii = device_get_softc(sc->bge_miibus);
3558 
3559 	callout_stop(&sc->bge_stat_ch);
3560 
3561 	/*
3562 	 * Disable all of the receiver blocks
3563 	 */
3564 	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3565 	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3566 	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3567 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3568 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3569 		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3570 	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3571 	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3572 	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3573 
3574 	/*
3575 	 * Disable all of the transmit blocks
3576 	 */
3577 	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3578 	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3579 	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3580 	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3581 	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3582 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3583 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3584 		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3585 	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3586 
3587 	/*
3588 	 * Shut down all of the memory managers and related
3589 	 * state machines.
3590 	 */
3591 	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3592 	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3593 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3594 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3595 		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
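	/* Pulse a reset through all of the flow-through queues (FTQs). */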
3596 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3597 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3598 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3599 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
3600 		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3601 		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3602 	}
3603 
3604 	/* Disable host interrupts. */
3605 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3606 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3607 
3608 	/*
3609 	 * Tell firmware we're shutting down.
3610 	 */
3611 	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3612 
3613 	/* Free the RX lists. */
3614 	bge_free_rx_ring_std(sc);
3615 
3616 	/* Free jumbo RX list. */
3617 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3618 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3619 		bge_free_rx_ring_jumbo(sc);
3620 
3621 	/* Free TX buffers. */
3622 	bge_free_tx_ring(sc);
3623 
3624 	/*
3625 	 * Isolate/power down the PHY, but leave the media selection
3626 	 * unchanged so that things will be put back to normal when
3627 	 * we bring the interface back up.
3628 	 */
3629 	if (!sc->bge_tbi) {
3630 		itmp = ifp->if_flags;
3631 		ifp->if_flags |= IFF_UP;
3632 		/*
3633 		 * If we are called from bge_detach(), mii is already NULL.
3634 		 */
3635 		if (mii != NULL) {
3636 			ifm = mii->mii_media.ifm_cur;
3637 			mtmp = ifm->ifm_media;
3638 			ifm->ifm_media = IFM_ETHER|IFM_NONE;
3639 			mii_mediachg(mii);
3640 			ifm->ifm_media = mtmp;
3641 		}
3642 		ifp->if_flags = itmp;
3643 	}
3644 
3645 	sc->bge_link = 0;
3646 
3647 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3648 
3649 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3650 
3651 	return;
3652 }
3653 
3654 /*
3655  * Stop all chip I/O so that the kernel's probe routines don't
3656  * get confused by errant DMAs when rebooting.
3657  */
3658 static void
3659 bge_shutdown(device_t dev)
3660 {
3662 	struct bge_softc *sc;
3663 
3664 	sc = device_get_softc(dev);
3665 
3666 	BGE_LOCK(sc);
3667 	bge_stop(sc);
3668 	bge_reset(sc);
3669 	BGE_UNLOCK(sc);
3670 
3671 	return;
3672 }
3673 
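/*
 * Device suspend method: quiesce the chip so no DMA is in flight while
 * the system sleeps.
 */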
3674 static int
3675 bge_suspend(device_t dev)
3676 {
3677 	struct bge_softc *sc;
3678 
3679 	sc = device_get_softc(dev);
3680 	BGE_LOCK(sc);
3681 	bge_stop(sc);
3682 	BGE_UNLOCK(sc);
3683 
3684 	return (0);
3685 }
3686 
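/*
 * Device resume method: reinitialize the chip and restart transmission
 * if the interface is marked up.
 */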
3687 static int
3688 bge_resume(device_t dev)
3689 {
3690 	struct bge_softc *sc;
3691 	struct ifnet *ifp;
3692 
3693 	sc = device_get_softc(dev);
3694 	BGE_LOCK(sc);
3695 	ifp = sc->bge_ifp;
3696 	if (ifp->if_flags & IFF_UP) {
3697 		bge_init_locked(sc);
3698 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3699 			bge_start_locked(ifp);
3700 	}
3701 	BGE_UNLOCK(sc);
3702 
3703 	return (0);
3704 }
3705 
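/*
 * Process a link state change indication from the chip and acknowledge
 * the associated interrupt sources.
 */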
3706 static void
3707 bge_link_upd(struct bge_softc *sc)
3708 {
3710 	uint32_t status;
3711 
3712 	BGE_LOCK_ASSERT(sc);
3713 	/*
3714 	 * Process link state changes.
3715 	 * Grrr. The link status word in the status block does
3716 	 * not work correctly on the BCM5700 rev AX and BX chips,
3717 	 * according to all available information. Hence, we have
3718 	 * to enable MII interrupts in order to properly obtain
3719 	 * async link changes. Unfortunately, this also means that
3720 	 * we have to read the MAC status register to detect link
3721 	 * changes, thereby adding an additional register access to
3722 	 * the interrupt handler.
3723 	 */
3724 
3725 	if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
3726 		status = CSR_READ_4(sc, BGE_MAC_STS);
3727 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
3728 			sc->bge_link = 0;
3729 			callout_stop(&sc->bge_stat_ch);
3730 			bge_tick_locked(sc);
3731 			/* Clear the interrupt */
3732 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3733 			    BGE_EVTENB_MI_INTERRUPT);
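			/* Ack the PHY interrupt and re-arm its interrupt mask. */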
3734 			bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
3735 			bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
3736 			    BRGPHY_INTRS);
3737 		}
3738 		return;
3739 	}
3740 
3741 	/*
3742 	 * Sometimes PCS encoding errors are detected in
3743 	 * TBI mode (on fiber NICs), and for some reason
3744 	 * the chip will signal them as link changes.
3745 	 * If we get a link change event, but the 'PCS
3746 	 * encoding error' bit in the MAC status register
3747 	 * is set, don't bother doing a link check.
3748 	 * This avoids spurious "gigabit link up" messages
3749 	 * that sometimes appear on fiber NICs during
3750 	 * periods of heavy traffic. (There should be no
3751 	 * effect on copper NICs.)
3752 	 */
3753 	if (sc->bge_tbi) status = CSR_READ_4(sc, BGE_MAC_STS);
3754 
3755 	if (!sc->bge_tbi || !(status & (BGE_MACSTAT_PORT_DECODE_ERROR |
3756 		    BGE_MACSTAT_MI_COMPLETE))) {
3757 		sc->bge_link = 0;
3758 		callout_stop(&sc->bge_stat_ch);
3759 		bge_tick_locked(sc);
3760 	}
3761 
3762 	/* Clear the interrupt */
3763 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
3764 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
3765 	    BGE_MACSTAT_LINK_CHANGED);
3766 
3767 	/* Force a flush of the status block updates cached by the PCI bridge. */
3768 	CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
3769 }
3770 
3771