xref: /freebsd/sys/dev/bge/if_bge.c (revision 87569f75a91f298c52a71823c04d41cf53c88889)
1 /*-
2  * Copyright (c) 2001 Wind River Systems
3  * Copyright (c) 1997, 1998, 1999, 2001
4  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 /*
38  * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39  *
40  * The Broadcom BCM5700 is based on technology originally developed by
41  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45  * frames, highly configurable RX filtering, and 16 RX and TX queues
46  * (which, along with RX filter rules, can be used for QOS applications).
47  * Other features, such as TCP segmentation, may be available as part
48  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49  * firmware images can be stored in hardware and need not be compiled
50  * into the driver.
51  *
52  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53  * function on a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz PCI-X bus.
54  *
55  * The BCM5701 is a single-chip solution incorporating both the BCM5700
56  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57  * does not support external SSRAM.
58  *
59  * Broadcom also produces a variation of the BCM5700 under the "Altima"
60  * brand name, which is functionally similar but lacks PCI-X support.
61  *
62  * Without external SSRAM, you can have at most 4 TX rings,
63  * and the use of the mini RX ring is disabled. This seems to imply
64  * that these features are simply not available on the BCM5701. As a
65  * result, this driver does not implement any support for the mini RX
66  * ring.
67  */
68 
69 #ifdef HAVE_KERNEL_OPTION_HEADERS
70 #include "opt_device_polling.h"
71 #endif
72 
73 #include <sys/param.h>
74 #include <sys/endian.h>
75 #include <sys/systm.h>
76 #include <sys/sockio.h>
77 #include <sys/mbuf.h>
78 #include <sys/malloc.h>
79 #include <sys/kernel.h>
80 #include <sys/module.h>
81 #include <sys/socket.h>
82 
83 #include <net/if.h>
84 #include <net/if_arp.h>
85 #include <net/ethernet.h>
86 #include <net/if_dl.h>
87 #include <net/if_media.h>
88 
89 #include <net/bpf.h>
90 
91 #include <net/if_types.h>
92 #include <net/if_vlan_var.h>
93 
94 #include <netinet/in_systm.h>
95 #include <netinet/in.h>
96 #include <netinet/ip.h>
97 
98 #include <machine/clock.h>      /* for DELAY */
99 #include <machine/bus.h>
100 #include <machine/resource.h>
101 #include <sys/bus.h>
102 #include <sys/rman.h>
103 
104 #include <dev/mii/mii.h>
105 #include <dev/mii/miivar.h>
106 #include "miidevs.h"
107 #include <dev/mii/brgphyreg.h>
108 
109 #include <dev/pci/pcireg.h>
110 #include <dev/pci/pcivar.h>
111 
112 #include <dev/bge/if_bgereg.h>
113 
114 #include "opt_bge.h"
115 
116 #define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
117 #define ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
118 
119 MODULE_DEPEND(bge, pci, 1, 1, 1);
120 MODULE_DEPEND(bge, ether, 1, 1, 1);
121 MODULE_DEPEND(bge, miibus, 1, 1, 1);
122 
123 /* "device miibus" required.  See GENERIC if you get errors here. */
124 #include "miibus_if.h"
125 
126 /*
127  * Various supported device vendors/types and their names. Note: the
128  * spec seems to indicate that the hardware still has Alteon's vendor
129  * ID burned into it, though it will always be overridden by the vendor
130  * ID in the EEPROM. Just to be safe, we cover all possibilities.
131  */
132 #define BGE_DEVDESC_MAX		64	/* Maximum device description length */
133 
134 static struct bge_type bge_devs[] = {
135 	{ ALT_VENDORID,	ALT_DEVICEID_BCM5700,
136 		"Broadcom BCM5700 Gigabit Ethernet" },
137 	{ ALT_VENDORID,	ALT_DEVICEID_BCM5701,
138 		"Broadcom BCM5701 Gigabit Ethernet" },
139 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5700,
140 		"Broadcom BCM5700 Gigabit Ethernet" },
141 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5701,
142 		"Broadcom BCM5701 Gigabit Ethernet" },
143 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5702,
144 		"Broadcom BCM5702 Gigabit Ethernet" },
145 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5702X,
146 		"Broadcom BCM5702X Gigabit Ethernet" },
147 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5703,
148 		"Broadcom BCM5703 Gigabit Ethernet" },
149 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5703X,
150 		"Broadcom BCM5703X Gigabit Ethernet" },
151 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5704C,
152 		"Broadcom BCM5704C Dual Gigabit Ethernet" },
153 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5704S,
154 		"Broadcom BCM5704S Dual Gigabit Ethernet" },
155 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705,
156 		"Broadcom BCM5705 Gigabit Ethernet" },
157 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705K,
158 		"Broadcom BCM5705K Gigabit Ethernet" },
159 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705M,
160 		"Broadcom BCM5705M Gigabit Ethernet" },
161 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT,
162 		"Broadcom BCM5705M Gigabit Ethernet" },
163 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5714C,
164 		"Broadcom BCM5714C Gigabit Ethernet" },
165 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5721,
166 		"Broadcom BCM5721 Gigabit Ethernet" },
167 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5750,
168 		"Broadcom BCM5750 Gigabit Ethernet" },
169 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5750M,
170 		"Broadcom BCM5750M Gigabit Ethernet" },
171 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5751,
172 		"Broadcom BCM5751 Gigabit Ethernet" },
173 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5751M,
174 		"Broadcom BCM5751M Gigabit Ethernet" },
175 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5752,
176 		"Broadcom BCM5752 Gigabit Ethernet" },
177 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5782,
178 		"Broadcom BCM5782 Gigabit Ethernet" },
179 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5788,
180 		"Broadcom BCM5788 Gigabit Ethernet" },
181 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5789,
182 		"Broadcom BCM5789 Gigabit Ethernet" },
183 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5901,
184 		"Broadcom BCM5901 Fast Ethernet" },
185 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2,
186 		"Broadcom BCM5901A2 Fast Ethernet" },
187 	{ SK_VENDORID, SK_DEVICEID_ALTIMA,
188 		"SysKonnect Gigabit Ethernet" },
189 	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000,
190 		"Altima AC1000 Gigabit Ethernet" },
191 	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002,
192 		"Altima AC1002 Gigabit Ethernet" },
193 	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100,
194 		"Altima AC9100 Gigabit Ethernet" },
195 	{ 0, 0, NULL }
196 };
197 
198 static int bge_probe		(device_t);
199 static int bge_attach		(device_t);
200 static int bge_detach		(device_t);
201 static int bge_suspend		(device_t);
202 static int bge_resume		(device_t);
203 static void bge_release_resources
204 				(struct bge_softc *);
205 static void bge_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
206 static int bge_dma_alloc	(device_t);
207 static void bge_dma_free	(struct bge_softc *);
208 
209 static void bge_txeof		(struct bge_softc *);
210 static void bge_rxeof		(struct bge_softc *);
211 
212 static void bge_tick_locked	(struct bge_softc *);
213 static void bge_tick		(void *);
214 static void bge_stats_update	(struct bge_softc *);
215 static void bge_stats_update_regs
216 				(struct bge_softc *);
217 static int bge_encap		(struct bge_softc *, struct mbuf *,
218 					u_int32_t *);
219 
220 static void bge_intr		(void *);
221 static void bge_start_locked	(struct ifnet *);
222 static void bge_start		(struct ifnet *);
223 static int bge_ioctl		(struct ifnet *, u_long, caddr_t);
224 static void bge_init_locked	(struct bge_softc *);
225 static void bge_init		(void *);
226 static void bge_stop		(struct bge_softc *);
227 static void bge_watchdog		(struct ifnet *);
228 static void bge_shutdown		(device_t);
229 static int bge_ifmedia_upd	(struct ifnet *);
230 static void bge_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
231 
232 static u_int8_t	bge_eeprom_getbyte	(struct bge_softc *, int, u_int8_t *);
233 static int bge_read_eeprom	(struct bge_softc *, caddr_t, int, int);
234 
235 static void bge_setmulti	(struct bge_softc *);
236 
237 static int bge_newbuf_std	(struct bge_softc *, int, struct mbuf *);
238 static int bge_newbuf_jumbo	(struct bge_softc *, int, struct mbuf *);
239 static int bge_init_rx_ring_std	(struct bge_softc *);
240 static void bge_free_rx_ring_std	(struct bge_softc *);
241 static int bge_init_rx_ring_jumbo	(struct bge_softc *);
242 static void bge_free_rx_ring_jumbo	(struct bge_softc *);
243 static void bge_free_tx_ring	(struct bge_softc *);
244 static int bge_init_tx_ring	(struct bge_softc *);
245 
246 static int bge_chipinit		(struct bge_softc *);
247 static int bge_blockinit	(struct bge_softc *);
248 
249 #ifdef notdef
250 static u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
251 static void bge_vpd_read_res	(struct bge_softc *, struct vpd_res *, int);
252 static void bge_vpd_read	(struct bge_softc *);
253 #endif
254 
255 static u_int32_t bge_readmem_ind
256 				(struct bge_softc *, int);
257 static void bge_writemem_ind	(struct bge_softc *, int, int);
258 #ifdef notdef
259 static u_int32_t bge_readreg_ind
260 				(struct bge_softc *, int);
261 #endif
262 static void bge_writereg_ind	(struct bge_softc *, int, int);
263 
264 static int bge_miibus_readreg	(device_t, int, int);
265 static int bge_miibus_writereg	(device_t, int, int, int);
266 static void bge_miibus_statchg	(device_t);
267 #ifdef DEVICE_POLLING
268 static void bge_poll		(struct ifnet *ifp, enum poll_cmd cmd,
269 				    int count);
270 static void bge_poll_locked	(struct ifnet *ifp, enum poll_cmd cmd,
271 				    int count);
272 #endif
273 
274 static void bge_reset		(struct bge_softc *);
275 static void bge_link_upd	(struct bge_softc *);
276 
277 static device_method_t bge_methods[] = {
278 	/* Device interface */
279 	DEVMETHOD(device_probe,		bge_probe),
280 	DEVMETHOD(device_attach,	bge_attach),
281 	DEVMETHOD(device_detach,	bge_detach),
282 	DEVMETHOD(device_shutdown,	bge_shutdown),
283 	DEVMETHOD(device_suspend,	bge_suspend),
284 	DEVMETHOD(device_resume,	bge_resume),
285 
286 	/* bus interface */
287 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
288 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
289 
290 	/* MII interface */
291 	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
292 	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
293 	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),
294 
295 	{ 0, 0 }
296 };
297 
298 static driver_t bge_driver = {
299 	"bge",
300 	bge_methods,
301 	sizeof(struct bge_softc)
302 };
303 
304 static devclass_t bge_devclass;
305 
306 DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
307 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
308 
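/*
 * Indirect register access.  The chip's internal memory and registers
 * can be reached through a window in PCI configuration space: program
 * the window base (BGE_PCI_MEMWIN_BASEADDR or BGE_PCI_REG_BASEADDR) and
 * then transfer 32-bit words through the matching data register.  The
 * helpers below implement this for NIC memory and for MAC registers.
 */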
309 static u_int32_t
310 bge_readmem_ind(sc, off)
311 	struct bge_softc *sc;
312 	int off;
313 {
314 	device_t dev;
315 
316 	dev = sc->bge_dev;
317 
318 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
319 	return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
320 }
321 
322 static void
323 bge_writemem_ind(sc, off, val)
324 	struct bge_softc *sc;
325 	int off, val;
326 {
327 	device_t dev;
328 
329 	dev = sc->bge_dev;
330 
331 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
332 	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
333 
334 	return;
335 }
336 
337 #ifdef notdef
338 static u_int32_t
339 bge_readreg_ind(sc, off)
340 	struct bge_softc *sc;
341 	int off;
342 {
343 	device_t dev;
344 
345 	dev = sc->bge_dev;
346 
347 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
348 	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
349 }
350 #endif
351 
352 static void
353 bge_writereg_ind(sc, off, val)
354 	struct bge_softc *sc;
355 	int off, val;
356 {
357 	device_t dev;
358 
359 	dev = sc->bge_dev;
360 
361 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
362 	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
363 
364 	return;
365 }
366 
367 /*
368  * Map a single buffer address (callback for bus_dmamap_load()).
369  */
370 
371 static void
372 bge_dma_map_addr(arg, segs, nseg, error)
373 	void *arg;
374 	bus_dma_segment_t *segs;
375 	int nseg;
376 	int error;
377 {
378 	struct bge_dmamap_arg *ctx;
379 
380 	if (error)
381 		return;
382 
383 	ctx = arg;
384 
385 	if (nseg > ctx->bge_maxsegs) {
386 		ctx->bge_maxsegs = 0;
387 		return;
388 	}
389 
390 	ctx->bge_busaddr = segs->ds_addr;
391 
392 	return;
393 }
394 
395 #ifdef notdef
396 static u_int8_t
397 bge_vpd_readbyte(sc, addr)
398 	struct bge_softc *sc;
399 	int addr;
400 {
401 	int i;
402 	device_t dev;
403 	u_int32_t val;
404 
405 	dev = sc->bge_dev;
406 	pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
407 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
408 		DELAY(10);
409 		if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
410 			break;
411 	}
412 
413 	if (i == BGE_TIMEOUT * 10) {
414 		device_printf(sc->bge_dev, "VPD read timed out\n");
415 		return(0);
416 	}
417 
418 	val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);
419 
420 	return((val >> ((addr % 4) * 8)) & 0xFF);
421 }
422 
423 static void
424 bge_vpd_read_res(sc, res, addr)
425 	struct bge_softc *sc;
426 	struct vpd_res *res;
427 	int addr;
428 {
429 	int i;
430 	u_int8_t *ptr;
431 
432 	ptr = (u_int8_t *)res;
433 	for (i = 0; i < sizeof(struct vpd_res); i++)
434 		ptr[i] = bge_vpd_readbyte(sc, i + addr);
435 
436 	return;
437 }
438 
439 static void
440 bge_vpd_read(sc)
441 	struct bge_softc *sc;
442 {
443 	int pos = 0, i;
444 	struct vpd_res res;
445 
446 	if (sc->bge_vpd_prodname != NULL)
447 		free(sc->bge_vpd_prodname, M_DEVBUF);
448 	if (sc->bge_vpd_readonly != NULL)
449 		free(sc->bge_vpd_readonly, M_DEVBUF);
450 	sc->bge_vpd_prodname = NULL;
451 	sc->bge_vpd_readonly = NULL;
452 
453 	bge_vpd_read_res(sc, &res, pos);
454 
455 	if (res.vr_id != VPD_RES_ID) {
456 		device_printf(sc->bge_dev,
457 		    "bad VPD resource id: expected %x got %x\n", VPD_RES_ID,
458 		    res.vr_id);
459 		return;
460 	}
461 
462 	pos += sizeof(res);
463 	sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
464 	for (i = 0; i < res.vr_len; i++)
465 		sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
466 	sc->bge_vpd_prodname[i] = '\0';
467 	pos += i;
468 
469 	bge_vpd_read_res(sc, &res, pos);
470 
471 	if (res.vr_id != VPD_RES_READ) {
472 		device_printf(sc->bge_dev,
473 		    "bad VPD resource id: expected %x got %x\n", VPD_RES_READ,
474 		    res.vr_id);
475 		return;
476 	}
477 
478 	pos += sizeof(res);
479 	sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
480 	for (i = 0; i < res.vr_len; i++)
481 		sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
482 
483 	return;
484 }
485 #endif
486 
487 /*
488  * Read a byte of data stored in the EEPROM at address 'addr.' The
489  * BCM570x supports both the traditional bitbang interface and an
490  * auto access interface for reading the EEPROM. We use the auto
491  * access method.
492  */
493 static u_int8_t
494 bge_eeprom_getbyte(sc, addr, dest)
495 	struct bge_softc *sc;
496 	int addr;
497 	u_int8_t *dest;
498 {
499 	int i;
500 	u_int32_t byte = 0;
501 
502 	/*
503 	 * Enable use of auto EEPROM access so we can avoid
504 	 * having to use the bitbang method.
505 	 */
506 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
507 
508 	/* Reset the EEPROM, load the clock period. */
509 	CSR_WRITE_4(sc, BGE_EE_ADDR,
510 	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
511 	DELAY(20);
512 
513 	/* Issue the read EEPROM command. */
514 	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
515 
516 	/* Wait for completion */
517 	for(i = 0; i < BGE_TIMEOUT * 10; i++) {
518 		DELAY(10);
519 		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
520 			break;
521 	}
522 
523 	if (i == BGE_TIMEOUT * 10) {
524 		device_printf(sc->bge_dev, "EEPROM read timed out\n");
525 		return(1);
526 	}
527 
528 	/* Get result. */
529 	byte = CSR_READ_4(sc, BGE_EE_DATA);
530 
531 	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;
532 
533 	return(0);
534 }
535 
536 /*
537  * Read a sequence of bytes from the EEPROM.
538  */
539 static int
540 bge_read_eeprom(sc, dest, off, cnt)
541 	struct bge_softc *sc;
542 	caddr_t dest;
543 	int off;
544 	int cnt;
545 {
546 	int err = 0, i;
547 	u_int8_t byte = 0;
548 
549 	for (i = 0; i < cnt; i++) {
550 		err = bge_eeprom_getbyte(sc, off + i, &byte);
551 		if (err)
552 			break;
553 		*(dest + i) = byte;
554 	}
555 
556 	return(err ? 1 : 0);
557 }
558 
559 static int
560 bge_miibus_readreg(dev, phy, reg)
561 	device_t dev;
562 	int phy, reg;
563 {
564 	struct bge_softc *sc;
565 	u_int32_t val, autopoll;
566 	int i;
567 
568 	sc = device_get_softc(dev);
569 
570 	/*
571 	 * Broadcom's own driver always assumes the internal
572 	 * PHY is at GMII address 1. On some chips, the PHY responds
573 	 * to accesses at all addresses, which could cause us to
574 	 * bogusly attach the PHY 32 times at probe time. Always
575 	 * restricting the lookup to address 1 is simpler than
576 	 * trying to figure out which chip revisions should be
577 	 * special-cased.
578 	 */
579 	if (phy != 1)
580 		return(0);
581 
582 	/* Reading with autopolling on may trigger PCI errors */
583 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
584 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
585 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
586 		DELAY(40);
587 	}
588 
589 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
590 	    BGE_MIPHY(phy)|BGE_MIREG(reg));
591 
592 	for (i = 0; i < BGE_TIMEOUT; i++) {
593 		val = CSR_READ_4(sc, BGE_MI_COMM);
594 		if (!(val & BGE_MICOMM_BUSY))
595 			break;
596 	}
597 
598 	if (i == BGE_TIMEOUT) {
599 		if_printf(sc->bge_ifp, "PHY read timed out\n");
600 		val = 0;
601 		goto done;
602 	}
603 
604 	val = CSR_READ_4(sc, BGE_MI_COMM);
605 
606 done:
607 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
608 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
609 		DELAY(40);
610 	}
611 
612 	if (val & BGE_MICOMM_READFAIL)
613 		return(0);
614 
615 	return(val & 0xFFFF);
616 }
617 
618 static int
619 bge_miibus_writereg(dev, phy, reg, val)
620 	device_t dev;
621 	int phy, reg, val;
622 {
623 	struct bge_softc *sc;
624 	u_int32_t autopoll;
625 	int i;
626 
627 	sc = device_get_softc(dev);
628 
629 	/* Writing with autopolling on may trigger PCI errors */
630 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
631 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
632 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
633 		DELAY(40);
634 	}
635 
636 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
637 	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
638 
639 	for (i = 0; i < BGE_TIMEOUT; i++) {
640 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
641 			break;
642 	}
643 
644 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
645 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
646 		DELAY(40);
647 	}
648 
649 	if (i == BGE_TIMEOUT) {
650 		if_printf(sc->bge_ifp, "PHY write timed out\n");
651 		return(0);
652 	}
653 
654 	return(0);
655 }
656 
657 static void
658 bge_miibus_statchg(dev)
659 	device_t dev;
660 {
661 	struct bge_softc *sc;
662 	struct mii_data *mii;
663 
664 	sc = device_get_softc(dev);
665 	mii = device_get_softc(sc->bge_miibus);
666 
667 	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
668 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
669 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
670 	} else {
671 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
672 	}
673 
674 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
675 		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
676 	} else {
677 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
678 	}
679 
680 	return;
681 }
682 
683 /*
684  * Initialize a standard receive ring descriptor.
685  */
686 static int
687 bge_newbuf_std(sc, i, m)
688 	struct bge_softc	*sc;
689 	int			i;
690 	struct mbuf		*m;
691 {
692 	struct mbuf		*m_new = NULL;
693 	struct bge_rx_bd	*r;
694 	struct bge_dmamap_arg	ctx;
695 	int			error;
696 
697 	if (m == NULL) {
698 		m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
699 		if (m_new == NULL)
700 			return(ENOBUFS);
701 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
702 	} else {
703 		m_new = m;
704 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
705 		m_new->m_data = m_new->m_ext.ext_buf;
706 	}
707 
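	/*
	 * Normally reserve ETHER_ALIGN (2) bytes at the front of the
	 * cluster so the IP header ends up 32-bit aligned.  Chips flagged
	 * with the RX alignment bug cannot use the shifted start address,
	 * so the offset is skipped for them.
	 */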
708 	if (!sc->bge_rx_alignment_bug)
709 		m_adj(m_new, ETHER_ALIGN);
710 	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
711 	r = &sc->bge_ldata.bge_rx_std_ring[i];
712 	ctx.bge_maxsegs = 1;
713 	ctx.sc = sc;
714 	error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
715 	    sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
716 	    m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
717 	if (error || ctx.bge_maxsegs == 0) {
718 		if (m == NULL) {
719 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
720 			m_freem(m_new);
721 		}
722 		return(ENOMEM);
723 	}
724 	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
725 	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
726 	r->bge_flags = BGE_RXBDFLAG_END;
727 	r->bge_len = m_new->m_len;
728 	r->bge_idx = i;
729 
730 	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
731 	    sc->bge_cdata.bge_rx_std_dmamap[i],
732 	    BUS_DMASYNC_PREREAD);
733 
734 	return(0);
735 }
736 
737 /*
738  * Initialize a jumbo receive ring descriptor. This allocates
739  * a jumbo buffer from the pool managed internally by the driver.
740  */
741 static int
742 bge_newbuf_jumbo(sc, i, m)
743 	struct bge_softc *sc;
744 	int i;
745 	struct mbuf *m;
746 {
747 	bus_dma_segment_t segs[BGE_NSEG_JUMBO];
748 	struct bge_extrx_bd *r;
749 	struct mbuf *m_new = NULL;
750 	int nsegs;
751 	int error;
752 
753 	if (m == NULL) {
754 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
755 		if (m_new == NULL)
756 			return(ENOBUFS);
757 
758 		m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
759 		if (!(m_new->m_flags & M_EXT)) {
760 			m_freem(m_new);
761 			return(ENOBUFS);
762 		}
763 		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
764 	} else {
765 		m_new = m;
766 		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
767 		m_new->m_data = m_new->m_ext.ext_buf;
768 	}
769 
770 	if (!sc->bge_rx_alignment_bug)
771 		m_adj(m_new, ETHER_ALIGN);
772 
773 	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
774 	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
775 	    m_new, segs, &nsegs, BUS_DMA_NOWAIT);
776 	if (error) {
777 		if (m == NULL)
778 			m_freem(m_new);
779 		return(error);
780 	}
781 	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
782 
783 	/*
784 	 * Fill in the extended RX buffer descriptor.
785 	 */
786 	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
787 	r->bge_flags = BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END;
788 	r->bge_idx = i;
789 	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
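	/*
	 * The cases below intentionally fall through, filling the
	 * descriptor's address/length slots from the highest-numbered
	 * DMA segment down to segment 0.
	 */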
790 	switch (nsegs) {
791 	case 4:
792 		r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
793 		r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
794 		r->bge_len3 = segs[3].ds_len;
795 	case 3:
796 		r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
797 		r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
798 		r->bge_len2 = segs[2].ds_len;
799 	case 2:
800 		r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
801 		r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
802 		r->bge_len1 = segs[1].ds_len;
803 	case 1:
804 		r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
805 		r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
806 		r->bge_len0 = segs[0].ds_len;
807 		break;
808 	default:
809 		panic("%s: %d segments\n", __func__, nsegs);
810 	}
811 
812 	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
813 	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
814 	    BUS_DMASYNC_PREREAD);
815 
816 	return (0);
817 }
818 
819 /*
820  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
821  * that's 1MB of memory, which is a lot. For now, we fill only the first
822  * 256 ring entries and hope that our CPU is fast enough to keep up with
823  * the NIC.
824  */
825 static int
826 bge_init_rx_ring_std(sc)
827 	struct bge_softc *sc;
828 {
829 	int i;
830 
831 	for (i = 0; i < BGE_SSLOTS; i++) {
832 		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
833 			return(ENOBUFS);
834 	}
835 
836 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
837 	    sc->bge_cdata.bge_rx_std_ring_map,
838 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
839 
840 	sc->bge_std = i - 1;
841 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
842 
843 	return(0);
844 }
845 
846 static void
847 bge_free_rx_ring_std(sc)
848 	struct bge_softc *sc;
849 {
850 	int i;
851 
852 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
853 		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
854 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
855 			    sc->bge_cdata.bge_rx_std_dmamap[i],
856 			    BUS_DMASYNC_POSTREAD);
857 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
858 			    sc->bge_cdata.bge_rx_std_dmamap[i]);
859 			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
860 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
861 		}
862 		bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
863 		    sizeof(struct bge_rx_bd));
864 	}
865 
866 	return;
867 }
868 
869 static int
870 bge_init_rx_ring_jumbo(sc)
871 	struct bge_softc *sc;
872 {
873 	struct bge_rcb *rcb;
874 	int i;
875 
876 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
877 		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
878 			return(ENOBUFS);
879 	}
880 
881 	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
882 	    sc->bge_cdata.bge_rx_jumbo_ring_map,
883 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
884 
885 	sc->bge_jumbo = i - 1;
886 
887 	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
888 	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
889 				    BGE_RCB_FLAG_USE_EXT_RX_BD);
890 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
891 
892 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
893 
894 	return(0);
895 }
896 
897 static void
898 bge_free_rx_ring_jumbo(sc)
899 	struct bge_softc *sc;
900 {
901 	int i;
902 
903 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
904 		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
905 			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
906 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
907 			    BUS_DMASYNC_POSTREAD);
908 			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
909 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
910 			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
911 			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
912 		}
913 		bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
914 		    sizeof(struct bge_extrx_bd));
915 	}
916 
917 	return;
918 }
919 
920 static void
921 bge_free_tx_ring(sc)
922 	struct bge_softc *sc;
923 {
924 	int i;
925 
926 	if (sc->bge_ldata.bge_tx_ring == NULL)
927 		return;
928 
929 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
930 		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
931 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
932 			    sc->bge_cdata.bge_tx_dmamap[i],
933 			    BUS_DMASYNC_POSTWRITE);
934 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
935 			    sc->bge_cdata.bge_tx_dmamap[i]);
936 			m_freem(sc->bge_cdata.bge_tx_chain[i]);
937 			sc->bge_cdata.bge_tx_chain[i] = NULL;
938 		}
939 		bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
940 		    sizeof(struct bge_tx_bd));
941 	}
942 
943 	return;
944 }
945 
946 static int
947 bge_init_tx_ring(sc)
948 	struct bge_softc *sc;
949 {
950 	sc->bge_txcnt = 0;
951 	sc->bge_tx_saved_considx = 0;
952 
953 	/* Initialize transmit producer index for host-memory send ring. */
954 	sc->bge_tx_prodidx = 0;
955 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
956 
957 	/* 5700 b2 errata */
958 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
959 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
960 
961 	/* NIC-memory send ring not used; initialize to zero. */
962 	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
963 	/* 5700 b2 errata */
964 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
965 		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
966 
967 	return(0);
968 }
969 
970 static void
971 bge_setmulti(sc)
972 	struct bge_softc *sc;
973 {
974 	struct ifnet *ifp;
975 	struct ifmultiaddr *ifma;
976 	u_int32_t hashes[4] = { 0, 0, 0, 0 };
977 	int h, i;
978 
979 	BGE_LOCK_ASSERT(sc);
980 
981 	ifp = sc->bge_ifp;
982 
983 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
984 		for (i = 0; i < 4; i++)
985 			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
986 		return;
987 	}
988 
989 	/* First, zot all the existing filters. */
990 	for (i = 0; i < 4; i++)
991 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
992 
993 	/* Now program new ones. */
994 	IF_ADDR_LOCK(ifp);
995 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
996 		if (ifma->ifma_addr->sa_family != AF_LINK)
997 			continue;
998 		h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
999 		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1000 		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1001 	}
1002 	IF_ADDR_UNLOCK(ifp);
1003 
1004 	for (i = 0; i < 4; i++)
1005 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1006 
1007 	return;
1008 }
1009 
1010 /*
1011  * Do endian, PCI and DMA initialization. Also check the on-board ROM
1012  * self-test results.
1013  */
1014 static int
1015 bge_chipinit(sc)
1016 	struct bge_softc *sc;
1017 {
1018 	int			i;
1019 	u_int32_t		dma_rw_ctl;
1020 
1021 	/* Set endian type before we access any non-PCI registers. */
1022 	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
1023 
1024 	/*
1025 	 * Check the 'ROM failed' bit on the RX CPU to see if
1026 	 * self-tests passed.
1027 	 */
1028 	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1029 		device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
1030 		return(ENODEV);
1031 	}
1032 
1033 	/* Clear the MAC control register */
1034 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1035 
1036 	/*
1037 	 * Clear the MAC statistics block in the NIC's
1038 	 * internal memory.
1039 	 */
1040 	for (i = BGE_STATS_BLOCK;
1041 	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1042 		BGE_MEMWIN_WRITE(sc, i, 0);
1043 
1044 	for (i = BGE_STATUS_BLOCK;
1045 	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1046 		BGE_MEMWIN_WRITE(sc, i, 0);
1047 
1048 	/* Set up the PCI DMA control register. */
1049 	if (sc->bge_pcie) {
1050 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1051 		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1052 		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1053 	} else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
1054 	    BGE_PCISTATE_PCI_BUSMODE) {
1055 		/* Conventional PCI bus */
1056 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1057 		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1058 		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1059 		    (0x0F);
1060 	} else {
1061 		/* PCI-X bus */
1062 		/*
1063 		 * The 5704 uses a different encoding of read/write
1064 		 * watermarks.
1065 		 */
1066 		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1067 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1068 			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1069 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1070 		else
1071 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1072 			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1073 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1074 			    (0x0F);
1075 
1076 		/*
1077 		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1078 		 * for hardware bugs.
1079 		 */
1080 		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1081 		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1082 			u_int32_t tmp;
1083 
1084 			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1085 			if (tmp == 0x6 || tmp == 0x7)
1086 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1087 		}
1088 	}
1089 
1090 	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1091 	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1092 	    sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1093 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
1094 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1095 	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1096 
1097 	/*
1098 	 * Set up general mode register.
1099 	 */
1100 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1101 	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1102 	    BGE_MODECTL_TX_NO_PHDR_CSUM);
1103 
1104 	/*
1105 	 * Disable memory write invalidate.  Apparently it is not supported
1106 	 * properly by these devices.
1107 	 */
1108 	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1109 
1110 #ifdef __brokenalpha__
1111 	/*
1112 	 * Must ensure that we do not cross an 8K (byte) boundary
1113 	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
1114 	 * restriction on some ALPHA platforms with early revision
1115 	 * 21174 PCI chipsets, such as the AlphaPC 164lx
1116 	 */
1117 	PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1118 	    BGE_PCI_READ_BNDRY_1024BYTES, 4);
1119 #endif
1120 
1121 	/* Set the timer prescaler (always 66MHz) */
1122 	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1123 
1124 	return(0);
1125 }
1126 
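/*
 * Bring up the controller's internal functional blocks: buffer manager,
 * standard/jumbo/mini ring control blocks, host coalescing engine, and
 * the various DMA and MAC state machines, then enable link attentions.
 */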
1127 static int
1128 bge_blockinit(sc)
1129 	struct bge_softc *sc;
1130 {
1131 	struct bge_rcb *rcb;
1132 	bus_size_t vrcb;
1133 	bge_hostaddr taddr;
1134 	int i;
1135 
1136 	/*
1137 	 * Initialize the memory window pointer register so that
1138 	 * we can access the first 32K of internal NIC RAM. This will
1139 	 * allow us to set up the TX send ring RCBs and the RX return
1140 	 * ring RCBs, plus other things which live in NIC memory.
1141 	 */
1142 	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1143 
1144 	/* Note: the BCM5704 has a smaller mbuf space than other chips. */
1145 
1146 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1147 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1148 		/* Configure mbuf memory pool */
1149 		if (sc->bge_extram) {
1150 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1151 			    BGE_EXT_SSRAM);
1152 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1153 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1154 			else
1155 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1156 		} else {
1157 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1158 			    BGE_BUFFPOOL_1);
1159 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1160 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1161 			else
1162 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1163 		}
1164 
1165 		/* Configure DMA resource pool */
1166 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1167 		    BGE_DMA_DESCRIPTORS);
1168 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1169 	}
1170 
1171 	/* Configure mbuf pool watermarks */
1172 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1173 	    sc->bge_asicrev == BGE_ASICREV_BCM5750) {
1174 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1175 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1176 	} else {
1177 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1178 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1179 	}
1180 	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1181 
1182 	/* Configure DMA resource watermarks */
1183 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1184 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1185 
1186 	/* Enable buffer manager */
1187 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1188 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1189 		CSR_WRITE_4(sc, BGE_BMAN_MODE,
1190 		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1191 
1192 		/* Poll for buffer manager start indication */
1193 		for (i = 0; i < BGE_TIMEOUT; i++) {
1194 			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1195 				break;
1196 			DELAY(10);
1197 		}
1198 
1199 		if (i == BGE_TIMEOUT) {
1200 			device_printf(sc->bge_dev,
1201 			    "buffer manager failed to start\n");
1202 			return(ENXIO);
1203 		}
1204 	}
1205 
1206 	/* Enable flow-through queues */
1207 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1208 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1209 
1210 	/* Wait until queue initialization is complete */
1211 	for (i = 0; i < BGE_TIMEOUT; i++) {
1212 		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1213 			break;
1214 		DELAY(10);
1215 	}
1216 
1217 	if (i == BGE_TIMEOUT) {
1218 		device_printf(sc->bge_dev, "flow-through queue init failed\n");
1219 		return(ENXIO);
1220 	}
1221 
1222 	/* Initialize the standard RX ring control block */
1223 	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1224 	rcb->bge_hostaddr.bge_addr_lo =
1225 	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1226 	rcb->bge_hostaddr.bge_addr_hi =
1227 	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1228 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1229 	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1230 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1231 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
1232 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1233 	else
1234 		rcb->bge_maxlen_flags =
1235 		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1236 	if (sc->bge_extram)
1237 		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1238 	else
1239 		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1240 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1241 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1242 
1243 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1244 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1245 
1246 	/*
1247 	 * Initialize the jumbo RX ring control block
1248 	 * We set the 'ring disabled' bit in the flags
1249 	 * field until we're actually ready to start
1250 	 * using this ring (i.e. once we set the MTU
1251 	 * high enough to require it).
1252 	 */
1253 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1254 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1255 		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1256 
1257 		rcb->bge_hostaddr.bge_addr_lo =
1258 		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1259 		rcb->bge_hostaddr.bge_addr_hi =
1260 		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1261 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1262 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
1263 		    BUS_DMASYNC_PREREAD);
1264 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1265 		    BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED);
1266 		if (sc->bge_extram)
1267 			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1268 		else
1269 			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1270 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1271 		    rcb->bge_hostaddr.bge_addr_hi);
1272 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1273 		    rcb->bge_hostaddr.bge_addr_lo);
1274 
1275 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1276 		    rcb->bge_maxlen_flags);
1277 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1278 
1279 		/* Set up dummy disabled mini ring RCB */
1280 		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1281 		rcb->bge_maxlen_flags =
1282 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1283 		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1284 		    rcb->bge_maxlen_flags);
1285 	}
1286 
1287 	/*
1288 	 * Set the BD ring replenish thresholds. The recommended
1289 	 * values are 1/8th the number of descriptors allocated to
1290 	 * each ring.
1291 	 */
1292 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1293 	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1294 
1295 	/*
1296 	 * Disable all unused send rings by setting the 'ring disabled'
1297 	 * bit in the flags field of all the TX send ring control blocks.
1298 	 * These are located in NIC memory.
1299 	 */
1300 	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1301 	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1302 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1303 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1304 		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1305 		vrcb += sizeof(struct bge_rcb);
1306 	}
1307 
1308 	/* Configure TX RCB 0 (we use only the first ring) */
1309 	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1310 	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1311 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1312 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1313 	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1314 	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1315 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1316 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1317 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1318 		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1319 
1320 	/* Disable all unused RX return rings */
1321 	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1322 	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1323 		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1324 		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1325 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1326 		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1327 		    BGE_RCB_FLAG_RING_DISABLED));
1328 		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1329 		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1330 		    (i * (sizeof(u_int64_t))), 0);
1331 		vrcb += sizeof(struct bge_rcb);
1332 	}
1333 
1334 	/* Initialize RX ring indexes */
1335 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1336 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1337 	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1338 
1339 	/*
1340 	 * Set up RX return ring 0
1341 	 * Note that the NIC address for RX return rings is 0x00000000.
1342 	 * The return rings live entirely within the host, so the
1343 	 * nicaddr field in the RCB isn't used.
1344 	 */
1345 	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1346 	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1347 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1348 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1349 	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1350 	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1351 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1352 
1353 	/* Set random backoff seed for TX */
1354 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1355 	    IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1356 	    IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1357 	    IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1358 	    BGE_TX_BACKOFF_SEED_MASK);
1359 
1360 	/* Set inter-packet gap */
1361 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1362 
1363 	/*
1364 	 * Specify which ring to use for packets that don't match
1365 	 * any RX rules.
1366 	 */
1367 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1368 
1369 	/*
1370 	 * Configure number of RX lists. One interrupt distribution
1371 	 * list, sixteen active lists, one bad frames class.
1372 	 */
1373 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1374 
1375 	/* Initialize RX list placement stats mask. */
1376 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1377 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1378 
1379 	/* Disable host coalescing until we get it set up */
1380 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1381 
1382 	/* Poll to make sure it's shut down. */
1383 	for (i = 0; i < BGE_TIMEOUT; i++) {
1384 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1385 			break;
1386 		DELAY(10);
1387 	}
1388 
1389 	if (i == BGE_TIMEOUT) {
1390 		device_printf(sc->bge_dev,
1391 		    "host coalescing engine failed to idle\n");
1392 		return(ENXIO);
1393 	}
1394 
1395 	/* Set up host coalescing defaults */
1396 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1397 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1398 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1399 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1400 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1401 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1402 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1403 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1404 	}
1405 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1406 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1407 
1408 	/* Set up address of statistics block */
1409 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1410 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1411 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1412 		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1413 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1414 		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1415 		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1416 		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1417 		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1418 	}
1419 
1420 	/* Set up address of status block */
1421 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1422 	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1423 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1424 	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1425 	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1426 	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1427 
1428 	/* Turn on host coalescing state machine */
1429 	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1430 
1431 	/* Turn on RX BD completion state machine and enable attentions */
1432 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
1433 	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1434 
1435 	/* Turn on RX list placement state machine */
1436 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1437 
1438 	/* Turn on RX list selector state machine. */
1439 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1440 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1441 		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1442 
1443 	/* Turn on DMA, clear stats */
1444 	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1445 	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1446 	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1447 	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1448 	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1449 
1450 	/* Set misc. local control, enable interrupts on attentions */
1451 	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1452 
1453 #ifdef notdef
1454 	/* Assert GPIO pins for PHY reset */
1455 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1456 	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1457 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1458 	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1459 #endif
1460 
1461 	/* Turn on DMA completion state machine */
1462 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1463 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1464 		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1465 
1466 	/* Turn on write DMA state machine */
1467 	CSR_WRITE_4(sc, BGE_WDMA_MODE,
1468 	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1469 
1470 	/* Turn on read DMA state machine */
1471 	CSR_WRITE_4(sc, BGE_RDMA_MODE,
1472 	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1473 
1474 	/* Turn on RX data completion state machine */
1475 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1476 
1477 	/* Turn on RX BD initiator state machine */
1478 	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1479 
1480 	/* Turn on RX data and RX BD initiator state machine */
1481 	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1482 
1483 	/* Turn on Mbuf cluster free state machine */
1484 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1485 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1486 		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1487 
1488 	/* Turn on send BD completion state machine */
1489 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1490 
1491 	/* Turn on send data completion state machine */
1492 	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1493 
1494 	/* Turn on send data initiator state machine */
1495 	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1496 
1497 	/* Turn on send BD initiator state machine */
1498 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1499 
1500 	/* Turn on send BD selector state machine */
1501 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1502 
1503 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1504 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1505 	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1506 
1507 	/* ack/clear link change events */
1508 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1509 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1510 	    BGE_MACSTAT_LINK_CHANGED);
1511 	CSR_WRITE_4(sc, BGE_MI_STS, 0);
1512 
1513 	/* Enable PHY auto polling (for MII/GMII only) */
1514 	if (sc->bge_tbi) {
1515 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1516 	} else {
1517 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1518 		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1519 		    sc->bge_chipid != BGE_CHIPID_BCM5700_B1)
1520 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1521 			    BGE_EVTENB_MI_INTERRUPT);
1522 	}
1523 
1524 	/*
1525 	 * Clear any pending link state attention.
1526 	 * Otherwise some link state change events may be lost until attention
1527 	 * is cleared by bge_intr() -> bge_link_upd() sequence.
1528 	 * It's not necessary on newer BCM chips - perhaps enabling link
1529 	 * state change attentions implies clearing pending attention.
1530 	 */
1531 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1532 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1533 	    BGE_MACSTAT_LINK_CHANGED);
1534 
1535 	/* Enable link state change attentions. */
1536 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1537 
1538 	return(0);
1539 }
1540 
1541 /*
1542  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1543  * against our list and return its name if we find a match. Note
1544  * that since the Broadcom controller contains VPD support, we
1545  * can get the device name string from the controller itself instead
1546  * of the compiled-in string. This is a little slow, but it guarantees
1547  * we'll always announce the right product name.
1548  */
1549 static int
1550 bge_probe(dev)
1551 	device_t dev;
1552 {
1553 	struct bge_type *t;
1554 	struct bge_softc *sc;
1555 	char *descbuf;
1556 
1557 	t = bge_devs;
1558 
1559 	sc = device_get_softc(dev);
1560 	bzero(sc, sizeof(struct bge_softc));
1561 	sc->bge_dev = dev;
1562 
1563 	while(t->bge_name != NULL) {
1564 		if ((pci_get_vendor(dev) == t->bge_vid) &&
1565 		    (pci_get_device(dev) == t->bge_did)) {
1566 #ifdef notdef
1567 			bge_vpd_read(sc);
1568 			device_set_desc(dev, sc->bge_vpd_prodname);
1569 #endif
1570 			descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
1571 			if (descbuf == NULL)
1572 				return(ENOMEM);
1573 			snprintf(descbuf, BGE_DEVDESC_MAX,
1574 			    "%s, ASIC rev. %#04x", t->bge_name,
1575 			    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
1576 			device_set_desc_copy(dev, descbuf);
1577 			if (pci_get_subvendor(dev) == DELL_VENDORID)
1578 				sc->bge_no_3_led = 1;
1579 			free(descbuf, M_TEMP);
1580 			return(0);
1581 		}
1582 		t++;
1583 	}
1584 
1585 	return(ENXIO);
1586 }
1587 
1588 static void
1589 bge_dma_free(sc)
1590 	struct bge_softc *sc;
1591 {
1592 	int i;
1593 
1594 
1595 	/* Destroy DMA maps for RX buffers */
1596 
1597 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1598 		if (sc->bge_cdata.bge_rx_std_dmamap[i])
1599 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1600 			    sc->bge_cdata.bge_rx_std_dmamap[i]);
1601 	}
1602 
1603 	/* Destroy DMA maps for jumbo RX buffers */
1604 
1605 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1606 		if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1607 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1608 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1609 	}
1610 
1611 	/* Destroy DMA maps for TX buffers */
1612 
1613 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1614 		if (sc->bge_cdata.bge_tx_dmamap[i])
1615 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1616 			    sc->bge_cdata.bge_tx_dmamap[i]);
1617 	}
1618 
1619 	if (sc->bge_cdata.bge_mtag)
1620 		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
1621 
1622 
1623 	/* Destroy standard RX ring */
1624 
1625 	if (sc->bge_cdata.bge_rx_std_ring_map)
1626 		bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1627 		    sc->bge_cdata.bge_rx_std_ring_map);
1628 	if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
1629 		bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1630 		    sc->bge_ldata.bge_rx_std_ring,
1631 		    sc->bge_cdata.bge_rx_std_ring_map);
1632 
1633 	if (sc->bge_cdata.bge_rx_std_ring_tag)
1634 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1635 
1636 	/* Destroy jumbo RX ring */
1637 
1638 	if (sc->bge_cdata.bge_rx_jumbo_ring_map)
1639 		bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1640 		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1641 
1642 	if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
1643 	    sc->bge_ldata.bge_rx_jumbo_ring)
1644 		bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1645 		    sc->bge_ldata.bge_rx_jumbo_ring,
1646 		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1647 
1648 	if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1649 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1650 
1651 	/* Destroy RX return ring */
1652 
1653 	if (sc->bge_cdata.bge_rx_return_ring_map)
1654 		bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1655 		    sc->bge_cdata.bge_rx_return_ring_map);
1656 
1657 	if (sc->bge_cdata.bge_rx_return_ring_map &&
1658 	    sc->bge_ldata.bge_rx_return_ring)
1659 		bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1660 		    sc->bge_ldata.bge_rx_return_ring,
1661 		    sc->bge_cdata.bge_rx_return_ring_map);
1662 
1663 	if (sc->bge_cdata.bge_rx_return_ring_tag)
1664 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
1665 
1666 	/* Destroy TX ring */
1667 
1668 	if (sc->bge_cdata.bge_tx_ring_map)
1669 		bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
1670 		    sc->bge_cdata.bge_tx_ring_map);
1671 
1672 	if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
1673 		bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
1674 		    sc->bge_ldata.bge_tx_ring,
1675 		    sc->bge_cdata.bge_tx_ring_map);
1676 
1677 	if (sc->bge_cdata.bge_tx_ring_tag)
1678 		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
1679 
1680 	/* Destroy status block */
1681 
1682 	if (sc->bge_cdata.bge_status_map)
1683 		bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
1684 		    sc->bge_cdata.bge_status_map);
1685 
1686 	if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
1687 		bus_dmamem_free(sc->bge_cdata.bge_status_tag,
1688 		    sc->bge_ldata.bge_status_block,
1689 		    sc->bge_cdata.bge_status_map);
1690 
1691 	if (sc->bge_cdata.bge_status_tag)
1692 		bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1693 
1694 	/* Destroy statistics block */
1695 
1696 	if (sc->bge_cdata.bge_stats_map)
1697 		bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
1698 		    sc->bge_cdata.bge_stats_map);
1699 
1700 	if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
1701 		bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
1702 		    sc->bge_ldata.bge_stats,
1703 		    sc->bge_cdata.bge_stats_map);
1704 
1705 	if (sc->bge_cdata.bge_stats_tag)
1706 		bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1707 
1708 	/* Destroy the parent tag */
1709 
1710 	if (sc->bge_cdata.bge_parent_tag)
1711 		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1712 
1713 	return;
1714 }
1715 
1716 static int
1717 bge_dma_alloc(dev)
1718 	device_t dev;
1719 {
1720 	struct bge_softc *sc;
1721 	int i, error;
1722 	struct bge_dmamap_arg ctx;
1723 
1724 	sc = device_get_softc(dev);
1725 
1726 	/*
1727 	 * Allocate the parent bus DMA tag appropriate for PCI.
1728 	 */
1729 	error = bus_dma_tag_create(NULL,	/* parent */
1730 			PAGE_SIZE, 0,		/* alignment, boundary */
1731 			BUS_SPACE_MAXADDR,	/* lowaddr */
1732 			BUS_SPACE_MAXADDR,	/* highaddr */
1733 			NULL, NULL,		/* filter, filterarg */
1734 			MAXBSIZE, BGE_NSEG_NEW,	/* maxsize, nsegments */
1735 			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1736 			0,			/* flags */
1737 			NULL, NULL,		/* lockfunc, lockarg */
1738 			&sc->bge_cdata.bge_parent_tag);
1739 
1740 	if (error != 0) {
1741 		device_printf(sc->bge_dev,
1742 		    "could not allocate parent dma tag\n");
1743 		return (ENOMEM);
1744 	}
1745 
1746 	/*
1747 	 * Create tag for RX mbufs.
1748 	 */
1749 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
1750 	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1751 	    NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
1752 	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
1753 
1754 	if (error) {
1755 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1756 		return (ENOMEM);
1757 	}
1758 
1759 	/* Create DMA maps for RX buffers */
1760 
1761 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1762 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1763 			    &sc->bge_cdata.bge_rx_std_dmamap[i]);
1764 		if (error) {
1765 			device_printf(sc->bge_dev,
1766 			    "can't create DMA map for RX\n");
1767 			return(ENOMEM);
1768 		}
1769 	}
1770 
1771 	/* Create DMA maps for TX buffers */
1772 
1773 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1774 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1775 			    &sc->bge_cdata.bge_tx_dmamap[i]);
1776 		if (error) {
1777 			device_printf(sc->bge_dev,
1778 			    "can't create DMA map for TX\n");
1779 			return(ENOMEM);
1780 		}
1781 	}
1782 
1783 	/* Create tag for standard RX ring */
1784 
1785 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1786 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1787 	    NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1788 	    NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1789 
1790 	if (error) {
1791 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1792 		return (ENOMEM);
1793 	}
1794 
1795 	/* Allocate DMA'able memory for standard RX ring */
1796 
1797 	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1798 	    (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1799 	    &sc->bge_cdata.bge_rx_std_ring_map);
1800 	if (error)
1801 		return (ENOMEM);
1802 
1803 	bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1804 
1805 	/* Load the address of the standard RX ring */
1806 
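	/*
	 * bge_dma_map_addr() saves the single segment's bus address in
	 * ctx.bge_busaddr so it can be recorded below.
	 */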
1807 	ctx.bge_maxsegs = 1;
1808 	ctx.sc = sc;
1809 
1810 	error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
1811 	    sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
1812 	    BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1813 
1814 	if (error)
1815 		return (ENOMEM);
1816 
1817 	sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
1818 
1819 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1820 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1821 
1822 		/*
1823 		 * Create tag for jumbo mbufs.
1824 		 * This is really a bit of a kludge. We allocate a special
1825 		 * jumbo buffer pool which (thanks to the way our DMA
1826 		 * memory allocation works) will consist of contiguous
1827 		 * pages. This means that even though a jumbo buffer might
1828 		 * be larger than a page size, we don't really need to
1829 		 * map it into more than one DMA segment. However, the
1830 		 * default mbuf tag will result in multi-segment mappings,
1831 		 * so we have to create a special jumbo mbuf tag that
1832 		 * lets us get away with mapping the jumbo buffers as
1833 		 * a single segment. I think eventually the driver should
1834 		 * be changed so that it uses ordinary mbufs and cluster
1835 		 * buffers, i.e. jumbo frames can span multiple DMA
1836 		 * descriptors. But that's a project for another day.
1837 		 */
1838 
1839 		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1840 		    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1841 		    NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
1842 		    0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
1843 
1844 		if (error) {
1845 			device_printf(sc->bge_dev,
1846 			    "could not allocate dma tag\n");
1847 			return (ENOMEM);
1848 		}
1849 
1850 		/* Create tag for jumbo RX ring */
1851 		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1852 		    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1853 		    NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
1854 		    NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
1855 
1856 		if (error) {
1857 			device_printf(sc->bge_dev,
1858 			    "could not allocate dma tag\n");
1859 			return (ENOMEM);
1860 		}
1861 
1862 		/* Allocate DMA'able memory for jumbo RX ring */
1863 		error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1864 		    (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
1865 		    BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1866 		    &sc->bge_cdata.bge_rx_jumbo_ring_map);
1867 		if (error)
1868 			return (ENOMEM);
1869 
1870 		/* Load the address of the jumbo RX ring */
1871 		ctx.bge_maxsegs = 1;
1872 		ctx.sc = sc;
1873 
1874 		error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1875 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
1876 		    sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
1877 		    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1878 
1879 		if (error)
1880 			return (ENOMEM);
1881 
1882 		sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
1883 
1884 		/* Create DMA maps for jumbo RX buffers */
1885 
1886 		for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1887 			error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
1888 				    0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1889 			if (error) {
1890 				device_printf(sc->bge_dev,
1891 				    "can't create DMA map for RX\n");
1892 				return(ENOMEM);
1893 			}
1894 		}
1895 
1896 	}
1897 
1898 	/* Create tag for RX return ring */
1899 
1900 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1901 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1902 	    NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
1903 	    NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
1904 
1905 	if (error) {
1906 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1907 		return (ENOMEM);
1908 	}
1909 
1910 	/* Allocate DMA'able memory for RX return ring */
1911 
1912 	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
1913 	    (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
1914 	    &sc->bge_cdata.bge_rx_return_ring_map);
1915 	if (error)
1916 		return (ENOMEM);
1917 
1918 	bzero((char *)sc->bge_ldata.bge_rx_return_ring,
1919 	    BGE_RX_RTN_RING_SZ(sc));
1920 
1921 	/* Load the address of the RX return ring */
1922 
1923 	ctx.bge_maxsegs = 1;
1924 	ctx.sc = sc;
1925 
1926 	error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
1927 	    sc->bge_cdata.bge_rx_return_ring_map,
1928 	    sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
1929 	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1930 
1931 	if (error)
1932 		return (ENOMEM);
1933 
1934 	sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
1935 
1936 	/* Create tag for TX ring */
1937 
1938 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1939 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1940 	    NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
1941 	    &sc->bge_cdata.bge_tx_ring_tag);
1942 
1943 	if (error) {
1944 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1945 		return (ENOMEM);
1946 	}
1947 
1948 	/* Allocate DMA'able memory for TX ring */
1949 
1950 	error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
1951 	    (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
1952 	    &sc->bge_cdata.bge_tx_ring_map);
1953 	if (error)
1954 		return (ENOMEM);
1955 
1956 	bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1957 
1958 	/* Load the address of the TX ring */
1959 
1960 	ctx.bge_maxsegs = 1;
1961 	ctx.sc = sc;
1962 
1963 	error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
1964 	    sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
1965 	    BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1966 
1967 	if (error)
1968 		return (ENOMEM);
1969 
1970 	sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
1971 
1972 	/* Create tag for status block */
1973 
1974 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1975 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1976 	    NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
1977 	    NULL, NULL, &sc->bge_cdata.bge_status_tag);
1978 
1979 	if (error) {
1980 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1981 		return (ENOMEM);
1982 	}
1983 
1984 	/* Allocate DMA'able memory for status block */
1985 
1986 	error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
1987 	    (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
1988 	    &sc->bge_cdata.bge_status_map);
1989 	if (error)
1990 		return (ENOMEM);
1991 
1992 	bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1993 
1994 	/* Load the address of the status block */
1995 
1996 	ctx.sc = sc;
1997 	ctx.bge_maxsegs = 1;
1998 
1999 	error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2000 	    sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2001 	    BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2002 
2003 	if (error)
2004 		return (ENOMEM);
2005 
2006 	sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2007 
2008 	/* Create tag for statistics block */
2009 
2010 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2011 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2012 	    NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2013 	    &sc->bge_cdata.bge_stats_tag);
2014 
2015 	if (error) {
2016 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
2017 		return (ENOMEM);
2018 	}
2019 
2020 	/* Allocate DMA'able memory for statistics block */
2021 
2022 	error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2023 	    (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2024 	    &sc->bge_cdata.bge_stats_map);
2025 	if (error)
2026 		return (ENOMEM);
2027 
2028 	bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2029 
2030 	/* Load the address of the statistics block */
2031 
2032 	ctx.sc = sc;
2033 	ctx.bge_maxsegs = 1;
2034 
2035 	error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2036 	    sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2037 	    BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2038 
2039 	if (error)
2040 		return (ENOMEM);
2041 
2042 	sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2043 
2044 	return(0);
2045 }
2046 
2047 static int
2048 bge_attach(dev)
2049 	device_t dev;
2050 {
2051 	struct ifnet *ifp;
2052 	struct bge_softc *sc;
2053 	u_int32_t hwcfg = 0;
2054 	u_int32_t mac_tmp = 0;
2055 	u_char eaddr[6];
2056 	int error = 0, rid;
2057 
2058 	sc = device_get_softc(dev);
2059 	sc->bge_dev = dev;
2060 
2061 	/*
2062 	 * Map control/status registers.
2063 	 */
2064 	pci_enable_busmaster(dev);
2065 
2066 	rid = BGE_PCI_BAR0;
2067 	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2068 	    RF_ACTIVE|PCI_RF_DENSE);
2069 
2070 	if (sc->bge_res == NULL) {
2071 		device_printf (sc->bge_dev, "couldn't map memory\n");
2072 		error = ENXIO;
2073 		goto fail;
2074 	}
2075 
2076 	sc->bge_btag = rman_get_bustag(sc->bge_res);
2077 	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2078 
2079 	/* Allocate interrupt */
2080 	rid = 0;
2081 
2082 	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2083 	    RF_SHAREABLE | RF_ACTIVE);
2084 
2085 	if (sc->bge_irq == NULL) {
2086 		device_printf(sc->bge_dev, "couldn't map interrupt\n");
2087 		error = ENXIO;
2088 		goto fail;
2089 	}
2090 
2091 	BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2092 
2093 	/* Save ASIC rev. */
2094 
2095 	sc->bge_chipid =
2096 	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2097 	    BGE_PCIMISCCTL_ASICREV;
2098 	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2099 	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2100 
2101 	/*
2102 	 * Treat the 5714 and the 5752 like the 5750 until we have more info
2103 	 * on these chips.
2104 	 */
2105 	if (sc->bge_asicrev == BGE_ASICREV_BCM5714 ||
2106 	    sc->bge_asicrev == BGE_ASICREV_BCM5752)
2107 		sc->bge_asicrev = BGE_ASICREV_BCM5750;
2108 
2109 	/*
2110 	 * XXX: Broadcom Linux driver.  Not in specs or errata.
2111 	 * PCI-Express?
2112 	 */
2113 	if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
2114 		u_int32_t v;
2115 
2116 		v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
2117 		if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
2118 			v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2119 			if ((v & 0xff) == BGE_PCIE_CAPID)
2120 				sc->bge_pcie = 1;
2121 		}
2122 	}
2123 
2124 	/* Try to reset the chip. */
2125 	bge_reset(sc);
2126 
2127 	if (bge_chipinit(sc)) {
2128 		device_printf(sc->bge_dev, "chip initialization failed\n");
2129 		bge_release_resources(sc);
2130 		error = ENXIO;
2131 		goto fail;
2132 	}
2133 
2134 	/*
2135 	 * Get the station address from NIC memory, or else from the EEPROM.
2136 	 */
2137 	mac_tmp = bge_readmem_ind(sc, 0x0c14);
2138 	if ((mac_tmp >> 16) == 0x484b) {
2139 		eaddr[0] = (u_char)(mac_tmp >> 8);
2140 		eaddr[1] = (u_char)mac_tmp;
2141 		mac_tmp = bge_readmem_ind(sc, 0x0c18);
2142 		eaddr[2] = (u_char)(mac_tmp >> 24);
2143 		eaddr[3] = (u_char)(mac_tmp >> 16);
2144 		eaddr[4] = (u_char)(mac_tmp >> 8);
2145 		eaddr[5] = (u_char)mac_tmp;
2146 	} else if (bge_read_eeprom(sc, eaddr,
2147 	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2148 		device_printf(sc->bge_dev, "failed to read station address\n");
2149 		bge_release_resources(sc);
2150 		error = ENXIO;
2151 		goto fail;
2152 	}
2153 
2154 	/* The 5705 and 5750 limit the RX return ring to 512 entries. */
2155 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2156 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
2157 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2158 	else
2159 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2160 
2161 	if (bge_dma_alloc(dev)) {
2162 		device_printf(sc->bge_dev,
2163 		    "failed to allocate DMA resources\n");
2164 		bge_release_resources(sc);
2165 		error = ENXIO;
2166 		goto fail;
2167 	}
2168 
2169 	/* Set default tuneable values. */
2170 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2171 	sc->bge_rx_coal_ticks = 150;
2172 	sc->bge_tx_coal_ticks = 150;
2173 	sc->bge_rx_max_coal_bds = 64;
2174 	sc->bge_tx_max_coal_bds = 128;
2175 
2176 	/* Set up ifnet structure */
2177 	ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2178 	if (ifp == NULL) {
2179 		device_printf(sc->bge_dev, "failed to if_alloc()\n");
2180 		bge_release_resources(sc);
2181 		error = ENXIO;
2182 		goto fail;
2183 	}
2184 	ifp->if_softc = sc;
2185 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2186 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2187 	ifp->if_ioctl = bge_ioctl;
2188 	ifp->if_start = bge_start;
2189 	ifp->if_watchdog = bge_watchdog;
2190 	ifp->if_init = bge_init;
2191 	ifp->if_mtu = ETHERMTU;
2192 	ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2193 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2194 	IFQ_SET_READY(&ifp->if_snd);
2195 	ifp->if_hwassist = BGE_CSUM_FEATURES;
2196 	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2197 	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM;
2198 	ifp->if_capenable = ifp->if_capabilities;
2199 #ifdef DEVICE_POLLING
2200 	ifp->if_capabilities |= IFCAP_POLLING;
2201 #endif
2202 
2203 	/*
2204 	 * 5700 B0 chips do not support checksumming correctly due
2205 	 * to hardware bugs.
2206 	 */
2207 	if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2208 		ifp->if_capabilities &= ~IFCAP_HWCSUM;
2209 		ifp->if_capenable &= ~IFCAP_HWCSUM;
2210 		ifp->if_hwassist = 0;
2211 	}
2212 
2213 	/*
2214 	 * Figure out what sort of media we have by checking the
2215 	 * hardware config word in the first 32k of NIC internal memory,
2216 	 * or fall back to examining the EEPROM if necessary.
2217 	 * Note: on some BCM5700 cards, this value appears to be unset.
2218 	 * If that's the case, we have to rely on identifying the NIC
2219 	 * by its PCI subsystem ID, as we do below for the SysKonnect
2220 	 * SK-9D41.
2221 	 */
2222 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2223 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2224 	else {
2225 		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2226 		    sizeof(hwcfg))) {
2227 			device_printf(sc->bge_dev, "failed to read EEPROM\n");
2228 			bge_release_resources(sc);
2229 			error = ENXIO;
2230 			goto fail;
2231 		}
2232 		hwcfg = ntohl(hwcfg);
2233 	}
2234 
2235 	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2236 		sc->bge_tbi = 1;
2237 
2238 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
2239 	if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2240 		sc->bge_tbi = 1;
2241 
2242 	if (sc->bge_tbi) {
2243 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2244 		    bge_ifmedia_upd, bge_ifmedia_sts);
2245 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2246 		ifmedia_add(&sc->bge_ifmedia,
2247 		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2248 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2249 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2250 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2251 	} else {
2252 		/*
2253 		 * Do transceiver setup.
2254 		 */
2255 		if (mii_phy_probe(dev, &sc->bge_miibus,
2256 		    bge_ifmedia_upd, bge_ifmedia_sts)) {
2257 			device_printf(sc->bge_dev, "MII without any PHY!\n");
2258 			bge_release_resources(sc);
2259 			error = ENXIO;
2260 			goto fail;
2261 		}
2262 	}
2263 
2264 	/*
2265 	 * When using the BCM5701 in PCI-X mode, data corruption has
2266 	 * been observed in the first few bytes of some received packets.
2267 	 * Aligning the packet buffer in memory eliminates the corruption.
2268 	 * Unfortunately, this misaligns the packet payloads.  On platforms
2269 	 * which do not support unaligned accesses, we will realign the
2270 	 * payloads by copying the received packets.
2271 	 */
2272 	switch (sc->bge_chipid) {
2273 	case BGE_CHIPID_BCM5701_A0:
2274 	case BGE_CHIPID_BCM5701_B0:
2275 	case BGE_CHIPID_BCM5701_B2:
2276 	case BGE_CHIPID_BCM5701_B5:
2277 		/* If in PCI-X mode, work around the alignment bug. */
2278 		if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2279 		    (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
2280 		    BGE_PCISTATE_PCI_BUSSPEED)
2281 			sc->bge_rx_alignment_bug = 1;
2282 		break;
2283 	}
2284 
2285 	/*
2286 	 * Call MI attach routine.
2287 	 */
2288 	ether_ifattach(ifp, eaddr);
2289 	callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE);
2290 
2291 	/*
2292 	 * Hookup IRQ last.
2293 	 */
2294 	error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2295 	   bge_intr, sc, &sc->bge_intrhand);
2296 
2297 	if (error) {
2298 		bge_detach(dev);
2299 		device_printf(sc->bge_dev, "couldn't set up irq\n");
2300 	}
2301 
2302 fail:
2303 	return(error);
2304 }
2305 
2306 static int
2307 bge_detach(dev)
2308 	device_t dev;
2309 {
2310 	struct bge_softc *sc;
2311 	struct ifnet *ifp;
2312 
2313 	sc = device_get_softc(dev);
2314 	ifp = sc->bge_ifp;
2315 
2316 #ifdef DEVICE_POLLING
2317 	if (ifp->if_capenable & IFCAP_POLLING)
2318 		ether_poll_deregister(ifp);
2319 #endif
2320 
2321 	BGE_LOCK(sc);
2322 	bge_stop(sc);
2323 	bge_reset(sc);
2324 	BGE_UNLOCK(sc);
2325 
2326 	ether_ifdetach(ifp);
2327 
2328 	if (sc->bge_tbi) {
2329 		ifmedia_removeall(&sc->bge_ifmedia);
2330 	} else {
2331 		bus_generic_detach(dev);
2332 		device_delete_child(dev, sc->bge_miibus);
2333 	}
2334 
2335 	bge_release_resources(sc);
2336 
2337 	return(0);
2338 }
2339 
2340 static void
2341 bge_release_resources(sc)
2342 	struct bge_softc *sc;
2343 {
2344 	device_t dev;
2345 
2346 	dev = sc->bge_dev;
2347 
2348 	if (sc->bge_vpd_prodname != NULL)
2349 		free(sc->bge_vpd_prodname, M_DEVBUF);
2350 
2351 	if (sc->bge_vpd_readonly != NULL)
2352 		free(sc->bge_vpd_readonly, M_DEVBUF);
2353 
2354 	if (sc->bge_intrhand != NULL)
2355 		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2356 
2357 	if (sc->bge_irq != NULL)
2358 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2359 
2360 	if (sc->bge_res != NULL)
2361 		bus_release_resource(dev, SYS_RES_MEMORY,
2362 		    BGE_PCI_BAR0, sc->bge_res);
2363 
2364 	if (sc->bge_ifp != NULL)
2365 		if_free(sc->bge_ifp);
2366 
2367 	bge_dma_free(sc);
2368 
2369 	if (mtx_initialized(&sc->bge_mtx))	/* XXX */
2370 		BGE_LOCK_DESTROY(sc);
2371 
2372 	return;
2373 }
2374 
2375 static void
2376 bge_reset(sc)
2377 	struct bge_softc *sc;
2378 {
2379 	device_t dev;
2380 	u_int32_t cachesize, command, pcistate, reset;
2381 	int i, val = 0;
2382 
2383 	dev = sc->bge_dev;
2384 
2385 	/* Save some important PCI state. */
2386 	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2387 	command = pci_read_config(dev, BGE_PCI_CMD, 4);
2388 	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2389 
2390 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2391 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2392 	BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2393 
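	/*
	 * Build the reset request: reset the core clocks.  The (65 << 1)
	 * value is the same MISC_CFG setting that gets written back once
	 * the reset has completed below.
	 */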
2394 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2395 
2396 	/* XXX: Broadcom Linux driver. */
2397 	if (sc->bge_pcie) {
2398 		if (CSR_READ_4(sc, 0x7e2c) == 0x60)	/* PCIE 1.0 */
2399 			CSR_WRITE_4(sc, 0x7e2c, 0x20);
2400 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2401 			/* Prevent PCIE link training during global reset */
2402 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2403 			reset |= (1<<29);
2404 		}
2405 	}
2406 
2407 	/* Issue global reset */
2408 	bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2409 
2410 	DELAY(1000);
2411 
2412 	/* XXX: Broadcom Linux driver. */
2413 	if (sc->bge_pcie) {
2414 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2415 			uint32_t v;
2416 
2417 			DELAY(500000); /* wait for link training to complete */
2418 			v = pci_read_config(dev, 0xc4, 4);
2419 			pci_write_config(dev, 0xc4, v | (1<<15), 4);
2420 		}
2421 		/* Set PCIE max payload size and clear error status. */
2422 		pci_write_config(dev, 0xd8, 0xf5000, 4);
2423 	}
2424 
2425 	/* Reset some of the PCI state that got zapped by reset */
2426 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2427 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2428 	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2429 	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2430 	pci_write_config(dev, BGE_PCI_CMD, command, 4);
2431 	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2432 
2433 	/* Enable memory arbiter. */
2434 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2435 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
2436 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2437 
2438 	/*
2439 	 * Prevent PXE restart: write a magic number to the
2440 	 * general communications memory at 0xB50.
2441 	 */
2442 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2443 	/*
2444 	 * Poll the value location we just wrote until
2445 	 * we see the 1's complement of the magic number.
2446 	 * This indicates that the firmware initialization
2447 	 * is complete.
2448 	 */
2449 	for (i = 0; i < BGE_TIMEOUT; i++) {
2450 		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2451 		if (val == ~BGE_MAGIC_NUMBER)
2452 			break;
2453 		DELAY(10);
2454 	}
2455 
2456 	if (i == BGE_TIMEOUT) {
2457 		device_printf(sc->bge_dev, "firmware handshake timed out\n");
2458 		return;
2459 	}
2460 
2461 	/*
2462 	 * XXX Wait for the value of the PCISTATE register to
2463 	 * return to its original pre-reset state. This is a
2464 	 * fairly good indicator of reset completion. If we don't
2465 	 * wait for the reset to fully complete, trying to read
2466 	 * from the device's non-PCI registers may yield garbage
2467 	 * results.
2468 	 */
2469 	for (i = 0; i < BGE_TIMEOUT; i++) {
2470 		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2471 			break;
2472 		DELAY(10);
2473 	}
2474 
2475 	/* Fix up byte swapping */
2476 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
2477 	    BGE_MODECTL_BYTESWAP_DATA);
2478 
2479 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2480 
2481 	/*
2482 	 * The 5704 in TBI mode apparently needs some special
2483 	 * adjustment to ensure the SERDES drive level is set
2484 	 * to 1.2V.
2485 	 */
2486 	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
2487 		uint32_t serdescfg;
2488 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2489 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
2490 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2491 	}
2492 
2493 	/* XXX: Broadcom Linux driver. */
2494 	if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2495 		uint32_t v;
2496 
2497 		v = CSR_READ_4(sc, 0x7c00);
2498 		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2499 	}
2500 	DELAY(10000);
2501 
2502 	return;
2503 }
2504 
2505 /*
2506  * Frame reception handling. This is called if there's a frame
2507  * on the receive return list.
2508  *
2509  * Note: we have to be able to handle two possibilities here:
2510  * 1) the frame is from the jumbo receive ring
2511  * 2) the frame is from the standard receive ring
2512  */
2513 
2514 static void
2515 bge_rxeof(sc)
2516 	struct bge_softc *sc;
2517 {
2518 	struct ifnet *ifp;
2519 	int stdcnt = 0, jumbocnt = 0;
2520 
2521 	BGE_LOCK_ASSERT(sc);
2522 
2523 	/* Nothing to do */
2524 	if (sc->bge_rx_saved_considx ==
2525 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
2526 		return;
2527 
2528 	ifp = sc->bge_ifp;
2529 
2530 	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2531 	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
2532 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2533 	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
2534 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2535 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2536 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2537 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
2538 		    BUS_DMASYNC_POSTREAD);
2539 	}
2540 
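	/*
	 * Walk the RX return ring from our saved consumer index up to the
	 * producer index reported in the status block, handing each
	 * completed frame to the stack.
	 */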
2541 	while(sc->bge_rx_saved_considx !=
2542 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2543 		struct bge_rx_bd	*cur_rx;
2544 		u_int32_t		rxidx;
2545 		struct mbuf		*m = NULL;
2546 		u_int16_t		vlan_tag = 0;
2547 		int			have_tag = 0;
2548 
2549 #ifdef DEVICE_POLLING
2550 		if (ifp->if_capenable & IFCAP_POLLING) {
2551 			if (sc->rxcycles <= 0)
2552 				break;
2553 			sc->rxcycles--;
2554 		}
2555 #endif
2556 
2557 		cur_rx =
2558 	    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2559 
2560 		rxidx = cur_rx->bge_idx;
2561 		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2562 
2563 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2564 			have_tag = 1;
2565 			vlan_tag = cur_rx->bge_vlan_tag;
2566 		}
2567 
2568 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2569 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2570 			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
2571 			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
2572 			    BUS_DMASYNC_POSTREAD);
2573 			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
2574 			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
2575 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2576 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2577 			jumbocnt++;
2578 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2579 				ifp->if_ierrors++;
2580 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2581 				continue;
2582 			}
2583 			if (bge_newbuf_jumbo(sc,
2584 			    sc->bge_jumbo, NULL) == ENOBUFS) {
2585 				ifp->if_ierrors++;
2586 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2587 				continue;
2588 			}
2589 		} else {
2590 			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2591 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2592 			    sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2593 			    BUS_DMASYNC_POSTREAD);
2594 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2595 			    sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2596 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2597 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2598 			stdcnt++;
2599 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2600 				ifp->if_ierrors++;
2601 				bge_newbuf_std(sc, sc->bge_std, m);
2602 				continue;
2603 			}
2604 			if (bge_newbuf_std(sc, sc->bge_std,
2605 			    NULL) == ENOBUFS) {
2606 				ifp->if_ierrors++;
2607 				bge_newbuf_std(sc, sc->bge_std, m);
2608 				continue;
2609 			}
2610 		}
2611 
2612 		ifp->if_ipackets++;
2613 #ifndef __NO_STRICT_ALIGNMENT
2614 		/*
2615 		 * For architectures with strict alignment we must make sure
2616 		 * the payload is aligned.
2617 		 */
2618 		if (sc->bge_rx_alignment_bug) {
2619 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2620 			    cur_rx->bge_len);
2621 			m->m_data += ETHER_ALIGN;
2622 		}
2623 #endif
2624 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2625 		m->m_pkthdr.rcvif = ifp;
2626 
2627 		if (ifp->if_capenable & IFCAP_RXCSUM) {
2628 			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2629 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2630 				if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2631 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2632 			}
2633 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
2634 			    m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
2635 				m->m_pkthdr.csum_data =
2636 				    cur_rx->bge_tcp_udp_csum;
2637 				m->m_pkthdr.csum_flags |=
2638 				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2639 			}
2640 		}
2641 
2642 		/*
2643 		 * If we received a packet with a vlan tag,
2644 		 * attach that information to the packet.
2645 		 */
2646 		if (have_tag) {
2647 			VLAN_INPUT_TAG(ifp, m, vlan_tag);
2648 			if (m == NULL)
2649 				continue;
2650 		}
2651 
2652 		BGE_UNLOCK(sc);
2653 		(*ifp->if_input)(ifp, m);
2654 		BGE_LOCK(sc);
2655 	}
2656 
2657 	if (stdcnt > 0)
2658 		bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2659 		    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
2660 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2661 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2662 		if (jumbocnt > 0)
2663 			bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2664 			    sc->bge_cdata.bge_rx_jumbo_ring_map,
2665 			    BUS_DMASYNC_PREWRITE);
2666 	}
2667 
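	/* Tell the NIC our updated consumer and producer indices. */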
2668 	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2669 	if (stdcnt)
2670 		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2671 	if (jumbocnt)
2672 		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2673 }
2674 
2675 static void
2676 bge_txeof(sc)
2677 	struct bge_softc *sc;
2678 {
2679 	struct bge_tx_bd *cur_tx = NULL;
2680 	struct ifnet *ifp;
2681 
2682 	BGE_LOCK_ASSERT(sc);
2683 
2684 	/* Nothing to do */
2685 	if (sc->bge_tx_saved_considx ==
2686 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
2687 		return;
2688 
2689 	ifp = sc->bge_ifp;
2690 
2691 	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
2692 	    sc->bge_cdata.bge_tx_ring_map,
2693 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2694 	/*
2695 	 * Go through our tx ring and free mbufs for those
2696 	 * frames that have been sent.
2697 	 */
2698 	while (sc->bge_tx_saved_considx !=
2699 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2700 		u_int32_t		idx = 0;
2701 
2702 		idx = sc->bge_tx_saved_considx;
2703 		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2704 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2705 			ifp->if_opackets++;
2706 		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2707 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2708 			    sc->bge_cdata.bge_tx_dmamap[idx],
2709 			    BUS_DMASYNC_POSTWRITE);
2710 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2711 			    sc->bge_cdata.bge_tx_dmamap[idx]);
2712 			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2713 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2714 		}
2715 		sc->bge_txcnt--;
2716 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2717 		ifp->if_timer = 0;
2718 	}
2719 
2720 	if (cur_tx != NULL)
2721 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2722 }
2723 
2724 #ifdef DEVICE_POLLING
2725 static void
2726 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2727 {
2728 	struct bge_softc *sc = ifp->if_softc;
2729 
2730 	BGE_LOCK(sc);
2731 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2732 		bge_poll_locked(ifp, cmd, count);
2733 	BGE_UNLOCK(sc);
2734 }
2735 
2736 static void
2737 bge_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
2738 {
2739 	struct bge_softc *sc = ifp->if_softc;
2740 	uint32_t statusword;
2741 
2742 	BGE_LOCK_ASSERT(sc);
2743 
2744 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2745 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2746 
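	/*
	 * Fetch and clear the status word that the NIC writes into the
	 * host status block.
	 */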
2747 	statusword = atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
2748 
2749 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2750 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
2751 
2752 	/* Note link event; it is handled below on POLL_AND_CHECK_STATUS. */
2753 	if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
2754 		sc->bge_link_evt++;
2755 
2756 	if (cmd == POLL_AND_CHECK_STATUS)
2757 		if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2758 		    sc->bge_chipid != BGE_CHIPID_BCM5700_B1) ||
2759 		    sc->bge_link_evt || sc->bge_tbi)
2760 			bge_link_upd(sc);
2761 
2762 	sc->rxcycles = count;
2763 	bge_rxeof(sc);
2764 	bge_txeof(sc);
2765 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2766 		bge_start_locked(ifp);
2767 }
2768 #endif /* DEVICE_POLLING */
2769 
2770 static void
2771 bge_intr(xsc)
2772 	void *xsc;
2773 {
2774 	struct bge_softc *sc;
2775 	struct ifnet *ifp;
2776 	uint32_t statusword;
2777 
2778 	sc = xsc;
2779 
2780 	BGE_LOCK(sc);
2781 
2782 	ifp = sc->bge_ifp;
2783 
2784 #ifdef DEVICE_POLLING
2785 	if (ifp->if_capenable & IFCAP_POLLING) {
2786 		BGE_UNLOCK(sc);
2787 		return;
2788 	}
2789 #endif
2790 
2791 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2792 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2793 
2794 	statusword =
2795 	    atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
2796 
2797 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2798 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
2799 
2800 #ifdef notdef
2801 	/* Avoid this for now -- checking this register is expensive. */
2802 	/* Make sure this is really our interrupt. */
2803 	if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2804 		return;
2805 #endif
2806 	/* Ack interrupt and stop others from occurring. */
2807 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2808 
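	/* Process a possible link state change. */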
2809 	if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2810 	    sc->bge_chipid != BGE_CHIPID_BCM5700_B1) ||
2811 	    statusword & BGE_STATFLAG_LINKSTATE_CHANGED || sc->bge_link_evt)
2812 		bge_link_upd(sc);
2813 
2814 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2815 		/* Check RX return ring producer/consumer */
2816 		bge_rxeof(sc);
2817 
2818 		/* Check TX ring producer/consumer */
2819 		bge_txeof(sc);
2820 	}
2821 
2822 	/* Re-enable interrupts. */
2823 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2824 
2825 	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2826 	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2827 		bge_start_locked(ifp);
2828 
2829 	BGE_UNLOCK(sc);
2830 
2831 	return;
2832 }
2833 
2834 static void
2835 bge_tick_locked(sc)
2836 	struct bge_softc *sc;
2837 {
2838 	struct mii_data *mii = NULL;
2839 
2840 	BGE_LOCK_ASSERT(sc);
2841 
2842 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2843 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
2844 		bge_stats_update_regs(sc);
2845 	else
2846 		bge_stats_update(sc);
2847 
2848 	if (!sc->bge_tbi) {
2849 		mii = device_get_softc(sc->bge_miibus);
2850 		mii_tick(mii);
2851 	} else {
2852 		/*
2853 		 * Since auto-polling can't be used in TBI mode, we poll the
2854 		 * link status manually: register a pending link event and
2855 		 * trigger an interrupt.
2856 		 */
2857 #ifdef DEVICE_POLLING
2858 		/* In polling mode we poll link state in bge_poll_locked() */
2859 		if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
2860 #endif
2861 		{
2862 		sc->bge_link_evt++;
2863 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
2864 		}
2865 	}
2866 
2867 	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
2868 }
2869 
2870 static void
2871 bge_tick(xsc)
2872 	void *xsc;
2873 {
2874 	struct bge_softc *sc;
2875 
2876 	sc = xsc;
2877 
2878 	BGE_LOCK(sc);
2879 	bge_tick_locked(sc);
2880 	BGE_UNLOCK(sc);
2881 }
2882 
2883 static void
2884 bge_stats_update_regs(sc)
2885 	struct bge_softc *sc;
2886 {
2887 	struct ifnet *ifp;
2888 	struct bge_mac_stats_regs stats;
2889 	u_int32_t *s;
2890 	u_long cnt;			/* current register value */
2891 	int i;
2892 
2893 	ifp = sc->bge_ifp;
2894 
2895 	s = (u_int32_t *)&stats;
2896 	for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2897 		*s = CSR_READ_4(sc, BGE_RX_STATS + i);
2898 		s++;
2899 	}
2900 
2901 	cnt = stats.dot3StatsSingleCollisionFrames +
2902 	    stats.dot3StatsMultipleCollisionFrames +
2903 	    stats.dot3StatsExcessiveCollisions +
2904 	    stats.dot3StatsLateCollisions;
2905 	ifp->if_collisions += cnt >= sc->bge_tx_collisions ?
2906 	    cnt - sc->bge_tx_collisions : cnt;
2907 	sc->bge_tx_collisions = cnt;
2908 }
2909 
2910 static void
2911 bge_stats_update(sc)
2912 	struct bge_softc *sc;
2913 {
2914 	struct ifnet *ifp;
2915 	bus_size_t stats;
2916 	u_long cnt;			/* current register value */
2917 
2918 	ifp = sc->bge_ifp;
2919 
2920 	stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2921 
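	/*
	 * READ_STAT fetches one 32-bit counter word from the NIC's
	 * statistics block through the memory window.
	 */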
2922 #define READ_STAT(sc, stats, stat) \
2923 	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
2924 
2925 	cnt = READ_STAT(sc, stats,
2926 	    txstats.dot3StatsSingleCollisionFrames.bge_addr_lo);
2927 	cnt += READ_STAT(sc, stats,
2928 	    txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo);
2929 	cnt += READ_STAT(sc, stats,
2930 	    txstats.dot3StatsExcessiveCollisions.bge_addr_lo);
2931 	cnt += READ_STAT(sc, stats,
2932 	    txstats.dot3StatsLateCollisions.bge_addr_lo);
2933 	ifp->if_collisions += cnt >= sc->bge_tx_collisions ?
2934 	    cnt - sc->bge_tx_collisions : cnt;
2935 	sc->bge_tx_collisions = cnt;
2936 
2937 	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
2938 	ifp->if_ierrors += cnt >= sc->bge_rx_discards ?
2939 	    cnt - sc->bge_rx_discards : cnt;
2940 	sc->bge_rx_discards = cnt;
2941 
2942 	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
2943 	ifp->if_oerrors += cnt >= sc->bge_tx_discards ?
2944 	    cnt - sc->bge_tx_discards : cnt;
2945 	sc->bge_tx_discards = cnt;
2946 
2947 #undef READ_STAT
2948 }
2949 
2950 /*
2951  * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
2952  * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
2953  * but when such padded frames employ the bge IP/TCP checksum offload,
2954  * the hardware checksum assist gives incorrect results (possibly
2955  * from incorporating its own padding into the UDP/TCP checksum; who knows).
2956  * If we pad such runts with zeros, the onboard checksum comes out correct.
2957  */
2958 static __inline int
2959 bge_cksum_pad(struct mbuf *m)
2960 {
2961 	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
2962 	struct mbuf *last;
2963 
2964 	/* If there's only the packet-header and we can pad there, use it. */
2965 	if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
2966 	    M_TRAILINGSPACE(m) >= padlen) {
2967 		last = m;
2968 	} else {
2969 		/*
2970 		 * Walk packet chain to find last mbuf. We will either
2971 		 * pad there, or append a new mbuf and pad it.
2972 		 */
2973 		for (last = m; last->m_next != NULL; last = last->m_next);
2974 		if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
2975 			/* Allocate new empty mbuf, pad it. Compact later. */
2976 			struct mbuf *n;
2977 
2978 			MGET(n, M_DONTWAIT, MT_DATA);
2979 			if (n == NULL)
2980 				return (ENOBUFS);
2981 			n->m_len = 0;
2982 			last->m_next = n;
2983 			last = n;
2984 		}
2985 	}
2986 
2987 	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
2988 	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
2989 	last->m_len += padlen;
2990 	m->m_pkthdr.len += padlen;
2991 
2992 	return (0);
2993 }
2994 
2995 /*
2996  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2997  * pointers to descriptors.
2998  */
2999 static int
3000 bge_encap(sc, m_head, txidx)
3001 	struct bge_softc *sc;
3002 	struct mbuf *m_head;
3003 	uint32_t *txidx;
3004 {
3005 	bus_dma_segment_t	segs[BGE_NSEG_NEW];
3006 	bus_dmamap_t		map;
3007 	struct bge_tx_bd	*d = NULL;
3008 	struct m_tag		*mtag;
3009 	uint32_t		idx = *txidx;
3010 	uint16_t		csum_flags = 0;
3011 	int			nsegs, i, error;
3012 
3013 	if (m_head->m_pkthdr.csum_flags) {
3014 		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
3015 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3016 		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
3017 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3018 			if (m_head->m_pkthdr.len < ETHER_MIN_NOPAD &&
3019 			    bge_cksum_pad(m_head) != 0)
3020 				return (ENOBUFS);
3021 		}
3022 		if (m_head->m_flags & M_LASTFRAG)
3023 			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3024 		else if (m_head->m_flags & M_FRAG)
3025 			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3026 	}
3027 
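	/* Check for an 802.1q VLAN tag attached to this packet. */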
3028 	mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m_head);
3029 
3030 	map = sc->bge_cdata.bge_tx_dmamap[idx];
3031 	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map,
3032 	    m_head, segs, &nsegs, BUS_DMA_NOWAIT);
3033 	if (error) {
3034 		if (error == EFBIG) {
3035 			struct mbuf *m0;
3036 
3037 			m0 = m_defrag(m_head, M_DONTWAIT);
3038 			if (m0 == NULL)
3039 				return (ENOBUFS);
3040 			m_head = m0;
3041 			error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag,
3042 			    map, m_head, segs, &nsegs, BUS_DMA_NOWAIT);
3043 		}
3044 		if (error)
3045 			return (error);
3046 	}
3047 
3048 	/*
3049 	 * Sanity check: avoid coming within 16 descriptors
3050 	 * of the end of the ring.
3051 	 */
3052 	if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
3053 		bus_dmamap_unload(sc->bge_cdata.bge_mtag, map);
3054 		return (ENOBUFS);
3055 	}
3056 
3057 	bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
3058 
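	/*
	 * Fill in a transmit descriptor for each DMA segment of the
	 * mbuf chain, advancing the ring index as we go.
	 */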
3059 	for (i = 0; ; i++) {
3060 		d = &sc->bge_ldata.bge_tx_ring[idx];
3061 		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
3062 		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
3063 		d->bge_len = segs[i].ds_len;
3064 		d->bge_flags = csum_flags;
3065 		if (i == nsegs - 1)
3066 			break;
3067 		BGE_INC(idx, BGE_TX_RING_CNT);
3068 	}
3069 
3070 	/* Mark the last segment as end of packet... */
3071 	d->bge_flags |= BGE_TXBDFLAG_END;
3072 	/* ... and put VLAN tag into first segment.  */
3073 	d = &sc->bge_ldata.bge_tx_ring[*txidx];
3074 	if (mtag != NULL) {
3075 		d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3076 		d->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
3077 	} else
3078 		d->bge_vlan_tag = 0;
3079 
3080 	/*
3081 	 * Ensure that the map for this transmission
3082 	 * is placed at the array index of the last descriptor
3083 	 * in this chain.
3084 	 */
3085 	sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
3086 	sc->bge_cdata.bge_tx_dmamap[idx] = map;
3087 	sc->bge_cdata.bge_tx_chain[idx] = m_head;
3088 	sc->bge_txcnt += nsegs;
3089 
3090 	BGE_INC(idx, BGE_TX_RING_CNT);
3091 	*txidx = idx;
3092 
3093 	return (0);
3094 }
3095 
3096 /*
3097  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3098  * to the mbuf data regions directly in the transmit descriptors.
3099  */
3100 static void
3101 bge_start_locked(ifp)
3102 	struct ifnet *ifp;
3103 {
3104 	struct bge_softc *sc;
3105 	struct mbuf *m_head = NULL;
3106 	uint32_t prodidx;
3107 	int count = 0;
3108 
3109 	sc = ifp->if_softc;
3110 
3111 	if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3112 		return;
3113 
3114 	prodidx = sc->bge_tx_prodidx;
3115 
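	/*
	 * Keep dequeueing packets as long as the descriptor at the
	 * current producer index is free.
	 */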
3116 	while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3117 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3118 		if (m_head == NULL)
3119 			break;
3120 
3121 		/*
3122 		 * XXX
3123 		 * The code inside the if() block is never reached since we
3124 		 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3125 		 * requests to checksum TCP/UDP in a fragmented packet.
3126 		 *
3127 		 * XXX
3128 		 * safety overkill.  If this is a fragmented packet chain
3129 		 * with delayed TCP/UDP checksums, then only encapsulate
3130 		 * it if we have enough descriptors to handle the entire
3131 		 * chain at once.
3132 		 * (paranoia -- may not actually be needed)
3133 		 */
3134 		if (m_head->m_flags & M_FIRSTFRAG &&
3135 		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3136 			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3137 			    m_head->m_pkthdr.csum_data + 16) {
3138 				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3139 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3140 				break;
3141 			}
3142 		}
3143 
3144 		/*
3145 		 * Pack the data into the transmit ring. If we
3146 		 * don't have room, set the OACTIVE flag and wait
3147 		 * for the NIC to drain the ring.
3148 		 */
3149 		if (bge_encap(sc, m_head, &prodidx)) {
3150 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3151 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3152 			break;
3153 		}
3154 		++count;
3155 
3156 		/*
3157 		 * If there's a BPF listener, bounce a copy of this frame
3158 		 * to him.
3159 		 */
3160 		BPF_MTAP(ifp, m_head);
3161 	}
3162 
3163 	if (count == 0) {
3164 		/* no packets were dequeued */
3165 		return;
3166 	}
3167 
3168 	/* Transmit */
3169 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3170 	/* 5700 b2 errata */
3171 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3172 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3173 
3174 	sc->bge_tx_prodidx = prodidx;
3175 
3176 	/*
3177 	 * Set a timeout in case the chip goes out to lunch.
3178 	 */
3179 	ifp->if_timer = 5;
3180 
3181 	return;
3182 }
3183 
3184 /*
3185  * Main transmit entry point: acquire the driver lock and hand the send
3186  * queue to bge_start_locked().
3187  */
3188 static void
3189 bge_start(ifp)
3190 	struct ifnet *ifp;
3191 {
3192 	struct bge_softc *sc;
3193 
3194 	sc = ifp->if_softc;
3195 	BGE_LOCK(sc);
3196 	bge_start_locked(ifp);
3197 	BGE_UNLOCK(sc);
3198 }
3199 
3200 static void
3201 bge_init_locked(sc)
3202 	struct bge_softc *sc;
3203 {
3204 	struct ifnet *ifp;
3205 	u_int16_t *m;
3206 
3207 	BGE_LOCK_ASSERT(sc);
3208 
3209 	ifp = sc->bge_ifp;
3210 
3211 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3212 		return;
3213 
3214 	/* Cancel pending I/O and flush buffers. */
3215 	bge_stop(sc);
3216 	bge_reset(sc);
3217 	bge_chipinit(sc);
3218 
3219 	/*
3220 	 * Init the various state machines, ring
3221 	 * control blocks and firmware.
3222 	 */
3223 	if (bge_blockinit(sc)) {
3224 		device_printf(sc->bge_dev, "initialization failure\n");
3225 		return;
3226 	}
3227 
3228 	ifp = sc->bge_ifp;
3229 
3230 	/* Specify MTU. */
3231 	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3232 	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
3233 
3234 	/* Load our MAC address. */
3235 	m = (u_int16_t *)IF_LLADDR(sc->bge_ifp);
3236 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3237 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3238 
3239 	/* Enable or disable promiscuous mode as needed. */
3240 	if (ifp->if_flags & IFF_PROMISC) {
3241 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3242 	} else {
3243 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3244 	}
3245 
3246 	/* Program multicast filter. */
3247 	bge_setmulti(sc);
3248 
3249 	/* Init RX ring. */
3250 	bge_init_rx_ring_std(sc);
3251 
3252 	/*
3253 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3254 	 * memory to ensure that the chip has in fact read the first
3255 	 * entry of the ring.
3256 	 */
3257 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3258 		u_int32_t		v, i;
3259 		for (i = 0; i < 10; i++) {
3260 			DELAY(20);
3261 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3262 			if (v == (MCLBYTES - ETHER_ALIGN))
3263 				break;
3264 		}
3265 		if (i == 10)
3266 			device_printf (sc->bge_dev,
3267 			    "5705 A0 chip failed to load RX ring\n");
3268 	}
3269 
3270 	/* Init jumbo RX ring. */
3271 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3272 		bge_init_rx_ring_jumbo(sc);
3273 
3274 	/* Init our RX return ring index */
3275 	sc->bge_rx_saved_considx = 0;
3276 
3277 	/* Init TX ring. */
3278 	bge_init_tx_ring(sc);
3279 
3280 	/* Turn on transmitter */
3281 	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3282 
3283 	/* Turn on receiver */
3284 	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3285 
3286 	/* Tell firmware we're alive. */
3287 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3288 
3289 #ifdef DEVICE_POLLING
3290 	/* Disable interrupts if we are polling. */
3291 	if (ifp->if_capenable & IFCAP_POLLING) {
3292 		BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3293 		    BGE_PCIMISCCTL_MASK_PCI_INTR);
3294 		CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3295 		CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3296 		CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3297 	} else
3298 #endif
3299 
3300 	/* Enable host interrupts. */
3301 	{
3302 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3303 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3304 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3305 	}
3306 
3307 	bge_ifmedia_upd(ifp);
3308 
3309 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3310 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3311 
3312 	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3313 }
3314 
3315 static void
3316 bge_init(xsc)
3317 	void *xsc;
3318 {
3319 	struct bge_softc *sc = xsc;
3320 
3321 	BGE_LOCK(sc);
3322 	bge_init_locked(sc);
3323 	BGE_UNLOCK(sc);
3324 
3325 	return;
3326 }
3327 
3328 /*
3329  * Set media options.
3330  */
3331 static int
3332 bge_ifmedia_upd(ifp)
3333 	struct ifnet *ifp;
3334 {
3335 	struct bge_softc *sc;
3336 	struct mii_data *mii;
3337 	struct ifmedia *ifm;
3338 
3339 	sc = ifp->if_softc;
3340 	ifm = &sc->bge_ifmedia;
3341 
3342 	/* If this is a 1000baseX NIC, enable the TBI port. */
3343 	if (sc->bge_tbi) {
3344 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3345 			return(EINVAL);
3346 		switch(IFM_SUBTYPE(ifm->ifm_media)) {
3347 		case IFM_AUTO:
3348 #ifndef BGE_FAKE_AUTONEG
3349 			/*
3350 			 * The BCM5704 ASIC appears to have a special
3351 			 * mechanism for programming the autoneg
3352 			 * advertisement registers in TBI mode.
3353 			 */
3354 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3355 				uint32_t sgdig;
3356 				CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3357 				sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3358 				sgdig |= BGE_SGDIGCFG_AUTO|
3359 				    BGE_SGDIGCFG_PAUSE_CAP|
3360 				    BGE_SGDIGCFG_ASYM_PAUSE;
3361 				CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3362 				    sgdig|BGE_SGDIGCFG_SEND);
3363 				DELAY(5);
3364 				CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3365 			}
3366 #endif
3367 			break;
3368 		case IFM_1000_SX:
3369 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3370 				BGE_CLRBIT(sc, BGE_MAC_MODE,
3371 				    BGE_MACMODE_HALF_DUPLEX);
3372 			} else {
3373 				BGE_SETBIT(sc, BGE_MAC_MODE,
3374 				    BGE_MACMODE_HALF_DUPLEX);
3375 			}
3376 			break;
3377 		default:
3378 			return(EINVAL);
3379 		}
3380 		return(0);
3381 	}
3382 
3383 	sc->bge_link_evt++;
3384 	mii = device_get_softc(sc->bge_miibus);
3385 	if (mii->mii_instance) {
3386 		struct mii_softc *miisc;
3387 		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3388 		    miisc = LIST_NEXT(miisc, mii_list))
3389 			mii_phy_reset(miisc);
3390 	}
3391 	mii_mediachg(mii);
3392 
3393 	return(0);
3394 }
3395 
3396 /*
3397  * Report current media status.
3398  */
3399 static void
3400 bge_ifmedia_sts(ifp, ifmr)
3401 	struct ifnet *ifp;
3402 	struct ifmediareq *ifmr;
3403 {
3404 	struct bge_softc *sc;
3405 	struct mii_data *mii;
3406 
3407 	sc = ifp->if_softc;
3408 
3409 	if (sc->bge_tbi) {
3410 		ifmr->ifm_status = IFM_AVALID;
3411 		ifmr->ifm_active = IFM_ETHER;
3412 		if (CSR_READ_4(sc, BGE_MAC_STS) &
3413 		    BGE_MACSTAT_TBI_PCS_SYNCHED)
3414 			ifmr->ifm_status |= IFM_ACTIVE;
3415 		ifmr->ifm_active |= IFM_1000_SX;
3416 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3417 			ifmr->ifm_active |= IFM_HDX;
3418 		else
3419 			ifmr->ifm_active |= IFM_FDX;
3420 		return;
3421 	}
3422 
3423 	mii = device_get_softc(sc->bge_miibus);
3424 	mii_pollstat(mii);
3425 	ifmr->ifm_active = mii->mii_media_active;
3426 	ifmr->ifm_status = mii->mii_media_status;
3427 
3428 	return;
3429 }
3430 
3431 static int
3432 bge_ioctl(ifp, command, data)
3433 	struct ifnet *ifp;
3434 	u_long command;
3435 	caddr_t data;
3436 {
3437 	struct bge_softc *sc = ifp->if_softc;
3438 	struct ifreq *ifr = (struct ifreq *) data;
3439 	int mask, error = 0;
3440 	struct mii_data *mii;
3441 
3442 	switch(command) {
3443 	case SIOCSIFMTU:
3444 		/* Disallow jumbo frames on 5705 and 5750. */
3445 		if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
3446 		      sc->bge_asicrev == BGE_ASICREV_BCM5750) &&
3447 		    ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
3448 			error = EINVAL;
3449 		else {
3450 			ifp->if_mtu = ifr->ifr_mtu;
3451 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3452 			bge_init(sc);
3453 		}
3454 		break;
3455 	case SIOCSIFFLAGS:
3456 		BGE_LOCK(sc);
3457 		if (ifp->if_flags & IFF_UP) {
3458 			/*
3459 			 * If only the state of the PROMISC flag changed,
3460 			 * then just use the 'set promisc mode' command
3461 			 * instead of reinitializing the entire NIC. Doing
3462 			 * a full re-init means reloading the firmware and
3463 			 * waiting for it to start up, which may take a
3464 			 * second or two.  Similarly for ALLMULTI.
3465 			 */
3466 			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3467 			    ifp->if_flags & IFF_PROMISC &&
3468 			    !(sc->bge_if_flags & IFF_PROMISC)) {
3469 				BGE_SETBIT(sc, BGE_RX_MODE,
3470 				    BGE_RXMODE_RX_PROMISC);
3471 			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3472 			    !(ifp->if_flags & IFF_PROMISC) &&
3473 			    sc->bge_if_flags & IFF_PROMISC) {
3474 				BGE_CLRBIT(sc, BGE_RX_MODE,
3475 				    BGE_RXMODE_RX_PROMISC);
3476 			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3477 			    (ifp->if_flags ^ sc->bge_if_flags) & IFF_ALLMULTI) {
3478 				bge_setmulti(sc);
3479 			} else
3480 				bge_init_locked(sc);
3481 		} else {
3482 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3483 				bge_stop(sc);
3484 			}
3485 		}
3486 		sc->bge_if_flags = ifp->if_flags;
3487 		BGE_UNLOCK(sc);
3488 		error = 0;
3489 		break;
3490 	case SIOCADDMULTI:
3491 	case SIOCDELMULTI:
3492 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3493 			BGE_LOCK(sc);
3494 			bge_setmulti(sc);
3495 			BGE_UNLOCK(sc);
3496 			error = 0;
3497 		}
3498 		break;
3499 	case SIOCSIFMEDIA:
3500 	case SIOCGIFMEDIA:
3501 		if (sc->bge_tbi) {
3502 			error = ifmedia_ioctl(ifp, ifr,
3503 			    &sc->bge_ifmedia, command);
3504 		} else {
3505 			mii = device_get_softc(sc->bge_miibus);
3506 			error = ifmedia_ioctl(ifp, ifr,
3507 			    &mii->mii_media, command);
3508 		}
3509 		break;
3510 	case SIOCSIFCAP:
3511 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3512 #ifdef DEVICE_POLLING
3513 		if (mask & IFCAP_POLLING) {
3514 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
3515 				error = ether_poll_register(bge_poll, ifp);
3516 				if (error)
3517 					return(error);
3518 				BGE_LOCK(sc);
3519 				BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3520 				    BGE_PCIMISCCTL_MASK_PCI_INTR);
3521 				CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3522 				CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3523 				CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3524 				ifp->if_capenable |= IFCAP_POLLING;
3525 				BGE_UNLOCK(sc);
3526 			} else {
3527 				error = ether_poll_deregister(ifp);
3528 				/* Enable interrupt even in error case */
3529 				BGE_LOCK(sc);
3530 				CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
3531 				CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
3532 				BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
3533 				    BGE_PCIMISCCTL_MASK_PCI_INTR);
3534 				CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3535 				ifp->if_capenable &= ~IFCAP_POLLING;
3536 				BGE_UNLOCK(sc);
3537 			}
3538 		}
3539 #endif
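		/*
		 * Toggling hardware checksum offload also updates
		 * if_hwassist so the stack knows whether it must compute
		 * checksums in software.
		 */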
3540 		if (mask & IFCAP_HWCSUM) {
3541 			ifp->if_capenable ^= IFCAP_HWCSUM;
3542 			if (IFCAP_HWCSUM & ifp->if_capenable &&
3543 			    IFCAP_HWCSUM & ifp->if_capabilities)
3544 				ifp->if_hwassist = BGE_CSUM_FEATURES;
3545 			else
3546 				ifp->if_hwassist = 0;
3547 			VLAN_CAPABILITIES(ifp);
3548 		}
3549 		break;
3550 	default:
3551 		error = ether_ioctl(ifp, command, data);
3552 		break;
3553 	}
3554 
3555 	return(error);
3556 }
3557 
3558 static void
3559 bge_watchdog(ifp)
3560 	struct ifnet *ifp;
3561 {
3562 	struct bge_softc *sc;
3563 
3564 	sc = ifp->if_softc;
3565 
3566 	if_printf(ifp, "watchdog timeout -- resetting\n");
3567 
3568 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
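	/*
	 * Clear IFF_DRV_RUNNING so bge_init() performs a full
	 * reinitialization of the chip.
	 */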
3569 	bge_init(sc);
3570 
3571 	ifp->if_oerrors++;
3574 }
3575 
3576 /*
3577  * Stop the adapter and free any mbufs allocated to the
3578  * RX and TX lists.
3579  */
3580 static void
3581 bge_stop(sc)
3582 	struct bge_softc *sc;
3583 {
3584 	struct ifnet *ifp;
3585 	struct ifmedia_entry *ifm;
3586 	struct mii_data *mii = NULL;
3587 	int mtmp, itmp;
3588 
3589 	BGE_LOCK_ASSERT(sc);
3590 
3591 	ifp = sc->bge_ifp;
3592 
3593 	if (!sc->bge_tbi)
3594 		mii = device_get_softc(sc->bge_miibus);
3595 
3596 	callout_stop(&sc->bge_stat_ch);
3597 
3598 	/*
3599 	 * Disable all of the receiver blocks
3600 	 */
3601 	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3602 	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3603 	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3604 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3605 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3606 		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3607 	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3608 	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3609 	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3610 
3611 	/*
3612 	 * Disable all of the transmit blocks
3613 	 */
3614 	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3615 	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3616 	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3617 	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3618 	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3619 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3620 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3621 		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3622 	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3623 
3624 	/*
3625 	 * Shut down all of the memory managers and related
3626 	 * state machines.
3627 	 */
3628 	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3629 	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3630 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3631 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3632 		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
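	/* Pulse the FTQ reset register to clear the chip's internal queues. */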
3633 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3634 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3635 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3636 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
3637 		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3638 		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3639 	}
3640 
3641 	/* Disable host interrupts. */
3642 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3643 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3644 
3645 	/*
3646 	 * Tell firmware we're shutting down.
3647 	 */
3648 	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3649 
3650 	/* Free the RX lists. */
3651 	bge_free_rx_ring_std(sc);
3652 
3653 	/* Free jumbo RX list. */
3654 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3655 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3656 		bge_free_rx_ring_jumbo(sc);
3657 
3658 	/* Free TX buffers. */
3659 	bge_free_tx_ring(sc);
3660 
3661 	/*
3662 	 * Isolate/power down the PHY, but leave the media selection
3663 	 * unchanged so that things will be put back to normal when
3664 	 * we bring the interface back up.
3665 	 */
3666 	if (!sc->bge_tbi) {
3667 		itmp = ifp->if_flags;
3668 		ifp->if_flags |= IFF_UP;
3669 		/*
3670 		 * If we are called from bge_detach(), mii is already NULL.
3671 		 */
3672 		if (mii != NULL) {
3673 			ifm = mii->mii_media.ifm_cur;
3674 			mtmp = ifm->ifm_media;
3675 			ifm->ifm_media = IFM_ETHER|IFM_NONE;
3676 			mii_mediachg(mii);
3677 			ifm->ifm_media = mtmp;
3678 		}
3679 		ifp->if_flags = itmp;
3680 	}
3681 
3682 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3683 
3684 	/*
3685 	 * We can't just call bge_link_upd() here because the chip is almost
3686 	 * stopped, so the bge_link_upd -> bge_tick_locked -> bge_stats_update
3687 	 * sequence may lead to a hardware deadlock.  Instead, just clear the
3688 	 * MAC's notion of the link state (the PHY may still report link UP).
3689 	 */
3690 	if (bootverbose && sc->bge_link)
3691 		if_printf(sc->bge_ifp, "link DOWN\n");
3692 	sc->bge_link = 0;
3693 
3694 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3695 }
3696 
3697 /*
3698  * Stop all chip I/O so that the kernel's probe routines don't
3699  * get confused by errant DMAs when rebooting.
3700  */
3701 static void
3702 bge_shutdown(dev)
3703 	device_t dev;
3704 {
3705 	struct bge_softc *sc;
3706 
3707 	sc = device_get_softc(dev);
3708 
3709 	BGE_LOCK(sc);
3710 	bge_stop(sc);
3711 	bge_reset(sc);
3712 	BGE_UNLOCK(sc);
3715 }
3716 
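/*
 * Stop the chip when the system suspends; bge_resume() reinitializes
 * the interface if it was marked up at suspend time.
 */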
3717 static int
3718 bge_suspend(device_t dev)
3719 {
3720 	struct bge_softc *sc;
3721 
3722 	sc = device_get_softc(dev);
3723 	BGE_LOCK(sc);
3724 	bge_stop(sc);
3725 	BGE_UNLOCK(sc);
3726 
3727 	return (0);
3728 }
3729 
3730 static int
3731 bge_resume(device_t dev)
3732 {
3733 	struct bge_softc *sc;
3734 	struct ifnet *ifp;
3735 
3736 	sc = device_get_softc(dev);
3737 	BGE_LOCK(sc);
3738 	ifp = sc->bge_ifp;
3739 	if (ifp->if_flags & IFF_UP) {
3740 		bge_init_locked(sc);
3741 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3742 			bge_start_locked(ifp);
3743 	}
3744 	BGE_UNLOCK(sc);
3745 
3746 	return (0);
3747 }
3748 
3749 static void
3750 bge_link_upd(sc)
3751 	struct bge_softc *sc;
3752 {
3753 	struct mii_data *mii;
3754 	uint32_t link, status;
3755 
3756 	BGE_LOCK_ASSERT(sc);
3757 
3758 	/* Clear 'pending link event' flag */
3759 	sc->bge_link_evt = 0;
3760 
3761 	/*
3762 	 * Process link state changes.
3763 	 * Grrr. The link status word in the status block does
3764 	 * not work correctly on the BCM5700 rev AX and BX chips,
3765 	 * according to all available information. Hence, we have
3766 	 * to enable MII interrupts in order to properly obtain
3767 	 * async link changes. Unfortunately, this also means that
3768 	 * we have to read the MAC status register to detect link
3769 	 * changes, thereby adding an additional register access to
3770 	 * the interrupt handler.
3771 	 *
3772 	 * XXX: perhaps the link state detection procedure used for
3773 	 * BGE_CHIPID_BCM5700_B1 could also be used for other BCM5700 revisions.
3774 	 */
3775 
3776 	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3777 	    sc->bge_chipid != BGE_CHIPID_BCM5700_B1) {
3778 		status = CSR_READ_4(sc, BGE_MAC_STS);
3779 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
3780 			callout_stop(&sc->bge_stat_ch);
3781 			bge_tick_locked(sc);
3782 
3783 			mii = device_get_softc(sc->bge_miibus);
3784 			if (!sc->bge_link &&
3785 			    mii->mii_media_status & IFM_ACTIVE &&
3786 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3787 				sc->bge_link++;
3788 				if (bootverbose)
3789 					if_printf(sc->bge_ifp, "link UP\n");
3790 			} else if (sc->bge_link &&
3791 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
3792 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3793 				sc->bge_link = 0;
3794 				if (bootverbose)
3795 					if_printf(sc->bge_ifp, "link DOWN\n");
3796 			}
3797 
3798 			/* Clear the interrupt */
3799 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3800 			    BGE_EVTENB_MI_INTERRUPT);
3801 			bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
3802 			bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
3803 			    BRGPHY_INTRS);
3804 		}
3805 		return;
3806 	}
3807 
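	/*
	 * For fiber (TBI) interfaces there is no PHY to query; link state
	 * is taken from the PCS sync bit in the MAC status register.
	 */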
3808 	if (sc->bge_tbi) {
3809 		status = CSR_READ_4(sc, BGE_MAC_STS);
3810 		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3811 			if (!sc->bge_link) {
3812 				sc->bge_link++;
3813 				if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
3814 					BGE_CLRBIT(sc, BGE_MAC_MODE,
3815 					    BGE_MACMODE_TBI_SEND_CFGS);
3816 				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3817 				if (bootverbose)
3818 					if_printf(sc->bge_ifp, "link UP\n");
3819 				if_link_state_change(sc->bge_ifp, LINK_STATE_UP);
3820 			}
3821 		} else if (sc->bge_link) {
3822 			sc->bge_link = 0;
3823 			if (bootverbose)
3824 				if_printf(sc->bge_ifp, "link DOWN\n");
3825 			if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
3826 		}
3827 	/* Discard link events for MII/GMII cards if MI auto-polling is disabled. */
3828 	} else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
3829 		/*
3830 		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
3831 		 * bit in the status word always set.  Work around this bug by
3832 		 * reading the PHY link status directly.
3833 		 */
3834 		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
3835 
3836 		if (link != sc->bge_link ||
3837 		    sc->bge_asicrev == BGE_ASICREV_BCM5700) {
3838 			callout_stop(&sc->bge_stat_ch);
3839 			bge_tick_locked(sc);
3840 
3841 			mii = device_get_softc(sc->bge_miibus);
3842 			if (!sc->bge_link &&
3843 			    mii->mii_media_status & IFM_ACTIVE &&
3844 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3845 				sc->bge_link++;
3846 				if (bootverbose)
3847 					if_printf(sc->bge_ifp, "link UP\n");
3848 			} else if (sc->bge_link &&
3849 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
3850 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3851 				sc->bge_link = 0;
3852 				if (bootverbose)
3853 					if_printf(sc->bge_ifp, "link DOWN\n");
3854 			}
3855 		}
3856 	}
3857 
3858 	/* Clear the attention */
3859 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
3860 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
3861 	    BGE_MACSTAT_LINK_CHANGED);
3862 }
3863