xref: /freebsd/sys/dev/bge/if_bge.c (revision 66c14b21d3ab0b18376563ba643ddb49b4fd33dd)
1 /*-
2  * Copyright (c) 2001 Wind River Systems
3  * Copyright (c) 1997, 1998, 1999, 2001
4  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 /*
38  * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39  *
40  * The Broadcom BCM5700 is based on technology originally developed by
41  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45  * frames, highly configurable RX filtering, and 16 RX and TX queues
46  * (which, along with RX filter rules, can be used for QOS applications).
47  * Other features, such as TCP segmentation, may be available as part
48  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49  * firmware images can be stored in hardware and need not be compiled
50  * into the driver.
51  *
52  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53  * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54  *
55  * The BCM5701 is a single-chip solution incorporating both the BCM5700
56  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57  * does not support external SSRAM.
58  *
59  * Broadcom also produces a variation of the BCM5700 under the "Altima"
60  * brand name, which is functionally similar but lacks PCI-X support.
61  *
62  * Without external SSRAM, you can only have at most 4 TX rings,
63  * and the use of the mini RX ring is disabled. This seems to imply
64  * that these features are simply not available on the BCM5701. As a
65  * result, this driver does not implement any support for the mini RX
66  * ring.
67  */
68 
69 #ifdef HAVE_KERNEL_OPTION_HEADERS
70 #include "opt_device_polling.h"
71 #endif
72 
73 #include <sys/param.h>
74 #include <sys/endian.h>
75 #include <sys/systm.h>
76 #include <sys/sockio.h>
77 #include <sys/mbuf.h>
78 #include <sys/malloc.h>
79 #include <sys/kernel.h>
80 #include <sys/module.h>
81 #include <sys/socket.h>
82 
83 #include <net/if.h>
84 #include <net/if_arp.h>
85 #include <net/ethernet.h>
86 #include <net/if_dl.h>
87 #include <net/if_media.h>
88 
89 #include <net/bpf.h>
90 
91 #include <net/if_types.h>
92 #include <net/if_vlan_var.h>
93 
94 #include <netinet/in_systm.h>
95 #include <netinet/in.h>
96 #include <netinet/ip.h>
97 
98 #include <machine/bus.h>
99 #include <machine/resource.h>
100 #include <sys/bus.h>
101 #include <sys/rman.h>
102 
103 #include <dev/mii/mii.h>
104 #include <dev/mii/miivar.h>
105 #include "miidevs.h"
106 #include <dev/mii/brgphyreg.h>
107 
108 #include <dev/pci/pcireg.h>
109 #include <dev/pci/pcivar.h>
110 
111 #include <dev/bge/if_bgereg.h>
112 
113 #define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
114 #define ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
115 
116 MODULE_DEPEND(bge, pci, 1, 1, 1);
117 MODULE_DEPEND(bge, ether, 1, 1, 1);
118 MODULE_DEPEND(bge, miibus, 1, 1, 1);
119 
120 /* "device miibus" required.  See GENERIC if you get errors here. */
121 #include "miibus_if.h"
122 
123 /*
124  * Various supported device vendors/types and their names. Note: the
125  * spec seems to indicate that the hardware still has Alteon's vendor
126  * ID burned into it, though it will always be overridden by the vendor
127  * ID in the EEPROM. Just to be safe, we cover all possibilities.
128  */
129 #define BGE_DEVDESC_MAX		64	/* Maximum device description length */
130 
131 static struct bge_type bge_devs[] = {
132 	{ ALT_VENDORID,	ALT_DEVICEID_BCM5700,
133 		"Broadcom BCM5700 Gigabit Ethernet" },
134 	{ ALT_VENDORID,	ALT_DEVICEID_BCM5701,
135 		"Broadcom BCM5701 Gigabit Ethernet" },
136 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5700,
137 		"Broadcom BCM5700 Gigabit Ethernet" },
138 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5701,
139 		"Broadcom BCM5701 Gigabit Ethernet" },
140 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5702,
141 		"Broadcom BCM5702 Gigabit Ethernet" },
142 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5702X,
143 		"Broadcom BCM5702X Gigabit Ethernet" },
144 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5703,
145 		"Broadcom BCM5703 Gigabit Ethernet" },
146 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5703X,
147 		"Broadcom BCM5703X Gigabit Ethernet" },
148 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5704C,
149 		"Broadcom BCM5704C Dual Gigabit Ethernet" },
150 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5704S,
151 		"Broadcom BCM5704S Dual Gigabit Ethernet" },
152 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705,
153 		"Broadcom BCM5705 Gigabit Ethernet" },
154 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705K,
155 		"Broadcom BCM5705K Gigabit Ethernet" },
156 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705M,
157 		"Broadcom BCM5705M Gigabit Ethernet" },
158 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT,
159 		"Broadcom BCM5705M Gigabit Ethernet" },
160 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5714C,
161 		"Broadcom BCM5714C Gigabit Ethernet" },
162 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5721,
163 		"Broadcom BCM5721 Gigabit Ethernet" },
164 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5750,
165 		"Broadcom BCM5750 Gigabit Ethernet" },
166 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5750M,
167 		"Broadcom BCM5750M Gigabit Ethernet" },
168 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5751,
169 		"Broadcom BCM5751 Gigabit Ethernet" },
170 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5751M,
171 		"Broadcom BCM5751M Gigabit Ethernet" },
172 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5752,
173 		"Broadcom BCM5752 Gigabit Ethernet" },
174 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5782,
175 		"Broadcom BCM5782 Gigabit Ethernet" },
176 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5788,
177 		"Broadcom BCM5788 Gigabit Ethernet" },
178 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5789,
179 		"Broadcom BCM5789 Gigabit Ethernet" },
180 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5901,
181 		"Broadcom BCM5901 Fast Ethernet" },
182 	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2,
183 		"Broadcom BCM5901A2 Fast Ethernet" },
184 	{ SK_VENDORID, SK_DEVICEID_ALTIMA,
185 		"SysKonnect Gigabit Ethernet" },
186 	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000,
187 		"Altima AC1000 Gigabit Ethernet" },
188 	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002,
189 		"Altima AC1002 Gigabit Ethernet" },
190 	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100,
191 		"Altima AC9100 Gigabit Ethernet" },
192 	{ 0, 0, NULL }
193 };
194 
195 static int bge_probe		(device_t);
196 static int bge_attach		(device_t);
197 static int bge_detach		(device_t);
198 static int bge_suspend		(device_t);
199 static int bge_resume		(device_t);
200 static void bge_release_resources
201 				(struct bge_softc *);
202 static void bge_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
203 static int bge_dma_alloc	(device_t);
204 static void bge_dma_free	(struct bge_softc *);
205 
206 static void bge_txeof		(struct bge_softc *);
207 static void bge_rxeof		(struct bge_softc *);
208 
209 static void bge_tick_locked	(struct bge_softc *);
210 static void bge_tick		(void *);
211 static void bge_stats_update	(struct bge_softc *);
212 static void bge_stats_update_regs
213 				(struct bge_softc *);
214 static int bge_encap		(struct bge_softc *, struct mbuf *,
215 					u_int32_t *);
216 
217 static void bge_intr		(void *);
218 static void bge_start_locked	(struct ifnet *);
219 static void bge_start		(struct ifnet *);
220 static int bge_ioctl		(struct ifnet *, u_long, caddr_t);
221 static void bge_init_locked	(struct bge_softc *);
222 static void bge_init		(void *);
223 static void bge_stop		(struct bge_softc *);
224 static void bge_watchdog		(struct ifnet *);
225 static void bge_shutdown		(device_t);
226 static int bge_ifmedia_upd	(struct ifnet *);
227 static void bge_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
228 
229 static u_int8_t	bge_eeprom_getbyte	(struct bge_softc *, int, u_int8_t *);
230 static int bge_read_eeprom	(struct bge_softc *, caddr_t, int, int);
231 
232 static void bge_setmulti	(struct bge_softc *);
233 
234 static int bge_newbuf_std	(struct bge_softc *, int, struct mbuf *);
235 static int bge_newbuf_jumbo	(struct bge_softc *, int, struct mbuf *);
236 static int bge_init_rx_ring_std	(struct bge_softc *);
237 static void bge_free_rx_ring_std	(struct bge_softc *);
238 static int bge_init_rx_ring_jumbo	(struct bge_softc *);
239 static void bge_free_rx_ring_jumbo	(struct bge_softc *);
240 static void bge_free_tx_ring	(struct bge_softc *);
241 static int bge_init_tx_ring	(struct bge_softc *);
242 
243 static int bge_chipinit		(struct bge_softc *);
244 static int bge_blockinit	(struct bge_softc *);
245 
246 #ifdef notdef
247 static u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
248 static void bge_vpd_read_res	(struct bge_softc *, struct vpd_res *, int);
249 static void bge_vpd_read	(struct bge_softc *);
250 #endif
251 
252 static u_int32_t bge_readmem_ind
253 				(struct bge_softc *, int);
254 static void bge_writemem_ind	(struct bge_softc *, int, int);
255 #ifdef notdef
256 static u_int32_t bge_readreg_ind
257 				(struct bge_softc *, int);
258 #endif
259 static void bge_writereg_ind	(struct bge_softc *, int, int);
260 
261 static int bge_miibus_readreg	(device_t, int, int);
262 static int bge_miibus_writereg	(device_t, int, int, int);
263 static void bge_miibus_statchg	(device_t);
264 #ifdef DEVICE_POLLING
265 static void bge_poll		(struct ifnet *ifp, enum poll_cmd cmd,
266 				    int count);
267 static void bge_poll_locked	(struct ifnet *ifp, enum poll_cmd cmd,
268 				    int count);
269 #endif
270 
271 static void bge_reset		(struct bge_softc *);
272 static void bge_link_upd	(struct bge_softc *);
273 
274 static device_method_t bge_methods[] = {
275 	/* Device interface */
276 	DEVMETHOD(device_probe,		bge_probe),
277 	DEVMETHOD(device_attach,	bge_attach),
278 	DEVMETHOD(device_detach,	bge_detach),
279 	DEVMETHOD(device_shutdown,	bge_shutdown),
280 	DEVMETHOD(device_suspend,	bge_suspend),
281 	DEVMETHOD(device_resume,	bge_resume),
282 
283 	/* bus interface */
284 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
285 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
286 
287 	/* MII interface */
288 	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
289 	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
290 	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),
291 
292 	{ 0, 0 }
293 };
294 
295 static driver_t bge_driver = {
296 	"bge",
297 	bge_methods,
298 	sizeof(struct bge_softc)
299 };
300 
301 static devclass_t bge_devclass;
302 
303 DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
304 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
305 
306 static int bge_fake_autoneg = 0;
307 TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
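/*
 * Note: hw.bge.fake_autoneg is a boot-time tunable (it is fetched with
 * TUNABLE_INT(), so the loader environment supplies it).  A minimal usage
 * sketch, assuming the tunable is meant to skip normal autonegotiation
 * handling on links whose partners never complete it:
 *
 *	# in /boot/loader.conf
 *	hw.bge.fake_autoneg="1"
 */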
308 
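/*
 * Indirect register/memory access: the chip exposes a sliding window in
 * PCI configuration space.  bge_readmem_ind()/bge_writemem_ind() first
 * store the target offset in BGE_PCI_MEMWIN_BASEADDR and then move the
 * data through BGE_PCI_MEMWIN_DATA; bge_writereg_ind() does the same via
 * the BGE_PCI_REG_BASEADDR/BGE_PCI_REG_DATA pair.
 */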
309 static u_int32_t
310 bge_readmem_ind(sc, off)
311 	struct bge_softc *sc;
312 	int off;
313 {
314 	device_t dev;
315 
316 	dev = sc->bge_dev;
317 
318 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
319 	return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
320 }
321 
322 static void
323 bge_writemem_ind(sc, off, val)
324 	struct bge_softc *sc;
325 	int off, val;
326 {
327 	device_t dev;
328 
329 	dev = sc->bge_dev;
330 
331 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
332 	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
333 
334 	return;
335 }
336 
337 #ifdef notdef
338 static u_int32_t
339 bge_readreg_ind(sc, off)
340 	struct bge_softc *sc;
341 	int off;
342 {
343 	device_t dev;
344 
345 	dev = sc->bge_dev;
346 
347 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
348 	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
349 }
350 #endif
351 
352 static void
353 bge_writereg_ind(sc, off, val)
354 	struct bge_softc *sc;
355 	int off, val;
356 {
357 	device_t dev;
358 
359 	dev = sc->bge_dev;
360 
361 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
362 	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
363 
364 	return;
365 }
366 
367 /*
368  * Map a single buffer address.
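 * This callback records only the first segment's bus address and flags an
 * error (by zeroing bge_maxsegs) if the mapping produced more segments than
 * the caller allowed, so it is only used for loads known to yield a single
 * segment (ring blocks and standard RX mbufs).  Multi-segment mbuf loads go
 * through bus_dmamap_load_mbuf_sg() instead.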
369  */
370 
371 static void
372 bge_dma_map_addr(arg, segs, nseg, error)
373 	void *arg;
374 	bus_dma_segment_t *segs;
375 	int nseg;
376 	int error;
377 {
378 	struct bge_dmamap_arg *ctx;
379 
380 	if (error)
381 		return;
382 
383 	ctx = arg;
384 
385 	if (nseg > ctx->bge_maxsegs) {
386 		ctx->bge_maxsegs = 0;
387 		return;
388 	}
389 
390 	ctx->bge_busaddr = segs->ds_addr;
391 
392 	return;
393 }
394 
395 #ifdef notdef
396 static u_int8_t
397 bge_vpd_readbyte(sc, addr)
398 	struct bge_softc *sc;
399 	int addr;
400 {
401 	int i;
402 	device_t dev;
403 	u_int32_t val;
404 
405 	dev = sc->bge_dev;
406 	pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
407 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
408 		DELAY(10);
409 		if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
410 			break;
411 	}
412 
413 	if (i == BGE_TIMEOUT * 10) {
414 		device_printf(sc->bge_dev, "VPD read timed out\n");
415 		return(0);
416 	}
417 
418 	val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);
419 
420 	return((val >> ((addr % 4) * 8)) & 0xFF);
421 }
422 
423 static void
424 bge_vpd_read_res(sc, res, addr)
425 	struct bge_softc *sc;
426 	struct vpd_res *res;
427 	int addr;
428 {
429 	int i;
430 	u_int8_t *ptr;
431 
432 	ptr = (u_int8_t *)res;
433 	for (i = 0; i < sizeof(struct vpd_res); i++)
434 		ptr[i] = bge_vpd_readbyte(sc, i + addr);
435 
436 	return;
437 }
438 
439 static void
440 bge_vpd_read(sc)
441 	struct bge_softc *sc;
442 {
443 	int pos = 0, i;
444 	struct vpd_res res;
445 
446 	if (sc->bge_vpd_prodname != NULL)
447 		free(sc->bge_vpd_prodname, M_DEVBUF);
448 	if (sc->bge_vpd_readonly != NULL)
449 		free(sc->bge_vpd_readonly, M_DEVBUF);
450 	sc->bge_vpd_prodname = NULL;
451 	sc->bge_vpd_readonly = NULL;
452 
453 	bge_vpd_read_res(sc, &res, pos);
454 
455 	if (res.vr_id != VPD_RES_ID) {
456 		device_printf(sc->bge_dev,
457 		    "bad VPD resource id: expected %x got %x\n", VPD_RES_ID,
458 		    res.vr_id);
459 		return;
460 	}
461 
462 	pos += sizeof(res);
463 	sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
464 	for (i = 0; i < res.vr_len; i++)
465 		sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
466 	sc->bge_vpd_prodname[i] = '\0';
467 	pos += i;
468 
469 	bge_vpd_read_res(sc, &res, pos);
470 
471 	if (res.vr_id != VPD_RES_READ) {
472 		device_printf(sc->bge_dev,
473 		    "bad VPD resource id: expected %x got %x\n", VPD_RES_READ,
474 		    res.vr_id);
475 		return;
476 	}
477 
478 	pos += sizeof(res);
479 	sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
480 	for (i = 0; i < res.vr_len; i++)
481 		sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
482 
483 	return;
484 }
485 #endif
486 
487 /*
488  * Read a byte of data stored in the EEPROM at address 'addr.' The
489  * BCM570x supports both the traditional bitbang interface and an
490  * auto access interface for reading the EEPROM. We use the auto
491  * access method.
492  */
493 static u_int8_t
494 bge_eeprom_getbyte(sc, addr, dest)
495 	struct bge_softc *sc;
496 	int addr;
497 	u_int8_t *dest;
498 {
499 	int i;
500 	u_int32_t byte = 0;
501 
502 	/*
503 	 * Enable use of auto EEPROM access so we can avoid
504 	 * having to use the bitbang method.
505 	 */
506 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
507 
508 	/* Reset the EEPROM, load the clock period. */
509 	CSR_WRITE_4(sc, BGE_EE_ADDR,
510 	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
511 	DELAY(20);
512 
513 	/* Issue the read EEPROM command. */
514 	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
515 
516 	/* Wait for completion */
517 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
518 		DELAY(10);
519 		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
520 			break;
521 	}
522 
523 	if (i == BGE_TIMEOUT * 10) {
524 		device_printf(sc->bge_dev, "EEPROM read timed out\n");
525 		return(1);
526 	}
527 
528 	/* Get result. */
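	/*
	 * The data register returns a full 32-bit word; the low two bits
	 * of the byte address select which byte within that word we want.
	 */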
529 	byte = CSR_READ_4(sc, BGE_EE_DATA);
530 
531 	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;
532 
533 	return(0);
534 }
535 
536 /*
537  * Read a sequence of bytes from the EEPROM.
538  */
539 static int
540 bge_read_eeprom(sc, dest, off, cnt)
541 	struct bge_softc *sc;
542 	caddr_t dest;
543 	int off;
544 	int cnt;
545 {
546 	int err = 0, i;
547 	u_int8_t byte = 0;
548 
549 	for (i = 0; i < cnt; i++) {
550 		err = bge_eeprom_getbyte(sc, off + i, &byte);
551 		if (err)
552 			break;
553 		*(dest + i) = byte;
554 	}
555 
556 	return(err ? 1 : 0);
557 }
558 
559 static int
560 bge_miibus_readreg(dev, phy, reg)
561 	device_t dev;
562 	int phy, reg;
563 {
564 	struct bge_softc *sc;
565 	u_int32_t val, autopoll;
566 	int i;
567 
568 	sc = device_get_softc(dev);
569 
570 	/*
571 	 * Broadcom's own driver always assumes the internal
572 	 * PHY is at GMII address 1. On some chips, the PHY responds
573 	 * to accesses at all addresses, which could cause us to
574  * bogusly attach the PHY 32 times at probe time. Always
575 	 * restricting the lookup to address 1 is simpler than
576  * trying to figure out which chip revisions should be
577 	 * special-cased.
578 	 */
579 	if (phy != 1)
580 		return(0);
581 
582 	/* Reading with autopolling on may trigger PCI errors */
583 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
584 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
585 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
586 		DELAY(40);
587 	}
588 
589 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
590 	    BGE_MIPHY(phy)|BGE_MIREG(reg));
591 
592 	for (i = 0; i < BGE_TIMEOUT; i++) {
593 		val = CSR_READ_4(sc, BGE_MI_COMM);
594 		if (!(val & BGE_MICOMM_BUSY))
595 			break;
596 	}
597 
598 	if (i == BGE_TIMEOUT) {
599 		if_printf(sc->bge_ifp, "PHY read timed out\n");
600 		val = 0;
601 		goto done;
602 	}
603 
604 	val = CSR_READ_4(sc, BGE_MI_COMM);
605 
606 done:
607 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
608 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
609 		DELAY(40);
610 	}
611 
612 	if (val & BGE_MICOMM_READFAIL)
613 		return(0);
614 
615 	return(val & 0xFFFF);
616 }
617 
618 static int
619 bge_miibus_writereg(dev, phy, reg, val)
620 	device_t dev;
621 	int phy, reg, val;
622 {
623 	struct bge_softc *sc;
624 	u_int32_t autopoll;
625 	int i;
626 
627 	sc = device_get_softc(dev);
628 
629 	/* Writing with autopolling on may trigger PCI errors */
630 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
631 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
632 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
633 		DELAY(40);
634 	}
635 
636 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
637 	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
638 
639 	for (i = 0; i < BGE_TIMEOUT; i++) {
640 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
641 			break;
642 	}
643 
644 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
645 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
646 		DELAY(40);
647 	}
648 
649 	if (i == BGE_TIMEOUT) {
650 		if_printf(sc->bge_ifp, "PHY write timed out\n");
651 		return(0);
652 	}
653 
654 	return(0);
655 }
656 
657 static void
658 bge_miibus_statchg(dev)
659 	device_t dev;
660 {
661 	struct bge_softc *sc;
662 	struct mii_data *mii;
663 
664 	sc = device_get_softc(dev);
665 	mii = device_get_softc(sc->bge_miibus);
666 
667 	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
668 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
669 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
670 	} else {
671 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
672 	}
673 
674 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
675 		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
676 	} else {
677 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
678 	}
679 
680 	return;
681 }
682 
683 /*
684  * Initialize a standard receive ring descriptor.
685  */
686 static int
687 bge_newbuf_std(sc, i, m)
688 	struct bge_softc	*sc;
689 	int			i;
690 	struct mbuf		*m;
691 {
692 	struct mbuf		*m_new = NULL;
693 	struct bge_rx_bd	*r;
694 	struct bge_dmamap_arg	ctx;
695 	int			error;
696 
697 	if (m == NULL) {
698 		m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
699 		if (m_new == NULL)
700 			return(ENOBUFS);
701 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
702 	} else {
703 		m_new = m;
704 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
705 		m_new->m_data = m_new->m_ext.ext_buf;
706 	}
707 
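	/*
	 * Offsetting the payload by ETHER_ALIGN (2 bytes) leaves the IP
	 * header 32-bit aligned after the 14-byte Ethernet header.  Chips
	 * flagged with bge_rx_alignment_bug reportedly mishandle receive
	 * buffers that do not start on a natural boundary, so the
	 * adjustment is skipped for them.
	 */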
708 	if (!sc->bge_rx_alignment_bug)
709 		m_adj(m_new, ETHER_ALIGN);
710 	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
711 	r = &sc->bge_ldata.bge_rx_std_ring[i];
712 	ctx.bge_maxsegs = 1;
713 	ctx.sc = sc;
714 	error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
715 	    sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
716 	    m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
717 	if (error || ctx.bge_maxsegs == 0) {
718 		if (m == NULL) {
719 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
720 			m_freem(m_new);
721 		}
722 		return(ENOMEM);
723 	}
724 	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
725 	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
726 	r->bge_flags = BGE_RXBDFLAG_END;
727 	r->bge_len = m_new->m_len;
728 	r->bge_idx = i;
729 
730 	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
731 	    sc->bge_cdata.bge_rx_std_dmamap[i],
732 	    BUS_DMASYNC_PREREAD);
733 
734 	return(0);
735 }
736 
737 /*
738  * Initialize a jumbo receive ring descriptor. This allocates a 9k
739  * (MJUM9BYTES) mbuf cluster rather than a driver-managed jumbo buffer.
740  */
741 static int
742 bge_newbuf_jumbo(sc, i, m)
743 	struct bge_softc *sc;
744 	int i;
745 	struct mbuf *m;
746 {
747 	bus_dma_segment_t segs[BGE_NSEG_JUMBO];
748 	struct bge_extrx_bd *r;
749 	struct mbuf *m_new = NULL;
750 	int nsegs;
751 	int error;
752 
753 	if (m == NULL) {
754 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
755 		if (m_new == NULL)
756 			return(ENOBUFS);
757 
758 		m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
759 		if (!(m_new->m_flags & M_EXT)) {
760 			m_freem(m_new);
761 			return(ENOBUFS);
762 		}
763 		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
764 	} else {
765 		m_new = m;
766 		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
767 		m_new->m_data = m_new->m_ext.ext_buf;
768 	}
769 
770 	if (!sc->bge_rx_alignment_bug)
771 		m_adj(m_new, ETHER_ALIGN);
772 
773 	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
774 	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
775 	    m_new, segs, &nsegs, BUS_DMA_NOWAIT);
776 	if (error) {
777 		if (m == NULL)
778 			m_freem(m_new);
779 		return(error);
780 	}
781 	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
782 
783 	/*
784 	 * Fill in the extended RX buffer descriptor.
785 	 */
786 	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
787 	r->bge_flags = BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END;
788 	r->bge_idx = i;
789 	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
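	/*
	 * An extended RX BD can describe up to four DMA segments.  The
	 * cases below intentionally fall through so that a mapping with
	 * N segments fills entries N-1 down to 0.
	 */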
790 	switch (nsegs) {
791 	case 4:
792 		r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
793 		r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
794 		r->bge_len3 = segs[3].ds_len;
795 	case 3:
796 		r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
797 		r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
798 		r->bge_len2 = segs[2].ds_len;
799 	case 2:
800 		r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
801 		r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
802 		r->bge_len1 = segs[1].ds_len;
803 	case 1:
804 		r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
805 		r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
806 		r->bge_len0 = segs[0].ds_len;
807 		break;
808 	default:
809 		panic("%s: %d segments\n", __func__, nsegs);
810 	}
811 
812 	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
813 	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
814 	    BUS_DMASYNC_PREREAD);
815 
816 	return (0);
817 }
818 
819 /*
820  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
821  * that's 1MB of memory, which is a lot. For now, we fill only the first
822  * 256 ring entries and hope that our CPU is fast enough to keep up with
823  * the NIC.
824  */
825 static int
826 bge_init_rx_ring_std(sc)
827 	struct bge_softc *sc;
828 {
829 	int i;
830 
831 	for (i = 0; i < BGE_SSLOTS; i++) {
832 		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
833 			return(ENOBUFS);
834 	}
835 
836 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
837 	    sc->bge_cdata.bge_rx_std_ring_map,
838 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
839 
840 	sc->bge_std = i - 1;
841 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
842 
843 	return(0);
844 }
845 
846 static void
847 bge_free_rx_ring_std(sc)
848 	struct bge_softc *sc;
849 {
850 	int i;
851 
852 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
853 		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
854 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
855 			    sc->bge_cdata.bge_rx_std_dmamap[i],
856 			    BUS_DMASYNC_POSTREAD);
857 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
858 			    sc->bge_cdata.bge_rx_std_dmamap[i]);
859 			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
860 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
861 		}
862 		bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
863 		    sizeof(struct bge_rx_bd));
864 	}
865 
866 	return;
867 }
868 
869 static int
870 bge_init_rx_ring_jumbo(sc)
871 	struct bge_softc *sc;
872 {
873 	struct bge_rcb *rcb;
874 	int i;
875 
876 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
877 		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
878 			return(ENOBUFS);
879 	}
880 
881 	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
882 	    sc->bge_cdata.bge_rx_jumbo_ring_map,
883 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
884 
885 	sc->bge_jumbo = i - 1;
886 
887 	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
888 	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
889 				    BGE_RCB_FLAG_USE_EXT_RX_BD);
890 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
891 
892 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
893 
894 	return(0);
895 }
896 
897 static void
898 bge_free_rx_ring_jumbo(sc)
899 	struct bge_softc *sc;
900 {
901 	int i;
902 
903 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
904 		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
905 			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
906 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
907 			    BUS_DMASYNC_POSTREAD);
908 			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
909 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
910 			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
911 			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
912 		}
913 		bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
914 		    sizeof(struct bge_extrx_bd));
915 	}
916 
917 	return;
918 }
919 
920 static void
921 bge_free_tx_ring(sc)
922 	struct bge_softc *sc;
923 {
924 	int i;
925 
926 	if (sc->bge_ldata.bge_tx_ring == NULL)
927 		return;
928 
929 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
930 		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
931 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
932 			    sc->bge_cdata.bge_tx_dmamap[i],
933 			    BUS_DMASYNC_POSTWRITE);
934 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
935 			    sc->bge_cdata.bge_tx_dmamap[i]);
936 			m_freem(sc->bge_cdata.bge_tx_chain[i]);
937 			sc->bge_cdata.bge_tx_chain[i] = NULL;
938 		}
939 		bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
940 		    sizeof(struct bge_tx_bd));
941 	}
942 
943 	return;
944 }
945 
946 static int
947 bge_init_tx_ring(sc)
948 	struct bge_softc *sc;
949 {
950 	sc->bge_txcnt = 0;
951 	sc->bge_tx_saved_considx = 0;
952 
953 	/* Initialize transmit producer index for host-memory send ring. */
954 	sc->bge_tx_prodidx = 0;
955 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
956 
957 	/* 5700 b2 errata */
958 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
959 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
960 
961 	/* NIC-memory send ring not used; initialize to zero. */
962 	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
963 	/* 5700 b2 errata */
964 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
965 		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
966 
967 	return(0);
968 }
969 
970 static void
971 bge_setmulti(sc)
972 	struct bge_softc *sc;
973 {
974 	struct ifnet *ifp;
975 	struct ifmultiaddr *ifma;
976 	u_int32_t hashes[4] = { 0, 0, 0, 0 };
977 	int h, i;
978 
979 	BGE_LOCK_ASSERT(sc);
980 
981 	ifp = sc->bge_ifp;
982 
983 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
984 		for (i = 0; i < 4; i++)
985 			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
986 		return;
987 	}
988 
989 	/* First, zot all the existing filters. */
990 	for (i = 0; i < 4; i++)
991 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
992 
993 	/* Now program new ones. */
994 	IF_ADDR_LOCK(ifp);
995 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
996 		if (ifma->ifma_addr->sa_family != AF_LINK)
997 			continue;
998 		h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
999 		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
1000 		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1001 	}
1002 	IF_ADDR_UNLOCK(ifp);
1003 
1004 	for (i = 0; i < 4; i++)
1005 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1006 
1007 	return;
1008 }
1009 
1010 /*
1011  * Do endian, PCI and DMA initialization. Also check the on-board ROM
1012  * self-test results.
1013  */
1014 static int
1015 bge_chipinit(sc)
1016 	struct bge_softc *sc;
1017 {
1018 	int			i;
1019 	u_int32_t		dma_rw_ctl;
1020 
1021 	/* Set endian type before we access any non-PCI registers. */
1022 	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
1023 
1024 	/*
1025 	 * Check the 'ROM failed' bit on the RX CPU to see if
1026 	 * self-tests passed.
1027 	 */
1028 	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1029 		device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
1030 		return(ENODEV);
1031 	}
1032 
1033 	/* Clear the MAC control register */
1034 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1035 
1036 	/*
1037 	 * Clear the MAC statistics block in the NIC's
1038 	 * internal memory.
1039 	 */
1040 	for (i = BGE_STATS_BLOCK;
1041 	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1042 		BGE_MEMWIN_WRITE(sc, i, 0);
1043 
1044 	for (i = BGE_STATUS_BLOCK;
1045 	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1046 		BGE_MEMWIN_WRITE(sc, i, 0);
1047 
1048 	/* Set up the PCI DMA control register. */
1049 	if (sc->bge_pcie) {
1050 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1051 		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1052 		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1053 	} else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
1054 	    BGE_PCISTATE_PCI_BUSMODE) {
1055 		/* Conventional PCI bus */
1056 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1057 		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1058 		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1059 		    (0x0F);
1060 	} else {
1061 		/* PCI-X bus */
1062 		/*
1063 		 * The 5704 uses a different encoding of read/write
1064 		 * watermarks.
1065 		 */
1066 		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1067 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1068 			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1069 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1070 		else
1071 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1072 			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1073 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1074 			    (0x0F);
1075 
1076 		/*
1077 		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1078 		 * for hardware bugs.
1079 		 */
1080 		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1081 		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1082 			u_int32_t tmp;
1083 
1084 			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1085 			if (tmp == 0x6 || tmp == 0x7)
1086 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1087 		}
1088 	}
1089 
1090 	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1091 	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1092 	    sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1093 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
1094 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1095 	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1096 
1097 	/*
1098 	 * Set up general mode register.
1099 	 */
1100 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1101 	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1102 	    BGE_MODECTL_TX_NO_PHDR_CSUM);
1103 
1104 	/*
1105 	 * Disable memory write invalidate.  Apparently it is not supported
1106 	 * properly by these devices.
1107 	 */
1108 	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1109 
1110 #ifdef __brokenalpha__
1111 	/*
1112 	 * Must ensure that we do not cross an 8K-byte boundary
1113 	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
1114 	 * restriction on some ALPHA platforms with early revision
1115 	 * 21174 PCI chipsets, such as the AlphaPC 164lx
1116 	 */
1117 	PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1118 	    BGE_PCI_READ_BNDRY_1024BYTES, 4);
1119 #endif
1120 
1121 	/* Set the timer prescaler (always 66MHz) */
1122 	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1123 
1124 	return(0);
1125 }
1126 
1127 static int
1128 bge_blockinit(sc)
1129 	struct bge_softc *sc;
1130 {
1131 	struct bge_rcb *rcb;
1132 	bus_size_t vrcb;
1133 	bge_hostaddr taddr;
1134 	int i;
1135 
1136 	/*
1137 	 * Initialize the memory window pointer register so that
1138 	 * we can access the first 32K of internal NIC RAM. This will
1139 	 * allow us to set up the TX send ring RCBs and the RX return
1140 	 * ring RCBs, plus other things which live in NIC memory.
1141 	 */
1142 	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1143 
1144 	/* Note: the BCM5704 has a smaller mbuf space than other chips. */
1145 
1146 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1147 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1148 		/* Configure mbuf memory pool */
1149 		if (sc->bge_extram) {
1150 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1151 			    BGE_EXT_SSRAM);
1152 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1153 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1154 			else
1155 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1156 		} else {
1157 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1158 			    BGE_BUFFPOOL_1);
1159 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1160 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1161 			else
1162 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1163 		}
1164 
1165 		/* Configure DMA resource pool */
1166 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1167 		    BGE_DMA_DESCRIPTORS);
1168 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1169 	}
1170 
1171 	/* Configure mbuf pool watermarks */
1172 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1173 	    sc->bge_asicrev == BGE_ASICREV_BCM5750) {
1174 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1175 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1176 	} else {
1177 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1178 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1179 	}
1180 	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1181 
1182 	/* Configure DMA resource watermarks */
1183 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1184 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1185 
1186 	/* Enable buffer manager */
1187 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1188 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1189 		CSR_WRITE_4(sc, BGE_BMAN_MODE,
1190 		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1191 
1192 		/* Poll for buffer manager start indication */
1193 		for (i = 0; i < BGE_TIMEOUT; i++) {
1194 			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1195 				break;
1196 			DELAY(10);
1197 		}
1198 
1199 		if (i == BGE_TIMEOUT) {
1200 			device_printf(sc->bge_dev,
1201 			    "buffer manager failed to start\n");
1202 			return(ENXIO);
1203 		}
1204 	}
1205 
1206 	/* Enable flow-through queues */
1207 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1208 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1209 
1210 	/* Wait until queue initialization is complete */
1211 	for (i = 0; i < BGE_TIMEOUT; i++) {
1212 		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1213 			break;
1214 		DELAY(10);
1215 	}
1216 
1217 	if (i == BGE_TIMEOUT) {
1218 		device_printf(sc->bge_dev, "flow-through queue init failed\n");
1219 		return(ENXIO);
1220 	}
1221 
1222 	/* Initialize the standard RX ring control block */
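	/*
	 * A ring control block (RCB) packs the host ring address, a
	 * maxlen/flags word (presumably the maximum frame length in the
	 * upper 16 bits and flags in the lower 16, per BGE_RCB_MAXLEN_FLAGS)
	 * and the ring's NIC-memory address.
	 */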
1223 	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1224 	rcb->bge_hostaddr.bge_addr_lo =
1225 	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1226 	rcb->bge_hostaddr.bge_addr_hi =
1227 	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1228 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1229 	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1230 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1231 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
1232 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1233 	else
1234 		rcb->bge_maxlen_flags =
1235 		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1236 	if (sc->bge_extram)
1237 		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1238 	else
1239 		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1240 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1241 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1242 
1243 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1244 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1245 
1246 	/*
1247 	 * Initialize the jumbo RX ring control block
1248 	 * We set the 'ring disabled' bit in the flags
1249 	 * field until we're actually ready to start
1250 	 * using this ring (i.e. once we set the MTU
1251 	 * high enough to require it).
1252 	 */
1253 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1254 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1255 		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1256 
1257 		rcb->bge_hostaddr.bge_addr_lo =
1258 		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1259 		rcb->bge_hostaddr.bge_addr_hi =
1260 		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1261 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1262 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
1263 		    BUS_DMASYNC_PREREAD);
1264 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1265 		    BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED);
1266 		if (sc->bge_extram)
1267 			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1268 		else
1269 			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1270 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1271 		    rcb->bge_hostaddr.bge_addr_hi);
1272 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1273 		    rcb->bge_hostaddr.bge_addr_lo);
1274 
1275 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1276 		    rcb->bge_maxlen_flags);
1277 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1278 
1279 		/* Set up dummy disabled mini ring RCB */
1280 		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1281 		rcb->bge_maxlen_flags =
1282 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1283 		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1284 		    rcb->bge_maxlen_flags);
1285 	}
1286 
1287 	/*
1288 	 * Set the BD ring replenish thresholds. The recommended
1289 	 * values are 1/8th the number of descriptors allocated to
1290 	 * each ring.
1291 	 */
1292 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1293 	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
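	/* With the 512-entry standard ring that works out to 64 BDs. */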
1294 
1295 	/*
1296 	 * Disable all unused send rings by setting the 'ring disabled'
1297 	 * bit in the flags field of all the TX send ring control blocks.
1298 	 * These are located in NIC memory.
1299 	 */
1300 	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1301 	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1302 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1303 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1304 		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1305 		vrcb += sizeof(struct bge_rcb);
1306 	}
1307 
1308 	/* Configure TX RCB 0 (we use only the first ring) */
1309 	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1310 	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1311 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1312 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1313 	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1314 	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1315 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1316 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1317 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1318 		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1319 
1320 	/* Disable all unused RX return rings */
1321 	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1322 	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1323 		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1324 		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1325 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1326 		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1327 		    BGE_RCB_FLAG_RING_DISABLED));
1328 		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1329 		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1330 		    (i * (sizeof(u_int64_t))), 0);
1331 		vrcb += sizeof(struct bge_rcb);
1332 	}
1333 
1334 	/* Initialize RX ring indexes */
1335 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1336 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1337 	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1338 
1339 	/*
1340 	 * Set up RX return ring 0
1341 	 * Note that the NIC address for RX return rings is 0x00000000.
1342 	 * The return rings live entirely within the host, so the
1343 	 * nicaddr field in the RCB isn't used.
1344 	 */
1345 	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1346 	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1347 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1348 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1349 	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1350 	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1351 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1352 
1353 	/* Set random backoff seed for TX */
1354 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1355 	    IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1356 	    IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1357 	    IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1358 	    BGE_TX_BACKOFF_SEED_MASK);
1359 
1360 	/* Set inter-packet gap */
1361 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1362 
1363 	/*
1364 	 * Specify which ring to use for packets that don't match
1365 	 * any RX rules.
1366 	 */
1367 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1368 
1369 	/*
1370 	 * Configure number of RX lists. One interrupt distribution
1371 	 * list, sixteen active lists, one bad frames class.
1372 	 */
1373 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1374 
1375 	/* Initialize RX list placement stats mask. */
1376 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1377 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1378 
1379 	/* Disable host coalescing until we get it set up */
1380 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1381 
1382 	/* Poll to make sure it's shut down. */
1383 	for (i = 0; i < BGE_TIMEOUT; i++) {
1384 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1385 			break;
1386 		DELAY(10);
1387 	}
1388 
1389 	if (i == BGE_TIMEOUT) {
1390 		device_printf(sc->bge_dev,
1391 		    "host coalescing engine failed to idle\n");
1392 		return(ENXIO);
1393 	}
1394 
1395 	/* Set up host coalescing defaults */
1396 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1397 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1398 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1399 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1400 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1401 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1402 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1403 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1404 	}
1405 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1406 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1407 
1408 	/* Set up address of statistics block */
1409 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1410 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1411 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1412 		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1413 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1414 		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1415 		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1416 		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1417 		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1418 	}
1419 
1420 	/* Set up address of status block */
1421 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1422 	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1423 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1424 	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1425 	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1426 	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1427 
1428 	/* Turn on host coalescing state machine */
1429 	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1430 
1431 	/* Turn on RX BD completion state machine and enable attentions */
1432 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
1433 	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1434 
1435 	/* Turn on RX list placement state machine */
1436 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1437 
1438 	/* Turn on RX list selector state machine. */
1439 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1440 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1441 		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1442 
1443 	/* Turn on DMA, clear stats */
1444 	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1445 	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1446 	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1447 	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1448 	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1449 
1450 	/* Set misc. local control, enable interrupts on attentions */
1451 	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1452 
1453 #ifdef notdef
1454 	/* Assert GPIO pins for PHY reset */
1455 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1456 	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1457 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1458 	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1459 #endif
1460 
1461 	/* Turn on DMA completion state machine */
1462 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1463 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1464 		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1465 
1466 	/* Turn on write DMA state machine */
1467 	CSR_WRITE_4(sc, BGE_WDMA_MODE,
1468 	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1469 
1470 	/* Turn on read DMA state machine */
1471 	CSR_WRITE_4(sc, BGE_RDMA_MODE,
1472 	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1473 
1474 	/* Turn on RX data completion state machine */
1475 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1476 
1477 	/* Turn on RX BD initiator state machine */
1478 	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1479 
1480 	/* Turn on RX data and RX BD initiator state machine */
1481 	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1482 
1483 	/* Turn on Mbuf cluster free state machine */
1484 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1485 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1486 		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1487 
1488 	/* Turn on send BD completion state machine */
1489 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1490 
1491 	/* Turn on send data completion state machine */
1492 	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1493 
1494 	/* Turn on send data initiator state machine */
1495 	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1496 
1497 	/* Turn on send BD initiator state machine */
1498 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1499 
1500 	/* Turn on send BD selector state machine */
1501 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1502 
1503 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1504 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1505 	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1506 
1507 	/* ack/clear link change events */
1508 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1509 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1510 	    BGE_MACSTAT_LINK_CHANGED);
1511 	CSR_WRITE_4(sc, BGE_MI_STS, 0);
1512 
1513 	/* Enable PHY auto polling (for MII/GMII only) */
1514 	if (sc->bge_tbi) {
1515 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1516 	} else {
1517 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1518 		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1519 		    sc->bge_chipid != BGE_CHIPID_BCM5700_B1)
1520 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1521 			    BGE_EVTENB_MI_INTERRUPT);
1522 	}
1523 
1524 	/*
1525 	 * Clear any pending link state attention.
1526 	 * Otherwise some link state change events may be lost until attention
1527 	 * is cleared by bge_intr() -> bge_link_upd() sequence.
1528 	 * It's not necessary on newer BCM chips - perhaps enabling link
1529 	 * state change attentions implies clearing pending attention.
1530 	 */
1531 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1532 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1533 	    BGE_MACSTAT_LINK_CHANGED);
1534 
1535 	/* Enable link state change attentions. */
1536 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1537 
1538 	return(0);
1539 }
1540 
1541 /*
1542  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1543  * against our list and return its name if we find a match. Note
1544  * that since the Broadcom controller contains VPD support, we could
1545  * get the device name string from the controller itself instead of
1546  * the compiled-in string; that path is compiled out ("#ifdef notdef")
1547  * here, so we announce the table entry name plus the ASIC revision.
1548  */
1549 static int
1550 bge_probe(dev)
1551 	device_t dev;
1552 {
1553 	struct bge_type *t;
1554 	struct bge_softc *sc;
1555 	char *descbuf;
1556 
1557 	t = bge_devs;
1558 
1559 	sc = device_get_softc(dev);
1560 	bzero(sc, sizeof(struct bge_softc));
1561 	sc->bge_dev = dev;
1562 
1563 	while(t->bge_name != NULL) {
1564 		if ((pci_get_vendor(dev) == t->bge_vid) &&
1565 		    (pci_get_device(dev) == t->bge_did)) {
1566 #ifdef notdef
1567 			bge_vpd_read(sc);
1568 			device_set_desc(dev, sc->bge_vpd_prodname);
1569 #endif
1570 			descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
1571 			if (descbuf == NULL)
1572 				return(ENOMEM);
1573 			snprintf(descbuf, BGE_DEVDESC_MAX,
1574 			    "%s, ASIC rev. %#04x", t->bge_name,
1575 			    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
1576 			device_set_desc_copy(dev, descbuf);
1577 			if (pci_get_subvendor(dev) == DELL_VENDORID)
1578 				sc->bge_no_3_led = 1;
1579 			free(descbuf, M_TEMP);
1580 			return(0);
1581 		}
1582 		t++;
1583 	}
1584 
1585 	return(ENXIO);
1586 }
1587 
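/*
 * Tear down DMA resources.  Every release below is guarded by a check of
 * the corresponding map/tag pointer, so this is safe to call on a softc
 * whose DMA setup only partially completed.
 */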
1588 static void
1589 bge_dma_free(sc)
1590 	struct bge_softc *sc;
1591 {
1592 	int i;
1593 
1594 
1595 	/* Destroy DMA maps for RX buffers */
1596 
1597 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1598 		if (sc->bge_cdata.bge_rx_std_dmamap[i])
1599 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1600 			    sc->bge_cdata.bge_rx_std_dmamap[i]);
1601 	}
1602 
1603 	/* Destroy DMA maps for jumbo RX buffers */
1604 
1605 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1606 		if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1607 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1608 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1609 	}
1610 
1611 	/* Destroy DMA maps for TX buffers */
1612 
1613 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1614 		if (sc->bge_cdata.bge_tx_dmamap[i])
1615 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1616 			    sc->bge_cdata.bge_tx_dmamap[i]);
1617 	}
1618 
1619 	if (sc->bge_cdata.bge_mtag)
1620 		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
1621 
1622 
1623 	/* Destroy standard RX ring */
1624 
1625 	if (sc->bge_cdata.bge_rx_std_ring_map)
1626 		bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1627 		    sc->bge_cdata.bge_rx_std_ring_map);
1628 	if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
1629 		bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1630 		    sc->bge_ldata.bge_rx_std_ring,
1631 		    sc->bge_cdata.bge_rx_std_ring_map);
1632 
1633 	if (sc->bge_cdata.bge_rx_std_ring_tag)
1634 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1635 
1636 	/* Destroy jumbo RX ring */
1637 
1638 	if (sc->bge_cdata.bge_rx_jumbo_ring_map)
1639 		bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1640 		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1641 
1642 	if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
1643 	    sc->bge_ldata.bge_rx_jumbo_ring)
1644 		bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1645 		    sc->bge_ldata.bge_rx_jumbo_ring,
1646 		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1647 
1648 	if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1649 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1650 
1651 	/* Destroy RX return ring */
1652 
1653 	if (sc->bge_cdata.bge_rx_return_ring_map)
1654 		bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1655 		    sc->bge_cdata.bge_rx_return_ring_map);
1656 
1657 	if (sc->bge_cdata.bge_rx_return_ring_map &&
1658 	    sc->bge_ldata.bge_rx_return_ring)
1659 		bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1660 		    sc->bge_ldata.bge_rx_return_ring,
1661 		    sc->bge_cdata.bge_rx_return_ring_map);
1662 
1663 	if (sc->bge_cdata.bge_rx_return_ring_tag)
1664 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
1665 
1666 	/* Destroy TX ring */
1667 
1668 	if (sc->bge_cdata.bge_tx_ring_map)
1669 		bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
1670 		    sc->bge_cdata.bge_tx_ring_map);
1671 
1672 	if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
1673 		bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
1674 		    sc->bge_ldata.bge_tx_ring,
1675 		    sc->bge_cdata.bge_tx_ring_map);
1676 
1677 	if (sc->bge_cdata.bge_tx_ring_tag)
1678 		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
1679 
1680 	/* Destroy status block */
1681 
1682 	if (sc->bge_cdata.bge_status_map)
1683 		bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
1684 		    sc->bge_cdata.bge_status_map);
1685 
1686 	if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
1687 		bus_dmamem_free(sc->bge_cdata.bge_status_tag,
1688 		    sc->bge_ldata.bge_status_block,
1689 		    sc->bge_cdata.bge_status_map);
1690 
1691 	if (sc->bge_cdata.bge_status_tag)
1692 		bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1693 
1694 	/* Destroy statistics block */
1695 
1696 	if (sc->bge_cdata.bge_stats_map)
1697 		bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
1698 		    sc->bge_cdata.bge_stats_map);
1699 
1700 	if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
1701 		bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
1702 		    sc->bge_ldata.bge_stats,
1703 		    sc->bge_cdata.bge_stats_map);
1704 
1705 	if (sc->bge_cdata.bge_stats_tag)
1706 		bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1707 
1708 	/* Destroy the parent tag */
1709 
1710 	if (sc->bge_cdata.bge_parent_tag)
1711 		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1712 
1713 	return;
1714 }
1715 
1716 static int
1717 bge_dma_alloc(dev)
1718 	device_t dev;
1719 {
1720 	struct bge_softc *sc;
1721 	int i, error;
1722 	struct bge_dmamap_arg ctx;
1723 
1724 	sc = device_get_softc(dev);
1725 
1726 	/*
1727 	 * Allocate the parent bus DMA tag appropriate for PCI.
1728 	 */
1729 	error = bus_dma_tag_create(NULL,	/* parent */
1730 			PAGE_SIZE, 0,		/* alignment, boundary */
1731 			BUS_SPACE_MAXADDR,	/* lowaddr */
1732 			BUS_SPACE_MAXADDR,	/* highaddr */
1733 			NULL, NULL,		/* filter, filterarg */
1734 			MAXBSIZE, BGE_NSEG_NEW,	/* maxsize, nsegments */
1735 			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1736 			0,			/* flags */
1737 			NULL, NULL,		/* lockfunc, lockarg */
1738 			&sc->bge_cdata.bge_parent_tag);
1739 
1740 	if (error != 0) {
1741 		device_printf(sc->bge_dev,
1742 		    "could not allocate parent dma tag\n");
1743 		return (ENOMEM);
1744 	}
1745 
1746 	/*
1747 	 * Create tag for RX and TX mbufs.
1748 	 */
1749 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
1750 	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1751 	    NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
1752 	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
1753 
1754 	if (error) {
1755 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1756 		return (ENOMEM);
1757 	}
1758 
1759 	/* Create DMA maps for RX buffers */
1760 
1761 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1762 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1763 			    &sc->bge_cdata.bge_rx_std_dmamap[i]);
1764 		if (error) {
1765 			device_printf(sc->bge_dev,
1766 			    "can't create DMA map for RX\n");
1767 			return(ENOMEM);
1768 		}
1769 	}
1770 
1771 	/* Create DMA maps for TX buffers */
1772 
1773 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1774 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1775 			    &sc->bge_cdata.bge_tx_dmamap[i]);
1776 		if (error) {
1777 			device_printf(sc->bge_dev,
1778 			    "can't create DMA map for TX\n");
1779 			return(ENOMEM);
1780 		}
1781 	}
1782 
1783 	/* Create tag for standard RX ring */
1784 
1785 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1786 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1787 	    NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1788 	    NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1789 
1790 	if (error) {
1791 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1792 		return (ENOMEM);
1793 	}
1794 
1795 	/* Allocate DMA'able memory for standard RX ring */
1796 
1797 	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1798 	    (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1799 	    &sc->bge_cdata.bge_rx_std_ring_map);
1800 	if (error)
1801 		return (ENOMEM);
1802 
1803 	bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1804 
1805 	/* Load the address of the standard RX ring */
1806 
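	/*
	 * bus_dmamap_load() passes the segment list to the bge_dma_map_addr
	 * callback, which is expected to record the single segment's bus
	 * address in ctx.bge_busaddr; that value is saved below as the
	 * ring's physical address.
	 */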
1807 	ctx.bge_maxsegs = 1;
1808 	ctx.sc = sc;
1809 
1810 	error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
1811 	    sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
1812 	    BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1813 
1814 	if (error)
1815 		return (ENOMEM);
1816 
1817 	sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
1818 
1819 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1820 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1821 
1822 		/*
1823 		 * Create tag for jumbo mbufs.
1824 		 * This is really a bit of a kludge. We allocate a special
1825 		 * jumbo buffer pool which (thanks to the way our DMA
1826 		 * memory allocation works) will consist of contiguous
1827 		 * pages. This means that even though a jumbo buffer might
1828 		 * be larger than a page size, we don't really need to
1829 		 * map it into more than one DMA segment. However, the
1830 		 * default mbuf tag will result in multi-segment mappings,
1831 		 * so we have to create a special jumbo mbuf tag that
1832 		 * lets us get away with mapping the jumbo buffers as
1833 		 * a single segment. I think eventually the driver should
1834 		 * be changed so that it uses ordinary mbufs and cluster
1835 		 * buffers, i.e. jumbo frames can span multiple DMA
1836 		 * descriptors. But that's a project for another day.
1837 		 */
1838 
1839 		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1840 		    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1841 		    NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
1842 		    0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
1843 
1844 		if (error) {
1845 			device_printf(sc->bge_dev,
1846 			    "could not allocate dma tag\n");
1847 			return (ENOMEM);
1848 		}
1849 
1850 		/* Create tag for jumbo RX ring */
1851 		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1852 		    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1853 		    NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
1854 		    NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
1855 
1856 		if (error) {
1857 			device_printf(sc->bge_dev,
1858 			    "could not allocate dma tag\n");
1859 			return (ENOMEM);
1860 		}
1861 
1862 		/* Allocate DMA'able memory for jumbo RX ring */
1863 		error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1864 		    (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
1865 		    BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1866 		    &sc->bge_cdata.bge_rx_jumbo_ring_map);
1867 		if (error)
1868 			return (ENOMEM);
1869 
1870 		/* Load the address of the jumbo RX ring */
1871 		ctx.bge_maxsegs = 1;
1872 		ctx.sc = sc;
1873 
1874 		error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1875 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
1876 		    sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
1877 		    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1878 
1879 		if (error)
1880 			return (ENOMEM);
1881 
1882 		sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
1883 
1884 		/* Create DMA maps for jumbo RX buffers */
1885 
1886 		for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1887 			error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
1888 				    0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1889 			if (error) {
1890 				device_printf(sc->bge_dev,
1891 				    "can't create DMA map for RX\n");
1892 				return(ENOMEM);
1893 			}
1894 		}
1895 
1896 	}
1897 
1898 	/* Create tag for RX return ring */
1899 
1900 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1901 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1902 	    NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
1903 	    NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
1904 
1905 	if (error) {
1906 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1907 		return (ENOMEM);
1908 	}
1909 
1910 	/* Allocate DMA'able memory for RX return ring */
1911 
1912 	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
1913 	    (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
1914 	    &sc->bge_cdata.bge_rx_return_ring_map);
1915 	if (error)
1916 		return (ENOMEM);
1917 
1918 	bzero((char *)sc->bge_ldata.bge_rx_return_ring,
1919 	    BGE_RX_RTN_RING_SZ(sc));
1920 
1921 	/* Load the address of the RX return ring */
1922 
1923 	ctx.bge_maxsegs = 1;
1924 	ctx.sc = sc;
1925 
1926 	error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
1927 	    sc->bge_cdata.bge_rx_return_ring_map,
1928 	    sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
1929 	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1930 
1931 	if (error)
1932 		return (ENOMEM);
1933 
1934 	sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
1935 
1936 	/* Create tag for TX ring */
1937 
1938 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1939 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1940 	    NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
1941 	    &sc->bge_cdata.bge_tx_ring_tag);
1942 
1943 	if (error) {
1944 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1945 		return (ENOMEM);
1946 	}
1947 
1948 	/* Allocate DMA'able memory for TX ring */
1949 
1950 	error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
1951 	    (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
1952 	    &sc->bge_cdata.bge_tx_ring_map);
1953 	if (error)
1954 		return (ENOMEM);
1955 
1956 	bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1957 
1958 	/* Load the address of the TX ring */
1959 
1960 	ctx.bge_maxsegs = 1;
1961 	ctx.sc = sc;
1962 
1963 	error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
1964 	    sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
1965 	    BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1966 
1967 	if (error)
1968 		return (ENOMEM);
1969 
1970 	sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
1971 
1972 	/* Create tag for status block */
1973 
1974 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1975 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1976 	    NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
1977 	    NULL, NULL, &sc->bge_cdata.bge_status_tag);
1978 
1979 	if (error) {
1980 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1981 		return (ENOMEM);
1982 	}
1983 
1984 	/* Allocate DMA'able memory for status block */
1985 
1986 	error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
1987 	    (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
1988 	    &sc->bge_cdata.bge_status_map);
1989 	if (error)
1990 		return (ENOMEM);
1991 
1992 	bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1993 
1994 	/* Load the address of the status block */
1995 
1996 	ctx.sc = sc;
1997 	ctx.bge_maxsegs = 1;
1998 
1999 	error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2000 	    sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2001 	    BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2002 
2003 	if (error)
2004 		return (ENOMEM);
2005 
2006 	sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2007 
2008 	/* Create tag for statistics block */
2009 
2010 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2011 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2012 	    NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2013 	    &sc->bge_cdata.bge_stats_tag);
2014 
2015 	if (error) {
2016 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
2017 		return (ENOMEM);
2018 	}
2019 
2020 	/* Allocate DMA'able memory for statistics block */
2021 
2022 	error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2023 	    (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2024 	    &sc->bge_cdata.bge_stats_map);
2025 	if (error)
2026 		return (ENOMEM);
2027 
2028 	bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2029 
2030 	/* Load the address of the statistics block */
2031 
2032 	ctx.sc = sc;
2033 	ctx.bge_maxsegs = 1;
2034 
2035 	error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2036 	    sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2037 	    BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2038 
2039 	if (error)
2040 		return (ENOMEM);
2041 
2042 	sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2043 
2044 	return(0);
2045 }
2046 
2047 static int
2048 bge_attach(dev)
2049 	device_t dev;
2050 {
2051 	struct ifnet *ifp;
2052 	struct bge_softc *sc;
2053 	u_int32_t hwcfg = 0;
2054 	u_int32_t mac_tmp = 0;
2055 	u_char eaddr[6];
2056 	int error = 0, rid;
2057 
2058 	sc = device_get_softc(dev);
2059 	sc->bge_dev = dev;
2060 
2061 	/*
2062 	 * Map control/status registers.
2063 	 */
2064 	pci_enable_busmaster(dev);
2065 
2066 	rid = BGE_PCI_BAR0;
2067 	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2068 	    RF_ACTIVE|PCI_RF_DENSE);
2069 
2070 	if (sc->bge_res == NULL) {
2071 		device_printf (sc->bge_dev, "couldn't map memory\n");
2072 		error = ENXIO;
2073 		goto fail;
2074 	}
2075 
2076 	sc->bge_btag = rman_get_bustag(sc->bge_res);
2077 	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2078 
2079 	/* Allocate interrupt */
2080 	rid = 0;
2081 
2082 	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2083 	    RF_SHAREABLE | RF_ACTIVE);
2084 
2085 	if (sc->bge_irq == NULL) {
2086 		device_printf(sc->bge_dev, "couldn't map interrupt\n");
2087 		error = ENXIO;
2088 		goto fail;
2089 	}
2090 
2091 	BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2092 
2093 	/* Save ASIC rev. */
2094 
2095 	sc->bge_chipid =
2096 	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2097 	    BGE_PCIMISCCTL_ASICREV;
2098 	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2099 	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2100 
2101 	/*
2102 	 * Treat the 5714 and the 5752 like the 5750 until we have more info
2103 	 * on these chips.
2104 	 */
2105 	if (sc->bge_asicrev == BGE_ASICREV_BCM5714 ||
2106             sc->bge_asicrev == BGE_ASICREV_BCM5752)
2107 		sc->bge_asicrev = BGE_ASICREV_BCM5750;
2108 
2109 	/*
2110 	 * XXX: Broadcom Linux driver.  Not in specs or errata.
2111 	 * PCI-Express?
2112 	 */
2113 	if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
2114 		u_int32_t v;
2115 
2116 		v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
2117 		if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
2118 			v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2119 			if ((v & 0xff) == BGE_PCIE_CAPID)
2120 				sc->bge_pcie = 1;
2121 		}
2122 	}
2123 
2124 	/* Try to reset the chip. */
2125 	bge_reset(sc);
2126 
2127 	if (bge_chipinit(sc)) {
2128 		device_printf(sc->bge_dev, "chip initialization failed\n");
2129 		bge_release_resources(sc);
2130 		error = ENXIO;
2131 		goto fail;
2132 	}
2133 
2134 	/*
2135 	 * Get the station address from NIC memory if present, else the EEPROM.
2136 	 */
2137 	mac_tmp = bge_readmem_ind(sc, 0x0c14);
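	/* 0x484b is ASCII "HK", apparently a marker for a valid stored address. */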
2138 	if ((mac_tmp >> 16) == 0x484b) {
2139 		eaddr[0] = (u_char)(mac_tmp >> 8);
2140 		eaddr[1] = (u_char)mac_tmp;
2141 		mac_tmp = bge_readmem_ind(sc, 0x0c18);
2142 		eaddr[2] = (u_char)(mac_tmp >> 24);
2143 		eaddr[3] = (u_char)(mac_tmp >> 16);
2144 		eaddr[4] = (u_char)(mac_tmp >> 8);
2145 		eaddr[5] = (u_char)mac_tmp;
2146 	} else if (bge_read_eeprom(sc, eaddr,
2147 	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2148 		device_printf(sc->bge_dev, "failed to read station address\n");
2149 		bge_release_resources(sc);
2150 		error = ENXIO;
2151 		goto fail;
2152 	}
2153 
2154 	/* 5705 limits RX return ring to 512 entries. */
2155 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2156 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
2157 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2158 	else
2159 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2160 
2161 	if (bge_dma_alloc(dev)) {
2162 		device_printf(sc->bge_dev,
2163 		    "failed to allocate DMA resources\n");
2164 		bge_release_resources(sc);
2165 		error = ENXIO;
2166 		goto fail;
2167 	}
2168 
2169 	/* Set default tuneable values. */
2170 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2171 	sc->bge_rx_coal_ticks = 150;
2172 	sc->bge_tx_coal_ticks = 150;
2173 	sc->bge_rx_max_coal_bds = 64;
2174 	sc->bge_tx_max_coal_bds = 128;
2175 
2176 	/* Set up ifnet structure */
2177 	ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2178 	if (ifp == NULL) {
2179 		device_printf(sc->bge_dev, "failed to if_alloc()\n");
2180 		bge_release_resources(sc);
2181 		error = ENXIO;
2182 		goto fail;
2183 	}
2184 	ifp->if_softc = sc;
2185 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2186 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2187 	ifp->if_ioctl = bge_ioctl;
2188 	ifp->if_start = bge_start;
2189 	ifp->if_watchdog = bge_watchdog;
2190 	ifp->if_init = bge_init;
2191 	ifp->if_mtu = ETHERMTU;
2192 	ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2193 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2194 	IFQ_SET_READY(&ifp->if_snd);
2195 	ifp->if_hwassist = BGE_CSUM_FEATURES;
2196 	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2197 	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM;
2198 	ifp->if_capenable = ifp->if_capabilities;
2199 #ifdef DEVICE_POLLING
2200 	ifp->if_capabilities |= IFCAP_POLLING;
2201 #endif
2202 
2203 	/*
2204 	 * 5700 B0 chips do not support checksumming correctly due
2205 	 * to hardware bugs.
2206 	 */
2207 	if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2208 		ifp->if_capabilities &= ~IFCAP_HWCSUM;
2209 		ifp->if_capenable &= ~IFCAP_HWCSUM;
2210 		ifp->if_hwassist = 0;
2211 	}
2212 
2213 	/*
2214 	 * Figure out what sort of media we have by checking the
2215 	 * hardware config word in the first 32k of NIC internal memory,
2216 	 * or fall back to examining the EEPROM if necessary.
2217 	 * Note: on some BCM5700 cards, this value appears to be unset.
2218 	 * If that's the case, we have to rely on identifying the NIC
2219 	 * by its PCI subsystem ID, as we do below for the SysKonnect
2220 	 * SK-9D41.
2221 	 */
2222 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2223 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2224 	else {
2225 		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2226 		    sizeof(hwcfg))) {
2227 			device_printf(sc->bge_dev, "failed to read EEPROM\n");
2228 			bge_release_resources(sc);
2229 			error = ENXIO;
2230 			goto fail;
2231 		}
2232 		hwcfg = ntohl(hwcfg);
2233 	}
2234 
2235 	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2236 		sc->bge_tbi = 1;
2237 
2238 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
2239 	if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2240 		sc->bge_tbi = 1;
2241 
2242 	if (sc->bge_tbi) {
2243 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2244 		    bge_ifmedia_upd, bge_ifmedia_sts);
2245 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2246 		ifmedia_add(&sc->bge_ifmedia,
2247 		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2248 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2249 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2250 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2251 	} else {
2252 		/*
2253 		 * Do transceiver setup.
2254 		 */
2255 		if (mii_phy_probe(dev, &sc->bge_miibus,
2256 		    bge_ifmedia_upd, bge_ifmedia_sts)) {
2257 			device_printf(sc->bge_dev, "MII without any PHY!\n");
2258 			bge_release_resources(sc);
2259 			error = ENXIO;
2260 			goto fail;
2261 		}
2262 	}
2263 
2264 	/*
2265 	 * When using the BCM5701 in PCI-X mode, data corruption has
2266 	 * been observed in the first few bytes of some received packets.
2267 	 * Aligning the packet buffer in memory eliminates the corruption.
2268 	 * Unfortunately, this misaligns the packet payloads.  On platforms
2269 	 * which do not support unaligned accesses, we will realign the
2270 	 * payloads by copying the received packets.
2271 	 */
2272 	switch (sc->bge_chipid) {
2273 	case BGE_CHIPID_BCM5701_A0:
2274 	case BGE_CHIPID_BCM5701_B0:
2275 	case BGE_CHIPID_BCM5701_B2:
2276 	case BGE_CHIPID_BCM5701_B5:
2277 		/* If in PCI-X mode, work around the alignment bug. */
2278 		if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
2279 		    (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
2280 		    BGE_PCISTATE_PCI_BUSSPEED)
2281 			sc->bge_rx_alignment_bug = 1;
2282 		break;
2283 	}
2284 
2285 	/*
2286 	 * Call MI attach routine.
2287 	 */
2288 	ether_ifattach(ifp, eaddr);
2289 	callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE);
2290 
2291 	/*
2292 	 * Hookup IRQ last.
2293 	 */
2294 	error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2295 	   bge_intr, sc, &sc->bge_intrhand);
2296 
2297 	if (error) {
2298 		bge_detach(dev);
2299 		device_printf(sc->bge_dev, "couldn't set up irq\n");
2300 	}
2301 
2302 fail:
2303 	return(error);
2304 }
2305 
2306 static int
2307 bge_detach(dev)
2308 	device_t dev;
2309 {
2310 	struct bge_softc *sc;
2311 	struct ifnet *ifp;
2312 
2313 	sc = device_get_softc(dev);
2314 	ifp = sc->bge_ifp;
2315 
2316 #ifdef DEVICE_POLLING
2317 	if (ifp->if_capenable & IFCAP_POLLING)
2318 		ether_poll_deregister(ifp);
2319 #endif
2320 
2321 	BGE_LOCK(sc);
2322 	bge_stop(sc);
2323 	bge_reset(sc);
2324 	BGE_UNLOCK(sc);
2325 
2326 	ether_ifdetach(ifp);
2327 
2328 	if (sc->bge_tbi) {
2329 		ifmedia_removeall(&sc->bge_ifmedia);
2330 	} else {
2331 		bus_generic_detach(dev);
2332 		device_delete_child(dev, sc->bge_miibus);
2333 	}
2334 
2335 	bge_release_resources(sc);
2336 
2337 	return(0);
2338 }
2339 
2340 static void
2341 bge_release_resources(sc)
2342 	struct bge_softc *sc;
2343 {
2344 	device_t dev;
2345 
2346 	dev = sc->bge_dev;
2347 
2348 	if (sc->bge_vpd_prodname != NULL)
2349 		free(sc->bge_vpd_prodname, M_DEVBUF);
2350 
2351 	if (sc->bge_vpd_readonly != NULL)
2352 		free(sc->bge_vpd_readonly, M_DEVBUF);
2353 
2354 	if (sc->bge_intrhand != NULL)
2355 		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2356 
2357 	if (sc->bge_irq != NULL)
2358 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2359 
2360 	if (sc->bge_res != NULL)
2361 		bus_release_resource(dev, SYS_RES_MEMORY,
2362 		    BGE_PCI_BAR0, sc->bge_res);
2363 
2364 	if (sc->bge_ifp != NULL)
2365 		if_free(sc->bge_ifp);
2366 
2367 	bge_dma_free(sc);
2368 
2369 	if (mtx_initialized(&sc->bge_mtx))	/* XXX */
2370 		BGE_LOCK_DESTROY(sc);
2371 
2372 	return;
2373 }
2374 
2375 static void
2376 bge_reset(sc)
2377 	struct bge_softc *sc;
2378 {
2379 	device_t dev;
2380 	u_int32_t cachesize, command, pcistate, reset;
2381 	int i, val = 0;
2382 
2383 	dev = sc->bge_dev;
2384 
2385 	/* Save some important PCI state. */
2386 	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2387 	command = pci_read_config(dev, BGE_PCI_CMD, 4);
2388 	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2389 
2390 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2391 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2392 	BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2393 
2394 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2395 
2396 	/* XXX: Broadcom Linux driver. */
2397 	if (sc->bge_pcie) {
2398 		if (CSR_READ_4(sc, 0x7e2c) == 0x60)	/* PCIE 1.0 */
2399 			CSR_WRITE_4(sc, 0x7e2c, 0x20);
2400 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2401 			/* Prevent PCIE link training during global reset */
2402 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2403 			reset |= (1<<29);
2404 		}
2405 	}
2406 
2407 	/* Issue global reset */
2408 	bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2409 
2410 	DELAY(1000);
2411 
2412 	/* XXX: Broadcom Linux driver. */
2413 	if (sc->bge_pcie) {
2414 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2415 			uint32_t v;
2416 
2417 			DELAY(500000); /* wait for link training to complete */
2418 			v = pci_read_config(dev, 0xc4, 4);
2419 			pci_write_config(dev, 0xc4, v | (1<<15), 4);
2420 		}
2421 		/* Set PCIE max payload size and clear error status. */
2422 		pci_write_config(dev, 0xd8, 0xf5000, 4);
2423 	}
2424 
2425 	/* Reset some of the PCI state that got zapped by reset */
2426 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2427 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2428 	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2429 	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2430 	pci_write_config(dev, BGE_PCI_CMD, command, 4);
2431 	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2432 
2433 	/* Enable memory arbiter. */
2434 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2435 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
2436 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2437 
2438 	/*
2439 	 * Prevent PXE restart: write a magic number to the
2440 	 * general communications memory at 0xB50.
2441 	 */
2442 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2443 	/*
2444 	 * Poll the value location we just wrote until
2445 	 * we see the 1's complement of the magic number.
2446 	 * This indicates that the firmware initialization
2447 	 * is complete.
2448 	 */
2449 	for (i = 0; i < BGE_TIMEOUT; i++) {
2450 		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2451 		if (val == ~BGE_MAGIC_NUMBER)
2452 			break;
2453 		DELAY(10);
2454 	}
2455 
2456 	if (i == BGE_TIMEOUT) {
2457 		device_printf(sc->bge_dev, "firmware handshake timed out\n");
2458 		return;
2459 	}
2460 
2461 	/*
2462 	 * XXX Wait for the value of the PCISTATE register to
2463 	 * return to its original pre-reset state. This is a
2464 	 * fairly good indicator of reset completion. If we don't
2465 	 * wait for the reset to fully complete, trying to read
2466 	 * from the device's non-PCI registers may yield garbage
2467 	 * results.
2468 	 */
2469 	for (i = 0; i < BGE_TIMEOUT; i++) {
2470 		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2471 			break;
2472 		DELAY(10);
2473 	}
2474 
2475 	/* Fix up byte swapping */
2476 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
2477 	    BGE_MODECTL_BYTESWAP_DATA);
2478 
2479 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2480 
2481 	/*
2482 	 * The 5704 in TBI mode apparently needs some special
2483 	 * adjustment to ensure the SERDES drive level is set
2484 	 * to 1.2V.
2485 	 */
2486 	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
2487 		uint32_t serdescfg;
2488 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2489 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
2490 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2491 	}
2492 
2493 	/* XXX: Broadcom Linux driver. */
2494 	if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2495 		uint32_t v;
2496 
2497 		v = CSR_READ_4(sc, 0x7c00);
2498 		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2499 	}
2500 	DELAY(10000);
2501 
2502 	return;
2503 }
2504 
2505 /*
2506  * Frame reception handling. This is called if there's a frame
2507  * on the receive return list.
2508  *
2509  * Note: we have to be able to handle two possibilities here:
2510  * 1) the frame is from the jumbo receive ring
2511  * 2) the frame is from the standard receive ring
2512  */
2513 
2514 static void
2515 bge_rxeof(sc)
2516 	struct bge_softc *sc;
2517 {
2518 	struct ifnet *ifp;
2519 	int stdcnt = 0, jumbocnt = 0;
2520 
2521 	BGE_LOCK_ASSERT(sc);
2522 
2523 	/* Nothing to do */
2524 	if (sc->bge_rx_saved_considx ==
2525 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
2526 		return;
2527 
2528 	ifp = sc->bge_ifp;
2529 
2530 	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2531 	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
2532 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2533 	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
2534 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2535 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2536 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2537 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
2538 		    BUS_DMASYNC_POSTREAD);
2539 	}
2540 
2541 	while(sc->bge_rx_saved_considx !=
2542 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2543 		struct bge_rx_bd	*cur_rx;
2544 		u_int32_t		rxidx;
2545 		struct mbuf		*m = NULL;
2546 		u_int16_t		vlan_tag = 0;
2547 		int			have_tag = 0;
2548 
2549 #ifdef DEVICE_POLLING
2550 		if (ifp->if_capenable & IFCAP_POLLING) {
2551 			if (sc->rxcycles <= 0)
2552 				break;
2553 			sc->rxcycles--;
2554 		}
2555 #endif
2556 
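		/*
		 * Grab the next descriptor from the RX return ring and
		 * advance our consumer index, wrapping at the ring size.
		 */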
2557 		cur_rx =
2558 	    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2559 
2560 		rxidx = cur_rx->bge_idx;
2561 		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2562 
2563 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2564 			have_tag = 1;
2565 			vlan_tag = cur_rx->bge_vlan_tag;
2566 		}
2567 
2568 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2569 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2570 			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
2571 			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
2572 			    BUS_DMASYNC_POSTREAD);
2573 			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
2574 			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
2575 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2576 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2577 			jumbocnt++;
2578 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2579 				ifp->if_ierrors++;
2580 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2581 				continue;
2582 			}
2583 			if (bge_newbuf_jumbo(sc,
2584 			    sc->bge_jumbo, NULL) == ENOBUFS) {
2585 				ifp->if_ierrors++;
2586 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2587 				continue;
2588 			}
2589 		} else {
2590 			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2591 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2592 			    sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2593 			    BUS_DMASYNC_POSTREAD);
2594 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2595 			    sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2596 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2597 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2598 			stdcnt++;
2599 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2600 				ifp->if_ierrors++;
2601 				bge_newbuf_std(sc, sc->bge_std, m);
2602 				continue;
2603 			}
2604 			if (bge_newbuf_std(sc, sc->bge_std,
2605 			    NULL) == ENOBUFS) {
2606 				ifp->if_ierrors++;
2607 				bge_newbuf_std(sc, sc->bge_std, m);
2608 				continue;
2609 			}
2610 		}
2611 
2612 		ifp->if_ipackets++;
2613 #ifndef __NO_STRICT_ALIGNMENT
2614 		/*
2615 		 * For architectures with strict alignment we must make sure
2616 		 * the payload is aligned.
2617 		 */
2618 		if (sc->bge_rx_alignment_bug) {
2619 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2620 			    cur_rx->bge_len);
2621 			m->m_data += ETHER_ALIGN;
2622 		}
2623 #endif
2624 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2625 		m->m_pkthdr.rcvif = ifp;
2626 
2627 		if (ifp->if_capenable & IFCAP_RXCSUM) {
2628 			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2629 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2630 				if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2631 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2632 			}
2633 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
2634 			    m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
2635 				m->m_pkthdr.csum_data =
2636 				    cur_rx->bge_tcp_udp_csum;
2637 				m->m_pkthdr.csum_flags |=
2638 				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2639 			}
2640 		}
2641 
2642 		/*
2643 		 * If we received a packet with a vlan tag,
2644 		 * attach that information to the packet.
2645 		 */
2646 		if (have_tag) {
2647 			VLAN_INPUT_TAG(ifp, m, vlan_tag);
2648 			if (m == NULL)
2649 				continue;
2650 		}
2651 
2652 		BGE_UNLOCK(sc);
2653 		(*ifp->if_input)(ifp, m);
2654 		BGE_LOCK(sc);
2655 	}
2656 
2657 	if (stdcnt > 0)
2658 		bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2659 		    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
2660 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2661 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2662 		if (jumbocnt > 0)
2663 			bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2664 			    sc->bge_cdata.bge_rx_jumbo_ring_map,
2665 			    BUS_DMASYNC_PREWRITE);
2666 	}
2667 
2668 	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2669 	if (stdcnt)
2670 		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2671 	if (jumbocnt)
2672 		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2673 }
2674 
2675 static void
2676 bge_txeof(sc)
2677 	struct bge_softc *sc;
2678 {
2679 	struct bge_tx_bd *cur_tx = NULL;
2680 	struct ifnet *ifp;
2681 
2682 	BGE_LOCK_ASSERT(sc);
2683 
2684 	/* Nothing to do */
2685 	if (sc->bge_tx_saved_considx ==
2686 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
2687 		return;
2688 
2689 	ifp = sc->bge_ifp;
2690 
2691 	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
2692 	    sc->bge_cdata.bge_tx_ring_map,
2693 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2694 	/*
2695 	 * Go through our tx ring and free mbufs for those
2696 	 * frames that have been sent.
2697 	 */
2698 	while (sc->bge_tx_saved_considx !=
2699 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2700 		u_int32_t		idx = 0;
2701 
2702 		idx = sc->bge_tx_saved_considx;
2703 		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2704 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2705 			ifp->if_opackets++;
2706 		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2707 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2708 			    sc->bge_cdata.bge_tx_dmamap[idx],
2709 			    BUS_DMASYNC_POSTWRITE);
2710 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2711 			    sc->bge_cdata.bge_tx_dmamap[idx]);
2712 			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2713 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2714 		}
2715 		sc->bge_txcnt--;
2716 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2717 		ifp->if_timer = 0;
2718 	}
2719 
2720 	if (cur_tx != NULL)
2721 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2722 }
2723 
2724 #ifdef DEVICE_POLLING
2725 static void
2726 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2727 {
2728 	struct bge_softc *sc = ifp->if_softc;
2729 
2730 	BGE_LOCK(sc);
2731 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2732 		bge_poll_locked(ifp, cmd, count);
2733 	BGE_UNLOCK(sc);
2734 }
2735 
2736 static void
2737 bge_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
2738 {
2739 	struct bge_softc *sc = ifp->if_softc;
2740 	uint32_t statusword;
2741 
2742 	BGE_LOCK_ASSERT(sc);
2743 
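	/*
	 * Read and clear the status word from the host status block.
	 * The syncs around the access keep the host's view of the
	 * DMA'ed block coherent before and after the read.
	 */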
2744 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2745 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2746 
2747 	statusword = atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
2748 
2749 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2750 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
2751 
2752 	/* Note link event. It will be processed by POLL_AND_CHECK_STATUS cmd */
2753 	if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
2754 		sc->bge_link_evt++;
2755 
2756 	if (cmd == POLL_AND_CHECK_STATUS)
2757 		if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2758 		    sc->bge_chipid != BGE_CHIPID_BCM5700_B1) ||
2759 		    sc->bge_link_evt || sc->bge_tbi)
2760 			bge_link_upd(sc);
2761 
2762 	sc->rxcycles = count;
2763 	bge_rxeof(sc);
2764 	bge_txeof(sc);
2765 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2766 		bge_start_locked(ifp);
2767 }
2768 #endif /* DEVICE_POLLING */
2769 
2770 static void
2771 bge_intr(xsc)
2772 	void *xsc;
2773 {
2774 	struct bge_softc *sc;
2775 	struct ifnet *ifp;
2776 	uint32_t statusword;
2777 
2778 	sc = xsc;
2779 
2780 	BGE_LOCK(sc);
2781 
2782 	ifp = sc->bge_ifp;
2783 
2784 #ifdef DEVICE_POLLING
2785 	if (ifp->if_capenable & IFCAP_POLLING) {
2786 		BGE_UNLOCK(sc);
2787 		return;
2788 	}
2789 #endif
2790 
2791 	/*
2792 	 * Do the mandatory PCI flush as well as get the link status.
2793 	 */
2794 	statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
2795 
2796 	/* Ack interrupt and stop others from occurring. */
2797 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2798 
2799 	/* Make sure the descriptor ring indexes are coherent. */
2800 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2801 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2802 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2803 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
2804 
2805 	if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2806 	    sc->bge_chipid != BGE_CHIPID_BCM5700_B1) ||
2807 	    statusword || sc->bge_link_evt)
2808 		bge_link_upd(sc);
2809 
2810 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2811 		/* Check RX return ring producer/consumer */
2812 		bge_rxeof(sc);
2813 
2814 		/* Check TX ring producer/consumer */
2815 		bge_txeof(sc);
2816 	}
2817 
2818 	/* Re-enable interrupts. */
2819 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2820 
2821 	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2822 	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2823 		bge_start_locked(ifp);
2824 
2825 	BGE_UNLOCK(sc);
2826 
2827 	return;
2828 }
2829 
2830 static void
2831 bge_tick_locked(sc)
2832 	struct bge_softc *sc;
2833 {
2834 	struct mii_data *mii = NULL;
2835 
2836 	BGE_LOCK_ASSERT(sc);
2837 
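	/*
	 * The 5705/5750 class chips don't provide the full DMA'ed
	 * statistics block, so read their MAC statistics registers
	 * instead; other chips use the statistics block.
	 */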
2838 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2839 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
2840 		bge_stats_update_regs(sc);
2841 	else
2842 		bge_stats_update(sc);
2843 
2844 	if (!sc->bge_tbi) {
2845 		mii = device_get_softc(sc->bge_miibus);
2846 		mii_tick(mii);
2847 	} else {
2848 		/*
2849 		 * Since auto-polling can't be used in TBI mode, poll the link
2850 		 * status manually. Register a pending link event here and
2851 		 * trigger an interrupt.
2852 		 */
2853 #ifdef DEVICE_POLLING
2854 		/* In polling mode we poll link state in bge_poll_locked() */
2855 		if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
2856 #endif
2857 		{
2858 		sc->bge_link_evt++;
2859 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
2860 		}
2861 	}
2862 
2863 	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
2864 }
2865 
2866 static void
2867 bge_tick(xsc)
2868 	void *xsc;
2869 {
2870 	struct bge_softc *sc;
2871 
2872 	sc = xsc;
2873 
2874 	BGE_LOCK(sc);
2875 	bge_tick_locked(sc);
2876 	BGE_UNLOCK(sc);
2877 }
2878 
2879 static void
2880 bge_stats_update_regs(sc)
2881 	struct bge_softc *sc;
2882 {
2883 	struct ifnet *ifp;
2884 	struct bge_mac_stats_regs stats;
2885 	u_int32_t *s;
2886 	u_long cnt;			/* current register value */
2887 	int i;
2888 
2889 	ifp = sc->bge_ifp;
2890 
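	/*
	 * Copy the MAC statistics registers into a local structure,
	 * one 32-bit word at a time, so the collision counters can be
	 * read by name below.
	 */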
2891 	s = (u_int32_t *)&stats;
2892 	for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2893 		*s = CSR_READ_4(sc, BGE_RX_STATS + i);
2894 		s++;
2895 	}
2896 
2897 	cnt = stats.dot3StatsSingleCollisionFrames +
2898 	    stats.dot3StatsMultipleCollisionFrames +
2899 	    stats.dot3StatsExcessiveCollisions +
2900 	    stats.dot3StatsLateCollisions;
2901 	ifp->if_collisions += cnt >= sc->bge_tx_collisions ?
2902 	    cnt - sc->bge_tx_collisions : cnt;
2903 	sc->bge_tx_collisions = cnt;
2904 }
2905 
2906 static void
2907 bge_stats_update(sc)
2908 	struct bge_softc *sc;
2909 {
2910 	struct ifnet *ifp;
2911 	bus_size_t stats;
2912 	u_long cnt;			/* current register value */
2913 
2914 	ifp = sc->bge_ifp;
2915 
2916 	stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2917 
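/*
 * READ_STAT fetches the low 32 bits of a counter from the statistics
 * block, which is accessed through the NIC's memory window.
 */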
2918 #define READ_STAT(sc, stats, stat) \
2919 	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
2920 
2921 	cnt = READ_STAT(sc, stats,
2922 	    txstats.dot3StatsSingleCollisionFrames.bge_addr_lo);
2923 	cnt += READ_STAT(sc, stats,
2924 	    txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo);
2925 	cnt += READ_STAT(sc, stats,
2926 	    txstats.dot3StatsExcessiveCollisions.bge_addr_lo);
2927 	cnt += READ_STAT(sc, stats,
2928 		txstats.dot3StatsLateCollisions.bge_addr_lo);
2929 	ifp->if_collisions += cnt >= sc->bge_tx_collisions ?
2930 	    cnt - sc->bge_tx_collisions : cnt;
2931 	sc->bge_tx_collisions = cnt;
2932 
2933 	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
2934 	ifp->if_ierrors += cnt >= sc->bge_rx_discards ?
2935 	    cnt - sc->bge_rx_discards : cnt;
2936 	sc->bge_rx_discards = cnt;
2937 
2938 	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
2939 	ifp->if_oerrors += cnt >= sc->bge_tx_discards ?
2940 	    cnt - sc->bge_tx_discards : cnt;
2941 	sc->bge_tx_discards = cnt;
2942 
2943 #undef READ_STAT
2944 }
2945 
2946 /*
2947  * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
2948  * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
2949  * but when such padded frames employ the bge IP/TCP checksum offload,
2950  * the hardware checksum assist gives incorrect results (possibly
2951  * from incorporating its own padding into the UDP/TCP checksum; who knows).
2952  * If we pad such runts with zeros, the onboard checksum comes out correct.
2953  */
2954 static __inline int
2955 bge_cksum_pad(struct mbuf *m)
2956 {
2957 	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
2958 	struct mbuf *last;
2959 
2960 	/* If there's only the packet-header and we can pad there, use it. */
2961 	if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
2962 	    M_TRAILINGSPACE(m) >= padlen) {
2963 		last = m;
2964 	} else {
2965 		/*
2966 		 * Walk packet chain to find last mbuf. We will either
2967 		 * pad there, or append a new mbuf and pad it.
2968 		 */
2969 		for (last = m; last->m_next != NULL; last = last->m_next);
2970 		if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
2971 			/* Allocate new empty mbuf, pad it. Compact later. */
2972 			struct mbuf *n;
2973 
2974 			MGET(n, M_DONTWAIT, MT_DATA);
2975 			if (n == NULL)
2976 				return (ENOBUFS);
2977 			n->m_len = 0;
2978 			last->m_next = n;
2979 			last = n;
2980 		}
2981 	}
2982 
2983 	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
2984 	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
2985 	last->m_len += padlen;
2986 	m->m_pkthdr.len += padlen;
2987 
2988 	return (0);
2989 }
2990 
2991 /*
2992  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2993  * pointers to descriptors.
2994  */
2995 static int
2996 bge_encap(sc, m_head, txidx)
2997 	struct bge_softc *sc;
2998 	struct mbuf *m_head;
2999 	uint32_t *txidx;
3000 {
3001 	bus_dma_segment_t	segs[BGE_NSEG_NEW];
3002 	bus_dmamap_t		map;
3003 	struct bge_tx_bd	*d = NULL;
3004 	struct m_tag		*mtag;
3005 	uint32_t		idx = *txidx;
3006 	uint16_t		csum_flags = 0;
3007 	int			nsegs, i, error;
3008 
3009 	if (m_head->m_pkthdr.csum_flags) {
3010 		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
3011 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3012 		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
3013 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3014 			if (m_head->m_pkthdr.len < ETHER_MIN_NOPAD &&
3015 			    bge_cksum_pad(m_head) != 0)
3016 				return (ENOBUFS);
3017 		}
3018 		if (m_head->m_flags & M_LASTFRAG)
3019 			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3020 		else if (m_head->m_flags & M_FRAG)
3021 			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3022 	}
3023 
3024 	mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m_head);
3025 
3026 	map = sc->bge_cdata.bge_tx_dmamap[idx];
3027 	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map,
3028 	    m_head, segs, &nsegs, BUS_DMA_NOWAIT);
3029 	if (error) {
3030 		if (error == EFBIG) {
3031 			struct mbuf *m0;
3032 
3033 			m0 = m_defrag(m_head, M_DONTWAIT);
3034 			if (m0 == NULL)
3035 				return (ENOBUFS);
3036 			m_head = m0;
3037 			error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag,
3038 			    map, m_head, segs, &nsegs, BUS_DMA_NOWAIT);
3039 		}
3040 		if (error)
3041 			return (error);
3042 	}
3043 
3044 	/*
3045 	 * Sanity check: avoid coming within 16 descriptors
3046 	 * of the end of the ring.
3047 	 */
3048 	if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
3049 		bus_dmamap_unload(sc->bge_cdata.bge_mtag, map);
3050 		return (ENOBUFS);
3051 	}
3052 
3053 	bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
3054 
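	/*
	 * Fill in one TX descriptor per DMA segment.  Every descriptor
	 * carries the same checksum flags; the last one is flagged as
	 * the end of the frame below.
	 */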
3055 	for (i = 0; ; i++) {
3056 		d = &sc->bge_ldata.bge_tx_ring[idx];
3057 		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
3058 		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
3059 		d->bge_len = segs[i].ds_len;
3060 		d->bge_flags = csum_flags;
3061 		if (i == nsegs - 1)
3062 			break;
3063 		BGE_INC(idx, BGE_TX_RING_CNT);
3064 	}
3065 
3066 	/* Mark the last segment as end of packet... */
3067 	d->bge_flags |= BGE_TXBDFLAG_END;
3068 	/* ... and put VLAN tag into first segment.  */
3069 	d = &sc->bge_ldata.bge_tx_ring[*txidx];
3070 	if (mtag != NULL) {
3071 		d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3072 		d->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
3073 	} else
3074 		d->bge_vlan_tag = 0;
3075 
3076 	/*
3077 	 * Ensure that the map for this transmission
3078 	 * is placed at the array index of the last descriptor
3079 	 * in this chain.
3080 	 */
3081 	sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
3082 	sc->bge_cdata.bge_tx_dmamap[idx] = map;
3083 	sc->bge_cdata.bge_tx_chain[idx] = m_head;
3084 	sc->bge_txcnt += nsegs;
3085 
3086 	BGE_INC(idx, BGE_TX_RING_CNT);
3087 	*txidx = idx;
3088 
3089 	return (0);
3090 }
3091 
3092 /*
3093  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3094  * to the mbuf data regions directly in the transmit descriptors.
3095  */
3096 static void
3097 bge_start_locked(ifp)
3098 	struct ifnet *ifp;
3099 {
3100 	struct bge_softc *sc;
3101 	struct mbuf *m_head = NULL;
3102 	uint32_t prodidx;
3103 	int count = 0;
3104 
3105 	sc = ifp->if_softc;
3106 
3107 	if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3108 		return;
3109 
3110 	prodidx = sc->bge_tx_prodidx;
3111 
3112 	while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3113 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3114 		if (m_head == NULL)
3115 			break;
3116 
3117 		/*
3118 		 * XXX
3119 		 * The code inside the if() block is never reached since we
3120 		 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3121 		 * requests to checksum TCP/UDP in a fragmented packet.
3122 		 *
3123 		 * XXX
3124 		 * safety overkill.  If this is a fragmented packet chain
3125 		 * with delayed TCP/UDP checksums, then only encapsulate
3126 		 * it if we have enough descriptors to handle the entire
3127 		 * chain at once.
3128 		 * (paranoia -- may not actually be needed)
3129 		 */
3130 		if (m_head->m_flags & M_FIRSTFRAG &&
3131 		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3132 			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3133 			    m_head->m_pkthdr.csum_data + 16) {
3134 				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3135 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3136 				break;
3137 			}
3138 		}
3139 
3140 		/*
3141 		 * Pack the data into the transmit ring. If we
3142 		 * don't have room, set the OACTIVE flag and wait
3143 		 * for the NIC to drain the ring.
3144 		 */
3145 		if (bge_encap(sc, m_head, &prodidx)) {
3146 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3147 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3148 			break;
3149 		}
3150 		++count;
3151 
3152 		/*
3153 		 * If there's a BPF listener, bounce a copy of this frame
3154 		 * to him.
3155 		 */
3156 		BPF_MTAP(ifp, m_head);
3157 	}
3158 
3159 	if (count == 0) {
3160 		/* no packets were dequeued */
3161 		return;
3162 	}
3163 
3164 	/* Transmit */
3165 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3166 	/* 5700 b2 errata */
3167 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3168 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3169 
3170 	sc->bge_tx_prodidx = prodidx;
3171 
3172 	/*
3173 	 * Set a timeout in case the chip goes out to lunch.
3174 	 */
3175 	ifp->if_timer = 5;
3176 
3177 	return;
3178 }
3179 
3180 /*
3181  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3182  * to the mbuf data regions directly in the transmit descriptors.
3183  */
3184 static void
3185 bge_start(ifp)
3186 	struct ifnet *ifp;
3187 {
3188 	struct bge_softc *sc;
3189 
3190 	sc = ifp->if_softc;
3191 	BGE_LOCK(sc);
3192 	bge_start_locked(ifp);
3193 	BGE_UNLOCK(sc);
3194 }
3195 
3196 static void
3197 bge_init_locked(sc)
3198 	struct bge_softc *sc;
3199 {
3200 	struct ifnet *ifp;
3201 	u_int16_t *m;
3202 
3203 	BGE_LOCK_ASSERT(sc);
3204 
3205 	ifp = sc->bge_ifp;
3206 
3207 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3208 		return;
3209 
3210 	/* Cancel pending I/O and flush buffers. */
3211 	bge_stop(sc);
3212 	bge_reset(sc);
3213 	bge_chipinit(sc);
3214 
3215 	/*
3216 	 * Init the various state machines, ring
3217 	 * control blocks and firmware.
3218 	 */
3219 	if (bge_blockinit(sc)) {
3220 		device_printf(sc->bge_dev, "initialization failure\n");
3221 		return;
3222 	}
3223 
3224 	ifp = sc->bge_ifp;
3225 
3226 	/* Specify the maximum RX frame length: MTU plus frame overhead. */
3227 	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3228 	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
3229 
3230 	/* Load our MAC address. */
3231 	m = (u_int16_t *)IF_LLADDR(sc->bge_ifp);
3232 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3233 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3234 
3235 	/* Enable or disable promiscuous mode as needed. */
3236 	if (ifp->if_flags & IFF_PROMISC) {
3237 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3238 	} else {
3239 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3240 	}
3241 
3242 	/* Program multicast filter. */
3243 	bge_setmulti(sc);
3244 
3245 	/* Init RX ring. */
3246 	bge_init_rx_ring_std(sc);
3247 
3248 	/*
3249 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3250 	 * memory to ensure that the chip has in fact read the first
3251 	 * entry of the ring.
3252 	 */
3253 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3254 		u_int32_t		v, i;
3255 		for (i = 0; i < 10; i++) {
3256 			DELAY(20);
3257 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3258 			if (v == (MCLBYTES - ETHER_ALIGN))
3259 				break;
3260 		}
3261 		if (i == 10)
3262 			device_printf (sc->bge_dev,
3263 			    "5705 A0 chip failed to load RX ring\n");
3264 	}
3265 
3266 	/* Init jumbo RX ring. */
3267 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3268 		bge_init_rx_ring_jumbo(sc);
3269 
3270 	/* Init our RX return ring index */
3271 	sc->bge_rx_saved_considx = 0;
3272 
3273 	/* Init TX ring. */
3274 	bge_init_tx_ring(sc);
3275 
3276 	/* Turn on transmitter */
3277 	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3278 
3279 	/* Turn on receiver */
3280 	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3281 
3282 	/* Tell firmware we're alive. */
3283 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3284 
3285 #ifdef DEVICE_POLLING
3286 	/* Disable interrupts if we are polling. */
3287 	if (ifp->if_capenable & IFCAP_POLLING) {
3288 		BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3289 		    BGE_PCIMISCCTL_MASK_PCI_INTR);
3290 		CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3291 		CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3292 		CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3293 	} else
3294 #endif
3295 
3296 	/* Enable host interrupts. */
3297 	{
3298 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3299 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3300 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3301 	}
3302 
3303 	bge_ifmedia_upd(ifp);
3304 
3305 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3306 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3307 
3308 	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3309 }
3310 
3311 static void
3312 bge_init(xsc)
3313 	void *xsc;
3314 {
3315 	struct bge_softc *sc = xsc;
3316 
3317 	BGE_LOCK(sc);
3318 	bge_init_locked(sc);
3319 	BGE_UNLOCK(sc);
3320 
3321 	return;
3322 }
3323 
3324 /*
3325  * Set media options.
3326  */
3327 static int
3328 bge_ifmedia_upd(ifp)
3329 	struct ifnet *ifp;
3330 {
3331 	struct bge_softc *sc;
3332 	struct mii_data *mii;
3333 	struct ifmedia *ifm;
3334 
3335 	sc = ifp->if_softc;
3336 	ifm = &sc->bge_ifmedia;
3337 
3338 	/* If this is a 1000baseX NIC, enable the TBI port. */
3339 	if (sc->bge_tbi) {
3340 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3341 			return(EINVAL);
3342 		switch(IFM_SUBTYPE(ifm->ifm_media)) {
3343 		case IFM_AUTO:
3344 			/*
3345 			 * The BCM5704 ASIC appears to have a special
3346 			 * mechanism for programming the autoneg
3347 			 * advertisement registers in TBI mode.
3348 			 */
3349 			if (bge_fake_autoneg == 0 &&
3350 			    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3351 				uint32_t sgdig;
3352 				CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3353 				sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3354 				sgdig |= BGE_SGDIGCFG_AUTO|
3355 				    BGE_SGDIGCFG_PAUSE_CAP|
3356 				    BGE_SGDIGCFG_ASYM_PAUSE;
3357 				CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3358 				    sgdig|BGE_SGDIGCFG_SEND);
3359 				DELAY(5);
3360 				CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3361 			}
3362 			break;
3363 		case IFM_1000_SX:
3364 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3365 				BGE_CLRBIT(sc, BGE_MAC_MODE,
3366 				    BGE_MACMODE_HALF_DUPLEX);
3367 			} else {
3368 				BGE_SETBIT(sc, BGE_MAC_MODE,
3369 				    BGE_MACMODE_HALF_DUPLEX);
3370 			}
3371 			break;
3372 		default:
3373 			return(EINVAL);
3374 		}
3375 		return(0);
3376 	}
3377 
3378 	sc->bge_link_evt++;
3379 	mii = device_get_softc(sc->bge_miibus);
3380 	if (mii->mii_instance) {
3381 		struct mii_softc *miisc;
3382 		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3383 		    miisc = LIST_NEXT(miisc, mii_list))
3384 			mii_phy_reset(miisc);
3385 	}
3386 	mii_mediachg(mii);
3387 
3388 	return(0);
3389 }
3390 
3391 /*
3392  * Report current media status.
3393  */
3394 static void
3395 bge_ifmedia_sts(ifp, ifmr)
3396 	struct ifnet *ifp;
3397 	struct ifmediareq *ifmr;
3398 {
3399 	struct bge_softc *sc;
3400 	struct mii_data *mii;
3401 
3402 	sc = ifp->if_softc;
3403 
3404 	if (sc->bge_tbi) {
3405 		ifmr->ifm_status = IFM_AVALID;
3406 		ifmr->ifm_active = IFM_ETHER;
3407 		if (CSR_READ_4(sc, BGE_MAC_STS) &
3408 		    BGE_MACSTAT_TBI_PCS_SYNCHED)
3409 			ifmr->ifm_status |= IFM_ACTIVE;
3410 		ifmr->ifm_active |= IFM_1000_SX;
3411 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3412 			ifmr->ifm_active |= IFM_HDX;
3413 		else
3414 			ifmr->ifm_active |= IFM_FDX;
3415 		return;
3416 	}
3417 
3418 	mii = device_get_softc(sc->bge_miibus);
3419 	mii_pollstat(mii);
3420 	ifmr->ifm_active = mii->mii_media_active;
3421 	ifmr->ifm_status = mii->mii_media_status;
3422 
3423 	return;
3424 }
3425 
3426 static int
3427 bge_ioctl(ifp, command, data)
3428 	struct ifnet *ifp;
3429 	u_long command;
3430 	caddr_t data;
3431 {
3432 	struct bge_softc *sc = ifp->if_softc;
3433 	struct ifreq *ifr = (struct ifreq *) data;
3434 	int mask, error = 0;
3435 	struct mii_data *mii;
3436 
3437 	switch(command) {
3438 	case SIOCSIFMTU:
3439 		/* Disallow jumbo frames on 5705/5750. */
3440 		if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
3441 		      sc->bge_asicrev == BGE_ASICREV_BCM5750) &&
3442 		    ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
3443 			error = EINVAL;
3444 		else {
3445 			ifp->if_mtu = ifr->ifr_mtu;
3446 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3447 			bge_init(sc);
3448 		}
3449 		break;
3450 	case SIOCSIFFLAGS:
3451 		BGE_LOCK(sc);
3452 		if (ifp->if_flags & IFF_UP) {
3453 			/*
3454 			 * If only the state of the PROMISC flag changed,
3455 			 * then just use the 'set promisc mode' command
3456 			 * instead of reinitializing the entire NIC. Doing
3457 			 * a full re-init means reloading the firmware and
3458 			 * waiting for it to start up, which may take a
3459 			 * second or two.  Similarly for ALLMULTI.
3460 			 */
3461 			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3462 			    ifp->if_flags & IFF_PROMISC &&
3463 			    !(sc->bge_if_flags & IFF_PROMISC)) {
3464 				BGE_SETBIT(sc, BGE_RX_MODE,
3465 				    BGE_RXMODE_RX_PROMISC);
3466 			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3467 			    !(ifp->if_flags & IFF_PROMISC) &&
3468 			    sc->bge_if_flags & IFF_PROMISC) {
3469 				BGE_CLRBIT(sc, BGE_RX_MODE,
3470 				    BGE_RXMODE_RX_PROMISC);
3471 			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3472 			    (ifp->if_flags ^ sc->bge_if_flags) & IFF_ALLMULTI) {
3473 				bge_setmulti(sc);
3474 			} else
3475 				bge_init_locked(sc);
3476 		} else {
3477 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3478 				bge_stop(sc);
3479 			}
3480 		}
3481 		sc->bge_if_flags = ifp->if_flags;
3482 		BGE_UNLOCK(sc);
3483 		error = 0;
3484 		break;
3485 	case SIOCADDMULTI:
3486 	case SIOCDELMULTI:
3487 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3488 			BGE_LOCK(sc);
3489 			bge_setmulti(sc);
3490 			BGE_UNLOCK(sc);
3491 			error = 0;
3492 		}
3493 		break;
3494 	case SIOCSIFMEDIA:
3495 	case SIOCGIFMEDIA:
3496 		if (sc->bge_tbi) {
3497 			error = ifmedia_ioctl(ifp, ifr,
3498 			    &sc->bge_ifmedia, command);
3499 		} else {
3500 			mii = device_get_softc(sc->bge_miibus);
3501 			error = ifmedia_ioctl(ifp, ifr,
3502 			    &mii->mii_media, command);
3503 		}
3504 		break;
3505 	case SIOCSIFCAP:
3506 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3507 #ifdef DEVICE_POLLING
3508 		if (mask & IFCAP_POLLING) {
3509 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
3510 				error = ether_poll_register(bge_poll, ifp);
3511 				if (error)
3512 					return(error);
3513 				BGE_LOCK(sc);
3514 				BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3515 				    BGE_PCIMISCCTL_MASK_PCI_INTR);
3516 				CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3517 				CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3518 				CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3519 				ifp->if_capenable |= IFCAP_POLLING;
3520 				BGE_UNLOCK(sc);
3521 			} else {
3522 				error = ether_poll_deregister(ifp);
3523 				/* Enable interrupt even in error case */
3524 				BGE_LOCK(sc);
3525 				CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
3526 				CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
3527 				BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
3528 				    BGE_PCIMISCCTL_MASK_PCI_INTR);
3529 				CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3530 				ifp->if_capenable &= ~IFCAP_POLLING;
3531 				BGE_UNLOCK(sc);
3532 			}
3533 		}
3534 #endif
3535 		if (mask & IFCAP_HWCSUM) {
3536 			ifp->if_capenable ^= IFCAP_HWCSUM;
3537 			if (IFCAP_HWCSUM & ifp->if_capenable &&
3538 			    IFCAP_HWCSUM & ifp->if_capabilities)
3539 				ifp->if_hwassist = BGE_CSUM_FEATURES;
3540 			else
3541 				ifp->if_hwassist = 0;
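			/*
			 * VLAN_CAPABILITIES() presumably lets any vlan(4)
			 * interfaces stacked on top of us re-evaluate their
			 * own capabilities after the change.
			 */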
3542 			VLAN_CAPABILITIES(ifp);
3543 		}
3544 		break;
3545 	default:
3546 		error = ether_ioctl(ifp, command, data);
3547 		break;
3548 	}
3549 
3550 	return(error);
3551 }
3552 
3553 static void
3554 bge_watchdog(ifp)
3555 	struct ifnet *ifp;
3556 {
3557 	struct bge_softc *sc;
3558 
3559 	sc = ifp->if_softc;
3560 
3561 	if_printf(ifp, "watchdog timeout -- resetting\n");
3562 
3563 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3564 	bge_init(sc);
3565 
3566 	ifp->if_oerrors++;
3567 
3568 	return;
3569 }
3570 
3571 /*
3572  * Stop the adapter and free any mbufs allocated to the
3573  * RX and TX lists.
3574  */
3575 static void
3576 bge_stop(sc)
3577 	struct bge_softc *sc;
3578 {
3579 	struct ifnet *ifp;
3580 	struct ifmedia_entry *ifm;
3581 	struct mii_data *mii = NULL;
3582 	int mtmp, itmp;
3583 
3584 	BGE_LOCK_ASSERT(sc);
3585 
3586 	ifp = sc->bge_ifp;
3587 
3588 	if (!sc->bge_tbi)
3589 		mii = device_get_softc(sc->bge_miibus);
3590 
3591 	callout_stop(&sc->bge_stat_ch);
3592 
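	/*
	 * The blocks below are shut down in roughly the reverse of the order
	 * in which bge_blockinit() enabled them; the BCM5705/BCM5750 checks
	 * apparently skip register blocks that do not exist on those chips.
	 */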
3593 	/*
3594 	 * Disable all of the receiver blocks
3595 	 */
3596 	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3597 	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3598 	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3599 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3600 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3601 		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3602 	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3603 	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3604 	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3605 
3606 	/*
3607 	 * Disable all of the transmit blocks
3608 	 */
3609 	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3610 	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3611 	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3612 	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3613 	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3614 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3615 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3616 		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3617 	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3618 
3619 	/*
3620 	 * Shut down all of the memory managers and related
3621 	 * state machines.
3622 	 */
3623 	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3624 	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3625 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3626 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3627 		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
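	/* Reset the chip's FTQs (flow-through queues). */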
3628 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3629 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3630 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3631 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
3632 		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3633 		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3634 	}
3635 
3636 	/* Disable host interrupts. */
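	/*
	 * Writing a non-zero value to the IRQ0 mailbox apparently keeps the
	 * chip from generating further interrupts until the host writes 0
	 * back; the interrupt handler uses the same handshake.
	 */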
3637 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3638 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3639 
3640 	/*
3641 	 * Tell firmware we're shutting down.
3642 	 */
3643 	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3644 
3645 	/* Free the RX lists. */
3646 	bge_free_rx_ring_std(sc);
3647 
3648 	/* Free jumbo RX list. */
3649 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
3650 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
3651 		bge_free_rx_ring_jumbo(sc);
3652 
3653 	/* Free TX buffers. */
3654 	bge_free_tx_ring(sc);
3655 
3656 	/*
3657 	 * Isolate/power down the PHY, but leave the media selection
3658 	 * unchanged so that things will be put back to normal when
3659 	 * we bring the interface back up.
3660 	 */
3661 	if (!sc->bge_tbi) {
3662 		itmp = ifp->if_flags;
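		/*
		 * IFF_UP is forced temporarily here because the PHY drivers
		 * generally ignore media changes while the interface is
		 * marked down; the original flags are restored below.
		 */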
3663 		ifp->if_flags |= IFF_UP;
3664 		/*
3665 		 * If we are called from bge_detach(), mii is already NULL.
3666 		 */
3667 		if (mii != NULL) {
3668 			ifm = mii->mii_media.ifm_cur;
3669 			mtmp = ifm->ifm_media;
3670 			ifm->ifm_media = IFM_ETHER|IFM_NONE;
3671 			mii_mediachg(mii);
3672 			ifm->ifm_media = mtmp;
3673 		}
3674 		ifp->if_flags = itmp;
3675 	}
3676 
3677 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3678 
3679 	/*
3680 	 * We can't just call bge_link_upd() here because the chip is almost
3681 	 * stopped, so the bge_link_upd -> bge_tick_locked -> bge_stats_update
3682 	 * sequence may lead to a hardware deadlock.  Instead, just clear the
3683 	 * MAC's link state (the PHY may still report link UP).
3684 	 */
3685 	if (bootverbose && sc->bge_link)
3686 		if_printf(sc->bge_ifp, "link DOWN\n");
3687 	sc->bge_link = 0;
3688 
3689 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3690 }
3691 
3692 /*
3693  * Stop all chip I/O so that the kernel's probe routines don't
3694  * get confused by errant DMAs when rebooting.
3695  */
3696 static void
3697 bge_shutdown(dev)
3698 	device_t dev;
3699 {
3700 	struct bge_softc *sc;
3701 
3702 	sc = device_get_softc(dev);
3703 
3704 	BGE_LOCK(sc);
3705 	bge_stop(sc);
3706 	bge_reset(sc);
3707 	BGE_UNLOCK(sc);
3708 
3709 	return;
3710 }
3711 
3712 static int
3713 bge_suspend(device_t dev)
3714 {
3715 	struct bge_softc *sc;
3716 
3717 	sc = device_get_softc(dev);
3718 	BGE_LOCK(sc);
3719 	bge_stop(sc);
3720 	BGE_UNLOCK(sc);
3721 
3722 	return (0);
3723 }
3724 
3725 static int
3726 bge_resume(device_t dev)
3727 {
3728 	struct bge_softc *sc;
3729 	struct ifnet *ifp;
3730 
3731 	sc = device_get_softc(dev);
3732 	BGE_LOCK(sc);
3733 	ifp = sc->bge_ifp;
3734 	if (ifp->if_flags & IFF_UP) {
3735 		bge_init_locked(sc);
3736 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3737 			bge_start_locked(ifp);
3738 	}
3739 	BGE_UNLOCK(sc);
3740 
3741 	return (0);
3742 }
3743 
3744 static void
3745 bge_link_upd(sc)
3746 	struct bge_softc *sc;
3747 {
3748 	struct mii_data *mii;
3749 	uint32_t link, status;
3750 
3751 	BGE_LOCK_ASSERT(sc);
3752 
3753 	/* Clear 'pending link event' flag */
3754 	sc->bge_link_evt = 0;
3755 
3756 	/*
3757 	 * Process link state changes.
3758 	 * Grrr. The link status word in the status block does
3759 	 * not work correctly on the BCM5700 rev AX and BX chips,
3760 	 * according to all available information. Hence, we have
3761 	 * to enable MII interrupts in order to properly obtain
3762 	 * async link changes. Unfortunately, this also means that
3763 	 * we have to read the MAC status register to detect link
3764 	 * changes, thereby adding an additional register access to
3765 	 * the interrupt handler.
3766 	 *
3767 	 * XXX: perhaps the link state detection procedure used for
3768 	 * BGE_CHIPID_BCM5700_B1 could also be used for other BCM5700 revisions.
3769 	 */
3770 
3771 	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3772 	    sc->bge_chipid != BGE_CHIPID_BCM5700_B1) {
3773 		status = CSR_READ_4(sc, BGE_MAC_STS);
3774 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
3775 			callout_stop(&sc->bge_stat_ch);
3776 			bge_tick_locked(sc);
3777 
3778 			mii = device_get_softc(sc->bge_miibus);
3779 			if (!sc->bge_link &&
3780 			    mii->mii_media_status & IFM_ACTIVE &&
3781 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3782 				sc->bge_link++;
3783 				if (bootverbose)
3784 					if_printf(sc->bge_ifp, "link UP\n");
3785 			} else if (sc->bge_link &&
3786 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
3787 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3788 				sc->bge_link = 0;
3789 				if (bootverbose)
3790 					if_printf(sc->bge_ifp, "link DOWN\n");
3791 			}
3792 
3793 			/* Clear the interrupt */
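			/*
			 * Re-enabling BGE_EVTENB_MI_INTERRUPT re-arms the
			 * MAC's MII attention, reading the PHY's ISR
			 * apparently clears the latched interrupt, and
			 * rewriting the IMR unmasks the PHY interrupts again.
			 */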
3794 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3795 			    BGE_EVTENB_MI_INTERRUPT);
3796 			bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
3797 			bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
3798 			    BRGPHY_INTRS);
3799 		}
3800 		return;
3801 	}
3802 
3803 	if (sc->bge_tbi) {
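		/*
		 * TBI (fiber) interfaces have no copper PHY, so link state is
		 * derived from the MAC's PCS-synchronized status bit.  On the
		 * BCM5704 the transmission of configuration codewords is
		 * apparently stopped once the link comes up.
		 */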
3804 		status = CSR_READ_4(sc, BGE_MAC_STS);
3805 		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3806 			if (!sc->bge_link) {
3807 				sc->bge_link++;
3808 				if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
3809 					BGE_CLRBIT(sc, BGE_MAC_MODE,
3810 					    BGE_MACMODE_TBI_SEND_CFGS);
3811 				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3812 				if (bootverbose)
3813 					if_printf(sc->bge_ifp, "link UP\n");
3814 				if_link_state_change(sc->bge_ifp, LINK_STATE_UP);
3815 			}
3816 		} else if (sc->bge_link) {
3817 			sc->bge_link = 0;
3818 			if (bootverbose)
3819 				if_printf(sc->bge_ifp, "link DOWN\n");
3820 			if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
3821 		}
3822 	/* Discard link events for MII/GMII cards if MI auto-polling is disabled. */
3823 	} else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
3824 		/*
3825 		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
3826 		 * bit in the status word always set.  Work around this bug by
3827 		 * reading the PHY link status directly.
3828 		 */
3829 		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
3830 
3831 		if (link != sc->bge_link ||
3832 		    sc->bge_asicrev == BGE_ASICREV_BCM5700) {
3833 			callout_stop(&sc->bge_stat_ch);
3834 			bge_tick_locked(sc);
3835 
3836 			mii = device_get_softc(sc->bge_miibus);
3837 			if (!sc->bge_link &&
3838 			    mii->mii_media_status & IFM_ACTIVE &&
3839 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3840 				sc->bge_link++;
3841 				if (bootverbose)
3842 					if_printf(sc->bge_ifp, "link UP\n");
3843 			} else if (sc->bge_link &&
3844 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
3845 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3846 				sc->bge_link = 0;
3847 				if (bootverbose)
3848 					if_printf(sc->bge_ifp, "link DOWN\n");
3849 			}
3850 		}
3851 	}
3852 
3853 	/* Clear the attention */
3854 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
3855 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
3856 	    BGE_MACSTAT_LINK_CHANGED);
3857 }
3858