xref: /freebsd/sys/dev/bge/if_bge.c (revision d056fa046c6a91b90cd98165face0e42a33a5173)
1 /*-
2  * Copyright (c) 2001 Wind River Systems
3  * Copyright (c) 1997, 1998, 1999, 2001
4  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 /*
38  * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39  *
40  * The Broadcom BCM5700 is based on technology originally developed by
41  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42  * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has
43  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45  * frames, highly configurable RX filtering, and 16 RX and TX queues
46  * (which, along with RX filter rules, can be used for QOS applications).
47  * Other features, such as TCP segmentation, may be available as part
48  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49  * firmware images can be stored in hardware and need not be compiled
50  * into the driver.
51  *
52  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53  * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus.
54  *
55  * The BCM5701 is a single-chip solution incorporating both the BCM5700
56  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57  * does not support external SSRAM.
58  *
59  * Broadcom also produces a variation of the BCM5700 under the "Altima"
60  * brand name, which is functionally similar but lacks PCI-X support.
61  *
62  * Without external SSRAM, you can only have at most 4 TX rings,
63  * and the use of the mini RX ring is disabled. This seems to imply
64  * that these features are simply not available on the BCM5701. As a
65  * result, this driver does not implement any support for the mini RX
66  * ring.
67  */
68 
69 #ifdef HAVE_KERNEL_OPTION_HEADERS
70 #include "opt_device_polling.h"
71 #endif
72 
73 #include <sys/param.h>
74 #include <sys/endian.h>
75 #include <sys/systm.h>
76 #include <sys/sockio.h>
77 #include <sys/mbuf.h>
78 #include <sys/malloc.h>
79 #include <sys/kernel.h>
80 #include <sys/module.h>
81 #include <sys/socket.h>
82 
83 #include <net/if.h>
84 #include <net/if_arp.h>
85 #include <net/ethernet.h>
86 #include <net/if_dl.h>
87 #include <net/if_media.h>
88 
89 #include <net/bpf.h>
90 
91 #include <net/if_types.h>
92 #include <net/if_vlan_var.h>
93 
94 #include <netinet/in_systm.h>
95 #include <netinet/in.h>
96 #include <netinet/ip.h>
97 
98 #include <machine/bus.h>
99 #include <machine/resource.h>
100 #include <sys/bus.h>
101 #include <sys/rman.h>
102 
103 #include <dev/mii/mii.h>
104 #include <dev/mii/miivar.h>
105 #include "miidevs.h"
106 #include <dev/mii/brgphyreg.h>
107 
108 #include <dev/pci/pcireg.h>
109 #include <dev/pci/pcivar.h>
110 
111 #include <dev/bge/if_bgereg.h>
112 
113 #define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
114 #define ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
115 
116 MODULE_DEPEND(bge, pci, 1, 1, 1);
117 MODULE_DEPEND(bge, ether, 1, 1, 1);
118 MODULE_DEPEND(bge, miibus, 1, 1, 1);
119 
120 /* "device miibus" required.  See GENERIC if you get errors here. */
121 #include "miibus_if.h"
122 
123 /*
124  * Various supported device vendors/types and their names. Note: the
125  * spec seems to indicate that the hardware still has Alteon's vendor
126  * ID burned into it, though it will always be overriden by the vendor
127  * ID in the EEPROM. Just to be safe, we cover all possibilities.
128  */
129 static struct bge_type {
130 	uint16_t	bge_vid;
131 	uint16_t	bge_did;
132 } bge_devs[] = {
133 	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5700 },
134 	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5701 },
135 
136 	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1000 },
137 	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1002 },
138 	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC9100 },
139 
140 	{ APPLE_VENDORID,	APPLE_DEVICE_BCM5701 },
141 
142 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5700 },
143 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5701 },
144 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702 },
145 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702_ALT },
146 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702X },
147 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703 },
148 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703_ALT },
149 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703X },
150 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704C },
151 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S },
152 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S_ALT },
153 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705 },
154 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705F },
155 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705K },
156 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M },
157 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M_ALT },
158 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714C },
159 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714S },
160 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715 },
161 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715S },
162 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5720 },
163 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5721 },
164 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750 },
165 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750M },
166 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751 },
167 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751F },
168 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751M },
169 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752 },
170 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752M },
171 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753 },
172 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753F },
173 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753M },
174 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754 },
175 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754M },
176 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755 },
177 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755M },
178 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780 },
179 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780S },
180 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5781 },
181 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5782 },
182 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5786 },
183 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787 },
184 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787M },
185 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5788 },
186 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5789 },
187 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901 },
188 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901A2 },
189 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5903M },
190 
191 	{ SK_VENDORID,		SK_DEVICEID_ALTIMA },
192 
193 	{ TC_VENDORID,		TC_DEVICEID_3C985 },
194 	{ TC_VENDORID,		TC_DEVICEID_3C996 },
195 
196 	{ 0, 0 }
197 };
198 
199 static const struct bge_vendor {
200 	uint16_t	v_id;
201 	const char	*v_name;
202 } bge_vendors[] = {
203 	{ ALTEON_VENDORID,	"Alteon" },
204 	{ ALTIMA_VENDORID,	"Altima" },
205 	{ APPLE_VENDORID,	"Apple" },
206 	{ BCOM_VENDORID,	"Broadcom" },
207 	{ SK_VENDORID,		"SysKonnect" },
208 	{ TC_VENDORID,		"3Com" },
209 
210 	{ 0, NULL }
211 };
212 
213 static const struct bge_revision {
214 	uint32_t	br_chipid;
215 	const char	*br_name;
216 } bge_revisions[] = {
217 	{ BGE_CHIPID_BCM5700_A0,	"BCM5700 A0" },
218 	{ BGE_CHIPID_BCM5700_A1,	"BCM5700 A1" },
219 	{ BGE_CHIPID_BCM5700_B0,	"BCM5700 B0" },
220 	{ BGE_CHIPID_BCM5700_B1,	"BCM5700 B1" },
221 	{ BGE_CHIPID_BCM5700_B2,	"BCM5700 B2" },
222 	{ BGE_CHIPID_BCM5700_B3,	"BCM5700 B3" },
223 	{ BGE_CHIPID_BCM5700_ALTIMA,	"BCM5700 Altima" },
224 	{ BGE_CHIPID_BCM5700_C0,	"BCM5700 C0" },
225 	{ BGE_CHIPID_BCM5701_A0,	"BCM5701 A0" },
226 	{ BGE_CHIPID_BCM5701_B0,	"BCM5701 B0" },
227 	{ BGE_CHIPID_BCM5701_B2,	"BCM5701 B2" },
228 	{ BGE_CHIPID_BCM5701_B5,	"BCM5701 B5" },
229 	{ BGE_CHIPID_BCM5703_A0,	"BCM5703 A0" },
230 	{ BGE_CHIPID_BCM5703_A1,	"BCM5703 A1" },
231 	{ BGE_CHIPID_BCM5703_A2,	"BCM5703 A2" },
232 	{ BGE_CHIPID_BCM5703_A3,	"BCM5703 A3" },
233 	{ BGE_CHIPID_BCM5703_B0,	"BCM5703 B0" },
234 	{ BGE_CHIPID_BCM5704_A0,	"BCM5704 A0" },
235 	{ BGE_CHIPID_BCM5704_A1,	"BCM5704 A1" },
236 	{ BGE_CHIPID_BCM5704_A2,	"BCM5704 A2" },
237 	{ BGE_CHIPID_BCM5704_A3,	"BCM5704 A3" },
238 	{ BGE_CHIPID_BCM5704_B0,	"BCM5704 B0" },
239 	{ BGE_CHIPID_BCM5705_A0,	"BCM5705 A0" },
240 	{ BGE_CHIPID_BCM5705_A1,	"BCM5705 A1" },
241 	{ BGE_CHIPID_BCM5705_A2,	"BCM5705 A2" },
242 	{ BGE_CHIPID_BCM5705_A3,	"BCM5705 A3" },
243 	{ BGE_CHIPID_BCM5750_A0,	"BCM5750 A0" },
244 	{ BGE_CHIPID_BCM5750_A1,	"BCM5750 A1" },
245 	{ BGE_CHIPID_BCM5750_A3,	"BCM5750 A3" },
246 	{ BGE_CHIPID_BCM5750_B0,	"BCM5750 B0" },
247 	{ BGE_CHIPID_BCM5750_B1,	"BCM5750 B1" },
248 	{ BGE_CHIPID_BCM5750_C0,	"BCM5750 C0" },
249 	{ BGE_CHIPID_BCM5750_C1,	"BCM5750 C1" },
250 	{ BGE_CHIPID_BCM5714_A0,	"BCM5714 A0" },
251 	{ BGE_CHIPID_BCM5752_A0,	"BCM5752 A0" },
252 	{ BGE_CHIPID_BCM5752_A1,	"BCM5752 A1" },
253 	{ BGE_CHIPID_BCM5752_A2,	"BCM5752 A2" },
254 	{ BGE_CHIPID_BCM5714_B0,	"BCM5714 B0" },
255 	{ BGE_CHIPID_BCM5714_B3,	"BCM5714 B3" },
256 	{ BGE_CHIPID_BCM5715_A0,	"BCM5715 A0" },
257 	{ BGE_CHIPID_BCM5715_A1,	"BCM5715 A1" },
258 
259 	{ 0, NULL }
260 };
261 
262 /*
263  * Some defaults for major revisions, so that newer steppings
264  * that we don't know about have a shot at working.
265  */
266 static const struct bge_revision bge_majorrevs[] = {
267 	{ BGE_ASICREV_BCM5700,		"unknown BCM5700" },
268 	{ BGE_ASICREV_BCM5701,		"unknown BCM5701" },
269 	{ BGE_ASICREV_BCM5703,		"unknown BCM5703" },
270 	{ BGE_ASICREV_BCM5704,		"unknown BCM5704" },
271 	{ BGE_ASICREV_BCM5705,		"unknown BCM5705" },
272 	{ BGE_ASICREV_BCM5750,		"unknown BCM5750" },
273 	{ BGE_ASICREV_BCM5714_A0,	"unknown BCM5714" },
274 	{ BGE_ASICREV_BCM5752,		"unknown BCM5752" },
275 	{ BGE_ASICREV_BCM5780,		"unknown BCM5780" },
276 	{ BGE_ASICREV_BCM5714,		"unknown BCM5714" },
277 	{ BGE_ASICREV_BCM5755,		"unknown BCM5755" },
278 	{ BGE_ASICREV_BCM5787,		"unknown BCM5787" },
279 
280 	{ 0, NULL }
281 };
282 
283 #define BGE_IS_5705_OR_BEYOND(sc)			   \
284 	((sc)->bge_asicrev == BGE_ASICREV_BCM5705	|| \
285 	 (sc)->bge_asicrev == BGE_ASICREV_BCM5750	|| \
286 	 (sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0	|| \
287 	 (sc)->bge_asicrev == BGE_ASICREV_BCM5780	|| \
288 	 (sc)->bge_asicrev == BGE_ASICREV_BCM5714	|| \
289 	 (sc)->bge_asicrev == BGE_ASICREV_BCM5752	|| \
290 	 (sc)->bge_asicrev == BGE_ASICREV_BCM5755	|| \
291 	 (sc)->bge_asicrev == BGE_ASICREV_BCM5787)
292 
293 #define BGE_IS_575X_PLUS(sc)				   \
294 	((sc)->bge_asicrev == BGE_ASICREV_BCM5750	|| \
295 	 (sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0	|| \
296 	 (sc)->bge_asicrev == BGE_ASICREV_BCM5780	|| \
297 	 (sc)->bge_asicrev == BGE_ASICREV_BCM5714	|| \
298 	 (sc)->bge_asicrev == BGE_ASICREV_BCM5752	|| \
299 	 (sc)->bge_asicrev == BGE_ASICREV_BCM5755	|| \
300 	 (sc)->bge_asicrev == BGE_ASICREV_BCM5787)
301 
302 #define BGE_IS_5714_FAMILY(sc)				   \
303 	((sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0	|| \
304 	 (sc)->bge_asicrev == BGE_ASICREV_BCM5780	|| \
305 	 (sc)->bge_asicrev == BGE_ASICREV_BCM5714)
306 
307 #define BGE_IS_JUMBO_CAPABLE(sc) \
308 	((sc)->bge_asicrev == BGE_ASICREV_BCM5700	|| \
309 	 (sc)->bge_asicrev == BGE_ASICREV_BCM5701	|| \
310 	 (sc)->bge_asicrev == BGE_ASICREV_BCM5703	|| \
311 	 (sc)->bge_asicrev == BGE_ASICREV_BCM5704)
312 
313 const struct bge_revision * bge_lookup_rev(uint32_t);
314 const struct bge_vendor * bge_lookup_vendor(uint16_t);
315 static int bge_probe(device_t);
316 static int bge_attach(device_t);
317 static int bge_detach(device_t);
318 static int bge_suspend(device_t);
319 static int bge_resume(device_t);
320 static void bge_release_resources(struct bge_softc *);
321 static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
322 static int bge_dma_alloc(device_t);
323 static void bge_dma_free(struct bge_softc *);
324 
325 static void bge_txeof(struct bge_softc *);
326 static void bge_rxeof(struct bge_softc *);
327 
328 static void bge_tick_locked(struct bge_softc *);
329 static void bge_tick(void *);
330 static void bge_stats_update(struct bge_softc *);
331 static void bge_stats_update_regs(struct bge_softc *);
332 static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);
333 
334 static void bge_intr(void *);
335 static void bge_start_locked(struct ifnet *);
336 static void bge_start(struct ifnet *);
337 static int bge_ioctl(struct ifnet *, u_long, caddr_t);
338 static void bge_init_locked(struct bge_softc *);
339 static void bge_init(void *);
340 static void bge_stop(struct bge_softc *);
341 static void bge_watchdog(struct ifnet *);
342 static void bge_shutdown(device_t);
343 static int bge_ifmedia_upd(struct ifnet *);
344 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
345 
346 static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
347 static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
348 
349 static void bge_setmulti(struct bge_softc *);
350 
351 static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
352 static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
353 static int bge_init_rx_ring_std(struct bge_softc *);
354 static void bge_free_rx_ring_std(struct bge_softc *);
355 static int bge_init_rx_ring_jumbo(struct bge_softc *);
356 static void bge_free_rx_ring_jumbo(struct bge_softc *);
357 static void bge_free_tx_ring(struct bge_softc *);
358 static int bge_init_tx_ring(struct bge_softc *);
359 
360 static int bge_chipinit(struct bge_softc *);
361 static int bge_blockinit(struct bge_softc *);
362 
363 static uint32_t bge_readmem_ind(struct bge_softc *, int);
364 static void bge_writemem_ind(struct bge_softc *, int, int);
365 #ifdef notdef
366 static uint32_t bge_readreg_ind(struct bge_softc *, int);
367 #endif
368 static void bge_writereg_ind(struct bge_softc *, int, int);
369 
370 static int bge_miibus_readreg(device_t, int, int);
371 static int bge_miibus_writereg(device_t, int, int, int);
372 static void bge_miibus_statchg(device_t);
373 #ifdef DEVICE_POLLING
374 static void bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
375 #endif
376 
377 static void bge_reset(struct bge_softc *);
378 static void bge_link_upd(struct bge_softc *);
379 
380 static device_method_t bge_methods[] = {
381 	/* Device interface */
382 	DEVMETHOD(device_probe,		bge_probe),
383 	DEVMETHOD(device_attach,	bge_attach),
384 	DEVMETHOD(device_detach,	bge_detach),
385 	DEVMETHOD(device_shutdown,	bge_shutdown),
386 	DEVMETHOD(device_suspend,	bge_suspend),
387 	DEVMETHOD(device_resume,	bge_resume),
388 
389 	/* bus interface */
390 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
391 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
392 
393 	/* MII interface */
394 	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
395 	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
396 	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),
397 
398 	{ 0, 0 }
399 };
400 
401 static driver_t bge_driver = {
402 	"bge",
403 	bge_methods,
404 	sizeof(struct bge_softc)
405 };
406 
407 static devclass_t bge_devclass;
408 
409 DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
410 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
411 
412 static int bge_fake_autoneg = 0;
413 TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
414 
415 static uint32_t
416 bge_readmem_ind(struct bge_softc *sc, int off)
417 {
418 	device_t dev;
419 
420 	dev = sc->bge_dev;
421 
422 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
423 	return (pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
424 }
425 
426 static void
427 bge_writemem_ind(struct bge_softc *sc, int off, int val)
428 {
429 	device_t dev;
430 
431 	dev = sc->bge_dev;
432 
433 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
434 	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
435 }
436 
437 #ifdef notdef
438 static uint32_t
439 bge_readreg_ind(struct bge_softc *sc, int off)
440 {
441 	device_t dev;
442 
443 	dev = sc->bge_dev;
444 
445 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
446 	return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
447 }
448 #endif
449 
450 static void
451 bge_writereg_ind(struct bge_softc *sc, int off, int val)
452 {
453 	device_t dev;
454 
455 	dev = sc->bge_dev;
456 
457 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
458 	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
459 }
460 
461 /*
462  * Map a single buffer address.
463  */
464 
465 static void
466 bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
467 {
468 	struct bge_dmamap_arg *ctx;
469 
470 	if (error)
471 		return;
472 
473 	ctx = arg;
474 
475 	if (nseg > ctx->bge_maxsegs) {
476 		ctx->bge_maxsegs = 0;
477 		return;
478 	}
479 
480 	ctx->bge_busaddr = segs->ds_addr;
481 }
482 
483 /*
484  * Read a byte of data stored in the EEPROM at address 'addr.' The
485  * BCM570x supports both the traditional bitbang interface and an
486  * auto access interface for reading the EEPROM. We use the auto
487  * access method.
488  */
489 static uint8_t
490 bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
491 {
492 	int i;
493 	uint32_t byte = 0;
494 
495 	/*
496 	 * Enable use of auto EEPROM access so we can avoid
497 	 * having to use the bitbang method.
498 	 */
499 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
500 
501 	/* Reset the EEPROM, load the clock period. */
502 	CSR_WRITE_4(sc, BGE_EE_ADDR,
503 	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
504 	DELAY(20);
505 
506 	/* Issue the read EEPROM command. */
507 	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
508 
509 	/* Wait for completion */
510 	for(i = 0; i < BGE_TIMEOUT * 10; i++) {
511 		DELAY(10);
512 		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
513 			break;
514 	}
515 
516 	if (i == BGE_TIMEOUT) {
517 		device_printf(sc->bge_dev, "EEPROM read timed out\n");
518 		return (1);
519 	}
520 
521 	/* Get result. */
522 	byte = CSR_READ_4(sc, BGE_EE_DATA);
523 
524 	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;
525 
526 	return (0);
527 }
528 
529 /*
530  * Read a sequence of bytes from the EEPROM.
531  */
532 static int
533 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
534 {
535 	int i, error = 0;
536 	uint8_t byte = 0;
537 
538 	for (i = 0; i < cnt; i++) {
539 		error = bge_eeprom_getbyte(sc, off + i, &byte);
540 		if (error)
541 			break;
542 		*(dest + i) = byte;
543 	}
544 
545 	return (error ? 1 : 0);
546 }
547 
548 static int
549 bge_miibus_readreg(device_t dev, int phy, int reg)
550 {
551 	struct bge_softc *sc;
552 	uint32_t val, autopoll;
553 	int i;
554 
555 	sc = device_get_softc(dev);
556 
557 	/*
558 	 * Broadcom's own driver always assumes the internal
559 	 * PHY is at GMII address 1. On some chips, the PHY responds
560 	 * to accesses at all addresses, which could cause us to
561 	 * bogusly attach the PHY 32 times at probe type. Always
562 	 * restricting the lookup to address 1 is simpler than
563 	 * trying to figure out which chips revisions should be
564 	 * special-cased.
565 	 */
566 	if (phy != 1)
567 		return (0);
568 
569 	/* Reading with autopolling on may trigger PCI errors */
570 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
571 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
572 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
573 		DELAY(40);
574 	}
575 
576 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
577 	    BGE_MIPHY(phy)|BGE_MIREG(reg));
578 
579 	for (i = 0; i < BGE_TIMEOUT; i++) {
580 		val = CSR_READ_4(sc, BGE_MI_COMM);
581 		if (!(val & BGE_MICOMM_BUSY))
582 			break;
583 	}
584 
585 	if (i == BGE_TIMEOUT) {
586 		if_printf(sc->bge_ifp, "PHY read timed out\n");
587 		val = 0;
588 		goto done;
589 	}
590 
591 	val = CSR_READ_4(sc, BGE_MI_COMM);
592 
593 done:
594 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
595 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
596 		DELAY(40);
597 	}
598 
599 	if (val & BGE_MICOMM_READFAIL)
600 		return (0);
601 
602 	return (val & 0xFFFF);
603 }
604 
605 static int
606 bge_miibus_writereg(device_t dev, int phy, int reg, int val)
607 {
608 	struct bge_softc *sc;
609 	uint32_t autopoll;
610 	int i;
611 
612 	sc = device_get_softc(dev);
613 
614 	/* Reading with autopolling on may trigger PCI errors */
615 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
616 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
617 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
618 		DELAY(40);
619 	}
620 
621 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
622 	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
623 
624 	for (i = 0; i < BGE_TIMEOUT; i++) {
625 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
626 			break;
627 	}
628 
629 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
630 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
631 		DELAY(40);
632 	}
633 
634 	if (i == BGE_TIMEOUT) {
635 		if_printf(sc->bge_ifp, "PHY read timed out\n");
636 		return (0);
637 	}
638 
639 	return (0);
640 }
641 
642 static void
643 bge_miibus_statchg(device_t dev)
644 {
645 	struct bge_softc *sc;
646 	struct mii_data *mii;
647 
648 	sc = device_get_softc(dev);
649 	mii = device_get_softc(sc->bge_miibus);
650 
651 	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
652 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
653 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
654 	else
655 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
656 
657 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
658 		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
659 	else
660 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
661 }
662 
663 /*
664  * Intialize a standard receive ring descriptor.
665  */
666 static int
667 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
668 {
669 	struct mbuf *m_new = NULL;
670 	struct bge_rx_bd *r;
671 	struct bge_dmamap_arg ctx;
672 	int error;
673 
674 	if (m == NULL) {
675 		m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
676 		if (m_new == NULL)
677 			return (ENOBUFS);
678 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
679 	} else {
680 		m_new = m;
681 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
682 		m_new->m_data = m_new->m_ext.ext_buf;
683 	}
684 
685 	if (!sc->bge_rx_alignment_bug)
686 		m_adj(m_new, ETHER_ALIGN);
687 	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
688 	r = &sc->bge_ldata.bge_rx_std_ring[i];
689 	ctx.bge_maxsegs = 1;
690 	ctx.sc = sc;
691 	error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
692 	    sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
693 	    m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
694 	if (error || ctx.bge_maxsegs == 0) {
695 		if (m == NULL) {
696 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
697 			m_freem(m_new);
698 		}
699 		return (ENOMEM);
700 	}
701 	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
702 	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
703 	r->bge_flags = BGE_RXBDFLAG_END;
704 	r->bge_len = m_new->m_len;
705 	r->bge_idx = i;
706 
707 	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
708 	    sc->bge_cdata.bge_rx_std_dmamap[i],
709 	    BUS_DMASYNC_PREREAD);
710 
711 	return (0);
712 }
713 
714 /*
715  * Initialize a jumbo receive ring descriptor. This allocates
716  * a jumbo buffer from the pool managed internally by the driver.
717  */
718 static int
719 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
720 {
721 	bus_dma_segment_t segs[BGE_NSEG_JUMBO];
722 	struct bge_extrx_bd *r;
723 	struct mbuf *m_new = NULL;
724 	int nsegs;
725 	int error;
726 
727 	if (m == NULL) {
728 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
729 		if (m_new == NULL)
730 			return (ENOBUFS);
731 
732 		m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
733 		if (!(m_new->m_flags & M_EXT)) {
734 			m_freem(m_new);
735 			return (ENOBUFS);
736 		}
737 		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
738 	} else {
739 		m_new = m;
740 		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
741 		m_new->m_data = m_new->m_ext.ext_buf;
742 	}
743 
744 	if (!sc->bge_rx_alignment_bug)
745 		m_adj(m_new, ETHER_ALIGN);
746 
747 	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
748 	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
749 	    m_new, segs, &nsegs, BUS_DMA_NOWAIT);
750 	if (error) {
751 		if (m == NULL)
752 			m_freem(m_new);
753 		return (error);
754 	}
755 	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
756 
757 	/*
758 	 * Fill in the extended RX buffer descriptor.
759 	 */
760 	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
761 	r->bge_flags = BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END;
762 	r->bge_idx = i;
763 	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
764 	switch (nsegs) {
765 	case 4:
766 		r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
767 		r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
768 		r->bge_len3 = segs[3].ds_len;
769 	case 3:
770 		r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
771 		r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
772 		r->bge_len2 = segs[2].ds_len;
773 	case 2:
774 		r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
775 		r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
776 		r->bge_len1 = segs[1].ds_len;
777 	case 1:
778 		r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
779 		r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
780 		r->bge_len0 = segs[0].ds_len;
781 		break;
782 	default:
783 		panic("%s: %d segments\n", __func__, nsegs);
784 	}
785 
786 	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
787 	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
788 	    BUS_DMASYNC_PREREAD);
789 
790 	return (0);
791 }
792 
793 /*
794  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
795  * that's 1MB or memory, which is a lot. For now, we fill only the first
796  * 256 ring entries and hope that our CPU is fast enough to keep up with
797  * the NIC.
798  */
799 static int
800 bge_init_rx_ring_std(struct bge_softc *sc)
801 {
802 	int i;
803 
804 	for (i = 0; i < BGE_SSLOTS; i++) {
805 		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
806 			return (ENOBUFS);
807 	};
808 
809 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
810 	    sc->bge_cdata.bge_rx_std_ring_map,
811 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
812 
813 	sc->bge_std = i - 1;
814 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
815 
816 	return (0);
817 }
818 
819 static void
820 bge_free_rx_ring_std(struct bge_softc *sc)
821 {
822 	int i;
823 
824 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
825 		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
826 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
827 			    sc->bge_cdata.bge_rx_std_dmamap[i],
828 			    BUS_DMASYNC_POSTREAD);
829 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
830 			    sc->bge_cdata.bge_rx_std_dmamap[i]);
831 			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
832 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
833 		}
834 		bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
835 		    sizeof(struct bge_rx_bd));
836 	}
837 }
838 
839 static int
840 bge_init_rx_ring_jumbo(struct bge_softc *sc)
841 {
842 	struct bge_rcb *rcb;
843 	int i;
844 
845 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
846 		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
847 			return (ENOBUFS);
848 	};
849 
850 	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
851 	    sc->bge_cdata.bge_rx_jumbo_ring_map,
852 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
853 
854 	sc->bge_jumbo = i - 1;
855 
856 	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
857 	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
858 				    BGE_RCB_FLAG_USE_EXT_RX_BD);
859 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
860 
861 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
862 
863 	return (0);
864 }
865 
866 static void
867 bge_free_rx_ring_jumbo(struct bge_softc *sc)
868 {
869 	int i;
870 
871 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
872 		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
873 			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
874 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
875 			    BUS_DMASYNC_POSTREAD);
876 			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
877 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
878 			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
879 			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
880 		}
881 		bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
882 		    sizeof(struct bge_extrx_bd));
883 	}
884 }
885 
886 static void
887 bge_free_tx_ring(struct bge_softc *sc)
888 {
889 	int i;
890 
891 	if (sc->bge_ldata.bge_tx_ring == NULL)
892 		return;
893 
894 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
895 		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
896 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
897 			    sc->bge_cdata.bge_tx_dmamap[i],
898 			    BUS_DMASYNC_POSTWRITE);
899 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
900 			    sc->bge_cdata.bge_tx_dmamap[i]);
901 			m_freem(sc->bge_cdata.bge_tx_chain[i]);
902 			sc->bge_cdata.bge_tx_chain[i] = NULL;
903 		}
904 		bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
905 		    sizeof(struct bge_tx_bd));
906 	}
907 }
908 
909 static int
910 bge_init_tx_ring(struct bge_softc *sc)
911 {
912 	sc->bge_txcnt = 0;
913 	sc->bge_tx_saved_considx = 0;
914 
915 	/* Initialize transmit producer index for host-memory send ring. */
916 	sc->bge_tx_prodidx = 0;
917 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
918 
919 	/* 5700 b2 errata */
920 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
921 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
922 
923 	/* NIC-memory send ring not used; initialize to zero. */
924 	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
925 	/* 5700 b2 errata */
926 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
927 		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
928 
929 	return (0);
930 }
931 
932 static void
933 bge_setmulti(struct bge_softc *sc)
934 {
935 	struct ifnet *ifp;
936 	struct ifmultiaddr *ifma;
937 	uint32_t hashes[4] = { 0, 0, 0, 0 };
938 	int h, i;
939 
940 	BGE_LOCK_ASSERT(sc);
941 
942 	ifp = sc->bge_ifp;
943 
944 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
945 		for (i = 0; i < 4; i++)
946 			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
947 		return;
948 	}
949 
950 	/* First, zot all the existing filters. */
951 	for (i = 0; i < 4; i++)
952 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
953 
954 	/* Now program new ones. */
955 	IF_ADDR_LOCK(ifp);
956 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
957 		if (ifma->ifma_addr->sa_family != AF_LINK)
958 			continue;
959 		h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
960 		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
961 		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
962 	}
963 	IF_ADDR_UNLOCK(ifp);
964 
965 	for (i = 0; i < 4; i++)
966 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
967 }
968 
969 /*
970  * Do endian, PCI and DMA initialization. Also check the on-board ROM
971  * self-test results.
972  */
973 static int
974 bge_chipinit(struct bge_softc *sc)
975 {
976 	uint32_t dma_rw_ctl;
977 	int i;
978 
979 	/* Set endian type before we access any non-PCI registers. */
980 	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
981 
982 	/*
983 	 * Check the 'ROM failed' bit on the RX CPU to see if
984 	 * self-tests passed.
985 	 */
986 	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
987 		device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
988 		return (ENODEV);
989 	}
990 
991 	/* Clear the MAC control register */
992 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
993 
994 	/*
995 	 * Clear the MAC statistics block in the NIC's
996 	 * internal memory.
997 	 */
998 	for (i = BGE_STATS_BLOCK;
999 	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1000 		BGE_MEMWIN_WRITE(sc, i, 0);
1001 
1002 	for (i = BGE_STATUS_BLOCK;
1003 	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1004 		BGE_MEMWIN_WRITE(sc, i, 0);
1005 
1006 	/* Set up the PCI DMA control register. */
1007 	if (sc->bge_pcie) {
1008 		/* PCI Express bus */
1009 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1010 		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1011 		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1012 	} else if (sc->bge_pcix) {
1013 		/* PCI-X bus */
1014 		if (BGE_IS_5714_FAMILY(sc)) {
1015 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
1016 			dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
1017 			/* XXX magic values, Broadcom-supplied Linux driver */
1018 			if (sc->bge_asicrev == BGE_ASICREV_BCM5780)
1019 				dma_rw_ctl |= (1 << 20) | (1 << 18) |
1020 				    BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1021 			else
1022 				dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15);
1023 
1024 		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1025 			/*
1026 			 * The 5704 uses a different encoding of read/write
1027 			 * watermarks.
1028 			 */
1029 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1030 			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1031 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1032 		else
1033 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1034 			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1035 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1036 			    (0x0F);
1037 
1038 		/*
1039 		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1040 		 * for hardware bugs.
1041 		 */
1042 		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1043 		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1044 			uint32_t tmp;
1045 
1046 			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1047 			if (tmp == 0x6 || tmp == 0x7)
1048 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1049 		}
1050 	} else
1051 		/* Conventional PCI bus */
1052 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1053 		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1054 		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1055 		    (0x0F);
1056 
1057 	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1058 	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1059 	    sc->bge_asicrev == BGE_ASICREV_BCM5705)
1060 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1061 	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1062 
1063 	/*
1064 	 * Set up general mode register.
1065 	 */
1066 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1067 	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1068 	    BGE_MODECTL_TX_NO_PHDR_CSUM);
1069 
1070 	/*
1071 	 * Disable memory write invalidate.  Apparently it is not supported
1072 	 * properly by these devices.
1073 	 */
1074 	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1075 
1076 #ifdef __brokenalpha__
1077 	/*
1078 	 * Must insure that we do not cross an 8K (bytes) boundary
1079 	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
1080 	 * restriction on some ALPHA platforms with early revision
1081 	 * 21174 PCI chipsets, such as the AlphaPC 164lx
1082 	 */
1083 	PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1084 	    BGE_PCI_READ_BNDRY_1024BYTES, 4);
1085 #endif
1086 
1087 	/* Set the timer prescaler (always 66Mhz) */
1088 	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1089 
1090 	return (0);
1091 }
1092 
1093 static int
1094 bge_blockinit(struct bge_softc *sc)
1095 {
1096 	struct bge_rcb *rcb;
1097 	bus_size_t vrcb;
1098 	bge_hostaddr taddr;
1099 	int i;
1100 
1101 	/*
1102 	 * Initialize the memory window pointer register so that
1103 	 * we can access the first 32K of internal NIC RAM. This will
1104 	 * allow us to set up the TX send ring RCBs and the RX return
1105 	 * ring RCBs, plus other things which live in NIC memory.
1106 	 */
1107 	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1108 
1109 	/* Note: the BCM5704 has a smaller mbuf space than other chips. */
1110 
1111 	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1112 		/* Configure mbuf memory pool */
1113 		if (sc->bge_extram) {
1114 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1115 			    BGE_EXT_SSRAM);
1116 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1117 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1118 			else
1119 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1120 		} else {
1121 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1122 			    BGE_BUFFPOOL_1);
1123 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1124 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1125 			else
1126 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1127 		}
1128 
1129 		/* Configure DMA resource pool */
1130 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1131 		    BGE_DMA_DESCRIPTORS);
1132 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1133 	}
1134 
1135 	/* Configure mbuf pool watermarks */
1136 	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1137 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1138 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1139 	} else {
1140 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1141 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1142 	}
1143 	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1144 
1145 	/* Configure DMA resource watermarks */
1146 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1147 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1148 
1149 	/* Enable buffer manager */
1150 	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1151 		CSR_WRITE_4(sc, BGE_BMAN_MODE,
1152 		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1153 
1154 		/* Poll for buffer manager start indication */
1155 		for (i = 0; i < BGE_TIMEOUT; i++) {
1156 			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1157 				break;
1158 			DELAY(10);
1159 		}
1160 
1161 		if (i == BGE_TIMEOUT) {
1162 			device_printf(sc->bge_dev,
1163 			    "buffer manager failed to start\n");
1164 			return (ENXIO);
1165 		}
1166 	}
1167 
1168 	/* Enable flow-through queues */
1169 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1170 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1171 
1172 	/* Wait until queue initialization is complete */
1173 	for (i = 0; i < BGE_TIMEOUT; i++) {
1174 		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1175 			break;
1176 		DELAY(10);
1177 	}
1178 
1179 	if (i == BGE_TIMEOUT) {
1180 		device_printf(sc->bge_dev, "flow-through queue init failed\n");
1181 		return (ENXIO);
1182 	}
1183 
1184 	/* Initialize the standard RX ring control block */
1185 	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1186 	rcb->bge_hostaddr.bge_addr_lo =
1187 	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1188 	rcb->bge_hostaddr.bge_addr_hi =
1189 	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1190 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1191 	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1192 	if (BGE_IS_5705_OR_BEYOND(sc))
1193 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1194 	else
1195 		rcb->bge_maxlen_flags =
1196 		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1197 	if (sc->bge_extram)
1198 		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1199 	else
1200 		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1201 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1202 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1203 
1204 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1205 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1206 
1207 	/*
1208 	 * Initialize the jumbo RX ring control block
1209 	 * We set the 'ring disabled' bit in the flags
1210 	 * field until we're actually ready to start
1211 	 * using this ring (i.e. once we set the MTU
1212 	 * high enough to require it).
1213 	 */
1214 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
1215 		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1216 
1217 		rcb->bge_hostaddr.bge_addr_lo =
1218 		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1219 		rcb->bge_hostaddr.bge_addr_hi =
1220 		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1221 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1222 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
1223 		    BUS_DMASYNC_PREREAD);
1224 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1225 		    BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED);
1226 		if (sc->bge_extram)
1227 			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1228 		else
1229 			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1230 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1231 		    rcb->bge_hostaddr.bge_addr_hi);
1232 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1233 		    rcb->bge_hostaddr.bge_addr_lo);
1234 
1235 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1236 		    rcb->bge_maxlen_flags);
1237 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1238 
1239 		/* Set up dummy disabled mini ring RCB */
1240 		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1241 		rcb->bge_maxlen_flags =
1242 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1243 		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1244 		    rcb->bge_maxlen_flags);
1245 	}
1246 
1247 	/*
1248 	 * Set the BD ring replentish thresholds. The recommended
1249 	 * values are 1/8th the number of descriptors allocated to
1250 	 * each ring.
1251 	 */
1252 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1253 	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1254 
1255 	/*
1256 	 * Disable all unused send rings by setting the 'ring disabled'
1257 	 * bit in the flags field of all the TX send ring control blocks.
1258 	 * These are located in NIC memory.
1259 	 */
1260 	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1261 	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1262 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1263 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1264 		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1265 		vrcb += sizeof(struct bge_rcb);
1266 	}
1267 
1268 	/* Configure TX RCB 0 (we use only the first ring) */
1269 	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1270 	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1271 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1272 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1273 	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1274 	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1275 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
1276 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1277 		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1278 
1279 	/* Disable all unused RX return rings */
1280 	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1281 	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1282 		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1283 		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1284 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1285 		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1286 		    BGE_RCB_FLAG_RING_DISABLED));
1287 		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1288 		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1289 		    (i * (sizeof(uint64_t))), 0);
1290 		vrcb += sizeof(struct bge_rcb);
1291 	}
1292 
1293 	/* Initialize RX ring indexes */
1294 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1295 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1296 	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1297 
1298 	/*
1299 	 * Set up RX return ring 0
1300 	 * Note that the NIC address for RX return rings is 0x00000000.
1301 	 * The return rings live entirely within the host, so the
1302 	 * nicaddr field in the RCB isn't used.
1303 	 */
1304 	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1305 	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1306 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1307 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1308 	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1309 	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1310 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1311 
1312 	/* Set random backoff seed for TX */
1313 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1314 	    IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1315 	    IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1316 	    IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1317 	    BGE_TX_BACKOFF_SEED_MASK);
1318 
1319 	/* Set inter-packet gap */
1320 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1321 
1322 	/*
1323 	 * Specify which ring to use for packets that don't match
1324 	 * any RX rules.
1325 	 */
1326 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1327 
1328 	/*
1329 	 * Configure number of RX lists. One interrupt distribution
1330 	 * list, sixteen active lists, one bad frames class.
1331 	 */
1332 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1333 
1334 	/* Inialize RX list placement stats mask. */
1335 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1336 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1337 
1338 	/* Disable host coalescing until we get it set up */
1339 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1340 
1341 	/* Poll to make sure it's shut down. */
1342 	for (i = 0; i < BGE_TIMEOUT; i++) {
1343 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1344 			break;
1345 		DELAY(10);
1346 	}
1347 
1348 	if (i == BGE_TIMEOUT) {
1349 		device_printf(sc->bge_dev,
1350 		    "host coalescing engine failed to idle\n");
1351 		return (ENXIO);
1352 	}
1353 
1354 	/* Set up host coalescing defaults */
1355 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1356 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1357 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1358 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1359 	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1360 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1361 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1362 	}
1363 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1364 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1365 
1366 	/* Set up address of statistics block */
1367 	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1368 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1369 		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1370 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1371 		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1372 		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1373 		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1374 		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1375 	}
1376 
1377 	/* Set up address of status block */
1378 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1379 	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1380 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1381 	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1382 	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1383 	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1384 
1385 	/* Turn on host coalescing state machine */
1386 	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1387 
1388 	/* Turn on RX BD completion state machine and enable attentions */
1389 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
1390 	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1391 
1392 	/* Turn on RX list placement state machine */
1393 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1394 
1395 	/* Turn on RX list selector state machine. */
1396 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
1397 		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1398 
1399 	/* Turn on DMA, clear stats */
1400 	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1401 	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1402 	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1403 	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1404 	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1405 
1406 	/* Set misc. local control, enable interrupts on attentions */
1407 	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1408 
1409 #ifdef notdef
1410 	/* Assert GPIO pins for PHY reset */
1411 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1412 	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1413 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1414 	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1415 #endif
1416 
1417 	/* Turn on DMA completion state machine */
1418 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
1419 		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1420 
1421 	/* Turn on write DMA state machine */
1422 	CSR_WRITE_4(sc, BGE_WDMA_MODE,
1423 	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1424 
1425 	/* Turn on read DMA state machine */
1426 	CSR_WRITE_4(sc, BGE_RDMA_MODE,
1427 	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1428 
1429 	/* Turn on RX data completion state machine */
1430 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1431 
1432 	/* Turn on RX BD initiator state machine */
1433 	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1434 
1435 	/* Turn on RX data and RX BD initiator state machine */
1436 	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1437 
1438 	/* Turn on Mbuf cluster free state machine */
1439 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
1440 		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1441 
1442 	/* Turn on send BD completion state machine */
1443 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1444 
1445 	/* Turn on send data completion state machine */
1446 	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1447 
1448 	/* Turn on send data initiator state machine */
1449 	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1450 
1451 	/* Turn on send BD initiator state machine */
1452 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1453 
1454 	/* Turn on send BD selector state machine */
1455 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1456 
1457 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1458 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1459 	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1460 
1461 	/* ack/clear link change events */
1462 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1463 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1464 	    BGE_MACSTAT_LINK_CHANGED);
1465 	CSR_WRITE_4(sc, BGE_MI_STS, 0);
1466 
1467 	/* Enable PHY auto polling (for MII/GMII only) */
1468 	if (sc->bge_tbi) {
1469 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1470 	} else {
1471 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1472 		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1473 		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
1474 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1475 			    BGE_EVTENB_MI_INTERRUPT);
1476 	}
1477 
1478 	/*
1479 	 * Clear any pending link state attention.
1480 	 * Otherwise some link state change events may be lost until attention
1481 	 * is cleared by bge_intr() -> bge_link_upd() sequence.
1482 	 * It's not necessary on newer BCM chips - perhaps enabling link
1483 	 * state change attentions implies clearing pending attention.
1484 	 */
1485 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1486 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1487 	    BGE_MACSTAT_LINK_CHANGED);
1488 
1489 	/* Enable link state change attentions. */
1490 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1491 
1492 	return (0);
1493 }
1494 
1495 const struct bge_revision *
1496 bge_lookup_rev(uint32_t chipid)
1497 {
1498 	const struct bge_revision *br;
1499 
1500 	for (br = bge_revisions; br->br_name != NULL; br++) {
1501 		if (br->br_chipid == chipid)
1502 			return (br);
1503 	}
1504 
1505 	for (br = bge_majorrevs; br->br_name != NULL; br++) {
1506 		if (br->br_chipid == BGE_ASICREV(chipid))
1507 			return (br);
1508 	}
1509 
1510 	return (NULL);
1511 }
1512 
1513 const struct bge_vendor *
1514 bge_lookup_vendor(uint16_t vid)
1515 {
1516 	const struct bge_vendor *v;
1517 
1518 	for (v = bge_vendors; v->v_name != NULL; v++)
1519 		if (v->v_id == vid)
1520 			return (v);
1521 
1522 	panic("%s: unknown vendor %d", __func__, vid);
1523 	return (NULL);
1524 }
1525 
1526 /*
1527  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1528  * against our list and return its name if we find a match.
1529  *
1530  * Note that since the Broadcom controller contains VPD support, we
1531  * can get the device name string from the controller itself instead
1532  * of the compiled-in string. This is a little slow, but it guarantees
1533  * we'll always announce the right product name. Unfortunately, this
1534  * is possible only later in bge_attach(), when we have established
1535  * access to EEPROM.
1536  */
1537 static int
1538 bge_probe(device_t dev)
1539 {
1540 	struct bge_type *t = bge_devs;
1541 	struct bge_softc *sc = device_get_softc(dev);
1542 
1543 	bzero(sc, sizeof(struct bge_softc));
1544 	sc->bge_dev = dev;
1545 
1546 	while(t->bge_vid != 0) {
1547 		if ((pci_get_vendor(dev) == t->bge_vid) &&
1548 		    (pci_get_device(dev) == t->bge_did)) {
1549 			char buf[64];
1550 			const struct bge_revision *br;
1551 			const struct bge_vendor *v;
1552 			uint32_t id;
1553 
1554 			id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1555 			    BGE_PCIMISCCTL_ASICREV;
1556 			br = bge_lookup_rev(id);
1557 			id >>= 16;
1558 			v = bge_lookup_vendor(t->bge_vid);
1559 			if (br == NULL)
1560 				snprintf(buf, 64, "%s unknown ASIC (%#04x)",
1561 				    v->v_name, id);
1562 			else
1563 				snprintf(buf, 64, "%s %s, ASIC rev. %#04x",
1564 				    v->v_name, br->br_name, id);
1565 			device_set_desc_copy(dev, buf);
1566 			if (pci_get_subvendor(dev) == DELL_VENDORID)
1567 				sc->bge_no_3_led = 1;
1568 			return (0);
1569 		}
1570 		t++;
1571 	}
1572 
1573 	return (ENXIO);
1574 }
1575 
1576 static void
1577 bge_dma_free(struct bge_softc *sc)
1578 {
1579 	int i;
1580 
1581 	/* Destroy DMA maps for RX buffers. */
1582 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1583 		if (sc->bge_cdata.bge_rx_std_dmamap[i])
1584 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1585 			    sc->bge_cdata.bge_rx_std_dmamap[i]);
1586 	}
1587 
1588 	/* Destroy DMA maps for jumbo RX buffers. */
1589 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1590 		if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1591 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1592 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1593 	}
1594 
1595 	/* Destroy DMA maps for TX buffers. */
1596 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1597 		if (sc->bge_cdata.bge_tx_dmamap[i])
1598 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1599 			    sc->bge_cdata.bge_tx_dmamap[i]);
1600 	}
1601 
1602 	if (sc->bge_cdata.bge_mtag)
1603 		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
1604 
1605 
1606 	/* Destroy standard RX ring. */
1607 	if (sc->bge_cdata.bge_rx_std_ring_map)
1608 		bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1609 		    sc->bge_cdata.bge_rx_std_ring_map);
1610 	if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
1611 		bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1612 		    sc->bge_ldata.bge_rx_std_ring,
1613 		    sc->bge_cdata.bge_rx_std_ring_map);
1614 
1615 	if (sc->bge_cdata.bge_rx_std_ring_tag)
1616 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1617 
1618 	/* Destroy jumbo RX ring. */
1619 	if (sc->bge_cdata.bge_rx_jumbo_ring_map)
1620 		bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1621 		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1622 
1623 	if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
1624 	    sc->bge_ldata.bge_rx_jumbo_ring)
1625 		bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1626 		    sc->bge_ldata.bge_rx_jumbo_ring,
1627 		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1628 
1629 	if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1630 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1631 
1632 	/* Destroy RX return ring. */
1633 	if (sc->bge_cdata.bge_rx_return_ring_map)
1634 		bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1635 		    sc->bge_cdata.bge_rx_return_ring_map);
1636 
1637 	if (sc->bge_cdata.bge_rx_return_ring_map &&
1638 	    sc->bge_ldata.bge_rx_return_ring)
1639 		bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1640 		    sc->bge_ldata.bge_rx_return_ring,
1641 		    sc->bge_cdata.bge_rx_return_ring_map);
1642 
1643 	if (sc->bge_cdata.bge_rx_return_ring_tag)
1644 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
1645 
1646 	/* Destroy TX ring. */
1647 	if (sc->bge_cdata.bge_tx_ring_map)
1648 		bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
1649 		    sc->bge_cdata.bge_tx_ring_map);
1650 
1651 	if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
1652 		bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
1653 		    sc->bge_ldata.bge_tx_ring,
1654 		    sc->bge_cdata.bge_tx_ring_map);
1655 
1656 	if (sc->bge_cdata.bge_tx_ring_tag)
1657 		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
1658 
1659 	/* Destroy status block. */
1660 	if (sc->bge_cdata.bge_status_map)
1661 		bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
1662 		    sc->bge_cdata.bge_status_map);
1663 
1664 	if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
1665 		bus_dmamem_free(sc->bge_cdata.bge_status_tag,
1666 		    sc->bge_ldata.bge_status_block,
1667 		    sc->bge_cdata.bge_status_map);
1668 
1669 	if (sc->bge_cdata.bge_status_tag)
1670 		bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1671 
1672 	/* Destroy statistics block. */
1673 	if (sc->bge_cdata.bge_stats_map)
1674 		bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
1675 		    sc->bge_cdata.bge_stats_map);
1676 
1677 	if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
1678 		bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
1679 		    sc->bge_ldata.bge_stats,
1680 		    sc->bge_cdata.bge_stats_map);
1681 
1682 	if (sc->bge_cdata.bge_stats_tag)
1683 		bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1684 
1685 	/* Destroy the parent tag. */
1686 	if (sc->bge_cdata.bge_parent_tag)
1687 		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1688 }
1689 
1690 static int
1691 bge_dma_alloc(device_t dev)
1692 {
1693 	struct bge_dmamap_arg ctx;
1694 	struct bge_softc *sc;
1695 	int i, error;
1696 
1697 	sc = device_get_softc(dev);
1698 
1699 	/*
1700 	 * Allocate the parent bus DMA tag appropriate for PCI.
1701 	 */
1702 	error = bus_dma_tag_create(NULL,	/* parent */
1703 			PAGE_SIZE, 0,		/* alignment, boundary */
1704 			BUS_SPACE_MAXADDR,	/* lowaddr */
1705 			BUS_SPACE_MAXADDR,	/* highaddr */
1706 			NULL, NULL,		/* filter, filterarg */
1707 			MAXBSIZE, BGE_NSEG_NEW,	/* maxsize, nsegments */
1708 			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1709 			0,			/* flags */
1710 			NULL, NULL,		/* lockfunc, lockarg */
1711 			&sc->bge_cdata.bge_parent_tag);
1712 
1713 	if (error != 0) {
1714 		device_printf(sc->bge_dev,
1715 		    "could not allocate parent dma tag\n");
1716 		return (ENOMEM);
1717 	}
1718 
1719 	/*
1720 	 * Create tag for RX mbufs.
1721 	 */
1722 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
1723 	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1724 	    NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
1725 	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
1726 
1727 	if (error) {
1728 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1729 		return (ENOMEM);
1730 	}
1731 
1732 	/* Create DMA maps for RX buffers. */
1733 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1734 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1735 			    &sc->bge_cdata.bge_rx_std_dmamap[i]);
1736 		if (error) {
1737 			device_printf(sc->bge_dev,
1738 			    "can't create DMA map for RX\n");
1739 			return (ENOMEM);
1740 		}
1741 	}
1742 
1743 	/* Create DMA maps for TX buffers. */
1744 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1745 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1746 			    &sc->bge_cdata.bge_tx_dmamap[i]);
1747 		if (error) {
1748 			device_printf(sc->bge_dev,
1749 			    "can't create DMA map for RX\n");
1750 			return (ENOMEM);
1751 		}
1752 	}
1753 
1754 	/* Create tag for standard RX ring. */
1755 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1756 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1757 	    NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1758 	    NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1759 
1760 	if (error) {
1761 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1762 		return (ENOMEM);
1763 	}
1764 
1765 	/* Allocate DMA'able memory for standard RX ring. */
1766 	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1767 	    (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1768 	    &sc->bge_cdata.bge_rx_std_ring_map);
1769 	if (error)
1770 		return (ENOMEM);
1771 
1772 	bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1773 
1774 	/* Load the address of the standard RX ring. */
1775 	ctx.bge_maxsegs = 1;
1776 	ctx.sc = sc;
1777 
1778 	error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
1779 	    sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
1780 	    BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1781 
1782 	if (error)
1783 		return (ENOMEM);
1784 
1785 	sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
1786 
1787 	/* Create tags for jumbo mbufs. */
1788 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
1789 		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1790 		    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1791 		    NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
1792 		    0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
1793 		if (error) {
1794 			device_printf(sc->bge_dev,
1795 			    "could not allocate jumbo dma tag\n");
1796 			return (ENOMEM);
1797 		}
1798 
1799 		/* Create tag for jumbo RX ring. */
1800 		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1801 		    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1802 		    NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
1803 		    NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
1804 
1805 		if (error) {
1806 			device_printf(sc->bge_dev,
1807 			    "could not allocate jumbo ring dma tag\n");
1808 			return (ENOMEM);
1809 		}
1810 
1811 		/* Allocate DMA'able memory for jumbo RX ring. */
1812 		error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1813 		    (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
1814 		    BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1815 		    &sc->bge_cdata.bge_rx_jumbo_ring_map);
1816 		if (error)
1817 			return (ENOMEM);
1818 
1819 		/* Load the address of the jumbo RX ring. */
1820 		ctx.bge_maxsegs = 1;
1821 		ctx.sc = sc;
1822 
1823 		error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1824 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
1825 		    sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
1826 		    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1827 
1828 		if (error)
1829 			return (ENOMEM);
1830 
1831 		sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
1832 
1833 		/* Create DMA maps for jumbo RX buffers. */
1834 		for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1835 			error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
1836 				    0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1837 			if (error) {
1838 				device_printf(sc->bge_dev,
1839 				    "can't create DMA map for jumbo RX\n");
1840 				return (ENOMEM);
1841 			}
1842 		}
1843 
1844 	}
1845 
1846 	/* Create tag for RX return ring. */
1847 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1848 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1849 	    NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
1850 	    NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
1851 
1852 	if (error) {
1853 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1854 		return (ENOMEM);
1855 	}
1856 
1857 	/* Allocate DMA'able memory for RX return ring. */
1858 	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
1859 	    (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
1860 	    &sc->bge_cdata.bge_rx_return_ring_map);
1861 	if (error)
1862 		return (ENOMEM);
1863 
1864 	bzero((char *)sc->bge_ldata.bge_rx_return_ring,
1865 	    BGE_RX_RTN_RING_SZ(sc));
1866 
1867 	/* Load the address of the RX return ring. */
1868 	ctx.bge_maxsegs = 1;
1869 	ctx.sc = sc;
1870 
1871 	error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
1872 	    sc->bge_cdata.bge_rx_return_ring_map,
1873 	    sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
1874 	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1875 
1876 	if (error)
1877 		return (ENOMEM);
1878 
1879 	sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
1880 
1881 	/* Create tag for TX ring. */
1882 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1883 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1884 	    NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
1885 	    &sc->bge_cdata.bge_tx_ring_tag);
1886 
1887 	if (error) {
1888 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1889 		return (ENOMEM);
1890 	}
1891 
1892 	/* Allocate DMA'able memory for TX ring. */
1893 	error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
1894 	    (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
1895 	    &sc->bge_cdata.bge_tx_ring_map);
1896 	if (error)
1897 		return (ENOMEM);
1898 
1899 	bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1900 
1901 	/* Load the address of the TX ring. */
1902 	ctx.bge_maxsegs = 1;
1903 	ctx.sc = sc;
1904 
1905 	error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
1906 	    sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
1907 	    BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1908 
1909 	if (error)
1910 		return (ENOMEM);
1911 
1912 	sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
1913 
1914 	/* Create tag for status block. */
1915 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1916 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1917 	    NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
1918 	    NULL, NULL, &sc->bge_cdata.bge_status_tag);
1919 
1920 	if (error) {
1921 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1922 		return (ENOMEM);
1923 	}
1924 
1925 	/* Allocate DMA'able memory for status block. */
1926 	error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
1927 	    (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
1928 	    &sc->bge_cdata.bge_status_map);
1929 	if (error)
1930 		return (ENOMEM);
1931 
1932 	bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
1933 
1934 	/* Load the address of the status block. */
1935 	ctx.sc = sc;
1936 	ctx.bge_maxsegs = 1;
1937 
1938 	error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
1939 	    sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
1940 	    BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1941 
1942 	if (error)
1943 		return (ENOMEM);
1944 
1945 	sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
1946 
1947 	/* Create tag for statistics block. */
1948 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1949 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1950 	    NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
1951 	    &sc->bge_cdata.bge_stats_tag);
1952 
1953 	if (error) {
1954 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1955 		return (ENOMEM);
1956 	}
1957 
1958 	/* Allocate DMA'able memory for statistics block. */
1959 	error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
1960 	    (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
1961 	    &sc->bge_cdata.bge_stats_map);
1962 	if (error)
1963 		return (ENOMEM);
1964 
1965 	bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
1966 
1967 	/* Load the address of the statstics block. */
1968 	ctx.sc = sc;
1969 	ctx.bge_maxsegs = 1;
1970 
1971 	error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
1972 	    sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
1973 	    BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1974 
1975 	if (error)
1976 		return (ENOMEM);
1977 
1978 	sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
1979 
1980 	return (0);
1981 }
1982 
1983 static int
1984 bge_attach(device_t dev)
1985 {
1986 	struct ifnet *ifp;
1987 	struct bge_softc *sc;
1988 	uint32_t hwcfg = 0;
1989 	uint32_t mac_tmp = 0;
1990 	u_char eaddr[6];
1991 	int error = 0, rid;
1992 
1993 	sc = device_get_softc(dev);
1994 	sc->bge_dev = dev;
1995 
1996 	/*
1997 	 * Map control/status registers.
1998 	 */
1999 	pci_enable_busmaster(dev);
2000 
2001 	rid = BGE_PCI_BAR0;
2002 	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2003 	    RF_ACTIVE|PCI_RF_DENSE);
2004 
2005 	if (sc->bge_res == NULL) {
2006 		device_printf (sc->bge_dev, "couldn't map memory\n");
2007 		error = ENXIO;
2008 		goto fail;
2009 	}
2010 
2011 	sc->bge_btag = rman_get_bustag(sc->bge_res);
2012 	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2013 
2014 	/* Allocate interrupt. */
2015 	rid = 0;
2016 
2017 	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2018 	    RF_SHAREABLE | RF_ACTIVE);
2019 
2020 	if (sc->bge_irq == NULL) {
2021 		device_printf(sc->bge_dev, "couldn't map interrupt\n");
2022 		error = ENXIO;
2023 		goto fail;
2024 	}
2025 
2026 	BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2027 
2028 	/* Save ASIC rev. */
2029 
2030 	sc->bge_chipid =
2031 	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2032 	    BGE_PCIMISCCTL_ASICREV;
2033 	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2034 	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2035 
2036 	/*
2037 	 * XXX: Broadcom Linux driver.  Not in specs or eratta.
2038 	 * PCI-Express?
2039 	 */
2040 	if (BGE_IS_5705_OR_BEYOND(sc)) {
2041 		uint32_t v;
2042 
2043 		v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
2044 		if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
2045 			v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2046 			if ((v & 0xff) == BGE_PCIE_CAPID)
2047 				sc->bge_pcie = 1;
2048 		}
2049 	}
2050 
2051 	/*
2052 	 * PCI-X ?
2053 	 */
2054 	if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
2055 	    BGE_PCISTATE_PCI_BUSMODE) == 0)
2056 		sc->bge_pcix = 1;
2057 
2058 	/* Try to reset the chip. */
2059 	bge_reset(sc);
2060 
2061 	if (bge_chipinit(sc)) {
2062 		device_printf(sc->bge_dev, "chip initialization failed\n");
2063 		bge_release_resources(sc);
2064 		error = ENXIO;
2065 		goto fail;
2066 	}
2067 
2068 	/*
2069 	 * Get station address from the EEPROM.
2070 	 */
2071 	mac_tmp = bge_readmem_ind(sc, 0x0c14);
2072 	if ((mac_tmp >> 16) == 0x484b) {
2073 		eaddr[0] = (u_char)(mac_tmp >> 8);
2074 		eaddr[1] = (u_char)mac_tmp;
2075 		mac_tmp = bge_readmem_ind(sc, 0x0c18);
2076 		eaddr[2] = (u_char)(mac_tmp >> 24);
2077 		eaddr[3] = (u_char)(mac_tmp >> 16);
2078 		eaddr[4] = (u_char)(mac_tmp >> 8);
2079 		eaddr[5] = (u_char)mac_tmp;
2080 	} else if (bge_read_eeprom(sc, eaddr,
2081 	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2082 		device_printf(sc->bge_dev, "failed to read station address\n");
2083 		bge_release_resources(sc);
2084 		error = ENXIO;
2085 		goto fail;
2086 	}
2087 
2088 	/* 5705 limits RX return ring to 512 entries. */
2089 	if (BGE_IS_5705_OR_BEYOND(sc))
2090 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2091 	else
2092 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2093 
2094 	if (bge_dma_alloc(dev)) {
2095 		device_printf(sc->bge_dev,
2096 		    "failed to allocate DMA resources\n");
2097 		bge_release_resources(sc);
2098 		error = ENXIO;
2099 		goto fail;
2100 	}
2101 
2102 	/* Set default tuneable values. */
2103 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2104 	sc->bge_rx_coal_ticks = 150;
2105 	sc->bge_tx_coal_ticks = 150;
2106 	sc->bge_rx_max_coal_bds = 64;
2107 	sc->bge_tx_max_coal_bds = 128;
2108 
2109 	/* Set up ifnet structure */
2110 	ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2111 	if (ifp == NULL) {
2112 		device_printf(sc->bge_dev, "failed to if_alloc()\n");
2113 		bge_release_resources(sc);
2114 		error = ENXIO;
2115 		goto fail;
2116 	}
2117 	ifp->if_softc = sc;
2118 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2119 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2120 	ifp->if_ioctl = bge_ioctl;
2121 	ifp->if_start = bge_start;
2122 	ifp->if_watchdog = bge_watchdog;
2123 	ifp->if_init = bge_init;
2124 	ifp->if_mtu = ETHERMTU;
2125 	ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2126 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2127 	IFQ_SET_READY(&ifp->if_snd);
2128 	ifp->if_hwassist = BGE_CSUM_FEATURES;
2129 	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2130 	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM;
2131 	ifp->if_capenable = ifp->if_capabilities;
2132 #ifdef DEVICE_POLLING
2133 	ifp->if_capabilities |= IFCAP_POLLING;
2134 #endif
2135 
2136 	/*
2137 	 * 5700 B0 chips do not support checksumming correctly due
2138 	 * to hardware bugs.
2139 	 */
2140 	if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2141 		ifp->if_capabilities &= ~IFCAP_HWCSUM;
2142 		ifp->if_capenable &= IFCAP_HWCSUM;
2143 		ifp->if_hwassist = 0;
2144 	}
2145 
2146 	/*
2147 	 * Figure out what sort of media we have by checking the
2148 	 * hardware config word in the first 32k of NIC internal memory,
2149 	 * or fall back to examining the EEPROM if necessary.
2150 	 * Note: on some BCM5700 cards, this value appears to be unset.
2151 	 * If that's the case, we have to rely on identifying the NIC
2152 	 * by its PCI subsystem ID, as we do below for the SysKonnect
2153 	 * SK-9D41.
2154 	 */
2155 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2156 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2157 	else {
2158 		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2159 		    sizeof(hwcfg))) {
2160 			device_printf(sc->bge_dev, "failed to read EEPROM\n");
2161 			bge_release_resources(sc);
2162 			error = ENXIO;
2163 			goto fail;
2164 		}
2165 		hwcfg = ntohl(hwcfg);
2166 	}
2167 
2168 	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2169 		sc->bge_tbi = 1;
2170 
2171 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
2172 	if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2173 		sc->bge_tbi = 1;
2174 
2175 	if (sc->bge_tbi) {
2176 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2177 		    bge_ifmedia_upd, bge_ifmedia_sts);
2178 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2179 		ifmedia_add(&sc->bge_ifmedia,
2180 		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2181 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2182 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2183 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2184 	} else {
2185 		/*
2186 		 * Do transceiver setup.
2187 		 */
2188 		if (mii_phy_probe(dev, &sc->bge_miibus,
2189 		    bge_ifmedia_upd, bge_ifmedia_sts)) {
2190 			device_printf(sc->bge_dev, "MII without any PHY!\n");
2191 			bge_release_resources(sc);
2192 			error = ENXIO;
2193 			goto fail;
2194 		}
2195 	}
2196 
2197 	/*
2198 	 * When using the BCM5701 in PCI-X mode, data corruption has
2199 	 * been observed in the first few bytes of some received packets.
2200 	 * Aligning the packet buffer in memory eliminates the corruption.
2201 	 * Unfortunately, this misaligns the packet payloads.  On platforms
2202 	 * which do not support unaligned accesses, we will realign the
2203 	 * payloads by copying the received packets.
2204 	 */
2205 	if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && sc->bge_pcix)
2206                 sc->bge_rx_alignment_bug = 1;
2207 
2208 	/*
2209 	 * Call MI attach routine.
2210 	 */
2211 	ether_ifattach(ifp, eaddr);
2212 	callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE);
2213 
2214 	/*
2215 	 * Hookup IRQ last.
2216 	 */
2217 	error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2218 	   bge_intr, sc, &sc->bge_intrhand);
2219 
2220 	if (error) {
2221 		bge_detach(dev);
2222 		device_printf(sc->bge_dev, "couldn't set up irq\n");
2223 	}
2224 
2225 fail:
2226 	return (error);
2227 }
2228 
2229 static int
2230 bge_detach(device_t dev)
2231 {
2232 	struct bge_softc *sc;
2233 	struct ifnet *ifp;
2234 
2235 	sc = device_get_softc(dev);
2236 	ifp = sc->bge_ifp;
2237 
2238 #ifdef DEVICE_POLLING
2239 	if (ifp->if_capenable & IFCAP_POLLING)
2240 		ether_poll_deregister(ifp);
2241 #endif
2242 
2243 	BGE_LOCK(sc);
2244 	bge_stop(sc);
2245 	bge_reset(sc);
2246 	BGE_UNLOCK(sc);
2247 
2248 	ether_ifdetach(ifp);
2249 
2250 	if (sc->bge_tbi) {
2251 		ifmedia_removeall(&sc->bge_ifmedia);
2252 	} else {
2253 		bus_generic_detach(dev);
2254 		device_delete_child(dev, sc->bge_miibus);
2255 	}
2256 
2257 	bge_release_resources(sc);
2258 
2259 	return (0);
2260 }
2261 
2262 static void
2263 bge_release_resources(struct bge_softc *sc)
2264 {
2265 	device_t dev;
2266 
2267 	dev = sc->bge_dev;
2268 
2269 	if (sc->bge_vpd_prodname != NULL)
2270 		free(sc->bge_vpd_prodname, M_DEVBUF);
2271 
2272 	if (sc->bge_vpd_readonly != NULL)
2273 		free(sc->bge_vpd_readonly, M_DEVBUF);
2274 
2275 	if (sc->bge_intrhand != NULL)
2276 		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2277 
2278 	if (sc->bge_irq != NULL)
2279 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2280 
2281 	if (sc->bge_res != NULL)
2282 		bus_release_resource(dev, SYS_RES_MEMORY,
2283 		    BGE_PCI_BAR0, sc->bge_res);
2284 
2285 	if (sc->bge_ifp != NULL)
2286 		if_free(sc->bge_ifp);
2287 
2288 	bge_dma_free(sc);
2289 
2290 	if (mtx_initialized(&sc->bge_mtx))	/* XXX */
2291 		BGE_LOCK_DESTROY(sc);
2292 }
2293 
2294 static void
2295 bge_reset(struct bge_softc *sc)
2296 {
2297 	device_t dev;
2298 	uint32_t cachesize, command, pcistate, reset;
2299 	int i, val = 0;
2300 
2301 	dev = sc->bge_dev;
2302 
2303 	/* Save some important PCI state. */
2304 	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2305 	command = pci_read_config(dev, BGE_PCI_CMD, 4);
2306 	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2307 
2308 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2309 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2310 	BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2311 
2312 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2313 
2314 	/* XXX: Broadcom Linux driver. */
2315 	if (sc->bge_pcie) {
2316 		if (CSR_READ_4(sc, 0x7e2c) == 0x60)	/* PCIE 1.0 */
2317 			CSR_WRITE_4(sc, 0x7e2c, 0x20);
2318 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2319 			/* Prevent PCIE link training during global reset */
2320 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2321 			reset |= (1<<29);
2322 		}
2323 	}
2324 
2325 	/* Issue global reset */
2326 	bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2327 
2328 	DELAY(1000);
2329 
2330 	/* XXX: Broadcom Linux driver. */
2331 	if (sc->bge_pcie) {
2332 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2333 			uint32_t v;
2334 
2335 			DELAY(500000); /* wait for link training to complete */
2336 			v = pci_read_config(dev, 0xc4, 4);
2337 			pci_write_config(dev, 0xc4, v | (1<<15), 4);
2338 		}
2339 		/* Set PCIE max payload size and clear error status. */
2340 		pci_write_config(dev, 0xd8, 0xf5000, 4);
2341 	}
2342 
2343 	/* Reset some of the PCI state that got zapped by reset. */
2344 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2345 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2346 	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2347 	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2348 	pci_write_config(dev, BGE_PCI_CMD, command, 4);
2349 	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2350 
2351 	/* Enable memory arbiter. */
2352 	if (BGE_IS_5714_FAMILY(sc)) {
2353 		uint32_t val;
2354 
2355 		val = CSR_READ_4(sc, BGE_MARB_MODE);
2356 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2357 	} else
2358 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2359 
2360 	/*
2361 	 * Prevent PXE restart: write a magic number to the
2362 	 * general communications memory at 0xB50.
2363 	 */
2364 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2365 	/*
2366 	 * Poll the value location we just wrote until
2367 	 * we see the 1's complement of the magic number.
2368 	 * This indicates that the firmware initialization
2369 	 * is complete.
2370 	 */
2371 	for (i = 0; i < BGE_TIMEOUT; i++) {
2372 		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2373 		if (val == ~BGE_MAGIC_NUMBER)
2374 			break;
2375 		DELAY(10);
2376 	}
2377 
2378 	if (i == BGE_TIMEOUT) {
2379 		device_printf(sc->bge_dev, "firmware handshake timed out\n");
2380 		return;
2381 	}
2382 
2383 	/*
2384 	 * XXX Wait for the value of the PCISTATE register to
2385 	 * return to its original pre-reset state. This is a
2386 	 * fairly good indicator of reset completion. If we don't
2387 	 * wait for the reset to fully complete, trying to read
2388 	 * from the device's non-PCI registers may yield garbage
2389 	 * results.
2390 	 */
2391 	for (i = 0; i < BGE_TIMEOUT; i++) {
2392 		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2393 			break;
2394 		DELAY(10);
2395 	}
2396 
2397 	/* Fix up byte swapping. */
2398 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
2399 	    BGE_MODECTL_BYTESWAP_DATA);
2400 
2401 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2402 
2403 	/*
2404 	 * The 5704 in TBI mode apparently needs some special
2405 	 * adjustment to insure the SERDES drive level is set
2406 	 * to 1.2V.
2407 	 */
2408 	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
2409 		uint32_t serdescfg;
2410 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2411 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
2412 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2413 	}
2414 
2415 	/* XXX: Broadcom Linux driver. */
2416 	if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2417 		uint32_t v;
2418 
2419 		v = CSR_READ_4(sc, 0x7c00);
2420 		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2421 	}
2422 	DELAY(10000);
2423 }
2424 
2425 /*
2426  * Frame reception handling. This is called if there's a frame
2427  * on the receive return list.
2428  *
2429  * Note: we have to be able to handle two possibilities here:
2430  * 1) the frame is from the jumbo receive ring
2431  * 2) the frame is from the standard receive ring
2432  */
2433 
2434 static void
2435 bge_rxeof(struct bge_softc *sc)
2436 {
2437 	struct ifnet *ifp;
2438 	int stdcnt = 0, jumbocnt = 0;
2439 
2440 	BGE_LOCK_ASSERT(sc);
2441 
2442 	/* Nothing to do. */
2443 	if (sc->bge_rx_saved_considx ==
2444 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
2445 		return;
2446 
2447 	ifp = sc->bge_ifp;
2448 
2449 	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2450 	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
2451 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2452 	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
2453 	if (BGE_IS_JUMBO_CAPABLE(sc))
2454 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2455 		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTREAD);
2456 
2457 	while(sc->bge_rx_saved_considx !=
2458 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2459 		struct bge_rx_bd	*cur_rx;
2460 		uint32_t		rxidx;
2461 		struct mbuf		*m = NULL;
2462 		uint16_t		vlan_tag = 0;
2463 		int			have_tag = 0;
2464 
2465 #ifdef DEVICE_POLLING
2466 		if (ifp->if_capenable & IFCAP_POLLING) {
2467 			if (sc->rxcycles <= 0)
2468 				break;
2469 			sc->rxcycles--;
2470 		}
2471 #endif
2472 
2473 		cur_rx =
2474 	    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2475 
2476 		rxidx = cur_rx->bge_idx;
2477 		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2478 
2479 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2480 			have_tag = 1;
2481 			vlan_tag = cur_rx->bge_vlan_tag;
2482 		}
2483 
2484 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2485 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2486 			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
2487 			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
2488 			    BUS_DMASYNC_POSTREAD);
2489 			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
2490 			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
2491 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2492 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2493 			jumbocnt++;
2494 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2495 				ifp->if_ierrors++;
2496 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2497 				continue;
2498 			}
2499 			if (bge_newbuf_jumbo(sc,
2500 			    sc->bge_jumbo, NULL) == ENOBUFS) {
2501 				ifp->if_ierrors++;
2502 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2503 				continue;
2504 			}
2505 		} else {
2506 			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2507 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2508 			    sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2509 			    BUS_DMASYNC_POSTREAD);
2510 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2511 			    sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2512 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2513 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2514 			stdcnt++;
2515 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2516 				ifp->if_ierrors++;
2517 				bge_newbuf_std(sc, sc->bge_std, m);
2518 				continue;
2519 			}
2520 			if (bge_newbuf_std(sc, sc->bge_std,
2521 			    NULL) == ENOBUFS) {
2522 				ifp->if_ierrors++;
2523 				bge_newbuf_std(sc, sc->bge_std, m);
2524 				continue;
2525 			}
2526 		}
2527 
2528 		ifp->if_ipackets++;
2529 #ifndef __NO_STRICT_ALIGNMENT
2530 		/*
2531 		 * For architectures with strict alignment we must make sure
2532 		 * the payload is aligned.
2533 		 */
2534 		if (sc->bge_rx_alignment_bug) {
2535 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2536 			    cur_rx->bge_len);
2537 			m->m_data += ETHER_ALIGN;
2538 		}
2539 #endif
2540 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2541 		m->m_pkthdr.rcvif = ifp;
2542 
2543 		if (ifp->if_capenable & IFCAP_RXCSUM) {
2544 			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2545 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2546 				if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2547 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2548 			}
2549 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
2550 			    m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
2551 				m->m_pkthdr.csum_data =
2552 				    cur_rx->bge_tcp_udp_csum;
2553 				m->m_pkthdr.csum_flags |=
2554 				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2555 			}
2556 		}
2557 
2558 		/*
2559 		 * If we received a packet with a vlan tag,
2560 		 * attach that information to the packet.
2561 		 */
2562 		if (have_tag) {
2563 			VLAN_INPUT_TAG(ifp, m, vlan_tag);
2564 			if (m == NULL)
2565 				continue;
2566 		}
2567 
2568 		BGE_UNLOCK(sc);
2569 		(*ifp->if_input)(ifp, m);
2570 		BGE_LOCK(sc);
2571 	}
2572 
2573 	if (stdcnt > 0)
2574 		bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2575 		    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
2576 
2577 	if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0)
2578 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2579 		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
2580 
2581 	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2582 	if (stdcnt)
2583 		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2584 	if (jumbocnt)
2585 		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2586 }
2587 
2588 static void
2589 bge_txeof(struct bge_softc *sc)
2590 {
2591 	struct bge_tx_bd *cur_tx = NULL;
2592 	struct ifnet *ifp;
2593 
2594 	BGE_LOCK_ASSERT(sc);
2595 
2596 	/* Nothing to do. */
2597 	if (sc->bge_tx_saved_considx ==
2598 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
2599 		return;
2600 
2601 	ifp = sc->bge_ifp;
2602 
2603 	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
2604 	    sc->bge_cdata.bge_tx_ring_map,
2605 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2606 	/*
2607 	 * Go through our tx ring and free mbufs for those
2608 	 * frames that have been sent.
2609 	 */
2610 	while (sc->bge_tx_saved_considx !=
2611 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2612 		uint32_t		idx = 0;
2613 
2614 		idx = sc->bge_tx_saved_considx;
2615 		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2616 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2617 			ifp->if_opackets++;
2618 		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2619 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2620 			    sc->bge_cdata.bge_tx_dmamap[idx],
2621 			    BUS_DMASYNC_POSTWRITE);
2622 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2623 			    sc->bge_cdata.bge_tx_dmamap[idx]);
2624 			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2625 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2626 		}
2627 		sc->bge_txcnt--;
2628 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2629 		ifp->if_timer = 0;
2630 	}
2631 
2632 	if (cur_tx != NULL)
2633 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2634 }
2635 
2636 #ifdef DEVICE_POLLING
2637 static void
2638 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2639 {
2640 	struct bge_softc *sc = ifp->if_softc;
2641 	uint32_t statusword;
2642 
2643 	BGE_LOCK(sc);
2644 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2645 		BGE_UNLOCK(sc);
2646 		return;
2647 	}
2648 
2649 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2650 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2651 
2652 	statusword = atomic_readandclear_32(
2653 	    &sc->bge_ldata.bge_status_block->bge_status);
2654 
2655 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2656 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
2657 
2658 	/* Note link event. It will be processed by POLL_AND_CHECK_STATUS cmd */
2659 	if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
2660 		sc->bge_link_evt++;
2661 
2662 	if (cmd == POLL_AND_CHECK_STATUS)
2663 		if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2664 		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
2665 		    sc->bge_link_evt || sc->bge_tbi)
2666 			bge_link_upd(sc);
2667 
2668 	sc->rxcycles = count;
2669 	bge_rxeof(sc);
2670 	bge_txeof(sc);
2671 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2672 		bge_start_locked(ifp);
2673 
2674 	BGE_UNLOCK(sc);
2675 }
2676 #endif /* DEVICE_POLLING */
2677 
2678 static void
2679 bge_intr(void *xsc)
2680 {
2681 	struct bge_softc *sc;
2682 	struct ifnet *ifp;
2683 	uint32_t statusword;
2684 
2685 	sc = xsc;
2686 
2687 	BGE_LOCK(sc);
2688 
2689 	ifp = sc->bge_ifp;
2690 
2691 #ifdef DEVICE_POLLING
2692 	if (ifp->if_capenable & IFCAP_POLLING) {
2693 		BGE_UNLOCK(sc);
2694 		return;
2695 	}
2696 #endif
2697 
2698 	/*
2699 	 * Do the mandatory PCI flush as well as get the link status.
2700 	 */
2701 	statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
2702 
2703 	/* Ack interrupt and stop others from occuring. */
2704 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2705 
2706 	/* Make sure the descriptor ring indexes are coherent. */
2707 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2708 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2709 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2710 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
2711 
2712 	if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2713 	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
2714 	    statusword || sc->bge_link_evt)
2715 		bge_link_upd(sc);
2716 
2717 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2718 		/* Check RX return ring producer/consumer. */
2719 		bge_rxeof(sc);
2720 
2721 		/* Check TX ring producer/consumer. */
2722 		bge_txeof(sc);
2723 	}
2724 
2725 	/* Re-enable interrupts. */
2726 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2727 
2728 	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2729 	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2730 		bge_start_locked(ifp);
2731 
2732 	BGE_UNLOCK(sc);
2733 }
2734 
2735 static void
2736 bge_tick_locked(struct bge_softc *sc)
2737 {
2738 	struct mii_data *mii = NULL;
2739 
2740 	BGE_LOCK_ASSERT(sc);
2741 
2742 	if (BGE_IS_5705_OR_BEYOND(sc))
2743 		bge_stats_update_regs(sc);
2744 	else
2745 		bge_stats_update(sc);
2746 
2747 	if (!sc->bge_tbi) {
2748 		mii = device_get_softc(sc->bge_miibus);
2749 		mii_tick(mii);
2750 	} else {
2751 		/*
2752 		 * Since in TBI mode auto-polling can't be used we should poll
2753 		 * link status manually. Here we register pending link event
2754 		 * and trigger interrupt.
2755 		 */
2756 #ifdef DEVICE_POLLING
2757 		/* In polling mode we poll link state in bge_poll(). */
2758 		if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
2759 #endif
2760 		{
2761 		sc->bge_link_evt++;
2762 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
2763 		}
2764 	}
2765 
2766 	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
2767 }
2768 
2769 static void
2770 bge_tick(void *xsc)
2771 {
2772 	struct bge_softc *sc;
2773 
2774 	sc = xsc;
2775 
2776 	BGE_LOCK(sc);
2777 	bge_tick_locked(sc);
2778 	BGE_UNLOCK(sc);
2779 }
2780 
2781 static void
2782 bge_stats_update_regs(struct bge_softc *sc)
2783 {
2784 	struct bge_mac_stats_regs stats;
2785 	struct ifnet *ifp;
2786 	uint32_t *s;
2787 	u_long cnt;	/* current register value */
2788 	int i;
2789 
2790 	ifp = sc->bge_ifp;
2791 
2792 	s = (uint32_t *)&stats;
2793 	for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2794 		*s = CSR_READ_4(sc, BGE_RX_STATS + i);
2795 		s++;
2796 	}
2797 
2798 	cnt = stats.dot3StatsSingleCollisionFrames +
2799 	    stats.dot3StatsMultipleCollisionFrames +
2800 	    stats.dot3StatsExcessiveCollisions +
2801 	    stats.dot3StatsLateCollisions;
2802 	ifp->if_collisions += cnt >= sc->bge_tx_collisions ?
2803 	    cnt - sc->bge_tx_collisions : cnt;
2804 	sc->bge_tx_collisions = cnt;
2805 }
2806 
2807 static void
2808 bge_stats_update(struct bge_softc *sc)
2809 {
2810 	struct ifnet *ifp;
2811 	bus_size_t stats;
2812 	u_long cnt;	/* current register value */
2813 
2814 	ifp = sc->bge_ifp;
2815 
2816 	stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2817 
2818 #define READ_STAT(sc, stats, stat) \
2819 	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
2820 
2821 	cnt = READ_STAT(sc, stats,
2822 	    txstats.dot3StatsSingleCollisionFrames.bge_addr_lo);
2823 	cnt += READ_STAT(sc, stats,
2824 	    txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo);
2825 	cnt += READ_STAT(sc, stats,
2826 	    txstats.dot3StatsExcessiveCollisions.bge_addr_lo);
2827 	cnt += READ_STAT(sc, stats,
2828 		txstats.dot3StatsLateCollisions.bge_addr_lo);
2829 	ifp->if_collisions += cnt >= sc->bge_tx_collisions ?
2830 	    cnt - sc->bge_tx_collisions : cnt;
2831 	sc->bge_tx_collisions = cnt;
2832 
2833 	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
2834 	ifp->if_ierrors += cnt >= sc->bge_rx_discards ?
2835 	    cnt - sc->bge_rx_discards : cnt;
2836 	sc->bge_rx_discards = cnt;
2837 
2838 	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
2839 	ifp->if_oerrors += cnt >= sc->bge_tx_discards ?
2840 	    cnt - sc->bge_tx_discards : cnt;
2841 	sc->bge_tx_discards = cnt;
2842 
2843 #undef READ_STAT
2844 }
2845 
2846 /*
2847  * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
2848  * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
2849  * but when such padded frames employ the bge IP/TCP checksum offload,
2850  * the hardware checksum assist gives incorrect results (possibly
2851  * from incorporating its own padding into the UDP/TCP checksum; who knows).
2852  * If we pad such runts with zeros, the onboard checksum comes out correct.
2853  */
2854 static __inline int
2855 bge_cksum_pad(struct mbuf *m)
2856 {
2857 	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
2858 	struct mbuf *last;
2859 
2860 	/* If there's only the packet-header and we can pad there, use it. */
2861 	if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
2862 	    M_TRAILINGSPACE(m) >= padlen) {
2863 		last = m;
2864 	} else {
2865 		/*
2866 		 * Walk packet chain to find last mbuf. We will either
2867 		 * pad there, or append a new mbuf and pad it.
2868 		 */
2869 		for (last = m; last->m_next != NULL; last = last->m_next);
2870 		if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
2871 			/* Allocate new empty mbuf, pad it. Compact later. */
2872 			struct mbuf *n;
2873 
2874 			MGET(n, M_DONTWAIT, MT_DATA);
2875 			if (n == NULL)
2876 				return (ENOBUFS);
2877 			n->m_len = 0;
2878 			last->m_next = n;
2879 			last = n;
2880 		}
2881 	}
2882 
2883 	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
2884 	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
2885 	last->m_len += padlen;
2886 	m->m_pkthdr.len += padlen;
2887 
2888 	return (0);
2889 }
2890 
2891 /*
2892  * Encapsulate an mbuf chain in the tx ring  by coupling the mbuf data
2893  * pointers to descriptors.
2894  */
2895 static int
2896 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
2897 {
2898 	bus_dma_segment_t	segs[BGE_NSEG_NEW];
2899 	bus_dmamap_t		map;
2900 	struct bge_tx_bd	*d = NULL;
2901 	struct m_tag		*mtag;
2902 	uint32_t		idx = *txidx;
2903 	uint16_t		csum_flags = 0;
2904 	int			nsegs, i, error;
2905 
2906 	if (m_head->m_pkthdr.csum_flags) {
2907 		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2908 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2909 		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
2910 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2911 			if (m_head->m_pkthdr.len < ETHER_MIN_NOPAD &&
2912 			    bge_cksum_pad(m_head) != 0)
2913 				return (ENOBUFS);
2914 		}
2915 		if (m_head->m_flags & M_LASTFRAG)
2916 			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2917 		else if (m_head->m_flags & M_FRAG)
2918 			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2919 	}
2920 
2921 	mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m_head);
2922 
2923 	map = sc->bge_cdata.bge_tx_dmamap[idx];
2924 	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map,
2925 	    m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2926 	if (error) {
2927 		if (error == EFBIG) {
2928 			struct mbuf *m0;
2929 
2930 			m0 = m_defrag(m_head, M_DONTWAIT);
2931 			if (m0 == NULL)
2932 				return (ENOBUFS);
2933 			m_head = m0;
2934 			error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag,
2935 			    map, m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2936 		}
2937 		if (error)
2938 			return (error);
2939 	}
2940 
2941 	/*
2942 	 * Sanity check: avoid coming within 16 descriptors
2943 	 * of the end of the ring.
2944 	 */
2945 	if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
2946 		bus_dmamap_unload(sc->bge_cdata.bge_mtag, map);
2947 		return (ENOBUFS);
2948 	}
2949 
2950 	bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
2951 
2952 	for (i = 0; ; i++) {
2953 		d = &sc->bge_ldata.bge_tx_ring[idx];
2954 		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
2955 		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
2956 		d->bge_len = segs[i].ds_len;
2957 		d->bge_flags = csum_flags;
2958 		if (i == nsegs - 1)
2959 			break;
2960 		BGE_INC(idx, BGE_TX_RING_CNT);
2961 	}
2962 
2963 	/* Mark the last segment as end of packet... */
2964 	d->bge_flags |= BGE_TXBDFLAG_END;
2965 	/* ... and put VLAN tag into first segment.  */
2966 	d = &sc->bge_ldata.bge_tx_ring[*txidx];
2967 	if (mtag != NULL) {
2968 		d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2969 		d->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
2970 	} else
2971 		d->bge_vlan_tag = 0;
2972 
2973 	/*
2974 	 * Insure that the map for this transmission
2975 	 * is placed at the array index of the last descriptor
2976 	 * in this chain.
2977 	 */
2978 	sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
2979 	sc->bge_cdata.bge_tx_dmamap[idx] = map;
2980 	sc->bge_cdata.bge_tx_chain[idx] = m_head;
2981 	sc->bge_txcnt += nsegs;
2982 
2983 	BGE_INC(idx, BGE_TX_RING_CNT);
2984 	*txidx = idx;
2985 
2986 	return (0);
2987 }
2988 
2989 /*
2990  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2991  * to the mbuf data regions directly in the transmit descriptors.
2992  */
2993 static void
2994 bge_start_locked(struct ifnet *ifp)
2995 {
2996 	struct bge_softc *sc;
2997 	struct mbuf *m_head = NULL;
2998 	uint32_t prodidx;
2999 	int count = 0;
3000 
3001 	sc = ifp->if_softc;
3002 
3003 	if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3004 		return;
3005 
3006 	prodidx = sc->bge_tx_prodidx;
3007 
3008 	while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3009 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3010 		if (m_head == NULL)
3011 			break;
3012 
3013 		/*
3014 		 * XXX
3015 		 * The code inside the if() block is never reached since we
3016 		 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3017 		 * requests to checksum TCP/UDP in a fragmented packet.
3018 		 *
3019 		 * XXX
3020 		 * safety overkill.  If this is a fragmented packet chain
3021 		 * with delayed TCP/UDP checksums, then only encapsulate
3022 		 * it if we have enough descriptors to handle the entire
3023 		 * chain at once.
3024 		 * (paranoia -- may not actually be needed)
3025 		 */
3026 		if (m_head->m_flags & M_FIRSTFRAG &&
3027 		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3028 			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3029 			    m_head->m_pkthdr.csum_data + 16) {
3030 				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3031 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3032 				break;
3033 			}
3034 		}
3035 
3036 		/*
3037 		 * Pack the data into the transmit ring. If we
3038 		 * don't have room, set the OACTIVE flag and wait
3039 		 * for the NIC to drain the ring.
3040 		 */
3041 		if (bge_encap(sc, m_head, &prodidx)) {
3042 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3043 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3044 			break;
3045 		}
3046 		++count;
3047 
3048 		/*
3049 		 * If there's a BPF listener, bounce a copy of this frame
3050 		 * to him.
3051 		 */
3052 		BPF_MTAP(ifp, m_head);
3053 	}
3054 
3055 	if (count == 0)
3056 		/* No packets were dequeued. */
3057 		return;
3058 
3059 	/* Transmit. */
3060 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3061 	/* 5700 b2 errata */
3062 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3063 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3064 
3065 	sc->bge_tx_prodidx = prodidx;
3066 
3067 	/*
3068 	 * Set a timeout in case the chip goes out to lunch.
3069 	 */
3070 	ifp->if_timer = 5;
3071 }
3072 
3073 /*
3074  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3075  * to the mbuf data regions directly in the transmit descriptors.
3076  */
3077 static void
3078 bge_start(struct ifnet *ifp)
3079 {
3080 	struct bge_softc *sc;
3081 
3082 	sc = ifp->if_softc;
3083 	BGE_LOCK(sc);
3084 	bge_start_locked(ifp);
3085 	BGE_UNLOCK(sc);
3086 }
3087 
3088 static void
3089 bge_init_locked(struct bge_softc *sc)
3090 {
3091 	struct ifnet *ifp;
3092 	uint16_t *m;
3093 
3094 	BGE_LOCK_ASSERT(sc);
3095 
3096 	ifp = sc->bge_ifp;
3097 
3098 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3099 		return;
3100 
3101 	/* Cancel pending I/O and flush buffers. */
3102 	bge_stop(sc);
3103 	bge_reset(sc);
3104 	bge_chipinit(sc);
3105 
3106 	/*
3107 	 * Init the various state machines, ring
3108 	 * control blocks and firmware.
3109 	 */
3110 	if (bge_blockinit(sc)) {
3111 		device_printf(sc->bge_dev, "initialization failure\n");
3112 		return;
3113 	}
3114 
3115 	ifp = sc->bge_ifp;
3116 
3117 	/* Specify MTU. */
3118 	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3119 	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
3120 
3121 	/* Load our MAC address. */
3122 	m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
3123 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3124 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3125 
3126 	/* Enable or disable promiscuous mode as needed. */
3127 	if (ifp->if_flags & IFF_PROMISC) {
3128 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3129 	} else {
3130 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3131 	}
3132 
3133 	/* Program multicast filter. */
3134 	bge_setmulti(sc);
3135 
3136 	/* Init RX ring. */
3137 	bge_init_rx_ring_std(sc);
3138 
3139 	/*
3140 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3141 	 * memory to insure that the chip has in fact read the first
3142 	 * entry of the ring.
3143 	 */
3144 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3145 		uint32_t		v, i;
3146 		for (i = 0; i < 10; i++) {
3147 			DELAY(20);
3148 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3149 			if (v == (MCLBYTES - ETHER_ALIGN))
3150 				break;
3151 		}
3152 		if (i == 10)
3153 			device_printf (sc->bge_dev,
3154 			    "5705 A0 chip failed to load RX ring\n");
3155 	}
3156 
3157 	/* Init jumbo RX ring. */
3158 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3159 		bge_init_rx_ring_jumbo(sc);
3160 
3161 	/* Init our RX return ring index. */
3162 	sc->bge_rx_saved_considx = 0;
3163 
3164 	/* Init TX ring. */
3165 	bge_init_tx_ring(sc);
3166 
3167 	/* Turn on transmitter. */
3168 	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3169 
3170 	/* Turn on receiver. */
3171 	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3172 
3173 	/* Tell firmware we're alive. */
3174 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3175 
3176 #ifdef DEVICE_POLLING
3177 	/* Disable interrupts if we are polling. */
3178 	if (ifp->if_capenable & IFCAP_POLLING) {
3179 		BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3180 		    BGE_PCIMISCCTL_MASK_PCI_INTR);
3181 		CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3182 		CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3183 		CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3184 	} else
3185 #endif
3186 
3187 	/* Enable host interrupts. */
3188 	{
3189 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3190 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3191 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3192 	}
3193 
3194 	bge_ifmedia_upd(ifp);
3195 
3196 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3197 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3198 
3199 	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3200 }
3201 
3202 static void
3203 bge_init(void *xsc)
3204 {
3205 	struct bge_softc *sc = xsc;
3206 
3207 	BGE_LOCK(sc);
3208 	bge_init_locked(sc);
3209 	BGE_UNLOCK(sc);
3210 }
3211 
3212 /*
3213  * Set media options.
3214  */
3215 static int
3216 bge_ifmedia_upd(struct ifnet *ifp)
3217 {
3218 	struct bge_softc *sc;
3219 	struct mii_data *mii;
3220 	struct ifmedia *ifm;
3221 
3222 	sc = ifp->if_softc;
3223 	ifm = &sc->bge_ifmedia;
3224 
3225 	/* If this is a 1000baseX NIC, enable the TBI port. */
3226 	if (sc->bge_tbi) {
3227 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3228 			return (EINVAL);
3229 		switch(IFM_SUBTYPE(ifm->ifm_media)) {
3230 		case IFM_AUTO:
3231 			/*
3232 			 * The BCM5704 ASIC appears to have a special
3233 			 * mechanism for programming the autoneg
3234 			 * advertisement registers in TBI mode.
3235 			 */
3236 			if (bge_fake_autoneg == 0 &&
3237 			    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3238 				uint32_t sgdig;
3239 				CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3240 				sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3241 				sgdig |= BGE_SGDIGCFG_AUTO|
3242 				    BGE_SGDIGCFG_PAUSE_CAP|
3243 				    BGE_SGDIGCFG_ASYM_PAUSE;
3244 				CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3245 				    sgdig|BGE_SGDIGCFG_SEND);
3246 				DELAY(5);
3247 				CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3248 			}
3249 			break;
3250 		case IFM_1000_SX:
3251 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3252 				BGE_CLRBIT(sc, BGE_MAC_MODE,
3253 				    BGE_MACMODE_HALF_DUPLEX);
3254 			} else {
3255 				BGE_SETBIT(sc, BGE_MAC_MODE,
3256 				    BGE_MACMODE_HALF_DUPLEX);
3257 			}
3258 			break;
3259 		default:
3260 			return (EINVAL);
3261 		}
3262 		return (0);
3263 	}
3264 
3265 	sc->bge_link_evt++;
3266 	mii = device_get_softc(sc->bge_miibus);
3267 	if (mii->mii_instance) {
3268 		struct mii_softc *miisc;
3269 		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3270 		    miisc = LIST_NEXT(miisc, mii_list))
3271 			mii_phy_reset(miisc);
3272 	}
3273 	mii_mediachg(mii);
3274 
3275 	return (0);
3276 }
3277 
3278 /*
3279  * Report current media status.
3280  */
3281 static void
3282 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3283 {
3284 	struct bge_softc *sc;
3285 	struct mii_data *mii;
3286 
3287 	sc = ifp->if_softc;
3288 
3289 	if (sc->bge_tbi) {
3290 		ifmr->ifm_status = IFM_AVALID;
3291 		ifmr->ifm_active = IFM_ETHER;
3292 		if (CSR_READ_4(sc, BGE_MAC_STS) &
3293 		    BGE_MACSTAT_TBI_PCS_SYNCHED)
3294 			ifmr->ifm_status |= IFM_ACTIVE;
3295 		else {
3296 			ifmr->ifm_active |= IFM_NONE;
3297 			return;
3298 		}
3299 		ifmr->ifm_active |= IFM_1000_SX;
3300 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3301 			ifmr->ifm_active |= IFM_HDX;
3302 		else
3303 			ifmr->ifm_active |= IFM_FDX;
3304 		return;
3305 	}
3306 
3307 	mii = device_get_softc(sc->bge_miibus);
3308 	mii_pollstat(mii);
3309 	ifmr->ifm_active = mii->mii_media_active;
3310 	ifmr->ifm_status = mii->mii_media_status;
3311 }
3312 
3313 static int
3314 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3315 {
3316 	struct bge_softc *sc = ifp->if_softc;
3317 	struct ifreq *ifr = (struct ifreq *) data;
3318 	struct mii_data *mii;
3319 	int mask, error = 0;
3320 
3321 	switch (command) {
3322 	case SIOCSIFMTU:
3323 		if (ifr->ifr_mtu < ETHERMIN ||
3324 		    ((BGE_IS_JUMBO_CAPABLE(sc)) &&
3325 		    ifr->ifr_mtu > BGE_JUMBO_MTU) ||
3326 		    ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
3327 		    ifr->ifr_mtu > ETHERMTU))
3328 			error = EINVAL;
3329 		else if (ifp->if_mtu != ifr->ifr_mtu) {
3330 			ifp->if_mtu = ifr->ifr_mtu;
3331 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3332 			bge_init(sc);
3333 		}
3334 		break;
3335 	case SIOCSIFFLAGS:
3336 		BGE_LOCK(sc);
3337 		if (ifp->if_flags & IFF_UP) {
3338 			/*
3339 			 * If only the state of the PROMISC flag changed,
3340 			 * then just use the 'set promisc mode' command
3341 			 * instead of reinitializing the entire NIC. Doing
3342 			 * a full re-init means reloading the firmware and
3343 			 * waiting for it to start up, which may take a
3344 			 * second or two.  Similarly for ALLMULTI.
3345 			 */
3346 			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3347 			    ifp->if_flags & IFF_PROMISC &&
3348 			    !(sc->bge_if_flags & IFF_PROMISC)) {
3349 				BGE_SETBIT(sc, BGE_RX_MODE,
3350 				    BGE_RXMODE_RX_PROMISC);
3351 			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3352 			    !(ifp->if_flags & IFF_PROMISC) &&
3353 			    sc->bge_if_flags & IFF_PROMISC) {
3354 				BGE_CLRBIT(sc, BGE_RX_MODE,
3355 				    BGE_RXMODE_RX_PROMISC);
3356 			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3357 			    (ifp->if_flags ^ sc->bge_if_flags) & IFF_ALLMULTI) {
3358 				bge_setmulti(sc);
3359 			} else
3360 				bge_init_locked(sc);
3361 		} else {
3362 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3363 				bge_stop(sc);
3364 			}
3365 		}
3366 		sc->bge_if_flags = ifp->if_flags;
3367 		BGE_UNLOCK(sc);
3368 		error = 0;
3369 		break;
3370 	case SIOCADDMULTI:
3371 	case SIOCDELMULTI:
3372 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3373 			BGE_LOCK(sc);
3374 			bge_setmulti(sc);
3375 			BGE_UNLOCK(sc);
3376 			error = 0;
3377 		}
3378 		break;
3379 	case SIOCSIFMEDIA:
3380 	case SIOCGIFMEDIA:
3381 		if (sc->bge_tbi) {
3382 			error = ifmedia_ioctl(ifp, ifr,
3383 			    &sc->bge_ifmedia, command);
3384 		} else {
3385 			mii = device_get_softc(sc->bge_miibus);
3386 			error = ifmedia_ioctl(ifp, ifr,
3387 			    &mii->mii_media, command);
3388 		}
3389 		break;
3390 	case SIOCSIFCAP:
3391 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3392 #ifdef DEVICE_POLLING
3393 		if (mask & IFCAP_POLLING) {
3394 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
3395 				error = ether_poll_register(bge_poll, ifp);
3396 				if (error)
3397 					return (error);
3398 				BGE_LOCK(sc);
3399 				BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3400 				    BGE_PCIMISCCTL_MASK_PCI_INTR);
3401 				CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3402 				CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3403 				CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3404 				ifp->if_capenable |= IFCAP_POLLING;
3405 				BGE_UNLOCK(sc);
3406 			} else {
3407 				error = ether_poll_deregister(ifp);
3408 				/* Enable interrupt even in error case */
3409 				BGE_LOCK(sc);
3410 				CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
3411 				CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
3412 				BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
3413 				    BGE_PCIMISCCTL_MASK_PCI_INTR);
3414 				CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3415 				ifp->if_capenable &= ~IFCAP_POLLING;
3416 				BGE_UNLOCK(sc);
3417 			}
3418 		}
3419 #endif
3420 		if (mask & IFCAP_HWCSUM) {
3421 			ifp->if_capenable ^= IFCAP_HWCSUM;
3422 			if (IFCAP_HWCSUM & ifp->if_capenable &&
3423 			    IFCAP_HWCSUM & ifp->if_capabilities)
3424 				ifp->if_hwassist = BGE_CSUM_FEATURES;
3425 			else
3426 				ifp->if_hwassist = 0;
3427 			VLAN_CAPABILITIES(ifp);
3428 		}
3429 		break;
3430 	default:
3431 		error = ether_ioctl(ifp, command, data);
3432 		break;
3433 	}
3434 
3435 	return (error);
3436 }
3437 
3438 static void
3439 bge_watchdog(struct ifnet *ifp)
3440 {
3441 	struct bge_softc *sc;
3442 
3443 	sc = ifp->if_softc;
3444 
3445 	if_printf(ifp, "watchdog timeout -- resetting\n");
3446 
3447 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3448 	bge_init(sc);
3449 
3450 	ifp->if_oerrors++;
3451 }
3452 
3453 /*
3454  * Stop the adapter and free any mbufs allocated to the
3455  * RX and TX lists.
3456  */
3457 static void
3458 bge_stop(struct bge_softc *sc)
3459 {
3460 	struct ifnet *ifp;
3461 	struct ifmedia_entry *ifm;
3462 	struct mii_data *mii = NULL;
3463 	int mtmp, itmp;
3464 
3465 	BGE_LOCK_ASSERT(sc);
3466 
3467 	ifp = sc->bge_ifp;
3468 
3469 	if (!sc->bge_tbi)
3470 		mii = device_get_softc(sc->bge_miibus);
3471 
3472 	callout_stop(&sc->bge_stat_ch);
3473 
3474 	/*
3475 	 * Disable all of the receiver blocks.
3476 	 */
3477 	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3478 	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3479 	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3480 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
3481 		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3482 	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3483 	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3484 	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3485 
3486 	/*
3487 	 * Disable all of the transmit blocks.
3488 	 */
3489 	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3490 	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3491 	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3492 	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3493 	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3494 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
3495 		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3496 	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3497 
3498 	/*
3499 	 * Shut down all of the memory managers and related
3500 	 * state machines.
3501 	 */
3502 	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3503 	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3504 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
3505 		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3506 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3507 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3508 	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
3509 		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3510 		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3511 	}
3512 
3513 	/* Disable host interrupts. */
3514 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3515 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3516 
3517 	/*
3518 	 * Tell firmware we're shutting down.
3519 	 */
3520 	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3521 
3522 	/* Free the RX lists. */
3523 	bge_free_rx_ring_std(sc);
3524 
3525 	/* Free jumbo RX list. */
3526 	if (BGE_IS_JUMBO_CAPABLE(sc))
3527 		bge_free_rx_ring_jumbo(sc);
3528 
3529 	/* Free TX buffers. */
3530 	bge_free_tx_ring(sc);
3531 
3532 	/*
3533 	 * Isolate/power down the PHY, but leave the media selection
3534 	 * unchanged so that things will be put back to normal when
3535 	 * we bring the interface back up.
3536 	 */
3537 	if (!sc->bge_tbi) {
3538 		itmp = ifp->if_flags;
3539 		ifp->if_flags |= IFF_UP;
3540 		/*
3541 		 * If we are called from bge_detach(), mii is already NULL.
3542 		 */
3543 		if (mii != NULL) {
3544 			ifm = mii->mii_media.ifm_cur;
3545 			mtmp = ifm->ifm_media;
3546 			ifm->ifm_media = IFM_ETHER|IFM_NONE;
3547 			mii_mediachg(mii);
3548 			ifm->ifm_media = mtmp;
3549 		}
3550 		ifp->if_flags = itmp;
3551 	}
3552 
3553 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3554 
3555 	/*
3556 	 * We can't just call bge_link_upd() cause chip is almost stopped so
3557 	 * bge_link_upd -> bge_tick_locked -> bge_stats_update sequence may
3558 	 * lead to hardware deadlock. So we just clearing MAC's link state
3559 	 * (PHY may still have link UP).
3560 	 */
3561 	if (bootverbose && sc->bge_link)
3562 		if_printf(sc->bge_ifp, "link DOWN\n");
3563 	sc->bge_link = 0;
3564 
3565 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3566 }
3567 
3568 /*
3569  * Stop all chip I/O so that the kernel's probe routines don't
3570  * get confused by errant DMAs when rebooting.
3571  */
3572 static void
3573 bge_shutdown(device_t dev)
3574 {
3575 	struct bge_softc *sc;
3576 
3577 	sc = device_get_softc(dev);
3578 
3579 	BGE_LOCK(sc);
3580 	bge_stop(sc);
3581 	bge_reset(sc);
3582 	BGE_UNLOCK(sc);
3583 }
3584 
3585 static int
3586 bge_suspend(device_t dev)
3587 {
3588 	struct bge_softc *sc;
3589 
3590 	sc = device_get_softc(dev);
3591 	BGE_LOCK(sc);
3592 	bge_stop(sc);
3593 	BGE_UNLOCK(sc);
3594 
3595 	return (0);
3596 }
3597 
3598 static int
3599 bge_resume(device_t dev)
3600 {
3601 	struct bge_softc *sc;
3602 	struct ifnet *ifp;
3603 
3604 	sc = device_get_softc(dev);
3605 	BGE_LOCK(sc);
3606 	ifp = sc->bge_ifp;
3607 	if (ifp->if_flags & IFF_UP) {
3608 		bge_init_locked(sc);
3609 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3610 			bge_start_locked(ifp);
3611 	}
3612 	BGE_UNLOCK(sc);
3613 
3614 	return (0);
3615 }
3616 
3617 static void
3618 bge_link_upd(struct bge_softc *sc)
3619 {
3620 	struct mii_data *mii;
3621 	uint32_t link, status;
3622 
3623 	BGE_LOCK_ASSERT(sc);
3624 
3625 	/* Clear 'pending link event' flag. */
3626 	sc->bge_link_evt = 0;
3627 
3628 	/*
3629 	 * Process link state changes.
3630 	 * Grrr. The link status word in the status block does
3631 	 * not work correctly on the BCM5700 rev AX and BX chips,
3632 	 * according to all available information. Hence, we have
3633 	 * to enable MII interrupts in order to properly obtain
3634 	 * async link changes. Unfortunately, this also means that
3635 	 * we have to read the MAC status register to detect link
3636 	 * changes, thereby adding an additional register access to
3637 	 * the interrupt handler.
3638 	 *
3639 	 * XXX: perhaps link state detection procedure used for
3640 	 * BGE_CHIPID_BCM5700_B2 can be used for others BCM5700 revisions.
3641 	 */
3642 
3643 	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3644 	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
3645 		status = CSR_READ_4(sc, BGE_MAC_STS);
3646 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
3647 			callout_stop(&sc->bge_stat_ch);
3648 			bge_tick_locked(sc);
3649 
3650 			mii = device_get_softc(sc->bge_miibus);
3651 			if (!sc->bge_link &&
3652 			    mii->mii_media_status & IFM_ACTIVE &&
3653 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3654 				sc->bge_link++;
3655 				if (bootverbose)
3656 					if_printf(sc->bge_ifp, "link UP\n");
3657 			} else if (sc->bge_link &&
3658 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
3659 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3660 				sc->bge_link = 0;
3661 				if (bootverbose)
3662 					if_printf(sc->bge_ifp, "link DOWN\n");
3663 			}
3664 
3665 			/* Clear the interrupt. */
3666 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3667 			    BGE_EVTENB_MI_INTERRUPT);
3668 			bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
3669 			bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
3670 			    BRGPHY_INTRS);
3671 		}
3672 		return;
3673 	}
3674 
3675 	if (sc->bge_tbi) {
3676 		status = CSR_READ_4(sc, BGE_MAC_STS);
3677 		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3678 			if (!sc->bge_link) {
3679 				sc->bge_link++;
3680 				if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
3681 					BGE_CLRBIT(sc, BGE_MAC_MODE,
3682 					    BGE_MACMODE_TBI_SEND_CFGS);
3683 				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3684 				if (bootverbose)
3685 					if_printf(sc->bge_ifp, "link UP\n");
3686 				if_link_state_change(sc->bge_ifp,
3687 				    LINK_STATE_UP);
3688 			}
3689 		} else if (sc->bge_link) {
3690 			sc->bge_link = 0;
3691 			if (bootverbose)
3692 				if_printf(sc->bge_ifp, "link DOWN\n");
3693 			if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
3694 		}
3695 	/* Discard link events for MII/GMII cards if MI auto-polling disabled */
3696 	} else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
3697 		/*
3698 		 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit
3699 		 * in status word always set. Workaround this bug by reading
3700 		 * PHY link status directly.
3701 		 */
3702 		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
3703 
3704 		if (link != sc->bge_link ||
3705 		    sc->bge_asicrev == BGE_ASICREV_BCM5700) {
3706 			callout_stop(&sc->bge_stat_ch);
3707 			bge_tick_locked(sc);
3708 
3709 			mii = device_get_softc(sc->bge_miibus);
3710 			if (!sc->bge_link &&
3711 			    mii->mii_media_status & IFM_ACTIVE &&
3712 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3713 				sc->bge_link++;
3714 				if (bootverbose)
3715 					if_printf(sc->bge_ifp, "link UP\n");
3716 			} else if (sc->bge_link &&
3717 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
3718 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3719 				sc->bge_link = 0;
3720 				if (bootverbose)
3721 					if_printf(sc->bge_ifp, "link DOWN\n");
3722 			}
3723 		}
3724 	}
3725 
3726 	/* Clear the attention. */
3727 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
3728 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
3729 	    BGE_MACSTAT_LINK_CHANGED);
3730 }
3731