xref: /freebsd/sys/dev/bge/if_bge.c (revision c96ae1968a6ab7056427a739bce81bf07447c2d4)
1 /*-
2  * Copyright (c) 2001 Wind River Systems
3  * Copyright (c) 1997, 1998, 1999, 2001
4  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 /*
38  * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39  *
40  * The Broadcom BCM5700 is based on technology originally developed by
41  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45  * frames, highly configurable RX filtering, and 16 RX and TX queues
46  * (which, along with RX filter rules, can be used for QOS applications).
47  * Other features, such as TCP segmentation, may be available as part
48  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49  * firmware images can be stored in hardware and need not be compiled
50  * into the driver.
51  *
52  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53  * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54  *
55  * The BCM5701 is a single-chip solution incorporating both the BCM5700
56  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57  * does not support external SSRAM.
58  *
59  * Broadcom also produces a variation of the BCM5700 under the "Altima"
60  * brand name, which is functionally similar but lacks PCI-X support.
61  *
62  * Without external SSRAM, you can have at most 4 TX rings,
63  * and the use of the mini RX ring is disabled. This seems to imply
64  * that these features are simply not available on the BCM5701. As a
65  * result, this driver does not implement any support for the mini RX
66  * ring.
67  */
68 
69 #ifdef HAVE_KERNEL_OPTION_HEADERS
70 #include "opt_device_polling.h"
71 #endif
72 
73 #include <sys/param.h>
74 #include <sys/endian.h>
75 #include <sys/systm.h>
76 #include <sys/sockio.h>
77 #include <sys/mbuf.h>
78 #include <sys/malloc.h>
79 #include <sys/kernel.h>
80 #include <sys/module.h>
81 #include <sys/socket.h>
82 #include <sys/sysctl.h>
83 
84 #include <net/if.h>
85 #include <net/if_arp.h>
86 #include <net/ethernet.h>
87 #include <net/if_dl.h>
88 #include <net/if_media.h>
89 
90 #include <net/bpf.h>
91 
92 #include <net/if_types.h>
93 #include <net/if_vlan_var.h>
94 
95 #include <netinet/in_systm.h>
96 #include <netinet/in.h>
97 #include <netinet/ip.h>
98 
99 #include <machine/bus.h>
100 #include <machine/resource.h>
101 #include <sys/bus.h>
102 #include <sys/rman.h>
103 
104 #include <dev/mii/mii.h>
105 #include <dev/mii/miivar.h>
106 #include "miidevs.h"
107 #include <dev/mii/brgphyreg.h>
108 
109 #include <dev/pci/pcireg.h>
110 #include <dev/pci/pcivar.h>
111 
112 #include <dev/bge/if_bgereg.h>
113 
114 #define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
115 #define ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
116 
117 MODULE_DEPEND(bge, pci, 1, 1, 1);
118 MODULE_DEPEND(bge, ether, 1, 1, 1);
119 MODULE_DEPEND(bge, miibus, 1, 1, 1);
120 
121 /* "device miibus" required.  See GENERIC if you get errors here. */
122 #include "miibus_if.h"
123 
124 /*
125  * Various supported device vendors/types and their names. Note: the
126  * spec seems to indicate that the hardware still has Alteon's vendor
127  * ID burned into it, though it will always be overridden by the vendor
128  * ID in the EEPROM. Just to be safe, we cover all possibilities.
129  */
130 static struct bge_type {
131 	uint16_t	bge_vid;
132 	uint16_t	bge_did;
133 } bge_devs[] = {
134 	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5700 },
135 	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5701 },
136 
137 	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1000 },
138 	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1002 },
139 	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC9100 },
140 
141 	{ APPLE_VENDORID,	APPLE_DEVICE_BCM5701 },
142 
143 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5700 },
144 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5701 },
145 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702 },
146 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702_ALT },
147 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702X },
148 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703 },
149 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703_ALT },
150 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703X },
151 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704C },
152 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S },
153 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S_ALT },
154 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705 },
155 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705F },
156 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705K },
157 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M },
158 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M_ALT },
159 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714C },
160 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714S },
161 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715 },
162 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715S },
163 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5720 },
164 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5721 },
165 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750 },
166 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750M },
167 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751 },
168 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751F },
169 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751M },
170 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752 },
171 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752M },
172 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753 },
173 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753F },
174 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753M },
175 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754 },
176 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754M },
177 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755 },
178 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755M },
179 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780 },
180 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780S },
181 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5781 },
182 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5782 },
183 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5786 },
184 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787 },
185 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787M },
186 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5788 },
187 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5789 },
188 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901 },
189 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901A2 },
190 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5903M },
191 
192 	{ SK_VENDORID,		SK_DEVICEID_ALTIMA },
193 
194 	{ TC_VENDORID,		TC_DEVICEID_3C996 },
195 
196 	{ 0, 0 }
197 };
198 
199 static const struct bge_vendor {
200 	uint16_t	v_id;
201 	const char	*v_name;
202 } bge_vendors[] = {
203 	{ ALTEON_VENDORID,	"Alteon" },
204 	{ ALTIMA_VENDORID,	"Altima" },
205 	{ APPLE_VENDORID,	"Apple" },
206 	{ BCOM_VENDORID,	"Broadcom" },
207 	{ SK_VENDORID,		"SysKonnect" },
208 	{ TC_VENDORID,		"3Com" },
209 
210 	{ 0, NULL }
211 };
212 
213 static const struct bge_revision {
214 	uint32_t	br_chipid;
215 	const char	*br_name;
216 } bge_revisions[] = {
217 	{ BGE_CHIPID_BCM5700_A0,	"BCM5700 A0" },
218 	{ BGE_CHIPID_BCM5700_A1,	"BCM5700 A1" },
219 	{ BGE_CHIPID_BCM5700_B0,	"BCM5700 B0" },
220 	{ BGE_CHIPID_BCM5700_B1,	"BCM5700 B1" },
221 	{ BGE_CHIPID_BCM5700_B2,	"BCM5700 B2" },
222 	{ BGE_CHIPID_BCM5700_B3,	"BCM5700 B3" },
223 	{ BGE_CHIPID_BCM5700_ALTIMA,	"BCM5700 Altima" },
224 	{ BGE_CHIPID_BCM5700_C0,	"BCM5700 C0" },
225 	{ BGE_CHIPID_BCM5701_A0,	"BCM5701 A0" },
226 	{ BGE_CHIPID_BCM5701_B0,	"BCM5701 B0" },
227 	{ BGE_CHIPID_BCM5701_B2,	"BCM5701 B2" },
228 	{ BGE_CHIPID_BCM5701_B5,	"BCM5701 B5" },
229 	{ BGE_CHIPID_BCM5703_A0,	"BCM5703 A0" },
230 	{ BGE_CHIPID_BCM5703_A1,	"BCM5703 A1" },
231 	{ BGE_CHIPID_BCM5703_A2,	"BCM5703 A2" },
232 	{ BGE_CHIPID_BCM5703_A3,	"BCM5703 A3" },
233 	{ BGE_CHIPID_BCM5703_B0,	"BCM5703 B0" },
234 	{ BGE_CHIPID_BCM5704_A0,	"BCM5704 A0" },
235 	{ BGE_CHIPID_BCM5704_A1,	"BCM5704 A1" },
236 	{ BGE_CHIPID_BCM5704_A2,	"BCM5704 A2" },
237 	{ BGE_CHIPID_BCM5704_A3,	"BCM5704 A3" },
238 	{ BGE_CHIPID_BCM5704_B0,	"BCM5704 B0" },
239 	{ BGE_CHIPID_BCM5705_A0,	"BCM5705 A0" },
240 	{ BGE_CHIPID_BCM5705_A1,	"BCM5705 A1" },
241 	{ BGE_CHIPID_BCM5705_A2,	"BCM5705 A2" },
242 	{ BGE_CHIPID_BCM5705_A3,	"BCM5705 A3" },
243 	{ BGE_CHIPID_BCM5750_A0,	"BCM5750 A0" },
244 	{ BGE_CHIPID_BCM5750_A1,	"BCM5750 A1" },
245 	{ BGE_CHIPID_BCM5750_A3,	"BCM5750 A3" },
246 	{ BGE_CHIPID_BCM5750_B0,	"BCM5750 B0" },
247 	{ BGE_CHIPID_BCM5750_B1,	"BCM5750 B1" },
248 	{ BGE_CHIPID_BCM5750_C0,	"BCM5750 C0" },
249 	{ BGE_CHIPID_BCM5750_C1,	"BCM5750 C1" },
250 	{ BGE_CHIPID_BCM5750_C2,	"BCM5750 C2" },
251 	{ BGE_CHIPID_BCM5714_A0,	"BCM5714 A0" },
252 	{ BGE_CHIPID_BCM5752_A0,	"BCM5752 A0" },
253 	{ BGE_CHIPID_BCM5752_A1,	"BCM5752 A1" },
254 	{ BGE_CHIPID_BCM5752_A2,	"BCM5752 A2" },
255 	{ BGE_CHIPID_BCM5714_B0,	"BCM5714 B0" },
256 	{ BGE_CHIPID_BCM5714_B3,	"BCM5714 B3" },
257 	{ BGE_CHIPID_BCM5715_A0,	"BCM5715 A0" },
258 	{ BGE_CHIPID_BCM5715_A1,	"BCM5715 A1" },
259 	/* 5784 and 5787 share the same ASIC ID */
260 	{ BGE_CHIPID_BCM5787_A0,	"BCM5754/5787 A0" },
261 	{ BGE_CHIPID_BCM5787_A1,	"BCM5754/5787 A1" },
262 	{ BGE_CHIPID_BCM5787_A2,	"BCM5754/5787 A2" },
263 
264 	{ 0, NULL }
265 };
266 
267 /*
268  * Some defaults for major revisions, so that newer steppings
269  * that we don't know about have a shot at working.
270  */
271 static const struct bge_revision bge_majorrevs[] = {
272 	{ BGE_ASICREV_BCM5700,		"unknown BCM5700" },
273 	{ BGE_ASICREV_BCM5701,		"unknown BCM5701" },
274 	{ BGE_ASICREV_BCM5703,		"unknown BCM5703" },
275 	{ BGE_ASICREV_BCM5704,		"unknown BCM5704" },
276 	{ BGE_ASICREV_BCM5705,		"unknown BCM5705" },
277 	{ BGE_ASICREV_BCM5750,		"unknown BCM5750" },
278 	{ BGE_ASICREV_BCM5714_A0,	"unknown BCM5714" },
279 	{ BGE_ASICREV_BCM5752,		"unknown BCM5752" },
280 	{ BGE_ASICREV_BCM5780,		"unknown BCM5780" },
281 	{ BGE_ASICREV_BCM5714,		"unknown BCM5714" },
282 	{ BGE_ASICREV_BCM5755,		"unknown BCM5755" },
283 	/* 5784 and 5787 share the same ASIC ID */
284 	{ BGE_ASICREV_BCM5787,		"unknown BCM5754/5787" },
285 
286 	{ 0, NULL }
287 };
288 
289 #define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_FLAG_JUMBO)
290 #define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
291 #define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5705_PLUS)
292 #define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
293 #define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_575X_PLUS)
294 
295 const struct bge_revision * bge_lookup_rev(uint32_t);
296 const struct bge_vendor * bge_lookup_vendor(uint16_t);
297 static int bge_probe(device_t);
298 static int bge_attach(device_t);
299 static int bge_detach(device_t);
300 static int bge_suspend(device_t);
301 static int bge_resume(device_t);
302 static void bge_release_resources(struct bge_softc *);
303 static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
304 static int bge_dma_alloc(device_t);
305 static void bge_dma_free(struct bge_softc *);
306 
307 static void bge_txeof(struct bge_softc *);
308 static void bge_rxeof(struct bge_softc *);
309 
310 static void bge_asf_driver_up (struct bge_softc *);
311 static void bge_tick(void *);
312 static void bge_stats_update(struct bge_softc *);
313 static void bge_stats_update_regs(struct bge_softc *);
314 static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
315 
316 static void bge_intr(void *);
317 static void bge_start_locked(struct ifnet *);
318 static void bge_start(struct ifnet *);
319 static int bge_ioctl(struct ifnet *, u_long, caddr_t);
320 static void bge_init_locked(struct bge_softc *);
321 static void bge_init(void *);
322 static void bge_stop(struct bge_softc *);
323 static void bge_watchdog(struct bge_softc *);
324 static void bge_shutdown(device_t);
325 static int bge_ifmedia_upd_locked(struct ifnet *);
326 static int bge_ifmedia_upd(struct ifnet *);
327 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
328 
329 static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
330 static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
331 
332 static void bge_setpromisc(struct bge_softc *);
333 static void bge_setmulti(struct bge_softc *);
334 
335 static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
336 static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
337 static int bge_init_rx_ring_std(struct bge_softc *);
338 static void bge_free_rx_ring_std(struct bge_softc *);
339 static int bge_init_rx_ring_jumbo(struct bge_softc *);
340 static void bge_free_rx_ring_jumbo(struct bge_softc *);
341 static void bge_free_tx_ring(struct bge_softc *);
342 static int bge_init_tx_ring(struct bge_softc *);
343 
344 static int bge_chipinit(struct bge_softc *);
345 static int bge_blockinit(struct bge_softc *);
346 
347 static uint32_t bge_readmem_ind(struct bge_softc *, int);
348 static void bge_writemem_ind(struct bge_softc *, int, int);
349 #ifdef notdef
350 static uint32_t bge_readreg_ind(struct bge_softc *, int);
351 #endif
352 static void bge_writemem_direct(struct bge_softc *, int, int);
353 static void bge_writereg_ind(struct bge_softc *, int, int);
354 
355 static int bge_miibus_readreg(device_t, int, int);
356 static int bge_miibus_writereg(device_t, int, int, int);
357 static void bge_miibus_statchg(device_t);
358 #ifdef DEVICE_POLLING
359 static void bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
360 #endif
361 
362 #define BGE_RESET_START 1
363 #define BGE_RESET_STOP  2
364 static void bge_sig_post_reset(struct bge_softc *, int);
365 static void bge_sig_legacy(struct bge_softc *, int);
366 static void bge_sig_pre_reset(struct bge_softc *, int);
367 static int bge_reset(struct bge_softc *);
368 static void bge_link_upd(struct bge_softc *);
369 
370 /*
371  * The BGE_REGISTER_DEBUG option is only for low-level debugging.  It may
372  * leak information to untrusted users.  It is also known to cause alignment
373  * traps on certain architectures.
374  */
375 #ifdef BGE_REGISTER_DEBUG
376 static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
377 static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
378 static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
379 #endif
380 static void bge_add_sysctls(struct bge_softc *);
381 
382 static device_method_t bge_methods[] = {
383 	/* Device interface */
384 	DEVMETHOD(device_probe,		bge_probe),
385 	DEVMETHOD(device_attach,	bge_attach),
386 	DEVMETHOD(device_detach,	bge_detach),
387 	DEVMETHOD(device_shutdown,	bge_shutdown),
388 	DEVMETHOD(device_suspend,	bge_suspend),
389 	DEVMETHOD(device_resume,	bge_resume),
390 
391 	/* bus interface */
392 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
393 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
394 
395 	/* MII interface */
396 	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
397 	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
398 	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),
399 
400 	{ 0, 0 }
401 };
402 
403 static driver_t bge_driver = {
404 	"bge",
405 	bge_methods,
406 	sizeof(struct bge_softc)
407 };
408 
409 static devclass_t bge_devclass;
410 
411 DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
412 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
413 
414 static int bge_fake_autoneg = 0;
415 static int bge_allow_asf = 1;
416 
417 TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
418 TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
419 
420 SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
421 SYSCTL_INT(_hw_bge, OID_AUTO, fake_autoneg, CTLFLAG_RD, &bge_fake_autoneg, 0,
422 	"Enable fake autonegotiation for certain blade systems");
423 SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
424 	"Allow ASF mode if available");
425 
/*
 * Indirect access to registers/memory in NIC address space: point the PCI
 * memory window at the target offset, transfer a word through the window
 * data register, then park the window back at offset 0.
 */
426 static uint32_t
427 bge_readmem_ind(struct bge_softc *sc, int off)
428 {
429 	device_t dev;
430 	uint32_t val;
431 
432 	dev = sc->bge_dev;
433 
434 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
435 	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
436 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
437 	return (val);
438 }
439 
440 static void
441 bge_writemem_ind(struct bge_softc *sc, int off, int val)
442 {
443 	device_t dev;
444 
445 	dev = sc->bge_dev;
446 
447 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
448 	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
449 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
450 }
451 
452 #ifdef notdef
453 static uint32_t
454 bge_readreg_ind(struct bge_softc *sc, int off)
455 {
456 	device_t dev;
457 
458 	dev = sc->bge_dev;
459 
460 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
461 	return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
462 }
463 #endif
464 
465 static void
466 bge_writereg_ind(struct bge_softc *sc, int off, int val)
467 {
468 	device_t dev;
469 
470 	dev = sc->bge_dev;
471 
472 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
473 	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
474 }
475 
476 static void
477 bge_writemem_direct(struct bge_softc *sc, int off, int val)
478 {
479 	CSR_WRITE_4(sc, off, val);
480 }
481 
482 /*
483  * Map a single buffer address.
484  */
485 
486 static void
487 bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
488 {
489 	struct bge_dmamap_arg *ctx;
490 
491 	if (error)
492 		return;
493 
494 	ctx = arg;
495 
496 	if (nseg > ctx->bge_maxsegs) {
497 		ctx->bge_maxsegs = 0;
498 		return;
499 	}
500 
501 	ctx->bge_busaddr = segs->ds_addr;
502 }
503 
504 /*
505  * Read a byte of data stored in the EEPROM at address 'addr.' The
506  * BCM570x supports both the traditional bitbang interface and an
507  * auto access interface for reading the EEPROM. We use the auto
508  * access method.
509  */
510 static uint8_t
511 bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
512 {
513 	int i;
514 	uint32_t byte = 0;
515 
516 	/*
517 	 * Enable use of auto EEPROM access so we can avoid
518 	 * having to use the bitbang method.
519 	 */
520 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
521 
522 	/* Reset the EEPROM, load the clock period. */
523 	CSR_WRITE_4(sc, BGE_EE_ADDR,
524 	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
525 	DELAY(20);
526 
527 	/* Issue the read EEPROM command. */
528 	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
529 
530 	/* Wait for completion */
531 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
532 		DELAY(10);
533 		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
534 			break;
535 	}
536 
537 	if (i == BGE_TIMEOUT * 10) {
538 		device_printf(sc->bge_dev, "EEPROM read timed out\n");
539 		return (1);
540 	}
541 
542 	/* Get result. */
543 	byte = CSR_READ_4(sc, BGE_EE_DATA);
544 
545 	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;
546 
547 	return (0);
548 }
549 
550 /*
551  * Read a sequence of bytes from the EEPROM.
552  */
553 static int
554 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
555 {
556 	int i, error = 0;
557 	uint8_t byte = 0;
558 
559 	for (i = 0; i < cnt; i++) {
560 		error = bge_eeprom_getbyte(sc, off + i, &byte);
561 		if (error)
562 			break;
563 		*(dest + i) = byte;
564 	}
565 
566 	return (error ? 1 : 0);
567 }
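
/*
 * Illustrative usage sketch for bge_read_eeprom() above (not part of the
 * driver logic): the attach path pulls the station address out of the
 * EEPROM in roughly this way, assuming the BGE_EE_MAC_OFFSET constant from
 * if_bgereg.h:
 *
 *	uint8_t eaddr[ETHER_ADDR_LEN];
 *
 *	if (bge_read_eeprom(sc, (caddr_t)eaddr,
 *	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN) != 0)
 *		device_printf(sc->bge_dev,
 *		    "failed to read station address\n");
 */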
568 
569 static int
570 bge_miibus_readreg(device_t dev, int phy, int reg)
571 {
572 	struct bge_softc *sc;
573 	uint32_t val, autopoll;
574 	int i;
575 
576 	sc = device_get_softc(dev);
577 
578 	/*
579 	 * Broadcom's own driver always assumes the internal
580 	 * PHY is at GMII address 1. On some chips, the PHY responds
581 	 * to accesses at all addresses, which could cause us to
582 	 * bogusly attach the PHY 32 times at probe time. Always
583 	 * restricting the lookup to address 1 is simpler than
584 	 * trying to figure out which chip revisions should be
585 	 * special-cased.
586 	 */
587 	if (phy != 1)
588 		return (0);
589 
590 	/* Reading with autopolling on may trigger PCI errors */
591 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
592 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
593 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
594 		DELAY(40);
595 	}
596 
597 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
598 	    BGE_MIPHY(phy)|BGE_MIREG(reg));
599 
600 	for (i = 0; i < BGE_TIMEOUT; i++) {
601 		val = CSR_READ_4(sc, BGE_MI_COMM);
602 		if (!(val & BGE_MICOMM_BUSY))
603 			break;
604 	}
605 
606 	if (i == BGE_TIMEOUT) {
607 		device_printf(sc->bge_dev, "PHY read timed out\n");
608 		val = 0;
609 		goto done;
610 	}
611 
612 	val = CSR_READ_4(sc, BGE_MI_COMM);
613 
614 done:
615 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
616 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
617 		DELAY(40);
618 	}
619 
620 	if (val & BGE_MICOMM_READFAIL)
621 		return (0);
622 
623 	return (val & 0xFFFF);
624 }
625 
626 static int
627 bge_miibus_writereg(device_t dev, int phy, int reg, int val)
628 {
629 	struct bge_softc *sc;
630 	uint32_t autopoll;
631 	int i;
632 
633 	sc = device_get_softc(dev);
634 
635 	/* Accessing the PHY with autopolling on may trigger PCI errors */
636 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
637 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
638 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
639 		DELAY(40);
640 	}
641 
642 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
643 	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
644 
645 	for (i = 0; i < BGE_TIMEOUT; i++) {
646 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
647 			break;
648 	}
649 
650 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
651 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
652 		DELAY(40);
653 	}
654 
655 	if (i == BGE_TIMEOUT) {
656 		device_printf(sc->bge_dev, "PHY write timed out\n");
657 		return (0);
658 	}
659 
660 	return (0);
661 }
662 
663 static void
664 bge_miibus_statchg(device_t dev)
665 {
666 	struct bge_softc *sc;
667 	struct mii_data *mii;
668 	sc = device_get_softc(dev);
669 	mii = device_get_softc(sc->bge_miibus);
670 
671 	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
672 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
673 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
674 	else
675 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
676 
677 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
678 		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
679 	else
680 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
681 }
682 
683 /*
684  * Initialize a standard receive ring descriptor.
685  */
686 static int
687 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
688 {
689 	struct mbuf *m_new = NULL;
690 	struct bge_rx_bd *r;
691 	struct bge_dmamap_arg ctx;
692 	int error;
693 
694 	if (m == NULL) {
695 		m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
696 		if (m_new == NULL)
697 			return (ENOBUFS);
698 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
699 	} else {
700 		m_new = m;
701 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
702 		m_new->m_data = m_new->m_ext.ext_buf;
703 	}
704 
	/*
	 * Shift the payload by ETHER_ALIGN (2 bytes) so the IP header that
	 * follows the Ethernet header lands on a 32-bit boundary, except on
	 * chips with the RX alignment bug.
	 */
705 	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
706 		m_adj(m_new, ETHER_ALIGN);
707 	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
708 	r = &sc->bge_ldata.bge_rx_std_ring[i];
709 	ctx.bge_maxsegs = 1;
710 	ctx.sc = sc;
711 	error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
712 	    sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
713 	    m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
714 	if (error || ctx.bge_maxsegs == 0) {
715 		if (m == NULL) {
716 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
717 			m_freem(m_new);
718 		}
719 		return (ENOMEM);
720 	}
721 	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
722 	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
723 	r->bge_flags = BGE_RXBDFLAG_END;
724 	r->bge_len = m_new->m_len;
725 	r->bge_idx = i;
726 
727 	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
728 	    sc->bge_cdata.bge_rx_std_dmamap[i],
729 	    BUS_DMASYNC_PREREAD);
730 
731 	return (0);
732 }
733 
734 /*
735  * Initialize a jumbo receive ring descriptor. This allocates
736  * a jumbo buffer from the pool managed internally by the driver.
737  */
738 static int
739 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
740 {
741 	bus_dma_segment_t segs[BGE_NSEG_JUMBO];
742 	struct bge_extrx_bd *r;
743 	struct mbuf *m_new = NULL;
744 	int nsegs;
745 	int error;
746 
747 	if (m == NULL) {
748 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
749 		if (m_new == NULL)
750 			return (ENOBUFS);
751 
752 		m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
753 		if (!(m_new->m_flags & M_EXT)) {
754 			m_freem(m_new);
755 			return (ENOBUFS);
756 		}
757 		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
758 	} else {
759 		m_new = m;
760 		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
761 		m_new->m_data = m_new->m_ext.ext_buf;
762 	}
763 
764 	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
765 		m_adj(m_new, ETHER_ALIGN);
766 
767 	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
768 	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
769 	    m_new, segs, &nsegs, BUS_DMA_NOWAIT);
770 	if (error) {
771 		if (m == NULL)
772 			m_freem(m_new);
773 		return (error);
774 	}
775 	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
776 
777 	/*
778 	 * Fill in the extended RX buffer descriptor.
779 	 */
780 	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
781 	r->bge_flags = BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END;
782 	r->bge_idx = i;
783 	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
784 	switch (nsegs) {
785 	case 4:
786 		r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
787 		r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
788 		r->bge_len3 = segs[3].ds_len;
789 	case 3:
790 		r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
791 		r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
792 		r->bge_len2 = segs[2].ds_len;
793 	case 2:
794 		r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
795 		r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
796 		r->bge_len1 = segs[1].ds_len;
797 	case 1:
798 		r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
799 		r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
800 		r->bge_len0 = segs[0].ds_len;
801 		break;
802 	default:
803 		panic("%s: %d segments\n", __func__, nsegs);
804 	}
805 
806 	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
807 	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
808 	    BUS_DMASYNC_PREREAD);
809 
810 	return (0);
811 }
812 
813 /*
814  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
815  * that's 1MB of memory, which is a lot. For now, we fill only the first
816  * 256 ring entries and hope that our CPU is fast enough to keep up with
817  * the NIC.
818  */
819 static int
820 bge_init_rx_ring_std(struct bge_softc *sc)
821 {
822 	int i;
823 
824 	for (i = 0; i < BGE_SSLOTS; i++) {
825 		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
826 			return (ENOBUFS);
827 	}
828 
829 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
830 	    sc->bge_cdata.bge_rx_std_ring_map,
831 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
832 
833 	sc->bge_std = i - 1;
834 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
835 
836 	return (0);
837 }
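
/*
 * Sizing note for bge_init_rx_ring_std() above (a back-of-the-envelope
 * check, not new logic): BGE_STD_RX_RING_CNT (512) entries at one 2KB
 * mbuf cluster each would pin roughly 1MB, while filling only the first
 * BGE_SSLOTS (256, per the comment above) slots keeps it near 512KB.
 */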
838 
839 static void
840 bge_free_rx_ring_std(struct bge_softc *sc)
841 {
842 	int i;
843 
844 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
845 		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
846 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
847 			    sc->bge_cdata.bge_rx_std_dmamap[i],
848 			    BUS_DMASYNC_POSTREAD);
849 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
850 			    sc->bge_cdata.bge_rx_std_dmamap[i]);
851 			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
852 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
853 		}
854 		bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
855 		    sizeof(struct bge_rx_bd));
856 	}
857 }
858 
859 static int
860 bge_init_rx_ring_jumbo(struct bge_softc *sc)
861 {
862 	struct bge_rcb *rcb;
863 	int i;
864 
865 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
866 		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
867 			return (ENOBUFS);
868 	}
869 
870 	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
871 	    sc->bge_cdata.bge_rx_jumbo_ring_map,
872 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
873 
874 	sc->bge_jumbo = i - 1;
875 
876 	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
877 	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
878 				    BGE_RCB_FLAG_USE_EXT_RX_BD);
879 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
880 
881 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
882 
883 	return (0);
884 }
885 
886 static void
887 bge_free_rx_ring_jumbo(struct bge_softc *sc)
888 {
889 	int i;
890 
891 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
892 		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
893 			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
894 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
895 			    BUS_DMASYNC_POSTREAD);
896 			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
897 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
898 			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
899 			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
900 		}
901 		bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
902 		    sizeof(struct bge_extrx_bd));
903 	}
904 }
905 
906 static void
907 bge_free_tx_ring(struct bge_softc *sc)
908 {
909 	int i;
910 
911 	if (sc->bge_ldata.bge_tx_ring == NULL)
912 		return;
913 
914 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
915 		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
916 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
917 			    sc->bge_cdata.bge_tx_dmamap[i],
918 			    BUS_DMASYNC_POSTWRITE);
919 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
920 			    sc->bge_cdata.bge_tx_dmamap[i]);
921 			m_freem(sc->bge_cdata.bge_tx_chain[i]);
922 			sc->bge_cdata.bge_tx_chain[i] = NULL;
923 		}
924 		bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
925 		    sizeof(struct bge_tx_bd));
926 	}
927 }
928 
929 static int
930 bge_init_tx_ring(struct bge_softc *sc)
931 {
932 	sc->bge_txcnt = 0;
933 	sc->bge_tx_saved_considx = 0;
934 
935 	/* Initialize transmit producer index for host-memory send ring. */
936 	sc->bge_tx_prodidx = 0;
937 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
938 
939 	/* 5700 b2 errata */
940 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
941 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
942 
943 	/* NIC-memory send ring not used; initialize to zero. */
944 	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
945 	/* 5700 b2 errata */
946 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
947 		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
948 
949 	return (0);
950 }
951 
952 static void
953 bge_setpromisc(struct bge_softc *sc)
954 {
955 	struct ifnet *ifp;
956 
957 	BGE_LOCK_ASSERT(sc);
958 
959 	ifp = sc->bge_ifp;
960 
961 	/* Enable or disable promiscuous mode as needed. */
962 	if (ifp->if_flags & IFF_PROMISC)
963 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
964 	else
965 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
966 }
967 
968 static void
969 bge_setmulti(struct bge_softc *sc)
970 {
971 	struct ifnet *ifp;
972 	struct ifmultiaddr *ifma;
973 	uint32_t hashes[4] = { 0, 0, 0, 0 };
974 	int h, i;
975 
976 	BGE_LOCK_ASSERT(sc);
977 
978 	ifp = sc->bge_ifp;
979 
980 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
981 		for (i = 0; i < 4; i++)
982 			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
983 		return;
984 	}
985 
986 	/* First, zot all the existing filters. */
987 	for (i = 0; i < 4; i++)
988 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
989 
990 	/* Now program new ones. */
991 	IF_ADDR_LOCK(ifp);
992 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
993 		if (ifma->ifma_addr->sa_family != AF_LINK)
994 			continue;
		/*
		 * Hash on the low 7 bits of the little-endian CRC32 of the
		 * address: bits 6:5 select one of the four MAR registers,
		 * bits 4:0 the bit within it.
		 */
995 		h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
996 		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
997 		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
998 	}
999 	IF_ADDR_UNLOCK(ifp);
1000 
1001 	for (i = 0; i < 4; i++)
1002 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1003 }
1004 
1005 static void
1006 bge_sig_pre_reset(struct bge_softc *sc, int type)
1009 {
1010 	/*
1011 	 * Some chips don't like this so only do this if ASF is enabled
1012 	 */
1013 	if (sc->bge_asf_mode)
1014 		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1015 
1016 	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1017 		switch (type) {
1018 		case BGE_RESET_START:
1019 			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1020 			break;
1021 		case BGE_RESET_STOP:
1022 			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1023 			break;
1024 		}
1025 	}
1026 }
1027 
1028 static void
1029 bge_sig_post_reset(struct bge_softc *sc, int type)
1032 {
1033 	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1034 		switch (type) {
1035 		case BGE_RESET_START:
1036 			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
1037 			/* START DONE */
1038 			break;
1039 		case BGE_RESET_STOP:
1040 			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
1041 			break;
1042 		}
1043 	}
1044 }
1045 
1046 static void
1047 bge_sig_legacy(struct bge_softc *sc, int type)
1050 {
1051 	if (sc->bge_asf_mode) {
1052 		switch (type) {
1053 		case BGE_RESET_START:
1054 			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1055 			break;
1056 		case BGE_RESET_STOP:
1057 			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1058 			break;
1059 		}
1060 	}
1061 }
1062 
1063 void bge_stop_fw(struct bge_softc *);
1064 void
1065 bge_stop_fw(struct bge_softc *sc)
1067 {
1068 	int i;
1069 
1070 	if (sc->bge_asf_mode) {
1071 		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
1072 		CSR_WRITE_4(sc, BGE_CPU_EVENT,
1073 			    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
1074 
1075 		for (i = 0; i < 100; i++ ) {
1076 			if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
1077 				break;
1078 			DELAY(10);
1079 		}
1080 	}
1081 }
1082 
1083 /*
1084  * Do endian, PCI and DMA initialization. Also check the on-board ROM
1085  * self-test results.
1086  */
1087 static int
1088 bge_chipinit(struct bge_softc *sc)
1089 {
1090 	uint32_t dma_rw_ctl;
1091 	int i;
1092 
1093 	/* Set endianness before we access any non-PCI registers. */
1094 	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
1095 
1096 	/*
1097 	 * Check the 'ROM failed' bit on the RX CPU to see if
1098 	 * self-tests passed.
1099 	 */
1100 	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1101 		device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
1102 		return (ENODEV);
1103 	}
1104 
1105 	/* Clear the MAC control register */
1106 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1107 
1108 	/*
1109 	 * Clear the MAC statistics block in the NIC's
1110 	 * internal memory.
1111 	 */
1112 	for (i = BGE_STATS_BLOCK;
1113 	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1114 		BGE_MEMWIN_WRITE(sc, i, 0);
1115 
1116 	for (i = BGE_STATUS_BLOCK;
1117 	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1118 		BGE_MEMWIN_WRITE(sc, i, 0);
1119 
1120 	/* Set up the PCI DMA control register. */
1121 	if (sc->bge_flags & BGE_FLAG_PCIE) {
1122 		/* PCI Express bus */
1123 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1124 		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1125 		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1126 	} else if (sc->bge_flags & BGE_FLAG_PCIX) {
1127 		/* PCI-X bus */
1128 		if (BGE_IS_5714_FAMILY(sc)) {
1129 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
1130 			dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
1131 			/* XXX magic values, Broadcom-supplied Linux driver */
1132 			if (sc->bge_asicrev == BGE_ASICREV_BCM5780)
1133 				dma_rw_ctl |= (1 << 20) | (1 << 18) |
1134 				    BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1135 			else
1136 				dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15);
1137 
1138 		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1139 			/*
1140 			 * The 5704 uses a different encoding of read/write
1141 			 * watermarks.
1142 			 */
1143 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1144 			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1145 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1146 		else
1147 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1148 			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1149 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1150 			    (0x0F);
1151 
1152 		/*
1153 		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1154 		 * for hardware bugs.
1155 		 */
1156 		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1157 		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1158 			uint32_t tmp;
1159 
1160 			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1161 			if (tmp == 0x6 || tmp == 0x7)
1162 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1163 		}
1164 	} else
1165 		/* Conventional PCI bus */
1166 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1167 		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1168 		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1169 		    (0x0F);
1170 
1171 	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1172 	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1173 	    sc->bge_asicrev == BGE_ASICREV_BCM5705)
1174 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1175 	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1176 
1177 	/*
1178 	 * Set up general mode register.
1179 	 */
1180 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1181 	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1182 	    BGE_MODECTL_TX_NO_PHDR_CSUM);
1183 
1184 	/*
1185 	 * Tell the firmware the driver is running
1186 	 */
1187 	if (sc->bge_asf_mode & ASF_STACKUP)
1188 		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1189 
1190 	/*
1191 	 * Disable memory write invalidate.  Apparently it is not supported
1192 	 * properly by these devices.
1193 	 */
1194 	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1195 
1196 #ifdef __brokenalpha__
1197 	/*
1198 	 * Must ensure that we do not cross an 8K (bytes) boundary
1199 	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
1200 	 * restriction on some ALPHA platforms with early revision
1201 	 * 21174 PCI chipsets, such as the AlphaPC 164lx
1202 	 */
1203 	PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1204 	    BGE_PCI_READ_BNDRY_1024BYTES, 4);
1205 #endif
1206 
1207 	/* Set the timer prescaler (always 66MHz) */
1208 	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1209 
1210 	return (0);
1211 }
1212 
1213 static int
1214 bge_blockinit(struct bge_softc *sc)
1215 {
1216 	struct bge_rcb *rcb;
1217 	bus_size_t vrcb;
1218 	bge_hostaddr taddr;
1219 	uint32_t val;
1220 	int i;
1221 
1222 	/*
1223 	 * Initialize the memory window pointer register so that
1224 	 * we can access the first 32K of internal NIC RAM. This will
1225 	 * allow us to set up the TX send ring RCBs and the RX return
1226 	 * ring RCBs, plus other things which live in NIC memory.
1227 	 */
1228 	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
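
	/*
	 * Note (informational, not new logic): NIC addresses that fall
	 * outside this 32K window remain reachable through
	 * bge_readmem_ind()/bge_writemem_ind() above, which temporarily
	 * re-point BGE_PCI_MEMWIN_BASEADDR and then restore it to 0.
	 */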
1229 
1230 	/* Note: the BCM5704 has a smaller mbuf space than other chips. */
1231 
1232 	if (!(BGE_IS_5705_PLUS(sc))) {
1233 		/* Configure mbuf memory pool */
1234 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1235 		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1236 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1237 		else
1238 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1239 
1240 		/* Configure DMA resource pool */
1241 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1242 		    BGE_DMA_DESCRIPTORS);
1243 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1244 	}
1245 
1246 	/* Configure mbuf pool watermarks */
1247 	if (!(BGE_IS_5705_PLUS(sc))) {
1248 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1249 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1250 	} else {
1251 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1252 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1253 	}
1254 	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1255 
1256 	/* Configure DMA resource watermarks */
1257 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1258 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1259 
1260 	/* Enable buffer manager */
1261 	if (!(BGE_IS_5705_PLUS(sc))) {
1262 		CSR_WRITE_4(sc, BGE_BMAN_MODE,
1263 		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1264 
1265 		/* Poll for buffer manager start indication */
1266 		for (i = 0; i < BGE_TIMEOUT; i++) {
1267 			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1268 				break;
1269 			DELAY(10);
1270 		}
1271 
1272 		if (i == BGE_TIMEOUT) {
1273 			device_printf(sc->bge_dev,
1274 			    "buffer manager failed to start\n");
1275 			return (ENXIO);
1276 		}
1277 	}
1278 
1279 	/* Enable flow-through queues */
1280 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1281 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1282 
1283 	/* Wait until queue initialization is complete */
1284 	for (i = 0; i < BGE_TIMEOUT; i++) {
1285 		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1286 			break;
1287 		DELAY(10);
1288 	}
1289 
1290 	if (i == BGE_TIMEOUT) {
1291 		device_printf(sc->bge_dev, "flow-through queue init failed\n");
1292 		return (ENXIO);
1293 	}
1294 
1295 	/* Initialize the standard RX ring control block */
1296 	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1297 	rcb->bge_hostaddr.bge_addr_lo =
1298 	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1299 	rcb->bge_hostaddr.bge_addr_hi =
1300 	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1301 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1302 	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1303 	if (BGE_IS_5705_PLUS(sc))
1304 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1305 	else
1306 		rcb->bge_maxlen_flags =
1307 		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1308 	rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1309 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1310 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1311 
1312 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1313 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1314 
1315 	/*
1316 	 * Initialize the jumbo RX ring control block.
1317 	 * We set the 'ring disabled' bit in the flags
1318 	 * field until we're actually ready to start
1319 	 * using this ring (i.e. once we set the MTU
1320 	 * high enough to require it).
1321 	 */
1322 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
1323 		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1324 
1325 		rcb->bge_hostaddr.bge_addr_lo =
1326 		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1327 		rcb->bge_hostaddr.bge_addr_hi =
1328 		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1329 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1330 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
1331 		    BUS_DMASYNC_PREREAD);
1332 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1333 		    BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED);
1334 		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1335 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1336 		    rcb->bge_hostaddr.bge_addr_hi);
1337 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1338 		    rcb->bge_hostaddr.bge_addr_lo);
1339 
1340 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1341 		    rcb->bge_maxlen_flags);
1342 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1343 
1344 		/* Set up dummy disabled mini ring RCB */
1345 		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1346 		rcb->bge_maxlen_flags =
1347 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1348 		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1349 		    rcb->bge_maxlen_flags);
1350 	}
1351 
1352 	/*
1353 	 * Set the BD ring replenish thresholds. The recommended
1354 	 * values are 1/8th the number of descriptors allocated to
1355 	 * each ring.
1356 	 * XXX The 5754 requires a lower threshold, so it might be a
1357 	 * requirement of all 575x family chips.  The Linux driver sets
1358 	 * the lower threshold for all 5705 family chips as well, but there
1359 	 * are reports that it might not need to be so strict.
1360 	 */
1361 	if (BGE_IS_5705_PLUS(sc))
1362 		val = 8;
1363 	else
1364 		val = BGE_STD_RX_RING_CNT / 8;
1365 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1366 	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
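
	/*
	 * Worked example of the 1/8th rule above (informational only): the
	 * 512-entry standard ring gives 512 / 8 = 64 descriptors on
	 * pre-5705 chips, while 5705-and-later parts use the fixed, lower
	 * threshold of 8 set above.
	 */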
1367 
1368 	/*
1369 	 * Disable all unused send rings by setting the 'ring disabled'
1370 	 * bit in the flags field of all the TX send ring control blocks.
1371 	 * These are located in NIC memory.
1372 	 */
1373 	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1374 	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1375 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1376 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1377 		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1378 		vrcb += sizeof(struct bge_rcb);
1379 	}
1380 
1381 	/* Configure TX RCB 0 (we use only the first ring) */
1382 	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1383 	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1384 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1385 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1386 	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1387 	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1388 	if (!(BGE_IS_5705_PLUS(sc)))
1389 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1390 		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1391 
1392 	/* Disable all unused RX return rings */
1393 	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1394 	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1395 		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1396 		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1397 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1398 		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1399 		    BGE_RCB_FLAG_RING_DISABLED));
1400 		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1401 		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1402 		    (i * (sizeof(uint64_t))), 0);
1403 		vrcb += sizeof(struct bge_rcb);
1404 	}
1405 
1406 	/* Initialize RX ring indexes */
1407 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1408 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1409 	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1410 
1411 	/*
1412 	 * Set up RX return ring 0.
1413 	 * Note that the NIC address for RX return rings is 0x00000000.
1414 	 * The return rings live entirely within the host, so the
1415 	 * nicaddr field in the RCB isn't used.
1416 	 */
1417 	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1418 	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1419 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1420 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1421 	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1422 	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1423 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1424 
1425 	/* Set random backoff seed for TX */
1426 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1427 	    IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1428 	    IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1429 	    IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1430 	    BGE_TX_BACKOFF_SEED_MASK);
1431 
1432 	/* Set inter-packet gap */
1433 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1434 
1435 	/*
1436 	 * Specify which ring to use for packets that don't match
1437 	 * any RX rules.
1438 	 */
1439 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1440 
1441 	/*
1442 	 * Configure number of RX lists. One interrupt distribution
1443 	 * list, sixteen active lists, one bad frames class.
1444 	 */
1445 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1446 
1447 	/* Initialize RX list placement stats mask. */
1448 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1449 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1450 
1451 	/* Disable host coalescing until we get it set up */
1452 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1453 
1454 	/* Poll to make sure it's shut down. */
1455 	for (i = 0; i < BGE_TIMEOUT; i++) {
1456 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1457 			break;
1458 		DELAY(10);
1459 	}
1460 
1461 	if (i == BGE_TIMEOUT) {
1462 		device_printf(sc->bge_dev,
1463 		    "host coalescing engine failed to idle\n");
1464 		return (ENXIO);
1465 	}
1466 
1467 	/* Set up host coalescing defaults */
1468 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1469 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1470 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1471 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1472 	if (!(BGE_IS_5705_PLUS(sc))) {
1473 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1474 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1475 	}
1476 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1477 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1478 
1479 	/* Set up address of statistics block */
1480 	if (!(BGE_IS_5705_PLUS(sc))) {
1481 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1482 		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1483 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1484 		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1485 		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1486 		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1487 		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1488 	}
1489 
1490 	/* Set up address of status block */
1491 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1492 	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1493 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1494 	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1495 	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1496 	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1497 
1498 	/* Turn on host coalescing state machine */
1499 	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1500 
1501 	/* Turn on RX BD completion state machine and enable attentions */
1502 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
1503 	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1504 
1505 	/* Turn on RX list placement state machine */
1506 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1507 
1508 	/* Turn on RX list selector state machine. */
1509 	if (!(BGE_IS_5705_PLUS(sc)))
1510 		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1511 
1512 	/* Turn on DMA, clear stats */
1513 	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1514 	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1515 	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1516 	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1517 	    ((sc->bge_flags & BGE_FLAG_TBI) ?
1518 	    BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1519 
1520 	/* Set misc. local control, enable interrupts on attentions */
1521 	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1522 
1523 #ifdef notdef
1524 	/* Assert GPIO pins for PHY reset */
1525 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1526 	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1527 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1528 	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1529 #endif
1530 
1531 	/* Turn on DMA completion state machine */
1532 	if (!(BGE_IS_5705_PLUS(sc)))
1533 		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1534 
1535 	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
1536 
1537 	/* Enable host coalescing bug fix. */
1538 	if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1539 	    sc->bge_asicrev == BGE_ASICREV_BCM5787)
1540 		val |= (1 << 29);
1541 
1542 	/* Turn on write DMA state machine */
1543 	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1544 
1545 	/* Turn on read DMA state machine */
1546 	CSR_WRITE_4(sc, BGE_RDMA_MODE,
1547 	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1548 
1549 	/* Turn on RX data completion state machine */
1550 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1551 
1552 	/* Turn on RX BD initiator state machine */
1553 	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1554 
1555 	/* Turn on RX data and RX BD initiator state machine */
1556 	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1557 
1558 	/* Turn on Mbuf cluster free state machine */
1559 	if (!(BGE_IS_5705_PLUS(sc)))
1560 		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1561 
1562 	/* Turn on send BD completion state machine */
1563 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1564 
1565 	/* Turn on send data completion state machine */
1566 	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1567 
1568 	/* Turn on send data initiator state machine */
1569 	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1570 
1571 	/* Turn on send BD initiator state machine */
1572 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1573 
1574 	/* Turn on send BD selector state machine */
1575 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1576 
1577 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1578 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1579 	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1580 
1581 	/* ack/clear link change events */
1582 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1583 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1584 	    BGE_MACSTAT_LINK_CHANGED);
1585 	CSR_WRITE_4(sc, BGE_MI_STS, 0);
1586 
1587 	/* Enable PHY auto polling (for MII/GMII only) */
1588 	if (sc->bge_flags & BGE_FLAG_TBI) {
1589 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1590 	} else {
1591 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1592 		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1593 		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
1594 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1595 			    BGE_EVTENB_MI_INTERRUPT);
1596 	}
1597 
1598 	/*
1599 	 * Clear any pending link state attention.
1600 	 * Otherwise some link state change events may be lost until attention
1601 	 * is cleared by bge_intr() -> bge_link_upd() sequence.
1602 	 * It's not necessary on newer BCM chips - perhaps enabling link
1603 	 * state change attentions implies clearing pending attention.
1604 	 */
1605 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1606 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1607 	    BGE_MACSTAT_LINK_CHANGED);
1608 
1609 	/* Enable link state change attentions. */
1610 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1611 
1612 	return (0);
1613 }
1614 
1615 const struct bge_revision *
1616 bge_lookup_rev(uint32_t chipid)
1617 {
1618 	const struct bge_revision *br;
1619 
1620 	for (br = bge_revisions; br->br_name != NULL; br++) {
1621 		if (br->br_chipid == chipid)
1622 			return (br);
1623 	}
1624 
1625 	for (br = bge_majorrevs; br->br_name != NULL; br++) {
1626 		if (br->br_chipid == BGE_ASICREV(chipid))
1627 			return (br);
1628 	}
1629 
1630 	return (NULL);
1631 }
1632 
1633 const struct bge_vendor *
1634 bge_lookup_vendor(uint16_t vid)
1635 {
1636 	const struct bge_vendor *v;
1637 
1638 	for (v = bge_vendors; v->v_name != NULL; v++)
1639 		if (v->v_id == vid)
1640 			return (v);
1641 
1642 	panic("%s: unknown vendor %d", __func__, vid);
1643 	return (NULL);
1644 }
1645 
1646 /*
1647  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1648  * against our list and return its name if we find a match.
1649  *
1650  * Note that since the Broadcom controller contains VPD support, we
1651  * try to get the device name string from the controller itself instead
1652  * of the compiled-in string. It guarantees we'll always announce the
1653  * right product name. We fall back to the compiled-in string when
1654  * VPD is unavailable or corrupt.
1655  */
1656 static int
1657 bge_probe(device_t dev)
1658 {
1659 	struct bge_type *t = bge_devs;
1660 	struct bge_softc *sc = device_get_softc(dev);
1661 	uint16_t vid, did;
1662 
1663 	sc->bge_dev = dev;
1664 	vid = pci_get_vendor(dev);
1665 	did = pci_get_device(dev);
1666 	while (t->bge_vid != 0) {
1667 		if ((vid == t->bge_vid) && (did == t->bge_did)) {
1668 			char model[64], buf[96];
1669 			const struct bge_revision *br;
1670 			const struct bge_vendor *v;
1671 			const char *pname;
1672 			uint32_t id;
1673 
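			/*
			 * The ASIC revision is kept in the upper 16 bits
			 * of the PCI misc. control register.
			 */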
1674 			id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1675 			    BGE_PCIMISCCTL_ASICREV;
1676 			br = bge_lookup_rev(id);
1677 			v = bge_lookup_vendor(vid);
1678 			if (pci_get_vpd_ident(dev, &pname))
1679 				snprintf(model, 64, "%s %s",
1680 				    v->v_name,
1681 				    br != NULL ? br->br_name :
1682 					"NetXtreme Ethernet Controller");
1683 			else
1684 				snprintf(model, 64, "%s", pname);
1685 			snprintf(buf, 96, "%s, %sASIC rev. %#04x", model,
1686 			    br != NULL ? "" : "unknown ", id >> 16);
1687 			device_set_desc_copy(dev, buf);
1688 			if (pci_get_subvendor(dev) == DELL_VENDORID)
1689 				sc->bge_flags |= BGE_FLAG_NO_3LED;
1690 			return (0);
1691 		}
1692 		t++;
1693 	}
1694 
1695 	return (ENXIO);
1696 }
1697 
1698 static void
1699 bge_dma_free(struct bge_softc *sc)
1700 {
1701 	int i;
1702 
1703 	/* Destroy DMA maps for RX buffers. */
1704 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1705 		if (sc->bge_cdata.bge_rx_std_dmamap[i])
1706 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1707 			    sc->bge_cdata.bge_rx_std_dmamap[i]);
1708 	}
1709 
1710 	/* Destroy DMA maps for jumbo RX buffers. */
1711 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1712 		if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1713 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1714 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1715 	}
1716 
1717 	/* Destroy DMA maps for TX buffers. */
1718 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1719 		if (sc->bge_cdata.bge_tx_dmamap[i])
1720 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1721 			    sc->bge_cdata.bge_tx_dmamap[i]);
1722 	}
1723 
1724 	if (sc->bge_cdata.bge_mtag)
1725 		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
1726 
1727 
1728 	/* Destroy standard RX ring. */
1729 	if (sc->bge_cdata.bge_rx_std_ring_map)
1730 		bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1731 		    sc->bge_cdata.bge_rx_std_ring_map);
1732 	if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
1733 		bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1734 		    sc->bge_ldata.bge_rx_std_ring,
1735 		    sc->bge_cdata.bge_rx_std_ring_map);
1736 
1737 	if (sc->bge_cdata.bge_rx_std_ring_tag)
1738 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1739 
1740 	/* Destroy jumbo RX ring. */
1741 	if (sc->bge_cdata.bge_rx_jumbo_ring_map)
1742 		bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1743 		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1744 
1745 	if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
1746 	    sc->bge_ldata.bge_rx_jumbo_ring)
1747 		bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1748 		    sc->bge_ldata.bge_rx_jumbo_ring,
1749 		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1750 
1751 	if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1752 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1753 
1754 	/* Destroy RX return ring. */
1755 	if (sc->bge_cdata.bge_rx_return_ring_map)
1756 		bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1757 		    sc->bge_cdata.bge_rx_return_ring_map);
1758 
1759 	if (sc->bge_cdata.bge_rx_return_ring_map &&
1760 	    sc->bge_ldata.bge_rx_return_ring)
1761 		bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1762 		    sc->bge_ldata.bge_rx_return_ring,
1763 		    sc->bge_cdata.bge_rx_return_ring_map);
1764 
1765 	if (sc->bge_cdata.bge_rx_return_ring_tag)
1766 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
1767 
1768 	/* Destroy TX ring. */
1769 	if (sc->bge_cdata.bge_tx_ring_map)
1770 		bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
1771 		    sc->bge_cdata.bge_tx_ring_map);
1772 
1773 	if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
1774 		bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
1775 		    sc->bge_ldata.bge_tx_ring,
1776 		    sc->bge_cdata.bge_tx_ring_map);
1777 
1778 	if (sc->bge_cdata.bge_tx_ring_tag)
1779 		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
1780 
1781 	/* Destroy status block. */
1782 	if (sc->bge_cdata.bge_status_map)
1783 		bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
1784 		    sc->bge_cdata.bge_status_map);
1785 
1786 	if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
1787 		bus_dmamem_free(sc->bge_cdata.bge_status_tag,
1788 		    sc->bge_ldata.bge_status_block,
1789 		    sc->bge_cdata.bge_status_map);
1790 
1791 	if (sc->bge_cdata.bge_status_tag)
1792 		bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1793 
1794 	/* Destroy statistics block. */
1795 	if (sc->bge_cdata.bge_stats_map)
1796 		bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
1797 		    sc->bge_cdata.bge_stats_map);
1798 
1799 	if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
1800 		bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
1801 		    sc->bge_ldata.bge_stats,
1802 		    sc->bge_cdata.bge_stats_map);
1803 
1804 	if (sc->bge_cdata.bge_stats_tag)
1805 		bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1806 
1807 	/* Destroy the parent tag. */
1808 	if (sc->bge_cdata.bge_parent_tag)
1809 		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1810 }
1811 
1812 static int
1813 bge_dma_alloc(device_t dev)
1814 {
1815 	struct bge_dmamap_arg ctx;
1816 	struct bge_softc *sc;
1817 	int i, error;
1818 
1819 	sc = device_get_softc(dev);
1820 
1821 	/*
1822 	 * Allocate the parent bus DMA tag appropriate for PCI.
1823 	 */
1824 	error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),/* parent */
1825 			1, 0,			/* alignment, boundary */
1826 			BUS_SPACE_MAXADDR,	/* lowaddr */
1827 			BUS_SPACE_MAXADDR,	/* highaddr */
1828 			NULL, NULL,		/* filter, filterarg */
1829 			MAXBSIZE, BGE_NSEG_NEW,	/* maxsize, nsegments */
1830 			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1831 			0,			/* flags */
1832 			NULL, NULL,		/* lockfunc, lockarg */
1833 			&sc->bge_cdata.bge_parent_tag);
1834 
1835 	if (error != 0) {
1836 		device_printf(sc->bge_dev,
1837 		    "could not allocate parent dma tag\n");
1838 		return (ENOMEM);
1839 	}
1840 
1841 	/*
1842 	 * Create tag for RX mbufs.
1843 	 */
1844 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
1845 	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1846 	    NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
1847 	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
1848 
1849 	if (error) {
1850 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1851 		return (ENOMEM);
1852 	}
1853 
1854 	/* Create DMA maps for RX buffers. */
1855 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1856 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1857 			    &sc->bge_cdata.bge_rx_std_dmamap[i]);
1858 		if (error) {
1859 			device_printf(sc->bge_dev,
1860 			    "can't create DMA map for RX\n");
1861 			return (ENOMEM);
1862 		}
1863 	}
1864 
1865 	/* Create DMA maps for TX buffers. */
1866 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1867 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1868 			    &sc->bge_cdata.bge_tx_dmamap[i]);
1869 		if (error) {
1870 			device_printf(sc->bge_dev,
1871 			    "can't create DMA map for RX\n");
1872 			return (ENOMEM);
1873 		}
1874 	}
1875 
1876 	/* Create tag for standard RX ring. */
1877 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1878 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1879 	    NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1880 	    NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1881 
1882 	if (error) {
1883 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1884 		return (ENOMEM);
1885 	}
1886 
1887 	/* Allocate DMA'able memory for standard RX ring. */
1888 	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1889 	    (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1890 	    &sc->bge_cdata.bge_rx_std_ring_map);
1891 	if (error)
1892 		return (ENOMEM);
1893 
1894 	bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1895 
1896 	/* Load the address of the standard RX ring. */
1897 	ctx.bge_maxsegs = 1;
1898 	ctx.sc = sc;
1899 
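	/*
	 * bge_dma_map_addr() records the mapped bus address in
	 * ctx.bge_busaddr; it is copied into the softc once the load succeeds.
	 */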
1900 	error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
1901 	    sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
1902 	    BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1903 
1904 	if (error)
1905 		return (ENOMEM);
1906 
1907 	sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
1908 
1909 	/* Create tags for jumbo mbufs. */
1910 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
1911 		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1912 		    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1913 		    NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
1914 		    0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
1915 		if (error) {
1916 			device_printf(sc->bge_dev,
1917 			    "could not allocate jumbo dma tag\n");
1918 			return (ENOMEM);
1919 		}
1920 
1921 		/* Create tag for jumbo RX ring. */
1922 		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1923 		    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1924 		    NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
1925 		    NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
1926 
1927 		if (error) {
1928 			device_printf(sc->bge_dev,
1929 			    "could not allocate jumbo ring dma tag\n");
1930 			return (ENOMEM);
1931 		}
1932 
1933 		/* Allocate DMA'able memory for jumbo RX ring. */
1934 		error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1935 		    (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
1936 		    BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1937 		    &sc->bge_cdata.bge_rx_jumbo_ring_map);
1938 		if (error)
1939 			return (ENOMEM);
1940 
1941 		/* Load the address of the jumbo RX ring. */
1942 		ctx.bge_maxsegs = 1;
1943 		ctx.sc = sc;
1944 
1945 		error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1946 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
1947 		    sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
1948 		    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1949 
1950 		if (error)
1951 			return (ENOMEM);
1952 
1953 		sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
1954 
1955 		/* Create DMA maps for jumbo RX buffers. */
1956 		for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1957 			error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
1958 				    0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1959 			if (error) {
1960 				device_printf(sc->bge_dev,
1961 				    "can't create DMA map for jumbo RX\n");
1962 				return (ENOMEM);
1963 			}
1964 		}
1965 
1966 	}
1967 
1968 	/* Create tag for RX return ring. */
1969 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1970 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1971 	    NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
1972 	    NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
1973 
1974 	if (error) {
1975 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1976 		return (ENOMEM);
1977 	}
1978 
1979 	/* Allocate DMA'able memory for RX return ring. */
1980 	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
1981 	    (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
1982 	    &sc->bge_cdata.bge_rx_return_ring_map);
1983 	if (error)
1984 		return (ENOMEM);
1985 
1986 	bzero((char *)sc->bge_ldata.bge_rx_return_ring,
1987 	    BGE_RX_RTN_RING_SZ(sc));
1988 
1989 	/* Load the address of the RX return ring. */
1990 	ctx.bge_maxsegs = 1;
1991 	ctx.sc = sc;
1992 
1993 	error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
1994 	    sc->bge_cdata.bge_rx_return_ring_map,
1995 	    sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
1996 	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1997 
1998 	if (error)
1999 		return (ENOMEM);
2000 
2001 	sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
2002 
2003 	/* Create tag for TX ring. */
2004 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2005 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2006 	    NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
2007 	    &sc->bge_cdata.bge_tx_ring_tag);
2008 
2009 	if (error) {
2010 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
2011 		return (ENOMEM);
2012 	}
2013 
2014 	/* Allocate DMA'able memory for TX ring. */
2015 	error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
2016 	    (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
2017 	    &sc->bge_cdata.bge_tx_ring_map);
2018 	if (error)
2019 		return (ENOMEM);
2020 
2021 	bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
2022 
2023 	/* Load the address of the TX ring. */
2024 	ctx.bge_maxsegs = 1;
2025 	ctx.sc = sc;
2026 
2027 	error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
2028 	    sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
2029 	    BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2030 
2031 	if (error)
2032 		return (ENOMEM);
2033 
2034 	sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
2035 
2036 	/* Create tag for status block. */
2037 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2038 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2039 	    NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
2040 	    NULL, NULL, &sc->bge_cdata.bge_status_tag);
2041 
2042 	if (error) {
2043 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
2044 		return (ENOMEM);
2045 	}
2046 
2047 	/* Allocate DMA'able memory for status block. */
2048 	error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
2049 	    (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
2050 	    &sc->bge_cdata.bge_status_map);
2051 	if (error)
2052 		return (ENOMEM);
2053 
2054 	bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2055 
2056 	/* Load the address of the status block. */
2057 	ctx.sc = sc;
2058 	ctx.bge_maxsegs = 1;
2059 
2060 	error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2061 	    sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2062 	    BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2063 
2064 	if (error)
2065 		return (ENOMEM);
2066 
2067 	sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2068 
2069 	/* Create tag for statistics block. */
2070 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2071 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2072 	    NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2073 	    &sc->bge_cdata.bge_stats_tag);
2074 
2075 	if (error) {
2076 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
2077 		return (ENOMEM);
2078 	}
2079 
2080 	/* Allocate DMA'able memory for statistics block. */
2081 	error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2082 	    (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2083 	    &sc->bge_cdata.bge_stats_map);
2084 	if (error)
2085 		return (ENOMEM);
2086 
2087 	bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2088 
2089 	/* Load the address of the statistics block. */
2090 	ctx.sc = sc;
2091 	ctx.bge_maxsegs = 1;
2092 
2093 	error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2094 	    sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2095 	    BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2096 
2097 	if (error)
2098 		return (ENOMEM);
2099 
2100 	sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2101 
2102 	return (0);
2103 }
2104 
2105 /*
2106  * Return true if this device has more than one port.
2107  */
2108 static int
2109 bge_has_multiple_ports(struct bge_softc *sc)
2110 {
2111 	device_t dev = sc->bge_dev;
2112 	u_int b, s, f, fscan;
2113 
2114 	b = pci_get_bus(dev);
2115 	s = pci_get_slot(dev);
2116 	f = pci_get_function(dev);
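	/*
	 * Any other populated function at the same bus/slot means this
	 * controller exposes more than one port.
	 */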
2117 	for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2118 		if (fscan != f && pci_find_bsf(b, s, fscan) != NULL)
2119 			return (1);
2120 	return (0);
2121 }
2122 
2123 /*
2124  * Return true if MSI can be used with this device.
2125  */
2126 static int
2127 bge_can_use_msi(struct bge_softc *sc)
2128 {
2129 	int can_use_msi = 0;
2130 
2131 	switch (sc->bge_asicrev) {
2132 	case BGE_ASICREV_BCM5714:
2133 		/*
2134 		 * Apparently, MSI doesn't work when this chip is configured
2135 		 * in single-port mode.
2136 		 */
2137 		if (bge_has_multiple_ports(sc))
2138 			can_use_msi = 1;
2139 		break;
2140 	case BGE_ASICREV_BCM5750:
2141 		if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2142 		    sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2143 			can_use_msi = 1;
2144 		break;
2145 	case BGE_ASICREV_BCM5752:
2146 	case BGE_ASICREV_BCM5780:
2147 		can_use_msi = 1;
2148 		break;
2149 	}
2150 	return (can_use_msi);
2151 }
2152 
2153 static int
2154 bge_attach(device_t dev)
2155 {
2156 	struct ifnet *ifp;
2157 	struct bge_softc *sc;
2158 	uint32_t hwcfg = 0;
2159 	uint32_t mac_tmp = 0;
2160 	u_char eaddr[6];
2161 	int error = 0, msicount, rid, trys, reg;
2162 
2163 	sc = device_get_softc(dev);
2164 	sc->bge_dev = dev;
2165 
2166 	/*
2167 	 * Map control/status registers.
2168 	 */
2169 	pci_enable_busmaster(dev);
2170 
2171 	rid = BGE_PCI_BAR0;
2172 	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2173 	    RF_ACTIVE|PCI_RF_DENSE);
2174 
2175 	if (sc->bge_res == NULL) {
2176 		device_printf(sc->bge_dev, "couldn't map memory\n");
2177 		error = ENXIO;
2178 		goto fail;
2179 	}
2180 
2181 	sc->bge_btag = rman_get_bustag(sc->bge_res);
2182 	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2183 
2184 	/* Save ASIC rev. */
2185 
2186 	sc->bge_chipid =
2187 	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2188 	    BGE_PCIMISCCTL_ASICREV;
2189 	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2190 	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2191 
2192 	/* Save chipset family. */
2193 	switch (sc->bge_asicrev) {
2194 	case BGE_ASICREV_BCM5700:
2195 	case BGE_ASICREV_BCM5701:
2196 	case BGE_ASICREV_BCM5703:
2197 	case BGE_ASICREV_BCM5704:
2198 		sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2199 		break;
2200 	case BGE_ASICREV_BCM5714_A0:
2201 	case BGE_ASICREV_BCM5780:
2202 	case BGE_ASICREV_BCM5714:
2203 		sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
2204 		/* FALLTHRU */
2205 	case BGE_ASICREV_BCM5750:
2206 	case BGE_ASICREV_BCM5752:
2207 	case BGE_ASICREV_BCM5755:
2208 	case BGE_ASICREV_BCM5787:
2209 		sc->bge_flags |= BGE_FLAG_575X_PLUS;
2210 		/* FALLTHRU */
2211 	case BGE_ASICREV_BCM5705:
2212 		sc->bge_flags |= BGE_FLAG_5705_PLUS;
2213 		break;
2214 	}
2215 
2216 	/* Set various bug flags. */
2217 	if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2218 	    sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2219 		sc->bge_flags |= BGE_FLAG_ADC_BUG;
2220 	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2221 		sc->bge_flags |= BGE_FLAG_5704_A0_BUG;
2222 	if (BGE_IS_5705_PLUS(sc)) {
2223 		if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2224 		    sc->bge_asicrev == BGE_ASICREV_BCM5787)
2225 			sc->bge_flags |= BGE_FLAG_JITTER_BUG;
2226 		else
2227 			sc->bge_flags |= BGE_FLAG_BER_BUG;
2228 	}
2229 
2230 	/*
2231 	 * Check if this is a PCI-X or PCI Express device.
2232 	 */
2233 #if __FreeBSD_version > 700010
2234 	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
2235 		/*
2236 		 * Found a PCI Express capabilities register, this
2237 		 * must be a PCI Express device.
2238 		 */
2239 		if (reg != 0)
2240 			sc->bge_flags |= BGE_FLAG_PCIE;
2241 	} else if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0) {
2242 		if (reg != 0)
2243 			sc->bge_flags |= BGE_FLAG_PCIX;
2244 	}
2245 
2246 #else
2247 	if (BGE_IS_5705_PLUS(sc)) {
2248 		reg = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2249 		if ((reg & 0xff) == BGE_PCIE_CAPID)
2250 			sc->bge_flags |= BGE_FLAG_PCIE;
2251 	} else {
2252 		/*
2253 		 * Check if the device is in PCI-X Mode.
2254 		 * (This bit is not valid on PCI Express controllers.)
2255 		 */
2256 		if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
2257 		    BGE_PCISTATE_PCI_BUSMODE) == 0)
2258 			sc->bge_flags |= BGE_FLAG_PCIX;
2259 	}
2260 #endif
2261 
2262 	/*
2263 	 * Allocate the interrupt, using MSI if possible.  These devices
2264 	 * support 8 MSI messages, but only the first one is used in
2265 	 * normal operation.
2266 	 */
2267 	if (bge_can_use_msi(sc)) {
2268 		msicount = pci_msi_count(dev);
2269 		if (msicount > 1)
2270 			msicount = 1;
2271 	} else
2272 		msicount = 0;
2273 	if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
2274 		rid = 1;
2275 		sc->bge_flags |= BGE_FLAG_MSI;
2276 	} else
2277 		rid = 0;
2278 
2279 	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2280 	    RF_SHAREABLE | RF_ACTIVE);
2281 
2282 	if (sc->bge_irq == NULL) {
2283 		device_printf(sc->bge_dev, "couldn't map interrupt\n");
2284 		error = ENXIO;
2285 		goto fail;
2286 	}
2287 
2288 	BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2289 
2290 	/* Try to reset the chip. */
2291 	if (bge_reset(sc)) {
2292 		device_printf(sc->bge_dev, "chip reset failed\n");
2293 		bge_release_resources(sc);
2294 		error = ENXIO;
2295 		goto fail;
2296 	}
2297 
2298 	sc->bge_asf_mode = 0;
2299 	if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2300 	    == BGE_MAGIC_NUMBER)) {
2301 		if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2302 		    & BGE_HWCFG_ASF) {
2303 			sc->bge_asf_mode |= ASF_ENABLE;
2304 			sc->bge_asf_mode |= ASF_STACKUP;
2305 			if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
2306 				sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2307 			}
2308 		}
2309 	}
2310 
2311 	/* Try to reset the chip again the nice way. */
2312 	bge_stop_fw(sc);
2313 	bge_sig_pre_reset(sc, BGE_RESET_STOP);
2314 	if (bge_reset(sc)) {
2315 		device_printf(sc->bge_dev, "chip reset failed\n");
2316 		bge_release_resources(sc);
2317 		error = ENXIO;
2318 		goto fail;
2319 	}
2320 
2321 	bge_sig_legacy(sc, BGE_RESET_STOP);
2322 	bge_sig_post_reset(sc, BGE_RESET_STOP);
2323 
2324 	if (bge_chipinit(sc)) {
2325 		device_printf(sc->bge_dev, "chip initialization failed\n");
2326 		bge_release_resources(sc);
2327 		error = ENXIO;
2328 		goto fail;
2329 	}
2330 
2331 	/*
2332 	 * Get station address from NIC memory or, failing that, the EEPROM.
2333 	 */
2334 	mac_tmp = bge_readmem_ind(sc, 0x0c14);
2335 	if ((mac_tmp >> 16) == 0x484b) {
2336 		eaddr[0] = (u_char)(mac_tmp >> 8);
2337 		eaddr[1] = (u_char)mac_tmp;
2338 		mac_tmp = bge_readmem_ind(sc, 0x0c18);
2339 		eaddr[2] = (u_char)(mac_tmp >> 24);
2340 		eaddr[3] = (u_char)(mac_tmp >> 16);
2341 		eaddr[4] = (u_char)(mac_tmp >> 8);
2342 		eaddr[5] = (u_char)mac_tmp;
2343 	} else if (bge_read_eeprom(sc, eaddr,
2344 	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2345 		device_printf(sc->bge_dev, "failed to read station address\n");
2346 		bge_release_resources(sc);
2347 		error = ENXIO;
2348 		goto fail;
2349 	}
2350 
2351 	/* 5705 limits RX return ring to 512 entries. */
2352 	if (BGE_IS_5705_PLUS(sc))
2353 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2354 	else
2355 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2356 
2357 	if (bge_dma_alloc(dev)) {
2358 		device_printf(sc->bge_dev,
2359 		    "failed to allocate DMA resources\n");
2360 		bge_release_resources(sc);
2361 		error = ENXIO;
2362 		goto fail;
2363 	}
2364 
2365 	/* Set default tuneable values. */
2366 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2367 	sc->bge_rx_coal_ticks = 150;
2368 	sc->bge_tx_coal_ticks = 150;
2369 	sc->bge_rx_max_coal_bds = 10;
2370 	sc->bge_tx_max_coal_bds = 10;
2371 
2372 	/* Set up ifnet structure */
2373 	ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2374 	if (ifp == NULL) {
2375 		device_printf(sc->bge_dev, "failed to if_alloc()\n");
2376 		bge_release_resources(sc);
2377 		error = ENXIO;
2378 		goto fail;
2379 	}
2380 	ifp->if_softc = sc;
2381 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2382 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2383 	ifp->if_ioctl = bge_ioctl;
2384 	ifp->if_start = bge_start;
2385 	ifp->if_init = bge_init;
2386 	ifp->if_mtu = ETHERMTU;
2387 	ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2388 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2389 	IFQ_SET_READY(&ifp->if_snd);
2390 	ifp->if_hwassist = BGE_CSUM_FEATURES;
2391 	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2392 	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM;
2393 	ifp->if_capenable = ifp->if_capabilities;
2394 #ifdef DEVICE_POLLING
2395 	ifp->if_capabilities |= IFCAP_POLLING;
2396 #endif
2397 
2398 	/*
2399 	 * 5700 B0 chips do not support checksumming correctly due
2400 	 * to hardware bugs.
2401 	 */
2402 	if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2403 		ifp->if_capabilities &= ~IFCAP_HWCSUM;
2404 		ifp->if_capenable &= ~IFCAP_HWCSUM;
2405 		ifp->if_hwassist = 0;
2406 	}
2407 
2408 	/*
2409 	 * Figure out what sort of media we have by checking the
2410 	 * hardware config word in the first 32k of NIC internal memory,
2411 	 * or fall back to examining the EEPROM if necessary.
2412 	 * Note: on some BCM5700 cards, this value appears to be unset.
2413 	 * If that's the case, we have to rely on identifying the NIC
2414 	 * by its PCI subsystem ID, as we do below for the SysKonnect
2415 	 * SK-9D41.
2416 	 */
2417 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2418 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2419 	else {
2420 		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2421 		    sizeof(hwcfg))) {
2422 			device_printf(sc->bge_dev, "failed to read EEPROM\n");
2423 			bge_release_resources(sc);
2424 			error = ENXIO;
2425 			goto fail;
2426 		}
2427 		hwcfg = ntohl(hwcfg);
2428 	}
2429 
2430 	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2431 		sc->bge_flags |= BGE_FLAG_TBI;
2432 
2433 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
2434 	if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2435 		sc->bge_flags |= BGE_FLAG_TBI;
2436 
2437 	if (sc->bge_flags & BGE_FLAG_TBI) {
2438 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2439 		    bge_ifmedia_upd, bge_ifmedia_sts);
2440 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2441 		ifmedia_add(&sc->bge_ifmedia,
2442 		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2443 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2444 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2445 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2446 	} else {
2447 		/*
2448 		 * Do transceiver setup and tell the firmware the
2449 		 * driver is down so we can try to access the PHY during
2450 		 * the probe if ASF is running.  Retry a couple of times
2451 		 * if we get a conflict with the ASF firmware accessing
2452 		 * the PHY.
2453 		 */
2454 		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2455 		trys = 0;
2456 again:
2457 		bge_asf_driver_up(sc);
2458 
2459 		if (mii_phy_probe(dev, &sc->bge_miibus,
2460 		    bge_ifmedia_upd, bge_ifmedia_sts)) {
2461 			if (trys++ < 4) {
2462 				device_printf(sc->bge_dev, "Try again\n");
2463 				bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR, BMCR_RESET);
2464 				goto again;
2465 			}
2466 
2467 			device_printf(sc->bge_dev, "MII without any PHY!\n");
2468 			bge_release_resources(sc);
2469 			error = ENXIO;
2470 			goto fail;
2471 		}
2472 
2473 		/*
2474 		 * Now tell the firmware we are going up after probing the PHY
2475 		 */
2476 		if (sc->bge_asf_mode & ASF_STACKUP)
2477 			BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2478 	}
2479 
2480 	/*
2481 	 * When using the BCM5701 in PCI-X mode, data corruption has
2482 	 * been observed in the first few bytes of some received packets.
2483 	 * Aligning the packet buffer in memory eliminates the corruption.
2484 	 * Unfortunately, this misaligns the packet payloads.  On platforms
2485 	 * which do not support unaligned accesses, we will realign the
2486 	 * payloads by copying the received packets.
2487 	 */
2488 	if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2489 	    sc->bge_flags & BGE_FLAG_PCIX)
2490 		sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2491 
2492 	/*
2493 	 * Call MI attach routine.
2494 	 */
2495 	ether_ifattach(ifp, eaddr);
2496 	callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
2497 
2498 	/*
2499 	 * Hookup IRQ last.
2500 	 */
2501 	error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2502 	   bge_intr, sc, &sc->bge_intrhand);
2503 
2504 	if (error) {
2505 		bge_detach(dev);
2506 		device_printf(sc->bge_dev, "couldn't set up irq\n");
2507 	}
2508 
2509 	bge_add_sysctls(sc);
2510 
2511 fail:
2512 	return (error);
2513 }
2514 
2515 static int
2516 bge_detach(device_t dev)
2517 {
2518 	struct bge_softc *sc;
2519 	struct ifnet *ifp;
2520 
2521 	sc = device_get_softc(dev);
2522 	ifp = sc->bge_ifp;
2523 
2524 #ifdef DEVICE_POLLING
2525 	if (ifp->if_capenable & IFCAP_POLLING)
2526 		ether_poll_deregister(ifp);
2527 #endif
2528 
2529 	BGE_LOCK(sc);
2530 	bge_stop(sc);
2531 	bge_reset(sc);
2532 	BGE_UNLOCK(sc);
2533 
2534 	callout_drain(&sc->bge_stat_ch);
2535 
2536 	ether_ifdetach(ifp);
2537 
2538 	if (sc->bge_flags & BGE_FLAG_TBI) {
2539 		ifmedia_removeall(&sc->bge_ifmedia);
2540 	} else {
2541 		bus_generic_detach(dev);
2542 		device_delete_child(dev, sc->bge_miibus);
2543 	}
2544 
2545 	bge_release_resources(sc);
2546 
2547 	return (0);
2548 }
2549 
2550 static void
2551 bge_release_resources(struct bge_softc *sc)
2552 {
2553 	device_t dev;
2554 
2555 	dev = sc->bge_dev;
2556 
2557 	if (sc->bge_intrhand != NULL)
2558 		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2559 
2560 	if (sc->bge_irq != NULL)
2561 		bus_release_resource(dev, SYS_RES_IRQ,
2562 		    sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
2563 
2564 	if (sc->bge_flags & BGE_FLAG_MSI)
2565 		pci_release_msi(dev);
2566 
2567 	if (sc->bge_res != NULL)
2568 		bus_release_resource(dev, SYS_RES_MEMORY,
2569 		    BGE_PCI_BAR0, sc->bge_res);
2570 
2571 	if (sc->bge_ifp != NULL)
2572 		if_free(sc->bge_ifp);
2573 
2574 	bge_dma_free(sc);
2575 
2576 	if (mtx_initialized(&sc->bge_mtx))	/* XXX */
2577 		BGE_LOCK_DESTROY(sc);
2578 }
2579 
2580 static int
2581 bge_reset(struct bge_softc *sc)
2582 {
2583 	device_t dev;
2584 	uint32_t cachesize, command, pcistate, reset;
2585 	void (*write_op)(struct bge_softc *, int, int);
2586 	int i, val = 0;
2587 
2588 	dev = sc->bge_dev;
2589 
2590 	if (BGE_IS_5705_PLUS(sc) && !BGE_IS_5714_FAMILY(sc)) {
2591 		if (sc->bge_flags & BGE_FLAG_PCIE)
2592 			write_op = bge_writemem_direct;
2593 		else
2594 			write_op = bge_writemem_ind;
2595 	} else
2596 		write_op = bge_writereg_ind;
2597 
2598 	/* Save some important PCI state. */
2599 	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2600 	command = pci_read_config(dev, BGE_PCI_CMD, 4);
2601 	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2602 
2603 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2604 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2605 	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2606 
2607 	/* Disable fastboot on controllers that support it. */
2608 	if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
2609 	    sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2610 	    sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2611 		if (bootverbose)
2612 			device_printf(sc->bge_dev, "Disabling fastboot\n");
2613 		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2614 	}
2615 
2616 	/*
2617 	 * Write the magic number to SRAM at offset 0xB50.
2618 	 * When firmware finishes its initialization it will
2619 	 * write ~BGE_MAGIC_NUMBER to the same location.
2620 	 */
2621 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2622 
2623 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2624 
2625 	/* XXX: Broadcom Linux driver. */
2626 	if (sc->bge_flags & BGE_FLAG_PCIE) {
2627 		if (CSR_READ_4(sc, 0x7e2c) == 0x60)	/* PCIE 1.0 */
2628 			CSR_WRITE_4(sc, 0x7e2c, 0x20);
2629 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2630 			/* Prevent PCIE link training during global reset */
2631 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2632 			reset |= (1<<29);
2633 		}
2634 	}
2635 
2636 	/*
2637 	 * Set GPHY Power Down Override to leave GPHY
2638 	 * powered up in D0 uninitialized.
2639 	 */
2640 	if (BGE_IS_5705_PLUS(sc))
2641 		reset |= 0x04000000;
2642 
2643 	/* Issue global reset */
2644 	write_op(sc, BGE_MISC_CFG, reset);
2645 
2646 	DELAY(1000);
2647 
2648 	/* XXX: Broadcom Linux driver. */
2649 	if (sc->bge_flags & BGE_FLAG_PCIE) {
2650 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2651 			uint32_t v;
2652 
2653 			DELAY(500000); /* wait for link training to complete */
2654 			v = pci_read_config(dev, 0xc4, 4);
2655 			pci_write_config(dev, 0xc4, v | (1<<15), 4);
2656 		}
2657 		/*
2658 		 * Set PCIE max payload size to 128 bytes and clear error
2659 		 * status.
2660 		 */
2661 		pci_write_config(dev, 0xd8, 0xf5000, 4);
2662 	}
2663 
2664 	/* Reset some of the PCI state that got zapped by reset. */
2665 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2666 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2667 	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2668 	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2669 	pci_write_config(dev, BGE_PCI_CMD, command, 4);
2670 	write_op(sc, BGE_MISC_CFG, (65 << 1));
2671 
2672 	/* Re-enable MSI, if necessary, and enable the memory arbiter. */
2673 	if (BGE_IS_5714_FAMILY(sc)) {
2674 		uint32_t val;
2675 
2676 		/* This chip disables MSI on reset. */
2677 		if (sc->bge_flags & BGE_FLAG_MSI) {
2678 			val = pci_read_config(dev, BGE_PCI_MSI_CTL, 2);
2679 			pci_write_config(dev, BGE_PCI_MSI_CTL,
2680 			    val | PCIM_MSICTRL_MSI_ENABLE, 2);
2681 			val = CSR_READ_4(sc, BGE_MSI_MODE);
2682 			CSR_WRITE_4(sc, BGE_MSI_MODE,
2683 			    val | BGE_MSIMODE_ENABLE);
2684 		}
2685 		val = CSR_READ_4(sc, BGE_MARB_MODE);
2686 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2687 	} else
2688 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2689 
2690 	/*
2691 	 * Poll until we see the 1's complement of the magic number.
2692 	 * This indicates that the firmware initialization
2693 	 * is complete.
2694 	 */
2695 	for (i = 0; i < BGE_TIMEOUT; i++) {
2696 		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2697 		if (val == ~BGE_MAGIC_NUMBER)
2698 			break;
2699 		DELAY(10);
2700 	}
2701 
2702 	if (i == BGE_TIMEOUT) {
2703 		device_printf(sc->bge_dev, "firmware handshake timed out, "
2704 		    "found 0x%08x\n", val);
2705 	}
2706 
2707 	/*
2708 	 * XXX Wait for the value of the PCISTATE register to
2709 	 * return to its original pre-reset state. This is a
2710 	 * fairly good indicator of reset completion. If we don't
2711 	 * wait for the reset to fully complete, trying to read
2712 	 * from the device's non-PCI registers may yield garbage
2713 	 * results.
2714 	 */
2715 	for (i = 0; i < BGE_TIMEOUT; i++) {
2716 		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2717 			break;
2718 		DELAY(10);
2719 	}
2720 
2721 	if (sc->bge_flags & BGE_FLAG_PCIE) {
2722 		reset = bge_readmem_ind(sc, 0x7c00);
2723 		bge_writemem_ind(sc, 0x7c00, reset | (1 << 25));
2724 	}
2725 
2726 	/* Fix up byte swapping. */
2727 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
2728 	    BGE_MODECTL_BYTESWAP_DATA);
2729 
2730 	/* Tell the ASF firmware we are up */
2731 	if (sc->bge_asf_mode & ASF_STACKUP)
2732 		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2733 
2734 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2735 
2736 	/*
2737 	 * The 5704 in TBI mode apparently needs some special
2738 	 * adjustment to ensure the SERDES drive level is set
2739 	 * to 1.2V.
2740 	 */
2741 	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
2742 	    sc->bge_flags & BGE_FLAG_TBI) {
2743 		uint32_t serdescfg;
2744 
2745 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2746 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
2747 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2748 	}
2749 
2750 	/* XXX: Broadcom Linux driver. */
2751 	if (sc->bge_flags & BGE_FLAG_PCIE &&
2752 	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2753 		uint32_t v;
2754 
2755 		v = CSR_READ_4(sc, 0x7c00);
2756 		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2757 	}
2758 	DELAY(10000);
2759 
2760 	return(0);
2761 }
2762 
2763 /*
2764  * Frame reception handling. This is called if there's a frame
2765  * on the receive return list.
2766  *
2767  * Note: we have to be able to handle two possibilities here:
2768  * 1) the frame is from the jumbo receive ring
2769  * 2) the frame is from the standard receive ring
2770  */
2771 
2772 static void
2773 bge_rxeof(struct bge_softc *sc)
2774 {
2775 	struct ifnet *ifp;
2776 	int stdcnt = 0, jumbocnt = 0;
2777 
2778 	BGE_LOCK_ASSERT(sc);
2779 
2780 	/* Nothing to do. */
2781 	if (sc->bge_rx_saved_considx ==
2782 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
2783 		return;
2784 
2785 	ifp = sc->bge_ifp;
2786 
2787 	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2788 	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
2789 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2790 	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
2791 	if (BGE_IS_JUMBO_CAPABLE(sc))
2792 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2793 		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTREAD);
2794 
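	/*
	 * Walk the return ring from our saved consumer index up to the
	 * producer index reported in the status block, replenishing the
	 * standard or jumbo ring as each received frame is handed up.
	 */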
2795 	while (sc->bge_rx_saved_considx !=
2796 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2797 		struct bge_rx_bd	*cur_rx;
2798 		uint32_t		rxidx;
2799 		struct mbuf		*m = NULL;
2800 		uint16_t		vlan_tag = 0;
2801 		int			have_tag = 0;
2802 
2803 #ifdef DEVICE_POLLING
2804 		if (ifp->if_capenable & IFCAP_POLLING) {
2805 			if (sc->rxcycles <= 0)
2806 				break;
2807 			sc->rxcycles--;
2808 		}
2809 #endif
2810 
2811 		cur_rx =
2812 		    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2813 
2814 		rxidx = cur_rx->bge_idx;
2815 		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2816 
2817 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2818 			have_tag = 1;
2819 			vlan_tag = cur_rx->bge_vlan_tag;
2820 		}
2821 
2822 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2823 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2824 			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
2825 			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
2826 			    BUS_DMASYNC_POSTREAD);
2827 			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
2828 			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
2829 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2830 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2831 			jumbocnt++;
2832 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2833 				ifp->if_ierrors++;
2834 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2835 				continue;
2836 			}
2837 			if (bge_newbuf_jumbo(sc,
2838 			    sc->bge_jumbo, NULL) == ENOBUFS) {
2839 				ifp->if_ierrors++;
2840 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2841 				continue;
2842 			}
2843 		} else {
2844 			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2845 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2846 			    sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2847 			    BUS_DMASYNC_POSTREAD);
2848 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2849 			    sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2850 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2851 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2852 			stdcnt++;
2853 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2854 				ifp->if_ierrors++;
2855 				bge_newbuf_std(sc, sc->bge_std, m);
2856 				continue;
2857 			}
2858 			if (bge_newbuf_std(sc, sc->bge_std,
2859 			    NULL) == ENOBUFS) {
2860 				ifp->if_ierrors++;
2861 				bge_newbuf_std(sc, sc->bge_std, m);
2862 				continue;
2863 			}
2864 		}
2865 
2866 		ifp->if_ipackets++;
2867 #ifndef __NO_STRICT_ALIGNMENT
2868 		/*
2869 		 * For architectures with strict alignment we must make sure
2870 		 * the payload is aligned.
2871 		 */
2872 		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
2873 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2874 			    cur_rx->bge_len);
2875 			m->m_data += ETHER_ALIGN;
2876 		}
2877 #endif
2878 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2879 		m->m_pkthdr.rcvif = ifp;
2880 
2881 		if (ifp->if_capenable & IFCAP_RXCSUM) {
2882 			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2883 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2884 				if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2885 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2886 			}
2887 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
2888 			    m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
2889 				m->m_pkthdr.csum_data =
2890 				    cur_rx->bge_tcp_udp_csum;
2891 				m->m_pkthdr.csum_flags |=
2892 				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2893 			}
2894 		}
2895 
2896 		/*
2897 		 * If we received a packet with a vlan tag,
2898 		 * attach that information to the packet.
2899 		 */
2900 		if (have_tag) {
2901 			m->m_pkthdr.ether_vtag = vlan_tag;
2902 			m->m_flags |= M_VLANTAG;
2903 		}
2904 
2905 		BGE_UNLOCK(sc);
2906 		(*ifp->if_input)(ifp, m);
2907 		BGE_LOCK(sc);
2908 	}
2909 
2910 	if (stdcnt > 0)
2911 		bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2912 		    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
2913 
2914 	if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0)
2915 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2916 		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
2917 
2918 	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2919 	if (stdcnt)
2920 		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2921 	if (jumbocnt)
2922 		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2923 #ifdef notyet
2924 	/*
2925 	 * This register wraps very quickly under heavy packet drops.
2926 	 * If you need correct statistics, you can enable this check.
2927 	 */
2928 	if (BGE_IS_5705_PLUS(sc))
2929 		ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
2930 #endif
2931 }
2932 
2933 static void
2934 bge_txeof(struct bge_softc *sc)
2935 {
2936 	struct bge_tx_bd *cur_tx = NULL;
2937 	struct ifnet *ifp;
2938 
2939 	BGE_LOCK_ASSERT(sc);
2940 
2941 	/* Nothing to do. */
2942 	if (sc->bge_tx_saved_considx ==
2943 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
2944 		return;
2945 
2946 	ifp = sc->bge_ifp;
2947 
2948 	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
2949 	    sc->bge_cdata.bge_tx_ring_map,
2950 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2951 	/*
2952 	 * Go through our tx ring and free mbufs for those
2953 	 * frames that have been sent.
2954 	 */
2955 	while (sc->bge_tx_saved_considx !=
2956 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2957 		uint32_t		idx = 0;
2958 
2959 		idx = sc->bge_tx_saved_considx;
2960 		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2961 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2962 			ifp->if_opackets++;
2963 		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2964 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2965 			    sc->bge_cdata.bge_tx_dmamap[idx],
2966 			    BUS_DMASYNC_POSTWRITE);
2967 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2968 			    sc->bge_cdata.bge_tx_dmamap[idx]);
2969 			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2970 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2971 		}
2972 		sc->bge_txcnt--;
2973 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2974 	}
2975 
2976 	if (cur_tx != NULL)
2977 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2978 	if (sc->bge_txcnt == 0)
2979 		sc->bge_timer = 0;
2980 }
2981 
2982 #ifdef DEVICE_POLLING
2983 static void
2984 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2985 {
2986 	struct bge_softc *sc = ifp->if_softc;
2987 	uint32_t statusword;
2988 
2989 	BGE_LOCK(sc);
2990 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2991 		BGE_UNLOCK(sc);
2992 		return;
2993 	}
2994 
2995 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2996 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2997 
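	/* Atomically read and clear the status word from the status block. */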
2998 	statusword = atomic_readandclear_32(
2999 	    &sc->bge_ldata.bge_status_block->bge_status);
3000 
3001 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3002 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
3003 
3004 	/* Note link event. It will be processed by POLL_AND_CHECK_STATUS cmd */
3005 	if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3006 		sc->bge_link_evt++;
3007 
3008 	if (cmd == POLL_AND_CHECK_STATUS)
3009 		if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3010 		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3011 		    sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3012 			bge_link_upd(sc);
3013 
3014 	sc->rxcycles = count;
3015 	bge_rxeof(sc);
3016 	bge_txeof(sc);
3017 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3018 		bge_start_locked(ifp);
3019 
3020 	BGE_UNLOCK(sc);
3021 }
3022 #endif /* DEVICE_POLLING */
3023 
3024 static void
3025 bge_intr(void *xsc)
3026 {
3027 	struct bge_softc *sc;
3028 	struct ifnet *ifp;
3029 	uint32_t statusword;
3030 
3031 	sc = xsc;
3032 
3033 	BGE_LOCK(sc);
3034 
3035 	ifp = sc->bge_ifp;
3036 
3037 #ifdef DEVICE_POLLING
3038 	if (ifp->if_capenable & IFCAP_POLLING) {
3039 		BGE_UNLOCK(sc);
3040 		return;
3041 	}
3042 #endif
3043 
3044 	/*
3045 	 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO.  Don't
3046 	 * disable interrupts by writing nonzero like we used to, since with
3047 	 * our current organization this just gives complications and
3048 	 * pessimizations for re-enabling interrupts.  We used to have races
3049 	 * instead of the necessary complications.  Disabling interrupts
3050 	 * would just reduce the chance of a status update while we are
3051 	 * running (by switching to the interrupt-mode coalescence
3052 	 * parameters), but this chance is already very low so it is more
3053 	 * efficient to get another interrupt than prevent it.
3054 	 *
3055 	 * We do the ack first to ensure another interrupt if there is a
3056 	 * status update after the ack.  We don't check for the status
3057 	 * changing later because it is more efficient to get another
3058 	 * interrupt than prevent it, not quite as above (not checking is
3059 	 * a smaller optimization than not toggling the interrupt enable,
3060 	 * since checking doesn't involve PCI accesses and toggling requires
3061 	 * the status check).  So toggling would probably be a pessimization
3062 	 * even with MSI.  It would only be needed for using a task queue.
3063 	 */
3064 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3065 
3066 	/*
3067 	 * Do the mandatory PCI flush as well as get the link status.
3068 	 */
3069 	statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
3070 
3071 	/* Make sure the descriptor ring indexes are coherent. */
3072 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3073 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
3074 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3075 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
3076 
3077 	if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3078 	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3079 	    statusword || sc->bge_link_evt)
3080 		bge_link_upd(sc);
3081 
3082 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3083 		/* Check RX return ring producer/consumer. */
3084 		bge_rxeof(sc);
3085 
3086 		/* Check TX ring producer/consumer. */
3087 		bge_txeof(sc);
3088 	}
3089 
3090 	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3091 	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3092 		bge_start_locked(ifp);
3093 
3094 	BGE_UNLOCK(sc);
3095 }
3096 
3097 static void
3098 bge_asf_driver_up(struct bge_softc *sc)
3099 {
3100 	if (sc->bge_asf_mode & ASF_STACKUP) {
3101 		/* Send ASF heartbeat approx. every 2s */
3102 		if (sc->bge_asf_count)
3103 			sc->bge_asf_count--;
3104 		else {
3105 			sc->bge_asf_count = 5;
3106 			bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
3107 			    BGE_FW_DRV_ALIVE);
3108 			bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
3109 			bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
3110 			CSR_WRITE_4(sc, BGE_CPU_EVENT,
3111 			    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
3112 		}
3113 	}
3114 }
3115 
3116 static void
3117 bge_tick(void *xsc)
3118 {
3119 	struct bge_softc *sc = xsc;
3120 	struct mii_data *mii = NULL;
3121 
3122 	BGE_LOCK_ASSERT(sc);
3123 
3124 	/* Synchronize with possible callout reset/stop. */
3125 	if (callout_pending(&sc->bge_stat_ch) ||
3126 	    !callout_active(&sc->bge_stat_ch))
3127 		return;
3128 
3129 	if (BGE_IS_5705_PLUS(sc))
3130 		bge_stats_update_regs(sc);
3131 	else
3132 		bge_stats_update(sc);
3133 
3134 	if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3135 		mii = device_get_softc(sc->bge_miibus);
3136 		/* Don't mess with the PHY in IPMI/ASF mode */
3137 		if (!((sc->bge_asf_mode & ASF_STACKUP) && (sc->bge_link)))
3138 			mii_tick(mii);
3139 	} else {
3140 		/*
3141 		 * Since in TBI mode auto-polling can't be used we should poll
3142 		 * link status manually. Here we register pending link event
3143 		 * and trigger interrupt.
3144 		 */
3145 #ifdef DEVICE_POLLING
3146 		/* In polling mode we poll link state in bge_poll(). */
3147 		if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
3148 #endif
3149 		{
3150 		sc->bge_link_evt++;
3151 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3152 		}
3153 	}
3154 
3155 	bge_asf_driver_up(sc);
3156 	bge_watchdog(sc);
3157 
3158 	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3159 }
3160 
3161 static void
3162 bge_stats_update_regs(struct bge_softc *sc)
3163 {
3164 	struct ifnet *ifp;
3165 
3166 	ifp = sc->bge_ifp;
3167 
3168 	ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
3169 	    offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
3170 
3171 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3172 }
3173 
3174 static void
3175 bge_stats_update(struct bge_softc *sc)
3176 {
3177 	struct ifnet *ifp;
3178 	bus_size_t stats;
3179 	uint32_t cnt;	/* current register value */
3180 
3181 	ifp = sc->bge_ifp;
3182 
3183 	stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3184 
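	/*
	 * The statistics block lives in NIC-local memory; READ_STAT()
	 * fetches the low 32 bits of a counter through the memory window.
	 */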
3185 #define READ_STAT(sc, stats, stat) \
3186 	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
3187 
3188 	cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
3189 	ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
3190 	sc->bge_tx_collisions = cnt;
3191 
3192 	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
3193 	ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
3194 	sc->bge_rx_discards = cnt;
3195 
3196 	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
3197 	ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
3198 	sc->bge_tx_discards = cnt;
3199 
3200 #undef READ_STAT
3201 }
3202 
3203 /*
3204  * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3205  * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3206  * but when such padded frames employ the bge IP/TCP checksum offload,
3207  * the hardware checksum assist gives incorrect results (possibly
3208  * from incorporating its own padding into the UDP/TCP checksum; who knows).
3209  * If we pad such runts with zeros, the onboard checksum comes out correct.
3210  */
3211 static __inline int
3212 bge_cksum_pad(struct mbuf *m)
3213 {
3214 	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
3215 	struct mbuf *last;
3216 
3217 	/* If there's only the packet-header and we can pad there, use it. */
3218 	if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
3219 	    M_TRAILINGSPACE(m) >= padlen) {
3220 		last = m;
3221 	} else {
3222 		/*
3223 		 * Walk packet chain to find last mbuf. We will either
3224 		 * pad there, or append a new mbuf and pad it.
3225 		 */
3226 		for (last = m; last->m_next != NULL; last = last->m_next);
3227 		if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
3228 			/* Allocate new empty mbuf, pad it. Compact later. */
3229 			struct mbuf *n;
3230 
3231 			MGET(n, M_DONTWAIT, MT_DATA);
3232 			if (n == NULL)
3233 				return (ENOBUFS);
3234 			n->m_len = 0;
3235 			last->m_next = n;
3236 			last = n;
3237 		}
3238 	}
3239 
3240 	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
3241 	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
3242 	last->m_len += padlen;
3243 	m->m_pkthdr.len += padlen;
3244 
3245 	return (0);
3246 }
3247 
3248 /*
3249  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3250  * pointers to descriptors.
3251  */
3252 static int
3253 bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
3254 {
3255 	bus_dma_segment_t	segs[BGE_NSEG_NEW];
3256 	bus_dmamap_t		map;
3257 	struct bge_tx_bd	*d;
3258 	struct mbuf		*m = *m_head;
3259 	uint32_t		idx = *txidx;
3260 	uint16_t		csum_flags;
3261 	int			nsegs, i, error;
3262 
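	/*
	 * Translate any checksum-offload requests on this mbuf into
	 * send-descriptor flags.
	 */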
3263 	csum_flags = 0;
3264 	if (m->m_pkthdr.csum_flags) {
3265 		if (m->m_pkthdr.csum_flags & CSUM_IP)
3266 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3267 		if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
3268 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3269 			if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
3270 			    (error = bge_cksum_pad(m)) != 0) {
3271 				m_freem(m);
3272 				*m_head = NULL;
3273 				return (error);
3274 			}
3275 		}
3276 		if (m->m_flags & M_LASTFRAG)
3277 			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3278 		else if (m->m_flags & M_FRAG)
3279 			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3280 	}
3281 
3282 	map = sc->bge_cdata.bge_tx_dmamap[idx];
3283 	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m, segs,
3284 	    &nsegs, BUS_DMA_NOWAIT);
3285 	if (error == EFBIG) {
3286 		m = m_defrag(m, M_DONTWAIT);
3287 		if (m == NULL) {
3288 			m_freem(*m_head);
3289 			*m_head = NULL;
3290 			return (ENOBUFS);
3291 		}
3292 		*m_head = m;
3293 		error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m,
3294 		    segs, &nsegs, BUS_DMA_NOWAIT);
3295 		if (error) {
3296 			m_freem(m);
3297 			*m_head = NULL;
3298 			return (error);
3299 		}
3300 	} else if (error != 0)
3301 		return (error);
3302 
3303 	/*
3304 	 * Sanity check: avoid coming within 16 descriptors
3305 	 * of the end of the ring.
3306 	 */
3307 	if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
3308 		bus_dmamap_unload(sc->bge_cdata.bge_mtag, map);
3309 		return (ENOBUFS);
3310 	}
3311 
3312 	bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
3313 
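	/*
	 * Fill in one send descriptor per DMA segment; idx wraps around
	 * the ring and the final descriptor is flagged below as the end
	 * of the frame.
	 */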
3314 	for (i = 0; ; i++) {
3315 		d = &sc->bge_ldata.bge_tx_ring[idx];
3316 		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
3317 		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
3318 		d->bge_len = segs[i].ds_len;
3319 		d->bge_flags = csum_flags;
3320 		if (i == nsegs - 1)
3321 			break;
3322 		BGE_INC(idx, BGE_TX_RING_CNT);
3323 	}
3324 
3325 	/* Mark the last segment as end of packet... */
3326 	d->bge_flags |= BGE_TXBDFLAG_END;
3327 
3328 	/* ... and put VLAN tag into first segment.  */
3329 	d = &sc->bge_ldata.bge_tx_ring[*txidx];
3330 	if (m->m_flags & M_VLANTAG) {
3331 		d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3332 		d->bge_vlan_tag = m->m_pkthdr.ether_vtag;
3333 	} else
3334 		d->bge_vlan_tag = 0;
3335 
3336 	/*
3337 	 * Ensure that the map for this transmission
3338 	 * is placed at the array index of the last descriptor
3339 	 * in this chain.
3340 	 */
3341 	sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
3342 	sc->bge_cdata.bge_tx_dmamap[idx] = map;
3343 	sc->bge_cdata.bge_tx_chain[idx] = m;
3344 	sc->bge_txcnt += nsegs;
3345 
3346 	BGE_INC(idx, BGE_TX_RING_CNT);
3347 	*txidx = idx;
3348 
3349 	return (0);
3350 }
3351 
3352 /*
3353  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3354  * to the mbuf data regions directly in the transmit descriptors.
3355  */
3356 static void
3357 bge_start_locked(struct ifnet *ifp)
3358 {
3359 	struct bge_softc *sc;
3360 	struct mbuf *m_head = NULL;
3361 	uint32_t prodidx;
3362 	int count = 0;
3363 
3364 	sc = ifp->if_softc;
3365 
3366 	if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3367 		return;
3368 
3369 	prodidx = sc->bge_tx_prodidx;
3370 
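	/*
	 * Keep dequeuing frames as long as the descriptor slot at prodidx
	 * is free; a non-NULL chain entry there means the slot is still in
	 * use and the ring is full.
	 */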
3371 	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3372 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3373 		if (m_head == NULL)
3374 			break;
3375 
3376 		/*
3377 		 * XXX
3378 		 * The code inside the if() block is never reached since we
3379 		 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3380 		 * requests to checksum TCP/UDP in a fragmented packet.
3381 		 *
3382 		 * XXX
3383 		 * safety overkill.  If this is a fragmented packet chain
3384 		 * with delayed TCP/UDP checksums, then only encapsulate
3385 		 * it if we have enough descriptors to handle the entire
3386 		 * chain at once.
3387 		 * (paranoia -- may not actually be needed)
3388 		 */
3389 		if (m_head->m_flags & M_FIRSTFRAG &&
3390 		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3391 			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3392 			    m_head->m_pkthdr.csum_data + 16) {
3393 				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3394 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3395 				break;
3396 			}
3397 		}
3398 
3399 		/*
3400 		 * Pack the data into the transmit ring. If we
3401 		 * don't have room, set the OACTIVE flag and wait
3402 		 * for the NIC to drain the ring.
3403 		 */
3404 		if (bge_encap(sc, &m_head, &prodidx)) {
3405 			if (m_head == NULL)
3406 				break;
3407 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3408 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3409 			break;
3410 		}
3411 		++count;
3412 
3413 		/*
3414 		 * If there's a BPF listener, bounce a copy of this frame
3415 		 * to him.
3416 		 */
3417 		ETHER_BPF_MTAP(ifp, m_head);
3418 	}
3419 
3420 	if (count == 0)
3421 		/* No packets were dequeued. */
3422 		return;
3423 
3424 	/* Transmit. */
3425 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3426 	/* 5700 BX errata: the producer index mailbox must be written twice. */
3427 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3428 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3429 
3430 	sc->bge_tx_prodidx = prodidx;
3431 
3432 	/*
3433 	 * Set a timeout in case the chip goes out to lunch.
3434 	 */
3435 	sc->bge_timer = 5;
3436 }
3437 
3438 /*
3439  * Legacy if_start entry point: acquire the driver lock and hand the send
3440  * queue off to bge_start_locked().
3441  */
3442 static void
3443 bge_start(struct ifnet *ifp)
3444 {
3445 	struct bge_softc *sc;
3446 
3447 	sc = ifp->if_softc;
3448 	BGE_LOCK(sc);
3449 	bge_start_locked(ifp);
3450 	BGE_UNLOCK(sc);
3451 }
3452 
3453 static void
3454 bge_init_locked(struct bge_softc *sc)
3455 {
3456 	struct ifnet *ifp;
3457 	uint16_t *m;
3458 
3459 	BGE_LOCK_ASSERT(sc);
3460 
3461 	ifp = sc->bge_ifp;
3462 
3463 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3464 		return;
3465 
3466 	/* Cancel pending I/O and flush buffers. */
3467 	bge_stop(sc);
3468 
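	/*
	 * Pause the firmware, announce the impending reset, reset the
	 * chip, and then signal that the reset sequence has completed.
	 */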
3469 	bge_stop_fw(sc);
3470 	bge_sig_pre_reset(sc, BGE_RESET_START);
3471 	bge_reset(sc);
3472 	bge_sig_legacy(sc, BGE_RESET_START);
3473 	bge_sig_post_reset(sc, BGE_RESET_START);
3474 
3475 	bge_chipinit(sc);
3476 
3477 	/*
3478 	 * Init the various state machines, ring
3479 	 * control blocks and firmware.
3480 	 */
3481 	if (bge_blockinit(sc)) {
3482 		device_printf(sc->bge_dev, "initialization failure\n");
3483 		return;
3484 	}
3485 
3486 	ifp = sc->bge_ifp;
3487 
3488 	/* Specify MTU. */
3489 	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3490 	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
3491 
3492 	/* Load our MAC address. */
3493 	m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
3494 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3495 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3496 
3497 	/* Program promiscuous mode. */
3498 	bge_setpromisc(sc);
3499 
3500 	/* Program multicast filter. */
3501 	bge_setmulti(sc);
3502 
3503 	/* Init RX ring. */
3504 	bge_init_rx_ring_std(sc);
3505 
3506 	/*
3507 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3508 	 * memory to ensure that the chip has in fact read the first
3509 	 * entry of the ring.
3510 	 */
3511 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3512 		uint32_t		v, i;
3513 		for (i = 0; i < 10; i++) {
3514 			DELAY(20);
3515 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3516 			if (v == (MCLBYTES - ETHER_ALIGN))
3517 				break;
3518 		}
3519 		if (i == 10)
3520 			device_printf(sc->bge_dev,
3521 			    "5705 A0 chip failed to load RX ring\n");
3522 	}
3523 
3524 	/* Init jumbo RX ring. */
3525 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3526 		bge_init_rx_ring_jumbo(sc);
3527 
3528 	/* Init our RX return ring index. */
3529 	sc->bge_rx_saved_considx = 0;
3530 
3531 	/* Init our RX/TX stat counters. */
3532 	sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
3533 
3534 	/* Init TX ring. */
3535 	bge_init_tx_ring(sc);
3536 
3537 	/* Turn on transmitter. */
3538 	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3539 
3540 	/* Turn on receiver. */
3541 	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3542 
3543 	/* Tell firmware we're alive. */
3544 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3545 
3546 #ifdef DEVICE_POLLING
3547 	/* Disable interrupts if we are polling. */
3548 	if (ifp->if_capenable & IFCAP_POLLING) {
3549 		BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3550 		    BGE_PCIMISCCTL_MASK_PCI_INTR);
3551 		CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3552 	} else
3553 #endif
3554 
3555 	/* Enable host interrupts. */
3556 	{
3557 		BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3558 		BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3559 		CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3560 	}
3561 
3562 	bge_ifmedia_upd_locked(ifp);
3563 
3564 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3565 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3566 
3567 	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3568 }
3569 
3570 static void
3571 bge_init(void *xsc)
3572 {
3573 	struct bge_softc *sc = xsc;
3574 
3575 	BGE_LOCK(sc);
3576 	bge_init_locked(sc);
3577 	BGE_UNLOCK(sc);
3578 }
3579 
3580 /*
3581  * Set media options.
3582  */
3583 static int
3584 bge_ifmedia_upd(struct ifnet *ifp)
3585 {
3586 	struct bge_softc *sc = ifp->if_softc;
3587 	int res;
3588 
3589 	BGE_LOCK(sc);
3590 	res = bge_ifmedia_upd_locked(ifp);
3591 	BGE_UNLOCK(sc);
3592 
3593 	return (res);
3594 }
3595 
3596 static int
3597 bge_ifmedia_upd_locked(struct ifnet *ifp)
3598 {
3599 	struct bge_softc *sc = ifp->if_softc;
3600 	struct mii_data *mii;
3601 	struct ifmedia *ifm;
3602 
3603 	BGE_LOCK_ASSERT(sc);
3604 
3605 	ifm = &sc->bge_ifmedia;
3606 
3607 	/* If this is a 1000baseX NIC, enable the TBI port. */
3608 	if (sc->bge_flags & BGE_FLAG_TBI) {
3609 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3610 			return (EINVAL);
3611 		switch (IFM_SUBTYPE(ifm->ifm_media)) {
3612 		case IFM_AUTO:
3613 			/*
3614 			 * The BCM5704 ASIC appears to have a special
3615 			 * mechanism for programming the autoneg
3616 			 * advertisement registers in TBI mode.
3617 			 */
3618 			if (bge_fake_autoneg == 0 &&
3619 			    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3620 				uint32_t sgdig;
3621 				CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3622 				sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3623 				sgdig |= BGE_SGDIGCFG_AUTO|
3624 				    BGE_SGDIGCFG_PAUSE_CAP|
3625 				    BGE_SGDIGCFG_ASYM_PAUSE;
3626 				CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3627 				    sgdig|BGE_SGDIGCFG_SEND);
3628 				DELAY(5);
3629 				CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3630 			}
3631 			break;
3632 		case IFM_1000_SX:
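			/*
			 * Manually selected 1000baseSX: program the MAC
			 * half/full-duplex bit to match the requested media.
			 */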
3633 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3634 				BGE_CLRBIT(sc, BGE_MAC_MODE,
3635 				    BGE_MACMODE_HALF_DUPLEX);
3636 			} else {
3637 				BGE_SETBIT(sc, BGE_MAC_MODE,
3638 				    BGE_MACMODE_HALF_DUPLEX);
3639 			}
3640 			break;
3641 		default:
3642 			return (EINVAL);
3643 		}
3644 		return (0);
3645 	}
3646 
3647 	sc->bge_link_evt++;
3648 	mii = device_get_softc(sc->bge_miibus);
3649 	if (mii->mii_instance) {
3650 		struct mii_softc *miisc;
3651 		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3652 		    miisc = LIST_NEXT(miisc, mii_list))
3653 			mii_phy_reset(miisc);
3654 	}
3655 	mii_mediachg(mii);
3656 
3657 	return (0);
3658 }
3659 
3660 /*
3661  * Report current media status.
3662  */
3663 static void
3664 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3665 {
3666 	struct bge_softc *sc = ifp->if_softc;
3667 	struct mii_data *mii;
3668 
3669 	BGE_LOCK(sc);
3670 
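	/* For TBI (fiber) cards, report link and duplex straight from the MAC. */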
3671 	if (sc->bge_flags & BGE_FLAG_TBI) {
3672 		ifmr->ifm_status = IFM_AVALID;
3673 		ifmr->ifm_active = IFM_ETHER;
3674 		if (CSR_READ_4(sc, BGE_MAC_STS) &
3675 		    BGE_MACSTAT_TBI_PCS_SYNCHED)
3676 			ifmr->ifm_status |= IFM_ACTIVE;
3677 		else {
3678 			ifmr->ifm_active |= IFM_NONE;
3679 			BGE_UNLOCK(sc);
3680 			return;
3681 		}
3682 		ifmr->ifm_active |= IFM_1000_SX;
3683 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3684 			ifmr->ifm_active |= IFM_HDX;
3685 		else
3686 			ifmr->ifm_active |= IFM_FDX;
3687 		BGE_UNLOCK(sc);
3688 		return;
3689 	}
3690 
3691 	mii = device_get_softc(sc->bge_miibus);
3692 	mii_pollstat(mii);
3693 	ifmr->ifm_active = mii->mii_media_active;
3694 	ifmr->ifm_status = mii->mii_media_status;
3695 
3696 	BGE_UNLOCK(sc);
3697 }
3698 
3699 static int
3700 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3701 {
3702 	struct bge_softc *sc = ifp->if_softc;
3703 	struct ifreq *ifr = (struct ifreq *) data;
3704 	struct mii_data *mii;
3705 	int flags, mask, error = 0;
3706 
3707 	switch (command) {
3708 	case SIOCSIFMTU:
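		/*
		 * Only jumbo-capable chips may use an MTU above the
		 * standard Ethernet MTU.  An accepted change clears the
		 * running flag and reinitializes the chip for the new
		 * frame size.
		 */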
3709 		if (ifr->ifr_mtu < ETHERMIN ||
3710 		    ((BGE_IS_JUMBO_CAPABLE(sc)) &&
3711 		    ifr->ifr_mtu > BGE_JUMBO_MTU) ||
3712 		    ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
3713 		    ifr->ifr_mtu > ETHERMTU))
3714 			error = EINVAL;
3715 		else if (ifp->if_mtu != ifr->ifr_mtu) {
3716 			ifp->if_mtu = ifr->ifr_mtu;
3717 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3718 			bge_init(sc);
3719 		}
3720 		break;
3721 	case SIOCSIFFLAGS:
3722 		BGE_LOCK(sc);
3723 		if (ifp->if_flags & IFF_UP) {
3724 			/*
3725 			 * If only the state of the PROMISC flag changed,
3726 			 * then just use the 'set promisc mode' command
3727 			 * instead of reinitializing the entire NIC. Doing
3728 			 * a full re-init means reloading the firmware and
3729 			 * waiting for it to start up, which may take a
3730 			 * second or two.  Similarly for ALLMULTI.
3731 			 */
3732 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3733 				flags = ifp->if_flags ^ sc->bge_if_flags;
3734 				if (flags & IFF_PROMISC)
3735 					bge_setpromisc(sc);
3736 				if (flags & IFF_ALLMULTI)
3737 					bge_setmulti(sc);
3738 			} else
3739 				bge_init_locked(sc);
3740 		} else {
3741 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3742 				bge_stop(sc);
3743 			}
3744 		}
3745 		sc->bge_if_flags = ifp->if_flags;
3746 		BGE_UNLOCK(sc);
3747 		error = 0;
3748 		break;
3749 	case SIOCADDMULTI:
3750 	case SIOCDELMULTI:
3751 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3752 			BGE_LOCK(sc);
3753 			bge_setmulti(sc);
3754 			BGE_UNLOCK(sc);
3755 			error = 0;
3756 		}
3757 		break;
3758 	case SIOCSIFMEDIA:
3759 	case SIOCGIFMEDIA:
3760 		if (sc->bge_flags & BGE_FLAG_TBI) {
3761 			error = ifmedia_ioctl(ifp, ifr,
3762 			    &sc->bge_ifmedia, command);
3763 		} else {
3764 			mii = device_get_softc(sc->bge_miibus);
3765 			error = ifmedia_ioctl(ifp, ifr,
3766 			    &mii->mii_media, command);
3767 		}
3768 		break;
3769 	case SIOCSIFCAP:
3770 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3771 #ifdef DEVICE_POLLING
3772 		if (mask & IFCAP_POLLING) {
3773 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
3774 				error = ether_poll_register(bge_poll, ifp);
3775 				if (error)
3776 					return (error);
3777 				BGE_LOCK(sc);
3778 				BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3779 				    BGE_PCIMISCCTL_MASK_PCI_INTR);
3780 				CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3781 				ifp->if_capenable |= IFCAP_POLLING;
3782 				BGE_UNLOCK(sc);
3783 			} else {
3784 				error = ether_poll_deregister(ifp);
3785 				/* Enable interrupt even in error case */
3786 				BGE_LOCK(sc);
3787 				BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
3788 				    BGE_PCIMISCCTL_MASK_PCI_INTR);
3789 				CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3790 				ifp->if_capenable &= ~IFCAP_POLLING;
3791 				BGE_UNLOCK(sc);
3792 			}
3793 		}
3794 #endif
3795 		if (mask & IFCAP_HWCSUM) {
3796 			ifp->if_capenable ^= IFCAP_HWCSUM;
3797 			if (IFCAP_HWCSUM & ifp->if_capenable &&
3798 			    IFCAP_HWCSUM & ifp->if_capabilities)
3799 				ifp->if_hwassist = BGE_CSUM_FEATURES;
3800 			else
3801 				ifp->if_hwassist = 0;
3802 			VLAN_CAPABILITIES(ifp);
3803 		}
3804 		break;
3805 	default:
3806 		error = ether_ioctl(ifp, command, data);
3807 		break;
3808 	}
3809 
3810 	return (error);
3811 }
3812 
3813 static void
3814 bge_watchdog(struct bge_softc *sc)
3815 {
3816 	struct ifnet *ifp;
3817 
3818 	BGE_LOCK_ASSERT(sc);
3819 
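	/*
	 * A zero timer means the watchdog is disarmed; otherwise only
	 * act once the countdown reaches zero.
	 */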
3820 	if (sc->bge_timer == 0 || --sc->bge_timer)
3821 		return;
3822 
3823 	ifp = sc->bge_ifp;
3824 
3825 	if_printf(ifp, "watchdog timeout -- resetting\n");
3826 
3827 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3828 	bge_init_locked(sc);
3829 
3830 	ifp->if_oerrors++;
3831 }
3832 
3833 /*
3834  * Stop the adapter and free any mbufs allocated to the
3835  * RX and TX lists.
3836  */
3837 static void
3838 bge_stop(struct bge_softc *sc)
3839 {
3840 	struct ifnet *ifp;
3841 	struct ifmedia_entry *ifm;
3842 	struct mii_data *mii = NULL;
3843 	int mtmp, itmp;
3844 
3845 	BGE_LOCK_ASSERT(sc);
3846 
3847 	ifp = sc->bge_ifp;
3848 
3849 	if ((sc->bge_flags & BGE_FLAG_TBI) == 0)
3850 		mii = device_get_softc(sc->bge_miibus);
3851 
3852 	callout_stop(&sc->bge_stat_ch);
3853 
3854 	/*
3855 	 * Disable all of the receiver blocks.
3856 	 */
3857 	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3858 	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3859 	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3860 	if (!(BGE_IS_5705_PLUS(sc)))
3861 		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3862 	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3863 	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3864 	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3865 
3866 	/*
3867 	 * Disable all of the transmit blocks.
3868 	 */
3869 	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3870 	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3871 	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3872 	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3873 	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3874 	if (!(BGE_IS_5705_PLUS(sc)))
3875 		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3876 	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3877 
3878 	/*
3879 	 * Shut down all of the memory managers and related
3880 	 * state machines.
3881 	 */
3882 	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3883 	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3884 	if (!(BGE_IS_5705_PLUS(sc)))
3885 		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3886 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3887 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3888 	if (!(BGE_IS_5705_PLUS(sc))) {
3889 		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3890 		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3891 	}
3892 
3893 	/* Disable host interrupts. */
3894 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3895 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3896 
3897 	/*
3898 	 * Tell firmware we're shutting down.
3899 	 */
3900 
3901 	bge_stop_fw(sc);
3902 	bge_sig_pre_reset(sc, BGE_RESET_STOP);
3903 	bge_reset(sc);
3904 	bge_sig_legacy(sc, BGE_RESET_STOP);
3905 	bge_sig_post_reset(sc, BGE_RESET_STOP);
3906 
3907 	/*
3908 	 * Keep the ASF firmware running if up.
3909 	 */
3910 	if (sc->bge_asf_mode & ASF_STACKUP)
3911 		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3912 	else
3913 		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3914 
3915 	/* Free the RX lists. */
3916 	bge_free_rx_ring_std(sc);
3917 
3918 	/* Free jumbo RX list. */
3919 	if (BGE_IS_JUMBO_CAPABLE(sc))
3920 		bge_free_rx_ring_jumbo(sc);
3921 
3922 	/* Free TX buffers. */
3923 	bge_free_tx_ring(sc);
3924 
3925 	/*
3926 	 * Isolate/power down the PHY, but leave the media selection
3927 	 * unchanged so that things will be put back to normal when
3928 	 * we bring the interface back up.
3929 	 */
3930 	if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3931 		itmp = ifp->if_flags;
3932 		ifp->if_flags |= IFF_UP;
3933 		/*
3934 		 * If we are called from bge_detach(), mii is already NULL.
3935 		 */
3936 		if (mii != NULL) {
3937 			ifm = mii->mii_media.ifm_cur;
3938 			mtmp = ifm->ifm_media;
3939 			ifm->ifm_media = IFM_ETHER|IFM_NONE;
3940 			mii_mediachg(mii);
3941 			ifm->ifm_media = mtmp;
3942 		}
3943 		ifp->if_flags = itmp;
3944 	}
3945 
3946 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3947 
3948 	/* Clear MAC's link state (PHY may still have link UP). */
3949 	if (bootverbose && sc->bge_link)
3950 		if_printf(sc->bge_ifp, "link DOWN\n");
3951 	sc->bge_link = 0;
3952 
3953 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3954 }
3955 
3956 /*
3957  * Stop all chip I/O so that the kernel's probe routines don't
3958  * get confused by errant DMAs when rebooting.
3959  */
3960 static void
3961 bge_shutdown(device_t dev)
3962 {
3963 	struct bge_softc *sc;
3964 
3965 	sc = device_get_softc(dev);
3966 
3967 	BGE_LOCK(sc);
3968 	bge_stop(sc);
3969 	bge_reset(sc);
3970 	BGE_UNLOCK(sc);
3971 }
3972 
3973 static int
3974 bge_suspend(device_t dev)
3975 {
3976 	struct bge_softc *sc;
3977 
3978 	sc = device_get_softc(dev);
3979 	BGE_LOCK(sc);
3980 	bge_stop(sc);
3981 	BGE_UNLOCK(sc);
3982 
3983 	return (0);
3984 }
3985 
3986 static int
3987 bge_resume(device_t dev)
3988 {
3989 	struct bge_softc *sc;
3990 	struct ifnet *ifp;
3991 
3992 	sc = device_get_softc(dev);
3993 	BGE_LOCK(sc);
3994 	ifp = sc->bge_ifp;
3995 	if (ifp->if_flags & IFF_UP) {
3996 		bge_init_locked(sc);
3997 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3998 			bge_start_locked(ifp);
3999 	}
4000 	BGE_UNLOCK(sc);
4001 
4002 	return (0);
4003 }
4004 
4005 static void
4006 bge_link_upd(struct bge_softc *sc)
4007 {
4008 	struct mii_data *mii;
4009 	uint32_t link, status;
4010 
4011 	BGE_LOCK_ASSERT(sc);
4012 
4013 	/* Clear 'pending link event' flag. */
4014 	sc->bge_link_evt = 0;
4015 
4016 	/*
4017 	 * Process link state changes.
4018 	 * Grrr. The link status word in the status block does
4019 	 * not work correctly on the BCM5700 rev AX and BX chips,
4020 	 * according to all available information. Hence, we have
4021 	 * to enable MII interrupts in order to properly obtain
4022 	 * async link changes. Unfortunately, this also means that
4023 	 * we have to read the MAC status register to detect link
4024 	 * changes, thereby adding an additional register access to
4025 	 * the interrupt handler.
4026 	 *
4027 	 * XXX: perhaps link state detection procedure used for
4028 	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
4029 	 */
4030 
4031 	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4032 	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
4033 		status = CSR_READ_4(sc, BGE_MAC_STS);
4034 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
4035 			mii = device_get_softc(sc->bge_miibus);
4036 			mii_pollstat(mii);
4037 			if (!sc->bge_link &&
4038 			    mii->mii_media_status & IFM_ACTIVE &&
4039 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4040 				sc->bge_link++;
4041 				if (bootverbose)
4042 					if_printf(sc->bge_ifp, "link UP\n");
4043 			} else if (sc->bge_link &&
4044 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
4045 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4046 				sc->bge_link = 0;
4047 				if (bootverbose)
4048 					if_printf(sc->bge_ifp, "link DOWN\n");
4049 			}
4050 
4051 			/* Clear the interrupt. */
4052 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
4053 			    BGE_EVTENB_MI_INTERRUPT);
4054 			bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
4055 			bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
4056 			    BRGPHY_INTRS);
4057 		}
4058 		return;
4059 	}
4060 
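	/*
	 * TBI (fiber) cards: derive link state from the PCS sync bit
	 * in the MAC status register.
	 */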
4061 	if (sc->bge_flags & BGE_FLAG_TBI) {
4062 		status = CSR_READ_4(sc, BGE_MAC_STS);
4063 		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
4064 			if (!sc->bge_link) {
4065 				sc->bge_link++;
4066 				if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
4067 					BGE_CLRBIT(sc, BGE_MAC_MODE,
4068 					    BGE_MACMODE_TBI_SEND_CFGS);
4069 				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
4070 				if (bootverbose)
4071 					if_printf(sc->bge_ifp, "link UP\n");
4072 				if_link_state_change(sc->bge_ifp,
4073 				    LINK_STATE_UP);
4074 			}
4075 		} else if (sc->bge_link) {
4076 			sc->bge_link = 0;
4077 			if (bootverbose)
4078 				if_printf(sc->bge_ifp, "link DOWN\n");
4079 			if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
4080 		}
4081 	/* Discard link events for MII/GMII cards if MI auto-polling is disabled. */
4082 	} else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
4083 		/*
4084 		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
4085 		 * bit in the status word always set. Work around this bug by
4086 		 * reading the PHY link status directly.
4087 		 */
4088 		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
4089 
4090 		if (link != sc->bge_link ||
4091 		    sc->bge_asicrev == BGE_ASICREV_BCM5700) {
4092 			mii = device_get_softc(sc->bge_miibus);
4093 			mii_pollstat(mii);
4094 			if (!sc->bge_link &&
4095 			    mii->mii_media_status & IFM_ACTIVE &&
4096 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4097 				sc->bge_link++;
4098 				if (bootverbose)
4099 					if_printf(sc->bge_ifp, "link UP\n");
4100 			} else if (sc->bge_link &&
4101 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
4102 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4103 				sc->bge_link = 0;
4104 				if (bootverbose)
4105 					if_printf(sc->bge_ifp, "link DOWN\n");
4106 			}
4107 		}
4108 	}
4109 
4110 	/* Clear the attention. */
4111 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
4112 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
4113 	    BGE_MACSTAT_LINK_CHANGED);
4114 }
4115 
4116 static void
4117 bge_add_sysctls(struct bge_softc *sc)
4118 {
4119 	struct sysctl_ctx_list *ctx;
4120 	struct sysctl_oid_list *children;
4121 
4122 	ctx = device_get_sysctl_ctx(sc->bge_dev);
4123 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
4124 
4125 #ifdef BGE_REGISTER_DEBUG
4126 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
4127 	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
4128 	    "Debug Information");
4129 
4130 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
4131 	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
4132 	    "Register Read");
4133 
4134 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
4135 	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
4136 	    "Memory Read");
4137 
4138 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "stat_IfHcInOctets",
4139 	    CTLFLAG_RD,
4140 	    &sc->bge_ldata.bge_stats->rxstats.ifHCInOctets.bge_addr_lo,
4141 	    "Bytes received");
4142 
4143 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "stat_IfHcOutOctets",
4144 	    CTLFLAG_RD,
4145 	    &sc->bge_ldata.bge_stats->txstats.ifHCOutOctets.bge_addr_lo,
4146 	    "Bytes sent");
4147 #endif
4148 }
4149 
4150 #ifdef BGE_REGISTER_DEBUG
4151 static int
4152 bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
4153 {
4154 	struct bge_softc *sc;
4155 	uint16_t *sbdata;
4156 	int error;
4157 	int result;
4158 	int i, j;
4159 
4160 	result = -1;
4161 	error = sysctl_handle_int(oidp, &result, 0, req);
4162 	if (error || (req->newptr == NULL))
4163 		return (error);
4164 
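	/*
	 * Writing 1 to this sysctl dumps the status block, a window of
	 * chip registers and the decoded hardware flags to the console.
	 */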
4165 	if (result == 1) {
4166 		sc = (struct bge_softc *)arg1;
4167 
4168 		sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
4169 		printf("Status Block:\n");
4170 		for (i = 0x0; i < (BGE_STATUS_BLK_SZ / sizeof(uint16_t)); ) {
4171 			printf("%06x:", i);
4172 			for (j = 0; j < 8; j++) {
4173 				printf(" %04x", sbdata[i]);
4174 				i++;
4175 			}
4176 			printf("\n");
4177 		}
4178 
4179 		printf("Registers:\n");
4180 		for (i = 0x800; i < 0xa00; ) {
4181 			printf("%06x:", i);
4182 			for (j = 0; j < 8; j++) {
4183 				printf(" %08x", CSR_READ_4(sc, i));
4184 				i += 4;
4185 			}
4186 			printf("\n");
4187 		}
4188 
4189 		printf("Hardware Flags:\n");
4190 		if (BGE_IS_575X_PLUS(sc))
4191 			printf(" - 575X Plus\n");
4192 		if (BGE_IS_5705_PLUS(sc))
4193 			printf(" - 5705 Plus\n");
4194 		if (BGE_IS_5714_FAMILY(sc))
4195 			printf(" - 5714 Family\n");
4196 		if (BGE_IS_5700_FAMILY(sc))
4197 			printf(" - 5700 Family\n");
4198 		if (sc->bge_flags & BGE_FLAG_JUMBO)
4199 			printf(" - Supports Jumbo Frames\n");
4200 		if (sc->bge_flags & BGE_FLAG_PCIX)
4201 			printf(" - PCI-X Bus\n");
4202 		if (sc->bge_flags & BGE_FLAG_PCIE)
4203 			printf(" - PCI Express Bus\n");
4204 		if (sc->bge_flags & BGE_FLAG_NO_3LED)
4205 			printf(" - No 3 LEDs\n");
4206 		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
4207 			printf(" - RX Alignment Bug\n");
4208 	}
4209 
4210 	return (error);
4211 }
4212 
4213 static int
4214 bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
4215 {
4216 	struct bge_softc *sc;
4217 	int error;
4218 	int result;
4219 	uint32_t val;
4220 
4221 	result = -1;
4222 	error = sysctl_handle_int(oidp, &result, 0, req);
4223 	if (error || (req->newptr == NULL))
4224 		return (error);
4225 
4226 	if (result >= 0 && result < 0x8000) {
4227 		sc = (struct bge_softc *)arg1;
4228 		val = CSR_READ_4(sc, result);
4229 		printf("reg 0x%06X = 0x%08X\n", result, val);
4230 	}
4231 
4232 	return (error);
4233 }
4234 
4235 static int
4236 bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
4237 {
4238 	struct bge_softc *sc;
4239 	int error;
4240 	int result;
4241 	uint32_t val;
4242 
4243 	result = -1;
4244 	error = sysctl_handle_int(oidp, &result, 0, req);
4245 	if (error || (req->newptr == NULL))
4246 		return (error);
4247 
4248 	if (result >= 0 && result < 0x8000) {
4249 		sc = (struct bge_softc *)arg1;
4250 		val = bge_readmem_ind(sc, result);
4251 		printf("mem 0x%06X = 0x%08X\n", result, val);
4252 	}
4253 
4254 	return (error);
4255 }
4256 #endif
4257