xref: /freebsd/sys/dev/bge/if_bge.c (revision f856af0466c076beef4ea9b15d088e1119a945b8)
1 /*-
2  * Copyright (c) 2001 Wind River Systems
3  * Copyright (c) 1997, 1998, 1999, 2001
4  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 /*
38  * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
39  *
40  * The Broadcom BCM5700 is based on technology originally developed by
41  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
42  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45  * frames, highly configurable RX filtering, and 16 RX and TX queues
46  * (which, along with RX filter rules, can be used for QOS applications).
47  * Other features, such as TCP segmentation, may be available as part
48  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49  * firmware images can be stored in hardware and need not be compiled
50  * into the driver.
51  *
52  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53  * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54  *
55  * The BCM5701 is a single-chip solution incorporating both the BCM5700
56  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57  * does not support external SSRAM.
58  *
59  * Broadcom also produces a variation of the BCM5700 under the "Altima"
60  * brand name, which is functionally similar but lacks PCI-X support.
61  *
62  * Without external SSRAM, you can have at most 4 TX rings,
63  * and the use of the mini RX ring is disabled. This seems to imply
64  * that these features are simply not available on the BCM5701. As a
65  * result, this driver does not implement any support for the mini RX
66  * ring.
67  */
68 
69 #ifdef HAVE_KERNEL_OPTION_HEADERS
70 #include "opt_device_polling.h"
71 #endif
72 
73 #include <sys/param.h>
74 #include <sys/endian.h>
75 #include <sys/systm.h>
76 #include <sys/sockio.h>
77 #include <sys/mbuf.h>
78 #include <sys/malloc.h>
79 #include <sys/kernel.h>
80 #include <sys/module.h>
81 #include <sys/socket.h>
82 #include <sys/sysctl.h>
83 
84 #include <net/if.h>
85 #include <net/if_arp.h>
86 #include <net/ethernet.h>
87 #include <net/if_dl.h>
88 #include <net/if_media.h>
89 
90 #include <net/bpf.h>
91 
92 #include <net/if_types.h>
93 #include <net/if_vlan_var.h>
94 
95 #include <netinet/in_systm.h>
96 #include <netinet/in.h>
97 #include <netinet/ip.h>
98 
99 #include <machine/bus.h>
100 #include <machine/resource.h>
101 #include <sys/bus.h>
102 #include <sys/rman.h>
103 
104 #include <dev/mii/mii.h>
105 #include <dev/mii/miivar.h>
106 #include "miidevs.h"
107 #include <dev/mii/brgphyreg.h>
108 
109 #include <dev/pci/pcireg.h>
110 #include <dev/pci/pcivar.h>
111 
112 #include <dev/bge/if_bgereg.h>
113 
114 #define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
115 #define ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
116 
117 MODULE_DEPEND(bge, pci, 1, 1, 1);
118 MODULE_DEPEND(bge, ether, 1, 1, 1);
119 MODULE_DEPEND(bge, miibus, 1, 1, 1);
120 
121 /* "device miibus" required.  See GENERIC if you get errors here. */
122 #include "miibus_if.h"
123 
124 /*
125  * Various supported device vendors/types and their names. Note: the
126  * spec seems to indicate that the hardware still has Alteon's vendor
127  * ID burned into it, though it will always be overridden by the vendor
128  * ID in the EEPROM. Just to be safe, we cover all possibilities.
129  */
130 static struct bge_type {
131 	uint16_t	bge_vid;
132 	uint16_t	bge_did;
133 } bge_devs[] = {
134 	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5700 },
135 	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5701 },
136 
137 	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1000 },
138 	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1002 },
139 	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC9100 },
140 
141 	{ APPLE_VENDORID,	APPLE_DEVICE_BCM5701 },
142 
143 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5700 },
144 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5701 },
145 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702 },
146 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702_ALT },
147 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702X },
148 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703 },
149 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703_ALT },
150 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703X },
151 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704C },
152 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S },
153 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S_ALT },
154 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705 },
155 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705F },
156 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705K },
157 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M },
158 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M_ALT },
159 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714C },
160 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714S },
161 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715 },
162 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715S },
163 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5720 },
164 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5721 },
165 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750 },
166 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750M },
167 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751 },
168 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751F },
169 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751M },
170 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752 },
171 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752M },
172 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753 },
173 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753F },
174 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753M },
175 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754 },
176 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754M },
177 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755 },
178 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755M },
179 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780 },
180 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780S },
181 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5781 },
182 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5782 },
183 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5786 },
184 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787 },
185 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787M },
186 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5788 },
187 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5789 },
188 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901 },
189 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901A2 },
190 	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5903M },
191 
192 	{ SK_VENDORID,		SK_DEVICEID_ALTIMA },
193 
194 	{ TC_VENDORID,		TC_DEVICEID_3C996 },
195 
196 	{ 0, 0 }
197 };
198 
199 static const struct bge_vendor {
200 	uint16_t	v_id;
201 	const char	*v_name;
202 } bge_vendors[] = {
203 	{ ALTEON_VENDORID,	"Alteon" },
204 	{ ALTIMA_VENDORID,	"Altima" },
205 	{ APPLE_VENDORID,	"Apple" },
206 	{ BCOM_VENDORID,	"Broadcom" },
207 	{ SK_VENDORID,		"SysKonnect" },
208 	{ TC_VENDORID,		"3Com" },
209 
210 	{ 0, NULL }
211 };
212 
213 static const struct bge_revision {
214 	uint32_t	br_chipid;
215 	const char	*br_name;
216 } bge_revisions[] = {
217 	{ BGE_CHIPID_BCM5700_A0,	"BCM5700 A0" },
218 	{ BGE_CHIPID_BCM5700_A1,	"BCM5700 A1" },
219 	{ BGE_CHIPID_BCM5700_B0,	"BCM5700 B0" },
220 	{ BGE_CHIPID_BCM5700_B1,	"BCM5700 B1" },
221 	{ BGE_CHIPID_BCM5700_B2,	"BCM5700 B2" },
222 	{ BGE_CHIPID_BCM5700_B3,	"BCM5700 B3" },
223 	{ BGE_CHIPID_BCM5700_ALTIMA,	"BCM5700 Altima" },
224 	{ BGE_CHIPID_BCM5700_C0,	"BCM5700 C0" },
225 	{ BGE_CHIPID_BCM5701_A0,	"BCM5701 A0" },
226 	{ BGE_CHIPID_BCM5701_B0,	"BCM5701 B0" },
227 	{ BGE_CHIPID_BCM5701_B2,	"BCM5701 B2" },
228 	{ BGE_CHIPID_BCM5701_B5,	"BCM5701 B5" },
229 	{ BGE_CHIPID_BCM5703_A0,	"BCM5703 A0" },
230 	{ BGE_CHIPID_BCM5703_A1,	"BCM5703 A1" },
231 	{ BGE_CHIPID_BCM5703_A2,	"BCM5703 A2" },
232 	{ BGE_CHIPID_BCM5703_A3,	"BCM5703 A3" },
233 	{ BGE_CHIPID_BCM5703_B0,	"BCM5703 B0" },
234 	{ BGE_CHIPID_BCM5704_A0,	"BCM5704 A0" },
235 	{ BGE_CHIPID_BCM5704_A1,	"BCM5704 A1" },
236 	{ BGE_CHIPID_BCM5704_A2,	"BCM5704 A2" },
237 	{ BGE_CHIPID_BCM5704_A3,	"BCM5704 A3" },
238 	{ BGE_CHIPID_BCM5704_B0,	"BCM5704 B0" },
239 	{ BGE_CHIPID_BCM5705_A0,	"BCM5705 A0" },
240 	{ BGE_CHIPID_BCM5705_A1,	"BCM5705 A1" },
241 	{ BGE_CHIPID_BCM5705_A2,	"BCM5705 A2" },
242 	{ BGE_CHIPID_BCM5705_A3,	"BCM5705 A3" },
243 	{ BGE_CHIPID_BCM5750_A0,	"BCM5750 A0" },
244 	{ BGE_CHIPID_BCM5750_A1,	"BCM5750 A1" },
245 	{ BGE_CHIPID_BCM5750_A3,	"BCM5750 A3" },
246 	{ BGE_CHIPID_BCM5750_B0,	"BCM5750 B0" },
247 	{ BGE_CHIPID_BCM5750_B1,	"BCM5750 B1" },
248 	{ BGE_CHIPID_BCM5750_C0,	"BCM5750 C0" },
249 	{ BGE_CHIPID_BCM5750_C1,	"BCM5750 C1" },
250 	{ BGE_CHIPID_BCM5750_C2,	"BCM5750 C2" },
251 	{ BGE_CHIPID_BCM5714_A0,	"BCM5714 A0" },
252 	{ BGE_CHIPID_BCM5752_A0,	"BCM5752 A0" },
253 	{ BGE_CHIPID_BCM5752_A1,	"BCM5752 A1" },
254 	{ BGE_CHIPID_BCM5752_A2,	"BCM5752 A2" },
255 	{ BGE_CHIPID_BCM5714_B0,	"BCM5714 B0" },
256 	{ BGE_CHIPID_BCM5714_B3,	"BCM5714 B3" },
257 	{ BGE_CHIPID_BCM5715_A0,	"BCM5715 A0" },
258 	{ BGE_CHIPID_BCM5715_A1,	"BCM5715 A1" },
259 	/* 5784 and 5787 share the same ASIC ID */
260 	{ BGE_CHIPID_BCM5787_A0,	"BCM5754/5787 A0" },
261 	{ BGE_CHIPID_BCM5787_A1,	"BCM5754/5787 A1" },
262 	{ BGE_CHIPID_BCM5787_A2,	"BCM5754/5787 A2" },
263 
264 	{ 0, NULL }
265 };
266 
267 /*
268  * Some defaults for major revisions, so that newer steppings
269  * that we don't know about have a shot at working.
270  */
271 static const struct bge_revision bge_majorrevs[] = {
272 	{ BGE_ASICREV_BCM5700,		"unknown BCM5700" },
273 	{ BGE_ASICREV_BCM5701,		"unknown BCM5701" },
274 	{ BGE_ASICREV_BCM5703,		"unknown BCM5703" },
275 	{ BGE_ASICREV_BCM5704,		"unknown BCM5704" },
276 	{ BGE_ASICREV_BCM5705,		"unknown BCM5705" },
277 	{ BGE_ASICREV_BCM5750,		"unknown BCM5750" },
278 	{ BGE_ASICREV_BCM5714_A0,	"unknown BCM5714" },
279 	{ BGE_ASICREV_BCM5752,		"unknown BCM5752" },
280 	{ BGE_ASICREV_BCM5780,		"unknown BCM5780" },
281 	{ BGE_ASICREV_BCM5714,		"unknown BCM5714" },
282 	{ BGE_ASICREV_BCM5755,		"unknown BCM5755" },
283 	/* 5784 and 5787 share the same ASIC ID */
284 	{ BGE_ASICREV_BCM5787,		"unknown BCM5754/5787" },
285 
286 	{ 0, NULL }
287 };
288 
289 #define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_FLAG_JUMBO)
290 #define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
291 #define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5705_PLUS)
292 #define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
293 #define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_575X_PLUS)
294 
295 const struct bge_revision * bge_lookup_rev(uint32_t);
296 const struct bge_vendor * bge_lookup_vendor(uint16_t);
297 static int bge_probe(device_t);
298 static int bge_attach(device_t);
299 static int bge_detach(device_t);
300 static int bge_suspend(device_t);
301 static int bge_resume(device_t);
302 static void bge_release_resources(struct bge_softc *);
303 static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
304 static int bge_dma_alloc(device_t);
305 static void bge_dma_free(struct bge_softc *);
306 
307 static void bge_txeof(struct bge_softc *);
308 static void bge_rxeof(struct bge_softc *);
309 
310 static void bge_asf_driver_up (struct bge_softc *);
311 static void bge_tick(void *);
312 static void bge_stats_update(struct bge_softc *);
313 static void bge_stats_update_regs(struct bge_softc *);
314 static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
315 
316 static void bge_intr(void *);
317 static void bge_start_locked(struct ifnet *);
318 static void bge_start(struct ifnet *);
319 static int bge_ioctl(struct ifnet *, u_long, caddr_t);
320 static void bge_init_locked(struct bge_softc *);
321 static void bge_init(void *);
322 static void bge_stop(struct bge_softc *);
323 static void bge_watchdog(struct bge_softc *);
324 static void bge_shutdown(device_t);
325 static int bge_ifmedia_upd_locked(struct ifnet *);
326 static int bge_ifmedia_upd(struct ifnet *);
327 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
328 
329 static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
330 static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
331 
332 static void bge_setpromisc(struct bge_softc *);
333 static void bge_setmulti(struct bge_softc *);
334 
335 static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
336 static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
337 static int bge_init_rx_ring_std(struct bge_softc *);
338 static void bge_free_rx_ring_std(struct bge_softc *);
339 static int bge_init_rx_ring_jumbo(struct bge_softc *);
340 static void bge_free_rx_ring_jumbo(struct bge_softc *);
341 static void bge_free_tx_ring(struct bge_softc *);
342 static int bge_init_tx_ring(struct bge_softc *);
343 
344 static int bge_chipinit(struct bge_softc *);
345 static int bge_blockinit(struct bge_softc *);
346 
347 static uint32_t bge_readmem_ind(struct bge_softc *, int);
348 static void bge_writemem_ind(struct bge_softc *, int, int);
349 #ifdef notdef
350 static uint32_t bge_readreg_ind(struct bge_softc *, int);
351 #endif
352 static void bge_writemem_direct(struct bge_softc *, int, int);
353 static void bge_writereg_ind(struct bge_softc *, int, int);
354 
355 static int bge_miibus_readreg(device_t, int, int);
356 static int bge_miibus_writereg(device_t, int, int, int);
357 static void bge_miibus_statchg(device_t);
358 #ifdef DEVICE_POLLING
359 static void bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
360 #endif
361 
362 #define BGE_RESET_START 1
363 #define BGE_RESET_STOP  2
364 static void bge_sig_post_reset(struct bge_softc *, int);
365 static void bge_sig_legacy(struct bge_softc *, int);
366 static void bge_sig_pre_reset(struct bge_softc *, int);
367 static int bge_reset(struct bge_softc *);
368 static void bge_link_upd(struct bge_softc *);
369 
370 /*
371  * The BGE_REGISTER_DEBUG option is only for low-level debugging.  It may
372  * leak information to untrusted users.  It is also known to cause alignment
373  * traps on certain architectures.
374  */
375 #ifdef BGE_REGISTER_DEBUG
376 static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
377 static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
378 static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
379 #endif
380 static void bge_add_sysctls(struct bge_softc *);
381 
382 static device_method_t bge_methods[] = {
383 	/* Device interface */
384 	DEVMETHOD(device_probe,		bge_probe),
385 	DEVMETHOD(device_attach,	bge_attach),
386 	DEVMETHOD(device_detach,	bge_detach),
387 	DEVMETHOD(device_shutdown,	bge_shutdown),
388 	DEVMETHOD(device_suspend,	bge_suspend),
389 	DEVMETHOD(device_resume,	bge_resume),
390 
391 	/* bus interface */
392 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
393 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
394 
395 	/* MII interface */
396 	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
397 	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
398 	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),
399 
400 	{ 0, 0 }
401 };
402 
403 static driver_t bge_driver = {
404 	"bge",
405 	bge_methods,
406 	sizeof(struct bge_softc)
407 };
408 
409 static devclass_t bge_devclass;
410 
411 DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
412 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
413 
414 static int bge_fake_autoneg = 0;
415 static int bge_allow_asf = 1;
416 
417 TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
418 TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
419 
420 SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
421 SYSCTL_INT(_hw_bge, OID_AUTO, fake_autoneg, CTLFLAG_RD, &bge_fake_autoneg, 0,
422 	"Enable fake autonegotiation for certain blade systems");
423 SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
424 	"Allow ASF mode if available");
425 
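/*
 * Indirect register/memory access helpers: the chip exposes a movable
 * window into its internal memory through PCI config space.  We point
 * BGE_PCI_MEMWIN_BASEADDR at the target offset, transfer the data
 * through BGE_PCI_MEMWIN_DATA, then park the window back at 0.
 */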
426 static uint32_t
427 bge_readmem_ind(struct bge_softc *sc, int off)
428 {
429 	device_t dev;
430 	uint32_t val;
431 
432 	dev = sc->bge_dev;
433 
434 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
435 	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
436 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
437 	return (val);
438 }
439 
440 static void
441 bge_writemem_ind(struct bge_softc *sc, int off, int val)
442 {
443 	device_t dev;
444 
445 	dev = sc->bge_dev;
446 
447 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
448 	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
449 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
450 }
451 
452 #ifdef notdef
453 static uint32_t
454 bge_readreg_ind(struct bge_softc *sc, int off)
455 {
456 	device_t dev;
457 
458 	dev = sc->bge_dev;
459 
460 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
461 	return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
462 }
463 #endif
464 
465 static void
466 bge_writereg_ind(struct bge_softc *sc, int off, int val)
467 {
468 	device_t dev;
469 
470 	dev = sc->bge_dev;
471 
472 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
473 	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
474 }
475 
476 static void
477 bge_writemem_direct(struct bge_softc *sc, int off, int val)
478 {
479 	CSR_WRITE_4(sc, off, val);
480 }
481 
482 /*
483  * Map a single buffer address.
484  */
485 
486 static void
487 bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
488 {
489 	struct bge_dmamap_arg *ctx;
490 
491 	if (error)
492 		return;
493 
494 	ctx = arg;
495 
496 	if (nseg > ctx->bge_maxsegs) {
497 		ctx->bge_maxsegs = 0;
498 		return;
499 	}
500 
501 	ctx->bge_busaddr = segs->ds_addr;
502 }
503 
504 /*
505  * Read a byte of data stored in the EEPROM at address 'addr.' The
506  * BCM570x supports both the traditional bitbang interface and an
507  * auto access interface for reading the EEPROM. We use the auto
508  * access method.
509  */
510 static uint8_t
511 bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
512 {
513 	int i;
514 	uint32_t byte = 0;
515 
516 	/*
517 	 * Enable use of auto EEPROM access so we can avoid
518 	 * having to use the bitbang method.
519 	 */
520 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
521 
522 	/* Reset the EEPROM, load the clock period. */
523 	CSR_WRITE_4(sc, BGE_EE_ADDR,
524 	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
525 	DELAY(20);
526 
527 	/* Issue the read EEPROM command. */
528 	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
529 
530 	/* Wait for completion */
531 	for(i = 0; i < BGE_TIMEOUT * 10; i++) {
532 		DELAY(10);
533 		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
534 			break;
535 	}
536 
537 	if (i == BGE_TIMEOUT * 10) {
538 		device_printf(sc->bge_dev, "EEPROM read timed out\n");
539 		return (1);
540 	}
541 
542 	/* Get result. */
543 	byte = CSR_READ_4(sc, BGE_EE_DATA);
544 
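	/*
	 * The auto-access interface always returns a 32-bit word;
	 * pick out the byte lane selected by the low two address bits.
	 */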
545 	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;
546 
547 	return (0);
548 }
549 
550 /*
551  * Read a sequence of bytes from the EEPROM.
552  */
553 static int
554 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
555 {
556 	int i, error = 0;
557 	uint8_t byte = 0;
558 
559 	for (i = 0; i < cnt; i++) {
560 		error = bge_eeprom_getbyte(sc, off + i, &byte);
561 		if (error)
562 			break;
563 		*(dest + i) = byte;
564 	}
565 
566 	return (error ? 1 : 0);
567 }
568 
569 static int
570 bge_miibus_readreg(device_t dev, int phy, int reg)
571 {
572 	struct bge_softc *sc;
573 	uint32_t val, autopoll;
574 	int i;
575 
576 	sc = device_get_softc(dev);
577 
578 	/*
579 	 * Broadcom's own driver always assumes the internal
580 	 * PHY is at GMII address 1. On some chips, the PHY responds
581 	 * to accesses at all addresses, which could cause us to
582 	 * bogusly attach the PHY 32 times at probe type. Always
583 	 * restricting the lookup to address 1 is simpler than
584 	 * trying to figure out which chips revisions should be
585 	 * special-cased.
586 	 */
587 	if (phy != 1)
588 		return (0);
589 
590 	/* Reading with autopolling on may trigger PCI errors */
591 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
592 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
593 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
594 		DELAY(40);
595 	}
596 
597 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
598 	    BGE_MIPHY(phy)|BGE_MIREG(reg));
599 
600 	for (i = 0; i < BGE_TIMEOUT; i++) {
601 		val = CSR_READ_4(sc, BGE_MI_COMM);
602 		if (!(val & BGE_MICOMM_BUSY))
603 			break;
604 	}
605 
606 	if (i == BGE_TIMEOUT) {
607 		device_printf(sc->bge_dev, "PHY read timed out\n");
608 		val = 0;
609 		goto done;
610 	}
611 
612 	val = CSR_READ_4(sc, BGE_MI_COMM);
613 
614 done:
615 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
616 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
617 		DELAY(40);
618 	}
619 
620 	if (val & BGE_MICOMM_READFAIL)
621 		return (0);
622 
623 	return (val & 0xFFFF);
624 }
625 
626 static int
627 bge_miibus_writereg(device_t dev, int phy, int reg, int val)
628 {
629 	struct bge_softc *sc;
630 	uint32_t autopoll;
631 	int i;
632 
633 	sc = device_get_softc(dev);
634 
635 	/* Writing with autopolling on may trigger PCI errors */
636 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
637 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
638 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
639 		DELAY(40);
640 	}
641 
642 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
643 	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
644 
645 	for (i = 0; i < BGE_TIMEOUT; i++) {
646 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
647 			break;
648 	}
649 
650 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
651 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
652 		DELAY(40);
653 	}
654 
655 	if (i == BGE_TIMEOUT) {
656 		device_printf(sc->bge_dev, "PHY write timed out\n");
657 		return (0);
658 	}
659 
660 	return (0);
661 }
662 
663 static void
664 bge_miibus_statchg(device_t dev)
665 {
666 	struct bge_softc *sc;
667 	struct mii_data *mii;
668 	sc = device_get_softc(dev);
669 	mii = device_get_softc(sc->bge_miibus);
670 
671 	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
672 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
673 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
674 	else
675 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
676 
677 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
678 		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
679 	else
680 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
681 }
682 
683 /*
684  * Initialize a standard receive ring descriptor.
685  */
686 static int
687 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
688 {
689 	struct mbuf *m_new = NULL;
690 	struct bge_rx_bd *r;
691 	struct bge_dmamap_arg ctx;
692 	int error;
693 
694 	if (m == NULL) {
695 		m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
696 		if (m_new == NULL)
697 			return (ENOBUFS);
698 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
699 	} else {
700 		m_new = m;
701 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
702 		m_new->m_data = m_new->m_ext.ext_buf;
703 	}
704 
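	/*
	 * Offset the payload by ETHER_ALIGN (2) bytes so the IP header
	 * ends up 32-bit aligned.  Chips with the RX alignment bug
	 * cannot handle the unaligned start address, so the adjustment
	 * is skipped for them.
	 */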
705 	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
706 		m_adj(m_new, ETHER_ALIGN);
707 	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
708 	r = &sc->bge_ldata.bge_rx_std_ring[i];
709 	ctx.bge_maxsegs = 1;
710 	ctx.sc = sc;
711 	error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
712 	    sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
713 	    m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
714 	if (error || ctx.bge_maxsegs == 0) {
715 		if (m == NULL) {
716 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
717 			m_freem(m_new);
718 		}
719 		return (ENOMEM);
720 	}
721 	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
722 	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
723 	r->bge_flags = BGE_RXBDFLAG_END;
724 	r->bge_len = m_new->m_len;
725 	r->bge_idx = i;
726 
727 	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
728 	    sc->bge_cdata.bge_rx_std_dmamap[i],
729 	    BUS_DMASYNC_PREREAD);
730 
731 	return (0);
732 }
733 
734 /*
735  * Initialize a jumbo receive ring descriptor. This allocates
736  * a 9K jumbo mbuf cluster and maps it into the extended RX descriptor.
737  */
738 static int
739 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
740 {
741 	bus_dma_segment_t segs[BGE_NSEG_JUMBO];
742 	struct bge_extrx_bd *r;
743 	struct mbuf *m_new = NULL;
744 	int nsegs;
745 	int error;
746 
747 	if (m == NULL) {
748 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
749 		if (m_new == NULL)
750 			return (ENOBUFS);
751 
752 		m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
753 		if (!(m_new->m_flags & M_EXT)) {
754 			m_freem(m_new);
755 			return (ENOBUFS);
756 		}
757 		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
758 	} else {
759 		m_new = m;
760 		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
761 		m_new->m_data = m_new->m_ext.ext_buf;
762 	}
763 
764 	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
765 		m_adj(m_new, ETHER_ALIGN);
766 
767 	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
768 	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
769 	    m_new, segs, &nsegs, BUS_DMA_NOWAIT);
770 	if (error) {
771 		if (m == NULL)
772 			m_freem(m_new);
773 		return (error);
774 	}
775 	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
776 
777 	/*
778 	 * Fill in the extended RX buffer descriptor.
779 	 */
780 	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
781 	r->bge_flags = BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END;
782 	r->bge_idx = i;
783 	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
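	/*
	 * Note: the cases below intentionally fall through, so an mbuf
	 * that maps to N segments fills address/length slots N-1 down
	 * to 0 of the extended descriptor.
	 */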
784 	switch (nsegs) {
785 	case 4:
786 		r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
787 		r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
788 		r->bge_len3 = segs[3].ds_len;
789 	case 3:
790 		r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
791 		r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
792 		r->bge_len2 = segs[2].ds_len;
793 	case 2:
794 		r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
795 		r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
796 		r->bge_len1 = segs[1].ds_len;
797 	case 1:
798 		r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
799 		r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
800 		r->bge_len0 = segs[0].ds_len;
801 		break;
802 	default:
803 		panic("%s: %d segments\n", __func__, nsegs);
804 	}
805 
806 	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
807 	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
808 	    BUS_DMASYNC_PREREAD);
809 
810 	return (0);
811 }
812 
813 /*
814  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
815  * that's 1MB of memory, which is a lot. For now, we fill only the first
816  * 256 ring entries and hope that our CPU is fast enough to keep up with
817  * the NIC.
818  */
819 static int
820 bge_init_rx_ring_std(struct bge_softc *sc)
821 {
822 	int i;
823 
824 	for (i = 0; i < BGE_SSLOTS; i++) {
825 		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
826 			return (ENOBUFS);
827 	}
828 
829 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
830 	    sc->bge_cdata.bge_rx_std_ring_map,
831 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
832 
833 	sc->bge_std = i - 1;
834 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
835 
836 	return (0);
837 }
838 
839 static void
840 bge_free_rx_ring_std(struct bge_softc *sc)
841 {
842 	int i;
843 
844 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
845 		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
846 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
847 			    sc->bge_cdata.bge_rx_std_dmamap[i],
848 			    BUS_DMASYNC_POSTREAD);
849 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
850 			    sc->bge_cdata.bge_rx_std_dmamap[i]);
851 			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
852 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
853 		}
854 		bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
855 		    sizeof(struct bge_rx_bd));
856 	}
857 }
858 
859 static int
860 bge_init_rx_ring_jumbo(struct bge_softc *sc)
861 {
862 	struct bge_rcb *rcb;
863 	int i;
864 
865 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
866 		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
867 			return (ENOBUFS);
868 	}
869 
870 	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
871 	    sc->bge_cdata.bge_rx_jumbo_ring_map,
872 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
873 
874 	sc->bge_jumbo = i - 1;
875 
876 	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
877 	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
878 				    BGE_RCB_FLAG_USE_EXT_RX_BD);
879 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
880 
881 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
882 
883 	return (0);
884 }
885 
886 static void
887 bge_free_rx_ring_jumbo(struct bge_softc *sc)
888 {
889 	int i;
890 
891 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
892 		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
893 			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
894 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
895 			    BUS_DMASYNC_POSTREAD);
896 			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
897 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
898 			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
899 			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
900 		}
901 		bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
902 		    sizeof(struct bge_extrx_bd));
903 	}
904 }
905 
906 static void
907 bge_free_tx_ring(struct bge_softc *sc)
908 {
909 	int i;
910 
911 	if (sc->bge_ldata.bge_tx_ring == NULL)
912 		return;
913 
914 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
915 		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
916 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
917 			    sc->bge_cdata.bge_tx_dmamap[i],
918 			    BUS_DMASYNC_POSTWRITE);
919 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
920 			    sc->bge_cdata.bge_tx_dmamap[i]);
921 			m_freem(sc->bge_cdata.bge_tx_chain[i]);
922 			sc->bge_cdata.bge_tx_chain[i] = NULL;
923 		}
924 		bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
925 		    sizeof(struct bge_tx_bd));
926 	}
927 }
928 
929 static int
930 bge_init_tx_ring(struct bge_softc *sc)
931 {
932 	sc->bge_txcnt = 0;
933 	sc->bge_tx_saved_considx = 0;
934 
935 	/* Initialize transmit producer index for host-memory send ring. */
936 	sc->bge_tx_prodidx = 0;
937 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
938 
939 	/* 5700 b2 errata */
940 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
941 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
942 
943 	/* NIC-memory send ring not used; initialize to zero. */
944 	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
945 	/* 5700 b2 errata */
946 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
947 		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
948 
949 	return (0);
950 }
951 
952 static void
953 bge_setpromisc(struct bge_softc *sc)
954 {
955 	struct ifnet *ifp;
956 
957 	BGE_LOCK_ASSERT(sc);
958 
959 	ifp = sc->bge_ifp;
960 
961 	/* Enable or disable promiscuous mode as needed. */
962 	if (ifp->if_flags & IFF_PROMISC)
963 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
964 	else
965 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
966 }
967 
968 static void
969 bge_setmulti(struct bge_softc *sc)
970 {
971 	struct ifnet *ifp;
972 	struct ifmultiaddr *ifma;
973 	uint32_t hashes[4] = { 0, 0, 0, 0 };
974 	int h, i;
975 
976 	BGE_LOCK_ASSERT(sc);
977 
978 	ifp = sc->bge_ifp;
979 
980 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
981 		for (i = 0; i < 4; i++)
982 			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
983 		return;
984 	}
985 
986 	/* First, zot all the existing filters. */
987 	for (i = 0; i < 4; i++)
988 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
989 
990 	/* Now program new ones. */
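	/*
	 * The filter is a 128-bit hash table spread across the four
	 * 32-bit BGE_MAR registers; the low 7 bits of the little-endian
	 * CRC-32 of each address select which bit to set.
	 */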
991 	IF_ADDR_LOCK(ifp);
992 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
993 		if (ifma->ifma_addr->sa_family != AF_LINK)
994 			continue;
995 		h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
996 		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
997 		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
998 	}
999 	IF_ADDR_UNLOCK(ifp);
1000 
1001 	for (i = 0; i < 4; i++)
1002 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1003 }
1004 
1005 static void
1006 bge_sig_pre_reset(sc, type)
1007 	struct bge_softc *sc;
1008 	int type;
1009 {
1010 	/*
1011 	 * Some chips don't like this, so only do it if ASF is enabled.
1012 	 */
1013 	if (sc->bge_asf_mode)
1014 		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1015 
1016 	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1017 		switch (type) {
1018 		case BGE_RESET_START:
1019 			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1020 			break;
1021 		case BGE_RESET_STOP:
1022 			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1023 			break;
1024 		}
1025 	}
1026 }
1027 
1028 static void
1029 bge_sig_post_reset(sc, type)
1030 	struct bge_softc *sc;
1031 	int type;
1032 {
1033 	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1034 		switch (type) {
1035 		case BGE_RESET_START:
1036 			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
1037 			/* START DONE */
1038 			break;
1039 		case BGE_RESET_STOP:
1040 			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
1041 			break;
1042 		}
1043 	}
1044 }
1045 
1046 static void
1047 bge_sig_legacy(sc, type)
1048 	struct bge_softc *sc;
1049 	int type;
1050 {
1051 	if (sc->bge_asf_mode) {
1052 		switch (type) {
1053 		case BGE_RESET_START:
1054 			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
1055 			break;
1056 		case BGE_RESET_STOP:
1057 			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
1058 			break;
1059 		}
1060 	}
1061 }
1062 
1063 void bge_stop_fw(struct bge_softc *);
1064 void
1065 bge_stop_fw(sc)
1066 	struct bge_softc *sc;
1067 {
1068 	int i;
1069 
1070 	if (sc->bge_asf_mode) {
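		/*
		 * Post BGE_FW_PAUSE in the firmware mailbox and set the
		 * CPU event bit (bit 14), then wait for the bit to clear
		 * as the acknowledgement.
		 */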
1071 		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
1072 		CSR_WRITE_4(sc, BGE_CPU_EVENT,
1073 			    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
1074 
1075 		for (i = 0; i < 100; i++ ) {
1076 			if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
1077 				break;
1078 			DELAY(10);
1079 		}
1080 	}
1081 }
1082 
1083 /*
1084  * Do endian, PCI and DMA initialization. Also check the on-board ROM
1085  * self-test results.
1086  */
1087 static int
1088 bge_chipinit(struct bge_softc *sc)
1089 {
1090 	uint32_t dma_rw_ctl;
1091 	int i;
1092 
1093 	/* Set endianness before we access any non-PCI registers. */
1094 	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
1095 
1096 	/*
1097 	 * Check the 'ROM failed' bit on the RX CPU to see if
1098 	 * self-tests passed.
1099 	 */
1100 	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1101 		device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
1102 		return (ENODEV);
1103 	}
1104 
1105 	/* Clear the MAC control register */
1106 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1107 
1108 	/*
1109 	 * Clear the MAC statistics block in the NIC's
1110 	 * internal memory.
1111 	 */
1112 	for (i = BGE_STATS_BLOCK;
1113 	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1114 		BGE_MEMWIN_WRITE(sc, i, 0);
1115 
1116 	for (i = BGE_STATUS_BLOCK;
1117 	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1118 		BGE_MEMWIN_WRITE(sc, i, 0);
1119 
1120 	/* Set up the PCI DMA control register. */
1121 	if (sc->bge_flags & BGE_FLAG_PCIE) {
1122 		/* PCI Express bus */
1123 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1124 		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1125 		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1126 	} else if (sc->bge_flags & BGE_FLAG_PCIX) {
1127 		/* PCI-X bus */
1128 		if (BGE_IS_5714_FAMILY(sc)) {
1129 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
1130 			dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
1131 			/* XXX magic values, Broadcom-supplied Linux driver */
1132 			if (sc->bge_asicrev == BGE_ASICREV_BCM5780)
1133 				dma_rw_ctl |= (1 << 20) | (1 << 18) |
1134 				    BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1135 			else
1136 				dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15);
1137 
1138 		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1139 			/*
1140 			 * The 5704 uses a different encoding of read/write
1141 			 * watermarks.
1142 			 */
1143 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1144 			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1145 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1146 		else
1147 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1148 			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1149 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1150 			    (0x0F);
1151 
1152 		/*
1153 		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1154 		 * for hardware bugs.
1155 		 */
1156 		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1157 		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1158 			uint32_t tmp;
1159 
1160 			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1161 			if (tmp == 0x6 || tmp == 0x7)
1162 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1163 		}
1164 	} else
1165 		/* Conventional PCI bus */
1166 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1167 		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1168 		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1169 		    (0x0F);
1170 
1171 	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1172 	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1173 	    sc->bge_asicrev == BGE_ASICREV_BCM5705)
1174 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1175 	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1176 
1177 	/*
1178 	 * Set up general mode register.
1179 	 */
1180 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1181 	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1182 	    BGE_MODECTL_TX_NO_PHDR_CSUM);
1183 
1184 	/*
1185 	 * Tell the firmware the driver is running
1186 	 */
1187 	if (sc->bge_asf_mode & ASF_STACKUP)
1188 		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1189 
1190 	/*
1191 	 * Disable memory write invalidate.  Apparently it is not supported
1192 	 * properly by these devices.
1193 	 */
1194 	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1195 
1196 #ifdef __brokenalpha__
1197 	/*
1198 	 * Must ensure that we do not cross an 8K (bytes) boundary
1199 	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
1200 	 * restriction on some ALPHA platforms with early revision
1201 	 * 21174 PCI chipsets, such as the AlphaPC 164lx
1202 	 */
1203 	PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
1204 	    BGE_PCI_READ_BNDRY_1024BYTES, 4);
1205 #endif
1206 
1207 	/* Set the timer prescaler (always 66MHz) */
1208 	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1209 
1210 	return (0);
1211 }
1212 
1213 static int
1214 bge_blockinit(struct bge_softc *sc)
1215 {
1216 	struct bge_rcb *rcb;
1217 	bus_size_t vrcb;
1218 	bge_hostaddr taddr;
1219 	uint32_t val;
1220 	int i;
1221 
1222 	/*
1223 	 * Initialize the memory window pointer register so that
1224 	 * we can access the first 32K of internal NIC RAM. This will
1225 	 * allow us to set up the TX send ring RCBs and the RX return
1226 	 * ring RCBs, plus other things which live in NIC memory.
1227 	 */
1228 	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1229 
1230 	/* Note: the BCM5704 has a smaller mbuf space than other chips. */
1231 
1232 	if (!(BGE_IS_5705_PLUS(sc))) {
1233 		/* Configure mbuf memory pool */
1234 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1235 		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1236 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1237 		else
1238 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1239 
1240 		/* Configure DMA resource pool */
1241 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1242 		    BGE_DMA_DESCRIPTORS);
1243 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1244 	}
1245 
1246 	/* Configure mbuf pool watermarks */
1247 	if (!(BGE_IS_5705_PLUS(sc))) {
1248 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1249 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1250 	} else {
1251 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1252 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1253 	}
1254 	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1255 
1256 	/* Configure DMA resource watermarks */
1257 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1258 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1259 
1260 	/* Enable buffer manager */
1261 	if (!(BGE_IS_5705_PLUS(sc))) {
1262 		CSR_WRITE_4(sc, BGE_BMAN_MODE,
1263 		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1264 
1265 		/* Poll for buffer manager start indication */
1266 		for (i = 0; i < BGE_TIMEOUT; i++) {
1267 			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1268 				break;
1269 			DELAY(10);
1270 		}
1271 
1272 		if (i == BGE_TIMEOUT) {
1273 			device_printf(sc->bge_dev,
1274 			    "buffer manager failed to start\n");
1275 			return (ENXIO);
1276 		}
1277 	}
1278 
1279 	/* Enable flow-through queues */
1280 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1281 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1282 
1283 	/* Wait until queue initialization is complete */
1284 	for (i = 0; i < BGE_TIMEOUT; i++) {
1285 		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1286 			break;
1287 		DELAY(10);
1288 	}
1289 
1290 	if (i == BGE_TIMEOUT) {
1291 		device_printf(sc->bge_dev, "flow-through queue init failed\n");
1292 		return (ENXIO);
1293 	}
1294 
1295 	/* Initialize the standard RX ring control block */
1296 	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1297 	rcb->bge_hostaddr.bge_addr_lo =
1298 	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1299 	rcb->bge_hostaddr.bge_addr_hi =
1300 	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1301 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1302 	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1303 	if (BGE_IS_5705_PLUS(sc))
1304 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1305 	else
1306 		rcb->bge_maxlen_flags =
1307 		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1308 	rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1309 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1310 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1311 
1312 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1313 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1314 
1315 	/*
1316 	 * Initialize the jumbo RX ring control block
1317 	 * We set the 'ring disabled' bit in the flags
1318 	 * field until we're actually ready to start
1319 	 * using this ring (i.e. once we set the MTU
1320 	 * high enough to require it).
1321 	 */
1322 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
1323 		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1324 
1325 		rcb->bge_hostaddr.bge_addr_lo =
1326 		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1327 		rcb->bge_hostaddr.bge_addr_hi =
1328 		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1329 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1330 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
1331 		    BUS_DMASYNC_PREREAD);
1332 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1333 		    BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED);
1334 		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1335 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1336 		    rcb->bge_hostaddr.bge_addr_hi);
1337 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1338 		    rcb->bge_hostaddr.bge_addr_lo);
1339 
1340 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1341 		    rcb->bge_maxlen_flags);
1342 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1343 
1344 		/* Set up dummy disabled mini ring RCB */
1345 		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1346 		rcb->bge_maxlen_flags =
1347 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1348 		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1349 		    rcb->bge_maxlen_flags);
1350 	}
1351 
1352 	/*
1353 	 * Set the BD ring replenish thresholds. The recommended
1354 	 * values are 1/8th the number of descriptors allocated to
1355 	 * each ring.
1356 	 * XXX The 5754 requires a lower threshold, so it might be a
1357 	 * requirement of all 575x family chips.  The Linux driver sets
1358 	 * the lower threshold for all 5705 family chips as well, but there
1359 	 * are reports that it might not need to be so strict.
1360 	 */
1361 	if (BGE_IS_5705_PLUS(sc))
1362 		val = 8;
1363 	else
1364 		val = BGE_STD_RX_RING_CNT / 8;
1365 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1366 	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1367 
1368 	/*
1369 	 * Disable all unused send rings by setting the 'ring disabled'
1370 	 * bit in the flags field of all the TX send ring control blocks.
1371 	 * These are located in NIC memory.
1372 	 */
1373 	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1374 	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1375 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1376 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1377 		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1378 		vrcb += sizeof(struct bge_rcb);
1379 	}
1380 
1381 	/* Configure TX RCB 0 (we use only the first ring) */
1382 	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1383 	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1384 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1385 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1386 	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1387 	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1388 	if (!(BGE_IS_5705_PLUS(sc)))
1389 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1390 		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1391 
1392 	/* Disable all unused RX return rings */
1393 	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1394 	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1395 		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1396 		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1397 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1398 		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1399 		    BGE_RCB_FLAG_RING_DISABLED));
1400 		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1401 		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1402 		    (i * (sizeof(uint64_t))), 0);
1403 		vrcb += sizeof(struct bge_rcb);
1404 	}
1405 
1406 	/* Initialize RX ring indexes */
1407 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1408 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1409 	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1410 
1411 	/*
1412 	 * Set up RX return ring 0
1413 	 * Note that the NIC address for RX return rings is 0x00000000.
1414 	 * The return rings live entirely within the host, so the
1415 	 * nicaddr field in the RCB isn't used.
1416 	 */
1417 	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1418 	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1419 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1420 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1421 	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1422 	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1423 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1424 
1425 	/* Set random backoff seed for TX */
1426 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1427 	    IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
1428 	    IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
1429 	    IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
1430 	    BGE_TX_BACKOFF_SEED_MASK);
1431 
1432 	/* Set inter-packet gap */
1433 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1434 
1435 	/*
1436 	 * Specify which ring to use for packets that don't match
1437 	 * any RX rules.
1438 	 */
1439 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1440 
1441 	/*
1442 	 * Configure number of RX lists. One interrupt distribution
1443 	 * list, sixteen active lists, one bad frames class.
1444 	 */
1445 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1446 
1447 	/* Initialize RX list placement stats mask. */
1448 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1449 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1450 
1451 	/* Disable host coalescing until we get it set up */
1452 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1453 
1454 	/* Poll to make sure it's shut down. */
1455 	for (i = 0; i < BGE_TIMEOUT; i++) {
1456 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1457 			break;
1458 		DELAY(10);
1459 	}
1460 
1461 	if (i == BGE_TIMEOUT) {
1462 		device_printf(sc->bge_dev,
1463 		    "host coalescing engine failed to idle\n");
1464 		return (ENXIO);
1465 	}
1466 
1467 	/* Set up host coalescing defaults */
1468 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1469 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1470 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1471 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1472 	if (!(BGE_IS_5705_PLUS(sc))) {
1473 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1474 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1475 	}
1476 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1477 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1478 
1479 	/* Set up address of statistics block */
1480 	if (!(BGE_IS_5705_PLUS(sc))) {
1481 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1482 		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1483 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1484 		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1485 		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1486 		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1487 		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1488 	}
1489 
1490 	/* Set up address of status block */
1491 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1492 	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1493 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1494 	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1495 	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1496 	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1497 
1498 	/* Turn on host coalescing state machine */
1499 	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1500 
1501 	/* Turn on RX BD completion state machine and enable attentions */
1502 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
1503 	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1504 
1505 	/* Turn on RX list placement state machine */
1506 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1507 
1508 	/* Turn on RX list selector state machine. */
1509 	if (!(BGE_IS_5705_PLUS(sc)))
1510 		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1511 
1512 	/* Turn on DMA, clear stats */
1513 	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1514 	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1515 	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1516 	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1517 	    ((sc->bge_flags & BGE_FLAG_TBI) ?
1518 	    BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1519 
1520 	/* Set misc. local control, enable interrupts on attentions */
1521 	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1522 
1523 #ifdef notdef
1524 	/* Assert GPIO pins for PHY reset */
1525 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1526 	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1527 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1528 	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1529 #endif
1530 
1531 	/* Turn on DMA completion state machine */
1532 	if (!(BGE_IS_5705_PLUS(sc)))
1533 		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1534 
1535 	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
1536 
1537 	/* Enable host coalescing bug fix. */
1538 	if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1539 	    sc->bge_asicrev == BGE_ASICREV_BCM5787)
1540 		val |= (1 << 29);
1541 
1542 	/* Turn on write DMA state machine */
1543 	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1544 
1545 	/* Turn on read DMA state machine */
1546 	CSR_WRITE_4(sc, BGE_RDMA_MODE,
1547 	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1548 
1549 	/* Turn on RX data completion state machine */
1550 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1551 
1552 	/* Turn on RX BD initiator state machine */
1553 	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1554 
1555 	/* Turn on RX data and RX BD initiator state machine */
1556 	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1557 
1558 	/* Turn on Mbuf cluster free state machine */
1559 	if (!(BGE_IS_5705_PLUS(sc)))
1560 		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1561 
1562 	/* Turn on send BD completion state machine */
1563 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1564 
1565 	/* Turn on send data completion state machine */
1566 	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1567 
1568 	/* Turn on send data initiator state machine */
1569 	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1570 
1571 	/* Turn on send BD initiator state machine */
1572 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1573 
1574 	/* Turn on send BD selector state machine */
1575 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1576 
1577 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1578 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1579 	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1580 
1581 	/* ack/clear link change events */
1582 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1583 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1584 	    BGE_MACSTAT_LINK_CHANGED);
1585 	CSR_WRITE_4(sc, BGE_MI_STS, 0);
1586 
1587 	/* Enable PHY auto polling (for MII/GMII only) */
1588 	if (sc->bge_flags & BGE_FLAG_TBI) {
1589 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1590 	} else {
1591 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1592 		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1593 		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
1594 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1595 			    BGE_EVTENB_MI_INTERRUPT);
1596 	}
1597 
1598 	/*
1599 	 * Clear any pending link state attention.
1600 	 * Otherwise some link state change events may be lost until attention
1601 	 * is cleared by bge_intr() -> bge_link_upd() sequence.
1602 	 * It's not necessary on newer BCM chips - perhaps enabling link
1603 	 * state change attentions implies clearing pending attention.
1604 	 */
1605 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1606 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1607 	    BGE_MACSTAT_LINK_CHANGED);
1608 
1609 	/* Enable link state change attentions. */
1610 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1611 
1612 	return (0);
1613 }
1614 
1615 const struct bge_revision *
1616 bge_lookup_rev(uint32_t chipid)
1617 {
1618 	const struct bge_revision *br;
1619 
1620 	for (br = bge_revisions; br->br_name != NULL; br++) {
1621 		if (br->br_chipid == chipid)
1622 			return (br);
1623 	}
1624 
1625 	for (br = bge_majorrevs; br->br_name != NULL; br++) {
1626 		if (br->br_chipid == BGE_ASICREV(chipid))
1627 			return (br);
1628 	}
1629 
1630 	return (NULL);
1631 }
1632 
1633 const struct bge_vendor *
1634 bge_lookup_vendor(uint16_t vid)
1635 {
1636 	const struct bge_vendor *v;
1637 
1638 	for (v = bge_vendors; v->v_name != NULL; v++)
1639 		if (v->v_id == vid)
1640 			return (v);
1641 
1642 	panic("%s: unknown vendor %d", __func__, vid);
1643 	return (NULL);
1644 }
1645 
1646 /*
1647  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1648  * against our list and return its name if we find a match.
1649  *
1650  * Note that since the Broadcom controller contains VPD support, we
1651  * try to get the device name string from the controller itself instead
1652  * of the compiled-in string. It guarantees we'll always announce the
1653  * right product name. We fall back to the compiled-in string when
1654  * VPD is unavailable or corrupt.
1655  */
1656 static int
1657 bge_probe(device_t dev)
1658 {
1659 	struct bge_type *t = bge_devs;
1660 	struct bge_softc *sc = device_get_softc(dev);
1661 	uint16_t vid, did;
1662 
1663 	sc->bge_dev = dev;
1664 	vid = pci_get_vendor(dev);
1665 	did = pci_get_device(dev);
	while (t->bge_vid != 0) {
1667 		if ((vid == t->bge_vid) && (did == t->bge_did)) {
1668 			char model[64], buf[96];
1669 			const struct bge_revision *br;
1670 			const struct bge_vendor *v;
1671 			const char *pname;
1672 			uint32_t id;
1673 
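			/* Read the ASIC revision for the probe message. */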
1674 			id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1675 			    BGE_PCIMISCCTL_ASICREV;
1676 			br = bge_lookup_rev(id);
1677 			v = bge_lookup_vendor(vid);
1678 			if (pci_get_vpd_ident(dev, &pname))
1679 				snprintf(model, 64, "%s %s",
1680 				    v->v_name,
1681 				    br != NULL ? br->br_name :
1682 					"NetXtreme Ethernet Controller");
1683 			else
1684 				snprintf(model, 64, "%s", pname);
1685 			snprintf(buf, 96, "%s, %sASIC rev. %#04x", model,
1686 			    br != NULL ? "" : "unknown ", id >> 16);
1687 			device_set_desc_copy(dev, buf);
1688 			if (pci_get_subvendor(dev) == DELL_VENDORID)
1689 				sc->bge_flags |= BGE_FLAG_NO3LED;
1690 			return (0);
1691 		}
1692 		t++;
1693 	}
1694 
1695 	return (ENXIO);
1696 }
1697 
1698 static void
1699 bge_dma_free(struct bge_softc *sc)
1700 {
1701 	int i;
1702 
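	/*
	 * Release everything bge_dma_alloc() set up: the per-buffer DMA
	 * maps and mbuf tags, each ring's map, memory and tag, and the
	 * parent tag.
	 */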
1703 	/* Destroy DMA maps for RX buffers. */
1704 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1705 		if (sc->bge_cdata.bge_rx_std_dmamap[i])
1706 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1707 			    sc->bge_cdata.bge_rx_std_dmamap[i]);
1708 	}
1709 
1710 	/* Destroy DMA maps for jumbo RX buffers. */
1711 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1712 		if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1713 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1714 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1715 	}
1716 
1717 	/* Destroy DMA maps for TX buffers. */
1718 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1719 		if (sc->bge_cdata.bge_tx_dmamap[i])
1720 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1721 			    sc->bge_cdata.bge_tx_dmamap[i]);
1722 	}
1723 
	if (sc->bge_cdata.bge_mtag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);

	if (sc->bge_cdata.bge_mtag_jumbo)
		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag_jumbo);

1728 	/* Destroy standard RX ring. */
1729 	if (sc->bge_cdata.bge_rx_std_ring_map)
1730 		bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1731 		    sc->bge_cdata.bge_rx_std_ring_map);
1732 	if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
1733 		bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1734 		    sc->bge_ldata.bge_rx_std_ring,
1735 		    sc->bge_cdata.bge_rx_std_ring_map);
1736 
1737 	if (sc->bge_cdata.bge_rx_std_ring_tag)
1738 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1739 
1740 	/* Destroy jumbo RX ring. */
1741 	if (sc->bge_cdata.bge_rx_jumbo_ring_map)
1742 		bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1743 		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1744 
1745 	if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
1746 	    sc->bge_ldata.bge_rx_jumbo_ring)
1747 		bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1748 		    sc->bge_ldata.bge_rx_jumbo_ring,
1749 		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1750 
1751 	if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1752 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1753 
1754 	/* Destroy RX return ring. */
1755 	if (sc->bge_cdata.bge_rx_return_ring_map)
1756 		bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1757 		    sc->bge_cdata.bge_rx_return_ring_map);
1758 
1759 	if (sc->bge_cdata.bge_rx_return_ring_map &&
1760 	    sc->bge_ldata.bge_rx_return_ring)
1761 		bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1762 		    sc->bge_ldata.bge_rx_return_ring,
1763 		    sc->bge_cdata.bge_rx_return_ring_map);
1764 
1765 	if (sc->bge_cdata.bge_rx_return_ring_tag)
1766 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
1767 
1768 	/* Destroy TX ring. */
1769 	if (sc->bge_cdata.bge_tx_ring_map)
1770 		bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
1771 		    sc->bge_cdata.bge_tx_ring_map);
1772 
1773 	if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
1774 		bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
1775 		    sc->bge_ldata.bge_tx_ring,
1776 		    sc->bge_cdata.bge_tx_ring_map);
1777 
1778 	if (sc->bge_cdata.bge_tx_ring_tag)
1779 		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
1780 
1781 	/* Destroy status block. */
1782 	if (sc->bge_cdata.bge_status_map)
1783 		bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
1784 		    sc->bge_cdata.bge_status_map);
1785 
1786 	if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
1787 		bus_dmamem_free(sc->bge_cdata.bge_status_tag,
1788 		    sc->bge_ldata.bge_status_block,
1789 		    sc->bge_cdata.bge_status_map);
1790 
1791 	if (sc->bge_cdata.bge_status_tag)
1792 		bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1793 
1794 	/* Destroy statistics block. */
1795 	if (sc->bge_cdata.bge_stats_map)
1796 		bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
1797 		    sc->bge_cdata.bge_stats_map);
1798 
1799 	if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
1800 		bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
1801 		    sc->bge_ldata.bge_stats,
1802 		    sc->bge_cdata.bge_stats_map);
1803 
1804 	if (sc->bge_cdata.bge_stats_tag)
1805 		bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1806 
1807 	/* Destroy the parent tag. */
1808 	if (sc->bge_cdata.bge_parent_tag)
1809 		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1810 }
1811 
1812 static int
1813 bge_dma_alloc(device_t dev)
1814 {
1815 	struct bge_dmamap_arg ctx;
1816 	struct bge_softc *sc;
1817 	int i, error;
1818 
1819 	sc = device_get_softc(dev);
1820 
1821 	/*
1822 	 * Allocate the parent bus DMA tag appropriate for PCI.
1823 	 */
1824 	error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),/* parent */
1825 			1, 0,			/* alignment, boundary */
1826 			BUS_SPACE_MAXADDR,	/* lowaddr */
1827 			BUS_SPACE_MAXADDR,	/* highaddr */
1828 			NULL, NULL,		/* filter, filterarg */
1829 			MAXBSIZE, BGE_NSEG_NEW,	/* maxsize, nsegments */
1830 			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1831 			0,			/* flags */
1832 			NULL, NULL,		/* lockfunc, lockarg */
1833 			&sc->bge_cdata.bge_parent_tag);
1834 
1835 	if (error != 0) {
1836 		device_printf(sc->bge_dev,
1837 		    "could not allocate parent dma tag\n");
1838 		return (ENOMEM);
1839 	}
1840 
1841 	/*
1842 	 * Create tag for RX mbufs.
1843 	 */
1844 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
1845 	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1846 	    NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
1847 	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
1848 
1849 	if (error) {
1850 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1851 		return (ENOMEM);
1852 	}
1853 
1854 	/* Create DMA maps for RX buffers. */
1855 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1856 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1857 			    &sc->bge_cdata.bge_rx_std_dmamap[i]);
1858 		if (error) {
1859 			device_printf(sc->bge_dev,
1860 			    "can't create DMA map for RX\n");
1861 			return (ENOMEM);
1862 		}
1863 	}
1864 
1865 	/* Create DMA maps for TX buffers. */
1866 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1867 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1868 			    &sc->bge_cdata.bge_tx_dmamap[i]);
1869 		if (error) {
1870 			device_printf(sc->bge_dev,
			    "can't create DMA map for TX\n");
1872 			return (ENOMEM);
1873 		}
1874 	}
1875 
1876 	/* Create tag for standard RX ring. */
1877 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1878 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1879 	    NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1880 	    NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1881 
1882 	if (error) {
1883 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1884 		return (ENOMEM);
1885 	}
1886 
1887 	/* Allocate DMA'able memory for standard RX ring. */
1888 	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1889 	    (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1890 	    &sc->bge_cdata.bge_rx_std_ring_map);
1891 	if (error)
1892 		return (ENOMEM);
1893 
1894 	bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1895 
1896 	/* Load the address of the standard RX ring. */
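	/* bge_dma_map_addr() returns the bus address via ctx.bge_busaddr. */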
1897 	ctx.bge_maxsegs = 1;
1898 	ctx.sc = sc;
1899 
1900 	error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
1901 	    sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
1902 	    BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1903 
1904 	if (error)
1905 		return (ENOMEM);
1906 
1907 	sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
1908 
1909 	/* Create tags for jumbo mbufs. */
1910 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
1911 		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1912 		    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1913 		    NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
1914 		    0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
1915 		if (error) {
1916 			device_printf(sc->bge_dev,
1917 			    "could not allocate jumbo dma tag\n");
1918 			return (ENOMEM);
1919 		}
1920 
1921 		/* Create tag for jumbo RX ring. */
1922 		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1923 		    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1924 		    NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
1925 		    NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
1926 
1927 		if (error) {
1928 			device_printf(sc->bge_dev,
1929 			    "could not allocate jumbo ring dma tag\n");
1930 			return (ENOMEM);
1931 		}
1932 
1933 		/* Allocate DMA'able memory for jumbo RX ring. */
1934 		error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1935 		    (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
1936 		    BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1937 		    &sc->bge_cdata.bge_rx_jumbo_ring_map);
1938 		if (error)
1939 			return (ENOMEM);
1940 
1941 		/* Load the address of the jumbo RX ring. */
1942 		ctx.bge_maxsegs = 1;
1943 		ctx.sc = sc;
1944 
1945 		error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1946 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
1947 		    sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
1948 		    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1949 
1950 		if (error)
1951 			return (ENOMEM);
1952 
1953 		sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
1954 
1955 		/* Create DMA maps for jumbo RX buffers. */
1956 		for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1957 			error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
1958 				    0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1959 			if (error) {
1960 				device_printf(sc->bge_dev,
1961 				    "can't create DMA map for jumbo RX\n");
1962 				return (ENOMEM);
1963 			}
1964 		}
1965 
1966 	}
1967 
1968 	/* Create tag for RX return ring. */
1969 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1970 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1971 	    NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
1972 	    NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
1973 
1974 	if (error) {
1975 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1976 		return (ENOMEM);
1977 	}
1978 
1979 	/* Allocate DMA'able memory for RX return ring. */
1980 	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
1981 	    (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
1982 	    &sc->bge_cdata.bge_rx_return_ring_map);
1983 	if (error)
1984 		return (ENOMEM);
1985 
1986 	bzero((char *)sc->bge_ldata.bge_rx_return_ring,
1987 	    BGE_RX_RTN_RING_SZ(sc));
1988 
1989 	/* Load the address of the RX return ring. */
1990 	ctx.bge_maxsegs = 1;
1991 	ctx.sc = sc;
1992 
1993 	error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
1994 	    sc->bge_cdata.bge_rx_return_ring_map,
1995 	    sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
1996 	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1997 
1998 	if (error)
1999 		return (ENOMEM);
2000 
2001 	sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
2002 
2003 	/* Create tag for TX ring. */
2004 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2005 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2006 	    NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
2007 	    &sc->bge_cdata.bge_tx_ring_tag);
2008 
2009 	if (error) {
2010 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
2011 		return (ENOMEM);
2012 	}
2013 
2014 	/* Allocate DMA'able memory for TX ring. */
2015 	error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
2016 	    (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
2017 	    &sc->bge_cdata.bge_tx_ring_map);
2018 	if (error)
2019 		return (ENOMEM);
2020 
2021 	bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
2022 
2023 	/* Load the address of the TX ring. */
2024 	ctx.bge_maxsegs = 1;
2025 	ctx.sc = sc;
2026 
2027 	error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
2028 	    sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
2029 	    BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2030 
2031 	if (error)
2032 		return (ENOMEM);
2033 
2034 	sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
2035 
2036 	/* Create tag for status block. */
2037 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2038 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2039 	    NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
2040 	    NULL, NULL, &sc->bge_cdata.bge_status_tag);
2041 
2042 	if (error) {
2043 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
2044 		return (ENOMEM);
2045 	}
2046 
2047 	/* Allocate DMA'able memory for status block. */
2048 	error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
2049 	    (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
2050 	    &sc->bge_cdata.bge_status_map);
2051 	if (error)
2052 		return (ENOMEM);
2053 
2054 	bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2055 
2056 	/* Load the address of the status block. */
2057 	ctx.sc = sc;
2058 	ctx.bge_maxsegs = 1;
2059 
2060 	error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2061 	    sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2062 	    BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2063 
2064 	if (error)
2065 		return (ENOMEM);
2066 
2067 	sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2068 
2069 	/* Create tag for statistics block. */
2070 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2071 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2072 	    NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2073 	    &sc->bge_cdata.bge_stats_tag);
2074 
2075 	if (error) {
2076 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
2077 		return (ENOMEM);
2078 	}
2079 
2080 	/* Allocate DMA'able memory for statistics block. */
2081 	error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2082 	    (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2083 	    &sc->bge_cdata.bge_stats_map);
2084 	if (error)
2085 		return (ENOMEM);
2086 
2087 	bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2088 
	/* Load the address of the statistics block. */
2090 	ctx.sc = sc;
2091 	ctx.bge_maxsegs = 1;
2092 
2093 	error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2094 	    sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2095 	    BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2096 
2097 	if (error)
2098 		return (ENOMEM);
2099 
2100 	sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2101 
2102 	return (0);
2103 }
2104 
2105 /*
2106  * Return true if this device has more than one port.
2107  */
2108 static int
2109 bge_has_multiple_ports(struct bge_softc *sc)
2110 {
2111 	device_t dev = sc->bge_dev;
2112 	u_int b, s, f, fscan;
2113 
2114 	b = pci_get_bus(dev);
2115 	s = pci_get_slot(dev);
2116 	f = pci_get_function(dev);
2117 	for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2118 		if (fscan != f && pci_find_bsf(b, s, fscan) != NULL)
2119 			return (1);
2120 	return (0);
2121 }
2122 
2123 /*
2124  * Return true if MSI can be used with this device.
2125  */
2126 static int
2127 bge_can_use_msi(struct bge_softc *sc)
2128 {
2129 	int can_use_msi = 0;
2130 
2131 	switch (sc->bge_asicrev) {
2132 	case BGE_ASICREV_BCM5714:
2133 		/*
2134 		 * Apparently, MSI doesn't work when this chip is configured
2135 		 * in single-port mode.
2136 		 */
2137 		if (bge_has_multiple_ports(sc))
2138 			can_use_msi = 1;
2139 		break;
2140 	case BGE_ASICREV_BCM5750:
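		/*
		 * MSI is only usable on 5750 revisions later than the
		 * AX and BX steppings.
		 */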
2141 		if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2142 		    sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2143 			can_use_msi = 1;
2144 		break;
2145 	case BGE_ASICREV_BCM5752:
2146 	case BGE_ASICREV_BCM5780:
2147 		can_use_msi = 1;
2148 		break;
2149 	}
2150 	return (can_use_msi);
2151 }
2152 
2153 static int
2154 bge_attach(device_t dev)
2155 {
2156 	struct ifnet *ifp;
2157 	struct bge_softc *sc;
2158 	uint32_t hwcfg = 0;
2159 	uint32_t mac_tmp = 0;
2160 	u_char eaddr[6];
2161 	int error = 0, msicount, rid, trys, reg;
2162 
2163 	sc = device_get_softc(dev);
2164 	sc->bge_dev = dev;
2165 
2166 	/*
2167 	 * Map control/status registers.
2168 	 */
2169 	pci_enable_busmaster(dev);
2170 
2171 	rid = BGE_PCI_BAR0;
2172 	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2173 	    RF_ACTIVE|PCI_RF_DENSE);
2174 
2175 	if (sc->bge_res == NULL) {
		device_printf(sc->bge_dev, "couldn't map memory\n");
2177 		error = ENXIO;
2178 		goto fail;
2179 	}
2180 
2181 	sc->bge_btag = rman_get_bustag(sc->bge_res);
2182 	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2183 
2184 	/* Save ASIC rev. */
2185 
2186 	sc->bge_chipid =
2187 	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2188 	    BGE_PCIMISCCTL_ASICREV;
2189 	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2190 	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2191 
2192 	/* Save chipset family. */
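	/*
	 * The 5714-family and 575X cases below fall through on purpose so
	 * those chips also pick up the 575X_PLUS and 5705_PLUS flags.
	 */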
2193 	switch (sc->bge_asicrev) {
2194 	case BGE_ASICREV_BCM5700:
2195 	case BGE_ASICREV_BCM5701:
2196 	case BGE_ASICREV_BCM5703:
2197 	case BGE_ASICREV_BCM5704:
2198 		sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2199 		break;
2200 
2201 	case BGE_ASICREV_BCM5714_A0:
2202 	case BGE_ASICREV_BCM5780:
2203 	case BGE_ASICREV_BCM5714:
2204 		sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
2205 		/* Fall through */
2206 
2207 	case BGE_ASICREV_BCM5750:
2208 	case BGE_ASICREV_BCM5752:
2209 	case BGE_ASICREV_BCM5755:
2210 	case BGE_ASICREV_BCM5787:
2211 		sc->bge_flags |= BGE_FLAG_575X_PLUS;
2212 		/* Fall through */
2213 
2214 	case BGE_ASICREV_BCM5705:
2215 		sc->bge_flags |= BGE_FLAG_5705_PLUS;
2216 		break;
2217 	}
2218 
	/*
	 * Check if this is a PCI-X or PCI Express device.
	 */
2222 #if __FreeBSD_version > 700010
2223 	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
2224 		/*
2225 		 * Found a PCI Express capabilities register, this
2226 		 * must be a PCI Express device.
2227 		 */
2228 		if (reg != 0)
2229 			sc->bge_flags |= BGE_FLAG_PCIE;
2230 	} else if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0) {
2231 		if (reg != 0)
2232 			sc->bge_flags |= BGE_FLAG_PCIX;
2233 	}
2234 
2235 #else
2236 	if (BGE_IS_5705_PLUS(sc)) {
2237 		reg = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2238 		if ((reg & 0xff) == BGE_PCIE_CAPID)
2239 			sc->bge_flags |= BGE_FLAG_PCIE;
2240 	} else {
2241 		/*
2242 		 * Check if the device is in PCI-X Mode.
2243 		 * (This bit is not valid on PCI Express controllers.)
2244 		 */
2245 		if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
2246 		    BGE_PCISTATE_PCI_BUSMODE) == 0)
2247 			sc->bge_flags |= BGE_FLAG_PCIX;
2248 	}
2249 #endif
2250 
2251 	/*
2252 	 * Allocate the interrupt, using MSI if possible.  These devices
2253 	 * support 8 MSI messages, but only the first one is used in
2254 	 * normal operation.
2255 	 */
2256 	if (bge_can_use_msi(sc)) {
2257 		msicount = pci_msi_count(dev);
2258 		if (msicount > 1)
2259 			msicount = 1;
2260 	} else
2261 		msicount = 0;
2262 	if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
2263 		rid = 1;
2264 		sc->bge_flags |= BGE_FLAG_MSI;
2265 	} else
2266 		rid = 0;
2267 
2268 	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2269 	    RF_SHAREABLE | RF_ACTIVE);
2270 
2271 	if (sc->bge_irq == NULL) {
2272 		device_printf(sc->bge_dev, "couldn't map interrupt\n");
2273 		error = ENXIO;
2274 		goto fail;
2275 	}
2276 
2277 	BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2278 
2279 	/* Try to reset the chip. */
2280 	if (bge_reset(sc)) {
2281 		device_printf(sc->bge_dev, "chip reset failed\n");
2282 		bge_release_resources(sc);
2283 		error = ENXIO;
2284 		goto fail;
2285 	}
2286 
2287 	sc->bge_asf_mode = 0;
2288 	if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2289 	    == BGE_MAGIC_NUMBER)) {
2290 		if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2291 		    & BGE_HWCFG_ASF) {
2292 			sc->bge_asf_mode |= ASF_ENABLE;
2293 			sc->bge_asf_mode |= ASF_STACKUP;
2294 			if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
2295 				sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2296 			}
2297 		}
2298 	}
2299 
2300 	/* Try to reset the chip again the nice way. */
2301 	bge_stop_fw(sc);
2302 	bge_sig_pre_reset(sc, BGE_RESET_STOP);
2303 	if (bge_reset(sc)) {
2304 		device_printf(sc->bge_dev, "chip reset failed\n");
2305 		bge_release_resources(sc);
2306 		error = ENXIO;
2307 		goto fail;
2308 	}
2309 
2310 	bge_sig_legacy(sc, BGE_RESET_STOP);
2311 	bge_sig_post_reset(sc, BGE_RESET_STOP);
2312 
2313 	if (bge_chipinit(sc)) {
2314 		device_printf(sc->bge_dev, "chip initialization failed\n");
2315 		bge_release_resources(sc);
2316 		error = ENXIO;
2317 		goto fail;
2318 	}
2319 
	/*
	 * Get the station address, preferably from the chip's internal
	 * memory (a leading 0x484b, i.e. "HK", marks a valid address
	 * there), falling back to the EEPROM otherwise.
	 */
2323 	mac_tmp = bge_readmem_ind(sc, 0x0c14);
2324 	if ((mac_tmp >> 16) == 0x484b) {
2325 		eaddr[0] = (u_char)(mac_tmp >> 8);
2326 		eaddr[1] = (u_char)mac_tmp;
2327 		mac_tmp = bge_readmem_ind(sc, 0x0c18);
2328 		eaddr[2] = (u_char)(mac_tmp >> 24);
2329 		eaddr[3] = (u_char)(mac_tmp >> 16);
2330 		eaddr[4] = (u_char)(mac_tmp >> 8);
2331 		eaddr[5] = (u_char)mac_tmp;
2332 	} else if (bge_read_eeprom(sc, eaddr,
2333 	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2334 		device_printf(sc->bge_dev, "failed to read station address\n");
2335 		bge_release_resources(sc);
2336 		error = ENXIO;
2337 		goto fail;
2338 	}
2339 
2340 	/* 5705 limits RX return ring to 512 entries. */
2341 	if (BGE_IS_5705_PLUS(sc))
2342 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2343 	else
2344 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2345 
2346 	if (bge_dma_alloc(dev)) {
2347 		device_printf(sc->bge_dev,
2348 		    "failed to allocate DMA resources\n");
2349 		bge_release_resources(sc);
2350 		error = ENXIO;
2351 		goto fail;
2352 	}
2353 
2354 	/* Set default tuneable values. */
2355 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2356 	sc->bge_rx_coal_ticks = 150;
2357 	sc->bge_tx_coal_ticks = 150;
2358 	sc->bge_rx_max_coal_bds = 10;
2359 	sc->bge_tx_max_coal_bds = 10;
2360 
2361 	/* Set up ifnet structure */
2362 	ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2363 	if (ifp == NULL) {
2364 		device_printf(sc->bge_dev, "failed to if_alloc()\n");
2365 		bge_release_resources(sc);
2366 		error = ENXIO;
2367 		goto fail;
2368 	}
2369 	ifp->if_softc = sc;
2370 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2371 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2372 	ifp->if_ioctl = bge_ioctl;
2373 	ifp->if_start = bge_start;
2374 	ifp->if_init = bge_init;
2375 	ifp->if_mtu = ETHERMTU;
2376 	ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2377 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2378 	IFQ_SET_READY(&ifp->if_snd);
2379 	ifp->if_hwassist = BGE_CSUM_FEATURES;
2380 	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2381 	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM;
2382 	ifp->if_capenable = ifp->if_capabilities;
2383 #ifdef DEVICE_POLLING
2384 	ifp->if_capabilities |= IFCAP_POLLING;
2385 #endif
2386 
2387 	/*
2388 	 * 5700 B0 chips do not support checksumming correctly due
2389 	 * to hardware bugs.
2390 	 */
2391 	if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2392 		ifp->if_capabilities &= ~IFCAP_HWCSUM;
		ifp->if_capenable &= ~IFCAP_HWCSUM;
2394 		ifp->if_hwassist = 0;
2395 	}
2396 
2397 	/*
2398 	 * Figure out what sort of media we have by checking the
2399 	 * hardware config word in the first 32k of NIC internal memory,
2400 	 * or fall back to examining the EEPROM if necessary.
2401 	 * Note: on some BCM5700 cards, this value appears to be unset.
2402 	 * If that's the case, we have to rely on identifying the NIC
2403 	 * by its PCI subsystem ID, as we do below for the SysKonnect
2404 	 * SK-9D41.
2405 	 */
2406 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2407 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2408 	else {
2409 		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2410 		    sizeof(hwcfg))) {
2411 			device_printf(sc->bge_dev, "failed to read EEPROM\n");
2412 			bge_release_resources(sc);
2413 			error = ENXIO;
2414 			goto fail;
2415 		}
2416 		hwcfg = ntohl(hwcfg);
2417 	}
2418 
2419 	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2420 		sc->bge_flags |= BGE_FLAG_TBI;
2421 
2422 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
2423 	if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2424 		sc->bge_flags |= BGE_FLAG_TBI;
2425 
2426 	if (sc->bge_flags & BGE_FLAG_TBI) {
2427 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2428 		    bge_ifmedia_upd, bge_ifmedia_sts);
2429 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2430 		ifmedia_add(&sc->bge_ifmedia,
2431 		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2432 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2433 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2434 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2435 	} else {
2436 		/*
		 * Do transceiver setup and tell the firmware the
		 * driver is down so we can try to get access to probe
		 * the PHY while ASF is running.  Retry a couple of times
2440 		 * if we get a conflict with the ASF firmware accessing
2441 		 * the PHY.
2442 		 */
		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
		trys = 0;
again:
		bge_asf_driver_up(sc);

		if (mii_phy_probe(dev, &sc->bge_miibus,
2449 		    bge_ifmedia_upd, bge_ifmedia_sts)) {
2450 			if (trys++ < 4) {
2451 				device_printf(sc->bge_dev, "Try again\n");
				bge_miibus_writereg(sc->bge_dev, 1,
				    MII_BMCR, BMCR_RESET);
2453 				goto again;
2454 			}
2455 
2456 			device_printf(sc->bge_dev, "MII without any PHY!\n");
2457 			bge_release_resources(sc);
2458 			error = ENXIO;
2459 			goto fail;
2460 		}
2461 
2462 		/*
2463 		 * Now tell the firmware we are going up after probing the PHY
2464 		 */
2465 		if (sc->bge_asf_mode & ASF_STACKUP)
2466 			BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2467 	}
2468 
2469 	/*
2470 	 * When using the BCM5701 in PCI-X mode, data corruption has
2471 	 * been observed in the first few bytes of some received packets.
2472 	 * Aligning the packet buffer in memory eliminates the corruption.
2473 	 * Unfortunately, this misaligns the packet payloads.  On platforms
2474 	 * which do not support unaligned accesses, we will realign the
2475 	 * payloads by copying the received packets.
2476 	 */
2477 	if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2478 	    sc->bge_flags & BGE_FLAG_PCIX)
		sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2480 
2481 	/*
2482 	 * Call MI attach routine.
2483 	 */
2484 	ether_ifattach(ifp, eaddr);
2485 	callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
2486 
2487 	/*
2488 	 * Hookup IRQ last.
2489 	 */
2490 	error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2491 	   bge_intr, sc, &sc->bge_intrhand);
2492 
	if (error) {
		device_printf(sc->bge_dev, "couldn't set up irq\n");
		bge_detach(dev);
		goto fail;
	}
2497 
2498 	bge_add_sysctls(sc);
2499 
2500 fail:
2501 	return (error);
2502 }
2503 
2504 static int
2505 bge_detach(device_t dev)
2506 {
2507 	struct bge_softc *sc;
2508 	struct ifnet *ifp;
2509 
2510 	sc = device_get_softc(dev);
2511 	ifp = sc->bge_ifp;
2512 
2513 #ifdef DEVICE_POLLING
2514 	if (ifp->if_capenable & IFCAP_POLLING)
2515 		ether_poll_deregister(ifp);
2516 #endif
2517 
2518 	BGE_LOCK(sc);
2519 	bge_stop(sc);
2520 	bge_reset(sc);
2521 	BGE_UNLOCK(sc);
2522 
2523 	callout_drain(&sc->bge_stat_ch);
2524 
2525 	ether_ifdetach(ifp);
2526 
2527 	if (sc->bge_flags & BGE_FLAG_TBI) {
2528 		ifmedia_removeall(&sc->bge_ifmedia);
2529 	} else {
2530 		bus_generic_detach(dev);
2531 		device_delete_child(dev, sc->bge_miibus);
2532 	}
2533 
2534 	bge_release_resources(sc);
2535 
2536 	return (0);
2537 }
2538 
2539 static void
2540 bge_release_resources(struct bge_softc *sc)
2541 {
2542 	device_t dev;
2543 
2544 	dev = sc->bge_dev;
2545 
2546 	if (sc->bge_intrhand != NULL)
2547 		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2548 
2549 	if (sc->bge_irq != NULL)
2550 		bus_release_resource(dev, SYS_RES_IRQ,
2551 		    sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
2552 
2553 	if (sc->bge_flags & BGE_FLAG_MSI)
2554 		pci_release_msi(dev);
2555 
2556 	if (sc->bge_res != NULL)
2557 		bus_release_resource(dev, SYS_RES_MEMORY,
2558 		    BGE_PCI_BAR0, sc->bge_res);
2559 
2560 	if (sc->bge_ifp != NULL)
2561 		if_free(sc->bge_ifp);
2562 
2563 	bge_dma_free(sc);
2564 
2565 	if (mtx_initialized(&sc->bge_mtx))	/* XXX */
2566 		BGE_LOCK_DESTROY(sc);
2567 }
2568 
2569 static int
2570 bge_reset(struct bge_softc *sc)
2571 {
2572 	device_t dev;
2573 	uint32_t cachesize, command, pcistate, reset;
2574 	void (*write_op)(struct bge_softc *, int, int);
2575 	int i, val = 0;
2576 
2577 	dev = sc->bge_dev;
2578 
2579 	if (BGE_IS_5705_PLUS(sc) && !BGE_IS_5714_FAMILY(sc)) {
2580 		if (sc->bge_flags & BGE_FLAG_PCIE)
2581 			write_op = bge_writemem_direct;
2582 		else
2583 			write_op = bge_writemem_ind;
2584 	} else
2585 		write_op = bge_writereg_ind;
2586 
2587 	/* Save some important PCI state. */
2588 	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2589 	command = pci_read_config(dev, BGE_PCI_CMD, 4);
2590 	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2591 
2592 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2593 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2595 
2596 	/* Disable fastboot on controllers that support it. */
2597 	if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
2598 	    sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2599 	    sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2600 		if (bootverbose)
2601 			device_printf(sc->bge_dev, "Disabling fastboot\n");
2602 		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2603 	}
2604 
2605 	/*
2606 	 * Write the magic number to SRAM at offset 0xB50.
2607 	 * When firmware finishes its initialization it will
2608 	 * write ~BGE_MAGIC_NUMBER to the same location.
2609 	 */
2610 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2611 
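	/*
	 * Core-clock reset request; (65 << 1) programs the 32-bit timer
	 * prescaler for a 66 MHz core clock.
	 */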
2612 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2613 
2614 	/* XXX: Broadcom Linux driver. */
2615 	if (sc->bge_flags & BGE_FLAG_PCIE) {
2616 		if (CSR_READ_4(sc, 0x7e2c) == 0x60)	/* PCIE 1.0 */
2617 			CSR_WRITE_4(sc, 0x7e2c, 0x20);
2618 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2619 			/* Prevent PCIE link training during global reset */
2620 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2621 			reset |= (1<<29);
2622 		}
2623 	}
2624 
2625 	/*
2626 	 * Set GPHY Power Down Override to leave GPHY
2627 	 * powered up in D0 uninitialized.
2628 	 */
2629 	if (BGE_IS_5705_PLUS(sc))
2630 		reset |= 0x04000000;
2631 
2632 	/* Issue global reset */
2633 	write_op(sc, BGE_MISC_CFG, reset);
2634 
2635 	DELAY(1000);
2636 
2637 	/* XXX: Broadcom Linux driver. */
2638 	if (sc->bge_flags & BGE_FLAG_PCIE) {
2639 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2640 			uint32_t v;
2641 
2642 			DELAY(500000); /* wait for link training to complete */
2643 			v = pci_read_config(dev, 0xc4, 4);
2644 			pci_write_config(dev, 0xc4, v | (1<<15), 4);
2645 		}
2646 		/*
2647 		 * Set PCIE max payload size to 128 bytes and clear error
2648 		 * status.
2649 		 */
2650 		pci_write_config(dev, 0xd8, 0xf5000, 4);
2651 	}
2652 
2653 	/* Reset some of the PCI state that got zapped by reset. */
2654 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2655 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2656 	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2657 	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2658 	pci_write_config(dev, BGE_PCI_CMD, command, 4);
2659 	write_op(sc, BGE_MISC_CFG, (65 << 1));
2660 
	/* Re-enable MSI, if necessary, and enable the memory arbiter. */
2662 	if (BGE_IS_5714_FAMILY(sc)) {
2663 		uint32_t val;
2664 
2665 		/* This chip disables MSI on reset. */
2666 		if (sc->bge_flags & BGE_FLAG_MSI) {
2667 			val = pci_read_config(dev, BGE_PCI_MSI_CTL, 2);
2668 			pci_write_config(dev, BGE_PCI_MSI_CTL,
2669 			    val | PCIM_MSICTRL_MSI_ENABLE, 2);
2670 			val = CSR_READ_4(sc, BGE_MSI_MODE);
2671 			CSR_WRITE_4(sc, BGE_MSI_MODE,
2672 			    val | BGE_MSIMODE_ENABLE);
2673 		}
2674 		val = CSR_READ_4(sc, BGE_MARB_MODE);
2675 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2676 	} else
2677 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2678 
2679 	/*
2680 	 * Poll until we see the 1's complement of the magic number.
2681 	 * This indicates that the firmware initialization
2682 	 * is complete.
2683 	 */
2684 	for (i = 0; i < BGE_TIMEOUT; i++) {
2685 		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2686 		if (val == ~BGE_MAGIC_NUMBER)
2687 			break;
2688 		DELAY(10);
2689 	}
2690 
2691 	if (i == BGE_TIMEOUT) {
2692 		device_printf(sc->bge_dev, "firmware handshake timed out, "
2693 		    "found 0x%08x\n", val);
2694 	}
2695 
2696 	/*
2697 	 * XXX Wait for the value of the PCISTATE register to
2698 	 * return to its original pre-reset state. This is a
2699 	 * fairly good indicator of reset completion. If we don't
2700 	 * wait for the reset to fully complete, trying to read
2701 	 * from the device's non-PCI registers may yield garbage
2702 	 * results.
2703 	 */
2704 	for (i = 0; i < BGE_TIMEOUT; i++) {
2705 		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2706 			break;
2707 		DELAY(10);
2708 	}
2709 
2710 	if (sc->bge_flags & BGE_FLAG_PCIE) {
2711 		reset = bge_readmem_ind(sc, 0x7c00);
2712 		bge_writemem_ind(sc, 0x7c00, reset | (1 << 25));
2713 	}
2714 
2715 	/* Fix up byte swapping. */
2716 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
2717 	    BGE_MODECTL_BYTESWAP_DATA);
2718 
2719 	/* Tell the ASF firmware we are up */
2720 	if (sc->bge_asf_mode & ASF_STACKUP)
2721 		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2722 
2723 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2724 
2725 	/*
2726 	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to ensure the SERDES drive level is set
2728 	 * to 1.2V.
2729 	 */
2730 	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
2731 	    sc->bge_flags & BGE_FLAG_TBI) {
2732 		uint32_t serdescfg;
2733 
2734 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2735 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
2736 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2737 	}
2738 
2739 	/* XXX: Broadcom Linux driver. */
2740 	if (sc->bge_flags & BGE_FLAG_PCIE &&
2741 	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2742 		uint32_t v;
2743 
2744 		v = CSR_READ_4(sc, 0x7c00);
2745 		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2746 	}
2747 	DELAY(10000);
2748 
	return (0);
2750 }
2751 
2752 /*
2753  * Frame reception handling. This is called if there's a frame
2754  * on the receive return list.
2755  *
2756  * Note: we have to be able to handle two possibilities here:
2757  * 1) the frame is from the jumbo receive ring
2758  * 2) the frame is from the standard receive ring
2759  */
2760 
2761 static void
2762 bge_rxeof(struct bge_softc *sc)
2763 {
2764 	struct ifnet *ifp;
2765 	int stdcnt = 0, jumbocnt = 0;
2766 
2767 	BGE_LOCK_ASSERT(sc);
2768 
2769 	/* Nothing to do. */
2770 	if (sc->bge_rx_saved_considx ==
2771 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
2772 		return;
2773 
2774 	ifp = sc->bge_ifp;
2775 
2776 	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2777 	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
2778 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2779 	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
2780 	if (BGE_IS_JUMBO_CAPABLE(sc))
2781 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2782 		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTREAD);
2783 
	while (sc->bge_rx_saved_considx !=
2785 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2786 		struct bge_rx_bd	*cur_rx;
2787 		uint32_t		rxidx;
2788 		struct mbuf		*m = NULL;
2789 		uint16_t		vlan_tag = 0;
2790 		int			have_tag = 0;
2791 
2792 #ifdef DEVICE_POLLING
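		/* In polling mode, stop once the RX cycle budget is spent. */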
2793 		if (ifp->if_capenable & IFCAP_POLLING) {
2794 			if (sc->rxcycles <= 0)
2795 				break;
2796 			sc->rxcycles--;
2797 		}
2798 #endif
2799 
2800 		cur_rx =
2801 	    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2802 
2803 		rxidx = cur_rx->bge_idx;
2804 		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2805 
2806 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2807 			have_tag = 1;
2808 			vlan_tag = cur_rx->bge_vlan_tag;
2809 		}
2810 
2811 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2812 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2813 			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
2814 			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
2815 			    BUS_DMASYNC_POSTREAD);
2816 			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
2817 			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
2818 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2819 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2820 			jumbocnt++;
2821 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2822 				ifp->if_ierrors++;
2823 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2824 				continue;
2825 			}
2826 			if (bge_newbuf_jumbo(sc,
2827 			    sc->bge_jumbo, NULL) == ENOBUFS) {
2828 				ifp->if_ierrors++;
2829 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2830 				continue;
2831 			}
2832 		} else {
2833 			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2834 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2835 			    sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2836 			    BUS_DMASYNC_POSTREAD);
2837 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2838 			    sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2839 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2840 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2841 			stdcnt++;
2842 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2843 				ifp->if_ierrors++;
2844 				bge_newbuf_std(sc, sc->bge_std, m);
2845 				continue;
2846 			}
2847 			if (bge_newbuf_std(sc, sc->bge_std,
2848 			    NULL) == ENOBUFS) {
2849 				ifp->if_ierrors++;
2850 				bge_newbuf_std(sc, sc->bge_std, m);
2851 				continue;
2852 			}
2853 		}
2854 
2855 		ifp->if_ipackets++;
2856 #ifndef __NO_STRICT_ALIGNMENT
2857 		/*
2858 		 * For architectures with strict alignment we must make sure
2859 		 * the payload is aligned.
2860 		 */
2861 		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
2862 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2863 			    cur_rx->bge_len);
2864 			m->m_data += ETHER_ALIGN;
2865 		}
2866 #endif
2867 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2868 		m->m_pkthdr.rcvif = ifp;
2869 
2870 		if (ifp->if_capenable & IFCAP_RXCSUM) {
2871 			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2872 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2873 				if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2874 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2875 			}
2876 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
2877 			    m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
2878 				m->m_pkthdr.csum_data =
2879 				    cur_rx->bge_tcp_udp_csum;
2880 				m->m_pkthdr.csum_flags |=
2881 				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2882 			}
2883 		}
2884 
2885 		/*
2886 		 * If we received a packet with a vlan tag,
2887 		 * attach that information to the packet.
2888 		 */
2889 		if (have_tag) {
2890 			m->m_pkthdr.ether_vtag = vlan_tag;
2891 			m->m_flags |= M_VLANTAG;
2892 		}
2893 
2894 		BGE_UNLOCK(sc);
2895 		(*ifp->if_input)(ifp, m);
2896 		BGE_LOCK(sc);
2897 	}
2898 
2899 	if (stdcnt > 0)
2900 		bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2901 		    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
2902 
2903 	if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0)
2904 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2905 		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
2906 
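	/*
	 * Tell the chip how far we have consumed and refill the producer
	 * rings we took buffers from.
	 */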
2907 	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2908 	if (stdcnt)
2909 		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2910 	if (jumbocnt)
2911 		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2912 #ifdef notyet
2913 	/*
2914 	 * This register wraps very quickly under heavy packet drops.
2915 	 * If you need correct statistics, you can enable this check.
2916 	 */
2917 	if (BGE_IS_5705_PLUS(sc))
2918 		ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
2919 #endif
2920 }
2921 
2922 static void
2923 bge_txeof(struct bge_softc *sc)
2924 {
2925 	struct bge_tx_bd *cur_tx = NULL;
2926 	struct ifnet *ifp;
2927 
2928 	BGE_LOCK_ASSERT(sc);
2929 
2930 	/* Nothing to do. */
2931 	if (sc->bge_tx_saved_considx ==
2932 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
2933 		return;
2934 
2935 	ifp = sc->bge_ifp;
2936 
2937 	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
2938 	    sc->bge_cdata.bge_tx_ring_map,
2939 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2940 	/*
2941 	 * Go through our tx ring and free mbufs for those
2942 	 * frames that have been sent.
2943 	 */
2944 	while (sc->bge_tx_saved_considx !=
2945 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2946 		uint32_t		idx = 0;
2947 
2948 		idx = sc->bge_tx_saved_considx;
2949 		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2950 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2951 			ifp->if_opackets++;
2952 		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2953 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2954 			    sc->bge_cdata.bge_tx_dmamap[idx],
2955 			    BUS_DMASYNC_POSTWRITE);
2956 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2957 			    sc->bge_cdata.bge_tx_dmamap[idx]);
2958 			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2959 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2960 		}
2961 		sc->bge_txcnt--;
2962 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2963 	}
2964 
2965 	if (cur_tx != NULL)
2966 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2967 	if (sc->bge_txcnt == 0)
2968 		sc->bge_timer = 0;
2969 }
2970 
2971 #ifdef DEVICE_POLLING
2972 static void
2973 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2974 {
2975 	struct bge_softc *sc = ifp->if_softc;
2976 	uint32_t statusword;
2977 
2978 	BGE_LOCK(sc);
2979 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2980 		BGE_UNLOCK(sc);
2981 		return;
2982 	}
2983 
2984 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2985 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2986 
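	/* Fetch the DMA'ed status word and clear it for the next update. */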
2987 	statusword = atomic_readandclear_32(
2988 	    &sc->bge_ldata.bge_status_block->bge_status);
2989 
2990 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2991 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
2992 
2993 	/* Note link event. It will be processed by POLL_AND_CHECK_STATUS cmd */
2994 	if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
2995 		sc->bge_link_evt++;
2996 
2997 	if (cmd == POLL_AND_CHECK_STATUS)
2998 		if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2999 		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3000 		    sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3001 			bge_link_upd(sc);
3002 
3003 	sc->rxcycles = count;
3004 	bge_rxeof(sc);
3005 	bge_txeof(sc);
3006 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3007 		bge_start_locked(ifp);
3008 
3009 	BGE_UNLOCK(sc);
3010 }
3011 #endif /* DEVICE_POLLING */
3012 
3013 static void
3014 bge_intr(void *xsc)
3015 {
3016 	struct bge_softc *sc;
3017 	struct ifnet *ifp;
3018 	uint32_t statusword;
3019 
3020 	sc = xsc;
3021 
3022 	BGE_LOCK(sc);
3023 
3024 	ifp = sc->bge_ifp;
3025 
3026 #ifdef DEVICE_POLLING
3027 	if (ifp->if_capenable & IFCAP_POLLING) {
3028 		BGE_UNLOCK(sc);
3029 		return;
3030 	}
3031 #endif
3032 
3033 	/*
3034 	 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO.  Don't
3035 	 * disable interrupts by writing nonzero like we used to, since with
3036 	 * our current organization this just gives complications and
3037 	 * pessimizations for re-enabling interrupts.  We used to have races
3038 	 * instead of the necessary complications.  Disabling interrupts
3039 	 * would just reduce the chance of a status update while we are
3040 	 * running (by switching to the interrupt-mode coalescence
3041 	 * parameters), but this chance is already very low so it is more
3042 	 * efficient to get another interrupt than prevent it.
3043 	 *
3044 	 * We do the ack first to ensure another interrupt if there is a
3045 	 * status update after the ack.  We don't check for the status
3046 	 * changing later because it is more efficient to get another
3047 	 * interrupt than prevent it, not quite as above (not checking is
3048 	 * a smaller optimization than not toggling the interrupt enable,
	 * since checking doesn't involve PCI accesses and toggling requires
3050 	 * the status check).  So toggling would probably be a pessimization
3051 	 * even with MSI.  It would only be needed for using a task queue.
3052 	 */
3053 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3054 
3055 	/*
3056 	 * Do the mandatory PCI flush as well as get the link status.
3057 	 */
3058 	statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
3059 
3060 	/* Make sure the descriptor ring indexes are coherent. */
3061 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3062 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
3063 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3064 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
3065 
3066 	if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3067 	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3068 	    statusword || sc->bge_link_evt)
3069 		bge_link_upd(sc);
3070 
3071 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3072 		/* Check RX return ring producer/consumer. */
3073 		bge_rxeof(sc);
3074 
3075 		/* Check TX ring producer/consumer. */
3076 		bge_txeof(sc);
3077 	}
3078 
3079 	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3080 	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3081 		bge_start_locked(ifp);
3082 
3083 	BGE_UNLOCK(sc);
3084 }
3085 
3086 static void
3087 bge_asf_driver_up(struct bge_softc *sc)
3088 {
3089 	if (sc->bge_asf_mode & ASF_STACKUP) {
		/* Send ASF heartbeat approx. every 2s */
		if (sc->bge_asf_count)
			sc->bge_asf_count--;
3093 		else {
3094 			sc->bge_asf_count = 5;
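			/*
			 * Post a driver-alive event (length 4, data 3) in the
			 * firmware mailbox and raise the CPU event bit so the
			 * firmware notices it.
			 */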
3095 			bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
3096 			    BGE_FW_DRV_ALIVE);
3097 			bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
3098 			bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
3099 			CSR_WRITE_4(sc, BGE_CPU_EVENT,
			    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
3101 		}
3102 	}
3103 }
3104 
3105 static void
3106 bge_tick(void *xsc)
3107 {
3108 	struct bge_softc *sc = xsc;
3109 	struct mii_data *mii = NULL;
3110 
3111 	BGE_LOCK_ASSERT(sc);
3112 
3113 	/* Synchronize with possible callout reset/stop. */
3114 	if (callout_pending(&sc->bge_stat_ch) ||
3115 	    !callout_active(&sc->bge_stat_ch))
		return;
3117 
3118 	if (BGE_IS_5705_PLUS(sc))
3119 		bge_stats_update_regs(sc);
3120 	else
3121 		bge_stats_update(sc);
3122 
3123 	if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3124 		mii = device_get_softc(sc->bge_miibus);
3125 		/* Don't mess with the PHY in IPMI/ASF mode */
3126 		if (!((sc->bge_asf_mode & ASF_STACKUP) && (sc->bge_link)))
3127 			mii_tick(mii);
3128 	} else {
3129 		/*
3130 		 * Since in TBI mode auto-polling can't be used we should poll
3131 		 * link status manually. Here we register pending link event
3132 		 * and trigger interrupt.
3133 		 */
3134 #ifdef DEVICE_POLLING
3135 		/* In polling mode we poll link state in bge_poll(). */
3136 		if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
3137 #endif
3138 		{
3139 		sc->bge_link_evt++;
3140 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3141 		}
3142 	}
3143 
3144 	bge_asf_driver_up(sc);
3145 	bge_watchdog(sc);
3146 
3147 	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3148 }
3149 
3150 static void
3151 bge_stats_update_regs(struct bge_softc *sc)
3152 {
3153 	struct ifnet *ifp;
3154 
3155 	ifp = sc->bge_ifp;
3156 
3157 	ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
3158 	    offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
3159 
3160 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3161 }
3162 
3163 static void
3164 bge_stats_update(struct bge_softc *sc)
3165 {
3166 	struct ifnet *ifp;
3167 	bus_size_t stats;
3168 	uint32_t cnt;	/* current register value */
3169 
3170 	ifp = sc->bge_ifp;
3171 
3172 	stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3173 
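	/*
	 * READ_STAT() fetches the low 32 bits of a counter from the
	 * statistics block through the memory window; callers accumulate
	 * the deltas since the previous read.
	 */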
3174 #define READ_STAT(sc, stats, stat) \
3175 	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
3176 
3177 	cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
3178 	ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
3179 	sc->bge_tx_collisions = cnt;
3180 
3181 	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
3182 	ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
3183 	sc->bge_rx_discards = cnt;
3184 
3185 	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
3186 	ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
3187 	sc->bge_tx_discards = cnt;
3188 
3189 #undef READ_STAT
3190 }
3191 
3192 /*
3193  * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3194  * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3195  * but when such padded frames employ the bge IP/TCP checksum offload,
3196  * the hardware checksum assist gives incorrect results (possibly
3197  * from incorporating its own padding into the UDP/TCP checksum; who knows).
3198  * If we pad such runts with zeros, the onboard checksum comes out correct.
3199  */
3200 static __inline int
3201 bge_cksum_pad(struct mbuf *m)
3202 {
3203 	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
3204 	struct mbuf *last;
3205 
3206 	/* If there's only the packet-header and we can pad there, use it. */
3207 	if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
3208 	    M_TRAILINGSPACE(m) >= padlen) {
3209 		last = m;
3210 	} else {
3211 		/*
3212 		 * Walk packet chain to find last mbuf. We will either
3213 		 * pad there, or append a new mbuf and pad it.
3214 		 */
3215 		for (last = m; last->m_next != NULL; last = last->m_next);
3216 		if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
3217 			/* Allocate new empty mbuf, pad it. Compact later. */
3218 			struct mbuf *n;
3219 
3220 			MGET(n, M_DONTWAIT, MT_DATA);
3221 			if (n == NULL)
3222 				return (ENOBUFS);
3223 			n->m_len = 0;
3224 			last->m_next = n;
3225 			last = n;
3226 		}
3227 	}
3228 
3229 	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
3230 	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
3231 	last->m_len += padlen;
3232 	m->m_pkthdr.len += padlen;
3233 
3234 	return (0);
3235 }
3236 
3237 /*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3239  * pointers to descriptors.
3240  */
3241 static int
3242 bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
3243 {
3244 	bus_dma_segment_t	segs[BGE_NSEG_NEW];
3245 	bus_dmamap_t		map;
3246 	struct bge_tx_bd	*d;
3247 	struct mbuf		*m = *m_head;
3248 	uint32_t		idx = *txidx;
3249 	uint16_t		csum_flags;
3250 	int			nsegs, i, error;
3251 
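	/* Turn the mbuf's checksum-offload requests into TX BD flags. */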
3252 	csum_flags = 0;
3253 	if (m->m_pkthdr.csum_flags) {
3254 		if (m->m_pkthdr.csum_flags & CSUM_IP)
3255 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3256 		if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
3257 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3258 			if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
3259 			    (error = bge_cksum_pad(m)) != 0) {
3260 				m_freem(m);
3261 				*m_head = NULL;
3262 				return (error);
3263 			}
3264 		}
3265 		if (m->m_flags & M_LASTFRAG)
3266 			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3267 		else if (m->m_flags & M_FRAG)
3268 			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3269 	}
3270 
3271 	map = sc->bge_cdata.bge_tx_dmamap[idx];
3272 	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m, segs,
3273 	    &nsegs, BUS_DMA_NOWAIT);
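	/*
	 * Too many segments: defragment the mbuf chain and retry the
	 * DMA load once.
	 */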
3274 	if (error == EFBIG) {
3275 		m = m_defrag(m, M_DONTWAIT);
3276 		if (m == NULL) {
3277 			m_freem(*m_head);
3278 			*m_head = NULL;
3279 			return (ENOBUFS);
3280 		}
3281 		*m_head = m;
3282 		error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m,
3283 		    segs, &nsegs, BUS_DMA_NOWAIT);
3284 		if (error) {
3285 			m_freem(m);
3286 			*m_head = NULL;
3287 			return (error);
3288 		}
3289 	} else if (error != 0)
3290 		return (error);
3291 
3292 	/*
3293 	 * Sanity check: avoid coming within 16 descriptors
3294 	 * of the end of the ring.
3295 	 */
3296 	if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
3297 		bus_dmamap_unload(sc->bge_cdata.bge_mtag, map);
3298 		return (ENOBUFS);
3299 	}
3300 
3301 	bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
3302 
3303 	for (i = 0; ; i++) {
3304 		d = &sc->bge_ldata.bge_tx_ring[idx];
3305 		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
3306 		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
3307 		d->bge_len = segs[i].ds_len;
3308 		d->bge_flags = csum_flags;
3309 		if (i == nsegs - 1)
3310 			break;
3311 		BGE_INC(idx, BGE_TX_RING_CNT);
3312 	}
3313 
3314 	/* Mark the last segment as end of packet... */
3315 	d->bge_flags |= BGE_TXBDFLAG_END;
3316 
3317 	/* ... and put VLAN tag into first segment.  */
3318 	d = &sc->bge_ldata.bge_tx_ring[*txidx];
3319 	if (m->m_flags & M_VLANTAG) {
3320 		d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3321 		d->bge_vlan_tag = m->m_pkthdr.ether_vtag;
3322 	} else
3323 		d->bge_vlan_tag = 0;
3324 
3325 	/*
	 * Ensure that the map for this transmission
3327 	 * is placed at the array index of the last descriptor
3328 	 * in this chain.
3329 	 */
3330 	sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
3331 	sc->bge_cdata.bge_tx_dmamap[idx] = map;
3332 	sc->bge_cdata.bge_tx_chain[idx] = m;
3333 	sc->bge_txcnt += nsegs;
3334 
3335 	BGE_INC(idx, BGE_TX_RING_CNT);
3336 	*txidx = idx;
3337 
3338 	return (0);
3339 }
3340 
3341 /*
3342  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3343  * to the mbuf data regions directly in the transmit descriptors.
3344  */
3345 static void
3346 bge_start_locked(struct ifnet *ifp)
3347 {
3348 	struct bge_softc *sc;
3349 	struct mbuf *m_head = NULL;
3350 	uint32_t prodidx;
3351 	int count = 0;
3352 
3353 	sc = ifp->if_softc;
3354 
3355 	if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3356 		return;
3357 
3358 	prodidx = sc->bge_tx_prodidx;
3359 
	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3361 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3362 		if (m_head == NULL)
3363 			break;
3364 
3365 		/*
3366 		 * XXX
		 * The code inside the if() block is never reached since we
		 * don't advertise CSUM_IP_FRAGS in if_hwassist, so we never
		 * get requests to checksum TCP/UDP in a fragmented packet.
3370 		 *
3371 		 * XXX
3372 		 * safety overkill.  If this is a fragmented packet chain
3373 		 * with delayed TCP/UDP checksums, then only encapsulate
3374 		 * it if we have enough descriptors to handle the entire
3375 		 * chain at once.
3376 		 * (paranoia -- may not actually be needed)
3377 		 */
3378 		if (m_head->m_flags & M_FIRSTFRAG &&
3379 		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3380 			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3381 			    m_head->m_pkthdr.csum_data + 16) {
3382 				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3383 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3384 				break;
3385 			}
3386 		}
3387 
3388 		/*
3389 		 * Pack the data into the transmit ring. If we
3390 		 * don't have room, set the OACTIVE flag and wait
3391 		 * for the NIC to drain the ring.
3392 		 */
3393 		if (bge_encap(sc, &m_head, &prodidx)) {
3394 			if (m_head == NULL)
3395 				break;
3396 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3397 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3398 			break;
3399 		}
3400 		++count;
3401 
3402 		/*
3403 		 * If there's a BPF listener, bounce a copy of this frame
3404 		 * to him.
3405 		 */
3406 		ETHER_BPF_MTAP(ifp, m_head);
3407 	}
3408 
3409 	if (count == 0)
3410 		/* No packets were dequeued. */
3411 		return;
3412 
3413 	/* Transmit. */
3414 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3415 	/* 5700 BX errata: write the TX producer index twice. */
3416 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3417 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3418 
3419 	sc->bge_tx_prodidx = prodidx;
3420 
3421 	/*
3422 	 * Set a timeout in case the chip goes out to lunch.
3423 	 */
3424 	sc->bge_timer = 5;
3425 }
3426 
3427 /*
3428  * Transmit entry point: acquire the driver lock and hand the send
3429  * queue off to bge_start_locked().
3430  */
3431 static void
3432 bge_start(struct ifnet *ifp)
3433 {
3434 	struct bge_softc *sc;
3435 
3436 	sc = ifp->if_softc;
3437 	BGE_LOCK(sc);
3438 	bge_start_locked(ifp);
3439 	BGE_UNLOCK(sc);
3440 }
3441 
3442 static void
3443 bge_init_locked(struct bge_softc *sc)
3444 {
3445 	struct ifnet *ifp;
3446 	uint16_t *m;
3447 
3448 	BGE_LOCK_ASSERT(sc);
3449 
3450 	ifp = sc->bge_ifp;
3451 
3452 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3453 		return;
3454 
3455 	/* Cancel pending I/O and flush buffers. */
3456 	bge_stop(sc);
3457 
3458 	bge_stop_fw(sc);
3459 	bge_sig_pre_reset(sc, BGE_RESET_START);
3460 	bge_reset(sc);
3461 	bge_sig_legacy(sc, BGE_RESET_START);
3462 	bge_sig_post_reset(sc, BGE_RESET_START);
3463 
3464 	bge_chipinit(sc);
3465 
3466 	/*
3467 	 * Init the various state machines, ring
3468 	 * control blocks and firmware.
3469 	 */
3470 	if (bge_blockinit(sc)) {
3471 		device_printf(sc->bge_dev, "initialization failure\n");
3472 		return;
3473 	}
3474 
3475 	ifp = sc->bge_ifp;
3476 
3477 	/* Specify MTU. */
3478 	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3479 	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
3480 
3481 	/* Load our MAC address. */
3482 	m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
3483 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3484 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3485 
3486 	/* Program promiscuous mode. */
3487 	bge_setpromisc(sc);
3488 
3489 	/* Program multicast filter. */
3490 	bge_setmulti(sc);
3491 
3492 	/* Init RX ring. */
3493 	bge_init_rx_ring_std(sc);
3494 
3495 	/*
3496 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3497 	 * memory to ensure that the chip has in fact read the first
3498 	 * entry of the ring.
3499 	 */
3500 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3501 		uint32_t		v, i;
3502 		for (i = 0; i < 10; i++) {
3503 			DELAY(20);
3504 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3505 			if (v == (MCLBYTES - ETHER_ALIGN))
3506 				break;
3507 		}
3508 		if (i == 10)
3509 			device_printf(sc->bge_dev,
3510 			    "5705 A0 chip failed to load RX ring\n");
3511 	}
3512 
3513 	/* Init jumbo RX ring. */
3514 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3515 		bge_init_rx_ring_jumbo(sc);
3516 
3517 	/* Init our RX return ring index. */
3518 	sc->bge_rx_saved_considx = 0;
3519 
3520 	/* Init our RX/TX stat counters. */
3521 	sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
3522 
3523 	/* Init TX ring. */
3524 	bge_init_tx_ring(sc);
3525 
3526 	/* Turn on transmitter. */
3527 	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3528 
3529 	/* Turn on receiver. */
3530 	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3531 
3532 	/* Tell firmware we're alive. */
3533 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3534 
3535 #ifdef DEVICE_POLLING
3536 	/* Disable interrupts if we are polling. */
3537 	if (ifp->if_capenable & IFCAP_POLLING) {
3538 		BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3539 		    BGE_PCIMISCCTL_MASK_PCI_INTR);
3540 		CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3541 	} else
3542 #endif
3543 
3544 	/* Enable host interrupts. */
3545 	{
3546 		BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3547 		BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3548 		CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3549 	}
3550 
3551 	bge_ifmedia_upd_locked(ifp);
3552 
3553 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3554 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3555 
3556 	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3557 }
3558 
3559 static void
3560 bge_init(void *xsc)
3561 {
3562 	struct bge_softc *sc = xsc;
3563 
3564 	BGE_LOCK(sc);
3565 	bge_init_locked(sc);
3566 	BGE_UNLOCK(sc);
3567 }
3568 
3569 /*
3570  * Set media options.
3571  */
3572 static int
3573 bge_ifmedia_upd(struct ifnet *ifp)
3574 {
3575 	struct bge_softc *sc = ifp->if_softc;
3576 	int res;
3577 
3578 	BGE_LOCK(sc);
3579 	res = bge_ifmedia_upd_locked(ifp);
3580 	BGE_UNLOCK(sc);
3581 
3582 	return (res);
3583 }
3584 
3585 static int
3586 bge_ifmedia_upd_locked(struct ifnet *ifp)
3587 {
3588 	struct bge_softc *sc = ifp->if_softc;
3589 	struct mii_data *mii;
3590 	struct ifmedia *ifm;
3591 
3592 	BGE_LOCK_ASSERT(sc);
3593 
3594 	ifm = &sc->bge_ifmedia;
3595 
3596 	/* If this is a 1000baseX NIC, enable the TBI port. */
3597 	if (sc->bge_flags & BGE_FLAG_TBI) {
3598 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3599 			return (EINVAL);
3600 		switch (IFM_SUBTYPE(ifm->ifm_media)) {
3601 		case IFM_AUTO:
3602 			/*
3603 			 * The BCM5704 ASIC appears to have a special
3604 			 * mechanism for programming the autoneg
3605 			 * advertisement registers in TBI mode.
3606 			 */
3607 			if (bge_fake_autoneg == 0 &&
3608 			    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3609 				uint32_t sgdig;
3610 				CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3611 				sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3612 				sgdig |= BGE_SGDIGCFG_AUTO|
3613 				    BGE_SGDIGCFG_PAUSE_CAP|
3614 				    BGE_SGDIGCFG_ASYM_PAUSE;
3615 				CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3616 				    sgdig|BGE_SGDIGCFG_SEND);
3617 				DELAY(5);
3618 				CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3619 			}
3620 			break;
3621 		case IFM_1000_SX:
3622 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3623 				BGE_CLRBIT(sc, BGE_MAC_MODE,
3624 				    BGE_MACMODE_HALF_DUPLEX);
3625 			} else {
3626 				BGE_SETBIT(sc, BGE_MAC_MODE,
3627 				    BGE_MACMODE_HALF_DUPLEX);
3628 			}
3629 			break;
3630 		default:
3631 			return (EINVAL);
3632 		}
3633 		return (0);
3634 	}
3635 
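	/* Flag a pending link event; bge_link_upd() will clear it. */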
3636 	sc->bge_link_evt++;
3637 	mii = device_get_softc(sc->bge_miibus);
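	/* Reset every attached PHY instance before changing media. */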
3638 	if (mii->mii_instance) {
3639 		struct mii_softc *miisc;
3640 		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
3641 		    miisc = LIST_NEXT(miisc, mii_list))
3642 			mii_phy_reset(miisc);
3643 	}
3644 	mii_mediachg(mii);
3645 
3646 	return (0);
3647 }
3648 
3649 /*
3650  * Report current media status.
3651  */
3652 static void
3653 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3654 {
3655 	struct bge_softc *sc = ifp->if_softc;
3656 	struct mii_data *mii;
3657 
3658 	BGE_LOCK(sc);
3659 
3660 	if (sc->bge_flags & BGE_FLAG_TBI) {
3661 		ifmr->ifm_status = IFM_AVALID;
3662 		ifmr->ifm_active = IFM_ETHER;
3663 		if (CSR_READ_4(sc, BGE_MAC_STS) &
3664 		    BGE_MACSTAT_TBI_PCS_SYNCHED)
3665 			ifmr->ifm_status |= IFM_ACTIVE;
3666 		else {
3667 			ifmr->ifm_active |= IFM_NONE;
3668 			BGE_UNLOCK(sc);
3669 			return;
3670 		}
3671 		ifmr->ifm_active |= IFM_1000_SX;
3672 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3673 			ifmr->ifm_active |= IFM_HDX;
3674 		else
3675 			ifmr->ifm_active |= IFM_FDX;
3676 		BGE_UNLOCK(sc);
3677 		return;
3678 	}
3679 
3680 	mii = device_get_softc(sc->bge_miibus);
3681 	mii_pollstat(mii);
3682 	ifmr->ifm_active = mii->mii_media_active;
3683 	ifmr->ifm_status = mii->mii_media_status;
3684 
3685 	BGE_UNLOCK(sc);
3686 }
3687 
3688 static int
3689 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3690 {
3691 	struct bge_softc *sc = ifp->if_softc;
3692 	struct ifreq *ifr = (struct ifreq *) data;
3693 	struct mii_data *mii;
3694 	int flags, mask, error = 0;
3695 
3696 	switch (command) {
3697 	case SIOCSIFMTU:
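		/*
		 * Changing the MTU may switch between standard and jumbo
		 * RX buffers, so a running interface must be fully
		 * reinitialized.
		 */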
3698 		if (ifr->ifr_mtu < ETHERMIN ||
3699 		    ((BGE_IS_JUMBO_CAPABLE(sc)) &&
3700 		    ifr->ifr_mtu > BGE_JUMBO_MTU) ||
3701 		    ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
3702 		    ifr->ifr_mtu > ETHERMTU))
3703 			error = EINVAL;
3704 		else if (ifp->if_mtu != ifr->ifr_mtu) {
3705 			ifp->if_mtu = ifr->ifr_mtu;
3706 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3707 			bge_init(sc);
3708 		}
3709 		break;
3710 	case SIOCSIFFLAGS:
3711 		BGE_LOCK(sc);
3712 		if (ifp->if_flags & IFF_UP) {
3713 			/*
3714 			 * If only the state of the PROMISC flag changed,
3715 			 * then just use the 'set promisc mode' command
3716 			 * instead of reinitializing the entire NIC. Doing
3717 			 * a full re-init means reloading the firmware and
3718 			 * waiting for it to start up, which may take a
3719 			 * second or two.  Similarly for ALLMULTI.
3720 			 */
3721 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3722 				flags = ifp->if_flags ^ sc->bge_if_flags;
3723 				if (flags & IFF_PROMISC)
3724 					bge_setpromisc(sc);
3725 				if (flags & IFF_ALLMULTI)
3726 					bge_setmulti(sc);
3727 			} else
3728 				bge_init_locked(sc);
3729 		} else {
3730 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3731 				bge_stop(sc);
3732 			}
3733 		}
3734 		sc->bge_if_flags = ifp->if_flags;
3735 		BGE_UNLOCK(sc);
3736 		error = 0;
3737 		break;
3738 	case SIOCADDMULTI:
3739 	case SIOCDELMULTI:
3740 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3741 			BGE_LOCK(sc);
3742 			bge_setmulti(sc);
3743 			BGE_UNLOCK(sc);
3744 			error = 0;
3745 		}
3746 		break;
3747 	case SIOCSIFMEDIA:
3748 	case SIOCGIFMEDIA:
3749 		if (sc->bge_flags & BGE_FLAG_TBI) {
3750 			error = ifmedia_ioctl(ifp, ifr,
3751 			    &sc->bge_ifmedia, command);
3752 		} else {
3753 			mii = device_get_softc(sc->bge_miibus);
3754 			error = ifmedia_ioctl(ifp, ifr,
3755 			    &mii->mii_media, command);
3756 		}
3757 		break;
3758 	case SIOCSIFCAP:
3759 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3760 #ifdef DEVICE_POLLING
3761 		if (mask & IFCAP_POLLING) {
3762 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
3763 				error = ether_poll_register(bge_poll, ifp);
3764 				if (error)
3765 					return (error);
3766 				BGE_LOCK(sc);
3767 				BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3768 				    BGE_PCIMISCCTL_MASK_PCI_INTR);
3769 				CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3770 				ifp->if_capenable |= IFCAP_POLLING;
3771 				BGE_UNLOCK(sc);
3772 			} else {
3773 				error = ether_poll_deregister(ifp);
3774 				/* Enable interrupt even in error case */
3775 				BGE_LOCK(sc);
3776 				BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
3777 				    BGE_PCIMISCCTL_MASK_PCI_INTR);
3778 				CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3779 				ifp->if_capenable &= ~IFCAP_POLLING;
3780 				BGE_UNLOCK(sc);
3781 			}
3782 		}
3783 #endif
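		/* Toggle hardware checksum offload and update if_hwassist to match. */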
3784 		if (mask & IFCAP_HWCSUM) {
3785 			ifp->if_capenable ^= IFCAP_HWCSUM;
3786 			if (IFCAP_HWCSUM & ifp->if_capenable &&
3787 			    IFCAP_HWCSUM & ifp->if_capabilities)
3788 				ifp->if_hwassist = BGE_CSUM_FEATURES;
3789 			else
3790 				ifp->if_hwassist = 0;
3791 			VLAN_CAPABILITIES(ifp);
3792 		}
3793 		break;
3794 	default:
3795 		error = ether_ioctl(ifp, command, data);
3796 		break;
3797 	}
3798 
3799 	return (error);
3800 }
3801 
3802 static void
3803 bge_watchdog(struct bge_softc *sc)
3804 {
3805 	struct ifnet *ifp;
3806 
3807 	BGE_LOCK_ASSERT(sc);
3808 
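	/*
	 * bge_timer is armed when frames are handed to the chip (see
	 * bge_start_locked()); zero means no transmission is pending.
	 */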
3809 	if (sc->bge_timer == 0 || --sc->bge_timer)
3810 		return;
3811 
3812 	ifp = sc->bge_ifp;
3813 
3814 	if_printf(ifp, "watchdog timeout -- resetting\n");
3815 
3816 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3817 	bge_init_locked(sc);
3818 
3819 	ifp->if_oerrors++;
3820 }
3821 
3822 /*
3823  * Stop the adapter and free any mbufs allocated to the
3824  * RX and TX lists.
3825  */
3826 static void
3827 bge_stop(struct bge_softc *sc)
3828 {
3829 	struct ifnet *ifp;
3830 	struct ifmedia_entry *ifm;
3831 	struct mii_data *mii = NULL;
3832 	int mtmp, itmp;
3833 
3834 	BGE_LOCK_ASSERT(sc);
3835 
3836 	ifp = sc->bge_ifp;
3837 
3838 	if ((sc->bge_flags & BGE_FLAG_TBI) == 0)
3839 		mii = device_get_softc(sc->bge_miibus);
3840 
3841 	callout_stop(&sc->bge_stat_ch);
3842 
3843 	/*
3844 	 * Disable all of the receiver blocks.
3845 	 */
3846 	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3847 	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3848 	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3849 	if (!(BGE_IS_5705_PLUS(sc)))
3850 		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3851 	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3852 	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3853 	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3854 
3855 	/*
3856 	 * Disable all of the transmit blocks.
3857 	 */
3858 	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3859 	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3860 	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3861 	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3862 	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3863 	if (!(BGE_IS_5705_PLUS(sc)))
3864 		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3865 	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3866 
3867 	/*
3868 	 * Shut down all of the memory managers and related
3869 	 * state machines.
3870 	 */
3871 	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3872 	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3873 	if (!(BGE_IS_5705_PLUS(sc)))
3874 		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3875 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3876 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3877 	if (!(BGE_IS_5705_PLUS(sc))) {
3878 		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3879 		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3880 	}
3881 
3882 	/* Disable host interrupts. */
3883 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
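	/*
	 * A nonzero IRQ0 mailbox value, like the mask bit above, keeps
	 * interrupts disabled; bge_init_locked() writes 0 to re-enable.
	 */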
3884 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3885 
3886 	/*
3887 	 * Tell firmware we're shutting down.
3888 	 */
3889 
3890 	bge_stop_fw(sc);
3891 	bge_sig_pre_reset(sc, BGE_RESET_STOP);
3892 	bge_reset(sc);
3893 	bge_sig_legacy(sc, BGE_RESET_STOP);
3894 	bge_sig_post_reset(sc, BGE_RESET_STOP);
3895 
3896 	/*
3897 	 * Keep the ASF firmware running if up.
3898 	 */
3899 	if (sc->bge_asf_mode & ASF_STACKUP)
3900 		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3901 	else
3902 		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3903 
3904 	/* Free the RX lists. */
3905 	bge_free_rx_ring_std(sc);
3906 
3907 	/* Free jumbo RX list. */
3908 	if (BGE_IS_JUMBO_CAPABLE(sc))
3909 		bge_free_rx_ring_jumbo(sc);
3910 
3911 	/* Free TX buffers. */
3912 	bge_free_tx_ring(sc);
3913 
3914 	/*
3915 	 * Isolate/power down the PHY, but leave the media selection
3916 	 * unchanged so that things will be put back to normal when
3917 	 * we bring the interface back up.
3918 	 */
3919 	if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3920 		itmp = ifp->if_flags;
3921 		ifp->if_flags |= IFF_UP;
3922 		/*
3923 		 * If we are called from bge_detach(), mii is already NULL.
3924 		 */
3925 		if (mii != NULL) {
3926 			ifm = mii->mii_media.ifm_cur;
3927 			mtmp = ifm->ifm_media;
3928 			ifm->ifm_media = IFM_ETHER|IFM_NONE;
3929 			mii_mediachg(mii);
3930 			ifm->ifm_media = mtmp;
3931 		}
3932 		ifp->if_flags = itmp;
3933 	}
3934 
3935 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3936 
3937 	/* Clear MAC's link state (PHY may still have link UP). */
3938 	if (bootverbose && sc->bge_link)
3939 		if_printf(sc->bge_ifp, "link DOWN\n");
3940 	sc->bge_link = 0;
3941 
3942 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3943 }
3944 
3945 /*
3946  * Stop all chip I/O so that the kernel's probe routines don't
3947  * get confused by errant DMAs when rebooting.
3948  */
3949 static void
3950 bge_shutdown(device_t dev)
3951 {
3952 	struct bge_softc *sc;
3953 
3954 	sc = device_get_softc(dev);
3955 
3956 	BGE_LOCK(sc);
3957 	bge_stop(sc);
3958 	bge_reset(sc);
3959 	BGE_UNLOCK(sc);
3960 }
3961 
3962 static int
3963 bge_suspend(device_t dev)
3964 {
3965 	struct bge_softc *sc;
3966 
3967 	sc = device_get_softc(dev);
3968 	BGE_LOCK(sc);
3969 	bge_stop(sc);
3970 	BGE_UNLOCK(sc);
3971 
3972 	return (0);
3973 }
3974 
3975 static int
3976 bge_resume(device_t dev)
3977 {
3978 	struct bge_softc *sc;
3979 	struct ifnet *ifp;
3980 
3981 	sc = device_get_softc(dev);
3982 	BGE_LOCK(sc);
3983 	ifp = sc->bge_ifp;
3984 	if (ifp->if_flags & IFF_UP) {
3985 		bge_init_locked(sc);
3986 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3987 			bge_start_locked(ifp);
3988 	}
3989 	BGE_UNLOCK(sc);
3990 
3991 	return (0);
3992 }
3993 
3994 static void
3995 bge_link_upd(struct bge_softc *sc)
3996 {
3997 	struct mii_data *mii;
3998 	uint32_t link, status;
3999 
4000 	BGE_LOCK_ASSERT(sc);
4001 
4002 	/* Clear 'pending link event' flag. */
4003 	sc->bge_link_evt = 0;
4004 
4005 	/*
4006 	 * Process link state changes.
4007 	 * Grrr. The link status word in the status block does
4008 	 * not work correctly on the BCM5700 rev AX and BX chips,
4009 	 * according to all available information. Hence, we have
4010 	 * to enable MII interrupts in order to properly obtain
4011 	 * async link changes. Unfortunately, this also means that
4012 	 * we have to read the MAC status register to detect link
4013 	 * changes, thereby adding an additional register access to
4014 	 * the interrupt handler.
4015 	 *
4016 	 * XXX: perhaps the link state detection procedure used for
4017 	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
4018 	 */
4019 
4020 	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4021 	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
4022 		status = CSR_READ_4(sc, BGE_MAC_STS);
4023 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
4024 			mii = device_get_softc(sc->bge_miibus);
4025 			mii_pollstat(mii);
4026 			if (!sc->bge_link &&
4027 			    mii->mii_media_status & IFM_ACTIVE &&
4028 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4029 				sc->bge_link++;
4030 				if (bootverbose)
4031 					if_printf(sc->bge_ifp, "link UP\n");
4032 			} else if (sc->bge_link &&
4033 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
4034 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4035 				sc->bge_link = 0;
4036 				if (bootverbose)
4037 					if_printf(sc->bge_ifp, "link DOWN\n");
4038 			}
4039 
4040 			/* Clear the interrupt. */
4041 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
4042 			    BGE_EVTENB_MI_INTERRUPT);
4043 			bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
4044 			bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
4045 			    BRGPHY_INTRS);
4046 		}
4047 		return;
4048 	}
4049 
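	/*
	 * For TBI (fiber) interfaces there is no PHY; link state is
	 * taken directly from the MAC status register.
	 */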
4050 	if (sc->bge_flags & BGE_FLAG_TBI) {
4051 		status = CSR_READ_4(sc, BGE_MAC_STS);
4052 		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
4053 			if (!sc->bge_link) {
4054 				sc->bge_link++;
4055 				if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
4056 					BGE_CLRBIT(sc, BGE_MAC_MODE,
4057 					    BGE_MACMODE_TBI_SEND_CFGS);
4058 				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
4059 				if (bootverbose)
4060 					if_printf(sc->bge_ifp, "link UP\n");
4061 				if_link_state_change(sc->bge_ifp,
4062 				    LINK_STATE_UP);
4063 			}
4064 		} else if (sc->bge_link) {
4065 			sc->bge_link = 0;
4066 			if (bootverbose)
4067 				if_printf(sc->bge_ifp, "link DOWN\n");
4068 			if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
4069 		}
4070 	/* Discard link events for MII/GMII cards if MI auto-polling is disabled. */
4071 	} else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
4072 		/*
4073 		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
4074 		 * bit in the status word always set. Work around this bug by
4075 		 * reading the PHY link status directly.
4076 		 */
4077 		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
4078 
4079 		if (link != sc->bge_link ||
4080 		    sc->bge_asicrev == BGE_ASICREV_BCM5700) {
4081 			mii = device_get_softc(sc->bge_miibus);
4082 			mii_pollstat(mii);
4083 			if (!sc->bge_link &&
4084 			    mii->mii_media_status & IFM_ACTIVE &&
4085 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4086 				sc->bge_link++;
4087 				if (bootverbose)
4088 					if_printf(sc->bge_ifp, "link UP\n");
4089 			} else if (sc->bge_link &&
4090 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
4091 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4092 				sc->bge_link = 0;
4093 				if (bootverbose)
4094 					if_printf(sc->bge_ifp, "link DOWN\n");
4095 			}
4096 		}
4097 	}
4098 
4099 	/* Clear the attention. */
4100 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
4101 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
4102 	    BGE_MACSTAT_LINK_CHANGED);
4103 }
4104 
4105 static void
4106 bge_add_sysctls(struct bge_softc *sc)
4107 {
4108 	struct sysctl_ctx_list *ctx;
4109 	struct sysctl_oid_list *children;
4110 
4111 	ctx = device_get_sysctl_ctx(sc->bge_dev);
4112 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
4113 
4114 #ifdef BGE_REGISTER_DEBUG
4115 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
4116 	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
4117 	    "Debug Information");
4118 
4119 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
4120 	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
4121 	    "Register Read");
4122 
4123 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
4124 	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
4125 	    "Memory Read");
4126 
4127 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "stat_IfHcInOctets",
4128 	    CTLFLAG_RD,
4129 	    &sc->bge_ldata.bge_stats->rxstats.ifHCInOctets.bge_addr_lo,
4130 	    "Bytes received");
4131 
4132 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "stat_IfHcOutOctets",
4133 	    CTLFLAG_RD,
4134 	    &sc->bge_ldata.bge_stats->txstats.ifHCOutOctets.bge_addr_lo,
4135 	    "Bytes sent");
4136 #endif
4137 }
4138 
4139 #ifdef BGE_REGISTER_DEBUG
4140 static int
4141 bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
4142 {
4143 	struct bge_softc *sc;
4144 	uint16_t *sbdata;
4145 	int error;
4146 	int result;
4147 	int i, j;
4148 
4149 	result = -1;
4150 	error = sysctl_handle_int(oidp, &result, 0, req);
4151 	if (error || (req->newptr == NULL))
4152 		return (error);
4153 
4154 	if (result == 1) {
4155 		sc = (struct bge_softc *)arg1;
4156 
4157 		sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
4158 		printf("Status Block:\n");
4159 		for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 2); ) {
4160 			printf("%06x:", i);
4161 			for (j = 0; j < 8; j++) {
4162 				printf(" %04x", sbdata[i]);
4163 				i++;
4164 			}
4165 			printf("\n");
4166 		}
4167 
4168 		printf("Registers:\n");
4169 		for (i = 0x800; i < 0xa00; ) {
4170 			printf("%06x:", i);
4171 			for (j = 0; j < 8; j++) {
4172 				printf(" %08x", CSR_READ_4(sc, i));
4173 				i += 4;
4174 			}
4175 			printf("\n");
4176 		}
4177 
4178 		printf("Hardware Flags:\n");
4179 		if (BGE_IS_575X_PLUS(sc))
4180 			printf(" - 575X Plus\n");
4181 		if (BGE_IS_5705_PLUS(sc))
4182 			printf(" - 5705 Plus\n");
4183 		if (BGE_IS_5714_FAMILY(sc))
4184 			printf(" - 5714 Family\n");
4185 		if (BGE_IS_5700_FAMILY(sc))
4186 			printf(" - 5700 Family\n");
4187 		if (sc->bge_flags & BGE_FLAG_JUMBO)
4188 			printf(" - Supports Jumbo Frames\n");
4189 		if (sc->bge_flags & BGE_FLAG_PCIX)
4190 			printf(" - PCI-X Bus\n");
4191 		if (sc->bge_flags & BGE_FLAG_PCIE)
4192 			printf(" - PCI Express Bus\n");
4193 		if (sc->bge_flags & BGE_FLAG_NO3LED)
4194 			printf(" - No 3 LEDs\n");
4195 		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
4196 			printf(" - RX Alignment Bug\n");
4197 	}
4198 
4199 	return (error);
4200 }
4201 
4202 static int
4203 bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
4204 {
4205 	struct bge_softc *sc;
4206 	int error;
4207 	int result;	/* sysctl_handle_int() operates on an int */
4208 	uint32_t val;
4209 
4210 	result = -1;
4211 	error = sysctl_handle_int(oidp, &result, 0, req);
4212 	if (error || (req->newptr == NULL))
4213 		return (error);
4214 
4215 	if (result >= 0 && result < 0x8000) {
4216 		sc = (struct bge_softc *)arg1;
4217 		val = CSR_READ_4(sc, result);
4218 		printf("reg 0x%06X = 0x%08X\n", result, val);
4219 	}
4220 
4221 	return (error);
4222 }
4223 
4224 static int
4225 bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
4226 {
4227 	struct bge_softc *sc;
4228 	int error;
4229 	int result;	/* sysctl_handle_int() operates on an int */
4230 	uint32_t val;
4231 
4232 	result = -1;
4233 	error = sysctl_handle_int(oidp, &result, 0, req);
4234 	if (error || (req->newptr == NULL))
4235 		return (error);
4236 
4237 	if (result >= 0 && result < 0x8000) {
4238 		sc = (struct bge_softc *)arg1;
4239 		val = bge_readmem_ind(sc, result);
4240 		printf("mem 0x%06X = 0x%08X\n", result, val);
4241 	}
4242 
4243 	return (error);
4244 }
4245 #endif
4246