/*-
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miidevs.h"
#include <dev/mii/brgphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/bge/if_bgereg.h>

#define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
#define ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */

MODULE_DEPEND(bge, pci, 1, 1, 1);
MODULE_DEPEND(bge, ether, 1, 1, 1);
MODULE_DEPEND(bge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
static struct bge_type {
	uint16_t	bge_vid;
	uint16_t	bge_did;
} bge_devs[] = {
	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5700 },
	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5701 },

	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1000 },
	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1002 },
	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC9100 },

	{ APPLE_VENDORID,	APPLE_DEVICE_BCM5701 },

	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5700 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5701 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702X },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703X },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704C },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705K },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714C },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5720 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5721 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5781 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5782 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5786 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5788 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5789 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901A2 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5903M },

	{ SK_VENDORID,		SK_DEVICEID_ALTIMA },

	{ TC_VENDORID,		TC_DEVICEID_3C985 },
	{ TC_VENDORID,		TC_DEVICEID_3C996 },

	{ 0, 0 }
};

static const struct bge_vendor {
	uint16_t	v_id;
	const char	*v_name;
} bge_vendors[] = {
	{ ALTEON_VENDORID,	"Alteon" },
	{ ALTIMA_VENDORID,	"Altima" },
	{ APPLE_VENDORID,	"Apple" },
	{ BCOM_VENDORID,	"Broadcom" },
	{ SK_VENDORID,		"SysKonnect" },
	{ TC_VENDORID,		"3Com" },

	{ 0, NULL }
};

static const struct bge_revision {
	uint32_t	br_chipid;
	const char	*br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0,	"BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1,	"BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0,	"BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1,	"BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2,	"BCM5700 B2" },
	{ BGE_CHIPID_BCM5700_B3,	"BCM5700 B3" },
	{ BGE_CHIPID_BCM5700_ALTIMA,	"BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0,	"BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0,	"BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0,	"BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2,	"BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5,	"BCM5701 B5" },
	{ BGE_CHIPID_BCM5703_A0,	"BCM5703 A0" },
	{ BGE_CHIPID_BCM5703_A1,	"BCM5703 A1" },
	{ BGE_CHIPID_BCM5703_A2,	"BCM5703 A2" },
	{ BGE_CHIPID_BCM5703_A3,	"BCM5703 A3" },
	{ BGE_CHIPID_BCM5703_B0,	"BCM5703 B0" },
	{ BGE_CHIPID_BCM5704_A0,	"BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1,	"BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2,	"BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3,	"BCM5704 A3" },
	{ BGE_CHIPID_BCM5704_B0,	"BCM5704 B0" },
	{ BGE_CHIPID_BCM5705_A0,	"BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1,	"BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2,	"BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3,	"BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0,	"BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1,	"BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3,	"BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0,	"BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1,	"BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0,	"BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1,	"BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2,	"BCM5750 C2" },
	{ BGE_CHIPID_BCM5714_A0,	"BCM5714 A0" },
	{ BGE_CHIPID_BCM5752_A0,	"BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1,	"BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2,	"BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_B0,	"BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3,	"BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0,	"BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1,	"BCM5715 A1" },

	{ 0, NULL }
};

/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
 */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700,		"unknown BCM5700" },
	{ BGE_ASICREV_BCM5701,		"unknown BCM5701" },
	{ BGE_ASICREV_BCM5703,		"unknown BCM5703" },
	{ BGE_ASICREV_BCM5704,		"unknown BCM5704" },
	{ BGE_ASICREV_BCM5705,		"unknown BCM5705" },
	{ BGE_ASICREV_BCM5750,		"unknown BCM5750" },
	{ BGE_ASICREV_BCM5714_A0,	"unknown BCM5714" },
	{ BGE_ASICREV_BCM5752,		"unknown BCM5752" },
	{ BGE_ASICREV_BCM5780,		"unknown BCM5780" },
	{ BGE_ASICREV_BCM5714,		"unknown BCM5714" },
	{ BGE_ASICREV_BCM5755,		"unknown BCM5755" },
	{ BGE_ASICREV_BCM5787,		"unknown BCM5787" },

	{ 0, NULL }
};

#define BGE_IS_5705_OR_BEYOND(sc)			   \
	((sc)->bge_asicrev == BGE_ASICREV_BCM5705	|| \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5750	|| \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0	|| \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5780	|| \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5714	|| \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5752	|| \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5755	|| \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5787)

#define BGE_IS_575X_PLUS(sc)				   \
	((sc)->bge_asicrev == BGE_ASICREV_BCM5750	|| \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0	|| \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5780	|| \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5714	|| \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5752	|| \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5755	|| \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5787)

#define BGE_IS_5714_FAMILY(sc)				   \
	((sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0	|| \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5780	|| \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5714)

#define BGE_IS_JUMBO_CAPABLE(sc) \
	((sc)->bge_asicrev == BGE_ASICREV_BCM5700	|| \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5701	|| \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5703	|| \
	 (sc)->bge_asicrev == BGE_ASICREV_BCM5704)

const struct bge_revision * bge_lookup_rev(uint32_t);
const struct bge_vendor * bge_lookup_vendor(uint16_t);
static int bge_probe(device_t);
static int bge_attach(device_t);
static int bge_detach(device_t);
static int bge_suspend(device_t);
static int bge_resume(device_t);
static void bge_release_resources(struct bge_softc *);
static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int bge_dma_alloc(device_t);
static void bge_dma_free(struct bge_softc *);

static void bge_txeof(struct bge_softc *);
static void bge_rxeof(struct bge_softc *);

static void bge_asf_driver_up(struct bge_softc *);
static void bge_tick_locked(struct bge_softc *);
static void bge_tick(void *);
static void bge_stats_update(struct bge_softc *);
static void bge_stats_update_regs(struct bge_softc *);
static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);

static void bge_intr(void *);
static void bge_start_locked(struct ifnet *);
static void bge_start(struct ifnet *);
static int bge_ioctl(struct ifnet *, u_long, caddr_t);
static void bge_init_locked(struct bge_softc *);
static void bge_init(void *);
static void bge_stop(struct bge_softc *);
static void bge_watchdog(struct ifnet *);
static void bge_shutdown(device_t);
static int bge_ifmedia_upd_locked(struct ifnet *);
static int bge_ifmedia_upd(struct ifnet *);
static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);

static void bge_setmulti(struct bge_softc *);

static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int bge_init_rx_ring_std(struct bge_softc *);
static void bge_free_rx_ring_std(struct bge_softc *);
static int bge_init_rx_ring_jumbo(struct bge_softc *);
static void bge_free_rx_ring_jumbo(struct bge_softc *);
static void bge_free_tx_ring(struct bge_softc *);
static int bge_init_tx_ring(struct bge_softc *);

static int bge_chipinit(struct bge_softc *);
static int bge_blockinit(struct bge_softc *);

static uint32_t bge_readmem_ind(struct bge_softc *, int);
static void bge_writemem_ind(struct bge_softc *, int, int);
#ifdef notdef
static uint32_t bge_readreg_ind(struct bge_softc *, int);
#endif
static void bge_writereg_ind(struct bge_softc *, int, int);

static int bge_miibus_readreg(device_t, int, int);
static int bge_miibus_writereg(device_t, int, int, int);
static void bge_miibus_statchg(device_t);
#ifdef DEVICE_POLLING
static void bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif

#define BGE_RESET_START 1
#define BGE_RESET_STOP  2
static void bge_sig_post_reset(struct bge_softc *, int);
static void bge_sig_legacy(struct bge_softc *, int);
static void bge_sig_pre_reset(struct bge_softc *, int);
static int bge_reset(struct bge_softc *);
static void bge_link_upd(struct bge_softc *);

static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),
	DEVMETHOD(device_suspend,	bge_suspend),
	DEVMETHOD(device_resume,	bge_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	{ 0, 0 }
};

static driver_t bge_driver = {
	"bge",
	bge_methods,
	sizeof(struct bge_softc)
};

static devclass_t bge_devclass;

DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);

static int bge_fake_autoneg = 0;
TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);

static uint32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	return (pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
}

static void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
}
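
/*
 * Usage sketch (illustrative only, hence "notdef"): because the window
 * registers pair a base-address write with a data access, a caller can
 * walk a region of NIC-internal memory one 32-bit word at a time.
 * bge_memwin_clear() is a hypothetical helper, not part of this driver;
 * bge_chipinit() below does the equivalent with BGE_MEMWIN_WRITE().
 */
#ifdef notdef
static void
bge_memwin_clear(struct bge_softc *sc, int start, int end)
{
	int off;

	/* Zero every 32-bit word in [start, end) of NIC memory. */
	for (off = start; off < end; off += sizeof(uint32_t))
		bge_writemem_ind(sc, off, 0);
}
#endif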

#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, int off)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

/*
 * Map a single buffer address.
 */
static void
bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct bge_dmamap_arg *ctx;

	if (error)
		return;

	ctx = arg;

	if (nseg > ctx->bge_maxsegs) {
		ctx->bge_maxsegs = 0;
		return;
	}

	ctx->bge_busaddr = segs->ds_addr;
}
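
/*
 * Usage sketch (illustrative only): this is how the callback above is
 * meant to be driven.  bus_dmamap_load() resolves the mapping and calls
 * bge_dma_map_addr() with the segment list; the caller then reads the
 * bus address back out of the context.  bge_dma_ring_busaddr() is a
 * hypothetical helper; bge_dma_alloc() performs these steps inline.
 */
#ifdef notdef
static bus_addr_t
bge_dma_ring_busaddr(struct bge_softc *sc, bus_dma_tag_t tag,
    bus_dmamap_t map, void *ring, bus_size_t sz)
{
	struct bge_dmamap_arg ctx;

	ctx.sc = sc;
	ctx.bge_maxsegs = 1;		/* a ring must map as one segment */
	ctx.bge_busaddr = 0;
	if (bus_dmamap_load(tag, map, ring, sz, bge_dma_map_addr,
	    &ctx, BUS_DMA_NOWAIT) != 0 || ctx.bge_maxsegs == 0)
		return (0);
	return (ctx.bge_busaddr);
}
#endif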

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion. */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		device_printf(sc->bge_dev, "EEPROM read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return (0);
}
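
/*
 * Worked example for the extraction above (assuming the auto access
 * engine returns the 32-bit word containing the requested byte): a read
 * at addr 0x7E leaves bytes 0x7C-0x7F in BGE_EE_DATA; 0x7E % 4 == 2, so
 * the shift is 16 and the third byte lane is returned.
 */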

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int i, error = 0;
	uint8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		error = bge_eeprom_getbyte(sc, off + i, &byte);
		if (error)
			break;
		*(dest + i) = byte;
	}

	return (error ? 1 : 0);
}
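
/*
 * Usage sketch (illustrative only): the main consumer of
 * bge_read_eeprom() is the station address fetch in bge_attach(), which
 * pulls ETHER_ADDR_LEN bytes from offset BGE_EE_MAC_OFFSET + 2, the
 * same offset bge_attach() uses.
 */
#ifdef notdef
	u_char eaddr[ETHER_ADDR_LEN];

	if (bge_read_eeprom(sc, (caddr_t)eaddr, BGE_EE_MAC_OFFSET + 2,
	    ETHER_ADDR_LEN) != 0)
		device_printf(sc->bge_dev, "failed to read station address\n");
#endif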

static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc;
	uint32_t val, autopoll;
	int i;

	sc = device_get_softc(dev);

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1. On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time. Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return (0);

	/* Reading with autopolling on may trigger PCI errors. */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev, "PHY read timed out\n");
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return (0);

	return (val & 0xFFFF);
}
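
/*
 * Usage sketch (illustrative only): reading the PHY ID registers via the
 * routine above.  MII_PHYIDR1/MII_PHYIDR2 come from <dev/mii/mii.h>; in
 * practice miibus issues these reads itself during PHY probe, always at
 * GMII address 1 per the comment above.
 */
#ifdef notdef
	uint32_t id1, id2;

	id1 = bge_miibus_readreg(sc->bge_dev, 1, MII_PHYIDR1);
	id2 = bge_miibus_readreg(sc->bge_dev, 1, MII_PHYIDR2);
#endif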

static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc;
	uint32_t autopoll;
	int i;

	sc = device_get_softc(dev);

	/* Writing with autopolling on may trigger PCI errors. */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev, "PHY write timed out\n");
		return (0);
	}

	return (0);
}

static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bge_miibus);

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	struct bge_dmamap_arg ctx;
	int error;

	if (m == NULL) {
		m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m_new == NULL)
			return (ENOBUFS);
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m_new, ETHER_ALIGN);
	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_ldata.bge_rx_std_ring[i];
	ctx.bge_maxsegs = 1;
	ctx.sc = sc;
	error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
	    sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
	    m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
	if (error || ctx.bge_maxsegs == 0) {
		if (m == NULL) {
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			m_freem(m_new);
		}
		return (ENOMEM);
	}
	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
	    sc->bge_cdata.bge_rx_std_dmamap[i],
	    BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
	bus_dma_segment_t segs[BGE_NSEG_JUMBO];
	struct bge_extrx_bd *r;
	struct mbuf *m_new = NULL;
	int nsegs;
	int error;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m_new, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
	    m_new, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		if (m == NULL)
			m_freem(m_new);
		return (error);
	}
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;

	/*
	 * Fill in the extended RX buffer descriptor.  The case labels
	 * below intentionally fall through so that every mapped segment
	 * from the highest down to segment 0 gets recorded.
	 */
	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
	r->bge_flags = BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END;
	r->bge_idx = i;
	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
	switch (nsegs) {
	case 4:
		r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
		r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
		r->bge_len3 = segs[3].ds_len;
		/* FALLTHROUGH */
	case 3:
		r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
		r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
		r->bge_len2 = segs[2].ds_len;
		/* FALLTHROUGH */
	case 2:
		r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
		r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
		r->bge_len1 = segs[1].ds_len;
		/* FALLTHROUGH */
	case 1:
		r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
		r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
		r->bge_len0 = segs[0].ds_len;
		break;
	default:
		panic("%s: %d segments\n", __func__, nsegs);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
	    BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->bge_std = i - 1;
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	return (0);
}

static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i]);
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
	    sc->bge_cdata.bge_rx_jumbo_ring_map,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->bge_jumbo = i - 1;

	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
				    BGE_RCB_FLAG_USE_EXT_RX_BD);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return (0);
}

static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
		    sizeof(struct bge_extrx_bd));
	}
}

static void
bge_free_tx_ring(struct bge_softc *sc)
{
	int i;

	if (sc->bge_ldata.bge_tx_ring == NULL)
		return;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i]);
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
		    sizeof(struct bge_tx_bd));
	}
}

static int
bge_init_tx_ring(struct bge_softc *sc)
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	sc->bge_tx_prodidx = 0;
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* NIC-memory send ring not used; initialize to zero. */
	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return (0);
}

static void
bge_setmulti(struct bge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}
	IF_ADDR_UNLOCK(ifp);

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}
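
/*
 * Worked example for the hash above: the low 7 bits of the little-endian
 * CRC32 of the multicast address index one of 128 filter bits; bits 6:5
 * select one of the four registers starting at BGE_MAR0 and bits 4:0
 * select the bit within it.  A hash of 0x4A (1001010b) sets bit 10 of
 * the register at BGE_MAR0 + 8.
 */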

static void
bge_sig_pre_reset(struct bge_softc *sc, int type)
{
	/*
	 * Some chips don't like this so only do this if ASF is enabled.
	 */
	if (sc->bge_asf_mode)
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}

static void
bge_sig_post_reset(struct bge_softc *sc, int type)
{
	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
			/* START DONE */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
			/* UNLOAD DONE */
			break;
		}
	}
}

static void
bge_sig_legacy(struct bge_softc *sc, int type)
{
	if (sc->bge_asf_mode) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}

void bge_stop_fw(struct bge_softc *);
void
bge_stop_fw(struct bge_softc *sc)
{
	int i;

	if (sc->bge_asf_mode) {
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
		CSR_WRITE_4(sc, BGE_CPU_EVENT,
		    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));

		for (i = 0; i < 100; i++) {
			if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
				break;
			DELAY(10);
		}
	}
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	uint32_t dma_rw_ctl;
	int i;

	/* Set endianness before we access any non-PCI registers. */
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed.
	 */
	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
		return (ENODEV);
	}

	/* Clear the MAC control register. */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/* Set up the PCI DMA control register. */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		/* PCI Express bus */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
	} else if (sc->bge_flags & BGE_FLAG_PCIX) {
		/* PCI-X bus */
		if (BGE_IS_5714_FAMILY(sc)) {
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
			dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
			/* XXX magic values, Broadcom-supplied Linux driver */
			if (sc->bge_asicrev == BGE_ASICREV_BCM5780)
				dma_rw_ctl |= (1 << 20) | (1 << 18) |
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE;
			else
				dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15);

		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			/*
			 * The 5704 uses a different encoding of read/write
			 * watermarks.
			 */
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		else
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
			    (0x0F);

		/*
		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
		 * for hardware bugs.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			uint32_t tmp;

			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 0x6 || tmp == 0x7)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
	} else
		/* Conventional PCI bus */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
		    (0x0F);

	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5705)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_TX_NO_PHDR_CSUM);

	/*
	 * Tell the firmware the driver is running.
	 */
	if (sc->bge_asf_mode & ASF_STACKUP)
		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

#ifdef __brokenalpha__
	/*
	 * Must ensure that we do not cross an 8K (bytes) boundary
	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx.
	 */
	PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
	    BGE_PCI_READ_BNDRY_1024BYTES, 4);
#endif

	/* Set the timer prescaler (always 66MHz). */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return (0);
}

static int
bge_blockinit(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Note: the BCM5704 has a smaller mbuf space than other chips. */

	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
		/* Configure mbuf memory pool */
		if (sc->bge_flags & BGE_FLAG_EXTRAM) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_EXT_SSRAM);
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_BUFFPOOL_1);
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		}

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks */
	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
	}
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
		CSR_WRITE_4(sc, BGE_BMAN_MODE,
		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

		/* Poll for buffer manager start indication */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
				break;
			DELAY(10);
		}

		if (i == BGE_TIMEOUT) {
			device_printf(sc->bge_dev,
			    "buffer manager failed to start\n");
			return (ENXIO);
		}
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev, "flow-through queue init failed\n");
		return (ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
	if (BGE_IS_5705_OR_BEYOND(sc))
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	else
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	if (sc->bge_flags & BGE_FLAG_EXTRAM)
		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);

	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/*
	 * Initialize the jumbo RX ring control block.
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;

		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map,
		    BUS_DMASYNC_PREREAD);
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
		    BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED);
		if (sc->bge_flags & BGE_FLAG_EXTRAM)
			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);

		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
	}

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
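
	/*
	 * Assuming the usual ring sizes (BGE_STD_RX_RING_CNT of 512 and
	 * BGE_JUMBO_RX_RING_CNT of 256 in if_bgereg.h), the writes above
	 * request replenishment after 64 standard or 32 jumbo BDs have
	 * been consumed.
	 */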

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	if (!(BGE_IS_5705_OR_BEYOND(sc)))
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));

	/* Disable all unused RX return rings */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
		    BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Initialize RX ring indexes */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0.
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
	    IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
	    IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev,
		    "host coalescing engine failed to idle\n");
		return (ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);

	/* Set up address of statistics block */
	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if (!(BGE_IS_5705_OR_BEYOND(sc)))
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    ((sc->bge_flags & BGE_FLAG_TBI) ?
	    BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	if (!(BGE_IS_5705_OR_BEYOND(sc)))
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE,
	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE,
	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (!(BGE_IS_5705_OR_BEYOND(sc)))
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* Ack/clear link change events. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16));
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bge_intr() -> bge_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return (0);
}

const struct bge_revision *
bge_lookup_rev(uint32_t chipid)
{
	const struct bge_revision *br;

	for (br = bge_revisions; br->br_name != NULL; br++) {
		if (br->br_chipid == chipid)
			return (br);
	}

	for (br = bge_majorrevs; br->br_name != NULL; br++) {
		if (br->br_chipid == BGE_ASICREV(chipid))
			return (br);
	}

	return (NULL);
}

const struct bge_vendor *
bge_lookup_vendor(uint16_t vid)
{
	const struct bge_vendor *v;

	for (v = bge_vendors; v->v_name != NULL; v++)
		if (v->v_id == vid)
			return (v);

	panic("%s: unknown vendor %d", __func__, vid);
	return (NULL);
}
1617 
1618 /*
1619  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1620  * against our list and return its name if we find a match.
1621  *
1622  * Note that since the Broadcom controller contains VPD support, we
1623  * can get the device name string from the controller itself instead
1624  * of the compiled-in string. This is a little slow, but it guarantees
1625  * we'll always announce the right product name. Unfortunately, this
1626  * is possible only later in bge_attach(), when we have established
1627  * access to EEPROM.
1628  */
1629 static int
1630 bge_probe(device_t dev)
1631 {
1632 	struct bge_type *t = bge_devs;
1633 	struct bge_softc *sc = device_get_softc(dev);
1634 
1635 	bzero(sc, sizeof(struct bge_softc));
1636 	sc->bge_dev = dev;
1637 
1638 	while(t->bge_vid != 0) {
1639 		if ((pci_get_vendor(dev) == t->bge_vid) &&
1640 		    (pci_get_device(dev) == t->bge_did)) {
1641 			char buf[64];
1642 			const struct bge_revision *br;
1643 			const struct bge_vendor *v;
1644 			uint32_t id;
1645 
1646 			id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1647 			    BGE_PCIMISCCTL_ASICREV;
1648 			br = bge_lookup_rev(id);
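			/* The ASIC revision is in the upper 16 bits. */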
1649 			id >>= 16;
1650 			v = bge_lookup_vendor(t->bge_vid);
1651 			if (br == NULL)
1652 				snprintf(buf, 64, "%s unknown ASIC (%#04x)",
1653 				    v->v_name, id);
1654 			else
1655 				snprintf(buf, 64, "%s %s, ASIC rev. %#04x",
1656 				    v->v_name, br->br_name, id);
1657 			device_set_desc_copy(dev, buf);
1658 			if (pci_get_subvendor(dev) == DELL_VENDORID)
1659 				sc->bge_flags |= BGE_FLAG_NO3LED;
1660 			return (0);
1661 		}
1662 		t++;
1663 	}
1664 
1665 	return (ENXIO);
1666 }
1667 
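/*
 * Tear down all DMA state created by bge_dma_alloc().  Maps are
 * destroyed first, then any DMA'able ring memory is unloaded and
 * freed, and finally the tags (the parent tag last).  Every step is
 * guarded with a NULL check, so this is safe to call on a partially
 * constructed softc from an attach failure path.
 */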
1668 static void
1669 bge_dma_free(struct bge_softc *sc)
1670 {
1671 	int i;
1672 
1673 	/* Destroy DMA maps for RX buffers. */
1674 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1675 		if (sc->bge_cdata.bge_rx_std_dmamap[i])
1676 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1677 			    sc->bge_cdata.bge_rx_std_dmamap[i]);
1678 	}
1679 
1680 	/* Destroy DMA maps for jumbo RX buffers. */
1681 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1682 		if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1683 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1684 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1685 	}
1686 
1687 	/* Destroy DMA maps for TX buffers. */
1688 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1689 		if (sc->bge_cdata.bge_tx_dmamap[i])
1690 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1691 			    sc->bge_cdata.bge_tx_dmamap[i]);
1692 	}
1693 
1694 	if (sc->bge_cdata.bge_mtag)
1695 		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
1696 
1698 	/* Destroy standard RX ring. */
1699 	if (sc->bge_cdata.bge_rx_std_ring_map)
1700 		bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1701 		    sc->bge_cdata.bge_rx_std_ring_map);
1702 	if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
1703 		bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1704 		    sc->bge_ldata.bge_rx_std_ring,
1705 		    sc->bge_cdata.bge_rx_std_ring_map);
1706 
1707 	if (sc->bge_cdata.bge_rx_std_ring_tag)
1708 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1709 
1710 	/* Destroy jumbo RX ring. */
1711 	if (sc->bge_cdata.bge_rx_jumbo_ring_map)
1712 		bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1713 		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1714 
1715 	if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
1716 	    sc->bge_ldata.bge_rx_jumbo_ring)
1717 		bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1718 		    sc->bge_ldata.bge_rx_jumbo_ring,
1719 		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1720 
1721 	if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1722 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1723 
1724 	/* Destroy RX return ring. */
1725 	if (sc->bge_cdata.bge_rx_return_ring_map)
1726 		bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1727 		    sc->bge_cdata.bge_rx_return_ring_map);
1728 
1729 	if (sc->bge_cdata.bge_rx_return_ring_map &&
1730 	    sc->bge_ldata.bge_rx_return_ring)
1731 		bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1732 		    sc->bge_ldata.bge_rx_return_ring,
1733 		    sc->bge_cdata.bge_rx_return_ring_map);
1734 
1735 	if (sc->bge_cdata.bge_rx_return_ring_tag)
1736 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
1737 
1738 	/* Destroy TX ring. */
1739 	if (sc->bge_cdata.bge_tx_ring_map)
1740 		bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
1741 		    sc->bge_cdata.bge_tx_ring_map);
1742 
1743 	if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
1744 		bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
1745 		    sc->bge_ldata.bge_tx_ring,
1746 		    sc->bge_cdata.bge_tx_ring_map);
1747 
1748 	if (sc->bge_cdata.bge_tx_ring_tag)
1749 		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
1750 
1751 	/* Destroy status block. */
1752 	if (sc->bge_cdata.bge_status_map)
1753 		bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
1754 		    sc->bge_cdata.bge_status_map);
1755 
1756 	if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
1757 		bus_dmamem_free(sc->bge_cdata.bge_status_tag,
1758 		    sc->bge_ldata.bge_status_block,
1759 		    sc->bge_cdata.bge_status_map);
1760 
1761 	if (sc->bge_cdata.bge_status_tag)
1762 		bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1763 
1764 	/* Destroy statistics block. */
1765 	if (sc->bge_cdata.bge_stats_map)
1766 		bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
1767 		    sc->bge_cdata.bge_stats_map);
1768 
1769 	if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
1770 		bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
1771 		    sc->bge_ldata.bge_stats,
1772 		    sc->bge_cdata.bge_stats_map);
1773 
1774 	if (sc->bge_cdata.bge_stats_tag)
1775 		bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1776 
1777 	/* Destroy the parent tag. */
1778 	if (sc->bge_cdata.bge_parent_tag)
1779 		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1780 }
1781 
1782 static int
1783 bge_dma_alloc(device_t dev)
1784 {
1785 	struct bge_dmamap_arg ctx;
1786 	struct bge_softc *sc;
1787 	int i, error;
1788 
1789 	sc = device_get_softc(dev);
1790 
1791 	/*
1792 	 * Allocate the parent bus DMA tag appropriate for PCI.
1793 	 */
1794 	error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),/* parent */
1795 			PAGE_SIZE, 0,		/* alignment, boundary */
1796 			BUS_SPACE_MAXADDR,	/* lowaddr */
1797 			BUS_SPACE_MAXADDR,	/* highaddr */
1798 			NULL, NULL,		/* filter, filterarg */
1799 			MAXBSIZE, BGE_NSEG_NEW,	/* maxsize, nsegments */
1800 			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1801 			0,			/* flags */
1802 			NULL, NULL,		/* lockfunc, lockarg */
1803 			&sc->bge_cdata.bge_parent_tag);
1804 
1805 	if (error != 0) {
1806 		device_printf(sc->bge_dev,
1807 		    "could not allocate parent dma tag\n");
1808 		return (ENOMEM);
1809 	}
1810 
1811 	/*
1812 	 * Create tag for RX mbufs.
1813 	 */
1814 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
1815 	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1816 	    NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
1817 	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
1818 
1819 	if (error) {
1820 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1821 		return (ENOMEM);
1822 	}
1823 
1824 	/* Create DMA maps for RX buffers. */
1825 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1826 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1827 			    &sc->bge_cdata.bge_rx_std_dmamap[i]);
1828 		if (error) {
1829 			device_printf(sc->bge_dev,
1830 			    "can't create DMA map for RX\n");
1831 			return (ENOMEM);
1832 		}
1833 	}
1834 
1835 	/* Create DMA maps for TX buffers. */
1836 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1837 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1838 			    &sc->bge_cdata.bge_tx_dmamap[i]);
1839 		if (error) {
1840 			device_printf(sc->bge_dev,
1841 			    "can't create DMA map for TX\n");
1842 			return (ENOMEM);
1843 		}
1844 	}
1845 
1846 	/* Create tag for standard RX ring. */
1847 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1848 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1849 	    NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1850 	    NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1851 
1852 	if (error) {
1853 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1854 		return (ENOMEM);
1855 	}
1856 
1857 	/* Allocate DMA'able memory for standard RX ring. */
1858 	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1859 	    (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1860 	    &sc->bge_cdata.bge_rx_std_ring_map);
1861 	if (error)
1862 		return (ENOMEM);
1863 
1864 	bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1865 
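	/*
	 * bus_dmamap_load() reports the ring's bus address through the
	 * bge_dma_map_addr() callback, which saves it in ctx.bge_busaddr;
	 * each ring is a single contiguous segment, hence bge_maxsegs = 1.
	 */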
1866 	/* Load the address of the standard RX ring. */
1867 	ctx.bge_maxsegs = 1;
1868 	ctx.sc = sc;
1869 
1870 	error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
1871 	    sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
1872 	    BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1873 
1874 	if (error)
1875 		return (ENOMEM);
1876 
1877 	sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
1878 
1879 	/* Create tags for jumbo mbufs. */
1880 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
1881 		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1882 		    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1883 		    NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
1884 		    0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
1885 		if (error) {
1886 			device_printf(sc->bge_dev,
1887 			    "could not allocate jumbo dma tag\n");
1888 			return (ENOMEM);
1889 		}
1890 
1891 		/* Create tag for jumbo RX ring. */
1892 		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1893 		    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1894 		    NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
1895 		    NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
1896 
1897 		if (error) {
1898 			device_printf(sc->bge_dev,
1899 			    "could not allocate jumbo ring dma tag\n");
1900 			return (ENOMEM);
1901 		}
1902 
1903 		/* Allocate DMA'able memory for jumbo RX ring. */
1904 		error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1905 		    (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
1906 		    BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1907 		    &sc->bge_cdata.bge_rx_jumbo_ring_map);
1908 		if (error)
1909 			return (ENOMEM);
1910 
1911 		/* Load the address of the jumbo RX ring. */
1912 		ctx.bge_maxsegs = 1;
1913 		ctx.sc = sc;
1914 
1915 		error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1916 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
1917 		    sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
1918 		    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1919 
1920 		if (error)
1921 			return (ENOMEM);
1922 
1923 		sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
1924 
1925 		/* Create DMA maps for jumbo RX buffers. */
1926 		for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1927 			error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
1928 				    0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1929 			if (error) {
1930 				device_printf(sc->bge_dev,
1931 				    "can't create DMA map for jumbo RX\n");
1932 				return (ENOMEM);
1933 			}
1934 		}
1936 	}
1937 
1938 	/* Create tag for RX return ring. */
1939 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1940 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1941 	    NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
1942 	    NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
1943 
1944 	if (error) {
1945 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1946 		return (ENOMEM);
1947 	}
1948 
1949 	/* Allocate DMA'able memory for RX return ring. */
1950 	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
1951 	    (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
1952 	    &sc->bge_cdata.bge_rx_return_ring_map);
1953 	if (error)
1954 		return (ENOMEM);
1955 
1956 	bzero((char *)sc->bge_ldata.bge_rx_return_ring,
1957 	    BGE_RX_RTN_RING_SZ(sc));
1958 
1959 	/* Load the address of the RX return ring. */
1960 	ctx.bge_maxsegs = 1;
1961 	ctx.sc = sc;
1962 
1963 	error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
1964 	    sc->bge_cdata.bge_rx_return_ring_map,
1965 	    sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
1966 	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1967 
1968 	if (error)
1969 		return (ENOMEM);
1970 
1971 	sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
1972 
1973 	/* Create tag for TX ring. */
1974 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1975 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1976 	    NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
1977 	    &sc->bge_cdata.bge_tx_ring_tag);
1978 
1979 	if (error) {
1980 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1981 		return (ENOMEM);
1982 	}
1983 
1984 	/* Allocate DMA'able memory for TX ring. */
1985 	error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
1986 	    (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
1987 	    &sc->bge_cdata.bge_tx_ring_map);
1988 	if (error)
1989 		return (ENOMEM);
1990 
1991 	bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1992 
1993 	/* Load the address of the TX ring. */
1994 	ctx.bge_maxsegs = 1;
1995 	ctx.sc = sc;
1996 
1997 	error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
1998 	    sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
1999 	    BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2000 
2001 	if (error)
2002 		return (ENOMEM);
2003 
2004 	sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
2005 
2006 	/* Create tag for status block. */
2007 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2008 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2009 	    NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
2010 	    NULL, NULL, &sc->bge_cdata.bge_status_tag);
2011 
2012 	if (error) {
2013 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
2014 		return (ENOMEM);
2015 	}
2016 
2017 	/* Allocate DMA'able memory for status block. */
2018 	error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
2019 	    (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
2020 	    &sc->bge_cdata.bge_status_map);
2021 	if (error)
2022 		return (ENOMEM);
2023 
2024 	bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2025 
2026 	/* Load the address of the status block. */
2027 	ctx.sc = sc;
2028 	ctx.bge_maxsegs = 1;
2029 
2030 	error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2031 	    sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2032 	    BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2033 
2034 	if (error)
2035 		return (ENOMEM);
2036 
2037 	sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2038 
2039 	/* Create tag for statistics block. */
2040 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2041 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2042 	    NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2043 	    &sc->bge_cdata.bge_stats_tag);
2044 
2045 	if (error) {
2046 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
2047 		return (ENOMEM);
2048 	}
2049 
2050 	/* Allocate DMA'able memory for statistics block. */
2051 	error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2052 	    (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2053 	    &sc->bge_cdata.bge_stats_map);
2054 	if (error)
2055 		return (ENOMEM);
2056 
2057 	bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2058 
2059 	/* Load the address of the statistics block. */
2060 	ctx.sc = sc;
2061 	ctx.bge_maxsegs = 1;
2062 
2063 	error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2064 	    sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2065 	    BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2066 
2067 	if (error)
2068 		return (ENOMEM);
2069 
2070 	sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2071 
2072 	return (0);
2073 }
2074 
2075 static int
2076 bge_attach(device_t dev)
2077 {
2078 	struct ifnet *ifp;
2079 	struct bge_softc *sc;
2080 	uint32_t hwcfg = 0;
2081 	uint32_t mac_tmp = 0;
2082 	u_char eaddr[6];
2083 	int error = 0, rid;
2084 	int trys;
2085 
2086 	sc = device_get_softc(dev);
2087 	sc->bge_dev = dev;
2088 
2089 	/*
2090 	 * Map control/status registers.
2091 	 */
2092 	pci_enable_busmaster(dev);
2093 
2094 	rid = BGE_PCI_BAR0;
2095 	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2096 	    RF_ACTIVE|PCI_RF_DENSE);
2097 
2098 	if (sc->bge_res == NULL) {
2099 		device_printf(sc->bge_dev, "couldn't map memory\n");
2100 		error = ENXIO;
2101 		goto fail;
2102 	}
2103 
2104 	sc->bge_btag = rman_get_bustag(sc->bge_res);
2105 	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2106 
2107 	/* Allocate interrupt. */
2108 	rid = 0;
2109 
2110 	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2111 	    RF_SHAREABLE | RF_ACTIVE);
2112 
2113 	if (sc->bge_irq == NULL) {
2114 		device_printf(sc->bge_dev, "couldn't map interrupt\n");
2115 		error = ENXIO;
2116 		goto fail;
2117 	}
2118 
2119 	BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2120 
2121 	/* Save ASIC rev. */
2122 
2123 	sc->bge_chipid =
2124 	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2125 	    BGE_PCIMISCCTL_ASICREV;
2126 	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2127 	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2128 
2129 	/*
2130 	 * XXX: Broadcom Linux driver.  Not in specs or errata.
2131 	 * PCI-Express?
2132 	 */
2133 	if (BGE_IS_5705_OR_BEYOND(sc)) {
2134 		uint32_t v;
2135 
2136 		v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
2137 		if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) {
2138 			v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2139 			if ((v & 0xff) == BGE_PCIE_CAPID)
2140 				sc->bge_flags |= BGE_FLAG_PCIE;
2141 		}
2142 	}
2143 
2144 	/*
2145 	 * PCI-X ?
2146 	 * PCI-X?
2147 	if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
2148 	    BGE_PCISTATE_PCI_BUSMODE) == 0)
2149 		sc->bge_flags |= BGE_FLAG_PCIX;
2150 
2151 	/* Try to reset the chip. */
2152 	if (bge_reset(sc)) {
2153 		device_printf(sc->bge_dev, "chip reset failed\n");
2154 		bge_release_resources(sc);
2155 		error = ENXIO;
2156 		goto fail;
2157 	}
2158 
2159 	sc->bge_asf_mode = 0;
2160 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2161 	    == BGE_MAGIC_NUMBER) {
2162 		if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2163 		    & BGE_HWCFG_ASF) {
2164 			sc->bge_asf_mode |= ASF_ENABLE;
2165 			sc->bge_asf_mode |= ASF_STACKUP;
2166 			if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
2167 				sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2168 			}
2169 		}
2170 	}
2171 
2172 	/* Try to reset the chip again the nice way. */
2173 	bge_stop_fw(sc);
2174 	bge_sig_pre_reset(sc, BGE_RESET_STOP);
2175 	if (bge_reset(sc)) {
2176 		device_printf(sc->bge_dev, "chip reset failed\n");
2177 		bge_release_resources(sc);
2178 		error = ENXIO;
2179 		goto fail;
2180 	}
2181 
2182 	bge_sig_legacy(sc, BGE_RESET_STOP);
2183 	bge_sig_post_reset(sc, BGE_RESET_STOP);
2184 
2185 	if (bge_chipinit(sc)) {
2186 		device_printf(sc->bge_dev, "chip initialization failed\n");
2187 		bge_release_resources(sc);
2188 		error = ENXIO;
2189 		goto fail;
2190 	}
2191 
2192 	/*
2193 	 * Get the station address from NIC memory or, failing that, EEPROM.
2194 	 */
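	/*
	 * 0x484b is ASCII "HK", a signature the bootcode apparently
	 * leaves at offset 0x0c14 when it has copied the MAC address
	 * into NIC memory.
	 */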
2195 	mac_tmp = bge_readmem_ind(sc, 0x0c14);
2196 	if ((mac_tmp >> 16) == 0x484b) {
2197 		eaddr[0] = (u_char)(mac_tmp >> 8);
2198 		eaddr[1] = (u_char)mac_tmp;
2199 		mac_tmp = bge_readmem_ind(sc, 0x0c18);
2200 		eaddr[2] = (u_char)(mac_tmp >> 24);
2201 		eaddr[3] = (u_char)(mac_tmp >> 16);
2202 		eaddr[4] = (u_char)(mac_tmp >> 8);
2203 		eaddr[5] = (u_char)mac_tmp;
2204 	} else if (bge_read_eeprom(sc, eaddr,
2205 	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2206 		device_printf(sc->bge_dev, "failed to read station address\n");
2207 		bge_release_resources(sc);
2208 		error = ENXIO;
2209 		goto fail;
2210 	}
2211 
2212 	/* 5705 limits RX return ring to 512 entries. */
2213 	if (BGE_IS_5705_OR_BEYOND(sc))
2214 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2215 	else
2216 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2217 
2218 	if (bge_dma_alloc(dev)) {
2219 		device_printf(sc->bge_dev,
2220 		    "failed to allocate DMA resources\n");
2221 		bge_release_resources(sc);
2222 		error = ENXIO;
2223 		goto fail;
2224 	}
2225 
2226 	/* Set default tuneable values. */
2227 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2228 	sc->bge_rx_coal_ticks = 150;
2229 	sc->bge_tx_coal_ticks = 150;
2230 	sc->bge_rx_max_coal_bds = 64;
2231 	sc->bge_tx_max_coal_bds = 128;
2232 
2233 	/* Set up ifnet structure */
2234 	ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2235 	if (ifp == NULL) {
2236 		device_printf(sc->bge_dev, "if_alloc() failed\n");
2237 		bge_release_resources(sc);
2238 		error = ENXIO;
2239 		goto fail;
2240 	}
2241 	ifp->if_softc = sc;
2242 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2243 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2244 	ifp->if_ioctl = bge_ioctl;
2245 	ifp->if_start = bge_start;
2246 	ifp->if_watchdog = bge_watchdog;
2247 	ifp->if_init = bge_init;
2248 	ifp->if_mtu = ETHERMTU;
2249 	ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2250 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2251 	IFQ_SET_READY(&ifp->if_snd);
2252 	ifp->if_hwassist = BGE_CSUM_FEATURES;
2253 	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2254 	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM;
2255 	ifp->if_capenable = ifp->if_capabilities;
2256 #ifdef DEVICE_POLLING
2257 	ifp->if_capabilities |= IFCAP_POLLING;
2258 #endif
2259 
2260 	/*
2261 	 * 5700 B0 chips do not support checksumming correctly due
2262 	 * to hardware bugs.
2263 	 */
2264 	if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2265 		ifp->if_capabilities &= ~IFCAP_HWCSUM;
2266 		ifp->if_capenable &= ~IFCAP_HWCSUM;
2267 		ifp->if_hwassist = 0;
2268 	}
2269 
2270 	/*
2271 	 * Figure out what sort of media we have by checking the
2272 	 * hardware config word in the first 32k of NIC internal memory,
2273 	 * or fall back to examining the EEPROM if necessary.
2274 	 * Note: on some BCM5700 cards, this value appears to be unset.
2275 	 * If that's the case, we have to rely on identifying the NIC
2276 	 * by its PCI subsystem ID, as we do below for the SysKonnect
2277 	 * SK-9D41.
2278 	 */
2279 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2280 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2281 	else {
2282 		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2283 		    sizeof(hwcfg))) {
2284 			device_printf(sc->bge_dev, "failed to read EEPROM\n");
2285 			bge_release_resources(sc);
2286 			error = ENXIO;
2287 			goto fail;
2288 		}
2289 		hwcfg = ntohl(hwcfg);
2290 	}
2291 
2292 	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2293 		sc->bge_flags |= BGE_FLAG_TBI;
2294 
2295 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
2296 	if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2297 		sc->bge_flags |= BGE_FLAG_TBI;
2298 
2299 	if (sc->bge_flags & BGE_FLAG_TBI) {
2300 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2301 		    bge_ifmedia_upd, bge_ifmedia_sts);
2302 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2303 		ifmedia_add(&sc->bge_ifmedia,
2304 		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2305 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2306 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2307 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2308 	} else {
2309 		/*
2310 		 * Do transceiver setup and tell the firmware the
2311 		 * driver is down so we can try to get access to
2312 		 * probe the PHY if ASF is running.  Retry a couple of times
2313 		 * if we get a conflict with the ASF firmware accessing
2314 		 * the PHY.
2315 		 */
2316 		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2317 		trys = 0;
2318 again:
2319 		bge_asf_driver_up(sc);
2320 
2321 		if (mii_phy_probe(dev, &sc->bge_miibus,
2322 		    bge_ifmedia_upd, bge_ifmedia_sts)) {
2323 			if (trys++ < 4) {
2324 				device_printf(sc->bge_dev, "Try again\n");
2325 				bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR, BMCR_RESET);
2326 				goto again;
2327 			}
2328 
2329 			device_printf(sc->bge_dev, "MII without any PHY!\n");
2330 			bge_release_resources(sc);
2331 			error = ENXIO;
2332 			goto fail;
2333 		}
2334 
2335 		/*
2336 		 * Now tell the firmware we are going up after probing the PHY.
2337 		 */
2338 		if (sc->bge_asf_mode & ASF_STACKUP)
2339 			BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2340 	}
2341 
2342 	/*
2343 	 * When using the BCM5701 in PCI-X mode, data corruption has
2344 	 * been observed in the first few bytes of some received packets.
2345 	 * Aligning the packet buffer in memory eliminates the corruption.
2346 	 * Unfortunately, this misaligns the packet payloads.  On platforms
2347 	 * which do not support unaligned accesses, we will realign the
2348 	 * payloads by copying the received packets.
2349 	 */
2350 	if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2351 	    sc->bge_flags & BGE_FLAG_PCIX)
2352 		sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2353 
2354 	/*
2355 	 * Call MI attach routine.
2356 	 */
2357 	ether_ifattach(ifp, eaddr);
2358 	callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE);
2359 
2360 	/*
2361 	 * Hookup IRQ last.
2362 	 */
2363 	error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2364 	   bge_intr, sc, &sc->bge_intrhand);
2365 
2366 	if (error) {
2367 		bge_detach(dev);
2368 		device_printf(sc->bge_dev, "couldn't set up irq\n");
2369 	}
2370 
2371 fail:
2372 	return (error);
2373 }
2374 
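/*
 * Undo bge_attach(): quiesce the chip under the driver lock, detach
 * the ifnet and MII layers, then release the bus resources.
 */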
2375 static int
2376 bge_detach(device_t dev)
2377 {
2378 	struct bge_softc *sc;
2379 	struct ifnet *ifp;
2380 
2381 	sc = device_get_softc(dev);
2382 	ifp = sc->bge_ifp;
2383 
2384 #ifdef DEVICE_POLLING
2385 	if (ifp->if_capenable & IFCAP_POLLING)
2386 		ether_poll_deregister(ifp);
2387 #endif
2388 
2389 	BGE_LOCK(sc);
2390 	bge_stop(sc);
2391 	bge_reset(sc);
2392 	BGE_UNLOCK(sc);
2393 
2394 	ether_ifdetach(ifp);
2395 
2396 	if (sc->bge_flags & BGE_FLAG_TBI) {
2397 		ifmedia_removeall(&sc->bge_ifmedia);
2398 	} else {
2399 		bus_generic_detach(dev);
2400 		device_delete_child(dev, sc->bge_miibus);
2401 	}
2402 
2403 	bge_release_resources(sc);
2404 
2405 	return (0);
2406 }
2407 
2408 static void
2409 bge_release_resources(struct bge_softc *sc)
2410 {
2411 	device_t dev;
2412 
2413 	dev = sc->bge_dev;
2414 
2415 	if (sc->bge_vpd_prodname != NULL)
2416 		free(sc->bge_vpd_prodname, M_DEVBUF);
2417 
2418 	if (sc->bge_vpd_readonly != NULL)
2419 		free(sc->bge_vpd_readonly, M_DEVBUF);
2420 
2421 	if (sc->bge_intrhand != NULL)
2422 		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2423 
2424 	if (sc->bge_irq != NULL)
2425 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2426 
2427 	if (sc->bge_res != NULL)
2428 		bus_release_resource(dev, SYS_RES_MEMORY,
2429 		    BGE_PCI_BAR0, sc->bge_res);
2430 
2431 	if (sc->bge_ifp != NULL)
2432 		if_free(sc->bge_ifp);
2433 
2434 	bge_dma_free(sc);
2435 
2436 	if (mtx_initialized(&sc->bge_mtx))	/* XXX */
2437 		BGE_LOCK_DESTROY(sc);
2438 }
2439 
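/*
 * Reset the chip.  We save the important PCI config state first,
 * issue a core-clock reset, wait for the bootcode to post the one's
 * complement of the magic number in the firmware mailbox, and then
 * wait for the PCISTATE register to settle back to its pre-reset
 * value before touching the rest of the device.
 */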
2440 static int
2441 bge_reset(struct bge_softc *sc)
2442 {
2443 	device_t dev;
2444 	uint32_t cachesize, command, pcistate, reset;
2445 	int i, val = 0;
2446 
2447 	dev = sc->bge_dev;
2448 
2449 	/* Save some important PCI state. */
2450 	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2451 	command = pci_read_config(dev, BGE_PCI_CMD, 4);
2452 	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2453 
2454 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2455 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2456 	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2457 
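	/*
	 * The (65 << 1) written to BGE_MISC_CFG here (and again after
	 * the reset below) appears to program the core-clock delay
	 * timer; the value matches what other drivers for this chip
	 * use, but its meaning is not documented in this file.
	 */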
2458 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2459 
2460 	/* XXX: Broadcom Linux driver. */
2461 	if (sc->bge_flags & BGE_FLAG_PCIE) {
2462 		if (CSR_READ_4(sc, 0x7e2c) == 0x60)	/* PCIE 1.0 */
2463 			CSR_WRITE_4(sc, 0x7e2c, 0x20);
2464 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2465 			/* Prevent PCIE link training during global reset */
2466 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2467 			reset |= (1<<29);
2468 		}
2469 	}
2470 
2471 	/*
2472 	 * Write the magic number to the firmware mailbox at 0xb50
2473 	 * so that the driver can synchronize with the firmware.
2474 	 */
2475 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2476 
2477 	/* Issue global reset */
2478 	bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2479 
2480 	DELAY(1000);
2481 
2482 	/* XXX: Broadcom Linux driver. */
2483 	if (sc->bge_flags & BGE_FLAG_PCIE) {
2484 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2485 			uint32_t v;
2486 
2487 			DELAY(500000); /* wait for link training to complete */
2488 			v = pci_read_config(dev, 0xc4, 4);
2489 			pci_write_config(dev, 0xc4, v | (1<<15), 4);
2490 		}
2491 		/* Set PCIE max payload size and clear error status. */
2492 		pci_write_config(dev, 0xd8, 0xf5000, 4);
2493 	}
2494 
2495 	/* Reset some of the PCI state that got zapped by reset. */
2496 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2497 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2498 	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2499 	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2500 	pci_write_config(dev, BGE_PCI_CMD, command, 4);
2501 	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2502 
2503 	/* Enable memory arbiter. */
2504 	if (BGE_IS_5714_FAMILY(sc)) {
2505 		uint32_t val;
2506 
2507 		val = CSR_READ_4(sc, BGE_MARB_MODE);
2508 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2509 	} else
2510 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2511 
2512 	/*
2513 	 * Poll the value location we just wrote until
2514 	 * we see the 1's complement of the magic number.
2515 	 * This indicates that the firmware initialization
2516 	 * is complete.
2517 	 */
2518 	for (i = 0; i < BGE_TIMEOUT; i++) {
2519 		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2520 		if (val == ~BGE_MAGIC_NUMBER)
2521 			break;
2522 		DELAY(10);
2523 	}
2524 
2525 	if (i == BGE_TIMEOUT) {
2526 		device_printf(sc->bge_dev, "firmware handshake timed out\n");
2527 		return (0);
2528 	}
2529 
2530 	/*
2531 	 * XXX Wait for the value of the PCISTATE register to
2532 	 * return to its original pre-reset state. This is a
2533 	 * fairly good indicator of reset completion. If we don't
2534 	 * wait for the reset to fully complete, trying to read
2535 	 * from the device's non-PCI registers may yield garbage
2536 	 * results.
2537 	 */
2538 	for (i = 0; i < BGE_TIMEOUT; i++) {
2539 		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2540 			break;
2541 		DELAY(10);
2542 	}
2543 
2544 	/* Fix up byte swapping. */
2545 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
2546 	    BGE_MODECTL_BYTESWAP_DATA);
2547 
2548 	/* Tell the ASF firmware we are up */
2549 	if (sc->bge_asf_mode & ASF_STACKUP)
2550 		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2551 
2552 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2553 
2554 	/*
2555 	 * The 5704 in TBI mode apparently needs some special
2556 	 * adjustment to ensure the SERDES drive level is set
2557 	 * to 1.2V.
2558 	 */
2559 	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
2560 	    sc->bge_flags & BGE_FLAG_TBI) {
2561 		uint32_t serdescfg;
2562 
2563 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2564 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
2565 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2566 	}
2567 
2568 	/* XXX: Broadcom Linux driver. */
2569 	if (sc->bge_flags & BGE_FLAG_PCIE &&
2570 	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2571 		uint32_t v;
2572 
2573 		v = CSR_READ_4(sc, 0x7c00);
2574 		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2575 	}
2576 	DELAY(10000);
2577 
2578 	return (0);
2579 }
2580 
2581 /*
2582  * Frame reception handling. This is called if there's a frame
2583  * on the receive return list.
2584  *
2585  * Note: we have to be able to handle two possibilities here:
2586  * 1) the frame is from the jumbo receive ring
2587  * 2) the frame is from the standard receive ring
2588  */
2589 
2590 static void
2591 bge_rxeof(struct bge_softc *sc)
2592 {
2593 	struct ifnet *ifp;
2594 	int stdcnt = 0, jumbocnt = 0;
2595 
2596 	BGE_LOCK_ASSERT(sc);
2597 
2598 	/* Nothing to do. */
2599 	if (sc->bge_rx_saved_considx ==
2600 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
2601 		return;
2602 
2603 	ifp = sc->bge_ifp;
2604 
2605 	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2606 	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
2607 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2608 	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
2609 	if (BGE_IS_JUMBO_CAPABLE(sc))
2610 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2611 		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTREAD);
2612 
2613 	while (sc->bge_rx_saved_considx !=
2614 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2615 		struct bge_rx_bd	*cur_rx;
2616 		uint32_t		rxidx;
2617 		struct mbuf		*m = NULL;
2618 		uint16_t		vlan_tag = 0;
2619 		int			have_tag = 0;
2620 
2621 #ifdef DEVICE_POLLING
2622 		if (ifp->if_capenable & IFCAP_POLLING) {
2623 			if (sc->rxcycles <= 0)
2624 				break;
2625 			sc->rxcycles--;
2626 		}
2627 #endif
2628 
2629 		cur_rx =
2630 		    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2631 
2632 		rxidx = cur_rx->bge_idx;
2633 		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2634 
2635 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2636 			have_tag = 1;
2637 			vlan_tag = cur_rx->bge_vlan_tag;
2638 		}
2639 
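		/*
		 * On an RX error, or when a replacement buffer cannot
		 * be allocated, the old mbuf is recycled back into the
		 * ring rather than being passed up the stack.
		 */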
2640 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2641 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2642 			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
2643 			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
2644 			    BUS_DMASYNC_POSTREAD);
2645 			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
2646 			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
2647 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2648 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2649 			jumbocnt++;
2650 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2651 				ifp->if_ierrors++;
2652 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2653 				continue;
2654 			}
2655 			if (bge_newbuf_jumbo(sc,
2656 			    sc->bge_jumbo, NULL) == ENOBUFS) {
2657 				ifp->if_ierrors++;
2658 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2659 				continue;
2660 			}
2661 		} else {
2662 			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2663 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2664 			    sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2665 			    BUS_DMASYNC_POSTREAD);
2666 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2667 			    sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2668 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2669 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2670 			stdcnt++;
2671 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2672 				ifp->if_ierrors++;
2673 				bge_newbuf_std(sc, sc->bge_std, m);
2674 				continue;
2675 			}
2676 			if (bge_newbuf_std(sc, sc->bge_std,
2677 			    NULL) == ENOBUFS) {
2678 				ifp->if_ierrors++;
2679 				bge_newbuf_std(sc, sc->bge_std, m);
2680 				continue;
2681 			}
2682 		}
2683 
2684 		ifp->if_ipackets++;
2685 #ifndef __NO_STRICT_ALIGNMENT
2686 		/*
2687 		 * For architectures with strict alignment we must make sure
2688 		 * the payload is aligned.
2689 		 */
2690 		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
2691 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2692 			    cur_rx->bge_len);
2693 			m->m_data += ETHER_ALIGN;
2694 		}
2695 #endif
2696 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2697 		m->m_pkthdr.rcvif = ifp;
2698 
2699 		if (ifp->if_capenable & IFCAP_RXCSUM) {
2700 			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2701 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2702 				if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2703 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2704 			}
2705 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
2706 			    m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
2707 				m->m_pkthdr.csum_data =
2708 				    cur_rx->bge_tcp_udp_csum;
2709 				m->m_pkthdr.csum_flags |=
2710 				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2711 			}
2712 		}
2713 
2714 		/*
2715 		 * If we received a packet with a vlan tag,
2716 		 * attach that information to the packet.
2717 		 */
2718 		if (have_tag) {
2719 			m->m_pkthdr.ether_vtag = vlan_tag;
2720 			m->m_flags |= M_VLANTAG;
2721 		}
2722 
2723 		BGE_UNLOCK(sc);
2724 		(*ifp->if_input)(ifp, m);
2725 		BGE_LOCK(sc);
2726 	}
2727 
2728 	if (stdcnt > 0)
2729 		bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2730 		    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
2731 
2732 	if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0)
2733 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2734 		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
2735 
2736 	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2737 	if (stdcnt)
2738 		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2739 	if (jumbocnt)
2740 		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2741 }
2742 
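/*
 * Transmit completion: walk the TX ring from our saved consumer index
 * up to the consumer index reported in the status block, unloading the
 * DMA maps and freeing the mbufs of frames the chip has sent.
 */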
2743 static void
2744 bge_txeof(struct bge_softc *sc)
2745 {
2746 	struct bge_tx_bd *cur_tx = NULL;
2747 	struct ifnet *ifp;
2748 
2749 	BGE_LOCK_ASSERT(sc);
2750 
2751 	/* Nothing to do. */
2752 	if (sc->bge_tx_saved_considx ==
2753 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
2754 		return;
2755 
2756 	ifp = sc->bge_ifp;
2757 
2758 	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
2759 	    sc->bge_cdata.bge_tx_ring_map,
2760 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2761 	/*
2762 	 * Go through our tx ring and free mbufs for those
2763 	 * frames that have been sent.
2764 	 */
2765 	while (sc->bge_tx_saved_considx !=
2766 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2767 		uint32_t		idx = 0;
2768 
2769 		idx = sc->bge_tx_saved_considx;
2770 		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2771 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2772 			ifp->if_opackets++;
2773 		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2774 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2775 			    sc->bge_cdata.bge_tx_dmamap[idx],
2776 			    BUS_DMASYNC_POSTWRITE);
2777 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2778 			    sc->bge_cdata.bge_tx_dmamap[idx]);
2779 			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2780 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2781 		}
2782 		sc->bge_txcnt--;
2783 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2784 		ifp->if_timer = 0;
2785 	}
2786 
2787 	if (cur_tx != NULL)
2788 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2789 }
2790 
2791 #ifdef DEVICE_POLLING
2792 static void
2793 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2794 {
2795 	struct bge_softc *sc = ifp->if_softc;
2796 	uint32_t statusword;
2797 
2798 	BGE_LOCK(sc);
2799 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2800 		BGE_UNLOCK(sc);
2801 		return;
2802 	}
2803 
2804 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2805 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2806 
2807 	statusword = atomic_readandclear_32(
2808 	    &sc->bge_ldata.bge_status_block->bge_status);
2809 
2810 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2811 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
2812 
2813 	/* Note link event. It will be processed by POLL_AND_CHECK_STATUS cmd */
2814 	if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
2815 		sc->bge_link_evt++;
2816 
2817 	if (cmd == POLL_AND_CHECK_STATUS)
2818 		if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2819 		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
2820 		    sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
2821 			bge_link_upd(sc);
2822 
2823 	sc->rxcycles = count;
2824 	bge_rxeof(sc);
2825 	bge_txeof(sc);
2826 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2827 		bge_start_locked(ifp);
2828 
2829 	BGE_UNLOCK(sc);
2830 }
2831 #endif /* DEVICE_POLLING */
2832 
2833 static void
2834 bge_intr(void *xsc)
2835 {
2836 	struct bge_softc *sc;
2837 	struct ifnet *ifp;
2838 	uint32_t statusword;
2839 
2840 	sc = xsc;
2841 
2842 	BGE_LOCK(sc);
2843 
2844 	ifp = sc->bge_ifp;
2845 
2846 #ifdef DEVICE_POLLING
2847 	if (ifp->if_capenable & IFCAP_POLLING) {
2848 		BGE_UNLOCK(sc);
2849 		return;
2850 	}
2851 #endif
2852 
2853 	/*
2854 	 * Do the mandatory PCI flush as well as get the link status.
2855 	 */
2856 	statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
2857 
2858 	/* Ack interrupt and stop others from occurring. */
2859 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2860 
2861 	/* Make sure the descriptor ring indexes are coherent. */
2862 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2863 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
2864 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2865 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
2866 
2867 	if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2868 	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
2869 	    statusword || sc->bge_link_evt)
2870 		bge_link_upd(sc);
2871 
2872 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2873 		/* Check RX return ring producer/consumer. */
2874 		bge_rxeof(sc);
2875 
2876 		/* Check TX ring producer/consumer. */
2877 		bge_txeof(sc);
2878 	}
2879 
2880 	/* Re-enable interrupts. */
2881 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2882 
2883 	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2884 	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2885 		bge_start_locked(ifp);
2886 
2887 	BGE_UNLOCK(sc);
2888 }
2889 
2890 static void
2891 bge_asf_driver_up(struct bge_softc *sc)
2892 {
2893 	if (sc->bge_asf_mode & ASF_STACKUP) {
2894 		/* Send ASF heartbeat approx. every 2s. */
2895 		if (sc->bge_asf_count)
2896 			sc->bge_asf_count--;
2897 		else {
2898 			sc->bge_asf_count = 5;
2899 			bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
2900 			    BGE_FW_DRV_ALIVE);
2901 			bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
2902 			bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
2903 			CSR_WRITE_4(sc, BGE_CPU_EVENT,
2904 			    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
2905 		}
2906 	}
2907 }
2908 
2909 static void
2910 bge_tick_locked(struct bge_softc *sc)
2911 {
2912 	struct mii_data *mii = NULL;
2913 
2914 	BGE_LOCK_ASSERT(sc);
2915 
2916 	if (BGE_IS_5705_OR_BEYOND(sc))
2917 		bge_stats_update_regs(sc);
2918 	else
2919 		bge_stats_update(sc);
2920 
2921 	if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
2922 		mii = device_get_softc(sc->bge_miibus);
2923 		/* Don't mess with the PHY in IPMI/ASF mode */
2924 		if (!((sc->bge_asf_mode & ASF_STACKUP) && (sc->bge_link)))
2925 			mii_tick(mii);
2926 	} else {
2927 		/*
2928 		 * Since in TBI mode auto-polling can't be used we should poll
2929 		 * link status manually. Here we register pending link event
2930 		 * and trigger interrupt.
2931 		 */
2932 #ifdef DEVICE_POLLING
2933 		/* In polling mode we poll link state in bge_poll(). */
2934 		if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
2935 #endif
2936 		{
2937 		sc->bge_link_evt++;
2938 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
2939 		}
2940 	}
2941 
2942 	bge_asf_driver_up(sc);
2943 
2944 	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
2945 }
2946 
2947 static void
2948 bge_tick(void *xsc)
2949 {
2950 	struct bge_softc *sc;
2951 
2952 	sc = xsc;
2953 
2954 	BGE_LOCK(sc);
2955 	bge_tick_locked(sc);
2956 	BGE_UNLOCK(sc);
2957 }
2958 
2959 static void
2960 bge_stats_update_regs(struct bge_softc *sc)
2961 {
2962 	struct bge_mac_stats_regs stats;
2963 	struct ifnet *ifp;
2964 	uint32_t *s;
2965 	u_long cnt;	/* current register value */
2966 	int i;
2967 
2968 	ifp = sc->bge_ifp;
2969 
2970 	s = (uint32_t *)&stats;
2971 	for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2972 		*s = CSR_READ_4(sc, BGE_RX_STATS + i);
2973 		s++;
2974 	}
2975 
2976 	cnt = stats.dot3StatsSingleCollisionFrames +
2977 	    stats.dot3StatsMultipleCollisionFrames +
2978 	    stats.dot3StatsExcessiveCollisions +
2979 	    stats.dot3StatsLateCollisions;
2980 	ifp->if_collisions += cnt >= sc->bge_tx_collisions ?
2981 	    cnt - sc->bge_tx_collisions : cnt;
2982 	sc->bge_tx_collisions = cnt;
2983 }
2984 
2985 static void
2986 bge_stats_update(struct bge_softc *sc)
2987 {
2988 	struct ifnet *ifp;
2989 	bus_size_t stats;
2990 	u_long cnt;	/* current register value */
2991 
2992 	ifp = sc->bge_ifp;
2993 
2994 	stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2995 
2996 #define READ_STAT(sc, stats, stat) \
2997 	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
2998 
2999 	cnt = READ_STAT(sc, stats,
3000 	    txstats.dot3StatsSingleCollisionFrames.bge_addr_lo);
3001 	cnt += READ_STAT(sc, stats,
3002 	    txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo);
3003 	cnt += READ_STAT(sc, stats,
3004 	    txstats.dot3StatsExcessiveCollisions.bge_addr_lo);
3005 	cnt += READ_STAT(sc, stats,
3006 	    txstats.dot3StatsLateCollisions.bge_addr_lo);
3007 	ifp->if_collisions += cnt >= sc->bge_tx_collisions ?
3008 	    cnt - sc->bge_tx_collisions : cnt;
3009 	sc->bge_tx_collisions = cnt;
3010 
3011 	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
3012 	ifp->if_ierrors += cnt >= sc->bge_rx_discards ?
3013 	    cnt - sc->bge_rx_discards : cnt;
3014 	sc->bge_rx_discards = cnt;
3015 
3016 	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
3017 	ifp->if_oerrors += cnt >= sc->bge_tx_discards ?
3018 	    cnt - sc->bge_tx_discards : cnt;
3019 	sc->bge_tx_discards = cnt;
3020 
3021 #undef READ_STAT
3022 }
3023 
3024 /*
3025  * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3026  * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3027  * but when such padded frames employ the bge IP/TCP checksum offload,
3028  * the hardware checksum assist gives incorrect results (possibly
3029  * from incorporating its own padding into the UDP/TCP checksum; who knows).
3030  * If we pad such runts with zeros, the onboard checksum comes out correct.
3031  */
3032 static __inline int
3033 bge_cksum_pad(struct mbuf *m)
3034 {
3035 	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
3036 	struct mbuf *last;
3037 
3038 	/* If there's only the packet-header and we can pad there, use it. */
3039 	if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
3040 	    M_TRAILINGSPACE(m) >= padlen) {
3041 		last = m;
3042 	} else {
3043 		/*
3044 		 * Walk packet chain to find last mbuf. We will either
3045 		 * pad there, or append a new mbuf and pad it.
3046 		 */
3047 		for (last = m; last->m_next != NULL; last = last->m_next);
3048 		if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
3049 			/* Allocate new empty mbuf, pad it. Compact later. */
3050 			struct mbuf *n;
3051 
3052 			MGET(n, M_DONTWAIT, MT_DATA);
3053 			if (n == NULL)
3054 				return (ENOBUFS);
3055 			n->m_len = 0;
3056 			last->m_next = n;
3057 			last = n;
3058 		}
3059 	}
3060 
3061 	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
3062 	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
3063 	last->m_len += padlen;
3064 	m->m_pkthdr.len += padlen;
3065 
3066 	return (0);
3067 }
3068 
3069 /*
3070  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3071  * pointers to descriptors.
3072  */
3073 static int
3074 bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
3075 {
3076 	bus_dma_segment_t	segs[BGE_NSEG_NEW];
3077 	bus_dmamap_t		map;
3078 	struct bge_tx_bd	*d;
3079 	struct mbuf		*m = *m_head;
3080 	uint32_t		idx = *txidx;
3081 	uint16_t		csum_flags;
3082 	int			nsegs, i, error;
3083 
3084 	csum_flags = 0;
3085 	if (m->m_pkthdr.csum_flags) {
3086 		if (m->m_pkthdr.csum_flags & CSUM_IP)
3087 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3088 		if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
3089 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3090 			if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
3091 			    (error = bge_cksum_pad(m)) != 0) {
3092 				m_freem(m);
3093 				*m_head = NULL;
3094 				return (error);
3095 			}
3096 		}
3097 		if (m->m_flags & M_LASTFRAG)
3098 			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3099 		else if (m->m_flags & M_FRAG)
3100 			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3101 	}
3102 
3103 	map = sc->bge_cdata.bge_tx_dmamap[idx];
3104 	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m, segs,
3105 	    &nsegs, BUS_DMA_NOWAIT);
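	/*
	 * EFBIG means the chain needs more than BGE_NSEG_NEW segments;
	 * collapse it with m_defrag() and retry the load once.
	 */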
3106 	if (error == EFBIG) {
3107 		m = m_defrag(m, M_DONTWAIT);
3108 		if (m == NULL) {
3109 			m_freem(*m_head);
3110 			*m_head = NULL;
3111 			return (ENOBUFS);
3112 		}
3113 		*m_head = m;
3114 		error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m,
3115 		    segs, &nsegs, BUS_DMA_NOWAIT);
3116 		if (error) {
3117 			m_freem(m);
3118 			*m_head = NULL;
3119 			return (error);
3120 		}
3121 	} else if (error != 0)
3122 		return (error);
3123 
3124 	/*
3125 	 * Sanity check: avoid coming within 16 descriptors
3126 	 * of the end of the ring.
3127 	 */
3128 	if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
3129 		bus_dmamap_unload(sc->bge_cdata.bge_mtag, map);
3130 		return (ENOBUFS);
3131 	}
3132 
3133 	bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
3134 
3135 	for (i = 0; ; i++) {
3136 		d = &sc->bge_ldata.bge_tx_ring[idx];
3137 		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
3138 		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
3139 		d->bge_len = segs[i].ds_len;
3140 		d->bge_flags = csum_flags;
3141 		if (i == nsegs - 1)
3142 			break;
3143 		BGE_INC(idx, BGE_TX_RING_CNT);
3144 	}
3145 
3146 	/* Mark the last segment as end of packet... */
3147 	d->bge_flags |= BGE_TXBDFLAG_END;
3148 
3149 	/* ... and put VLAN tag into first segment.  */
3150 	d = &sc->bge_ldata.bge_tx_ring[*txidx];
3151 	if (m->m_flags & M_VLANTAG) {
3152 		d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3153 		d->bge_vlan_tag = m->m_pkthdr.ether_vtag;
3154 	} else
3155 		d->bge_vlan_tag = 0;
3156 
3157 	/*
3158 	 * Ensure that the map for this transmission is placed at the
3159 	 * array index of the last descriptor in this chain: swap the
3160 	 * map pointers so the dmamap[] and chain[] arrays stay paired.
3161 	 */
3162 	sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
3163 	sc->bge_cdata.bge_tx_dmamap[idx] = map;
3164 	sc->bge_cdata.bge_tx_chain[idx] = m;
3165 	sc->bge_txcnt += nsegs;
3166 
3167 	BGE_INC(idx, BGE_TX_RING_CNT);
3168 	*txidx = idx;
3169 
3170 	return (0);
3171 }
3172 
3173 /*
3174  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3175  * to the mbuf data regions directly in the transmit descriptors.
3176  */
3177 static void
3178 bge_start_locked(struct ifnet *ifp)
3179 {
3180 	struct bge_softc *sc;
3181 	struct mbuf *m_head = NULL;
3182 	uint32_t prodidx;
3183 	int count = 0;
3184 
3185 	sc = ifp->if_softc;
3186 
3187 	if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3188 		return;
3189 
3190 	prodidx = sc->bge_tx_prodidx;
3191 
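	/*
	 * A non-NULL chain pointer at prodidx means that slot still
	 * belongs to an in-flight frame, i.e. the TX ring is full.
	 */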
3192 	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3193 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3194 		if (m_head == NULL)
3195 			break;
3196 
3197 		/*
3198 		 * XXX
3199 		 * The code inside the if() block is never reached since we
3200 		 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3201 		 * requests to checksum TCP/UDP in a fragmented packet.
3202 		 *
3203 		 * XXX
3204 		 * safety overkill.  If this is a fragmented packet chain
3205 		 * with delayed TCP/UDP checksums, then only encapsulate
3206 		 * it if we have enough descriptors to handle the entire
3207 		 * chain at once.
3208 		 * (paranoia -- may not actually be needed)
3209 		 */
3210 		if (m_head->m_flags & M_FIRSTFRAG &&
3211 		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3212 			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3213 			    m_head->m_pkthdr.csum_data + 16) {
3214 				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3215 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3216 				break;
3217 			}
3218 		}
3219 
3220 		/*
3221 		 * Pack the data into the transmit ring. If we
3222 		 * don't have room, set the OACTIVE flag and wait
3223 		 * for the NIC to drain the ring.
3224 		 */
3225 		if (bge_encap(sc, &m_head, &prodidx)) {
3226 			if (m_head == NULL)
3227 				break;
3228 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3229 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3230 			break;
3231 		}
3232 		++count;
3233 
3234 		/*
3235 		 * If there's a BPF listener, bounce a copy of this frame
3236 		 * to him.
3237 		 */
3238 		BPF_MTAP(ifp, m_head);
3239 	}
3240 
3241 	if (count == 0)
3242 		/* No packets were dequeued. */
3243 		return;
3244 
3245 	/* Transmit. */
3246 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3247 	/* 5700 b2 errata */
3248 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3249 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3250 
3251 	sc->bge_tx_prodidx = prodidx;
3252 
3253 	/*
3254 	 * Set a timeout in case the chip goes out to lunch.
3255 	 */
3256 	ifp->if_timer = 5;
3257 }
3258 
3259 /*
3260  * Main transmit routine: locking wrapper around bge_start_locked().
3262  */
3263 static void
3264 bge_start(struct ifnet *ifp)
3265 {
3266 	struct bge_softc *sc;
3267 
3268 	sc = ifp->if_softc;
3269 	BGE_LOCK(sc);
3270 	bge_start_locked(ifp);
3271 	BGE_UNLOCK(sc);
3272 }
3273 
3274 static void
3275 bge_init_locked(struct bge_softc *sc)
3276 {
3277 	struct ifnet *ifp;
3278 	uint16_t *m;
3279 
3280 	BGE_LOCK_ASSERT(sc);
3281 
3282 	ifp = sc->bge_ifp;
3283 
3284 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3285 		return;
3286 
3287 	/* Cancel pending I/O and flush buffers. */
3288 	bge_stop(sc);
3289 
3290 	bge_stop_fw(sc);
3291 	bge_sig_pre_reset(sc, BGE_RESET_START);
3292 	bge_reset(sc);
3293 	bge_sig_legacy(sc, BGE_RESET_START);
3294 	bge_sig_post_reset(sc, BGE_RESET_START);
3295 
3296 	bge_chipinit(sc);
3297 
3298 	/*
3299 	 * Init the various state machines, ring
3300 	 * control blocks and firmware.
3301 	 */
3302 	if (bge_blockinit(sc)) {
3303 		device_printf(sc->bge_dev, "initialization failure\n");
3304 		return;
3305 	}
3306 
3307 	ifp = sc->bge_ifp;
3308 
3309 	/* Specify MTU. */
3310 	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3311 	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
3312 
3313 	/* Load our MAC address. */
3314 	m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
3315 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3316 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3317 
3318 	/* Enable or disable promiscuous mode as needed. */
3319 	if (ifp->if_flags & IFF_PROMISC) {
3320 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3321 	} else {
3322 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3323 	}
3324 
3325 	/* Program multicast filter. */
3326 	bge_setmulti(sc);
3327 
3328 	/* Init RX ring. */
3329 	bge_init_rx_ring_std(sc);
3330 
3331 	/*
3332 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3333 	 * memory to ensure that the chip has in fact read the first
3334 	 * entry of the ring.
3335 	 */
3336 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3337 		uint32_t		v, i;
3338 		for (i = 0; i < 10; i++) {
3339 			DELAY(20);
3340 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3341 			if (v == (MCLBYTES - ETHER_ALIGN))
3342 				break;
3343 		}
3344 		if (i == 10)
3345 			device_printf(sc->bge_dev,
3346 			    "5705 A0 chip failed to load RX ring\n");
3347 	}
3348 
3349 	/* Init jumbo RX ring. */
3350 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3351 		bge_init_rx_ring_jumbo(sc);
3352 
3353 	/* Init our RX return ring index. */
3354 	sc->bge_rx_saved_considx = 0;
3355 
3356 	/* Init TX ring. */
3357 	bge_init_tx_ring(sc);
3358 
3359 	/* Turn on transmitter. */
3360 	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3361 
3362 	/* Turn on receiver. */
3363 	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3364 
3365 	/* Tell firmware we're alive. */
3366 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3367 
3368 #ifdef DEVICE_POLLING
3369 	/* Disable interrupts if we are polling. */
3370 	if (ifp->if_capenable & IFCAP_POLLING) {
3371 		BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3372 		    BGE_PCIMISCCTL_MASK_PCI_INTR);
3373 		CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3374 		CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3375 		CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3376 	} else
3377 #endif
3378 
3379 	/* Enable host interrupts. */
3380 	{
3381 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3382 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3383 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3384 	}
3385 
3386 	bge_ifmedia_upd_locked(ifp);
3387 
3388 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3389 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3390 
3391 	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3392 }
3393 
3394 static void
3395 bge_init(void *xsc)
3396 {
3397 	struct bge_softc *sc = xsc;
3398 
3399 	BGE_LOCK(sc);
3400 	bge_init_locked(sc);
3401 	BGE_UNLOCK(sc);
3402 }
3403 
3404 /*
3405  * Set media options.
3406  */
3407 static int
3408 bge_ifmedia_upd(struct ifnet *ifp)
3409 {
3410 	struct bge_softc *sc = ifp->if_softc;
3411 	int res;
3412 
3413 	BGE_LOCK(sc);
3414 	res = bge_ifmedia_upd_locked(ifp);
3415 	BGE_UNLOCK(sc);
3416 
3417 	return (res);
3418 }
3419 
3420 static int
3421 bge_ifmedia_upd_locked(struct ifnet *ifp)
3422 {
3423 	struct bge_softc *sc = ifp->if_softc;
3424 	struct mii_data *mii;
3425 	struct ifmedia *ifm;
3426 
3427 	BGE_LOCK_ASSERT(sc);
3428 
3429 	ifm = &sc->bge_ifmedia;
3430 
3431 	/* If this is a 1000baseX NIC, enable the TBI port. */
3432 	if (sc->bge_flags & BGE_FLAG_TBI) {
3433 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3434 			return (EINVAL);
3435 		switch (IFM_SUBTYPE(ifm->ifm_media)) {
3436 		case IFM_AUTO:
3437 			/*
3438 			 * The BCM5704 ASIC appears to have a special
3439 			 * mechanism for programming the autoneg
3440 			 * advertisement registers in TBI mode.
3441 			 */
3442 			if (bge_fake_autoneg == 0 &&
3443 			    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3444 				uint32_t sgdig;
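				/*
				 * Advertise autoneg plus pause capability;
				 * the SGDIGCFG_SEND bit appears to latch
				 * the new advertisement and is cleared
				 * again after a short delay.
				 */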
3445 				CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3446 				sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3447 				sgdig |= BGE_SGDIGCFG_AUTO|
3448 				    BGE_SGDIGCFG_PAUSE_CAP|
3449 				    BGE_SGDIGCFG_ASYM_PAUSE;
3450 				CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3451 				    sgdig|BGE_SGDIGCFG_SEND);
3452 				DELAY(5);
3453 				CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3454 			}
3455 			break;
3456 		case IFM_1000_SX:
3457 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3458 				BGE_CLRBIT(sc, BGE_MAC_MODE,
3459 				    BGE_MACMODE_HALF_DUPLEX);
3460 			} else {
3461 				BGE_SETBIT(sc, BGE_MAC_MODE,
3462 				    BGE_MACMODE_HALF_DUPLEX);
3463 			}
3464 			break;
3465 		default:
3466 			return (EINVAL);
3467 		}
3468 		return (0);
3469 	}
3470 
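	/*
	 * Note a pending link event so that the link state is
	 * re-evaluated once the media change takes effect.
	 */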
3471 	sc->bge_link_evt++;
3472 	mii = device_get_softc(sc->bge_miibus);
3473 	if (mii->mii_instance) {
3474 		struct mii_softc *miisc;
3475 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3477 			mii_phy_reset(miisc);
3478 	}
3479 	mii_mediachg(mii);
3480 
3481 	return (0);
3482 }
3483 
3484 /*
3485  * Report current media status.
3486  */
3487 static void
3488 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3489 {
3490 	struct bge_softc *sc = ifp->if_softc;
3491 	struct mii_data *mii;
3492 
3493 	BGE_LOCK(sc);
3494 
3495 	if (sc->bge_flags & BGE_FLAG_TBI) {
3496 		ifmr->ifm_status = IFM_AVALID;
3497 		ifmr->ifm_active = IFM_ETHER;
3498 		if (CSR_READ_4(sc, BGE_MAC_STS) &
3499 		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
3500 			ifmr->ifm_status |= IFM_ACTIVE;
3501 		} else {
3502 			ifmr->ifm_active |= IFM_NONE;
3503 			BGE_UNLOCK(sc);
3504 			return;
3505 		}
3506 		ifmr->ifm_active |= IFM_1000_SX;
3507 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3508 			ifmr->ifm_active |= IFM_HDX;
3509 		else
3510 			ifmr->ifm_active |= IFM_FDX;
3511 		BGE_UNLOCK(sc);
3512 		return;
3513 	}
3514 
3515 	mii = device_get_softc(sc->bge_miibus);
3516 	mii_pollstat(mii);
3517 	ifmr->ifm_active = mii->mii_media_active;
3518 	ifmr->ifm_status = mii->mii_media_status;
3519 
3520 	BGE_UNLOCK(sc);
3521 }
3522 
3523 static int
3524 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3525 {
3526 	struct bge_softc *sc = ifp->if_softc;
3527 	struct ifreq *ifr = (struct ifreq *) data;
3528 	struct mii_data *mii;
3529 	int mask, error = 0;
3530 
3531 	switch (command) {
3532 	case SIOCSIFMTU:
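		/*
		 * Jumbo-capable chips accept MTUs up to BGE_JUMBO_MTU;
		 * everything else is limited to ETHERMTU.
		 */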
3533 		if (ifr->ifr_mtu < ETHERMIN ||
3534 		    ((BGE_IS_JUMBO_CAPABLE(sc)) &&
3535 		    ifr->ifr_mtu > BGE_JUMBO_MTU) ||
3536 		    ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
3537 		    ifr->ifr_mtu > ETHERMTU))
3538 			error = EINVAL;
3539 		else if (ifp->if_mtu != ifr->ifr_mtu) {
3540 			ifp->if_mtu = ifr->ifr_mtu;
3541 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3542 			bge_init(sc);
3543 		}
3544 		break;
3545 	case SIOCSIFFLAGS:
3546 		BGE_LOCK(sc);
3547 		if (ifp->if_flags & IFF_UP) {
3548 			/*
3549 			 * If only the state of the PROMISC flag changed,
3550 			 * then just use the 'set promisc mode' command
3551 			 * instead of reinitializing the entire NIC. Doing
3552 			 * a full re-init means reloading the firmware and
3553 			 * waiting for it to start up, which may take a
3554 			 * second or two.  Similarly for ALLMULTI.
3555 			 */
3556 			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3557 			    ifp->if_flags & IFF_PROMISC &&
3558 			    !(sc->bge_if_flags & IFF_PROMISC)) {
3559 				BGE_SETBIT(sc, BGE_RX_MODE,
3560 				    BGE_RXMODE_RX_PROMISC);
3561 			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3562 			    !(ifp->if_flags & IFF_PROMISC) &&
3563 			    sc->bge_if_flags & IFF_PROMISC) {
3564 				BGE_CLRBIT(sc, BGE_RX_MODE,
3565 				    BGE_RXMODE_RX_PROMISC);
3566 			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3567 			    (ifp->if_flags ^ sc->bge_if_flags) & IFF_ALLMULTI) {
3568 				bge_setmulti(sc);
3569 			} else
3570 				bge_init_locked(sc);
3571 		} else {
3572 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3573 				bge_stop(sc);
3574 			}
3575 		}
3576 		sc->bge_if_flags = ifp->if_flags;
3577 		BGE_UNLOCK(sc);
3578 		error = 0;
3579 		break;
3580 	case SIOCADDMULTI:
3581 	case SIOCDELMULTI:
3582 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3583 			BGE_LOCK(sc);
3584 			bge_setmulti(sc);
3585 			BGE_UNLOCK(sc);
3586 			error = 0;
3587 		}
3588 		break;
3589 	case SIOCSIFMEDIA:
3590 	case SIOCGIFMEDIA:
3591 		if (sc->bge_flags & BGE_FLAG_TBI) {
3592 			error = ifmedia_ioctl(ifp, ifr,
3593 			    &sc->bge_ifmedia, command);
3594 		} else {
3595 			mii = device_get_softc(sc->bge_miibus);
3596 			error = ifmedia_ioctl(ifp, ifr,
3597 			    &mii->mii_media, command);
3598 		}
3599 		break;
3600 	case SIOCSIFCAP:
3601 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
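		/* 'mask' is the set of capability bits being toggled. */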
3602 #ifdef DEVICE_POLLING
3603 		if (mask & IFCAP_POLLING) {
3604 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
3605 				error = ether_poll_register(bge_poll, ifp);
3606 				if (error)
3607 					return (error);
3608 				BGE_LOCK(sc);
3609 				BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3610 				    BGE_PCIMISCCTL_MASK_PCI_INTR);
3611 				CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3612 				CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
3613 				CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
3614 				ifp->if_capenable |= IFCAP_POLLING;
3615 				BGE_UNLOCK(sc);
3616 			} else {
3617 				error = ether_poll_deregister(ifp);
3618 				/* Re-enable interrupts even in the error case. */
3619 				BGE_LOCK(sc);
3620 				CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
3621 				CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
3622 				BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
3623 				    BGE_PCIMISCCTL_MASK_PCI_INTR);
3624 				CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3625 				ifp->if_capenable &= ~IFCAP_POLLING;
3626 				BGE_UNLOCK(sc);
3627 			}
3628 		}
3629 #endif
3630 		if (mask & IFCAP_HWCSUM) {
3631 			ifp->if_capenable ^= IFCAP_HWCSUM;
3632 			if (IFCAP_HWCSUM & ifp->if_capenable &&
3633 			    IFCAP_HWCSUM & ifp->if_capabilities)
3634 				ifp->if_hwassist = BGE_CSUM_FEATURES;
3635 			else
3636 				ifp->if_hwassist = 0;
3637 			VLAN_CAPABILITIES(ifp);
3638 		}
3639 		break;
3640 	default:
3641 		error = ether_ioctl(ifp, command, data);
3642 		break;
3643 	}
3644 
3645 	return (error);
3646 }
3647 
3648 static void
3649 bge_watchdog(struct ifnet *ifp)
3650 {
3651 	struct bge_softc *sc;
3652 
3653 	sc = ifp->if_softc;
3654 
3655 	if_printf(ifp, "watchdog timeout -- resetting\n");
3656 
3657 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3658 	bge_init(sc);
3659 
3660 	ifp->if_oerrors++;
3661 }
3662 
3663 /*
3664  * Stop the adapter and free any mbufs allocated to the
3665  * RX and TX lists.
3666  */
3667 static void
3668 bge_stop(struct bge_softc *sc)
3669 {
3670 	struct ifnet *ifp;
3671 	struct ifmedia_entry *ifm;
3672 	struct mii_data *mii = NULL;
3673 	int mtmp, itmp;
3674 
3675 	BGE_LOCK_ASSERT(sc);
3676 
3677 	ifp = sc->bge_ifp;
3678 
3679 	if ((sc->bge_flags & BGE_FLAG_TBI) == 0)
3680 		mii = device_get_softc(sc->bge_miibus);
3681 
3682 	callout_stop(&sc->bge_stat_ch);
3683 
3684 	/*
3685 	 * Disable all of the receiver blocks.
3686 	 */
3687 	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3688 	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3689 	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3690 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
3691 		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3692 	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3693 	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3694 	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3695 
3696 	/*
3697 	 * Disable all of the transmit blocks.
3698 	 */
3699 	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3700 	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3701 	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3702 	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3703 	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3704 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
3705 		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3706 	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3707 
3708 	/*
3709 	 * Shut down all of the memory managers and related
3710 	 * state machines.
3711 	 */
3712 	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3713 	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3714 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
3715 		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
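	/* Pulse the FTQ reset: assert all queue resets, then release. */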
3716 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3717 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3718 	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
3719 		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3720 		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3721 	}
3722 
3723 	/* Disable host interrupts. */
3724 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3725 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3726 
3727 	/*
3728 	 * Tell firmware we're shutting down.
3729 	 */
3731 	bge_stop_fw(sc);
3732 	bge_sig_pre_reset(sc, BGE_RESET_STOP);
3733 	bge_reset(sc);
3734 	bge_sig_legacy(sc, BGE_RESET_STOP);
3735 	bge_sig_post_reset(sc, BGE_RESET_STOP);
3736 
3737 	/*
3738 	 * Keep the ASF firmware running if up.
3739 	 */
3740 	if (sc->bge_asf_mode & ASF_STACKUP)
3741 		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3742 	else
3743 		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3744 
3745 	/* Free the RX lists. */
3746 	bge_free_rx_ring_std(sc);
3747 
3748 	/* Free jumbo RX list. */
3749 	if (BGE_IS_JUMBO_CAPABLE(sc))
3750 		bge_free_rx_ring_jumbo(sc);
3751 
3752 	/* Free TX buffers. */
3753 	bge_free_tx_ring(sc);
3754 
3755 	/*
3756 	 * Isolate/power down the PHY, but leave the media selection
3757 	 * unchanged so that things will be put back to normal when
3758 	 * we bring the interface back up.
3759 	 */
3760 	if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3761 		itmp = ifp->if_flags;
3762 		ifp->if_flags |= IFF_UP;
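		/*
		 * IFF_UP is forced on temporarily so that the PHY
		 * drivers will act on the media change below.
		 */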
3763 		/*
3764 		 * If we are called from bge_detach(), mii is already NULL.
3765 		 */
3766 		if (mii != NULL) {
3767 			ifm = mii->mii_media.ifm_cur;
3768 			mtmp = ifm->ifm_media;
3769 			ifm->ifm_media = IFM_ETHER|IFM_NONE;
3770 			mii_mediachg(mii);
3771 			ifm->ifm_media = mtmp;
3772 		}
3773 		ifp->if_flags = itmp;
3774 	}
3775 
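	/* Invalidate the saved TX consumer index for the next start. */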
3776 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3777 
3778 	/*
3779 	 * We can't just call bge_link_upd() here because the chip is almost
3780 	 * stopped, so the bge_link_upd() -> bge_tick_locked() ->
3781 	 * bge_stats_update() sequence may deadlock the hardware. Instead we
3782 	 * just clear the MAC's link state (the PHY may still have link UP).
3783 	 */
3784 	if (bootverbose && sc->bge_link)
3785 		if_printf(sc->bge_ifp, "link DOWN\n");
3786 	sc->bge_link = 0;
3787 
3788 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3789 }
3790 
3791 /*
3792  * Stop all chip I/O so that the kernel's probe routines don't
3793  * get confused by errant DMAs when rebooting.
3794  */
3795 static void
3796 bge_shutdown(device_t dev)
3797 {
3798 	struct bge_softc *sc;
3799 
3800 	sc = device_get_softc(dev);
3801 
3802 	BGE_LOCK(sc);
3803 	bge_stop(sc);
3804 	bge_reset(sc);
3805 	BGE_UNLOCK(sc);
3806 }
3807 
3808 static int
3809 bge_suspend(device_t dev)
3810 {
3811 	struct bge_softc *sc;
3812 
3813 	sc = device_get_softc(dev);
3814 	BGE_LOCK(sc);
3815 	bge_stop(sc);
3816 	BGE_UNLOCK(sc);
3817 
3818 	return (0);
3819 }
3820 
3821 static int
3822 bge_resume(device_t dev)
3823 {
3824 	struct bge_softc *sc;
3825 	struct ifnet *ifp;
3826 
3827 	sc = device_get_softc(dev);
3828 	BGE_LOCK(sc);
3829 	ifp = sc->bge_ifp;
3830 	if (ifp->if_flags & IFF_UP) {
3831 		bge_init_locked(sc);
3832 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3833 			bge_start_locked(ifp);
3834 	}
3835 	BGE_UNLOCK(sc);
3836 
3837 	return (0);
3838 }
3839 
3840 static void
3841 bge_link_upd(struct bge_softc *sc)
3842 {
3843 	struct mii_data *mii;
3844 	uint32_t link, status;
3845 
3846 	BGE_LOCK_ASSERT(sc);
3847 
3848 	/* Clear 'pending link event' flag. */
3849 	sc->bge_link_evt = 0;
3850 
3851 	/*
3852 	 * Process link state changes.
3853 	 * Grrr. The link status word in the status block does
3854 	 * not work correctly on the BCM5700 rev AX and BX chips,
3855 	 * according to all available information. Hence, we have
3856 	 * to enable MII interrupts in order to properly obtain
3857 	 * async link changes. Unfortunately, this also means that
3858 	 * we have to read the MAC status register to detect link
3859 	 * changes, thereby adding an additional register access to
3860 	 * the interrupt handler.
3861 	 *
3862 	 * XXX: perhaps the link state detection procedure used for
3863 	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
3864 	 */
3865 
3866 	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3867 	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
3868 		status = CSR_READ_4(sc, BGE_MAC_STS);
3869 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
3870 			callout_stop(&sc->bge_stat_ch);
3871 			bge_tick_locked(sc);
3872 
3873 			mii = device_get_softc(sc->bge_miibus);
3874 			if (!sc->bge_link &&
3875 			    mii->mii_media_status & IFM_ACTIVE &&
3876 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3877 				sc->bge_link++;
3878 				if (bootverbose)
3879 					if_printf(sc->bge_ifp, "link UP\n");
3880 			} else if (sc->bge_link &&
3881 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
3882 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3883 				sc->bge_link = 0;
3884 				if (bootverbose)
3885 					if_printf(sc->bge_ifp, "link DOWN\n");
3886 			}
3887 
3888 			/* Clear the interrupt. */
3889 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3890 			    BGE_EVTENB_MI_INTERRUPT);
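			/*
			 * Reading the PHY interrupt status register acks
			 * the interrupt, and rewriting the interrupt mask
			 * re-arms the PHY for the next one.
			 */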
3891 			bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
3892 			bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
3893 			    BRGPHY_INTRS);
3894 		}
3895 		return;
3896 	}
3897 
3898 	if (sc->bge_flags & BGE_FLAG_TBI) {
3899 		status = CSR_READ_4(sc, BGE_MAC_STS);
3900 		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3901 			if (!sc->bge_link) {
3902 				sc->bge_link++;
3903 				if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
3904 					BGE_CLRBIT(sc, BGE_MAC_MODE,
3905 					    BGE_MACMODE_TBI_SEND_CFGS);
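				/* Clear any latched MAC attentions. */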
3906 				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3907 				if (bootverbose)
3908 					if_printf(sc->bge_ifp, "link UP\n");
3909 				if_link_state_change(sc->bge_ifp,
3910 				    LINK_STATE_UP);
3911 			}
3912 		} else if (sc->bge_link) {
3913 			sc->bge_link = 0;
3914 			if (bootverbose)
3915 				if_printf(sc->bge_ifp, "link DOWN\n");
3916 			if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
3917 		}
3918 	/* Discard link events for MII/GMII cards if MI auto-polling is off. */
3919 	} else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
3920 		/*
3921 	 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
3922 	 * bit in the status word always set. Work around this bug by
3923 	 * reading the PHY link status directly.
3924 		 */
3925 		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
3926 
3927 		if (link != sc->bge_link ||
3928 		    sc->bge_asicrev == BGE_ASICREV_BCM5700) {
3929 			callout_stop(&sc->bge_stat_ch);
3930 			bge_tick_locked(sc);
3931 
3932 			mii = device_get_softc(sc->bge_miibus);
3933 			if (!sc->bge_link &&
3934 			    mii->mii_media_status & IFM_ACTIVE &&
3935 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3936 				sc->bge_link++;
3937 				if (bootverbose)
3938 					if_printf(sc->bge_ifp, "link UP\n");
3939 			} else if (sc->bge_link &&
3940 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
3941 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3942 				sc->bge_link = 0;
3943 				if (bootverbose)
3944 					if_printf(sc->bge_ifp, "link DOWN\n");
3945 			}
3946 		}
3947 	}
3948 
3949 	/* Clear the attention. */
3950 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
3951 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
3952 	    BGE_MACSTAT_LINK_CHANGED);
3953 }
3954