xref: /freebsd/sys/dev/bge/if_bge.c (revision 588ff6c0cc9aaf10ba19080d9f8acbd8be36abf3)
/*-
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miidevs.h"
#include <dev/mii/brgphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/bge/if_bgereg.h>

#define	BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
#define	ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */

MODULE_DEPEND(bge, pci, 1, 1, 1);
MODULE_DEPEND(bge, ether, 1, 1, 1);
MODULE_DEPEND(bge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
static struct bge_type {
	uint16_t	bge_vid;
	uint16_t	bge_did;
} bge_devs[] = {
	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5700 },
	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5701 },

	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1000 },
	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1002 },
	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC9100 },

	{ APPLE_VENDORID,	APPLE_DEVICE_BCM5701 },

	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5700 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5701 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702X },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703X },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704C },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705K },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714C },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5720 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5721 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5781 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5782 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5786 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5788 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5789 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901A2 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5903M },

	{ SK_VENDORID,		SK_DEVICEID_ALTIMA },

	{ TC_VENDORID,		TC_DEVICEID_3C996 },

	{ 0, 0 }
};

static const struct bge_vendor {
	uint16_t	v_id;
	const char	*v_name;
} bge_vendors[] = {
	{ ALTEON_VENDORID,	"Alteon" },
	{ ALTIMA_VENDORID,	"Altima" },
	{ APPLE_VENDORID,	"Apple" },
	{ BCOM_VENDORID,	"Broadcom" },
	{ SK_VENDORID,		"SysKonnect" },
	{ TC_VENDORID,		"3Com" },

	{ 0, NULL }
};

static const struct bge_revision {
	uint32_t	br_chipid;
	const char	*br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0,	"BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1,	"BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0,	"BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1,	"BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2,	"BCM5700 B2" },
	{ BGE_CHIPID_BCM5700_B3,	"BCM5700 B3" },
	{ BGE_CHIPID_BCM5700_ALTIMA,	"BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0,	"BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0,	"BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0,	"BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2,	"BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5,	"BCM5701 B5" },
	{ BGE_CHIPID_BCM5703_A0,	"BCM5703 A0" },
	{ BGE_CHIPID_BCM5703_A1,	"BCM5703 A1" },
	{ BGE_CHIPID_BCM5703_A2,	"BCM5703 A2" },
	{ BGE_CHIPID_BCM5703_A3,	"BCM5703 A3" },
	{ BGE_CHIPID_BCM5703_B0,	"BCM5703 B0" },
	{ BGE_CHIPID_BCM5704_A0,	"BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1,	"BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2,	"BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3,	"BCM5704 A3" },
	{ BGE_CHIPID_BCM5704_B0,	"BCM5704 B0" },
	{ BGE_CHIPID_BCM5705_A0,	"BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1,	"BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2,	"BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3,	"BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0,	"BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1,	"BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3,	"BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0,	"BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1,	"BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0,	"BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1,	"BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2,	"BCM5750 C2" },
	{ BGE_CHIPID_BCM5714_A0,	"BCM5714 A0" },
	{ BGE_CHIPID_BCM5752_A0,	"BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1,	"BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2,	"BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_B0,	"BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3,	"BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0,	"BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1,	"BCM5715 A1" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_CHIPID_BCM5787_A0,	"BCM5754/5787 A0" },
	{ BGE_CHIPID_BCM5787_A1,	"BCM5754/5787 A1" },
	{ BGE_CHIPID_BCM5787_A2,	"BCM5754/5787 A2" },

	{ 0, NULL }
};

/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
 */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700,		"unknown BCM5700" },
	{ BGE_ASICREV_BCM5701,		"unknown BCM5701" },
	{ BGE_ASICREV_BCM5703,		"unknown BCM5703" },
	{ BGE_ASICREV_BCM5704,		"unknown BCM5704" },
	{ BGE_ASICREV_BCM5705,		"unknown BCM5705" },
	{ BGE_ASICREV_BCM5750,		"unknown BCM5750" },
	{ BGE_ASICREV_BCM5714_A0,	"unknown BCM5714" },
	{ BGE_ASICREV_BCM5752,		"unknown BCM5752" },
	{ BGE_ASICREV_BCM5780,		"unknown BCM5780" },
	{ BGE_ASICREV_BCM5714,		"unknown BCM5714" },
	{ BGE_ASICREV_BCM5755,		"unknown BCM5755" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_ASICREV_BCM5787,		"unknown BCM5754/5787" },

	{ 0, NULL }
};

#define	BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_FLAG_JUMBO)
#define	BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
#define	BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5705_PLUS)
#define	BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
#define	BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_575X_PLUS)

const struct bge_revision * bge_lookup_rev(uint32_t);
const struct bge_vendor * bge_lookup_vendor(uint16_t);
static int bge_probe(device_t);
static int bge_attach(device_t);
static int bge_detach(device_t);
static int bge_suspend(device_t);
static int bge_resume(device_t);
static void bge_release_resources(struct bge_softc *);
static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int bge_dma_alloc(device_t);
static void bge_dma_free(struct bge_softc *);

static void bge_txeof(struct bge_softc *);
static void bge_rxeof(struct bge_softc *);

static void bge_asf_driver_up(struct bge_softc *);
static void bge_tick(void *);
static void bge_stats_update(struct bge_softc *);
static void bge_stats_update_regs(struct bge_softc *);
static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);

static void bge_intr(void *);
static void bge_start_locked(struct ifnet *);
static void bge_start(struct ifnet *);
static int bge_ioctl(struct ifnet *, u_long, caddr_t);
static void bge_init_locked(struct bge_softc *);
static void bge_init(void *);
static void bge_stop(struct bge_softc *);
static void bge_watchdog(struct bge_softc *);
static void bge_shutdown(device_t);
static int bge_ifmedia_upd_locked(struct ifnet *);
static int bge_ifmedia_upd(struct ifnet *);
static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);

static void bge_setpromisc(struct bge_softc *);
static void bge_setmulti(struct bge_softc *);

static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int bge_init_rx_ring_std(struct bge_softc *);
static void bge_free_rx_ring_std(struct bge_softc *);
static int bge_init_rx_ring_jumbo(struct bge_softc *);
static void bge_free_rx_ring_jumbo(struct bge_softc *);
static void bge_free_tx_ring(struct bge_softc *);
static int bge_init_tx_ring(struct bge_softc *);

static int bge_chipinit(struct bge_softc *);
static int bge_blockinit(struct bge_softc *);

static uint32_t bge_readmem_ind(struct bge_softc *, int);
static void bge_writemem_ind(struct bge_softc *, int, int);
#ifdef notdef
static uint32_t bge_readreg_ind(struct bge_softc *, int);
#endif
static void bge_writemem_direct(struct bge_softc *, int, int);
static void bge_writereg_ind(struct bge_softc *, int, int);

static int bge_miibus_readreg(device_t, int, int);
static int bge_miibus_writereg(device_t, int, int, int);
static void bge_miibus_statchg(device_t);
#ifdef DEVICE_POLLING
static void bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif

#define	BGE_RESET_START 1
#define	BGE_RESET_STOP  2
static void bge_sig_post_reset(struct bge_softc *, int);
static void bge_sig_legacy(struct bge_softc *, int);
static void bge_sig_pre_reset(struct bge_softc *, int);
static int bge_reset(struct bge_softc *);
static void bge_link_upd(struct bge_softc *);

/*
 * The BGE_REGISTER_DEBUG option is only for low-level debugging.  It may
 * leak information to untrusted users.  It is also known to cause alignment
 * traps on certain architectures.
 */
#ifdef BGE_REGISTER_DEBUG
static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
#endif
static void bge_add_sysctls(struct bge_softc *);

static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),
	DEVMETHOD(device_suspend,	bge_suspend),
	DEVMETHOD(device_resume,	bge_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	{ 0, 0 }
};

static driver_t bge_driver = {
	"bge",
	bge_methods,
	sizeof(struct bge_softc)
};

static devclass_t bge_devclass;

DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);

static int bge_fake_autoneg = 0;
static int bge_allow_asf = 1;

TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);

SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
SYSCTL_INT(_hw_bge, OID_AUTO, fake_autoneg, CTLFLAG_RD, &bge_fake_autoneg, 0,
	"Enable fake autonegotiation for certain blade systems");
SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
	"Allow ASF mode if available");
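
/*
 * Both variables are boot-time tunables as well, e.g. in
 * /boot/loader.conf:
 *
 *	hw.bge.fake_autoneg="1"
 *	hw.bge.allow_asf="0"
 */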

static uint32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
	device_t dev;
	uint32_t val;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
	return (val);
}

static void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}
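
/*
 * Usage sketch (illustrative only, not compiled into the driver): the
 * window pair above is how shared NIC memory is reached through PCI
 * config space.  For example, the post-reset firmware handshake polls
 * the BGE_SOFTWARE_GENCOMM mailbox until the bootcode echoes the
 * one's complement of BGE_MAGIC_NUMBER, roughly like this:
 */
#ifdef notdef
static int
bge_fw_booted(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM) ==
		    ~BGE_MAGIC_NUMBER)
			return (1);	/* Bootcode has finished. */
		DELAY(10);
	}
	return (0);
}
#endif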

#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, int off)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

static void
bge_writemem_direct(struct bge_softc *sc, int off, int val)
{
	CSR_WRITE_4(sc, off, val);
}

/*
 * Map a single buffer address.
 */

static void
bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct bge_dmamap_arg *ctx;

	if (error)
		return;

	ctx = arg;

	if (nseg > ctx->bge_maxsegs) {
		ctx->bge_maxsegs = 0;
		return;
	}

	ctx->bge_busaddr = segs->ds_addr;
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		device_printf(sc->bge_dev, "EEPROM read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

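	/*
	 * The EEPROM data register always returns a full 32-bit word;
	 * shift out the addressed byte lane and mask it off.
	 */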
	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return (0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int i, error = 0;
	uint8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		error = bge_eeprom_getbyte(sc, off + i, &byte);
		if (error)
			break;
		*(dest + i) = byte;
	}

	return (error ? 1 : 0);
}
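
/*
 * Minimal usage sketch (illustrative only): the attach path reads the
 * station address this way.  BGE_EE_MAC_OFFSET is assumed to be the
 * EEPROM offset defined in if_bgereg.h.
 */
#ifdef notdef
static int
bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[ETHER_ADDR_LEN])
{
	return (bge_read_eeprom(sc, (caddr_t)eaddr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN));
}
#endif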

static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc;
	uint32_t val, autopoll;
	int i;

	sc = device_get_softc(dev);

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1. On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time. Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return (0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev, "PHY read timed out\n");
		val = 0;
		goto done;
	}

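	/* The transaction is complete; fetch the result from MI_COMM. */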
	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return (0);

	return (val & 0xFFFF);
}

static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc;
	uint32_t autopoll;
	int i;

	sc = device_get_softc(dev);

	/* Writing with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev, "PHY write timed out\n");
		return (0);
	}

	return (0);
}

static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bge_miibus);

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	struct bge_dmamap_arg ctx;
	int error;

	if (m == NULL) {
		m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m_new == NULL)
			return (ENOBUFS);
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

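	/*
	 * The 2-byte pad below (ETHER_ALIGN) keeps the IP header 32-bit
	 * aligned after the 14-byte Ethernet header.  Chips with the RX
	 * alignment bug cannot DMA to odd addresses, so the pad is
	 * skipped for them and the payload is realigned later instead.
	 */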
	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m_new, ETHER_ALIGN);
	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_ldata.bge_rx_std_ring[i];
	ctx.bge_maxsegs = 1;
	ctx.sc = sc;
	error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
	    sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
	    m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
	if (error || ctx.bge_maxsegs == 0) {
		if (m == NULL) {
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			m_freem(m_new);
		}
		return (ENOMEM);
	}
	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
	    sc->bge_cdata.bge_rx_std_dmamap[i],
	    BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
	bus_dma_segment_t segs[BGE_NSEG_JUMBO];
	struct bge_extrx_bd *r;
	struct mbuf *m_new = NULL;
	int nsegs;
	int error;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m_new, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
	    m_new, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		if (m == NULL)
			m_freem(m_new);
		return (error);
	}
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;

	/*
	 * Fill in the extended RX buffer descriptor.
	 */
	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
	r->bge_flags = BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END;
	r->bge_idx = i;
	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
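	/*
	 * Each case below deliberately falls through to the next: an mbuf
	 * that maps to N segments fills address/length slots N-1 down to 0.
	 */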
	switch (nsegs) {
	case 4:
		r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
		r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
		r->bge_len3 = segs[3].ds_len;
	case 3:
		r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
		r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
		r->bge_len2 = segs[2].ds_len;
	case 2:
		r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
		r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
		r->bge_len1 = segs[1].ds_len;
	case 1:
		r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
		r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
		r->bge_len0 = segs[0].ds_len;
		break;
	default:
		panic("%s: %d segments\n", __func__, nsegs);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
	    BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->bge_std = i - 1;
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	return (0);
}

static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i]);
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
	    sc->bge_cdata.bge_rx_jumbo_ring_map,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->bge_jumbo = i - 1;

	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
				    BGE_RCB_FLAG_USE_EXT_RX_BD);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return (0);
}

static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
		    sizeof(struct bge_extrx_bd));
	}
}

static void
bge_free_tx_ring(struct bge_softc *sc)
{
	int i;

	if (sc->bge_ldata.bge_tx_ring == NULL)
		return;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i]);
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
		    sizeof(struct bge_tx_bd));
	}
}

static int
bge_init_tx_ring(struct bge_softc *sc)
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	sc->bge_tx_prodidx = 0;
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* NIC-memory send ring not used; initialize to zero. */
	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return (0);
}

static void
bge_setpromisc(struct bge_softc *sc)
{
	struct ifnet *ifp;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC)
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	else
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
}

static void
bge_setmulti(struct bge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
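		/*
		 * h is the low 7 bits of the little-endian CRC: bits 6-5
		 * select one of the four hash registers and bits 4-0 the
		 * bit within it.  For example, h = 0x4B sets bit 11
		 * (0x4B & 0x1F) of register 2 ((0x4B & 0x60) >> 5), i.e.
		 * bit 75 of the 128-bit multicast filter.
		 */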
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}
	IF_ADDR_UNLOCK(ifp);

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

static void
bge_sig_pre_reset(struct bge_softc *sc, int type)
{
	/*
	 * Some chips don't like this so only do this if ASF is enabled.
	 */
	if (sc->bge_asf_mode)
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}

static void
bge_sig_post_reset(struct bge_softc *sc, int type)
{
	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
			/* START DONE */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
			break;
		}
	}
}

static void
bge_sig_legacy(struct bge_softc *sc, int type)
{
	if (sc->bge_asf_mode) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}

void bge_stop_fw(struct bge_softc *);
void
bge_stop_fw(struct bge_softc *sc)
{
	int i;

	if (sc->bge_asf_mode) {
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
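		/*
		 * Raise the firmware event (bit 14) and wait below for the
		 * firmware to acknowledge the pause by clearing it.
		 */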
		CSR_WRITE_4(sc, BGE_CPU_EVENT,
		    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));

		for (i = 0; i < 100; i++) {
			if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
				break;
			DELAY(10);
		}
	}
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	uint32_t dma_rw_ctl;
	int i;

	/* Set endianness before we access any non-PCI registers. */
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed.
	 */
	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
		return (ENODEV);
	}

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/* Set up the PCI DMA control register. */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		/* PCI Express bus */
		dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD |
		    BGE_PCIDMARWCTL_RD_WAT_SHIFT(0xf) |
		    BGE_PCIDMARWCTL_WR_WAT_SHIFT(0x2);
	} else if (sc->bge_flags & BGE_FLAG_PCIX) {
		/* PCI-X bus */
		if (BGE_IS_5714_FAMILY(sc)) {
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
			dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
			/* XXX magic values, Broadcom-supplied Linux driver */
			if (sc->bge_asicrev == BGE_ASICREV_BCM5780)
				dma_rw_ctl |= (1 << 20) | (1 << 18) |
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE;
			else
				dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15);

		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			/*
			 * The 5704 uses a different encoding of read/write
			 * watermarks.
			 */
			dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD |
			    BGE_PCIDMARWCTL_RD_WAT_SHIFT(0x7) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(0x3);
		else
			dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD |
			    BGE_PCIDMARWCTL_RD_WAT_SHIFT(0x3) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(0x3) |
			    0x0f;

		/*
		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
		 * for hardware bugs.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			uint32_t tmp;

			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 0x6 || tmp == 0x7)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
	} else
		/* Conventional PCI bus */
		dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD |
		    BGE_PCIDMARWCTL_RD_WAT_SHIFT(0x7) |
		    BGE_PCIDMARWCTL_WR_WAT_SHIFT(0x7) |
		    0x0f;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5705)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_TX_NO_PHDR_CSUM);

	/*
	 * Tell the firmware the driver is running.
	 */
	if (sc->bge_asf_mode & ASF_STACKUP)
		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

#ifdef __brokenalpha__
	/*
	 * Must ensure that we do not cross an 8K (bytes) boundary
	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx
	 */
	PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
	    BGE_PCI_READ_BNDRY_1024BYTES, 4);
#endif

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return (0);
}

static int
bge_blockinit(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	uint32_t val;
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Note: the BCM5704 has a smaller mbuf space than other chips. */

	if (!(BGE_IS_5705_PLUS(sc))) {
		/* Configure mbuf memory pool */
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
		else
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks */
	if (!(BGE_IS_5705_PLUS(sc))) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
	}
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	if (!(BGE_IS_5705_PLUS(sc))) {
		CSR_WRITE_4(sc, BGE_BMAN_MODE,
		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

		/* Poll for buffer manager start indication */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
				break;
			DELAY(10);
		}

		if (i == BGE_TIMEOUT) {
			device_printf(sc->bge_dev,
			    "buffer manager failed to start\n");
			return (ENXIO);
		}
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev, "flow-through queue init failed\n");
		return (ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
	if (BGE_IS_5705_PLUS(sc))
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	else
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);

	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/*
	 * Initialize the jumbo RX ring control block.
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;

		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map,
		    BUS_DMASYNC_PREREAD);
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
		    BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED);
		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);

		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
	}

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 * XXX The 5754 requires a lower threshold, so it might be a
	 * requirement of all 575x family chips.  The Linux driver sets
	 * the lower threshold for all 5705 family chips as well, but there
	 * are reports that it might not need to be so strict.
	 */
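	/* For the 512-entry standard ring that is 512 / 8 == 64. */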
	if (BGE_IS_5705_PLUS(sc))
		val = 8;
	else
		val = BGE_STD_RX_RING_CNT / 8;
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	if (!(BGE_IS_5705_PLUS(sc)))
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));

	/* Disable all unused RX return rings */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
		    BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(uint64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Initialize RX ring indexes */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0.
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
	    IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
	    IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev,
		    "host coalescing engine failed to idle\n");
		return (ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if (!(BGE_IS_5705_PLUS(sc))) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);

	/* Set up address of statistics block */
	if (!(BGE_IS_5705_PLUS(sc))) {
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if (!(BGE_IS_5705_PLUS(sc)))
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    ((sc->bge_flags & BGE_FLAG_TBI) ?
	    BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	if (!(BGE_IS_5705_PLUS(sc)))
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;

	/* Enable host coalescing bug fix. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5787)
		val |= (1 << 29);

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE,
	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (!(BGE_IS_5705_PLUS(sc)))
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bge_intr() -> bge_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return (0);
}

const struct bge_revision *
bge_lookup_rev(uint32_t chipid)
{
	const struct bge_revision *br;

	for (br = bge_revisions; br->br_name != NULL; br++) {
		if (br->br_chipid == chipid)
			return (br);
	}

	for (br = bge_majorrevs; br->br_name != NULL; br++) {
		if (br->br_chipid == BGE_ASICREV(chipid))
			return (br);
	}

	return (NULL);
}

const struct bge_vendor *
bge_lookup_vendor(uint16_t vid)
{
	const struct bge_vendor *v;

	for (v = bge_vendors; v->v_name != NULL; v++)
		if (v->v_id == vid)
			return (v);

	panic("%s: unknown vendor %d", __func__, vid);
	return (NULL);
}
1645 
1646 /*
1647  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1648  * against our list and return its name if we find a match.
1649  *
1650  * Note that since the Broadcom controller contains VPD support, we
1651  * try to get the device name string from the controller itself instead
1652  * of the compiled-in string. It guarantees we'll always announce the
1653  * right product name. We fall back to the compiled-in string when
1654  * VPD is unavailable or corrupt.
1655  */
1656 static int
1657 bge_probe(device_t dev)
1658 {
1659 	struct bge_type *t = bge_devs;
1660 	struct bge_softc *sc = device_get_softc(dev);
1661 	uint16_t vid, did;
1662 
1663 	sc->bge_dev = dev;
1664 	vid = pci_get_vendor(dev);
1665 	did = pci_get_device(dev);
	while (t->bge_vid != 0) {
1667 		if ((vid == t->bge_vid) && (did == t->bge_did)) {
1668 			char model[64], buf[96];
1669 			const struct bge_revision *br;
1670 			const struct bge_vendor *v;
1671 			const char *pname;
1672 			uint32_t id;
1673 
1674 			id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1675 			    BGE_PCIMISCCTL_ASICREV;
1676 			br = bge_lookup_rev(id);
1677 			v = bge_lookup_vendor(vid);
1678 			if (pci_get_vpd_ident(dev, &pname))
1679 				snprintf(model, 64, "%s %s",
1680 				    v->v_name,
1681 				    br != NULL ? br->br_name :
1682 					"NetXtreme Ethernet Controller");
1683 			else
1684 				snprintf(model, 64, "%s", pname);
1685 			snprintf(buf, 96, "%s, %sASIC rev. %#04x", model,
1686 			    br != NULL ? "" : "unknown ", id >> 16);
1687 			device_set_desc_copy(dev, buf);
1688 			if (pci_get_subvendor(dev) == DELL_VENDORID)
1689 				sc->bge_flags |= BGE_FLAG_NO_3LED;
1690 			if (did == BCOM_DEVICEID_BCM5755M)
1691 				sc->bge_flags |= BGE_FLAG_ADJUST_TRIM;
1692 			return (0);
1693 		}
1694 		t++;
1695 	}
1696 
1697 	return (ENXIO);
1698 }
1699 
1700 static void
1701 bge_dma_free(struct bge_softc *sc)
1702 {
1703 	int i;
1704 
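	/*
	 * Tear down roughly in reverse order of allocation: per-buffer
	 * DMA maps first, then each ring's map, memory and tag, and the
	 * parent tag last.
	 */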
1705 	/* Destroy DMA maps for RX buffers. */
1706 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1707 		if (sc->bge_cdata.bge_rx_std_dmamap[i])
1708 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1709 			    sc->bge_cdata.bge_rx_std_dmamap[i]);
1710 	}
1711 
1712 	/* Destroy DMA maps for jumbo RX buffers. */
1713 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1714 		if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
1715 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
1716 			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1717 	}
1718 
1719 	/* Destroy DMA maps for TX buffers. */
1720 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1721 		if (sc->bge_cdata.bge_tx_dmamap[i])
1722 			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
1723 			    sc->bge_cdata.bge_tx_dmamap[i]);
1724 	}
1725 
1726 	if (sc->bge_cdata.bge_mtag)
1727 		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
1728 
1730 	/* Destroy standard RX ring. */
1731 	if (sc->bge_cdata.bge_rx_std_ring_map)
1732 		bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
1733 		    sc->bge_cdata.bge_rx_std_ring_map);
1734 	if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
1735 		bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
1736 		    sc->bge_ldata.bge_rx_std_ring,
1737 		    sc->bge_cdata.bge_rx_std_ring_map);
1738 
1739 	if (sc->bge_cdata.bge_rx_std_ring_tag)
1740 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
1741 
1742 	/* Destroy jumbo RX ring. */
1743 	if (sc->bge_cdata.bge_rx_jumbo_ring_map)
1744 		bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1745 		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1746 
1747 	if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
1748 	    sc->bge_ldata.bge_rx_jumbo_ring)
1749 		bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1750 		    sc->bge_ldata.bge_rx_jumbo_ring,
1751 		    sc->bge_cdata.bge_rx_jumbo_ring_map);
1752 
1753 	if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
1754 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
1755 
1756 	/* Destroy RX return ring. */
1757 	if (sc->bge_cdata.bge_rx_return_ring_map)
1758 		bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
1759 		    sc->bge_cdata.bge_rx_return_ring_map);
1760 
1761 	if (sc->bge_cdata.bge_rx_return_ring_map &&
1762 	    sc->bge_ldata.bge_rx_return_ring)
1763 		bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
1764 		    sc->bge_ldata.bge_rx_return_ring,
1765 		    sc->bge_cdata.bge_rx_return_ring_map);
1766 
1767 	if (sc->bge_cdata.bge_rx_return_ring_tag)
1768 		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
1769 
1770 	/* Destroy TX ring. */
1771 	if (sc->bge_cdata.bge_tx_ring_map)
1772 		bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
1773 		    sc->bge_cdata.bge_tx_ring_map);
1774 
1775 	if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
1776 		bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
1777 		    sc->bge_ldata.bge_tx_ring,
1778 		    sc->bge_cdata.bge_tx_ring_map);
1779 
1780 	if (sc->bge_cdata.bge_tx_ring_tag)
1781 		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
1782 
1783 	/* Destroy status block. */
1784 	if (sc->bge_cdata.bge_status_map)
1785 		bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
1786 		    sc->bge_cdata.bge_status_map);
1787 
1788 	if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
1789 		bus_dmamem_free(sc->bge_cdata.bge_status_tag,
1790 		    sc->bge_ldata.bge_status_block,
1791 		    sc->bge_cdata.bge_status_map);
1792 
1793 	if (sc->bge_cdata.bge_status_tag)
1794 		bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
1795 
1796 	/* Destroy statistics block. */
1797 	if (sc->bge_cdata.bge_stats_map)
1798 		bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
1799 		    sc->bge_cdata.bge_stats_map);
1800 
1801 	if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats)
1802 		bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
1803 		    sc->bge_ldata.bge_stats,
1804 		    sc->bge_cdata.bge_stats_map);
1805 
1806 	if (sc->bge_cdata.bge_stats_tag)
1807 		bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
1808 
1809 	/* Destroy the parent tag. */
1810 	if (sc->bge_cdata.bge_parent_tag)
1811 		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
1812 }
1813 
1814 static int
1815 bge_dma_alloc(device_t dev)
1816 {
1817 	struct bge_dmamap_arg ctx;
1818 	struct bge_softc *sc;
1819 	int i, error;
1820 
1821 	sc = device_get_softc(dev);
1822 
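	/*
	 * Each ring and block below follows the same three-step bus_dma
	 * recipe: create a tag describing the memory, allocate it with
	 * bus_dmamem_alloc(), then load the map so bge_dma_map_addr()
	 * can record the bus address for programming into the chip.
	 */
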
1823 	/*
1824 	 * Allocate the parent bus DMA tag appropriate for PCI.
1825 	 */
1826 	error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),/* parent */
1827 			1, 0,			/* alignment, boundary */
1828 			BUS_SPACE_MAXADDR,	/* lowaddr */
1829 			BUS_SPACE_MAXADDR,	/* highaddr */
1830 			NULL, NULL,		/* filter, filterarg */
1831 			MAXBSIZE, BGE_NSEG_NEW,	/* maxsize, nsegments */
1832 			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1833 			0,			/* flags */
1834 			NULL, NULL,		/* lockfunc, lockarg */
1835 			&sc->bge_cdata.bge_parent_tag);
1836 
1837 	if (error != 0) {
1838 		device_printf(sc->bge_dev,
1839 		    "could not allocate parent dma tag\n");
1840 		return (ENOMEM);
1841 	}
1842 
1843 	/*
1844 	 * Create tag for RX mbufs.
1845 	 */
1846 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
1847 	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1848 	    NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
1849 	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
1850 
1851 	if (error) {
1852 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1853 		return (ENOMEM);
1854 	}
1855 
1856 	/* Create DMA maps for RX buffers. */
1857 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1858 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1859 			    &sc->bge_cdata.bge_rx_std_dmamap[i]);
1860 		if (error) {
1861 			device_printf(sc->bge_dev,
1862 			    "can't create DMA map for RX\n");
1863 			return (ENOMEM);
1864 		}
1865 	}
1866 
1867 	/* Create DMA maps for TX buffers. */
1868 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1869 		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1870 			    &sc->bge_cdata.bge_tx_dmamap[i]);
1871 		if (error) {
1872 			device_printf(sc->bge_dev,
			    "can't create DMA map for TX\n");
1874 			return (ENOMEM);
1875 		}
1876 	}
1877 
1878 	/* Create tag for standard RX ring. */
1879 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1880 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1881 	    NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1882 	    NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1883 
1884 	if (error) {
1885 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1886 		return (ENOMEM);
1887 	}
1888 
1889 	/* Allocate DMA'able memory for standard RX ring. */
1890 	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1891 	    (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1892 	    &sc->bge_cdata.bge_rx_std_ring_map);
1893 	if (error)
1894 		return (ENOMEM);
1895 
1896 	bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1897 
1898 	/* Load the address of the standard RX ring. */
1899 	ctx.bge_maxsegs = 1;
1900 	ctx.sc = sc;
1901 
1902 	error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
1903 	    sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
1904 	    BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1905 
1906 	if (error)
1907 		return (ENOMEM);
1908 
1909 	sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
1910 
1911 	/* Create tags for jumbo mbufs. */
1912 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
1913 		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1914 		    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1915 		    NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
1916 		    0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
1917 		if (error) {
1918 			device_printf(sc->bge_dev,
1919 			    "could not allocate jumbo dma tag\n");
1920 			return (ENOMEM);
1921 		}
1922 
1923 		/* Create tag for jumbo RX ring. */
1924 		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1925 		    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1926 		    NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
1927 		    NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);
1928 
1929 		if (error) {
1930 			device_printf(sc->bge_dev,
1931 			    "could not allocate jumbo ring dma tag\n");
1932 			return (ENOMEM);
1933 		}
1934 
1935 		/* Allocate DMA'able memory for jumbo RX ring. */
1936 		error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1937 		    (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
1938 		    BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1939 		    &sc->bge_cdata.bge_rx_jumbo_ring_map);
1940 		if (error)
1941 			return (ENOMEM);
1942 
1943 		/* Load the address of the jumbo RX ring. */
1944 		ctx.bge_maxsegs = 1;
1945 		ctx.sc = sc;
1946 
1947 		error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1948 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
1949 		    sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
1950 		    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1951 
1952 		if (error)
1953 			return (ENOMEM);
1954 
1955 		sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;
1956 
1957 		/* Create DMA maps for jumbo RX buffers. */
1958 		for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1959 			error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
1960 				    0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1961 			if (error) {
1962 				device_printf(sc->bge_dev,
1963 				    "can't create DMA map for jumbo RX\n");
1964 				return (ENOMEM);
1965 			}
1966 		}
1968 	}
1969 
1970 	/* Create tag for RX return ring. */
1971 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1972 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1973 	    NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
1974 	    NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);
1975 
1976 	if (error) {
1977 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
1978 		return (ENOMEM);
1979 	}
1980 
1981 	/* Allocate DMA'able memory for RX return ring. */
1982 	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
1983 	    (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
1984 	    &sc->bge_cdata.bge_rx_return_ring_map);
1985 	if (error)
1986 		return (ENOMEM);
1987 
1988 	bzero((char *)sc->bge_ldata.bge_rx_return_ring,
1989 	    BGE_RX_RTN_RING_SZ(sc));
1990 
1991 	/* Load the address of the RX return ring. */
1992 	ctx.bge_maxsegs = 1;
1993 	ctx.sc = sc;
1994 
1995 	error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
1996 	    sc->bge_cdata.bge_rx_return_ring_map,
1997 	    sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
1998 	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1999 
2000 	if (error)
2001 		return (ENOMEM);
2002 
2003 	sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;
2004 
2005 	/* Create tag for TX ring. */
2006 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2007 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2008 	    NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
2009 	    &sc->bge_cdata.bge_tx_ring_tag);
2010 
2011 	if (error) {
2012 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
2013 		return (ENOMEM);
2014 	}
2015 
2016 	/* Allocate DMA'able memory for TX ring. */
2017 	error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
2018 	    (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
2019 	    &sc->bge_cdata.bge_tx_ring_map);
2020 	if (error)
2021 		return (ENOMEM);
2022 
2023 	bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
2024 
2025 	/* Load the address of the TX ring. */
2026 	ctx.bge_maxsegs = 1;
2027 	ctx.sc = sc;
2028 
2029 	error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
2030 	    sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
2031 	    BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2032 
2033 	if (error)
2034 		return (ENOMEM);
2035 
2036 	sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
2037 
2038 	/* Create tag for status block. */
2039 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2040 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2041 	    NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
2042 	    NULL, NULL, &sc->bge_cdata.bge_status_tag);
2043 
2044 	if (error) {
2045 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
2046 		return (ENOMEM);
2047 	}
2048 
2049 	/* Allocate DMA'able memory for status block. */
2050 	error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
2051 	    (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
2052 	    &sc->bge_cdata.bge_status_map);
2053 	if (error)
2054 		return (ENOMEM);
2055 
2056 	bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2057 
2058 	/* Load the address of the status block. */
2059 	ctx.sc = sc;
2060 	ctx.bge_maxsegs = 1;
2061 
2062 	error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2063 	    sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2064 	    BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2065 
2066 	if (error)
2067 		return (ENOMEM);
2068 
2069 	sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2070 
2071 	/* Create tag for statistics block. */
2072 	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2073 	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2074 	    NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2075 	    &sc->bge_cdata.bge_stats_tag);
2076 
2077 	if (error) {
2078 		device_printf(sc->bge_dev, "could not allocate dma tag\n");
2079 		return (ENOMEM);
2080 	}
2081 
2082 	/* Allocate DMA'able memory for statistics block. */
2083 	error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2084 	    (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2085 	    &sc->bge_cdata.bge_stats_map);
2086 	if (error)
2087 		return (ENOMEM);
2088 
2089 	bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2090 
	/* Load the address of the statistics block. */
2092 	ctx.sc = sc;
2093 	ctx.bge_maxsegs = 1;
2094 
2095 	error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2096 	    sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2097 	    BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2098 
2099 	if (error)
2100 		return (ENOMEM);
2101 
2102 	sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2103 
2104 	return (0);
2105 }
2106 
2107 /*
2108  * Return true if this device has more than one port.
2109  */
2110 static int
2111 bge_has_multiple_ports(struct bge_softc *sc)
2112 {
2113 	device_t dev = sc->bge_dev;
2114 	u_int b, s, f, fscan;
2115 
2116 	b = pci_get_bus(dev);
2117 	s = pci_get_slot(dev);
2118 	f = pci_get_function(dev);
2119 	for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2120 		if (fscan != f && pci_find_bsf(b, s, fscan) != NULL)
2121 			return (1);
2122 	return (0);
2123 }
2124 
2125 /*
2126  * Return true if MSI can be used with this device.
2127  */
2128 static int
2129 bge_can_use_msi(struct bge_softc *sc)
2130 {
2131 	int can_use_msi = 0;
2132 
2133 	switch (sc->bge_asicrev) {
2134 	case BGE_ASICREV_BCM5714:
2135 		/*
2136 		 * Apparently, MSI doesn't work when this chip is configured
2137 		 * in single-port mode.
2138 		 */
2139 		if (bge_has_multiple_ports(sc))
2140 			can_use_msi = 1;
2141 		break;
2142 	case BGE_ASICREV_BCM5750:
2143 		if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2144 		    sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2145 			can_use_msi = 1;
2146 		break;
2147 	case BGE_ASICREV_BCM5752:
2148 	case BGE_ASICREV_BCM5780:
2149 		can_use_msi = 1;
2150 		break;
2151 	}
2152 	return (can_use_msi);
2153 }
2154 
2155 static int
2156 bge_attach(device_t dev)
2157 {
2158 	struct ifnet *ifp;
2159 	struct bge_softc *sc;
2160 	uint32_t hwcfg = 0;
2161 	uint32_t mac_tmp = 0;
2162 	u_char eaddr[6];
2163 	int error = 0, msicount, rid, trys, reg;
2164 
2165 	sc = device_get_softc(dev);
2166 	sc->bge_dev = dev;
2167 
2168 	/*
2169 	 * Map control/status registers.
2170 	 */
2171 	pci_enable_busmaster(dev);
2172 
2173 	rid = BGE_PCI_BAR0;
2174 	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2175 	    RF_ACTIVE|PCI_RF_DENSE);
2176 
2177 	if (sc->bge_res == NULL) {
		device_printf(sc->bge_dev, "couldn't map memory\n");
2179 		error = ENXIO;
2180 		goto fail;
2181 	}
2182 
2183 	sc->bge_btag = rman_get_bustag(sc->bge_res);
2184 	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2185 
2186 	/* Save ASIC rev. */
2187 
2188 	sc->bge_chipid =
2189 	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2190 	    BGE_PCIMISCCTL_ASICREV;
2191 	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2192 	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2193 
2194 	/* Save chipset family. */
2195 	switch (sc->bge_asicrev) {
2196 	case BGE_ASICREV_BCM5700:
2197 	case BGE_ASICREV_BCM5701:
2198 	case BGE_ASICREV_BCM5703:
2199 	case BGE_ASICREV_BCM5704:
2200 		sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2201 		break;
2202 	case BGE_ASICREV_BCM5714_A0:
2203 	case BGE_ASICREV_BCM5780:
2204 	case BGE_ASICREV_BCM5714:
2205 		sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
2206 		/* FALLTHRU */
2207 	case BGE_ASICREV_BCM5750:
2208 	case BGE_ASICREV_BCM5752:
2209 	case BGE_ASICREV_BCM5755:
2210 	case BGE_ASICREV_BCM5787:
2211 		sc->bge_flags |= BGE_FLAG_575X_PLUS;
2212 		/* FALLTHRU */
2213 	case BGE_ASICREV_BCM5705:
2214 		sc->bge_flags |= BGE_FLAG_5705_PLUS;
2215 		break;
2216 	}
2217 
2218 	/* Set various bug flags. */
2219 	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2220 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2221 		sc->bge_flags |= BGE_FLAG_CRC_BUG;
2222 	if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2223 	    sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2224 		sc->bge_flags |= BGE_FLAG_ADC_BUG;
2225 	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2226 		sc->bge_flags |= BGE_FLAG_5704_A0_BUG;
2227 	if (BGE_IS_5705_PLUS(sc) &&
2228 	    !(sc->bge_flags & BGE_FLAG_ADJUST_TRIM)) {
2229 		if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2230 		    sc->bge_asicrev == BGE_ASICREV_BCM5787)
2231 			sc->bge_flags |= BGE_FLAG_JITTER_BUG;
2232 		else
2233 			sc->bge_flags |= BGE_FLAG_BER_BUG;
2234 	}
2235 
	/*
	 * Check if this is a PCI-X or PCI Express device.
	 */
2239 #if __FreeBSD_version > 700010
2240 	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
2241 		/*
2242 		 * Found a PCI Express capabilities register, this
2243 		 * must be a PCI Express device.
2244 		 */
2245 		if (reg != 0)
2246 			sc->bge_flags |= BGE_FLAG_PCIE;
2247 	} else if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0) {
2248 		if (reg != 0)
2249 			sc->bge_flags |= BGE_FLAG_PCIX;
2250 	}
2251 
2252 #else
2253 	if (BGE_IS_5705_PLUS(sc)) {
2254 		reg = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4);
2255 		if ((reg & 0xff) == BGE_PCIE_CAPID)
2256 			sc->bge_flags |= BGE_FLAG_PCIE;
2257 	} else {
2258 		/*
2259 		 * Check if the device is in PCI-X Mode.
2260 		 * (This bit is not valid on PCI Express controllers.)
2261 		 */
2262 		if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
2263 		    BGE_PCISTATE_PCI_BUSMODE) == 0)
2264 			sc->bge_flags |= BGE_FLAG_PCIX;
2265 	}
2266 #endif
2267 
2268 	/*
2269 	 * Allocate the interrupt, using MSI if possible.  These devices
2270 	 * support 8 MSI messages, but only the first one is used in
2271 	 * normal operation.
2272 	 */
2273 	if (bge_can_use_msi(sc)) {
2274 		msicount = pci_msi_count(dev);
2275 		if (msicount > 1)
2276 			msicount = 1;
2277 	} else
2278 		msicount = 0;
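	/* MSI vectors use rid 1 and up; rid 0 is the legacy INTx line. */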
2279 	if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
2280 		rid = 1;
2281 		sc->bge_flags |= BGE_FLAG_MSI;
2282 	} else
2283 		rid = 0;
2284 
2285 	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2286 	    RF_SHAREABLE | RF_ACTIVE);
2287 
2288 	if (sc->bge_irq == NULL) {
2289 		device_printf(sc->bge_dev, "couldn't map interrupt\n");
2290 		error = ENXIO;
2291 		goto fail;
2292 	}
2293 
2294 	BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2295 
2296 	/* Try to reset the chip. */
2297 	if (bge_reset(sc)) {
2298 		device_printf(sc->bge_dev, "chip reset failed\n");
2299 		bge_release_resources(sc);
2300 		error = ENXIO;
2301 		goto fail;
2302 	}
2303 
2304 	sc->bge_asf_mode = 0;
2305 	if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2306 	    == BGE_MAGIC_NUMBER)) {
2307 		if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
2308 		    & BGE_HWCFG_ASF) {
2309 			sc->bge_asf_mode |= ASF_ENABLE;
2310 			sc->bge_asf_mode |= ASF_STACKUP;
2311 			if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
2312 				sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
2313 			}
2314 		}
2315 	}
2316 
2317 	/* Try to reset the chip again the nice way. */
2318 	bge_stop_fw(sc);
2319 	bge_sig_pre_reset(sc, BGE_RESET_STOP);
2320 	if (bge_reset(sc)) {
2321 		device_printf(sc->bge_dev, "chip reset failed\n");
2322 		bge_release_resources(sc);
2323 		error = ENXIO;
2324 		goto fail;
2325 	}
2326 
2327 	bge_sig_legacy(sc, BGE_RESET_STOP);
2328 	bge_sig_post_reset(sc, BGE_RESET_STOP);
2329 
2330 	if (bge_chipinit(sc)) {
2331 		device_printf(sc->bge_dev, "chip initialization failed\n");
2332 		bge_release_resources(sc);
2333 		error = ENXIO;
2334 		goto fail;
2335 	}
2336 
	/*
	 * Get the station address from the chip's internal memory when
	 * the 0x484b signature word is present, or from the EEPROM
	 * otherwise.
	 */
2340 	mac_tmp = bge_readmem_ind(sc, 0x0c14);
2341 	if ((mac_tmp >> 16) == 0x484b) {
2342 		eaddr[0] = (u_char)(mac_tmp >> 8);
2343 		eaddr[1] = (u_char)mac_tmp;
2344 		mac_tmp = bge_readmem_ind(sc, 0x0c18);
2345 		eaddr[2] = (u_char)(mac_tmp >> 24);
2346 		eaddr[3] = (u_char)(mac_tmp >> 16);
2347 		eaddr[4] = (u_char)(mac_tmp >> 8);
2348 		eaddr[5] = (u_char)mac_tmp;
2349 	} else if (bge_read_eeprom(sc, eaddr,
2350 	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2351 		device_printf(sc->bge_dev, "failed to read station address\n");
2352 		bge_release_resources(sc);
2353 		error = ENXIO;
2354 		goto fail;
2355 	}
2356 
2357 	/* 5705 limits RX return ring to 512 entries. */
2358 	if (BGE_IS_5705_PLUS(sc))
2359 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2360 	else
2361 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2362 
2363 	if (bge_dma_alloc(dev)) {
2364 		device_printf(sc->bge_dev,
2365 		    "failed to allocate DMA resources\n");
2366 		bge_release_resources(sc);
2367 		error = ENXIO;
2368 		goto fail;
2369 	}
2370 
2371 	/* Set default tuneable values. */
2372 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2373 	sc->bge_rx_coal_ticks = 150;
2374 	sc->bge_tx_coal_ticks = 150;
2375 	sc->bge_rx_max_coal_bds = 10;
2376 	sc->bge_tx_max_coal_bds = 10;
2377 
2378 	/* Set up ifnet structure */
2379 	ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2380 	if (ifp == NULL) {
2381 		device_printf(sc->bge_dev, "failed to if_alloc()\n");
2382 		bge_release_resources(sc);
2383 		error = ENXIO;
2384 		goto fail;
2385 	}
2386 	ifp->if_softc = sc;
2387 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2388 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2389 	ifp->if_ioctl = bge_ioctl;
2390 	ifp->if_start = bge_start;
2391 	ifp->if_init = bge_init;
2392 	ifp->if_mtu = ETHERMTU;
2393 	ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2394 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2395 	IFQ_SET_READY(&ifp->if_snd);
2396 	ifp->if_hwassist = BGE_CSUM_FEATURES;
2397 	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2398 	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM;
2399 	ifp->if_capenable = ifp->if_capabilities;
2400 #ifdef DEVICE_POLLING
2401 	ifp->if_capabilities |= IFCAP_POLLING;
2402 #endif
2403 
2404 	/*
2405 	 * 5700 B0 chips do not support checksumming correctly due
2406 	 * to hardware bugs.
2407 	 */
2408 	if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2409 		ifp->if_capabilities &= ~IFCAP_HWCSUM;
		ifp->if_capenable &= ~IFCAP_HWCSUM;
2411 		ifp->if_hwassist = 0;
2412 	}
2413 
2414 	/*
2415 	 * Figure out what sort of media we have by checking the
2416 	 * hardware config word in the first 32k of NIC internal memory,
2417 	 * or fall back to examining the EEPROM if necessary.
2418 	 * Note: on some BCM5700 cards, this value appears to be unset.
2419 	 * If that's the case, we have to rely on identifying the NIC
2420 	 * by its PCI subsystem ID, as we do below for the SysKonnect
2421 	 * SK-9D41.
2422 	 */
2423 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2424 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2425 	else {
2426 		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2427 		    sizeof(hwcfg))) {
2428 			device_printf(sc->bge_dev, "failed to read EEPROM\n");
2429 			bge_release_resources(sc);
2430 			error = ENXIO;
2431 			goto fail;
2432 		}
2433 		hwcfg = ntohl(hwcfg);
2434 	}
2435 
2436 	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2437 		sc->bge_flags |= BGE_FLAG_TBI;
2438 
2439 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
2440 	if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2441 		sc->bge_flags |= BGE_FLAG_TBI;
2442 
2443 	if (sc->bge_flags & BGE_FLAG_TBI) {
2444 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
2445 		    bge_ifmedia_upd, bge_ifmedia_sts);
2446 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2447 		ifmedia_add(&sc->bge_ifmedia,
2448 		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
2449 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2450 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2451 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2452 	} else {
2453 		/*
		 * Do transceiver setup and tell the firmware the
		 * driver is down so we can try to get access to the
		 * PHY for probing if ASF is running.  Retry a couple
		 * of times if we get a conflict with the ASF firmware
		 * accessing the PHY.
2459 		 */
2460 		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
		trys = 0;
again:
		bge_asf_driver_up(sc);

2465 		if (mii_phy_probe(dev, &sc->bge_miibus,
2466 		    bge_ifmedia_upd, bge_ifmedia_sts)) {
2467 			if (trys++ < 4) {
2468 				device_printf(sc->bge_dev, "Try again\n");
				bge_miibus_writereg(sc->bge_dev, 1,
				    MII_BMCR, BMCR_RESET);
2470 				goto again;
2471 			}
2472 
2473 			device_printf(sc->bge_dev, "MII without any PHY!\n");
2474 			bge_release_resources(sc);
2475 			error = ENXIO;
2476 			goto fail;
2477 		}
2478 
2479 		/*
		 * Now tell the firmware we are going up after probing the PHY.
2481 		 */
2482 		if (sc->bge_asf_mode & ASF_STACKUP)
2483 			BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2484 	}
2485 
2486 	/*
2487 	 * When using the BCM5701 in PCI-X mode, data corruption has
2488 	 * been observed in the first few bytes of some received packets.
2489 	 * Aligning the packet buffer in memory eliminates the corruption.
2490 	 * Unfortunately, this misaligns the packet payloads.  On platforms
2491 	 * which do not support unaligned accesses, we will realign the
2492 	 * payloads by copying the received packets.
2493 	 */
2494 	if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2495 	    sc->bge_flags & BGE_FLAG_PCIX)
		sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2497 
2498 	/*
2499 	 * Call MI attach routine.
2500 	 */
2501 	ether_ifattach(ifp, eaddr);
2502 	callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
2503 
2504 	/*
2505 	 * Hookup IRQ last.
2506 	 */
2507 	error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2508 	   bge_intr, sc, &sc->bge_intrhand);
2509 
	if (error) {
		bge_detach(dev);
		device_printf(sc->bge_dev, "couldn't set up irq\n");
		goto fail;
	}
2514 
2515 	bge_add_sysctls(sc);
2516 
2517 fail:
2518 	return (error);
2519 }
2520 
2521 static int
2522 bge_detach(device_t dev)
2523 {
2524 	struct bge_softc *sc;
2525 	struct ifnet *ifp;
2526 
2527 	sc = device_get_softc(dev);
2528 	ifp = sc->bge_ifp;
2529 
2530 #ifdef DEVICE_POLLING
2531 	if (ifp->if_capenable & IFCAP_POLLING)
2532 		ether_poll_deregister(ifp);
2533 #endif
2534 
2535 	BGE_LOCK(sc);
2536 	bge_stop(sc);
2537 	bge_reset(sc);
2538 	BGE_UNLOCK(sc);
2539 
2540 	callout_drain(&sc->bge_stat_ch);
2541 
2542 	ether_ifdetach(ifp);
2543 
2544 	if (sc->bge_flags & BGE_FLAG_TBI) {
2545 		ifmedia_removeall(&sc->bge_ifmedia);
2546 	} else {
2547 		bus_generic_detach(dev);
2548 		device_delete_child(dev, sc->bge_miibus);
2549 	}
2550 
2551 	bge_release_resources(sc);
2552 
2553 	return (0);
2554 }
2555 
2556 static void
2557 bge_release_resources(struct bge_softc *sc)
2558 {
2559 	device_t dev;
2560 
2561 	dev = sc->bge_dev;
2562 
2563 	if (sc->bge_intrhand != NULL)
2564 		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2565 
2566 	if (sc->bge_irq != NULL)
2567 		bus_release_resource(dev, SYS_RES_IRQ,
2568 		    sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
2569 
2570 	if (sc->bge_flags & BGE_FLAG_MSI)
2571 		pci_release_msi(dev);
2572 
2573 	if (sc->bge_res != NULL)
2574 		bus_release_resource(dev, SYS_RES_MEMORY,
2575 		    BGE_PCI_BAR0, sc->bge_res);
2576 
2577 	if (sc->bge_ifp != NULL)
2578 		if_free(sc->bge_ifp);
2579 
2580 	bge_dma_free(sc);
2581 
2582 	if (mtx_initialized(&sc->bge_mtx))	/* XXX */
2583 		BGE_LOCK_DESTROY(sc);
2584 }
2585 
2586 static int
2587 bge_reset(struct bge_softc *sc)
2588 {
2589 	device_t dev;
2590 	uint32_t cachesize, command, pcistate, reset;
2591 	void (*write_op)(struct bge_softc *, int, int);
2592 	int i, val = 0;
2593 
2594 	dev = sc->bge_dev;
2595 
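	/*
	 * Pick a register write method that stays usable across the
	 * reset: direct memory writes on PCI Express, indirect memory
	 * writes on the other 575x-plus chips, and indirect register
	 * writes on everything else (including the 5714 family).
	 */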
2596 	if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc)) {
2597 		if (sc->bge_flags & BGE_FLAG_PCIE)
2598 			write_op = bge_writemem_direct;
2599 		else
2600 			write_op = bge_writemem_ind;
2601 	} else
2602 		write_op = bge_writereg_ind;
2603 
2604 	/* Save some important PCI state. */
2605 	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2606 	command = pci_read_config(dev, BGE_PCI_CMD, 4);
2607 	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2608 
2609 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2610 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2612 
2613 	/* Disable fastboot on controllers that support it. */
2614 	if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
2615 	    sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2616 	    sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2617 		if (bootverbose)
2618 			device_printf(sc->bge_dev, "Disabling fastboot\n");
2619 		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2620 	}
2621 
2622 	/*
2623 	 * Write the magic number to SRAM at offset 0xB50.
2624 	 * When firmware finishes its initialization it will
2625 	 * write ~BGE_MAGIC_NUMBER to the same location.
2626 	 */
2627 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2628 
2629 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2630 
2631 	/* XXX: Broadcom Linux driver. */
2632 	if (sc->bge_flags & BGE_FLAG_PCIE) {
2633 		if (CSR_READ_4(sc, 0x7e2c) == 0x60)	/* PCIE 1.0 */
2634 			CSR_WRITE_4(sc, 0x7e2c, 0x20);
2635 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2636 			/* Prevent PCIE link training during global reset */
2637 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2638 			reset |= (1<<29);
2639 		}
2640 	}
2641 
2642 	/*
2643 	 * Set GPHY Power Down Override to leave GPHY
2644 	 * powered up in D0 uninitialized.
2645 	 */
2646 	if (BGE_IS_5705_PLUS(sc))
2647 		reset |= 0x04000000;
2648 
2649 	/* Issue global reset */
2650 	write_op(sc, BGE_MISC_CFG, reset);
2651 
2652 	DELAY(1000);
2653 
2654 	/* XXX: Broadcom Linux driver. */
2655 	if (sc->bge_flags & BGE_FLAG_PCIE) {
2656 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2657 			uint32_t v;
2658 
2659 			DELAY(500000); /* wait for link training to complete */
2660 			v = pci_read_config(dev, 0xc4, 4);
2661 			pci_write_config(dev, 0xc4, v | (1<<15), 4);
2662 		}
2663 		/*
2664 		 * Set PCIE max payload size to 128 bytes and clear error
2665 		 * status.
2666 		 */
2667 		pci_write_config(dev, 0xd8, 0xf5000, 4);
2668 	}
2669 
2670 	/* Reset some of the PCI state that got zapped by reset. */
2671 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2672 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2673 	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2674 	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2675 	pci_write_config(dev, BGE_PCI_CMD, command, 4);
2676 	write_op(sc, BGE_MISC_CFG, (65 << 1));
2677 
	/* Re-enable MSI, if necessary, and enable the memory arbiter. */
2679 	if (BGE_IS_5714_FAMILY(sc)) {
2680 		uint32_t val;
2681 
2682 		/* This chip disables MSI on reset. */
2683 		if (sc->bge_flags & BGE_FLAG_MSI) {
2684 			val = pci_read_config(dev, BGE_PCI_MSI_CTL, 2);
2685 			pci_write_config(dev, BGE_PCI_MSI_CTL,
2686 			    val | PCIM_MSICTRL_MSI_ENABLE, 2);
2687 			val = CSR_READ_4(sc, BGE_MSI_MODE);
2688 			CSR_WRITE_4(sc, BGE_MSI_MODE,
2689 			    val | BGE_MSIMODE_ENABLE);
2690 		}
2691 		val = CSR_READ_4(sc, BGE_MARB_MODE);
2692 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2693 	} else
2694 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2695 
2696 	/*
2697 	 * Poll until we see the 1's complement of the magic number.
2698 	 * This indicates that the firmware initialization
2699 	 * is complete.
2700 	 */
2701 	for (i = 0; i < BGE_TIMEOUT; i++) {
2702 		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2703 		if (val == ~BGE_MAGIC_NUMBER)
2704 			break;
2705 		DELAY(10);
2706 	}
2707 
2708 	if (i == BGE_TIMEOUT) {
2709 		device_printf(sc->bge_dev, "firmware handshake timed out, "
2710 		    "found 0x%08x\n", val);
2711 	}
2712 
2713 	/*
2714 	 * XXX Wait for the value of the PCISTATE register to
2715 	 * return to its original pre-reset state. This is a
2716 	 * fairly good indicator of reset completion. If we don't
2717 	 * wait for the reset to fully complete, trying to read
2718 	 * from the device's non-PCI registers may yield garbage
2719 	 * results.
2720 	 */
2721 	for (i = 0; i < BGE_TIMEOUT; i++) {
2722 		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2723 			break;
2724 		DELAY(10);
2725 	}
2726 
2727 	if (sc->bge_flags & BGE_FLAG_PCIE) {
2728 		reset = bge_readmem_ind(sc, 0x7c00);
2729 		bge_writemem_ind(sc, 0x7c00, reset | (1 << 25));
2730 	}
2731 
2732 	/* Fix up byte swapping. */
2733 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
2734 	    BGE_MODECTL_BYTESWAP_DATA);
2735 
2736 	/* Tell the ASF firmware we are up */
2737 	if (sc->bge_asf_mode & ASF_STACKUP)
2738 		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2739 
2740 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2741 
2742 	/*
2743 	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to ensure the SERDES drive level is set
2745 	 * to 1.2V.
2746 	 */
2747 	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
2748 	    sc->bge_flags & BGE_FLAG_TBI) {
2749 		uint32_t serdescfg;
2750 
2751 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2752 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
2753 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2754 	}
2755 
2756 	/* XXX: Broadcom Linux driver. */
2757 	if (sc->bge_flags & BGE_FLAG_PCIE &&
2758 	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2759 		uint32_t v;
2760 
2761 		v = CSR_READ_4(sc, 0x7c00);
2762 		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2763 	}
2764 	DELAY(10000);
2765 
	return (0);
2767 }
2768 
2769 /*
2770  * Frame reception handling. This is called if there's a frame
2771  * on the receive return list.
2772  *
2773  * Note: we have to be able to handle two possibilities here:
2774  * 1) the frame is from the jumbo receive ring
2775  * 2) the frame is from the standard receive ring
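 *
 * The chip advances the producer index in the status block as it
 * fills the return ring; we chase it with bge_rx_saved_considx and
 * report our progress back through the RX consumer mailbox below.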
2776  */
2777 
2778 static void
2779 bge_rxeof(struct bge_softc *sc)
2780 {
2781 	struct ifnet *ifp;
2782 	int stdcnt = 0, jumbocnt = 0;
2783 
2784 	BGE_LOCK_ASSERT(sc);
2785 
2786 	/* Nothing to do. */
2787 	if (sc->bge_rx_saved_considx ==
2788 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
2789 		return;
2790 
2791 	ifp = sc->bge_ifp;
2792 
2793 	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2794 	    sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
2795 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2796 	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD);
2797 	if (BGE_IS_JUMBO_CAPABLE(sc))
2798 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2799 		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTREAD);
2800 
	while (sc->bge_rx_saved_considx !=
2802 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2803 		struct bge_rx_bd	*cur_rx;
2804 		uint32_t		rxidx;
2805 		struct mbuf		*m = NULL;
2806 		uint16_t		vlan_tag = 0;
2807 		int			have_tag = 0;
2808 
2809 #ifdef DEVICE_POLLING
2810 		if (ifp->if_capenable & IFCAP_POLLING) {
2811 			if (sc->rxcycles <= 0)
2812 				break;
2813 			sc->rxcycles--;
2814 		}
2815 #endif
2816 
		cur_rx =
		    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2819 
2820 		rxidx = cur_rx->bge_idx;
2821 		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2822 
2823 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2824 			have_tag = 1;
2825 			vlan_tag = cur_rx->bge_vlan_tag;
2826 		}
2827 
2828 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2829 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2830 			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
2831 			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
2832 			    BUS_DMASYNC_POSTREAD);
2833 			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
2834 			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
2835 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2836 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2837 			jumbocnt++;
2838 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2839 				ifp->if_ierrors++;
2840 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2841 				continue;
2842 			}
2843 			if (bge_newbuf_jumbo(sc,
2844 			    sc->bge_jumbo, NULL) == ENOBUFS) {
2845 				ifp->if_ierrors++;
2846 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2847 				continue;
2848 			}
2849 		} else {
2850 			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2851 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2852 			    sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2853 			    BUS_DMASYNC_POSTREAD);
2854 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2855 			    sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2856 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2857 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2858 			stdcnt++;
2859 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2860 				ifp->if_ierrors++;
2861 				bge_newbuf_std(sc, sc->bge_std, m);
2862 				continue;
2863 			}
2864 			if (bge_newbuf_std(sc, sc->bge_std,
2865 			    NULL) == ENOBUFS) {
2866 				ifp->if_ierrors++;
2867 				bge_newbuf_std(sc, sc->bge_std, m);
2868 				continue;
2869 			}
2870 		}
2871 
2872 		ifp->if_ipackets++;
2873 #ifndef __NO_STRICT_ALIGNMENT
2874 		/*
2875 		 * For architectures with strict alignment we must make sure
2876 		 * the payload is aligned.
2877 		 */
2878 		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
2879 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2880 			    cur_rx->bge_len);
2881 			m->m_data += ETHER_ALIGN;
2882 		}
2883 #endif
2884 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2885 		m->m_pkthdr.rcvif = ifp;
2886 
2887 		if (ifp->if_capenable & IFCAP_RXCSUM) {
2888 			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2889 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2890 				if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2891 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2892 			}
2893 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
2894 			    m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
2895 				m->m_pkthdr.csum_data =
2896 				    cur_rx->bge_tcp_udp_csum;
2897 				m->m_pkthdr.csum_flags |=
2898 				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2899 			}
2900 		}
2901 
2902 		/*
2903 		 * If we received a packet with a vlan tag,
2904 		 * attach that information to the packet.
2905 		 */
2906 		if (have_tag) {
2907 			m->m_pkthdr.ether_vtag = vlan_tag;
2908 			m->m_flags |= M_VLANTAG;
2909 		}
2910 
2911 		BGE_UNLOCK(sc);
2912 		(*ifp->if_input)(ifp, m);
2913 		BGE_LOCK(sc);
2914 	}
2915 
2916 	if (stdcnt > 0)
2917 		bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2918 		    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
2919 
2920 	if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0)
2921 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2922 		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
2923 
2924 	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2925 	if (stdcnt)
2926 		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2927 	if (jumbocnt)
2928 		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2929 #ifdef notyet
2930 	/*
2931 	 * This register wraps very quickly under heavy packet drops.
2932 	 * If you need correct statistics, you can enable this check.
2933 	 */
2934 	if (BGE_IS_5705_PLUS(sc))
2935 		ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
2936 #endif
2937 }
2938 
2939 static void
2940 bge_txeof(struct bge_softc *sc)
2941 {
2942 	struct bge_tx_bd *cur_tx = NULL;
2943 	struct ifnet *ifp;
2944 
2945 	BGE_LOCK_ASSERT(sc);
2946 
2947 	/* Nothing to do. */
2948 	if (sc->bge_tx_saved_considx ==
2949 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
2950 		return;
2951 
2952 	ifp = sc->bge_ifp;
2953 
2954 	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
2955 	    sc->bge_cdata.bge_tx_ring_map,
2956 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2957 	/*
2958 	 * Go through our tx ring and free mbufs for those
2959 	 * frames that have been sent.
2960 	 */
2961 	while (sc->bge_tx_saved_considx !=
2962 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2963 		uint32_t		idx = 0;
2964 
2965 		idx = sc->bge_tx_saved_considx;
2966 		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2967 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2968 			ifp->if_opackets++;
2969 		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2970 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2971 			    sc->bge_cdata.bge_tx_dmamap[idx],
2972 			    BUS_DMASYNC_POSTWRITE);
2973 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2974 			    sc->bge_cdata.bge_tx_dmamap[idx]);
2975 			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2976 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2977 		}
2978 		sc->bge_txcnt--;
2979 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2980 	}
2981 
2982 	if (cur_tx != NULL)
2983 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2984 	if (sc->bge_txcnt == 0)
2985 		sc->bge_timer = 0;
2986 }
2987 
2988 #ifdef DEVICE_POLLING
2989 static void
2990 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2991 {
2992 	struct bge_softc *sc = ifp->if_softc;
2993 	uint32_t statusword;
2994 
2995 	BGE_LOCK(sc);
2996 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2997 		BGE_UNLOCK(sc);
2998 		return;
2999 	}
3000 
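	/*
	 * Read and clear the status word atomically so that an update
	 * arriving while we poll is not lost; the DMA map is synced
	 * around the access because the chip writes the status block
	 * via DMA.
	 */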
3001 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3002 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
3003 
3004 	statusword = atomic_readandclear_32(
3005 	    &sc->bge_ldata.bge_status_block->bge_status);
3006 
3007 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3008 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
3009 
	/*
	 * Note link event. It will be processed by the
	 * POLL_AND_CHECK_STATUS command.
	 */
3011 	if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3012 		sc->bge_link_evt++;
3013 
3014 	if (cmd == POLL_AND_CHECK_STATUS)
3015 		if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3016 		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3017 		    sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3018 			bge_link_upd(sc);
3019 
3020 	sc->rxcycles = count;
3021 	bge_rxeof(sc);
3022 	bge_txeof(sc);
3023 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3024 		bge_start_locked(ifp);
3025 
3026 	BGE_UNLOCK(sc);
3027 }
3028 #endif /* DEVICE_POLLING */
3029 
3030 static void
3031 bge_intr(void *xsc)
3032 {
3033 	struct bge_softc *sc;
3034 	struct ifnet *ifp;
3035 	uint32_t statusword;
3036 
3037 	sc = xsc;
3038 
3039 	BGE_LOCK(sc);
3040 
3041 	ifp = sc->bge_ifp;
3042 
3043 #ifdef DEVICE_POLLING
3044 	if (ifp->if_capenable & IFCAP_POLLING) {
3045 		BGE_UNLOCK(sc);
3046 		return;
3047 	}
3048 #endif
3049 
3050 	/*
3051 	 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO.  Don't
3052 	 * disable interrupts by writing nonzero like we used to, since with
3053 	 * our current organization this just gives complications and
3054 	 * pessimizations for re-enabling interrupts.  We used to have races
3055 	 * instead of the necessary complications.  Disabling interrupts
3056 	 * would just reduce the chance of a status update while we are
3057 	 * running (by switching to the interrupt-mode coalescence
3058 	 * parameters), but this chance is already very low so it is more
3059 	 * efficient to get another interrupt than prevent it.
3060 	 *
3061 	 * We do the ack first to ensure another interrupt if there is a
3062 	 * status update after the ack.  We don't check for the status
3063 	 * changing later because it is more efficient to get another
3064 	 * interrupt than prevent it, not quite as above (not checking is
3065 	 * a smaller optimization than not toggling the interrupt enable,
	 * since checking doesn't involve PCI accesses and toggling requires
3067 	 * the status check).  So toggling would probably be a pessimization
3068 	 * even with MSI.  It would only be needed for using a task queue.
3069 	 */
3070 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3071 
3072 	/*
3073 	 * Do the mandatory PCI flush as well as get the link status.
3074 	 */
3075 	statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
3076 
3077 	/* Make sure the descriptor ring indexes are coherent. */
3078 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3079 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
3080 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3081 	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
3082 
3083 	if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3084 	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3085 	    statusword || sc->bge_link_evt)
3086 		bge_link_upd(sc);
3087 
3088 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3089 		/* Check RX return ring producer/consumer. */
3090 		bge_rxeof(sc);
3091 
3092 		/* Check TX ring producer/consumer. */
3093 		bge_txeof(sc);
3094 	}
3095 
3096 	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3097 	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3098 		bge_start_locked(ifp);
3099 
3100 	BGE_UNLOCK(sc);
3101 }
3102 
3103 static void
3104 bge_asf_driver_up(struct bge_softc *sc)
3105 {
3106 	if (sc->bge_asf_mode & ASF_STACKUP) {
		/* Send ASF heartbeat approx. every 2s */
		if (sc->bge_asf_count)
			sc->bge_asf_count--;
3110 		else {
3111 			sc->bge_asf_count = 5;
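			/*
			 * Post the "driver alive" command, length and
			 * data words in the general communications
			 * area, then poke the CPU event register so
			 * the firmware notices them.
			 */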
3112 			bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
3113 			    BGE_FW_DRV_ALIVE);
3114 			bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
3115 			bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
			CSR_WRITE_4(sc, BGE_CPU_EVENT,
			    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
3118 		}
3119 	}
3120 }
3121 
3122 static void
3123 bge_tick(void *xsc)
3124 {
3125 	struct bge_softc *sc = xsc;
3126 	struct mii_data *mii = NULL;
3127 
3128 	BGE_LOCK_ASSERT(sc);
3129 
3130 	/* Synchronize with possible callout reset/stop. */
3131 	if (callout_pending(&sc->bge_stat_ch) ||
3132 	    !callout_active(&sc->bge_stat_ch))
		return;
3134 
3135 	if (BGE_IS_5705_PLUS(sc))
3136 		bge_stats_update_regs(sc);
3137 	else
3138 		bge_stats_update(sc);
3139 
3140 	if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3141 		mii = device_get_softc(sc->bge_miibus);
3142 		/* Don't mess with the PHY in IPMI/ASF mode */
3143 		if (!((sc->bge_asf_mode & ASF_STACKUP) && (sc->bge_link)))
3144 			mii_tick(mii);
3145 	} else {
3146 		/*
3147 		 * Since in TBI mode auto-polling can't be used we should poll
3148 		 * link status manually. Here we register pending link event
3149 		 * and trigger interrupt.
3150 		 */
3151 #ifdef DEVICE_POLLING
3152 		/* In polling mode we poll link state in bge_poll(). */
3153 		if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
3154 #endif
3155 		{
3156 		sc->bge_link_evt++;
3157 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3158 		}
3159 	}
3160 
3161 	bge_asf_driver_up(sc);
3162 	bge_watchdog(sc);
3163 
3164 	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3165 }
3166 
3167 static void
3168 bge_stats_update_regs(struct bge_softc *sc)
3169 {
3170 	struct ifnet *ifp;
3171 
3172 	ifp = sc->bge_ifp;
3173 
3174 	ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
3175 	    offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
3176 
3177 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3178 }
3179 
3180 static void
3181 bge_stats_update(struct bge_softc *sc)
3182 {
3183 	struct ifnet *ifp;
3184 	bus_size_t stats;
3185 	uint32_t cnt;	/* current register value */
3186 
3187 	ifp = sc->bge_ifp;
3188 
3189 	stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3190 
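	/*
	 * The statistics block lives in NIC memory and is read through
	 * the memory window.  The hardware counters are free-running,
	 * so accumulate only the delta since the previous poll.
	 */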
3191 #define	READ_STAT(sc, stats, stat) \
3192 	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
3193 
3194 	cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
3195 	ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
3196 	sc->bge_tx_collisions = cnt;
3197 
3198 	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
3199 	ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
3200 	sc->bge_rx_discards = cnt;
3201 
3202 	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
3203 	ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
3204 	sc->bge_tx_discards = cnt;
3205 
3206 #undef	READ_STAT
3207 }
3208 
3209 /*
3210  * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3211  * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3212  * but when such padded frames employ the bge IP/TCP checksum offload,
3213  * the hardware checksum assist gives incorrect results (possibly
3214  * from incorporating its own padding into the UDP/TCP checksum; who knows).
3215  * If we pad such runts with zeros, the onboard checksum comes out correct.
3216  */
3217 static __inline int
3218 bge_cksum_pad(struct mbuf *m)
3219 {
3220 	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
3221 	struct mbuf *last;
3222 
3223 	/* If there's only the packet-header and we can pad there, use it. */
3224 	if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
3225 	    M_TRAILINGSPACE(m) >= padlen) {
3226 		last = m;
3227 	} else {
3228 		/*
3229 		 * Walk packet chain to find last mbuf. We will either
3230 		 * pad there, or append a new mbuf and pad it.
3231 		 */
3232 		for (last = m; last->m_next != NULL; last = last->m_next);
3233 		if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
3234 			/* Allocate new empty mbuf, pad it. Compact later. */
3235 			struct mbuf *n;
3236 
3237 			MGET(n, M_DONTWAIT, MT_DATA);
3238 			if (n == NULL)
3239 				return (ENOBUFS);
3240 			n->m_len = 0;
3241 			last->m_next = n;
3242 			last = n;
3243 		}
3244 	}
3245 
3246 	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
3247 	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
3248 	last->m_len += padlen;
3249 	m->m_pkthdr.len += padlen;
3250 
3251 	return (0);
3252 }
3253 
3254 /*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3256  * pointers to descriptors.
3257  */
3258 static int
3259 bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
3260 {
3261 	bus_dma_segment_t	segs[BGE_NSEG_NEW];
3262 	bus_dmamap_t		map;
3263 	struct bge_tx_bd	*d;
3264 	struct mbuf		*m = *m_head;
3265 	uint32_t		idx = *txidx;
3266 	uint16_t		csum_flags;
3267 	int			nsegs, i, error;
3268 
3269 	csum_flags = 0;
3270 	if (m->m_pkthdr.csum_flags) {
3271 		if (m->m_pkthdr.csum_flags & CSUM_IP)
3272 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3273 		if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
3274 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3275 			if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
3276 			    (error = bge_cksum_pad(m)) != 0) {
3277 				m_freem(m);
3278 				*m_head = NULL;
3279 				return (error);
3280 			}
3281 		}
3282 		if (m->m_flags & M_LASTFRAG)
3283 			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
3284 		else if (m->m_flags & M_FRAG)
3285 			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
3286 	}
3287 
3288 	map = sc->bge_cdata.bge_tx_dmamap[idx];
3289 	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m, segs,
3290 	    &nsegs, BUS_DMA_NOWAIT);
3291 	if (error == EFBIG) {
3292 		m = m_defrag(m, M_DONTWAIT);
3293 		if (m == NULL) {
3294 			m_freem(*m_head);
3295 			*m_head = NULL;
3296 			return (ENOBUFS);
3297 		}
3298 		*m_head = m;
3299 		error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m,
3300 		    segs, &nsegs, BUS_DMA_NOWAIT);
3301 		if (error) {
3302 			m_freem(m);
3303 			*m_head = NULL;
3304 			return (error);
3305 		}
3306 	} else if (error != 0)
3307 		return (error);
3308 
3309 	/*
3310 	 * Sanity check: avoid coming within 16 descriptors
3311 	 * of the end of the ring.
3312 	 */
3313 	if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
3314 		bus_dmamap_unload(sc->bge_cdata.bge_mtag, map);
3315 		return (ENOBUFS);
3316 	}
3317 
3318 	bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
3319 
3320 	for (i = 0; ; i++) {
3321 		d = &sc->bge_ldata.bge_tx_ring[idx];
3322 		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
3323 		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
3324 		d->bge_len = segs[i].ds_len;
3325 		d->bge_flags = csum_flags;
3326 		if (i == nsegs - 1)
3327 			break;
3328 		BGE_INC(idx, BGE_TX_RING_CNT);
3329 	}
3330 
3331 	/* Mark the last segment as end of packet... */
3332 	d->bge_flags |= BGE_TXBDFLAG_END;
3333 
3334 	/* ... and put VLAN tag into first segment.  */
3335 	d = &sc->bge_ldata.bge_tx_ring[*txidx];
3336 	if (m->m_flags & M_VLANTAG) {
3337 		d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3338 		d->bge_vlan_tag = m->m_pkthdr.ether_vtag;
3339 	} else
3340 		d->bge_vlan_tag = 0;
3341 
3342 	/*
	 * Ensure that the map for this transmission
3344 	 * is placed at the array index of the last descriptor
3345 	 * in this chain.
3346 	 */
3347 	sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
3348 	sc->bge_cdata.bge_tx_dmamap[idx] = map;
3349 	sc->bge_cdata.bge_tx_chain[idx] = m;
3350 	sc->bge_txcnt += nsegs;
3351 
3352 	BGE_INC(idx, BGE_TX_RING_CNT);
3353 	*txidx = idx;
3354 
3355 	return (0);
3356 }
3357 
3358 /*
3359  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3360  * to the mbuf data regions directly in the transmit descriptors.
3361  */
3362 static void
3363 bge_start_locked(struct ifnet *ifp)
3364 {
3365 	struct bge_softc *sc;
3366 	struct mbuf *m_head = NULL;
3367 	uint32_t prodidx;
3368 	int count = 0;
3369 
3370 	sc = ifp->if_softc;
3371 
3372 	if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3373 		return;
3374 
3375 	prodidx = sc->bge_tx_prodidx;
3376 
	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3378 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3379 		if (m_head == NULL)
3380 			break;
3381 
3382 		/*
3383 		 * XXX
3384 		 * The code inside the if() block is never reached since we
3385 		 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
3386 		 * requests to checksum TCP/UDP in a fragmented packet.
3387 		 *
3388 		 * XXX
3389 		 * safety overkill.  If this is a fragmented packet chain
3390 		 * with delayed TCP/UDP checksums, then only encapsulate
3391 		 * it if we have enough descriptors to handle the entire
3392 		 * chain at once.
3393 		 * (paranoia -- may not actually be needed)
3394 		 */
3395 		if ((m_head->m_flags & M_FIRSTFRAG) &&
3396 		    (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) {
3397 			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3398 			    m_head->m_pkthdr.csum_data + 16) {
3399 				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3400 				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3401 				break;
3402 			}
3403 		}
3404 
3405 		/*
3406 		 * Pack the data into the transmit ring. If we
3407 		 * don't have room, set the OACTIVE flag and wait
3408 		 * for the NIC to drain the ring.
3409 		 */
3410 		if (bge_encap(sc, &m_head, &prodidx)) {
3411 			if (m_head == NULL)
3412 				break;
3413 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3414 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3415 			break;
3416 		}
3417 		++count;
3418 
3419 		/*
3420 		 * If there's a BPF listener, bounce a copy of this frame
3421 		 * to him.
3422 		 */
3423 		ETHER_BPF_MTAP(ifp, m_head);
3424 	}
3425 
3426 	/* No packets were dequeued. */
3427 	if (count == 0)
3428 		return;
3429 
3430 	/* Transmit. */
3431 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3432 	/* 5700 BX errata: the producer index must be written twice. */
3433 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
3434 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3435 
3436 	sc->bge_tx_prodidx = prodidx;
3437 
3438 	/*
3439 	 * Set a timeout in case the chip goes out to lunch.
3440 	 */
3441 	sc->bge_timer = 5;
3442 }
3443 
3444 /*
3445  * Wrapper that acquires the driver lock around bge_start_locked().
3446  */
3448 static void
3449 bge_start(struct ifnet *ifp)
3450 {
3451 	struct bge_softc *sc;
3452 
3453 	sc = ifp->if_softc;
3454 	BGE_LOCK(sc);
3455 	bge_start_locked(ifp);
3456 	BGE_UNLOCK(sc);
3457 }
3458 
3459 static void
3460 bge_init_locked(struct bge_softc *sc)
3461 {
3462 	struct ifnet *ifp;
3463 	uint16_t *m;
3464 
3465 	BGE_LOCK_ASSERT(sc);
3466 
3467 	ifp = sc->bge_ifp;
3468 
3469 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3470 		return;
3471 
3472 	/* Cancel pending I/O and flush buffers. */
3473 	bge_stop(sc);
3474 
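	/*
	 * Perform the ASF/IPMI firmware handshake around the reset so
	 * any management firmware is notified and keeps running.
	 */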
3475 	bge_stop_fw(sc);
3476 	bge_sig_pre_reset(sc, BGE_RESET_START);
3477 	bge_reset(sc);
3478 	bge_sig_legacy(sc, BGE_RESET_START);
3479 	bge_sig_post_reset(sc, BGE_RESET_START);
3480 
3481 	bge_chipinit(sc);
3482 
3483 	/*
3484 	 * Init the various state machines, ring
3485 	 * control blocks and firmware.
3486 	 */
3487 	if (bge_blockinit(sc)) {
3488 		device_printf(sc->bge_dev, "initialization failure\n");
3489 		return;
3490 	}
3491 
3494 	/* Specify the maximum RX frame length (MTU plus link overhead). */
3495 	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3496 	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
3497 
3498 	/* Load our MAC address. */
3499 	m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
3500 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3501 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3502 
3503 	/* Program promiscuous mode. */
3504 	bge_setpromisc(sc);
3505 
3506 	/* Program multicast filter. */
3507 	bge_setmulti(sc);
3508 
3509 	/* Init RX ring. */
3510 	bge_init_rx_ring_std(sc);
3511 
3512 	/*
3513 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3514 	 * memory to ensure that the chip has in fact read the first
3515 	 * entry of the ring.
3516 	 */
3517 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3518 		uint32_t		v, i;
3519 		for (i = 0; i < 10; i++) {
3520 			DELAY(20);
3521 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3522 			if (v == (MCLBYTES - ETHER_ALIGN))
3523 				break;
3524 		}
3525 		if (i == 10)
3526 			device_printf(sc->bge_dev,
3527 			    "5705 A0 chip failed to load RX ring\n");
3528 	}
3529 
3530 	/* Init jumbo RX ring. */
3531 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3532 		bge_init_rx_ring_jumbo(sc);
3533 
3534 	/* Init our RX return ring index. */
3535 	sc->bge_rx_saved_considx = 0;
3536 
3537 	/* Init our RX/TX stat counters. */
3538 	sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
3539 
3540 	/* Init TX ring. */
3541 	bge_init_tx_ring(sc);
3542 
3543 	/* Turn on transmitter. */
3544 	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3545 
3546 	/* Turn on receiver. */
3547 	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3548 
3549 	/* Tell firmware we're alive. */
3550 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3551 
3552 #ifdef DEVICE_POLLING
3553 	/* Disable interrupts if we are polling. */
3554 	if (ifp->if_capenable & IFCAP_POLLING) {
3555 		BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3556 		    BGE_PCIMISCCTL_MASK_PCI_INTR);
3557 		CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3558 	} else
3559 #endif
3560 
3561 	/* Enable host interrupts. */
3562 	{
3563 		BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3564 		BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3565 		CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3566 	}
3567 
3568 	bge_ifmedia_upd_locked(ifp);
3569 
3570 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3571 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3572 
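	/* Start the once-a-second statistics update and link check. */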
3573 	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
3574 }
3575 
3576 static void
3577 bge_init(void *xsc)
3578 {
3579 	struct bge_softc *sc = xsc;
3580 
3581 	BGE_LOCK(sc);
3582 	bge_init_locked(sc);
3583 	BGE_UNLOCK(sc);
3584 }
3585 
3586 /*
3587  * Set media options.
3588  */
3589 static int
3590 bge_ifmedia_upd(struct ifnet *ifp)
3591 {
3592 	struct bge_softc *sc = ifp->if_softc;
3593 	int res;
3594 
3595 	BGE_LOCK(sc);
3596 	res = bge_ifmedia_upd_locked(ifp);
3597 	BGE_UNLOCK(sc);
3598 
3599 	return (res);
3600 }
3601 
3602 static int
3603 bge_ifmedia_upd_locked(struct ifnet *ifp)
3604 {
3605 	struct bge_softc *sc = ifp->if_softc;
3606 	struct mii_data *mii;
3607 	struct ifmedia *ifm;
3608 
3609 	BGE_LOCK_ASSERT(sc);
3610 
3611 	ifm = &sc->bge_ifmedia;
3612 
3613 	/* If this is a 1000baseX NIC, enable the TBI port. */
3614 	if (sc->bge_flags & BGE_FLAG_TBI) {
3615 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3616 			return (EINVAL);
3617 		switch (IFM_SUBTYPE(ifm->ifm_media)) {
3618 		case IFM_AUTO:
3619 			/*
3620 			 * The BCM5704 ASIC appears to have a special
3621 			 * mechanism for programming the autoneg
3622 			 * advertisement registers in TBI mode.
3623 			 */
3624 			if (bge_fake_autoneg == 0 &&
3625 			    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
3626 				uint32_t sgdig;
3627 				CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3628 				sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3629 				sgdig |= BGE_SGDIGCFG_AUTO|
3630 				    BGE_SGDIGCFG_PAUSE_CAP|
3631 				    BGE_SGDIGCFG_ASYM_PAUSE;
3632 				CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3633 				    sgdig|BGE_SGDIGCFG_SEND);
3634 				DELAY(5);
3635 				CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3636 			}
3637 			break;
3638 		case IFM_1000_SX:
3639 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3640 				BGE_CLRBIT(sc, BGE_MAC_MODE,
3641 				    BGE_MACMODE_HALF_DUPLEX);
3642 			} else {
3643 				BGE_SETBIT(sc, BGE_MAC_MODE,
3644 				    BGE_MACMODE_HALF_DUPLEX);
3645 			}
3646 			break;
3647 		default:
3648 			return (EINVAL);
3649 		}
3650 		return (0);
3651 	}
3652 
3653 	sc->bge_link_evt++;
3654 	mii = device_get_softc(sc->bge_miibus);
3655 	if (mii->mii_instance) {
3656 		struct mii_softc *miisc;
3657 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3658 			mii_phy_reset(miisc);
3660 	}
3661 	mii_mediachg(mii);
3662 
3663 	return (0);
3664 }
3665 
3666 /*
3667  * Report current media status.
3668  */
3669 static void
3670 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3671 {
3672 	struct bge_softc *sc = ifp->if_softc;
3673 	struct mii_data *mii;
3674 
3675 	BGE_LOCK(sc);
3676 
3677 	if (sc->bge_flags & BGE_FLAG_TBI) {
3678 		ifmr->ifm_status = IFM_AVALID;
3679 		ifmr->ifm_active = IFM_ETHER;
3680 		if (CSR_READ_4(sc, BGE_MAC_STS) &
3681 		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
3682 			ifmr->ifm_status |= IFM_ACTIVE;
3683 		} else {
3684 			ifmr->ifm_active |= IFM_NONE;
3685 			BGE_UNLOCK(sc);
3686 			return;
3687 		}
3688 		ifmr->ifm_active |= IFM_1000_SX;
3689 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3690 			ifmr->ifm_active |= IFM_HDX;
3691 		else
3692 			ifmr->ifm_active |= IFM_FDX;
3693 		BGE_UNLOCK(sc);
3694 		return;
3695 	}
3696 
3697 	mii = device_get_softc(sc->bge_miibus);
3698 	mii_pollstat(mii);
3699 	ifmr->ifm_active = mii->mii_media_active;
3700 	ifmr->ifm_status = mii->mii_media_status;
3701 
3702 	BGE_UNLOCK(sc);
3703 }
3704 
3705 static int
3706 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3707 {
3708 	struct bge_softc *sc = ifp->if_softc;
3709 	struct ifreq *ifr = (struct ifreq *) data;
3710 	struct mii_data *mii;
3711 	int flags, mask, error = 0;
3712 
3713 	switch (command) {
3714 	case SIOCSIFMTU:
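		/*
		 * Validate the requested MTU against the chip's jumbo
		 * capability, then restart the interface so the RX rings
		 * are rebuilt for the new frame size.
		 */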
3715 		if (ifr->ifr_mtu < ETHERMIN ||
3716 		    ((BGE_IS_JUMBO_CAPABLE(sc)) &&
3717 		    ifr->ifr_mtu > BGE_JUMBO_MTU) ||
3718 		    ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
3719 		    ifr->ifr_mtu > ETHERMTU))
3720 			error = EINVAL;
3721 		else if (ifp->if_mtu != ifr->ifr_mtu) {
3722 			ifp->if_mtu = ifr->ifr_mtu;
3723 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3724 			bge_init(sc);
3725 		}
3726 		break;
3727 	case SIOCSIFFLAGS:
3728 		BGE_LOCK(sc);
3729 		if (ifp->if_flags & IFF_UP) {
3730 			/*
3731 			 * If only the state of the PROMISC flag changed,
3732 			 * then just use the 'set promisc mode' command
3733 			 * instead of reinitializing the entire NIC. Doing
3734 			 * a full re-init means reloading the firmware and
3735 			 * waiting for it to start up, which may take a
3736 			 * second or two.  Similarly for ALLMULTI.
3737 			 */
3738 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3739 				flags = ifp->if_flags ^ sc->bge_if_flags;
3740 				if (flags & IFF_PROMISC)
3741 					bge_setpromisc(sc);
3742 				if (flags & IFF_ALLMULTI)
3743 					bge_setmulti(sc);
3744 			} else
3745 				bge_init_locked(sc);
3746 		} else {
3747 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3748 				bge_stop(sc);
3749 			}
3750 		}
3751 		sc->bge_if_flags = ifp->if_flags;
3752 		BGE_UNLOCK(sc);
3753 		error = 0;
3754 		break;
3755 	case SIOCADDMULTI:
3756 	case SIOCDELMULTI:
3757 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3758 			BGE_LOCK(sc);
3759 			bge_setmulti(sc);
3760 			BGE_UNLOCK(sc);
3761 			error = 0;
3762 		}
3763 		break;
3764 	case SIOCSIFMEDIA:
3765 	case SIOCGIFMEDIA:
3766 		if (sc->bge_flags & BGE_FLAG_TBI) {
3767 			error = ifmedia_ioctl(ifp, ifr,
3768 			    &sc->bge_ifmedia, command);
3769 		} else {
3770 			mii = device_get_softc(sc->bge_miibus);
3771 			error = ifmedia_ioctl(ifp, ifr,
3772 			    &mii->mii_media, command);
3773 		}
3774 		break;
3775 	case SIOCSIFCAP:
3776 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3777 #ifdef DEVICE_POLLING
3778 		if (mask & IFCAP_POLLING) {
3779 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
3780 				error = ether_poll_register(bge_poll, ifp);
3781 				if (error)
3782 					return (error);
3783 				BGE_LOCK(sc);
3784 				BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
3785 				    BGE_PCIMISCCTL_MASK_PCI_INTR);
3786 				CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3787 				ifp->if_capenable |= IFCAP_POLLING;
3788 				BGE_UNLOCK(sc);
3789 			} else {
3790 				error = ether_poll_deregister(ifp);
3791 				/* Enable interrupt even in error case */
3792 				BGE_LOCK(sc);
3793 				BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
3794 				    BGE_PCIMISCCTL_MASK_PCI_INTR);
3795 				CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3796 				ifp->if_capenable &= ~IFCAP_POLLING;
3797 				BGE_UNLOCK(sc);
3798 			}
3799 		}
3800 #endif
3801 		if (mask & IFCAP_HWCSUM) {
3802 			ifp->if_capenable ^= IFCAP_HWCSUM;
3803 			if (IFCAP_HWCSUM & ifp->if_capenable &&
3804 			    IFCAP_HWCSUM & ifp->if_capabilities)
3805 				ifp->if_hwassist = BGE_CSUM_FEATURES;
3806 			else
3807 				ifp->if_hwassist = 0;
3808 			VLAN_CAPABILITIES(ifp);
3809 		}
3810 		break;
3811 	default:
3812 		error = ether_ioctl(ifp, command, data);
3813 		break;
3814 	}
3815 
3816 	return (error);
3817 }
3818 
3819 static void
3820 bge_watchdog(struct bge_softc *sc)
3821 {
3822 	struct ifnet *ifp;
3823 
3824 	BGE_LOCK_ASSERT(sc);
3825 
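	/*
	 * bge_timer == 0 means the watchdog is disarmed; otherwise
	 * count down and time out only when the counter reaches zero.
	 */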
3826 	if (sc->bge_timer == 0 || --sc->bge_timer)
3827 		return;
3828 
3829 	ifp = sc->bge_ifp;
3830 
3831 	if_printf(ifp, "watchdog timeout -- resetting\n");
3832 
3833 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3834 	bge_init_locked(sc);
3835 
3836 	ifp->if_oerrors++;
3837 }
3838 
3839 /*
3840  * Stop the adapter and free any mbufs allocated to the
3841  * RX and TX lists.
3842  */
3843 static void
3844 bge_stop(struct bge_softc *sc)
3845 {
3846 	struct ifnet *ifp;
3847 	struct ifmedia_entry *ifm;
3848 	struct mii_data *mii = NULL;
3849 	int mtmp, itmp;
3850 
3851 	BGE_LOCK_ASSERT(sc);
3852 
3853 	ifp = sc->bge_ifp;
3854 
3855 	if ((sc->bge_flags & BGE_FLAG_TBI) == 0)
3856 		mii = device_get_softc(sc->bge_miibus);
3857 
3858 	callout_stop(&sc->bge_stat_ch);
3859 
3860 	/*
3861 	 * Disable all of the receiver blocks.
3862 	 */
3863 	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3864 	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3865 	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3866 	if (!(BGE_IS_5705_PLUS(sc)))
3867 		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3868 	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3869 	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3870 	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3871 
3872 	/*
3873 	 * Disable all of the transmit blocks.
3874 	 */
3875 	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3876 	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3877 	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3878 	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3879 	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3880 	if (!(BGE_IS_5705_PLUS(sc)))
3881 		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3882 	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3883 
3884 	/*
3885 	 * Shut down all of the memory managers and related
3886 	 * state machines.
3887 	 */
3888 	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3889 	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3890 	if (!(BGE_IS_5705_PLUS(sc)))
3891 		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3892 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3893 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3894 	if (!(BGE_IS_5705_PLUS(sc))) {
3895 		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3896 		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3897 	}
3898 
3899 	/* Disable host interrupts. */
3900 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3901 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3902 
3903 	/*
3904 	 * Tell firmware we're shutting down.
3905 	 */
3907 	bge_stop_fw(sc);
3908 	bge_sig_pre_reset(sc, BGE_RESET_STOP);
3909 	bge_reset(sc);
3910 	bge_sig_legacy(sc, BGE_RESET_STOP);
3911 	bge_sig_post_reset(sc, BGE_RESET_STOP);
3912 
3913 	/*
3914 	 * Keep the ASF firmware running if up.
3915 	 */
3916 	if (sc->bge_asf_mode & ASF_STACKUP)
3917 		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3918 	else
3919 		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3920 
3921 	/* Free the RX lists. */
3922 	bge_free_rx_ring_std(sc);
3923 
3924 	/* Free jumbo RX list. */
3925 	if (BGE_IS_JUMBO_CAPABLE(sc))
3926 		bge_free_rx_ring_jumbo(sc);
3927 
3928 	/* Free TX buffers. */
3929 	bge_free_tx_ring(sc);
3930 
3931 	/*
3932 	 * Isolate/power down the PHY, but leave the media selection
3933 	 * unchanged so that things will be put back to normal when
3934 	 * we bring the interface back up.
3935 	 */
3936 	if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3937 		itmp = ifp->if_flags;
3938 		ifp->if_flags |= IFF_UP;
3939 		/*
3940 		 * If we are called from bge_detach(), mii is already NULL.
3941 		 */
3942 		if (mii != NULL) {
3943 			ifm = mii->mii_media.ifm_cur;
3944 			mtmp = ifm->ifm_media;
3945 			ifm->ifm_media = IFM_ETHER|IFM_NONE;
3946 			mii_mediachg(mii);
3947 			ifm->ifm_media = mtmp;
3948 		}
3949 		ifp->if_flags = itmp;
3950 	}
3951 
3952 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3953 
3954 	/* Clear MAC's link state (PHY may still have link UP). */
3955 	if (bootverbose && sc->bge_link)
3956 		if_printf(sc->bge_ifp, "link DOWN\n");
3957 	sc->bge_link = 0;
3958 
3959 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3960 }
3961 
3962 /*
3963  * Stop all chip I/O so that the kernel's probe routines don't
3964  * get confused by errant DMAs when rebooting.
3965  */
3966 static void
3967 bge_shutdown(device_t dev)
3968 {
3969 	struct bge_softc *sc;
3970 
3971 	sc = device_get_softc(dev);
3972 
3973 	BGE_LOCK(sc);
3974 	bge_stop(sc);
3975 	bge_reset(sc);
3976 	BGE_UNLOCK(sc);
3977 }
3978 
3979 static int
3980 bge_suspend(device_t dev)
3981 {
3982 	struct bge_softc *sc;
3983 
3984 	sc = device_get_softc(dev);
3985 	BGE_LOCK(sc);
3986 	bge_stop(sc);
3987 	BGE_UNLOCK(sc);
3988 
3989 	return (0);
3990 }
3991 
3992 static int
3993 bge_resume(device_t dev)
3994 {
3995 	struct bge_softc *sc;
3996 	struct ifnet *ifp;
3997 
3998 	sc = device_get_softc(dev);
3999 	BGE_LOCK(sc);
4000 	ifp = sc->bge_ifp;
4001 	if (ifp->if_flags & IFF_UP) {
4002 		bge_init_locked(sc);
4003 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4004 			bge_start_locked(ifp);
4005 	}
4006 	BGE_UNLOCK(sc);
4007 
4008 	return (0);
4009 }
4010 
4011 static void
4012 bge_link_upd(struct bge_softc *sc)
4013 {
4014 	struct mii_data *mii;
4015 	uint32_t link, status;
4016 
4017 	BGE_LOCK_ASSERT(sc);
4018 
4019 	/* Clear 'pending link event' flag. */
4020 	sc->bge_link_evt = 0;
4021 
4022 	/*
4023 	 * Process link state changes.
4024 	 * Grrr. The link status word in the status block does
4025 	 * not work correctly on the BCM5700 rev AX and BX chips,
4026 	 * according to all available information. Hence, we have
4027 	 * to enable MII interrupts in order to properly obtain
4028 	 * async link changes. Unfortunately, this also means that
4029 	 * we have to read the MAC status register to detect link
4030 	 * changes, thereby adding an additional register access to
4031 	 * the interrupt handler.
4032 	 *
4033 	 * XXX: perhaps link state detection procedure used for
4034 	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
4035 	 */
4036 
4037 	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4038 	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
4039 		status = CSR_READ_4(sc, BGE_MAC_STS);
4040 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
4041 			mii = device_get_softc(sc->bge_miibus);
4042 			mii_pollstat(mii);
4043 			if (!sc->bge_link &&
4044 			    mii->mii_media_status & IFM_ACTIVE &&
4045 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4046 				sc->bge_link++;
4047 				if (bootverbose)
4048 					if_printf(sc->bge_ifp, "link UP\n");
4049 			} else if (sc->bge_link &&
4050 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
4051 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4052 				sc->bge_link = 0;
4053 				if (bootverbose)
4054 					if_printf(sc->bge_ifp, "link DOWN\n");
4055 			}
4056 
4057 			/* Clear the interrupt. */
4058 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
4059 			    BGE_EVTENB_MI_INTERRUPT);
4060 			bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
4061 			bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
4062 			    BRGPHY_INTRS);
4063 		}
4064 		return;
4065 	}
4066 
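	/*
	 * TBI (1000baseX) adapters have no copper PHY; derive link
	 * state from the MAC's PCS sync status instead of the MII.
	 */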
4067 	if (sc->bge_flags & BGE_FLAG_TBI) {
4068 		status = CSR_READ_4(sc, BGE_MAC_STS);
4069 		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
4070 			if (!sc->bge_link) {
4071 				sc->bge_link++;
4072 				if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
4073 					BGE_CLRBIT(sc, BGE_MAC_MODE,
4074 					    BGE_MACMODE_TBI_SEND_CFGS);
4075 				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
4076 				if (bootverbose)
4077 					if_printf(sc->bge_ifp, "link UP\n");
4078 				if_link_state_change(sc->bge_ifp,
4079 				    LINK_STATE_UP);
4080 			}
4081 		} else if (sc->bge_link) {
4082 			sc->bge_link = 0;
4083 			if (bootverbose)
4084 				if_printf(sc->bge_ifp, "link DOWN\n");
4085 			if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
4086 		}
4087 	/* Discard MII/GMII link events if MI auto-polling is disabled. */
4088 	} else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
4089 		/*
4090 		 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit
4091 		 * in status word always set. Workaround this bug by reading
4092 		 * PHY link status directly.
4093 		 */
4094 		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
4095 
4096 		if (link != sc->bge_link ||
4097 		    sc->bge_asicrev == BGE_ASICREV_BCM5700) {
4098 			mii = device_get_softc(sc->bge_miibus);
4099 			mii_pollstat(mii);
4100 			if (!sc->bge_link &&
4101 			    mii->mii_media_status & IFM_ACTIVE &&
4102 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
4103 				sc->bge_link++;
4104 				if (bootverbose)
4105 					if_printf(sc->bge_ifp, "link UP\n");
4106 			} else if (sc->bge_link &&
4107 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
4108 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
4109 				sc->bge_link = 0;
4110 				if (bootverbose)
4111 					if_printf(sc->bge_ifp, "link DOWN\n");
4112 			}
4113 		}
4114 	}
4115 
4116 	/* Clear the attention. */
4117 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
4118 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
4119 	    BGE_MACSTAT_LINK_CHANGED);
4120 }
4121 
4122 static void
4123 bge_add_sysctls(struct bge_softc *sc)
4124 {
4125 	struct sysctl_ctx_list *ctx;
4126 	struct sysctl_oid_list *children;
4127 
4128 	ctx = device_get_sysctl_ctx(sc->bge_dev);
4129 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
4130 
4131 #ifdef BGE_REGISTER_DEBUG
4132 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
4133 	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
4134 	    "Debug Information");
4135 
4136 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
4137 	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
4138 	    "Register Read");
4139 
4140 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
4141 	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
4142 	    "Memory Read");
4143 
4144 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "stat_IfHcInOctets",
4145 	    CTLFLAG_RD,
4146 	    &sc->bge_ldata.bge_stats->rxstats.ifHCInOctets.bge_addr_lo,
4147 	    "Bytes received");
4148 
4149 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "stat_IfHcOutOctets",
4150 	    CTLFLAG_RD,
4151 	    &sc->bge_ldata.bge_stats->txstats.ifHCOutOctets.bge_addr_lo,
4152 	    "Bytes received");
4153 #endif
4154 }
4155 
4156 #ifdef BGE_REGISTER_DEBUG
4157 static int
4158 bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
4159 {
4160 	struct bge_softc *sc;
4161 	uint16_t *sbdata;
4162 	int error;
4163 	int result;
4164 	int i, j;
4165 
4166 	result = -1;
4167 	error = sysctl_handle_int(oidp, &result, 0, req);
4168 	if (error || (req->newptr == NULL))
4169 		return (error);
4170 
4171 	if (result == 1) {
4172 		sc = (struct bge_softc *)arg1;
4173 
4174 		sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
4175 		printf("Status Block:\n");
4176 		for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) {
4177 			printf("%06x:", i);
4178 			for (j = 0; j < 8; j++) {
4179 				printf(" %04x", sbdata[i]);
4180 				i += 4;
4181 			}
4182 			printf("\n");
4183 		}
4184 
4185 		printf("Registers:\n");
4186 		for (i = 0x800; i < 0xa00; ) {
4187 			printf("%06x:", i);
4188 			for (j = 0; j < 8; j++) {
4189 				printf(" %08x", CSR_READ_4(sc, i));
4190 				i += 4;
4191 			}
4192 			printf("\n");
4193 		}
4194 
4195 		printf("Hardware Flags:\n");
4196 		if (BGE_IS_575X_PLUS(sc))
4197 			printf(" - 575X Plus\n");
4198 		if (BGE_IS_5705_PLUS(sc))
4199 			printf(" - 5705 Plus\n");
4200 		if (BGE_IS_5714_FAMILY(sc))
4201 			printf(" - 5714 Family\n");
4202 		if (BGE_IS_5700_FAMILY(sc))
4203 			printf(" - 5700 Family\n");
4204 		if (sc->bge_flags & BGE_FLAG_JUMBO)
4205 			printf(" - Supports Jumbo Frames\n");
4206 		if (sc->bge_flags & BGE_FLAG_PCIX)
4207 			printf(" - PCI-X Bus\n");
4208 		if (sc->bge_flags & BGE_FLAG_PCIE)
4209 			printf(" - PCI Express Bus\n");
4210 		if (sc->bge_flags & BGE_FLAG_NO_3LED)
4211 			printf(" - No 3 LEDs\n");
4212 		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
4213 			printf(" - RX Alignment Bug\n");
4214 	}
4215 
4216 	return (error);
4217 }
4218 
4219 static int
4220 bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
4221 {
4222 	struct bge_softc *sc;
4223 	int error;
4224 	int result;
4225 	uint32_t val;
4226 
4227 	result = -1;
4228 	error = sysctl_handle_int(oidp, &result, 0, req);
4229 	if (error || (req->newptr == NULL))
4230 		return (error);
4231 
4232 	if (result >= 0 && result < 0x8000) {
4233 		sc = (struct bge_softc *)arg1;
4234 		val = CSR_READ_4(sc, result);
4235 		printf("reg 0x%06X = 0x%08X\n", result, val);
4236 	}
4237 
4238 	return (error);
4239 }
4240 
4241 static int
4242 bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
4243 {
4244 	struct bge_softc *sc;
4245 	int error;
4246 	int result;
4247 	uint32_t val;
4248 
4249 	result = -1;
4250 	error = sysctl_handle_int(oidp, &result, 0, req);
4251 	if (error || (req->newptr == NULL))
4252 		return (error);
4253 
4254 	if (result >= 0 && result < 0x8000) {
4255 		sc = (struct bge_softc *)arg1;
4256 		val = bge_readmem_ind(sc, result);
4257 		printf("mem 0x%06X = 0x%08X\n", result, val);
4258 	}
4259 
4260 	return (error);
4261 }
4262 #endif
4263