xref: /freebsd/sys/dev/bce/if_bce.c (revision db612abe8df3355d1eb23bb3b50fdd97bc21e979)
1 /*-
2  * Copyright (c) 2006-2008 Broadcom Corporation
3  *	David Christensen <davidch@broadcom.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written consent.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 /*
35  * The following controllers are supported by this driver:
36  *   BCM5706C A2, A3
37  *   BCM5706S A2, A3
38  *   BCM5708C B1, B2
39  *   BCM5708S B1, B2
40  *
41  * The following controllers are not supported by this driver:
42  *   BCM5706C A0, A1 (pre-production)
43  *   BCM5706S A0, A1 (pre-production)
44  *   BCM5708C A0, B0 (pre-production)
45  *   BCM5708S A0, B0 (pre-production)
46  */
47 
48 #include "opt_bce.h"
49 
50 #include <dev/bce/if_bcereg.h>
51 #include <dev/bce/if_bcefw.h>
52 
53 /****************************************************************************/
54 /* BCE Debug Options                                                        */
55 /****************************************************************************/
56 #ifdef BCE_DEBUG
57 	u32 bce_debug = BCE_WARN;
58 
59 	/*          0 = Never              */
60 	/*          1 = 1 in 2,147,483,648 */
61 	/*        256 = 1 in     8,388,608 */
62 	/*       2048 = 1 in     1,048,576 */
63 	/*      65536 = 1 in        32,768 */
64 	/*    1048576 = 1 in         2,048 */
65 	/*  268435456 = 1 in             8 */
66 	/*  536870912 = 1 in             4 */
67 	/* 1073741824 = 1 in             2 */
68 
69 	/* Controls how often the l2_fhdr frame error check will fail. */
70 	int bce_debug_l2fhdr_status_check = 0;
71 
72 	/* Controls how often the unexpected attention check will fail. */
73 	int bce_debug_unexpected_attention = 0;
74 
75 	/* Controls how often to simulate an mbuf allocation failure. */
76 	int bce_debug_mbuf_allocation_failure = 0;
77 
78 	/* Controls how often to simulate a DMA mapping failure. */
79 	int bce_debug_dma_map_addr_failure = 0;
80 
81 	/* Controls how often to simulate a bootcode failure. */
82 	int bce_debug_bootcode_running_failure = 0;
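
	/*
	 * The table above implies that a control value of N produces a
	 * simulated failure roughly N times out of every 2^31 checks.  A
	 * minimal sketch of such a check, assuming a 31-bit pseudo-random
	 * draw via arc4random(9) (an illustration only, not the driver's
	 * actual debug macro):
	 *
	 *	if ((arc4random() & 0x7fffffffU) <
	 *	    (u32)bce_debug_mbuf_allocation_failure)
	 *		return (NULL);
	 */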
83 #endif
84 
85 
86 /****************************************************************************/
87 /* PCI Device ID Table                                                      */
88 /*                                                                          */
89 /* Used by bce_probe() to identify the devices supported by this driver.    */
90 /****************************************************************************/
91 #define BCE_DEVDESC_MAX		64
92 
93 static struct bce_type bce_devs[] = {
94 	/* BCM5706C Controllers and OEM boards. */
95 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
96 		"HP NC370T Multifunction Gigabit Server Adapter" },
97 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
98 		"HP NC370i Multifunction Gigabit Server Adapter" },
99 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
100 		"Broadcom NetXtreme II BCM5706 1000Base-T" },
101 
102 	/* BCM5706S controllers and OEM boards. */
103 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
104 		"HP NC370F Multifunction Gigabit Server Adapter" },
105 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
106 		"Broadcom NetXtreme II BCM5706 1000Base-SX" },
107 
108 	/* BCM5708C controllers and OEM boards. */
109 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
110 		"Broadcom NetXtreme II BCM5708 1000Base-T" },
111 
112 	/* BCM5708S controllers and OEM boards. */
113 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  PCI_ANY_ID,  PCI_ANY_ID,
114 		"Broadcom NetXtreme II BCM5708 1000Base-SX" },
115 	{ 0, 0, 0, 0, NULL }
116 };
117 
118 
119 /****************************************************************************/
120 /* Supported Flash NVRAM device data.                                       */
121 /****************************************************************************/
122 static struct flash_spec flash_table[] =
123 {
124 	/* Slow EEPROM */
125 	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
126 	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
127 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
128 	 "EEPROM - slow"},
129 	/* Expansion entry 0001 */
130 	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
131 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
132 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
133 	 "Entry 0001"},
134 	/* Saifun SA25F010 (non-buffered flash) */
135 	/* strap, cfg1, & write1 need updates */
136 	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
137 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
138 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
139 	 "Non-buffered flash (128kB)"},
140 	/* Saifun SA25F020 (non-buffered flash) */
141 	/* strap, cfg1, & write1 need updates */
142 	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
143 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
144 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
145 	 "Non-buffered flash (256kB)"},
146 	/* Expansion entry 0100 */
147 	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
148 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
149 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
150 	 "Entry 0100"},
151 	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
152 	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
153 	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
154 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
155 	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
156 	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
157 	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
158 	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
159 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
160 	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
161 	/* Saifun SA25F005 (non-buffered flash) */
162 	/* strap, cfg1, & write1 need updates */
163 	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
164 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
165 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
166 	 "Non-buffered flash (64kB)"},
167 	/* Fast EEPROM */
168 	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
169 	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
170 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
171 	 "EEPROM - fast"},
172 	/* Expansion entry 1001 */
173 	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
174 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
175 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
176 	 "Entry 1001"},
177 	/* Expansion entry 1010 */
178 	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
179 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
180 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
181 	 "Entry 1010"},
182 	/* ATMEL AT45DB011B (buffered flash) */
183 	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
184 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
185 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
186 	 "Buffered flash (128kB)"},
187 	/* Expansion entry 1100 */
188 	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
189 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
190 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
191 	 "Entry 1100"},
192 	/* Expansion entry 1101 */
193 	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
194 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
196 	 "Entry 1101"},
197 	/* Atmel Expansion entry 1110 */
198 	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
199 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
200 	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
201 	 "Entry 1110 (Atmel)"},
202 	/* ATMEL AT45DB021B (buffered flash) */
203 	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
204 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
205 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
206 	 "Buffered flash (256kB)"},
207 };
208 
209 
210 /****************************************************************************/
211 /* FreeBSD device entry points.                                             */
212 /****************************************************************************/
213 static int  bce_probe				(device_t);
214 static int  bce_attach				(device_t);
215 static int  bce_detach				(device_t);
216 static int  bce_shutdown			(device_t);
217 
218 
219 /****************************************************************************/
220 /* BCE Debug Data Structure Dump Routines                                   */
221 /****************************************************************************/
222 #ifdef BCE_DEBUG
223 static u32  bce_ctx_rd				(struct bce_softc *, u32, u32);
224 static void bce_dump_mbuf 			(struct bce_softc *, struct mbuf *);
225 static void bce_dump_tx_mbuf_chain	(struct bce_softc *, u16, int);
226 static void bce_dump_rx_mbuf_chain	(struct bce_softc *, u16, int);
227 static void bce_dump_pg_mbuf_chain	(struct bce_softc *, u16, int);
228 static void bce_dump_txbd			(struct bce_softc *, int, struct tx_bd *);
229 static void bce_dump_rxbd			(struct bce_softc *, int, struct rx_bd *);
230 static void bce_dump_pgbd			(struct bce_softc *, int, struct rx_bd *);
231 static void bce_dump_l2fhdr			(struct bce_softc *, int, struct l2_fhdr *);
232 static void bce_dump_ctx			(struct bce_softc *, u16);
233 static void bce_dump_ftqs			(struct bce_softc *);
234 static void bce_dump_tx_chain		(struct bce_softc *, u16, int);
235 static void bce_dump_rx_chain		(struct bce_softc *, u16, int);
236 static void bce_dump_pg_chain		(struct bce_softc *, u16, int);
237 static void bce_dump_status_block	(struct bce_softc *);
238 static void bce_dump_stats_block	(struct bce_softc *);
239 static void bce_dump_driver_state	(struct bce_softc *);
240 static void bce_dump_hw_state		(struct bce_softc *);
241 static void bce_dump_bc_state		(struct bce_softc *);
242 static void bce_breakpoint			(struct bce_softc *);
243 #endif
244 
245 
246 /****************************************************************************/
247 /* BCE Register/Memory Access Routines                                      */
248 /****************************************************************************/
249 static u32  bce_reg_rd_ind			(struct bce_softc *, u32);
250 static void bce_reg_wr_ind			(struct bce_softc *, u32, u32);
251 static void bce_ctx_wr				(struct bce_softc *, u32, u32, u32);
252 static int  bce_miibus_read_reg		(device_t, int, int);
253 static int  bce_miibus_write_reg	(device_t, int, int, int);
254 static void bce_miibus_statchg		(device_t);
255 
256 
257 /****************************************************************************/
258 /* BCE NVRAM Access Routines                                                */
259 /****************************************************************************/
260 static int  bce_acquire_nvram_lock	(struct bce_softc *);
261 static int  bce_release_nvram_lock	(struct bce_softc *);
262 static void bce_enable_nvram_access	(struct bce_softc *);
263 static void	bce_disable_nvram_access(struct bce_softc *);
264 static int  bce_nvram_read_dword	(struct bce_softc *, u32, u8 *, u32);
265 static int  bce_init_nvram			(struct bce_softc *);
266 static int  bce_nvram_read			(struct bce_softc *, u32, u8 *, int);
267 static int  bce_nvram_test			(struct bce_softc *);
268 #ifdef BCE_NVRAM_WRITE_SUPPORT
269 static int  bce_enable_nvram_write	(struct bce_softc *);
270 static void bce_disable_nvram_write	(struct bce_softc *);
271 static int  bce_nvram_erase_page	(struct bce_softc *, u32);
272 static int  bce_nvram_write_dword	(struct bce_softc *, u32, u8 *, u32);
273 static int  bce_nvram_write			(struct bce_softc *, u32, u8 *, int);
274 #endif
275 
276 /****************************************************************************/
277 /*                                                                          */
278 /****************************************************************************/
279 static void bce_dma_map_addr		(void *, bus_dma_segment_t *, int, int);
280 static int  bce_dma_alloc			(device_t);
281 static void bce_dma_free			(struct bce_softc *);
282 static void bce_release_resources	(struct bce_softc *);
283 
284 /****************************************************************************/
285 /* BCE Firmware Synchronization and Load                                    */
286 /****************************************************************************/
287 static int  bce_fw_sync				(struct bce_softc *, u32);
288 static void bce_load_rv2p_fw		(struct bce_softc *, u32 *, u32, u32);
289 static void bce_load_cpu_fw			(struct bce_softc *, struct cpu_reg *, struct fw_info *);
290 static void bce_init_cpus			(struct bce_softc *);
291 
292 static void bce_stop				(struct bce_softc *);
293 static int  bce_reset				(struct bce_softc *, u32);
294 static int  bce_chipinit 			(struct bce_softc *);
295 static int  bce_blockinit 			(struct bce_softc *);
296 static int  bce_get_rx_buf			(struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);
297 static int  bce_get_pg_buf			(struct bce_softc *, struct mbuf *, u16 *, u16 *);
298 
299 static int  bce_init_tx_chain		(struct bce_softc *);
300 static void bce_free_tx_chain		(struct bce_softc *);
301 
302 static int  bce_init_rx_chain		(struct bce_softc *);
303 static void bce_fill_rx_chain		(struct bce_softc *);
304 static void bce_free_rx_chain		(struct bce_softc *);
305 
306 static int  bce_init_pg_chain		(struct bce_softc *);
307 static void bce_fill_pg_chain		(struct bce_softc *);
308 static void bce_free_pg_chain		(struct bce_softc *);
309 
310 static int  bce_tx_encap			(struct bce_softc *, struct mbuf **);
311 static void bce_start_locked		(struct ifnet *);
312 static void bce_start				(struct ifnet *);
313 static int  bce_ioctl				(struct ifnet *, u_long, caddr_t);
314 static void bce_watchdog			(struct bce_softc *);
315 static int  bce_ifmedia_upd			(struct ifnet *);
316 static void bce_ifmedia_upd_locked	(struct ifnet *);
317 static void bce_ifmedia_sts			(struct ifnet *, struct ifmediareq *);
318 static void bce_init_locked			(struct bce_softc *);
319 static void bce_init				(void *);
320 static void bce_mgmt_init_locked	(struct bce_softc *sc);
321 
322 static void bce_init_ctx			(struct bce_softc *);
323 static void bce_get_mac_addr		(struct bce_softc *);
324 static void bce_set_mac_addr		(struct bce_softc *);
325 static void bce_phy_intr			(struct bce_softc *);
326 static inline u16 bce_get_hw_rx_cons(struct bce_softc *);
327 static void bce_rx_intr				(struct bce_softc *);
328 static void bce_tx_intr				(struct bce_softc *);
329 static void bce_disable_intr		(struct bce_softc *);
330 static void bce_enable_intr			(struct bce_softc *);
331 static void bce_intr				(void *);
332 static void bce_set_rx_mode			(struct bce_softc *);
333 static void bce_stats_update		(struct bce_softc *);
334 static void bce_tick				(void *);
335 static void bce_pulse				(void *);
336 static void bce_add_sysctls			(struct bce_softc *);
337 
338 
339 /****************************************************************************/
340 /* FreeBSD device dispatch table.                                           */
341 /****************************************************************************/
342 static device_method_t bce_methods[] = {
343 	/* Device interface (device_if.h) */
344 	DEVMETHOD(device_probe,		bce_probe),
345 	DEVMETHOD(device_attach,	bce_attach),
346 	DEVMETHOD(device_detach,	bce_detach),
347 	DEVMETHOD(device_shutdown,	bce_shutdown),
348 /* Supported by device interface but not used here. */
349 /*	DEVMETHOD(device_identify,	bce_identify),      */
350 /*	DEVMETHOD(device_suspend,	bce_suspend),       */
351 /*	DEVMETHOD(device_resume,	bce_resume),        */
352 /*	DEVMETHOD(device_quiesce,	bce_quiesce),       */
353 
354 	/* Bus interface (bus_if.h) */
355 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
356 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
357 
358 	/* MII interface (miibus_if.h) */
359 	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
360 	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
361 	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),
362 /* Supported by MII interface but not used here.       */
363 /*	DEVMETHOD(miibus_linkchg,	bce_miibus_linkchg),   */
364 /*	DEVMETHOD(miibus_mediainit,	bce_miibus_mediainit), */
365 
366 	{ 0, 0 }
367 };
368 
369 static driver_t bce_driver = {
370 	"bce",
371 	bce_methods,
372 	sizeof(struct bce_softc)
373 };
374 
375 static devclass_t bce_devclass;
376 
377 MODULE_DEPEND(bce, pci, 1, 1, 1);
378 MODULE_DEPEND(bce, ether, 1, 1, 1);
379 MODULE_DEPEND(bce, miibus, 1, 1, 1);
380 
381 DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
382 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
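
/*
 * Usage note (not part of the original source): with the module declarations
 * above, the driver is built into a kernel with "device bce" (plus "device
 * miibus") or loaded at runtime as if_bce.ko, e.g. "kldload if_bce" or
 * if_bce_load="YES" in /boot/loader.conf.
 */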
383 
384 
385 /****************************************************************************/
386 /* Tunable device values                                                    */
387 /****************************************************************************/
388 static int bce_tso_enable = TRUE;
389 static int bce_msi_enable = 1;
390 
391 SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters");
392 
393 /* Allowable values are TRUE or FALSE */
394 TUNABLE_INT("hw.bce.tso_enable", &bce_tso_enable);
395 SYSCTL_UINT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0,
396 "TSO Enable/Disable");
397 
398 /* Allowable values are 0 (IRQ only) and 1 (IRQ or MSI) */
399 TUNABLE_INT("hw.bce.msi_enable", &bce_msi_enable);
400 SYSCTL_UINT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0,
401 "MSI | INTx selector");
402 
403 /* ToDo: Add tunable to enable/disable strict MTU handling. */
404 /* Currently allows "loose" RX MTU checking (i.e. sets the  */
405 /* h/w RX MTU to the size of the largest receive buffer, or */
406 /* 2048 bytes).                                             */
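
/*
 * Example (assumed typical usage, not part of the original source): both
 * tunables above are CTLFLAG_RDTUN, so they take effect only when set
 * before the driver attaches, e.g. from /boot/loader.conf:
 *
 *	hw.bce.tso_enable=0	(disable TSO)
 *	hw.bce.msi_enable=0	(force legacy INTx instead of MSI)
 *
 * The active values can be inspected at runtime with
 * "sysctl hw.bce.tso_enable hw.bce.msi_enable".
 */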
407 
408 /****************************************************************************/
409 /* Device probe function.                                                   */
410 /*                                                                          */
411 /* Compares the device to the driver's list of supported devices and        */
412 /* reports back to the OS whether this is the right driver for the device.  */
413 /*                                                                          */
414 /* Returns:                                                                 */
415 /*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
416 /****************************************************************************/
417 static int
418 bce_probe(device_t dev)
419 {
420 	struct bce_type *t;
421 	struct bce_softc *sc;
422 	char *descbuf;
423 	u16 vid = 0, did = 0, svid = 0, sdid = 0;
424 
425 	t = bce_devs;
426 
427 	sc = device_get_softc(dev);
428 	bzero(sc, sizeof(struct bce_softc));
429 	sc->bce_unit = device_get_unit(dev);
430 	sc->bce_dev = dev;
431 
432 	/* Get the data for the device to be probed. */
433 	vid  = pci_get_vendor(dev);
434 	did  = pci_get_device(dev);
435 	svid = pci_get_subvendor(dev);
436 	sdid = pci_get_subdevice(dev);
437 
438 	DBPRINT(sc, BCE_VERBOSE_LOAD,
439 		"%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
440 		"SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
441 
442 	/* Look through the list of known devices for a match. */
443 	while(t->bce_name != NULL) {
444 
445 		if ((vid == t->bce_vid) && (did == t->bce_did) &&
446 			((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) &&
447 			((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) {
448 
449 			descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
450 
451 			if (descbuf == NULL)
452 				return(ENOMEM);
453 
454 			/* Print out the device identity. */
455 			snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
456 				t->bce_name,
457 			    (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
458 			    (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
459 
460 			device_set_desc_copy(dev, descbuf);
461 			free(descbuf, M_TEMP);
462 			return(BUS_PROBE_DEFAULT);
463 		}
464 		t++;
465 	}
466 
467 	return(ENXIO);
468 }
469 
470 
471 /****************************************************************************/
472 /* Device attach function.                                                  */
473 /*                                                                          */
474 /* Allocates device resources, performs secondary chip identification,      */
475 /* resets and initializes the hardware, and initializes driver instance     */
476 /* variables.                                                               */
477 /*                                                                          */
478 /* Returns:                                                                 */
479 /*   0 on success, positive value on failure.                               */
480 /****************************************************************************/
481 static int
482 bce_attach(device_t dev)
483 {
484 	struct bce_softc *sc;
485 	struct ifnet *ifp;
486 	u32 val;
487 	int count, rid, rc = 0;
488 
489 	sc = device_get_softc(dev);
490 	sc->bce_dev = dev;
491 
492 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
493 
494 	sc->bce_unit = device_get_unit(dev);
495 
496 	/* Set initial device and PHY flags */
497 	sc->bce_flags = 0;
498 	sc->bce_phy_flags = 0;
499 
500 	pci_enable_busmaster(dev);
501 
502 	/* Allocate PCI memory resources. */
503 	rid = PCIR_BAR(0);
504 	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
505 		&rid, RF_ACTIVE | PCI_RF_DENSE);
506 
507 	if (sc->bce_res_mem == NULL) {
508 		BCE_PRINTF("%s(%d): PCI memory allocation failed\n",
509 			__FILE__, __LINE__);
510 		rc = ENXIO;
511 		goto bce_attach_fail;
512 	}
513 
514 	/* Get various resource handles. */
515 	sc->bce_btag    = rman_get_bustag(sc->bce_res_mem);
516 	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
517 	sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res_mem);
518 
519 	/* If MSI is enabled in the driver, get the vector count. */
520 	count = bce_msi_enable ? pci_msi_count(dev) : 0;
521 
522 	/* Allocate PCI IRQ resources. */
523 	if (count == 1 && pci_alloc_msi(dev, &count) == 0 && count == 1) {
524 		rid = 1;
525 		sc->bce_flags |= BCE_USING_MSI_FLAG;
526 		DBPRINT(sc, BCE_VERBOSE_LOAD,
527 			"Allocating %d MSI interrupt(s)\n", count);
528 	} else {
529 		rid = 0;
530 		DBPRINT(sc, BCE_VERBOSE_LOAD, "Allocating IRQ interrupt\n");
531 	}
532 
533 	sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
534 	    RF_SHAREABLE | RF_ACTIVE);
535 
536 	if (sc->bce_res_irq == NULL) {
537 		BCE_PRINTF("%s(%d): PCI map interrupt failed!\n",
538 			__FILE__, __LINE__);
539 		rc = ENXIO;
540 		goto bce_attach_fail;
541 	}
542 
543 	/* Initialize mutex for the current device instance. */
544 	BCE_LOCK_INIT(sc, device_get_nameunit(dev));
545 
546 	/*
547 	 * Configure byte swap and enable indirect register access.
548 	 * Rely on CPU to do target byte swapping on big endian systems.
549 	 * Access to registers outside of PCI configuration space is not
550 	 * valid until this is done.
551 	 */
552 	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
553 			       BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
554 			       BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
555 
556 	/* Save ASIC revision info. */
557 	sc->bce_chipid =  REG_RD(sc, BCE_MISC_ID);
558 
559 	/* Weed out any non-production controller revisions. */
560 	switch(BCE_CHIP_ID(sc)) {
561 		case BCE_CHIP_ID_5706_A0:
562 		case BCE_CHIP_ID_5706_A1:
563 		case BCE_CHIP_ID_5708_A0:
564 		case BCE_CHIP_ID_5708_B0:
565 			BCE_PRINTF("%s(%d): Unsupported controller revision (%c%d)!\n",
566 				__FILE__, __LINE__,
567 				(((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
568 			    (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
569 			rc = ENODEV;
570 			goto bce_attach_fail;
571 	}
572 
573 	/*
574 	 * The embedded PCIe to PCI-X bridge (EPB)
575 	 * in the 5708 cannot address memory above
576 	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
577 	 */
578 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
579 		sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
580 	else
581 		sc->max_bus_addr = BUS_SPACE_MAXADDR;
582 
583 	/*
584 	 * Find the base address for shared memory access.
585 	 * Newer versions of bootcode use a signature and offset
586 	 * while older versions use a fixed address.
587 	 */
588 	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
589 	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
590 		sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0);
591 	else
592 		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
593 
594 	DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): bce_shmem_base = 0x%08X\n",
595 		__FUNCTION__, sc->bce_shmem_base);
596 
597 	/* Fetch the bootcode revision. */
598 	sc->bce_fw_ver = REG_RD_IND(sc, sc->bce_shmem_base +
599 		BCE_DEV_INFO_BC_REV);
600 
601 	/* Check if any management firmware is running. */
602 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE);
603 	if (val & (BCE_PORT_FEATURE_ASF_ENABLED | BCE_PORT_FEATURE_IMD_ENABLED))
604 		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
605 
606 	/* Get PCI bus information (speed and type). */
607 	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
608 	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
609 		u32 clkreg;
610 
611 		sc->bce_flags |= BCE_PCIX_FLAG;
612 
613 		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);
614 
615 		clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
616 		switch (clkreg) {
617 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
618 			sc->bus_speed_mhz = 133;
619 			break;
620 
621 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
622 			sc->bus_speed_mhz = 100;
623 			break;
624 
625 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
626 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
627 			sc->bus_speed_mhz = 66;
628 			break;
629 
630 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
631 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
632 			sc->bus_speed_mhz = 50;
633 			break;
634 
635 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
636 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
637 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
638 			sc->bus_speed_mhz = 33;
639 			break;
640 		}
641 	} else {
642 		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
643 			sc->bus_speed_mhz = 66;
644 		else
645 			sc->bus_speed_mhz = 33;
646 	}
647 
648 	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
649 		sc->bce_flags |= BCE_PCI_32BIT_FLAG;
650 
651 	/* Reset the controller and announce to the bootcode that the driver is present. */
652 	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
653 		BCE_PRINTF("%s(%d): Controller reset failed!\n",
654 			__FILE__, __LINE__);
655 		rc = ENXIO;
656 		goto bce_attach_fail;
657 	}
658 
659 	/* Initialize the controller. */
660 	if (bce_chipinit(sc)) {
661 		BCE_PRINTF("%s(%d): Controller initialization failed!\n",
662 			__FILE__, __LINE__);
663 		rc = ENXIO;
664 		goto bce_attach_fail;
665 	}
666 
667 	/* Perform NVRAM test. */
668 	if (bce_nvram_test(sc)) {
669 		BCE_PRINTF("%s(%d): NVRAM test failed!\n",
670 			__FILE__, __LINE__);
671 		rc = ENXIO;
672 		goto bce_attach_fail;
673 	}
674 
675 	/* Fetch the permanent Ethernet MAC address. */
676 	bce_get_mac_addr(sc);
677 
678 	/*
679 	 * Trip points control how many BDs
680 	 * should be ready before generating an
681 	 * interrupt, while ticks control how long
682 	 * a BD can sit in the chain before
683 	 * generating an interrupt.  Set the default
684 	 * values for the RX and TX chains.
685 	 */
686 
687 #ifdef BCE_DEBUG
688 	/* Force more frequent interrupts. */
689 	sc->bce_tx_quick_cons_trip_int = 1;
690 	sc->bce_tx_quick_cons_trip     = 1;
691 	sc->bce_tx_ticks_int           = 0;
692 	sc->bce_tx_ticks               = 0;
693 
694 	sc->bce_rx_quick_cons_trip_int = 1;
695 	sc->bce_rx_quick_cons_trip     = 1;
696 	sc->bce_rx_ticks_int           = 0;
697 	sc->bce_rx_ticks               = 0;
698 #else
699 	/* Improve throughput at the expense of increased latency. */
700 	sc->bce_tx_quick_cons_trip_int = 20;
701 	sc->bce_tx_quick_cons_trip     = 20;
702 	sc->bce_tx_ticks_int           = 80;
703 	sc->bce_tx_ticks               = 80;
704 
705 	sc->bce_rx_quick_cons_trip_int = 6;
706 	sc->bce_rx_quick_cons_trip     = 6;
707 	sc->bce_rx_ticks_int           = 18;
708 	sc->bce_rx_ticks               = 18;
709 #endif
710 
711 	/* Update statistics once every second. */
712 	sc->bce_stats_ticks = 1000000 & 0xffff00;
713 
714 	/*
715 	 * The SerDes based NetXtreme II controllers
716 	 * that support 2.5Gb operation (currently
717 	 * 5708S) use a PHY at address 2, otherwise
718 	 * the PHY is present at address 1.
719 	 */
720 	sc->bce_phy_addr = 1;
721 
722 	if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
723 		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
724 		sc->bce_flags |= BCE_NO_WOL_FLAG;
725 		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
726 			sc->bce_phy_addr = 2;
727 			val = REG_RD_IND(sc, sc->bce_shmem_base +
728 					 BCE_SHARED_HW_CFG_CONFIG);
729 			if (val & BCE_SHARED_HW_CFG_PHY_2_5G) {
730 				sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
731 				DBPRINT(sc, BCE_INFO_LOAD, "Found 2.5Gb capable adapter\n");
732 			}
733 		}
734 	}
735 
736 	/* Store data needed by PHY driver for backplane applications */
737 	sc->bce_shared_hw_cfg = REG_RD_IND(sc, sc->bce_shmem_base +
738 		BCE_SHARED_HW_CFG_CONFIG);
739 	sc->bce_port_hw_cfg   = REG_RD_IND(sc, sc->bce_shmem_base +
740 		BCE_PORT_HW_CFG_CONFIG);
741 
742 	/* Allocate DMA memory resources. */
743 	if (bce_dma_alloc(dev)) {
744 		BCE_PRINTF("%s(%d): DMA resource allocation failed!\n",
745 		    __FILE__, __LINE__);
746 		rc = ENXIO;
747 		goto bce_attach_fail;
748 	}
749 
750 	/* Allocate an ifnet structure. */
751 	ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
752 	if (ifp == NULL) {
753 		BCE_PRINTF("%s(%d): Interface allocation failed!\n",
754 			__FILE__, __LINE__);
755 		rc = ENXIO;
756 		goto bce_attach_fail;
757 	}
758 
759 	/* Initialize the ifnet interface. */
760 	ifp->if_softc        = sc;
761 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
762 	ifp->if_flags        = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
763 	ifp->if_ioctl        = bce_ioctl;
764 	ifp->if_start        = bce_start;
765 	ifp->if_init         = bce_init;
766 	ifp->if_mtu          = ETHERMTU;
767 
768 	if (bce_tso_enable) {
769 		ifp->if_hwassist = BCE_IF_HWASSIST | CSUM_TSO;
770 		ifp->if_capabilities = BCE_IF_CAPABILITIES | IFCAP_TSO4;
771 	} else {
772 		ifp->if_hwassist = BCE_IF_HWASSIST;
773 		ifp->if_capabilities = BCE_IF_CAPABILITIES;
774 	}
775 
776 	ifp->if_capenable    = ifp->if_capabilities;
777 
778 	/* Use standard mbuf sizes for buffer allocation. */
779 	sc->rx_bd_mbuf_alloc_size = MHLEN;
780 	sc->pg_bd_mbuf_alloc_size = MCLBYTES;
781 
782 	ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD;
783 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
784 	IFQ_SET_READY(&ifp->if_snd);
785 
786 	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
787 		ifp->if_baudrate = IF_Mbps(2500ULL);
788 	else
789 		ifp->if_baudrate = IF_Mbps(1000);
790 
791 	/* Check for an MII child bus by probing the PHY. */
792 	if (mii_phy_probe(dev, &sc->bce_miibus, bce_ifmedia_upd,
793 		bce_ifmedia_sts)) {
794 		BCE_PRINTF("%s(%d): No PHY found on child MII bus!\n",
795 			__FILE__, __LINE__);
796 		rc = ENXIO;
797 		goto bce_attach_fail;
798 	}
799 
800 	/* Attach to the Ethernet interface list. */
801 	ether_ifattach(ifp, sc->eaddr);
802 
803 #if __FreeBSD_version < 500000
804 	callout_init(&sc->bce_tick_callout);
805 	callout_init(&sc->bce_pulse_callout);
806 #else
807 	callout_init_mtx(&sc->bce_tick_callout, &sc->bce_mtx, 0);
808 	callout_init_mtx(&sc->bce_pulse_callout, &sc->bce_mtx, 0);
809 #endif
810 
811 	/* Hookup IRQ last. */
812 	rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL,
813 	   bce_intr, sc, &sc->bce_intrhand);
814 
815 	if (rc) {
816 		BCE_PRINTF("%s(%d): Failed to setup IRQ!\n",
817 			__FILE__, __LINE__);
818 		bce_detach(dev);
819 		goto bce_attach_exit;
820 	}
821 
822 	/*
823 	 * At this point we've acquired all the resources
824 	 * we need to run so there's no turning back, we're
825 	 * we need to run, so there's no turning back; we're
826 	 */
827 
828 	/* Print some important debugging info. */
829 	DBRUNMSG(BCE_INFO, bce_dump_driver_state(sc));
830 
831 	/* Add the supported sysctls to the kernel. */
832 	bce_add_sysctls(sc);
833 
834 	BCE_LOCK(sc);
835 	/*
836 	 * The chip reset earlier notified the bootcode that
837 	 * a driver is present.  We now need to start our pulse
838 	 * routine so that the bootcode is reminded that we're
839 	 * still running.
840 	 */
841 	bce_pulse(sc);
842 
843 	bce_mgmt_init_locked(sc);
844 	BCE_UNLOCK(sc);
845 
846 	/* Finally, print some useful adapter info */
847 	BCE_PRINTF("ASIC (0x%08X); ", sc->bce_chipid);
848 	printf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
849 		((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));
850 	printf("Bus (PCI%s, %s, %dMHz); ",
851 		((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
852 		((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
853 		sc->bus_speed_mhz);
854 	printf("F/W (0x%08X); Flags( ", sc->bce_fw_ver);
855 	if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
856 		printf("MFW ");
857 	if (sc->bce_flags & BCE_USING_MSI_FLAG)
858 		printf("MSI ");
859 	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
860 		printf("2.5G ");
861 	printf(")\n");
862 
863 	DBPRINT(sc, BCE_FATAL, "%s(): sc = %p\n",
864 		__FUNCTION__, sc);
865 
866 	goto bce_attach_exit;
867 
868 bce_attach_fail:
869 	bce_release_resources(sc);
870 
871 bce_attach_exit:
872 
873 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
874 
875 	return(rc);
876 }
877 
878 
879 /****************************************************************************/
880 /* Device detach function.                                                  */
881 /*                                                                          */
882 /* Stops the controller, resets the controller, and releases resources.     */
883 /*                                                                          */
884 /* Returns:                                                                 */
885 /*   0 on success, positive value on failure.                               */
886 /****************************************************************************/
887 static int
888 bce_detach(device_t dev)
889 {
890 	struct bce_softc *sc = device_get_softc(dev);
891 	struct ifnet *ifp;
892 	u32 msg;
893 
894 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
895 
896 	ifp = sc->bce_ifp;
897 
898 	/* Stop and reset the controller. */
899 	BCE_LOCK(sc);
900 
901 	/* Stop the pulse so the bootcode can go to driver absent state. */
902 	callout_stop(&sc->bce_pulse_callout);
903 
904 	bce_stop(sc);
905 	if (sc->bce_flags & BCE_NO_WOL_FLAG)
906 		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
907 	else
908 		msg = BCE_DRV_MSG_CODE_UNLOAD;
909 	bce_reset(sc, msg);
910 
911 	BCE_UNLOCK(sc);
912 
913 	ether_ifdetach(ifp);
914 
915 	/* If we have a child device on the MII bus remove it too. */
916 	bus_generic_detach(dev);
917 	device_delete_child(dev, sc->bce_miibus);
918 
919 	/* Release all remaining resources. */
920 	bce_release_resources(sc);
921 
922 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
923 
924 	return(0);
925 }
926 
927 
928 /****************************************************************************/
929 /* Device shutdown function.                                                */
930 /*                                                                          */
931 /* Stops and resets the controller.                                         */
932 /*                                                                          */
933 /* Returns:                                                                 */
934 /*   0 on success, positive value on failure.                               */
935 /****************************************************************************/
936 static int
937 bce_shutdown(device_t dev)
938 {
939 	struct bce_softc *sc = device_get_softc(dev);
940 	u32 msg;
941 
942 	DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Entering %s()\n", __FUNCTION__);
943 
944 	BCE_LOCK(sc);
945 	bce_stop(sc);
946 	if (sc->bce_flags & BCE_NO_WOL_FLAG)
947 		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
948 	else
949 		msg = BCE_DRV_MSG_CODE_UNLOAD;
950 	bce_reset(sc, msg);
951 	BCE_UNLOCK(sc);
952 
953 	DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Exiting %s()\n", __FUNCTION__);
954 
955 	return (0);
956 }
957 
958 
959 /****************************************************************************/
960 /* Indirect register read.                                                  */
961 /*                                                                          */
962 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
963 /* configuration space.  Using this mechanism avoids issues with posted     */
964 /* reads but is much slower than memory-mapped I/O.                         */
965 /*                                                                          */
966 /* Returns:                                                                 */
967 /*   The value of the register.                                             */
968 /****************************************************************************/
969 static u32
970 bce_reg_rd_ind(struct bce_softc *sc, u32 offset)
971 {
972 	device_t dev;
973 	dev = sc->bce_dev;
974 
975 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
976 #ifdef BCE_DEBUG
977 	{
978 		u32 val;
979 		val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
980 		DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
981 			__FUNCTION__, offset, val);
982 		return val;
983 	}
984 #else
985 	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
986 #endif
987 }
988 
989 
990 /****************************************************************************/
991 /* Indirect register write.                                                 */
992 /*                                                                          */
993 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
994 /* configuration space.  Using this mechanism avoids issues with posted     */
995 /* writes but is much slower than memory-mapped I/O.                        */
996 /*                                                                          */
997 /* Returns:                                                                 */
998 /*   Nothing.                                                               */
999 /****************************************************************************/
1000 static void
1001 bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
1002 {
1003 	device_t dev;
1004 	dev = sc->bce_dev;
1005 
1006 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
1007 		__FUNCTION__, offset, val);
1008 
1009 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1010 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
1011 }
1012 
1013 
1014 #ifdef BCE_DEBUG
1015 /****************************************************************************/
1016 /* Context memory read.                                                     */
1017 /*                                                                          */
1018 /* The NetXtreme II controller uses context memory to track connection      */
1019 /* information for L2 and higher network protocols.                         */
1020 /*                                                                          */
1021 /* Returns:                                                                 */
1022 /*   The requested 32 bit value of context memory.                          */
1023 /****************************************************************************/
1024 static u32
1025 bce_ctx_rd(struct bce_softc *sc, u32 cid_addr, u32 offset)
1026 {
1027 	u32 val;
1028 
1029 	offset += cid_addr;
1030 	REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1031 	val = REG_RD(sc, BCE_CTX_DATA);
1032 
1033 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1034 		"val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val);
1035 
1036 	return(val);
1037 }
1038 #endif
1039 
1040 
1041 /****************************************************************************/
1042 /* Context memory write.                                                    */
1043 /*                                                                          */
1044 /* The NetXtreme II controller uses context memory to track connection      */
1045 /* information for L2 and higher network protocols.                         */
1046 /*                                                                          */
1047 /* Returns:                                                                 */
1048 /*   Nothing.                                                               */
1049 /****************************************************************************/
1050 static void
1051 bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 offset, u32 val)
1052 {
1053 
1054 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1055 		"val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val);
1056 
1057 	offset += cid_addr;
1058 	REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1059 	REG_WR(sc, BCE_CTX_DATA, val);
1060 }
1061 
1062 
1063 /****************************************************************************/
1064 /* PHY register read.                                                       */
1065 /*                                                                          */
1066 /* Implements register reads on the MII bus.                                */
1067 /*                                                                          */
1068 /* Returns:                                                                 */
1069 /*   The value of the register.                                             */
1070 /****************************************************************************/
1071 static int
1072 bce_miibus_read_reg(device_t dev, int phy, int reg)
1073 {
1074 	struct bce_softc *sc;
1075 	u32 val;
1076 	int i;
1077 
1078 	sc = device_get_softc(dev);
1079 
1080 	/* Make sure we are accessing the correct PHY address. */
1081 	if (phy != sc->bce_phy_addr) {
1082 		DBPRINT(sc, BCE_EXCESSIVE_PHY, "Invalid PHY address %d for PHY read!\n", phy);
1083 		return(0);
1084 	}
1085 
1086 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1087 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1088 		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1089 
1090 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1091 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1092 
1093 		DELAY(40);
1094 	}
1095 
1096 	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
1097 		BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
1098 		BCE_EMAC_MDIO_COMM_START_BUSY;
1099 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
1100 
1101 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1102 		DELAY(10);
1103 
1104 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1105 		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1106 			DELAY(5);
1107 
1108 			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1109 			val &= BCE_EMAC_MDIO_COMM_DATA;
1110 
1111 			break;
1112 		}
1113 	}
1114 
1115 	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
1116 		BCE_PRINTF("%s(%d): Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
1117 			__FILE__, __LINE__, phy, reg);
1118 		val = 0x0;
1119 	} else {
1120 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1121 	}
1122 
1123 	DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1124 		__FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);
1125 
1126 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1127 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1128 		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1129 
1130 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1131 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1132 
1133 		DELAY(40);
1134 	}
1135 
1136 	return (val & 0xffff);
1137 
1138 }
1139 
1140 
1141 /****************************************************************************/
1142 /* PHY register write.                                                      */
1143 /*                                                                          */
1144 /* Implements register writes on the MII bus.                               */
1145 /*                                                                          */
1146 /* Returns:                                                                 */
1147 /*   0 on completion; write errors are only logged.                         */
1148 /****************************************************************************/
1149 static int
1150 bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1151 {
1152 	struct bce_softc *sc;
1153 	u32 val1;
1154 	int i;
1155 
1156 	sc = device_get_softc(dev);
1157 
1158 	/* Make sure we are accessing the correct PHY address. */
1159 	if (phy != sc->bce_phy_addr) {
1160 		DBPRINT(sc, BCE_EXCESSIVE_PHY, "Invalid PHY address %d for PHY write!\n", phy);
1161 		return(0);
1162 	}
1163 
1164 	DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1165 		__FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);
1166 
1167 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1168 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1169 		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1170 
1171 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1172 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1173 
1174 		DELAY(40);
1175 	}
1176 
1177 	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1178 		BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1179 		BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1180 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1181 
1182 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1183 		DELAY(10);
1184 
1185 		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1186 		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1187 			DELAY(5);
1188 			break;
1189 		}
1190 	}
1191 
1192 	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1193 		BCE_PRINTF("%s(%d): PHY write timeout!\n",
1194 			__FILE__, __LINE__);
1195 
1196 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1197 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1198 		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1199 
1200 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1201 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1202 
1203 		DELAY(40);
1204 	}
1205 
1206 	return 0;
1207 }
1208 
1209 
1210 /****************************************************************************/
1211 /* MII bus status change.                                                   */
1212 /*                                                                          */
1213 /* Called by the MII bus driver when the PHY establishes link to set the    */
1214 /* MAC interface registers.                                                 */
1215 /*                                                                          */
1216 /* Returns:                                                                 */
1217 /*   Nothing.                                                               */
1218 /****************************************************************************/
1219 static void
1220 bce_miibus_statchg(device_t dev)
1221 {
1222 	struct bce_softc *sc;
1223 	struct mii_data *mii;
1224 	int val;
1225 
1226 	sc = device_get_softc(dev);
1227 
1228 	mii = device_get_softc(sc->bce_miibus);
1229 
1230 	val = REG_RD(sc, BCE_EMAC_MODE);
1231 	val &= ~(BCE_EMAC_MODE_PORT | BCE_EMAC_MODE_HALF_DUPLEX |
1232 		BCE_EMAC_MODE_MAC_LOOP | BCE_EMAC_MODE_FORCE_LINK |
1233 		BCE_EMAC_MODE_25G);
1234 
1235 	/* Set MII or GMII interface based on the speed negotiated by the PHY. */
1236 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1237 	case IFM_10_T:
1238 		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
1239 			DBPRINT(sc, BCE_INFO, "Enabling 10Mb interface.\n");
1240 			val |= BCE_EMAC_MODE_PORT_MII_10;
1241 			break;
1242 		}
1243 		/* fall-through */
1244 	case IFM_100_TX:
1245 		DBPRINT(sc, BCE_INFO, "Enabling MII interface.\n");
1246 		val |= BCE_EMAC_MODE_PORT_MII;
1247 		break;
1248 	case IFM_2500_SX:
1249 		DBPRINT(sc, BCE_INFO, "Enabling 2.5G MAC mode.\n");
1250 		val |= BCE_EMAC_MODE_25G;
1251 		/* fall-through */
1252 	case IFM_1000_T:
1253 	case IFM_1000_SX:
1254 		DBPRINT(sc, BCE_INFO, "Enabling GMII interface.\n");
1255 		val |= BCE_EMAC_MODE_PORT_GMII;
1256 		break;
1257 	default:
1258 		DBPRINT(sc, BCE_INFO, "Unknown speed, enabling default GMII "
1259 			"interface.\n");
1260 		val |= BCE_EMAC_MODE_PORT_GMII;
1261 	}
1262 
1263 	/* Set half or full duplex based on the duplex mode negotiated by the PHY. */
1264 	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
1265 		DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
1266 		val |= BCE_EMAC_MODE_HALF_DUPLEX;
1267 	} else
1268 		DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
1269 
1270 	REG_WR(sc, BCE_EMAC_MODE, val);
1271 
1272 #if 0
1273 	/* ToDo: Enable flow control support in brgphy and bge. */
1274 	/* FLAG0 is set if RX is enabled and FLAG1 if TX is enabled */
1275 	if (mii->mii_media_active & IFM_FLAG0)
1276 		BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
1277 	if (mii->mii_media_active & IFM_FLAG1)
1278 		BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
1279 #endif
1280 
1281 }
1282 
1283 
1284 /****************************************************************************/
1285 /* Acquire NVRAM lock.                                                      */
1286 /*                                                                          */
1287 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
1288 /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is     */
1289 /* for use by the driver.                                                   */
1290 /*                                                                          */
1291 /* Returns:                                                                 */
1292 /*   0 on success, positive value on failure.                               */
1293 /****************************************************************************/
1294 static int
1295 bce_acquire_nvram_lock(struct bce_softc *sc)
1296 {
1297 	u32 val;
1298 	int j;
1299 
1300 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Acquiring NVRAM lock.\n");
1301 
1302 	/* Request access to the flash interface. */
1303 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1304 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1305 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1306 		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1307 			break;
1308 
1309 		DELAY(5);
1310 	}
1311 
1312 	if (j >= NVRAM_TIMEOUT_COUNT) {
1313 		DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1314 		return EBUSY;
1315 	}
1316 
1317 	return 0;
1318 }
1319 
1320 
1321 /****************************************************************************/
1322 /* Release NVRAM lock.                                                      */
1323 /*                                                                          */
1324 /* When the caller is finished accessing NVRAM the lock must be released.   */
1325 /* Lock 0 is reserved, lock 1 is used by firmware, and lock 2 is reserved   */
1326 /* for use by the driver.                                                   */
1327 /*                                                                          */
1328 /* Returns:                                                                 */
1329 /*   0 on success, positive value on failure.                               */
1330 /****************************************************************************/
1331 static int
1332 bce_release_nvram_lock(struct bce_softc *sc)
1333 {
1334 	int j;
1335 	u32 val;
1336 
1337 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Releasing NVRAM lock.\n");
1338 
1339 	/*
1340 	 * Relinquish the NVRAM interface.
1341 	 */
1342 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1343 
1344 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1345 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1346 		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1347 			break;
1348 
1349 		DELAY(5);
1350 	}
1351 
1352 	if (j >= NVRAM_TIMEOUT_COUNT) {
1353 		DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n");
1354 		return EBUSY;
1355 	}
1356 
1357 	return 0;
1358 }
1359 
1360 
1361 #ifdef BCE_NVRAM_WRITE_SUPPORT
1362 /****************************************************************************/
1363 /* Enable NVRAM write access.                                               */
1364 /*                                                                          */
1365 /* Before writing to NVRAM the caller must enable NVRAM writes.             */
1366 /*                                                                          */
1367 /* Returns:                                                                 */
1368 /*   0 on success, positive value on failure.                               */
1369 /****************************************************************************/
1370 static int
1371 bce_enable_nvram_write(struct bce_softc *sc)
1372 {
1373 	u32 val;
1374 
1375 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Enabling NVRAM write.\n");
1376 
1377 	val = REG_RD(sc, BCE_MISC_CFG);
1378 	REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
1379 
1380 	if (!sc->bce_flash_info->buffered) {
1381 		int j;
1382 
1383 		REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1384 		REG_WR(sc, BCE_NVM_COMMAND,	BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
1385 
1386 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1387 			DELAY(5);
1388 
1389 			val = REG_RD(sc, BCE_NVM_COMMAND);
1390 			if (val & BCE_NVM_COMMAND_DONE)
1391 				break;
1392 		}
1393 
1394 		if (j >= NVRAM_TIMEOUT_COUNT) {
1395 			DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
1396 			return EBUSY;
1397 		}
1398 	}
1399 	return 0;
1400 }
1401 
1402 
1403 /****************************************************************************/
1404 /* Disable NVRAM write access.                                              */
1405 /*                                                                          */
1406 /* When the caller is finished writing to NVRAM write access must be        */
1407 /* disabled.                                                                */
1408 /*                                                                          */
1409 /* Returns:                                                                 */
1410 /*   Nothing.                                                               */
1411 /****************************************************************************/
1412 static void
1413 bce_disable_nvram_write(struct bce_softc *sc)
1414 {
1415 	u32 val;
1416 
1417 	DBPRINT(sc, BCE_VERBOSE_NVRAM,  "Disabling NVRAM write.\n");
1418 
1419 	val = REG_RD(sc, BCE_MISC_CFG);
1420 	REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1421 }
1422 #endif
1423 
1424 
1425 /****************************************************************************/
1426 /* Enable NVRAM access.                                                     */
1427 /*                                                                          */
1428 /* Before accessing NVRAM for read or write operations the caller must      */
1429 /* enable NVRAM access.                                                     */
1430 /*                                                                          */
1431 /* Returns:                                                                 */
1432 /*   Nothing.                                                               */
1433 /****************************************************************************/
1434 static void
1435 bce_enable_nvram_access(struct bce_softc *sc)
1436 {
1437 	u32 val;
1438 
1439 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Enabling NVRAM access.\n");
1440 
1441 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1442 	/* Enable both bits, even on read. */
1443 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1444 	       val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1445 }
1446 
1447 
1448 /****************************************************************************/
1449 /* Disable NVRAM access.                                                    */
1450 /*                                                                          */
1451 /* When the caller is finished accessing NVRAM access must be disabled.     */
1452 /*                                                                          */
1453 /* Returns:                                                                 */
1454 /*   Nothing.                                                               */
1455 /****************************************************************************/
1456 static void
1457 bce_disable_nvram_access(struct bce_softc *sc)
1458 {
1459 	u32 val;
1460 
1461 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Disabling NVRAM access.\n");
1462 
1463 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1464 
1465 	/* Disable both bits, even after read. */
1466 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1467 		val & ~(BCE_NVM_ACCESS_ENABLE_EN |
1468 			BCE_NVM_ACCESS_ENABLE_WR_EN));
1469 }
1470 
1471 
1472 #ifdef BCE_NVRAM_WRITE_SUPPORT
1473 /****************************************************************************/
1474 /* Erase NVRAM page before writing.                                         */
1475 /*                                                                          */
1476 /* Non-buffered flash parts require that a page be erased before it is      */
1477 /* written.                                                                 */
1478 /*                                                                          */
1479 /* Returns:                                                                 */
1480 /*   0 on success, positive value on failure.                               */
1481 /****************************************************************************/
1482 static int
1483 bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
1484 {
1485 	u32 cmd;
1486 	int j;
1487 
1488 	/* Buffered flash doesn't require an erase. */
1489 	if (sc->bce_flash_info->buffered)
1490 		return 0;
1491 
1492 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Erasing NVRAM page.\n");
1493 
1494 	/* Build an erase command. */
1495 	cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
1496 	      BCE_NVM_COMMAND_DOIT;
1497 
1498 	/*
1499 	 * Clear the DONE bit separately, set the NVRAM address to erase,
1500 	 * and issue the erase command.
1501 	 */
1502 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1503 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1504 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1505 
1506 	/* Wait for completion. */
1507 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1508 		u32 val;
1509 
1510 		DELAY(5);
1511 
1512 		val = REG_RD(sc, BCE_NVM_COMMAND);
1513 		if (val & BCE_NVM_COMMAND_DONE)
1514 			break;
1515 	}
1516 
1517 	if (j >= NVRAM_TIMEOUT_COUNT) {
1518 		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
1519 		return EBUSY;
1520 	}
1521 
1522 	return 0;
1523 }
1524 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1525 
1526 
1527 /****************************************************************************/
1528 /* Read a dword (32 bits) from NVRAM.                                       */
1529 /*                                                                          */
1530 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1531 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1532 /*                                                                          */
1533 /* Returns:                                                                 */
1534 /*   0 on success and the 32 bit value read, positive value on failure.     */
1535 /****************************************************************************/
1536 static int
1537 bce_nvram_read_dword(struct bce_softc *sc, u32 offset, u8 *ret_val,
1538 							u32 cmd_flags)
1539 {
1540 	u32 cmd;
1541 	int i, rc = 0;
1542 
1543 	/* Build the command word. */
1544 	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1545 
1546 	/* Calculate the offset for buffered flash. */
1547 	if (sc->bce_flash_info->buffered) {
1548 		offset = ((offset / sc->bce_flash_info->page_size) <<
1549 			   sc->bce_flash_info->page_bits) +
1550 			  (offset % sc->bce_flash_info->page_size);
1551 	}
1552 
1553 	/*
1554 	 * Clear the DONE bit separately, set the address to read,
1555 	 * and issue the read.
1556 	 */
1557 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1558 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1559 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1560 
1561 	/* Wait for completion. */
1562 	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1563 		u32 val;
1564 
1565 		DELAY(5);
1566 
1567 		val = REG_RD(sc, BCE_NVM_COMMAND);
1568 		if (val & BCE_NVM_COMMAND_DONE) {
1569 			val = REG_RD(sc, BCE_NVM_READ);
1570 
1571 			val = bce_be32toh(val);
1572 			memcpy(ret_val, &val, 4);
1573 			break;
1574 		}
1575 	}
1576 
1577 	/* Check for errors. */
1578 	if (i >= NVRAM_TIMEOUT_COUNT) {
1579 		BCE_PRINTF("%s(%d): Timeout error reading NVRAM at offset 0x%08X!\n",
1580 			__FILE__, __LINE__, offset);
1581 		rc = EBUSY;
1582 	}
1583 
1584 	return(rc);
1585 }
1586 
1587 
1588 #ifdef BCE_NVRAM_WRITE_SUPPORT
1589 /****************************************************************************/
1590 /* Write a dword (32 bits) to NVRAM.                                        */
1591 /*                                                                          */
1592 /* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1593 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1594 /* enabled NVRAM write access.                                              */
1595 /*                                                                          */
1596 /* Returns:                                                                 */
1597 /*   0 on success, positive value on failure.                               */
1598 /****************************************************************************/
1599 static int
1600 bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
1601 	u32 cmd_flags)
1602 {
1603 	u32 cmd, val32;
1604 	int j;
1605 
1606 	/* Build the command word. */
1607 	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;
1608 
1609 	/* Calculate the offset for buffered flash. */
1610 	if (sc->bce_flash_info->buffered) {
1611 		offset = ((offset / sc->bce_flash_info->page_size) <<
1612 			  sc->bce_flash_info->page_bits) +
1613 			 (offset % sc->bce_flash_info->page_size);
1614 	}
1615 
1616 	/*
1617 	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
1618 	 * set the NVRAM address to write, and issue the write command
1619 	 */
1620 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1621 	memcpy(&val32, val, 4);
1622 	val32 = htobe32(val32);
1623 	REG_WR(sc, BCE_NVM_WRITE, val32);
1624 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1625 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1626 
1627 	/* Wait for completion. */
1628 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1629 		DELAY(5);
1630 
1631 		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
1632 			break;
1633 	}
1634 	if (j >= NVRAM_TIMEOUT_COUNT) {
1635 		BCE_PRINTF("%s(%d): Timeout error writing NVRAM at offset 0x%08X\n",
1636 			__FILE__, __LINE__, offset);
1637 		return EBUSY;
1638 	}
1639 
1640 	return 0;
1641 }
1642 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1643 
1644 
1645 /****************************************************************************/
1646 /* Initialize NVRAM access.                                                 */
1647 /*                                                                          */
1648 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1649 /* access that device.                                                      */
1650 /*                                                                          */
1651 /* Returns:                                                                 */
1652 /*   0 on success, positive value on failure.                               */
1653 /****************************************************************************/
1654 static int
1655 bce_init_nvram(struct bce_softc *sc)
1656 {
1657 	u32 val;
1658 	int j, entry_count, rc;
1659 	struct flash_spec *flash;
1660 
1661 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Entering %s()\n", __FUNCTION__);
1662 
1663 	/* Determine the selected interface. */
1664 	val = REG_RD(sc, BCE_NVM_CFG1);
1665 
1666 	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1667 
1668 	rc = 0;
1669 
1670 	/*
1671 	 * Flash reconfiguration is required to support additional
1672 	 * NVRAM devices not directly supported in hardware.
1673 	 * Check if the flash interface was reconfigured
1674 	 * by the bootcode.
1675 	 */
1676 
1677 	if (val & 0x40000000) {
1678 		/* Flash interface reconfigured by bootcode. */
1679 
1680 		DBPRINT(sc,BCE_INFO_LOAD,
1681 			"bce_init_nvram(): Flash WAS reconfigured.\n");
1682 
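		/* Find the flash_table entry whose backup strapping matches the value. */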
1683 		for (j = 0, flash = &flash_table[0]; j < entry_count;
1684 		     j++, flash++) {
1685 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
1686 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1687 				sc->bce_flash_info = flash;
1688 				break;
1689 			}
1690 		}
1691 	} else {
1692 		/* Flash interface not yet reconfigured. */
1693 		u32 mask;
1694 
1695 		DBPRINT(sc,BCE_INFO_LOAD,
1696 			"bce_init_nvram(): Flash was NOT reconfigured.\n");
1697 
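		/* Bit 23 selects whether the backup strapping mask is used. */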
1698 		if (val & (1 << 23))
1699 			mask = FLASH_BACKUP_STRAP_MASK;
1700 		else
1701 			mask = FLASH_STRAP_MASK;
1702 
1703 		/* Look for the matching NVRAM device configuration data. */
1704 		for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {
1705 
1706 			/* Check if the device matches any of the known devices. */
1707 			if ((val & mask) == (flash->strapping & mask)) {
1708 				/* Found a device match. */
1709 				sc->bce_flash_info = flash;
1710 
1711 				/* Request access to the flash interface. */
1712 				if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1713 					return rc;
1714 
1715 				/* Reconfigure the flash interface. */
1716 				bce_enable_nvram_access(sc);
1717 				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1718 				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1719 				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1720 				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1721 				bce_disable_nvram_access(sc);
1722 				bce_release_nvram_lock(sc);
1723 
1724 				break;
1725 			}
1726 		}
1727 	}
1728 
1729 	/* Check if a matching device was found. */
1730 	if (j == entry_count) {
1731 		sc->bce_flash_info = NULL;
1732 		BCE_PRINTF("%s(%d): Unknown Flash NVRAM found!\n",
1733 			__FILE__, __LINE__);
1734 		rc = ENODEV;
1735 	}
1736 
1737 	/* Write the flash config data to the shared memory interface. */
1738 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2);
1739 	val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1740 	if (val)
1741 		sc->bce_flash_size = val;
1742 	else
1743 		sc->bce_flash_size = sc->bce_flash_info->total_size;
1744 
1745 	DBPRINT(sc, BCE_INFO_LOAD, "bce_init_nvram() flash->total_size = 0x%08X\n",
1746 		sc->bce_flash_info->total_size);
1747 
1748 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Exiting %s()\n", __FUNCTION__);
1749 
1750 	return rc;
1751 }
1752 
1753 
1754 /****************************************************************************/
1755 /* Read an arbitrary range of data from NVRAM.                              */
1756 /*                                                                          */
1757 /* Prepares the NVRAM interface for access and reads the requested data     */
1758 /* into the supplied buffer.                                                */
1759 /*                                                                          */
1760 /* Returns:                                                                 */
1761 /*   0 on success and the data read, positive value on failure.             */
1762 /****************************************************************************/
1763 static int
1764 bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf,
1765 	int buf_size)
1766 {
1767 	int rc = 0;
1768 	u32 cmd_flags, offset32, len32, extra;
1769 
1770 	if (buf_size == 0)
1771 		return 0;
1772 
1773 	/* Request access to the flash interface. */
1774 	if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1775 		return rc;
1776 
1777 	/* Enable access to flash interface */
1778 	bce_enable_nvram_access(sc);
1779 
1780 	len32 = buf_size;
1781 	offset32 = offset;
1782 	extra = 0;
1783 
1784 	cmd_flags = 0;
1785 
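	/* Handle a read that does not begin on a dword boundary. */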
1786 	if (offset32 & 3) {
1787 		u8 buf[4];
1788 		u32 pre_len;
1789 
1790 		offset32 &= ~3;
1791 		pre_len = 4 - (offset & 3);
1792 
1793 		if (pre_len >= len32) {
1794 			pre_len = len32;
1795 			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1796 		}
1797 		else {
1798 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1799 		}
1800 
1801 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1802 
1803 		if (rc)
1804 			return rc;
1805 
1806 		memcpy(ret_buf, buf + (offset & 3), pre_len);
1807 
1808 		offset32 += 4;
1809 		ret_buf += pre_len;
1810 		len32 -= pre_len;
1811 	}
1812 
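	/* Round the remaining length up to a dword multiple, noting the extra bytes. */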
1813 	if (len32 & 3) {
1814 		extra = 4 - (len32 & 3);
1815 		len32 = (len32 + 4) & ~3;
1816 	}
1817 
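	/* Read the remaining data a dword at a time, flagging the first and last commands. */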
1818 	if (len32 == 4) {
1819 		u8 buf[4];
1820 
1821 		if (cmd_flags)
1822 			cmd_flags = BCE_NVM_COMMAND_LAST;
1823 		else
1824 			cmd_flags = BCE_NVM_COMMAND_FIRST |
1825 				    BCE_NVM_COMMAND_LAST;
1826 
1827 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1828 
1829 		memcpy(ret_buf, buf, 4 - extra);
1830 	}
1831 	else if (len32 > 0) {
1832 		u8 buf[4];
1833 
1834 		/* Read the first word. */
1835 		if (cmd_flags)
1836 			cmd_flags = 0;
1837 		else
1838 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1839 
1840 		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1841 
1842 		/* Advance to the next dword. */
1843 		offset32 += 4;
1844 		ret_buf += 4;
1845 		len32 -= 4;
1846 
1847 		while (len32 > 4 && rc == 0) {
1848 			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1849 
1850 			/* Advance to the next dword. */
1851 			offset32 += 4;
1852 			ret_buf += 4;
1853 			len32 -= 4;
1854 		}
1855 
1856 		if (rc)
1857 			return rc;
1858 
1859 		cmd_flags = BCE_NVM_COMMAND_LAST;
1860 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1861 
1862 		memcpy(ret_buf, buf, 4 - extra);
1863 	}
1864 
1865 	/* Disable access to flash interface and release the lock. */
1866 	bce_disable_nvram_access(sc);
1867 	bce_release_nvram_lock(sc);
1868 
1869 	return rc;
1870 }
1871 
1872 
1873 #ifdef BCE_NVRAM_WRITE_SUPPORT
1874 /****************************************************************************/
1875 /* Write an arbitrary range of data to NVRAM.                               */
1876 /*                                                                          */
1877 /* Prepares the NVRAM interface for write access and writes the requested   */
1878 /* data from the supplied buffer.  The caller is responsible for            */
1879 /* calculating any appropriate CRCs.                                        */
1880 /*                                                                          */
1881 /* Returns:                                                                 */
1882 /*   0 on success, positive value on failure.                               */
1883 /****************************************************************************/
1884 static int
1885 bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf,
1886 	int buf_size)
1887 {
1888 	u32 written, offset32, len32;
1889 	u8 *buf, start[4], end[4];
1890 	int rc = 0;
1891 	int align_start, align_end;
1892 
1893 	buf = data_buf;
1894 	offset32 = offset;
1895 	len32 = buf_size;
1896 	align_start = align_end = 0;
1897 
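	/* Preserve the leading bytes of a write that starts off a dword boundary. */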
1898 	if ((align_start = (offset32 & 3))) {
1899 		offset32 &= ~3;
1900 		len32 += align_start;
1901 		if ((rc = bce_nvram_read(sc, offset32, start, 4)))
1902 			return rc;
1903 	}
1904 
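	/* Likewise preserve the trailing bytes of a write that ends off a dword boundary. */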
1905 	if (len32 & 3) {
1906 		if ((len32 > 4) || !align_start) {
1907 			align_end = 4 - (len32 & 3);
1908 			len32 += align_end;
1909 			if ((rc = bce_nvram_read(sc, offset32 + len32 - 4,
1910 				end, 4))) {
1911 				return rc;
1912 			}
1913 		}
1914 	}
1915 
1916 	if (align_start || align_end) {
1917 		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
1918 		if (buf == NULL)
1919 			return ENOMEM;
1920 		if (align_start) {
1921 			memcpy(buf, start, 4);
1922 		}
1923 		if (align_end) {
1924 			memcpy(buf + len32 - 4, end, 4);
1925 		}
1926 		memcpy(buf + align_start, data_buf, buf_size);
1927 	}
1928 
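	/* Write the (now dword-aligned) data one flash page at a time. */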
1929 	written = 0;
1930 	while ((written < len32) && (rc == 0)) {
1931 		u32 page_start, page_end, data_start, data_end;
1932 		u32 addr, cmd_flags;
1933 		int i;
1934 		u8 flash_buffer[264];
1935 
1936 		/* Find the page_start addr */
1937 		page_start = offset32 + written;
1938 		page_start -= (page_start % sc->bce_flash_info->page_size);
1939 		/* Find the page_end addr */
1940 		page_end = page_start + sc->bce_flash_info->page_size;
1941 		/* Find the data_start addr */
1942 		data_start = (written == 0) ? offset32 : page_start;
1943 		/* Find the data_end addr */
1944 		data_end = (page_end > offset32 + len32) ?
1945 			(offset32 + len32) : page_end;
1946 
1947 		/* Request access to the flash interface. */
1948 		if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1949 			goto nvram_write_end;
1950 
1951 		/* Enable access to flash interface */
1952 		bce_enable_nvram_access(sc);
1953 
1954 		cmd_flags = BCE_NVM_COMMAND_FIRST;
1955 		if (sc->bce_flash_info->buffered == 0) {
1956 			int j;
1957 
1958 			/* Read the whole page into the buffer
1959 			 * (non-buffered flash only) */
1960 			for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
1961 				if (j == (sc->bce_flash_info->page_size - 4)) {
1962 					cmd_flags |= BCE_NVM_COMMAND_LAST;
1963 				}
1964 				rc = bce_nvram_read_dword(sc,
1965 					page_start + j,
1966 					&flash_buffer[j],
1967 					cmd_flags);
1968 
1969 				if (rc)
1970 					goto nvram_write_end;
1971 
1972 				cmd_flags = 0;
1973 			}
1974 		}
1975 
1976 		/* Enable writes to flash interface (unlock write-protect) */
1977 		if ((rc = bce_enable_nvram_write(sc)) != 0)
1978 			goto nvram_write_end;
1979 
1980 		/* Erase the page */
1981 		if ((rc = bce_nvram_erase_page(sc, page_start)) != 0)
1982 			goto nvram_write_end;
1983 
1984 		/* Re-enable writes to flash for the actual data write. */
1985 		bce_enable_nvram_write(sc);
1986 
1987 		/* Loop to write back the buffer data from page_start to
1988 		 * data_start */
1989 		i = 0;
1990 		if (sc->bce_flash_info->buffered == 0) {
1991 			for (addr = page_start; addr < data_start;
1992 				addr += 4, i += 4) {
1993 
1994 				rc = bce_nvram_write_dword(sc, addr,
1995 					&flash_buffer[i], cmd_flags);
1996 
1997 				if (rc != 0)
1998 					goto nvram_write_end;
1999 
2000 				cmd_flags = 0;
2001 			}
2002 		}
2003 
2004 		/* Loop to write the new data from data_start to data_end */
2005 		for (addr = data_start; addr < data_end; addr += 4, i++) {
2006 			if ((addr == page_end - 4) ||
2007 				((sc->bce_flash_info->buffered) &&
2008 				 (addr == data_end - 4))) {
2009 
2010 				cmd_flags |= BCE_NVM_COMMAND_LAST;
2011 			}
2012 			rc = bce_nvram_write_dword(sc, addr, buf,
2013 				cmd_flags);
2014 
2015 			if (rc != 0)
2016 				goto nvram_write_end;
2017 
2018 			cmd_flags = 0;
2019 			buf += 4;
2020 		}
2021 
2022 		/* Loop to write back the buffer data from data_end
2023 		 * to page_end */
2024 		if (sc->bce_flash_info->buffered == 0) {
2025 			for (addr = data_end; addr < page_end;
2026 				addr += 4, i += 4) {
2027 
2028 				if (addr == page_end - 4) {
2029 					cmd_flags = BCE_NVM_COMMAND_LAST;
2030 				}
2031 				rc = bce_nvram_write_dword(sc, addr,
2032 					&flash_buffer[i], cmd_flags);
2033 
2034 				if (rc != 0)
2035 					goto nvram_write_end;
2036 
2037 				cmd_flags = 0;
2038 			}
2039 		}
2040 
2041 		/* Disable writes to flash interface (lock write-protect) */
2042 		bce_disable_nvram_write(sc);
2043 
2044 		/* Disable access to flash interface */
2045 		bce_disable_nvram_access(sc);
2046 		bce_release_nvram_lock(sc);
2047 
2048 		/* Increment written */
2049 		written += data_end - data_start;
2050 	}
2051 
2052 nvram_write_end:
2053 	if (align_start || align_end)
2054 		free(buf, M_DEVBUF);
2055 
2056 	return rc;
2057 }
2058 #endif /* BCE_NVRAM_WRITE_SUPPORT */
2059 
2060 
2061 /****************************************************************************/
2062 /* Verifies that NVRAM is accessible and contains valid data.               */
2063 /*                                                                          */
2064 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
2065 /* correct.                                                                 */
2066 /*                                                                          */
2067 /* Returns:                                                                 */
2068 /*   0 on success, positive value on failure.                               */
2069 /****************************************************************************/
2070 static int
2071 bce_nvram_test(struct bce_softc *sc)
2072 {
2073 	u32 buf[BCE_NVRAM_SIZE / 4];
2074 	u8 *data = (u8 *) buf;
2075 	int rc = 0;
2076 	u32 magic, csum;
2077 
2078 
2079 	/*
2080 	 * Check that the device NVRAM is valid by reading
2081 	 * the magic value at offset 0.
2082 	 */
2083 	if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0)
2084 		goto bce_nvram_test_done;
2085 
2086 
2087 	magic = bce_be32toh(buf[0]);
2088 	if (magic != BCE_NVRAM_MAGIC) {
2089 		rc = ENODEV;
2090 		BCE_PRINTF("%s(%d): Invalid NVRAM magic value! Expected: 0x%08X, "
2091 			"Found: 0x%08X\n",
2092 			__FILE__, __LINE__, BCE_NVRAM_MAGIC, magic);
2093 		goto bce_nvram_test_done;
2094 	}
2095 
2096 	/*
2097 	 * Verify that the device NVRAM includes valid
2098 	 * configuration data.
2099 	 */
2100 	if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0)
2101 		goto bce_nvram_test_done;
2102 
2103 	csum = ether_crc32_le(data, 0x100);
2104 	if (csum != BCE_CRC32_RESIDUAL) {
2105 		rc = ENODEV;
2106 		BCE_PRINTF("%s(%d): Invalid Manufacturing Information NVRAM CRC! "
2107 			"Expected: 0x%08X, Found: 0x%08X\n",
2108 			__FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
2109 		goto bce_nvram_test_done;
2110 	}
2111 
2112 	csum = ether_crc32_le(data + 0x100, 0x100);
2113 	if (csum != BCE_CRC32_RESIDUAL) {
2114 		BCE_PRINTF("%s(%d): Invalid Feature Configuration Information "
2115 			"NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
2116 			__FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
2117 		rc = ENODEV;
2118 	}
2119 
2120 bce_nvram_test_done:
2121 	return rc;
2122 }
2123 
2124 
2125 /****************************************************************************/
2126 /* Free any DMA memory owned by the driver.                                 */
2127 /*                                                                          */
2128 /* Scans through each data structure that requires DMA memory and frees    */
2129 /* the memory if allocated.                                                 */
2130 /*                                                                          */
2131 /* Returns:                                                                 */
2132 /*   Nothing.                                                               */
2133 /****************************************************************************/
2134 static void
2135 bce_dma_free(struct bce_softc *sc)
2136 {
2137 	int i;
2138 
2139 	DBPRINT(sc,BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2140 
2141 	/* Destroy the status block. */
2142 	if (sc->status_block != NULL) {
2143 		bus_dmamem_free(
2144 			sc->status_tag,
2145 		    sc->status_block,
2146 		    sc->status_map);
2147 		sc->status_block = NULL;
2148 	}
2149 
2150 	if (sc->status_map != NULL) {
2151 		bus_dmamap_unload(
2152 			sc->status_tag,
2153 		    sc->status_map);
2154 		bus_dmamap_destroy(sc->status_tag,
2155 		    sc->status_map);
2156 		sc->status_map = NULL;
2157 	}
2158 
2159 	if (sc->status_tag != NULL) {
2160 		bus_dma_tag_destroy(sc->status_tag);
2161 		sc->status_tag = NULL;
2162 	}
2163 
2164 
2165 	/* Destroy the statistics block. */
2166 	if (sc->stats_block != NULL) {
2167 		bus_dmamem_free(
2168 			sc->stats_tag,
2169 		    sc->stats_block,
2170 		    sc->stats_map);
2171 		sc->stats_block = NULL;
2172 	}
2173 
2174 	if (sc->stats_map != NULL) {
2175 		bus_dmamap_unload(
2176 			sc->stats_tag,
2177 		    sc->stats_map);
2178 		bus_dmamap_destroy(sc->stats_tag,
2179 		    sc->stats_map);
2180 		sc->stats_map = NULL;
2181 	}
2182 
2183 	if (sc->stats_tag != NULL) {
2184 		bus_dma_tag_destroy(sc->stats_tag);
2185 		sc->stats_tag = NULL;
2186 	}
2187 
2188 
2189 	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
2190 	for (i = 0; i < TX_PAGES; i++ ) {
2191 		if (sc->tx_bd_chain[i] != NULL) {
2192 			bus_dmamem_free(
2193 				sc->tx_bd_chain_tag,
2194 			    sc->tx_bd_chain[i],
2195 			    sc->tx_bd_chain_map[i]);
2196 			sc->tx_bd_chain[i] = NULL;
2197 		}
2198 
2199 		if (sc->tx_bd_chain_map[i] != NULL) {
2200 			bus_dmamap_unload(
2201 				sc->tx_bd_chain_tag,
2202 		    	sc->tx_bd_chain_map[i]);
2203 			bus_dmamap_destroy(
2204 				sc->tx_bd_chain_tag,
2205 			    sc->tx_bd_chain_map[i]);
2206 			sc->tx_bd_chain_map[i] = NULL;
2207 		}
2208 	}
2209 
2210 	/* Destroy the TX buffer descriptor tag. */
2211 	if (sc->tx_bd_chain_tag != NULL) {
2212 		bus_dma_tag_destroy(sc->tx_bd_chain_tag);
2213 		sc->tx_bd_chain_tag = NULL;
2214 	}
2215 
2216 
2217 	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
2218 	for (i = 0; i < RX_PAGES; i++ ) {
2219 		if (sc->rx_bd_chain[i] != NULL) {
2220 			bus_dmamem_free(
2221 				sc->rx_bd_chain_tag,
2222 			    sc->rx_bd_chain[i],
2223 			    sc->rx_bd_chain_map[i]);
2224 			sc->rx_bd_chain[i] = NULL;
2225 		}
2226 
2227 		if (sc->rx_bd_chain_map[i] != NULL) {
2228 			bus_dmamap_unload(
2229 				sc->rx_bd_chain_tag,
2230 		    	sc->rx_bd_chain_map[i]);
2231 			bus_dmamap_destroy(
2232 				sc->rx_bd_chain_tag,
2233 			    sc->rx_bd_chain_map[i]);
2234 			sc->rx_bd_chain_map[i] = NULL;
2235 		}
2236 	}
2237 
2238 	/* Destroy the RX buffer descriptor tag. */
2239 	if (sc->rx_bd_chain_tag != NULL) {
2240 		bus_dma_tag_destroy(sc->rx_bd_chain_tag);
2241 		sc->rx_bd_chain_tag = NULL;
2242 	}
2243 
2244 
2245 	/* Free, unmap and destroy all page buffer descriptor chain pages. */
2246 	for (i = 0; i < PG_PAGES; i++ ) {
2247 		if (sc->pg_bd_chain[i] != NULL) {
2248 			bus_dmamem_free(
2249 				sc->pg_bd_chain_tag,
2250 			    sc->pg_bd_chain[i],
2251 			    sc->pg_bd_chain_map[i]);
2252 			sc->pg_bd_chain[i] = NULL;
2253 		}
2254 
2255 		if (sc->pg_bd_chain_map[i] != NULL) {
2256 			bus_dmamap_unload(
2257 				sc->pg_bd_chain_tag,
2258 		    	sc->pg_bd_chain_map[i]);
2259 			bus_dmamap_destroy(
2260 				sc->pg_bd_chain_tag,
2261 			    sc->pg_bd_chain_map[i]);
2262 			sc->pg_bd_chain_map[i] = NULL;
2263 		}
2264 	}
2265 
2266 	/* Destroy the page buffer descriptor tag. */
2267 	if (sc->pg_bd_chain_tag != NULL) {
2268 		bus_dma_tag_destroy(sc->pg_bd_chain_tag);
2269 		sc->pg_bd_chain_tag = NULL;
2270 	}
2271 
2272 
2273 	/* Unload and destroy the TX mbuf maps. */
2274 	for (i = 0; i < TOTAL_TX_BD; i++) {
2275 		if (sc->tx_mbuf_map[i] != NULL) {
2276 			bus_dmamap_unload(sc->tx_mbuf_tag,
2277 				sc->tx_mbuf_map[i]);
2278 			bus_dmamap_destroy(sc->tx_mbuf_tag,
2279 	 			sc->tx_mbuf_map[i]);
2280 			sc->tx_mbuf_map[i] = NULL;
2281 		}
2282 	}
2283 
2284 	/* Destroy the TX mbuf tag. */
2285 	if (sc->tx_mbuf_tag != NULL) {
2286 		bus_dma_tag_destroy(sc->tx_mbuf_tag);
2287 		sc->tx_mbuf_tag = NULL;
2288 	}
2289 
2290 	/* Unload and destroy the RX mbuf maps. */
2291 	for (i = 0; i < TOTAL_RX_BD; i++) {
2292 		if (sc->rx_mbuf_map[i] != NULL) {
2293 			bus_dmamap_unload(sc->rx_mbuf_tag,
2294 				sc->rx_mbuf_map[i]);
2295 			bus_dmamap_destroy(sc->rx_mbuf_tag,
2296 	 			sc->rx_mbuf_map[i]);
2297 			sc->rx_mbuf_map[i] = NULL;
2298 		}
2299 	}
2300 
2301 	/* Destroy the RX mbuf tag. */
2302 	if (sc->rx_mbuf_tag != NULL) {
2303 		bus_dma_tag_destroy(sc->rx_mbuf_tag);
2304 		sc->rx_mbuf_tag = NULL;
2305 	}
2306 
2307 	/* Unload and destroy the page mbuf maps. */
2308 	for (i = 0; i < TOTAL_PG_BD; i++) {
2309 		if (sc->pg_mbuf_map[i] != NULL) {
2310 			bus_dmamap_unload(sc->pg_mbuf_tag,
2311 				sc->pg_mbuf_map[i]);
2312 			bus_dmamap_destroy(sc->pg_mbuf_tag,
2313 	 			sc->pg_mbuf_map[i]);
2314 			sc->pg_mbuf_map[i] = NULL;
2315 		}
2316 	}
2317 
2318 	/* Destroy the page mbuf tag. */
2319 	if (sc->pg_mbuf_tag != NULL) {
2320 		bus_dma_tag_destroy(sc->pg_mbuf_tag);
2321 		sc->pg_mbuf_tag = NULL;
2322 	}
2323 
2324 	/* Destroy the parent tag */
2325 	if (sc->parent_tag != NULL) {
2326 		bus_dma_tag_destroy(sc->parent_tag);
2327 		sc->parent_tag = NULL;
2328 	}
2329 
2330 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2331 
2332 }
2333 
2334 
2335 /****************************************************************************/
2336 /* Get DMA memory from the OS.                                              */
2337 /*                                                                          */
2338 /* Validates that the OS has provided DMA buffers in response to a          */
2339 /* bus_dmamap_load() call and saves the physical address of those buffers.  */
2340 /* When this callback is used, bus_dmamap_load() itself returns 0, so any   */
2341 /* mapping failure is reported back to the caller by clearing the returned  */
2342 /* bus address.                                                             */
2343 /*                                                                          */
2344 /* Returns:                                                                 */
2345 /*   Nothing.                                                               */
2346 /****************************************************************************/
2347 static void
2348 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2349 {
2350 	bus_addr_t *busaddr = arg;
2351 
2352 	/* Simulate a mapping failure. */
2353 	DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
2354 		printf("bce: %s(%d): Simulating DMA mapping error.\n",
2355 			__FILE__, __LINE__);
2356 		error = ENOMEM);
2357 
2358 	/* Check for an error and signal the caller that an error occurred. */
2359 	if (error) {
2360 		printf("bce %s(%d): DMA mapping error! error = %d, "
2361 		    "nseg = %d\n", __FILE__, __LINE__, error, nseg);
2362 		*busaddr = 0;
2363 		return;
2364 	}
2365 
2366 	*busaddr = segs->ds_addr;
2367 	return;
2368 }
2369 
2370 
2371 /****************************************************************************/
2372 /* Allocate any DMA memory needed by the driver.                            */
2373 /*                                                                          */
2374 /* Allocates DMA memory needed for the various global structures needed by  */
2375 /* hardware.                                                                */
2376 /*                                                                          */
2377 /* Returns:                                                                 */
2378 /*   0 for success, positive value for failure.                             */
2379 /****************************************************************************/
2380 static int
2381 bce_dma_alloc(device_t dev)
2382 {
2383 	struct bce_softc *sc;
2384 	int i, error, rc = 0;
2385 	bus_addr_t busaddr;
2386 	bus_size_t max_size, max_seg_size;
2387 	int max_segments;
2388 
2389 	sc = device_get_softc(dev);
2390 
2391 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2392 
2393 	/*
2394 	 * Allocate the parent bus DMA tag appropriate for PCI.
2395 	 */
2396 	if (bus_dma_tag_create(NULL,
2397 			1,
2398 			BCE_DMA_BOUNDARY,
2399 			sc->max_bus_addr,
2400 			BUS_SPACE_MAXADDR,
2401 			NULL, NULL,
2402 			MAXBSIZE,
2403 			BUS_SPACE_UNRESTRICTED,
2404 			BUS_SPACE_MAXSIZE_32BIT,
2405 			0,
2406 			NULL, NULL,
2407 			&sc->parent_tag)) {
2408 		BCE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n",
2409 			__FILE__, __LINE__);
2410 		rc = ENOMEM;
2411 		goto bce_dma_alloc_exit;
2412 	}
2413 
2414 	/*
2415 	 * Create a DMA tag for the status block, allocate and clear the
2416 	 * memory, map the memory into DMA space, and fetch the physical
2417 	 * address of the block.
2418 	 */
2419 	if (bus_dma_tag_create(sc->parent_tag,
2420 	    	BCE_DMA_ALIGN,
2421 	    	BCE_DMA_BOUNDARY,
2422 	    	sc->max_bus_addr,
2423 	    	BUS_SPACE_MAXADDR,
2424 	    	NULL, NULL,
2425 	    	BCE_STATUS_BLK_SZ,
2426 	    	1,
2427 	    	BCE_STATUS_BLK_SZ,
2428 	    	0,
2429 	    	NULL, NULL,
2430 	    	&sc->status_tag)) {
2431 		BCE_PRINTF("%s(%d): Could not allocate status block DMA tag!\n",
2432 			__FILE__, __LINE__);
2433 		rc = ENOMEM;
2434 		goto bce_dma_alloc_exit;
2435 	}
2436 
2437 	if(bus_dmamem_alloc(sc->status_tag,
2438 	    	(void **)&sc->status_block,
2439 	    	BUS_DMA_NOWAIT,
2440 	    	&sc->status_map)) {
2441 		BCE_PRINTF("%s(%d): Could not allocate status block DMA memory!\n",
2442 			__FILE__, __LINE__);
2443 		rc = ENOMEM;
2444 		goto bce_dma_alloc_exit;
2445 	}
2446 
2447 	bzero((char *)sc->status_block, BCE_STATUS_BLK_SZ);
2448 
2449 	error = bus_dmamap_load(sc->status_tag,
2450 	    	sc->status_map,
2451 	    	sc->status_block,
2452 	    	BCE_STATUS_BLK_SZ,
2453 	    	bce_dma_map_addr,
2454 	    	&busaddr,
2455 	    	BUS_DMA_NOWAIT);
2456 
2457 	if (error) {
2458 		BCE_PRINTF("%s(%d): Could not map status block DMA memory!\n",
2459 			__FILE__, __LINE__);
2460 		rc = ENOMEM;
2461 		goto bce_dma_alloc_exit;
2462 	}
2463 
2464 	sc->status_block_paddr = busaddr;
2465 	/* DRC - Fix for 64 bit addresses. */
2466 	DBPRINT(sc, BCE_INFO, "status_block_paddr = 0x%08X\n",
2467 		(u32) sc->status_block_paddr);
2468 
2469 	/*
2470 	 * Create a DMA tag for the statistics block, allocate and clear the
2471 	 * memory, map the memory into DMA space, and fetch the physical
2472 	 * address of the block.
2473 	 */
2474 	if (bus_dma_tag_create(sc->parent_tag,
2475 	    	BCE_DMA_ALIGN,
2476 	    	BCE_DMA_BOUNDARY,
2477 	    	sc->max_bus_addr,
2478 	    	BUS_SPACE_MAXADDR,
2479 	    	NULL, NULL,
2480 	    	BCE_STATS_BLK_SZ,
2481 	    	1,
2482 	    	BCE_STATS_BLK_SZ,
2483 	    	0,
2484 	    	NULL, NULL,
2485 	    	&sc->stats_tag)) {
2486 		BCE_PRINTF("%s(%d): Could not allocate statistics block DMA tag!\n",
2487 			__FILE__, __LINE__);
2488 		rc = ENOMEM;
2489 		goto bce_dma_alloc_exit;
2490 	}
2491 
2492 	if (bus_dmamem_alloc(sc->stats_tag,
2493 	    	(void **)&sc->stats_block,
2494 	    	BUS_DMA_NOWAIT,
2495 	    	&sc->stats_map)) {
2496 		BCE_PRINTF("%s(%d): Could not allocate statistics block DMA memory!\n",
2497 			__FILE__, __LINE__);
2498 		rc = ENOMEM;
2499 		goto bce_dma_alloc_exit;
2500 	}
2501 
2502 	bzero((char *)sc->stats_block, BCE_STATS_BLK_SZ);
2503 
2504 	error = bus_dmamap_load(sc->stats_tag,
2505 	    	sc->stats_map,
2506 	    	sc->stats_block,
2507 	    	BCE_STATS_BLK_SZ,
2508 	    	bce_dma_map_addr,
2509 	    	&busaddr,
2510 	    	BUS_DMA_NOWAIT);
2511 
2512 	if(error) {
2513 		BCE_PRINTF("%s(%d): Could not map statistics block DMA memory!\n",
2514 			__FILE__, __LINE__);
2515 		rc = ENOMEM;
2516 		goto bce_dma_alloc_exit;
2517 	}
2518 
2519 	sc->stats_block_paddr = busaddr;
2520 	/* DRC - Fix for 64 bit address. */
2521 	DBPRINT(sc,BCE_INFO, "stats_block_paddr = 0x%08X\n",
2522 		(u32) sc->stats_block_paddr);
2523 
2524 	/*
2525 	 * Create a DMA tag for the TX buffer descriptor chain,
2526 	 * allocate and clear the  memory, and fetch the
2527 	 * physical address of the block.
2528 	 */
2529 	if(bus_dma_tag_create(sc->parent_tag,
2530 			BCM_PAGE_SIZE,
2531 		    BCE_DMA_BOUNDARY,
2532 			sc->max_bus_addr,
2533 			BUS_SPACE_MAXADDR,
2534 			NULL, NULL,
2535 			BCE_TX_CHAIN_PAGE_SZ,
2536 			1,
2537 			BCE_TX_CHAIN_PAGE_SZ,
2538 			0,
2539 			NULL, NULL,
2540 			&sc->tx_bd_chain_tag)) {
2541 		BCE_PRINTF("%s(%d): Could not allocate TX descriptor chain DMA tag!\n",
2542 			__FILE__, __LINE__);
2543 		rc = ENOMEM;
2544 		goto bce_dma_alloc_exit;
2545 	}
2546 
2547 	for (i = 0; i < TX_PAGES; i++) {
2548 
2549 		if(bus_dmamem_alloc(sc->tx_bd_chain_tag,
2550 	    		(void **)&sc->tx_bd_chain[i],
2551 	    		BUS_DMA_NOWAIT,
2552 		    	&sc->tx_bd_chain_map[i])) {
2553 			BCE_PRINTF("%s(%d): Could not allocate TX descriptor "
2554 				"chain DMA memory!\n", __FILE__, __LINE__);
2555 			rc = ENOMEM;
2556 			goto bce_dma_alloc_exit;
2557 		}
2558 
2559 		error = bus_dmamap_load(sc->tx_bd_chain_tag,
2560 	    		sc->tx_bd_chain_map[i],
2561 	    		sc->tx_bd_chain[i],
2562 		    	BCE_TX_CHAIN_PAGE_SZ,
2563 		    	bce_dma_map_addr,
2564 	    		&busaddr,
2565 	    		BUS_DMA_NOWAIT);
2566 
2567 		if (error) {
2568 			BCE_PRINTF("%s(%d): Could not map TX descriptor chain DMA memory!\n",
2569 				__FILE__, __LINE__);
2570 			rc = ENOMEM;
2571 			goto bce_dma_alloc_exit;
2572 		}
2573 
2574 		sc->tx_bd_chain_paddr[i] = busaddr;
2575 		/* DRC - Fix for 64 bit systems. */
2576 		DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2577 			i, (u32) sc->tx_bd_chain_paddr[i]);
2578 	}
2579 
2580 	/* Check the required size before mapping to conserve resources. */
2581 	if (bce_tso_enable) {
2582 		max_size     = BCE_TSO_MAX_SIZE;
2583 		max_segments = BCE_MAX_SEGMENTS;
2584 		max_seg_size = BCE_TSO_MAX_SEG_SIZE;
2585 	} else {
2586 		max_size     = MCLBYTES * BCE_MAX_SEGMENTS;
2587 		max_segments = BCE_MAX_SEGMENTS;
2588 		max_seg_size = MCLBYTES;
2589 	}
2590 
2591 	/* Create a DMA tag for TX mbufs. */
2592 	if (bus_dma_tag_create(sc->parent_tag,
2593 			1,
2594 			BCE_DMA_BOUNDARY,
2595 			sc->max_bus_addr,
2596 			BUS_SPACE_MAXADDR,
2597 			NULL, NULL,
2598 			max_size,
2599 			max_segments,
2600 			max_seg_size,
2601 			0,
2602 			NULL, NULL,
2603 			&sc->tx_mbuf_tag)) {
2604 		BCE_PRINTF("%s(%d): Could not allocate TX mbuf DMA tag!\n",
2605 			__FILE__, __LINE__);
2606 		rc = ENOMEM;
2607 		goto bce_dma_alloc_exit;
2608 	}
2609 
2610 	/* Create DMA maps for the TX mbufs clusters. */
2611 	for (i = 0; i < TOTAL_TX_BD; i++) {
2612 		if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT,
2613 			&sc->tx_mbuf_map[i])) {
2614 			BCE_PRINTF("%s(%d): Unable to create TX mbuf DMA map!\n",
2615 				__FILE__, __LINE__);
2616 			rc = ENOMEM;
2617 			goto bce_dma_alloc_exit;
2618 		}
2619 	}
2620 
2621 	/*
2622 	 * Create a DMA tag for the RX buffer descriptor chain,
2623 	 * allocate and clear the memory, and fetch the physical
2624 	 * address of the blocks.
2625 	 */
2626 	if (bus_dma_tag_create(sc->parent_tag,
2627 			BCM_PAGE_SIZE,
2628 			BCE_DMA_BOUNDARY,
2629 			sc->max_bus_addr,
2630 			BUS_SPACE_MAXADDR,
2631 			NULL, NULL,
2632 			BCE_RX_CHAIN_PAGE_SZ,
2633 			1,
2634 			BCE_RX_CHAIN_PAGE_SZ,
2635 			0,
2636 			NULL, NULL,
2637 			&sc->rx_bd_chain_tag)) {
2638 		BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain DMA tag!\n",
2639 			__FILE__, __LINE__);
2640 		rc = ENOMEM;
2641 		goto bce_dma_alloc_exit;
2642 	}
2643 
2644 	for (i = 0; i < RX_PAGES; i++) {
2645 
2646 		if (bus_dmamem_alloc(sc->rx_bd_chain_tag,
2647 	    		(void **)&sc->rx_bd_chain[i],
2648 	    		BUS_DMA_NOWAIT,
2649 		    	&sc->rx_bd_chain_map[i])) {
2650 			BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain "
2651 				"DMA memory!\n", __FILE__, __LINE__);
2652 			rc = ENOMEM;
2653 			goto bce_dma_alloc_exit;
2654 		}
2655 
2656 		bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
2657 
2658 		error = bus_dmamap_load(sc->rx_bd_chain_tag,
2659 	    		sc->rx_bd_chain_map[i],
2660 	    		sc->rx_bd_chain[i],
2661 		    	BCE_RX_CHAIN_PAGE_SZ,
2662 		    	bce_dma_map_addr,
2663 	    		&busaddr,
2664 	    		BUS_DMA_NOWAIT);
2665 
2666 		if (error) {
2667 			BCE_PRINTF("%s(%d): Could not map RX descriptor chain DMA memory!\n",
2668 				__FILE__, __LINE__);
2669 			rc = ENOMEM;
2670 			goto bce_dma_alloc_exit;
2671 		}
2672 
2673 		sc->rx_bd_chain_paddr[i] = busaddr;
2674 		/* DRC - Fix for 64 bit systems. */
2675 		DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2676 			i, (u32) sc->rx_bd_chain_paddr[i]);
2677 	}
2678 
2679 	/*
2680 	 * Create a DMA tag for RX mbufs.
2681 	 */
2682 	max_size = max_seg_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ?
2683 		MCLBYTES : sc->rx_bd_mbuf_alloc_size);
2684 
2685 	if (bus_dma_tag_create(sc->parent_tag,
2686 			1,
2687 			BCE_DMA_BOUNDARY,
2688 			sc->max_bus_addr,
2689 			BUS_SPACE_MAXADDR,
2690 			NULL, NULL,
2691 			max_size,
2692 			1,
2693 			max_seg_size,
2694 			0,
2695 			NULL, NULL,
2696 	    	&sc->rx_mbuf_tag)) {
2697 		BCE_PRINTF("%s(%d): Could not allocate RX mbuf DMA tag!\n",
2698 			__FILE__, __LINE__);
2699 		rc = ENOMEM;
2700 		goto bce_dma_alloc_exit;
2701 	}
2702 
2703 	/* Create DMA maps for the RX mbuf clusters. */
2704 	for (i = 0; i < TOTAL_RX_BD; i++) {
2705 		if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT,
2706 				&sc->rx_mbuf_map[i])) {
2707 			BCE_PRINTF("%s(%d): Unable to create RX mbuf DMA map!\n",
2708 				__FILE__, __LINE__);
2709 			rc = ENOMEM;
2710 			goto bce_dma_alloc_exit;
2711 		}
2712 	}
2713 
2714 	/*
2715 	 * Create a DMA tag for the page buffer descriptor chain,
2716 	 * allocate and clear the memory, and fetch the physical
2717 	 * address of the blocks.
2718 	 */
2719 	if (bus_dma_tag_create(sc->parent_tag,
2720 			BCM_PAGE_SIZE,
2721 			BCE_DMA_BOUNDARY,
2722 			sc->max_bus_addr,
2723 			BUS_SPACE_MAXADDR,
2724 			NULL, NULL,
2725 			BCE_PG_CHAIN_PAGE_SZ,
2726 			1,
2727 			BCE_PG_CHAIN_PAGE_SZ,
2728 			0,
2729 			NULL, NULL,
2730 			&sc->pg_bd_chain_tag)) {
2731 		BCE_PRINTF("%s(%d): Could not allocate page descriptor chain DMA tag!\n",
2732 			__FILE__, __LINE__);
2733 		rc = ENOMEM;
2734 		goto bce_dma_alloc_exit;
2735 	}
2736 
2737 	for (i = 0; i < PG_PAGES; i++) {
2738 
2739 		if (bus_dmamem_alloc(sc->pg_bd_chain_tag,
2740 	    		(void **)&sc->pg_bd_chain[i],
2741 	    		BUS_DMA_NOWAIT,
2742 		    	&sc->pg_bd_chain_map[i])) {
2743 			BCE_PRINTF("%s(%d): Could not allocate page descriptor chain "
2744 				"DMA memory!\n", __FILE__, __LINE__);
2745 			rc = ENOMEM;
2746 			goto bce_dma_alloc_exit;
2747 		}
2748 
2749 		bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ);
2750 
2751 		error = bus_dmamap_load(sc->pg_bd_chain_tag,
2752 	    		sc->pg_bd_chain_map[i],
2753 	    		sc->pg_bd_chain[i],
2754 		    	BCE_PG_CHAIN_PAGE_SZ,
2755 		    	bce_dma_map_addr,
2756 	    		&busaddr,
2757 	    		BUS_DMA_NOWAIT);
2758 
2759 		if (error) {
2760 			BCE_PRINTF("%s(%d): Could not map page descriptor chain DMA memory!\n",
2761 				__FILE__, __LINE__);
2762 			rc = ENOMEM;
2763 			goto bce_dma_alloc_exit;
2764 		}
2765 
2766 		sc->pg_bd_chain_paddr[i] = busaddr;
2767 		/* DRC - Fix for 64 bit systems. */
2768 		DBPRINT(sc, BCE_INFO, "pg_bd_chain_paddr[%d] = 0x%08X\n",
2769 			i, (u32) sc->pg_bd_chain_paddr[i]);
2770 	}
2771 
2772 	/*
2773 	 * Create a DMA tag for page mbufs.
2774 	 */
2775 	max_size = max_seg_size = ((sc->pg_bd_mbuf_alloc_size < MCLBYTES) ?
2776 		MCLBYTES : sc->pg_bd_mbuf_alloc_size);
2777 
2778 	if (bus_dma_tag_create(sc->parent_tag,
2779 			1,
2780 			BCE_DMA_BOUNDARY,
2781 			sc->max_bus_addr,
2782 			BUS_SPACE_MAXADDR,
2783 			NULL, NULL,
2784 			max_size,
2785 			1,
2786 			max_seg_size,
2787 			0,
2788 			NULL, NULL,
2789 	    	&sc->pg_mbuf_tag)) {
2790 		BCE_PRINTF("%s(%d): Could not allocate page mbuf DMA tag!\n",
2791 			__FILE__, __LINE__);
2792 		rc = ENOMEM;
2793 		goto bce_dma_alloc_exit;
2794 	}
2795 
2796 	/* Create DMA maps for the page mbuf clusters. */
2797 	for (i = 0; i < TOTAL_PG_BD; i++) {
2798 		if (bus_dmamap_create(sc->pg_mbuf_tag, BUS_DMA_NOWAIT,
2799 				&sc->pg_mbuf_map[i])) {
2800 			BCE_PRINTF("%s(%d): Unable to create page mbuf DMA map!\n",
2801 				__FILE__, __LINE__);
2802 			rc = ENOMEM;
2803 			goto bce_dma_alloc_exit;
2804 		}
2805 	}
2806 
2807 bce_dma_alloc_exit:
2808 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2809 
2810 	return(rc);
2811 }
2812 
2813 
2814 /****************************************************************************/
2815 /* Release all resources used by the driver.                                */
2816 /*                                                                          */
2817 /* Releases all resources acquired by the driver including interrupts,      */
2818 /* interrupt handler, interfaces, mutexes, and DMA memory.                  */
2819 /*                                                                          */
2820 /* Returns:                                                                 */
2821 /*   Nothing.                                                               */
2822 /****************************************************************************/
2823 static void
2824 bce_release_resources(struct bce_softc *sc)
2825 {
2826 	device_t dev;
2827 
2828 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2829 
2830 	dev = sc->bce_dev;
2831 
2832 	bce_dma_free(sc);
2833 
2834 	if (sc->bce_intrhand != NULL) {
2835 		DBPRINT(sc, BCE_INFO_RESET, "Removing interrupt handler.\n");
2836 		bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
2837 	}
2838 
2839 	if (sc->bce_res_irq != NULL) {
2840 		DBPRINT(sc, BCE_INFO_RESET, "Releasing IRQ.\n");
2841 		bus_release_resource(dev, SYS_RES_IRQ, sc->bce_flags & BCE_USING_MSI_FLAG ? 1 : 0,
2842 			sc->bce_res_irq);
2843 	}
2844 
2845 	if (sc->bce_flags & BCE_USING_MSI_FLAG) {
2846 		DBPRINT(sc, BCE_INFO_RESET, "Releasing MSI vector.\n");
2847 		pci_release_msi(dev);
2848 	}
2849 
2850 	if (sc->bce_res_mem != NULL) {
2851 		DBPRINT(sc, BCE_INFO_RESET, "Releasing PCI memory.\n");
2852 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->bce_res_mem);
2853 	}
2854 
2855 	if (sc->bce_ifp != NULL) {
2856 		DBPRINT(sc, BCE_INFO_RESET, "Releasing IF.\n");
2857 		if_free(sc->bce_ifp);
2858 	}
2859 
2860 	if (mtx_initialized(&sc->bce_mtx))
2861 		BCE_LOCK_DESTROY(sc);
2862 
2863 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2864 
2865 }
2866 
2867 
2868 /****************************************************************************/
2869 /* Firmware synchronization.                                                */
2870 /*                                                                          */
2871 /* Before performing certain events such as a chip reset, synchronize with  */
2872 /* the firmware first.                                                      */
2873 /*                                                                          */
2874 /* Returns:                                                                 */
2875 /*   0 for success, positive value for failure.                             */
2876 /****************************************************************************/
2877 static int
2878 bce_fw_sync(struct bce_softc *sc, u32 msg_data)
2879 {
2880 	int i, rc = 0;
2881 	u32 val;
2882 
2883 	/* Don't waste any time if we've timed out before. */
2884 	if (sc->bce_fw_timed_out) {
2885 		rc = EBUSY;
2886 		goto bce_fw_sync_exit;
2887 	}
2888 
2889 	/* Increment the message sequence number. */
2890 	sc->bce_fw_wr_seq++;
2891 	msg_data |= sc->bce_fw_wr_seq;
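	/* The sequence bits let the ACK check below match the reply to this request. */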
2892 
2893  	DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data);
2894 
2895 	/* Send the message to the bootcode driver mailbox. */
2896 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2897 
2898 	/* Wait for the bootcode to acknowledge the message. */
2899 	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2900 		/* Check for a response in the bootcode firmware mailbox. */
2901 		val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB);
2902 		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2903 			break;
2904 		DELAY(1000);
2905 	}
2906 
2907 	/* If we've timed out, tell the bootcode that we've stopped waiting. */
2908 	if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) &&
2909 		((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) {
2910 
2911 		BCE_PRINTF("%s(%d): Firmware synchronization timeout! "
2912 			"msg_data = 0x%08X\n",
2913 			__FILE__, __LINE__, msg_data);
2914 
2915 		msg_data &= ~BCE_DRV_MSG_CODE;
2916 		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2917 
2918 		REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2919 
2920 		sc->bce_fw_timed_out = 1;
2921 		rc = EBUSY;
2922 	}
2923 
2924 bce_fw_sync_exit:
2925 	return (rc);
2926 }
2927 
2928 
2929 /****************************************************************************/
2930 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2931 /*                                                                          */
2932 /* Returns:                                                                 */
2933 /*   Nothing.                                                               */
2934 /****************************************************************************/
2935 static void
2936 bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code,
2937 	u32 rv2p_code_len, u32 rv2p_proc)
2938 {
2939 	int i;
2940 	u32 val;
2941 
2942 	/* Set the page size used by RV2P. */
2943 	if (rv2p_proc == RV2P_PROC2) {
2944 		BCE_RV2P_PROC2_CHG_MAX_BD_PAGE(USABLE_RX_BD_PER_PAGE);
2945 	}
2946 
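	/* Load the image into the processor as high/low 32-bit word pairs. */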
2947 	for (i = 0; i < rv2p_code_len; i += 8) {
2948 		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2949 		rv2p_code++;
2950 		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2951 		rv2p_code++;
2952 
2953 		if (rv2p_proc == RV2P_PROC1) {
2954 			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2955 			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2956 		}
2957 		else {
2958 			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2959 			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2960 		}
2961 	}
2962 
2963 	/* Reset the processor, un-stall is done later. */
2964 	if (rv2p_proc == RV2P_PROC1) {
2965 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2966 	}
2967 	else {
2968 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2969 	}
2970 }
2971 
2972 
2973 /****************************************************************************/
2974 /* Load RISC processor firmware.                                            */
2975 /*                                                                          */
2976 /* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
2977 /* associated with a particular processor.                                  */
2978 /*                                                                          */
2979 /* Returns:                                                                 */
2980 /*   Nothing.                                                               */
2981 /****************************************************************************/
2982 static void
2983 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2984 	struct fw_info *fw)
2985 {
2986 	u32 offset;
2987 	u32 val;
2988 
2989 	/* Halt the CPU. */
2990 	val = REG_RD_IND(sc, cpu_reg->mode);
2991 	val |= cpu_reg->mode_value_halt;
2992 	REG_WR_IND(sc, cpu_reg->mode, val);
2993 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2994 
2995 	/* Load the Text area. */
2996 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2997 	if (fw->text) {
2998 		int j;
2999 
3000 		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3001 			REG_WR_IND(sc, offset, fw->text[j]);
3002 		}
3003 	}
3004 
3005 	/* Load the Data area. */
3006 	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3007 	if (fw->data) {
3008 		int j;
3009 
3010 		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3011 			REG_WR_IND(sc, offset, fw->data[j]);
3012 		}
3013 	}
3014 
3015 	/* Load the SBSS area. */
3016 	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3017 	if (fw->sbss) {
3018 		int j;
3019 
3020 		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3021 			REG_WR_IND(sc, offset, fw->sbss[j]);
3022 		}
3023 	}
3024 
3025 	/* Load the BSS area. */
3026 	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3027 	if (fw->bss) {
3028 		int j;
3029 
3030 		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3031 			REG_WR_IND(sc, offset, fw->bss[j]);
3032 		}
3033 	}
3034 
3035 	/* Load the Read-Only area. */
3036 	offset = cpu_reg->spad_base +
3037 		(fw->rodata_addr - cpu_reg->mips_view_base);
3038 	if (fw->rodata) {
3039 		int j;
3040 
3041 		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3042 			REG_WR_IND(sc, offset, fw->rodata[j]);
3043 		}
3044 	}
3045 
3046 	/* Clear the pre-fetch instruction. */
3047 	REG_WR_IND(sc, cpu_reg->inst, 0);
3048 	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
3049 
3050 	/* Start the CPU. */
3051 	val = REG_RD_IND(sc, cpu_reg->mode);
3052 	val &= ~cpu_reg->mode_value_halt;
3053 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
3054 	REG_WR_IND(sc, cpu_reg->mode, val);
3055 }
3056 
3057 
3058 /****************************************************************************/
3059 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
3060 /*                                                                          */
3061 /* Loads the firmware for each CPU and starts the CPU.                      */
3062 /*                                                                          */
3063 /* Returns:                                                                 */
3064 /*   Nothing.                                                               */
3065 /****************************************************************************/
3066 static void
3067 bce_init_cpus(struct bce_softc *sc)
3068 {
3069 	struct cpu_reg cpu_reg;
3070 	struct fw_info fw;
3071 
3072 	/* Initialize the RV2P processor. */
3073 	bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1), RV2P_PROC1);
3074 	bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2), RV2P_PROC2);
3075 
3076 	/* Initialize the RX Processor. */
3077 	cpu_reg.mode = BCE_RXP_CPU_MODE;
3078 	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
3079 	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
3080 	cpu_reg.state = BCE_RXP_CPU_STATE;
3081 	cpu_reg.state_value_clear = 0xffffff;
3082 	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
3083 	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
3084 	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
3085 	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
3086 	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
3087 	cpu_reg.spad_base = BCE_RXP_SCRATCH;
3088 	cpu_reg.mips_view_base = 0x8000000;
3089 
3090 	fw.ver_major = bce_RXP_b06FwReleaseMajor;
3091 	fw.ver_minor = bce_RXP_b06FwReleaseMinor;
3092 	fw.ver_fix = bce_RXP_b06FwReleaseFix;
3093 	fw.start_addr = bce_RXP_b06FwStartAddr;
3094 
3095 	fw.text_addr = bce_RXP_b06FwTextAddr;
3096 	fw.text_len = bce_RXP_b06FwTextLen;
3097 	fw.text_index = 0;
3098 	fw.text = bce_RXP_b06FwText;
3099 
3100 	fw.data_addr = bce_RXP_b06FwDataAddr;
3101 	fw.data_len = bce_RXP_b06FwDataLen;
3102 	fw.data_index = 0;
3103 	fw.data = bce_RXP_b06FwData;
3104 
3105 	fw.sbss_addr = bce_RXP_b06FwSbssAddr;
3106 	fw.sbss_len = bce_RXP_b06FwSbssLen;
3107 	fw.sbss_index = 0;
3108 	fw.sbss = bce_RXP_b06FwSbss;
3109 
3110 	fw.bss_addr = bce_RXP_b06FwBssAddr;
3111 	fw.bss_len = bce_RXP_b06FwBssLen;
3112 	fw.bss_index = 0;
3113 	fw.bss = bce_RXP_b06FwBss;
3114 
3115 	fw.rodata_addr = bce_RXP_b06FwRodataAddr;
3116 	fw.rodata_len = bce_RXP_b06FwRodataLen;
3117 	fw.rodata_index = 0;
3118 	fw.rodata = bce_RXP_b06FwRodata;
3119 
3120 	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
3121 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3122 
3123 	/* Initialize the TX Processor. */
3124 	cpu_reg.mode = BCE_TXP_CPU_MODE;
3125 	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
3126 	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
3127 	cpu_reg.state = BCE_TXP_CPU_STATE;
3128 	cpu_reg.state_value_clear = 0xffffff;
3129 	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
3130 	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
3131 	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
3132 	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
3133 	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
3134 	cpu_reg.spad_base = BCE_TXP_SCRATCH;
3135 	cpu_reg.mips_view_base = 0x8000000;
3136 
3137 	fw.ver_major = bce_TXP_b06FwReleaseMajor;
3138 	fw.ver_minor = bce_TXP_b06FwReleaseMinor;
3139 	fw.ver_fix = bce_TXP_b06FwReleaseFix;
3140 	fw.start_addr = bce_TXP_b06FwStartAddr;
3141 
3142 	fw.text_addr = bce_TXP_b06FwTextAddr;
3143 	fw.text_len = bce_TXP_b06FwTextLen;
3144 	fw.text_index = 0;
3145 	fw.text = bce_TXP_b06FwText;
3146 
3147 	fw.data_addr = bce_TXP_b06FwDataAddr;
3148 	fw.data_len = bce_TXP_b06FwDataLen;
3149 	fw.data_index = 0;
3150 	fw.data = bce_TXP_b06FwData;
3151 
3152 	fw.sbss_addr = bce_TXP_b06FwSbssAddr;
3153 	fw.sbss_len = bce_TXP_b06FwSbssLen;
3154 	fw.sbss_index = 0;
3155 	fw.sbss = bce_TXP_b06FwSbss;
3156 
3157 	fw.bss_addr = bce_TXP_b06FwBssAddr;
3158 	fw.bss_len = bce_TXP_b06FwBssLen;
3159 	fw.bss_index = 0;
3160 	fw.bss = bce_TXP_b06FwBss;
3161 
3162 	fw.rodata_addr = bce_TXP_b06FwRodataAddr;
3163 	fw.rodata_len = bce_TXP_b06FwRodataLen;
3164 	fw.rodata_index = 0;
3165 	fw.rodata = bce_TXP_b06FwRodata;
3166 
3167 	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
3168 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3169 
3170 	/* Initialize the TX Patch-up Processor. */
3171 	cpu_reg.mode = BCE_TPAT_CPU_MODE;
3172 	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
3173 	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
3174 	cpu_reg.state = BCE_TPAT_CPU_STATE;
3175 	cpu_reg.state_value_clear = 0xffffff;
3176 	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
3177 	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
3178 	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
3179 	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
3180 	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
3181 	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
3182 	cpu_reg.mips_view_base = 0x8000000;
3183 
3184 	fw.ver_major = bce_TPAT_b06FwReleaseMajor;
3185 	fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
3186 	fw.ver_fix = bce_TPAT_b06FwReleaseFix;
3187 	fw.start_addr = bce_TPAT_b06FwStartAddr;
3188 
3189 	fw.text_addr = bce_TPAT_b06FwTextAddr;
3190 	fw.text_len = bce_TPAT_b06FwTextLen;
3191 	fw.text_index = 0;
3192 	fw.text = bce_TPAT_b06FwText;
3193 
3194 	fw.data_addr = bce_TPAT_b06FwDataAddr;
3195 	fw.data_len = bce_TPAT_b06FwDataLen;
3196 	fw.data_index = 0;
3197 	fw.data = bce_TPAT_b06FwData;
3198 
3199 	fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
3200 	fw.sbss_len = bce_TPAT_b06FwSbssLen;
3201 	fw.sbss_index = 0;
3202 	fw.sbss = bce_TPAT_b06FwSbss;
3203 
3204 	fw.bss_addr = bce_TPAT_b06FwBssAddr;
3205 	fw.bss_len = bce_TPAT_b06FwBssLen;
3206 	fw.bss_index = 0;
3207 	fw.bss = bce_TPAT_b06FwBss;
3208 
3209 	fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
3210 	fw.rodata_len = bce_TPAT_b06FwRodataLen;
3211 	fw.rodata_index = 0;
3212 	fw.rodata = bce_TPAT_b06FwRodata;
3213 
3214 	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
3215 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3216 
3217 	/* Initialize the Completion Processor. */
3218 	cpu_reg.mode = BCE_COM_CPU_MODE;
3219 	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
3220 	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
3221 	cpu_reg.state = BCE_COM_CPU_STATE;
3222 	cpu_reg.state_value_clear = 0xffffff;
3223 	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
3224 	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
3225 	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
3226 	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
3227 	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
3228 	cpu_reg.spad_base = BCE_COM_SCRATCH;
3229 	cpu_reg.mips_view_base = 0x8000000;
3230 
3231 	fw.ver_major = bce_COM_b06FwReleaseMajor;
3232 	fw.ver_minor = bce_COM_b06FwReleaseMinor;
3233 	fw.ver_fix = bce_COM_b06FwReleaseFix;
3234 	fw.start_addr = bce_COM_b06FwStartAddr;
3235 
3236 	fw.text_addr = bce_COM_b06FwTextAddr;
3237 	fw.text_len = bce_COM_b06FwTextLen;
3238 	fw.text_index = 0;
3239 	fw.text = bce_COM_b06FwText;
3240 
3241 	fw.data_addr = bce_COM_b06FwDataAddr;
3242 	fw.data_len = bce_COM_b06FwDataLen;
3243 	fw.data_index = 0;
3244 	fw.data = bce_COM_b06FwData;
3245 
3246 	fw.sbss_addr = bce_COM_b06FwSbssAddr;
3247 	fw.sbss_len = bce_COM_b06FwSbssLen;
3248 	fw.sbss_index = 0;
3249 	fw.sbss = bce_COM_b06FwSbss;
3250 
3251 	fw.bss_addr = bce_COM_b06FwBssAddr;
3252 	fw.bss_len = bce_COM_b06FwBssLen;
3253 	fw.bss_index = 0;
3254 	fw.bss = bce_COM_b06FwBss;
3255 
3256 	fw.rodata_addr = bce_COM_b06FwRodataAddr;
3257 	fw.rodata_len = bce_COM_b06FwRodataLen;
3258 	fw.rodata_index = 0;
3259 	fw.rodata = bce_COM_b06FwRodata;
3260 
3261 	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
3262 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3263 
3264 	/* Initialize the Command Processor. */
3265 	cpu_reg.mode = BCE_CP_CPU_MODE;
3266 	cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
3267 	cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
3268 	cpu_reg.state = BCE_CP_CPU_STATE;
3269 	cpu_reg.state_value_clear = 0xffffff;
3270 	cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
3271 	cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
3272 	cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
3273 	cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
3274 	cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
3275 	cpu_reg.spad_base = BCE_CP_SCRATCH;
3276 	cpu_reg.mips_view_base = 0x8000000;
3277 
3278 	fw.ver_major = bce_CP_b06FwReleaseMajor;
3279 	fw.ver_minor = bce_CP_b06FwReleaseMinor;
3280 	fw.ver_fix = bce_CP_b06FwReleaseFix;
3281 	fw.start_addr = bce_CP_b06FwStartAddr;
3282 
3283 	fw.text_addr = bce_CP_b06FwTextAddr;
3284 	fw.text_len = bce_CP_b06FwTextLen;
3285 	fw.text_index = 0;
3286 	fw.text = bce_CP_b06FwText;
3287 
3288 	fw.data_addr = bce_CP_b06FwDataAddr;
3289 	fw.data_len = bce_CP_b06FwDataLen;
3290 	fw.data_index = 0;
3291 	fw.data = bce_CP_b06FwData;
3292 
3293 	fw.sbss_addr = bce_CP_b06FwSbssAddr;
3294 	fw.sbss_len = bce_CP_b06FwSbssLen;
3295 	fw.sbss_index = 0;
3296 	fw.sbss = bce_CP_b06FwSbss;
3297 
3298 	fw.bss_addr = bce_CP_b06FwBssAddr;
3299 	fw.bss_len = bce_CP_b06FwBssLen;
3300 	fw.bss_index = 0;
3301 	fw.bss = bce_CP_b06FwBss;
3302 
3303 	fw.rodata_addr = bce_CP_b06FwRodataAddr;
3304 	fw.rodata_len = bce_CP_b06FwRodataLen;
3305 	fw.rodata_index = 0;
3306 	fw.rodata = bce_CP_b06FwRodata;
3307 
3308 	DBPRINT(sc, BCE_INFO_RESET, "Loading CP firmware.\n");
3309 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3310 }
3311 
3312 
3313 /****************************************************************************/
3314 /* Initialize context memory.                                               */
3315 /*                                                                          */
3316 /* Clears the memory associated with each Context ID (CID).                 */
3317 /*                                                                          */
3318 /* Returns:                                                                 */
3319 /*   Nothing.                                                               */
3320 /****************************************************************************/
3321 static void
3322 bce_init_ctx(struct bce_softc *sc)
3323 {
3324 	u32 vcid = 96;
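	/*
	 * Walk all 96 contexts; each context is CTX_SIZE bytes and is
	 * cleared PHY_CTX_SIZE bytes at a time through the context
	 * access window programmed below.
	 */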
3325 
3326 	while (vcid) {
3327 		u32 vcid_addr, pcid_addr, offset;
3328 		int i;
3329 
3330 		vcid--;
3331 
		vcid_addr = GET_CID_ADDR(vcid);
3333 		pcid_addr = vcid_addr;
3334 
3335 		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
3336 			vcid_addr += (i << PHY_CTX_SHIFT);
3337 			pcid_addr += (i << PHY_CTX_SHIFT);
3338 
3339 			REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
3340 			REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
3341 
3342 			/* Zero out the context. */
3343 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
3344 				CTX_WR(sc, vcid_addr, offset, 0);
3345 		}
3346 	}
3347 }
3348 
3349 
3350 /****************************************************************************/
3351 /* Fetch the permanent MAC address of the controller.                       */
3352 /*                                                                          */
3353 /* Returns:                                                                 */
3354 /*   Nothing.                                                               */
3355 /****************************************************************************/
3356 static void
3357 bce_get_mac_addr(struct bce_softc *sc)
3358 {
3359 	u32 mac_lo = 0, mac_hi = 0;
3360 
3361 	/*
3362 	 * The NetXtreme II bootcode populates various NIC
3363 	 * power-on and runtime configuration items in a
3364 	 * shared memory area.  The factory configured MAC
3365 	 * address is available from both NVRAM and the
3366 	 * shared memory area so we'll read the value from
3367 	 * shared memory for speed.
3368 	 */
3369 
3370 	mac_hi = REG_RD_IND(sc, sc->bce_shmem_base +
3371 		BCE_PORT_HW_CFG_MAC_UPPER);
3372 	mac_lo = REG_RD_IND(sc, sc->bce_shmem_base +
3373 		BCE_PORT_HW_CFG_MAC_LOWER);
3374 
3375 	if ((mac_lo == 0) && (mac_hi == 0)) {
3376 		BCE_PRINTF("%s(%d): Invalid Ethernet address!\n",
3377 			__FILE__, __LINE__);
3378 	} else {
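		/*
		 * MAC_UPPER holds the two most significant address bytes
		 * in its low 16 bits; MAC_LOWER holds the remaining four
		 * bytes.
		 */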
3379 		sc->eaddr[0] = (u_char)(mac_hi >> 8);
3380 		sc->eaddr[1] = (u_char)(mac_hi >> 0);
3381 		sc->eaddr[2] = (u_char)(mac_lo >> 24);
3382 		sc->eaddr[3] = (u_char)(mac_lo >> 16);
3383 		sc->eaddr[4] = (u_char)(mac_lo >> 8);
3384 		sc->eaddr[5] = (u_char)(mac_lo >> 0);
3385 	}
3386 
3387 	DBPRINT(sc, BCE_INFO_MISC, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
3388 }
3389 
3390 
3391 /****************************************************************************/
3392 /* Program the MAC address.                                                 */
3393 /*                                                                          */
3394 /* Returns:                                                                 */
3395 /*   Nothing.                                                               */
3396 /****************************************************************************/
3397 static void
3398 bce_set_mac_addr(struct bce_softc *sc)
3399 {
3400 	u32 val;
3401 	u8 *mac_addr = sc->eaddr;
3402 
3403 	DBPRINT(sc, BCE_INFO_MISC, "Setting Ethernet address = %6D\n", sc->eaddr, ":");
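	/*
	 * The two most significant address bytes are written to
	 * EMAC_MAC_MATCH0; the remaining four bytes are written to
	 * EMAC_MAC_MATCH1.
	 */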
3404 
3405 	val = (mac_addr[0] << 8) | mac_addr[1];
3406 
3407 	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
3408 
3409 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3410 		(mac_addr[4] << 8) | mac_addr[5];
3411 
3412 	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3413 }
3414 
3415 
3416 /****************************************************************************/
3417 /* Stop the controller.                                                     */
3418 /*                                                                          */
3419 /* Returns:                                                                 */
3420 /*   Nothing.                                                               */
3421 /****************************************************************************/
3422 static void
3423 bce_stop(struct bce_softc *sc)
3424 {
3425 	struct ifnet *ifp;
3426 	struct ifmedia_entry *ifm;
3427 	struct mii_data *mii = NULL;
3428 	int mtmp, itmp;
3429 
3430 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3431 
3432 	BCE_LOCK_ASSERT(sc);
3433 
3434 	ifp = sc->bce_ifp;
3435 
3436 	mii = device_get_softc(sc->bce_miibus);
3437 
3438 	callout_stop(&sc->bce_tick_callout);
3439 
3440 	/* Disable the transmit/receive blocks. */
3441 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 0x5ffffff);
3442 	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3443 	DELAY(20);
3444 
3445 	bce_disable_intr(sc);
3446 
3447 	/* Free RX buffers. */
3448 	bce_free_pg_chain(sc);
3449 	bce_free_rx_chain(sc);
3450 
3451 	/* Free TX buffers. */
3452 	bce_free_tx_chain(sc);
3453 
3454 	/*
3455 	 * Isolate/power down the PHY, but leave the media selection
3456 	 * unchanged so that things will be put back to normal when
3457 	 * we bring the interface back up.
3458 	 */
3459 
3460 	itmp = ifp->if_flags;
3461 	ifp->if_flags |= IFF_UP;
3462 
3463 	/* If we are called from bce_detach(), mii is already NULL. */
3464 	if (mii != NULL) {
3465 		ifm = mii->mii_media.ifm_cur;
3466 		mtmp = ifm->ifm_media;
3467 		ifm->ifm_media = IFM_ETHER | IFM_NONE;
3468 		mii_mediachg(mii);
3469 		ifm->ifm_media = mtmp;
3470 	}
3471 
3472 	ifp->if_flags = itmp;
3473 	sc->watchdog_timer = 0;
3474 
3475 	sc->bce_link = 0;
3476 
3477 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3478 
3479 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3480 }
3481 
3482 
3483 static int
3484 bce_reset(struct bce_softc *sc, u32 reset_code)
3485 {
3486 	u32 val;
3487 	int i, rc = 0;
3488 
3489 	DBPRINT(sc, BCE_VERBOSE_RESET, "%s(): reset_code = 0x%08X\n",
3490 		__FUNCTION__, reset_code);
3491 
3492 	/* Wait for pending PCI transactions to complete. */
3493 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
3494 	       BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3495 	       BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3496 	       BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3497 	       BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3498 	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3499 	DELAY(5);
3500 
3501 	/* Assume bootcode is running. */
3502 	sc->bce_fw_timed_out = 0;
3503 
3504 	/* Give the firmware a chance to prepare for the reset. */
3505 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
3506 	if (rc)
3507 		goto bce_reset_exit;
3508 
3509 	/* Set a firmware reminder that this is a soft reset. */
3510 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE,
3511 		   BCE_DRV_RESET_SIGNATURE_MAGIC);
3512 
3513 	/* Dummy read to force the chip to complete all current transactions. */
3514 	val = REG_RD(sc, BCE_MISC_ID);
3515 
3516 	/* Chip reset. */
3517 	val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3518 	      BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3519 	      BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3520 	REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
3521 
	/* The reset should complete within 30us, but allow up to 100us. */
3523 	for (i = 0; i < 10; i++) {
3524 		val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
3525 		if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3526 			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3527 			break;
3528 		}
3529 		DELAY(10);
3530 	}
3531 
3532 	/* Check that reset completed successfully. */
3533 	if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3534 		   BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3535 		BCE_PRINTF("%s(%d): Reset failed!\n",
3536 			__FILE__, __LINE__);
3537 		rc = EBUSY;
3538 		goto bce_reset_exit;
3539 	}
3540 
3541 	/* Make sure byte swapping is properly configured. */
3542 	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
3543 	if (val != 0x01020304) {
3544 		BCE_PRINTF("%s(%d): Byte swap is incorrect!\n",
3545 			__FILE__, __LINE__);
3546 		rc = ENODEV;
3547 		goto bce_reset_exit;
3548 	}
3549 
3550 	/* Just completed a reset, assume that firmware is running again. */
3551 	sc->bce_fw_timed_out = 0;
3552 
3553 	/* Wait for the firmware to finish its initialization. */
3554 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
3555 	if (rc)
3556 		BCE_PRINTF("%s(%d): Firmware did not complete initialization!\n",
3557 			__FILE__, __LINE__);
3558 
3559 bce_reset_exit:
3560 	return (rc);
3561 }
3562 
3563 
3564 static int
3565 bce_chipinit(struct bce_softc *sc)
3566 {
3567 	u32 val;
3568 	int rc = 0;
3569 
3570 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3571 
3572 	/* Make sure the interrupt is not active. */
3573 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3574 
3575 	/*
3576 	 * Initialize DMA byte/word swapping, configure the number of DMA
3577 	 * channels and PCI clock compensation delay.
3578 	 */
3579 	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3580 	      BCE_DMA_CONFIG_DATA_WORD_SWAP |
3581 #if BYTE_ORDER == BIG_ENDIAN
3582 	      BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3583 #endif
3584 	      BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3585 	      DMA_READ_CHANS << 12 |
3586 	      DMA_WRITE_CHANS << 16;
3587 
3588 	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3589 
3590 	if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
3591 		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3592 
3593 	/*
3594 	 * This setting resolves a problem observed on certain Intel PCI
3595 	 * chipsets that cannot handle multiple outstanding DMA operations.
3596 	 * See errata E9_5706A1_65.
3597 	 */
3598 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
3599 	    (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) &&
3600 	    !(sc->bce_flags & BCE_PCIX_FLAG))
3601 		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3602 
3603 	REG_WR(sc, BCE_DMA_CONFIG, val);
3604 
3605 	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3606 	if (sc->bce_flags & BCE_PCIX_FLAG) {
3607 		u16 val;
3608 
3609 		val = pci_read_config(sc->bce_dev, BCE_PCI_PCIX_CMD, 2);
3610 		pci_write_config(sc->bce_dev, BCE_PCI_PCIX_CMD, val & ~0x2, 2);
3611 	}
3612 
3613 	/* Enable the RX_V2P and Context state machines before access. */
3614 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3615 	       BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3616 	       BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3617 	       BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3618 
3619 	/* Initialize context mapping and zero out the quick contexts. */
3620 	bce_init_ctx(sc);
3621 
	/* Initialize the on-board CPUs. */
3623 	bce_init_cpus(sc);
3624 
3625 	/* Prepare NVRAM for access. */
3626 	if (bce_init_nvram(sc)) {
3627 		rc = ENODEV;
3628 		goto bce_chipinit_exit;
3629 	}
3630 
3631 	/* Set the kernel bypass block size */
3632 	val = REG_RD(sc, BCE_MQ_CONFIG);
3633 	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3634 	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3635 	REG_WR(sc, BCE_MQ_CONFIG, val);
3636 
3637 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3638 	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3639 	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3640 
3641 	/* Set the page size and clear the RV2P processor stall bits. */
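	/*
	 * The page size is encoded as log2(page size) - 8 and written
	 * to the upper bits of the register.
	 */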
3642 	val = (BCM_PAGE_BITS - 8) << 24;
3643 	REG_WR(sc, BCE_RV2P_CONFIG, val);
3644 
3645 	/* Configure page size. */
3646 	val = REG_RD(sc, BCE_TBDR_CONFIG);
3647 	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3648 	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3649 	REG_WR(sc, BCE_TBDR_CONFIG, val);
3650 
3651 bce_chipinit_exit:
3652 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3653 
3654 	return(rc);
3655 }
3656 
3657 
3658 /****************************************************************************/
3659 /* Initialize the controller in preparation to send/receive traffic.        */
3660 /*                                                                          */
3661 /* Returns:                                                                 */
3662 /*   0 for success, positive value for failure.                             */
3663 /****************************************************************************/
3664 static int
3665 bce_blockinit(struct bce_softc *sc)
3666 {
3667 	u32 reg, val;
3668 	int rc = 0;
3669 
3670 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3671 
3672 	/* Load the hardware default MAC address. */
3673 	bce_set_mac_addr(sc);
3674 
3675 	/* Set the Ethernet backoff seed value */
3676 	val = sc->eaddr[0]         + (sc->eaddr[1] << 8) +
3677 	      (sc->eaddr[2] << 16) + (sc->eaddr[3]     ) +
3678 	      (sc->eaddr[4] << 8)  + (sc->eaddr[5] << 16);
3679 	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3680 
3681 	sc->last_status_idx = 0;
3682 	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3683 
3684 	/* Set up link change interrupt generation. */
3685 	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3686 
3687 	/* Program the physical address of the status block. */
3688 	REG_WR(sc, BCE_HC_STATUS_ADDR_L,
3689 		BCE_ADDR_LO(sc->status_block_paddr));
3690 	REG_WR(sc, BCE_HC_STATUS_ADDR_H,
3691 		BCE_ADDR_HI(sc->status_block_paddr));
3692 
3693 	/* Program the physical address of the statistics block. */
3694 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3695 		BCE_ADDR_LO(sc->stats_block_paddr));
3696 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3697 		BCE_ADDR_HI(sc->stats_block_paddr));
3698 
3699 	/* Program various host coalescing parameters. */
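	/*
	 * Each register packs the corresponding *_int value in its
	 * upper 16 bits and the normal value in its lower 16 bits.
	 */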
3700 	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3701 		(sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip);
3702 	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3703 		(sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip);
3704 	REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3705 		(sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3706 	REG_WR(sc, BCE_HC_TX_TICKS,
3707 		(sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3708 	REG_WR(sc, BCE_HC_RX_TICKS,
3709 		(sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3710 	REG_WR(sc, BCE_HC_COM_TICKS,
3711 		(sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3712 	REG_WR(sc, BCE_HC_CMD_TICKS,
3713 		(sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3714 	REG_WR(sc, BCE_HC_STATS_TICKS,
3715 		(sc->bce_stats_ticks & 0xffff00));
3716 	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS,
3717 		0xbb8);  /* 3ms */
3718 	REG_WR(sc, BCE_HC_CONFIG,
3719 		(BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE |
3720 		BCE_HC_CONFIG_COLLECT_STATS));
3721 
3722 	/* Clear the internal statistics counters. */
3723 	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3724 
3725 	/* Verify that bootcode is running. */
3726 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE);
3727 
3728 	DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure),
3729 		BCE_PRINTF("%s(%d): Simulating bootcode failure.\n",
3730 			__FILE__, __LINE__);
3731 		reg = 0);
3732 
3733 	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3734 	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
		BCE_PRINTF("%s(%d): Bootcode not running! Found: 0x%08X, "
			"Expected: 0x%08X\n", __FILE__, __LINE__,
3737 			(reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK),
3738 			BCE_DEV_INFO_SIGNATURE_MAGIC);
3739 		rc = ENODEV;
3740 		goto bce_blockinit_exit;
3741 	}
3742 
3743 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3744 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3745 
3746 	/* Enable link state change interrupt generation. */
3747 	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3748 
3749 	/* Enable all remaining blocks in the MAC. */
3750 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
3751 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3752 	DELAY(20);
3753 
3754 bce_blockinit_exit:
3755 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3756 
3757 	return (rc);
3758 }
3759 
3760 
3761 /****************************************************************************/
3762 /* Encapsulate an mbuf into the rx_bd chain.                                */
3763 /*                                                                          */
3764 /* Returns:                                                                 */
3765 /*   0 for success, positive value for failure.                             */
3766 /****************************************************************************/
3767 static int
3768 bce_get_rx_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
3769 	u16 *chain_prod, u32 *prod_bseq)
3770 {
3771 	bus_dmamap_t map;
3772 	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
3773 	struct mbuf *m_new = NULL;
3774 	struct rx_bd *rxbd;
3775 	int nsegs, error, rc = 0;
3776 #ifdef BCE_DEBUG
3777 	u16 debug_chain_prod = *chain_prod;
3778 #endif
3779 
3780 	DBPRINT(sc, (BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD), "Entering %s()\n",
3781 		__FUNCTION__);
3782 
3783 	/* Make sure the inputs are valid. */
3784 	DBRUNIF((*chain_prod > MAX_RX_BD),
3785 		BCE_PRINTF("%s(%d): RX producer out of range: 0x%04X > 0x%04X\n",
3786 		__FILE__, __LINE__, *chain_prod, (u16) MAX_RX_BD));
3787 
3788 	DBPRINT(sc, BCE_VERBOSE, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
3789 		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
3790 
3791 	/* Update some debug statistic counters */
3792 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3793 		sc->rx_low_watermark = sc->free_rx_bd);
3794 	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
3795 
3796 	/* Check whether this is a new mbuf allocation. */
3797 	if (m == NULL) {
3798 
3799 		/* Simulate an mbuf allocation failure. */
3800 		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
3801 			sc->mbuf_alloc_failed++;
3802 			sc->debug_mbuf_sim_alloc_failed++;
3803 			rc = ENOBUFS;
3804 			goto bce_get_rx_buf_exit);
3805 
3806 		/* This is a new mbuf allocation. */
3807 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
3808 		if (m_new == NULL) {
3809 			sc->mbuf_alloc_failed++;
3810 			rc = ENOBUFS;
3811 			goto bce_get_rx_buf_exit;
3812 		}
3813 
3814 		DBRUN(sc->debug_rx_mbuf_alloc++);
3815 	} else {
3816 		/* Reuse an existing mbuf. */
3817 		m_new = m;
3818 	}
3819 
3820 	M_ASSERTPKTHDR(m_new);
3821 
3822 	m_new->m_pkthdr.len = m_new->m_len = sc->rx_bd_mbuf_alloc_size;
3823 
3824 	/* ToDo: Consider calling m_fragment() to test error handling. */
3825 
3826 	/* Map the mbuf cluster into device memory. */
3827 	map = sc->rx_mbuf_map[*chain_prod];
3828 	error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new,
3829 	    segs, &nsegs, BUS_DMA_NOWAIT);
3830 
3831 	/* Handle any mapping errors. */
3832 	if (error) {
3833 		BCE_PRINTF("%s(%d): Error mapping mbuf into RX chain!\n",
3834 			__FILE__, __LINE__);
3835 
3836 		m_freem(m_new);
3837 		DBRUN(sc->debug_rx_mbuf_alloc--);
3838 
3839 		rc = ENOBUFS;
3840 		goto bce_get_rx_buf_exit;
3841 	}
3842 
3843 	/* All mbufs must map to a single segment. */
3844 	KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!",
3845 		 __FUNCTION__, nsegs));
3846 
3847 	/* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREWRITE) here? */
3848 
3849 	/* Setup the rx_bd for the segment. */
3850 	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3851 
3852 	rxbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[0].ds_addr));
3853 	rxbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[0].ds_addr));
3854 	rxbd->rx_bd_len       = htole32(segs[0].ds_len);
3855 	rxbd->rx_bd_flags     = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
3856 	*prod_bseq += segs[0].ds_len;
3857 
3858 	/* Save the mbuf and update our counter. */
3859 	sc->rx_mbuf_ptr[*chain_prod] = m_new;
3860 	sc->free_rx_bd -= nsegs;
3861 
3862 	DBRUNMSG(BCE_EXCESSIVE, bce_dump_rx_mbuf_chain(sc, debug_chain_prod,
3863 		nsegs));
3864 
3865 	DBPRINT(sc, BCE_VERBOSE, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
3866 		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
3867 
3868 bce_get_rx_buf_exit:
3869 	DBPRINT(sc, (BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD), "Exiting %s()\n",
3870 		__FUNCTION__);
3871 
3872 	return(rc);
3873 }
3874 
3875 
3876 /****************************************************************************/
/* Encapsulate an mbuf cluster into the page chain.                         */
3878 /*                                                                          */
3879 /* Returns:                                                                 */
3880 /*   0 for success, positive value for failure.                             */
3881 /****************************************************************************/
3882 static int
3883 bce_get_pg_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
3884 	u16 *prod_idx)
3885 {
3886 	bus_dmamap_t map;
3887 	bus_addr_t busaddr;
3888 	struct mbuf *m_new = NULL;
3889 	struct rx_bd *pgbd;
3890 	int error, rc = 0;
3891 #ifdef BCE_DEBUG
3892 	u16 debug_prod_idx = *prod_idx;
3893 #endif
3894 
3895 	DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Entering %s()\n",
3896 		__FUNCTION__);
3897 
3898 	/* Make sure the inputs are valid. */
3899 	DBRUNIF((*prod_idx > MAX_PG_BD),
3900 		BCE_PRINTF("%s(%d): page producer out of range: 0x%04X > 0x%04X\n",
3901 		__FILE__, __LINE__, *prod_idx, (u16) MAX_PG_BD));
3902 
3903 	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, "
3904 		"chain_prod = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);
3905 
3906 	/* Update counters if we've hit a new low or run out of pages. */
3907 	DBRUNIF((sc->free_pg_bd < sc->pg_low_watermark),
3908 		sc->pg_low_watermark = sc->free_pg_bd);
3909 	DBRUNIF((sc->free_pg_bd == sc->max_pg_bd), sc->pg_empty_count++);
3910 
3911 	/* Check whether this is a new mbuf allocation. */
3912 	if (m == NULL) {
3913 
3914 		/* Simulate an mbuf allocation failure. */
3915 		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
3916 			sc->mbuf_alloc_failed++;
3917 			sc->debug_mbuf_sim_alloc_failed++;
3918 			rc = ENOBUFS;
3919 			goto bce_get_pg_buf_exit);
3920 
3921 		/* This is a new mbuf allocation. */
3922 		m_new = m_getcl(M_DONTWAIT, MT_DATA, 0);
3923 		if (m_new == NULL) {
3924 			sc->mbuf_alloc_failed++;
3925 			rc = ENOBUFS;
3926 			goto bce_get_pg_buf_exit;
3927 		}
3928 
3929 		DBRUN(sc->debug_pg_mbuf_alloc++);
3930 	} else {
3931 		/* Reuse an existing mbuf. */
3932 		m_new = m;
3933 		m_new->m_data = m_new->m_ext.ext_buf;
3934 	}
3935 
3936 	m_new->m_len = sc->pg_bd_mbuf_alloc_size;
3937 
3938 	/* ToDo: Consider calling m_fragment() to test error handling. */
3939 
3940 	/* Map the mbuf cluster into device memory. */
3941 	map = sc->pg_mbuf_map[*prod_idx];
3942 	error = bus_dmamap_load(sc->pg_mbuf_tag, map, mtod(m_new, void *),
3943 	    sc->pg_bd_mbuf_alloc_size, bce_dma_map_addr, &busaddr, BUS_DMA_NOWAIT);
3944 
3945 	/* Handle any mapping errors. */
3946 	if (error) {
3947 		BCE_PRINTF("%s(%d): Error mapping mbuf into page chain!\n",
3948 			__FILE__, __LINE__);
3949 
3950 		m_freem(m_new);
3951 		DBRUN(sc->debug_pg_mbuf_alloc--);
3952 
3953 		rc = ENOBUFS;
3954 		goto bce_get_pg_buf_exit;
3955 	}
3956 
3957 	/* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREWRITE) here? */
3958 
3959 	/*
3960 	 * The page chain uses the same rx_bd data structure
3961 	 * as the receive chain but doesn't require a byte sequence (bseq).
3962 	 */
3963 	pgbd = &sc->pg_bd_chain[PG_PAGE(*prod_idx)][PG_IDX(*prod_idx)];
3964 
3965 	pgbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(busaddr));
3966 	pgbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(busaddr));
3967 	pgbd->rx_bd_len       = htole32(sc->pg_bd_mbuf_alloc_size);
3968 	pgbd->rx_bd_flags     = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
3969 
3970 	/* Save the mbuf and update our counter. */
3971 	sc->pg_mbuf_ptr[*prod_idx] = m_new;
3972 	sc->free_pg_bd--;
3973 
3974 	DBRUNMSG(BCE_VERBOSE_RECV, bce_dump_pg_mbuf_chain(sc, debug_prod_idx,
3975 		1));
3976 
3977 	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, "
3978 		"prod_idx = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);
3979 
3980 bce_get_pg_buf_exit:
3981 	DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Exiting %s()\n",
3982 		__FUNCTION__);
3983 
3984 	return(rc);
3985 }
3986 
3987 
3988 /****************************************************************************/
3989 /* Allocate memory and initialize the TX data structures.                   */
3990 /*                                                                          */
3991 /* Returns:                                                                 */
3992 /*   0 for success, positive value for failure.                             */
3993 /****************************************************************************/
3994 static int
3995 bce_init_tx_chain(struct bce_softc *sc)
3996 {
3997 	struct tx_bd *txbd;
3998 	u32 val;
3999 	int i, rc = 0;
4000 
4001 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4002 
4003 	/* Set the initial TX producer/consumer indices. */
4004 	sc->tx_prod        = 0;
4005 	sc->tx_cons        = 0;
4006 	sc->tx_prod_bseq   = 0;
4007 	sc->used_tx_bd     = 0;
4008 	sc->max_tx_bd      = USABLE_TX_BD;
4009 	DBRUN(sc->tx_hi_watermark = USABLE_TX_BD);
4010 	DBRUN(sc->tx_full_count = 0);
4011 
4012 	/*
	 * The NetXtreme II supports a linked-list structure called
	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
	 * consists of a series of one or more chain pages, each of which
4016 	 * consists of a fixed number of BD entries.
4017 	 * The last BD entry on each page is a pointer to the next page
4018 	 * in the chain, and the last pointer in the BD chain
4019 	 * points back to the beginning of the chain.
4020 	 */
4021 
4022 	/* Set the TX next pointer chain entries. */
4023 	for (i = 0; i < TX_PAGES; i++) {
4024 		int j;
4025 
4026 		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
4027 
4028 		/* Check if we've reached the last page. */
4029 		if (i == (TX_PAGES - 1))
4030 			j = 0;
4031 		else
4032 			j = i + 1;
4033 
4034 		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
4035 		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
4036 	}
4037 
4038 	/* Initialize the context ID for an L2 TX chain. */
4039 	val = BCE_L2CTX_TYPE_TYPE_L2;
4040 	val |= BCE_L2CTX_TYPE_SIZE_L2;
4041 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TYPE, val);
4042 
4043 	val = BCE_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4044 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_CMD_TYPE, val);
4045 
4046 	/* Point the hardware to the first page in the chain. */
4047 	val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
4048 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_HI, val);
4049 	val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
4050 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_LO, val);
4051 
4052 	DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
4053 
4054 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4055 
4056 	return(rc);
4057 }
4058 
4059 
4060 /****************************************************************************/
4061 /* Free memory and clear the TX data structures.                            */
4062 /*                                                                          */
4063 /* Returns:                                                                 */
4064 /*   Nothing.                                                               */
4065 /****************************************************************************/
4066 static void
4067 bce_free_tx_chain(struct bce_softc *sc)
4068 {
4069 	int i;
4070 
4071 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4072 
4073 	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
4074 	for (i = 0; i < TOTAL_TX_BD; i++) {
4075 		if (sc->tx_mbuf_ptr[i] != NULL) {
4076 			if (sc->tx_mbuf_map != NULL)
4077 				bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i],
4078 					BUS_DMASYNC_POSTWRITE);
4079 			m_freem(sc->tx_mbuf_ptr[i]);
4080 			sc->tx_mbuf_ptr[i] = NULL;
4081 			DBRUN(sc->debug_tx_mbuf_alloc--);
4082 		}
4083 	}
4084 
4085 	/* Clear each TX chain page. */
4086 	for (i = 0; i < TX_PAGES; i++)
4087 		bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
4088 
4089 	sc->used_tx_bd     = 0;
4090 
4091 	/* Check if we lost any mbufs in the process. */
4092 	DBRUNIF((sc->debug_tx_mbuf_alloc),
4093 		BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs "
4094 			"from tx chain!\n",
4095 			__FILE__, __LINE__, sc->debug_tx_mbuf_alloc));
4096 
4097 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4098 }
4099 
4100 
4101 /****************************************************************************/
4102 /* Allocate memory and initialize the RX data structures.                   */
4103 /*                                                                          */
4104 /* Returns:                                                                 */
4105 /*   0 for success, positive value for failure.                             */
4106 /****************************************************************************/
4107 static int
4108 bce_init_rx_chain(struct bce_softc *sc)
4109 {
4110 	struct rx_bd *rxbd;
4111 	int i, rc = 0;
4112 	u32 val;
4113 
4114 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4115 
4116 	/* Initialize the RX producer and consumer indices. */
4117 	sc->rx_prod        = 0;
4118 	sc->rx_cons        = 0;
4119 	sc->rx_prod_bseq   = 0;
4120 	sc->free_rx_bd     = USABLE_RX_BD;
4121 	sc->max_rx_bd      = USABLE_RX_BD;
4122 	DBRUN(sc->rx_low_watermark = sc->max_rx_bd);
4123 	DBRUN(sc->rx_empty_count = 0);
4124 
4125 	/* Initialize the RX next pointer chain entries. */
4126 	for (i = 0; i < RX_PAGES; i++) {
4127 		int j;
4128 
4129 		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
4130 
4131 		/* Check if we've reached the last page. */
4132 		if (i == (RX_PAGES - 1))
4133 			j = 0;
4134 		else
4135 			j = i + 1;
4136 
4137 		/* Setup the chain page pointers. */
4138 		rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
4139 		rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
4140 	}
4141 
4142 	/* Initialize the context ID for an L2 RX chain. */
4143 	val = BCE_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4144 	val |= BCE_L2CTX_CTX_TYPE_SIZE_L2;
4145 	val |= 0x02 << 8;
4146 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_CTX_TYPE, val);
4147 
4148 	/* Point the hardware to the first page in the chain. */
4149 	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
4150 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_HI, val);
4151 	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
4152 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_LO, val);
4153 
4154 	/* Fill up the RX chain. */
4155 	bce_fill_rx_chain(sc);
4156 
4157 	for (i = 0; i < RX_PAGES; i++) {
		bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i],
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4162 	}
4163 
4164 	DBRUNMSG(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));
4165 
4166 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4167 
4168 	return(rc);
4169 }
4170 
4171 
4172 /****************************************************************************/
/* Add mbufs to the RX chain until it's full or an mbuf allocation error    */
4174 /* occurs.                                                                  */
4175 /*                                                                          */
4176 /* Returns:                                                                 */
4177 /*   Nothing                                                                */
4178 /****************************************************************************/
4179 static void
4180 bce_fill_rx_chain(struct bce_softc *sc)
4181 {
4182 	u16 prod, prod_idx;
4183 	u32 prod_bseq;
4184 
4185 	DBPRINT(sc, BCE_VERBOSE_RECV, "Entering %s()\n", __FUNCTION__);
4186 
4187 	prod      = sc->rx_prod;
4188 	prod_bseq = sc->rx_prod_bseq;
4189 
4190 	/* Keep filling the RX chain until it's full. */
4191 	while (sc->free_rx_bd > 0) {
4192 		prod_idx = RX_CHAIN_IDX(prod);
4193 		if (bce_get_rx_buf(sc, NULL, &prod, &prod_idx, &prod_bseq)) {
4194 			/* Bail out if we can't add an mbuf to the chain. */
4195 			break;
4196 		}
4197 		prod = NEXT_RX_BD(prod);
4198 	}
4199 
4200 	/* Save the RX chain producer index. */
4201 	sc->rx_prod      = prod;
4202 	sc->rx_prod_bseq = prod_bseq;
4203 
4204 	DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
4205 		BCE_PRINTF("%s(): Invalid rx_prod value: 0x%04X\n",
4206 		__FUNCTION__, sc->rx_prod));
4207 
4208 	/* Tell the chip about the waiting rx_bd's. */
4209 	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
4210 	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
4211 
4212 	DBPRINT(sc, BCE_VERBOSE_RECV, "Exiting %s()\n", __FUNCTION__);
4213 }
4214 
4215 
4216 /****************************************************************************/
4217 /* Free memory and clear the RX data structures.                            */
4218 /*                                                                          */
4219 /* Returns:                                                                 */
4220 /*   Nothing.                                                               */
4221 /****************************************************************************/
4222 static void
4223 bce_free_rx_chain(struct bce_softc *sc)
4224 {
4225 	int i;
4226 
4227 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4228 
4229 	/* Clear the jumbo page chain support. */
4230 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_PG_BUF_SIZE, 0);
4231 
4232 	/* Free any mbufs still in the RX mbuf chain. */
4233 	for (i = 0; i < TOTAL_RX_BD; i++) {
4234 		if (sc->rx_mbuf_ptr[i] != NULL) {
4235 			if (sc->rx_mbuf_map[i] != NULL)
4236 				bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i],
4237 					BUS_DMASYNC_POSTREAD);
4238 			m_freem(sc->rx_mbuf_ptr[i]);
4239 			sc->rx_mbuf_ptr[i] = NULL;
4240 			DBRUN(sc->debug_rx_mbuf_alloc--);
4241 		}
4242 	}
4243 
4244 	/* Clear each RX chain page. */
4245 	for (i = 0; i < RX_PAGES; i++)
4246 		bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
4247 
4248 	sc->free_rx_bd = sc->max_rx_bd;
4249 
4250 	/* Check if we lost any mbufs in the process. */
4251 	DBRUNIF((sc->debug_rx_mbuf_alloc),
4252 		BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from rx chain!\n",
4253 			__FUNCTION__, sc->debug_rx_mbuf_alloc));
4254 
4255 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4256 }
4257 
4258 
4259 /****************************************************************************/
4260 /* Allocate memory and initialize the page data structures.                 */
4261 /* Assumes that bce_init_rx_chain() has not already been called.            */
4262 /*                                                                          */
4263 /* Returns:                                                                 */
4264 /*   0 for success, positive value for failure.                             */
4265 /****************************************************************************/
4266 static int
4267 bce_init_pg_chain(struct bce_softc *sc)
4268 {
4269 	struct rx_bd *pgbd;
4270 	int i, rc = 0;
4271 	u32 val;
4272 
4273 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4274 
4275 	/* Initialize the page producer and consumer indices. */
4276 	sc->pg_prod        = 0;
4277 	sc->pg_cons        = 0;
4278 	sc->free_pg_bd     = USABLE_PG_BD;
4279 	sc->max_pg_bd      = USABLE_PG_BD;
4280 	DBRUN(sc->pg_low_watermark = sc->max_pg_bd);
4281 	DBRUN(sc->pg_empty_count = 0);
4282 
4283 	/* Initialize the page next pointer chain entries. */
4284 	for (i = 0; i < PG_PAGES; i++) {
4285 		int j;
4286 
4287 		pgbd = &sc->pg_bd_chain[i][USABLE_PG_BD_PER_PAGE];
4288 
4289 		/* Check if we've reached the last page. */
4290 		if (i == (PG_PAGES - 1))
4291 			j = 0;
4292 		else
4293 			j = i + 1;
4294 
4295 		/* Setup the chain page pointers. */
4296 		pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->pg_bd_chain_paddr[j]));
4297 		pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->pg_bd_chain_paddr[j]));
4298 	}
4299 
4300 	/* Point the hardware to the first page in the page chain. */
4301 	val = BCE_ADDR_HI(sc->pg_bd_chain_paddr[0]);
4302 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_PG_BDHADDR_HI, val);
4303 	val = BCE_ADDR_LO(sc->pg_bd_chain_paddr[0]);
4304 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_PG_BDHADDR_LO, val);
4305 
4306 	/* Configure the rx_bd and page chain mbuf cluster size. */
4307 	val = (sc->rx_bd_mbuf_alloc_size << 16) | sc->pg_bd_mbuf_alloc_size;
4308 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_PG_BUF_SIZE, val);
4309 
4310 	/* Configure the context reserved for jumbo support. */
4311 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RBDC_KEY,
4312 		BCE_L2CTX_RBDC_JUMBO_KEY);
4313 
4314 	/* Fill up the page chain. */
4315 	bce_fill_pg_chain(sc);
4316 
4317 	for (i = 0; i < PG_PAGES; i++) {
		bus_dmamap_sync(sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i],
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4322 	}
4323 
4324 	DBRUNMSG(BCE_VERBOSE_RECV, bce_dump_pg_chain(sc, 0, TOTAL_PG_BD));
4325 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4326 
4327 	return(rc);
4328 }
4329 
4330 /****************************************************************************/
/* Add mbufs to the page chain until it's full or an mbuf allocation error  */
4332 /* occurs.                                                                  */
4333 /*                                                                          */
4334 /* Returns:                                                                 */
4335 /*   Nothing                                                                */
4336 /****************************************************************************/
4337 static void
4338 bce_fill_pg_chain(struct bce_softc *sc)
4339 {
4340 	u16 prod, prod_idx;
4341 
4342 	DBPRINT(sc, BCE_EXCESSIVE_RECV, "Entering %s()\n", __FUNCTION__);
4343 
4344 	prod = sc->pg_prod;
4345 
4346 	/* Keep filling the page chain until it's full. */
4347 	while (sc->free_pg_bd > 0) {
4348 		prod_idx = PG_CHAIN_IDX(prod);
4349 		if (bce_get_pg_buf(sc, NULL, &prod, &prod_idx)) {
4350 			/* Bail out if we can't add an mbuf to the chain. */
4351 			break;
4352 		}
4353 		prod = NEXT_PG_BD(prod);
4354 	}
4355 
4356 	/* Save the page chain producer index. */
4357 	sc->pg_prod = prod;
4358 
4359 	DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
4360 		BCE_PRINTF("%s(): Invalid pg_prod value: 0x%04X\n",
4361 		__FUNCTION__, sc->pg_prod));
4362 
4363 	/* Tell the chip about the new rx_bd's in the page chain. */
4364 	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_PG_BDIDX, sc->pg_prod);
4365 
4366 	DBPRINT(sc, BCE_EXCESSIVE_RECV, "Exiting %s()\n", __FUNCTION__);
4367 }
4368 
4369 
4370 /****************************************************************************/
/* Free memory and clear the page data structures.                          */
4372 /*                                                                          */
4373 /* Returns:                                                                 */
4374 /*   Nothing.                                                               */
4375 /****************************************************************************/
4376 static void
4377 bce_free_pg_chain(struct bce_softc *sc)
4378 {
4379 	int i;
4380 
4381 	DBPRINT(sc, BCE_EXCESSIVE_RESET, "Entering %s()\n", __FUNCTION__);
4382 
4383 	/* Free any mbufs still in the mbuf page chain. */
4384 	for (i = 0; i < TOTAL_PG_BD; i++) {
4385 		if (sc->pg_mbuf_ptr[i] != NULL) {
4386 			if (sc->pg_mbuf_map[i] != NULL)
4387 				bus_dmamap_sync(sc->pg_mbuf_tag, sc->pg_mbuf_map[i],
4388 					BUS_DMASYNC_POSTREAD);
4389 			m_freem(sc->pg_mbuf_ptr[i]);
4390 			sc->pg_mbuf_ptr[i] = NULL;
4391 			DBRUN(sc->debug_pg_mbuf_alloc--);
4392 		}
4393 	}
4394 
	/* Clear each page chain page. */
4396 	for (i = 0; i < PG_PAGES; i++)
4397 		bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ);
4398 
4399 	sc->free_pg_bd = sc->max_pg_bd;
4400 
4401 	/* Check if we lost any mbufs in the process. */
4402 	DBRUNIF((sc->debug_pg_mbuf_alloc),
4403 		BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from page chain!\n",
4404 			__FUNCTION__, sc->debug_pg_mbuf_alloc));
4405 
4406 	DBPRINT(sc, BCE_EXCESSIVE_RESET, "Exiting %s()\n", __FUNCTION__);
4407 }
4408 
4409 
4410 /****************************************************************************/
4411 /* Set media options.                                                       */
4412 /*                                                                          */
4413 /* Returns:                                                                 */
4414 /*   0 for success, positive value for failure.                             */
4415 /****************************************************************************/
4416 static int
4417 bce_ifmedia_upd(struct ifnet *ifp)
4418 {
4419 	struct bce_softc *sc;
4420 
4421 	sc = ifp->if_softc;
4422 	BCE_LOCK(sc);
4423 	bce_ifmedia_upd_locked(ifp);
4424 	BCE_UNLOCK(sc);
4425 	return (0);
4426 }
4427 
4428 
4429 /****************************************************************************/
4430 /* Set media options.                                                       */
4431 /*                                                                          */
4432 /* Returns:                                                                 */
4433 /*   Nothing.                                                               */
4434 /****************************************************************************/
4435 static void
4436 bce_ifmedia_upd_locked(struct ifnet *ifp)
4437 {
4438 	struct bce_softc *sc;
4439 	struct mii_data *mii;
4440 	struct ifmedia *ifm;
4441 
4442 	sc = ifp->if_softc;
4443 	ifm = &sc->bce_ifmedia;
4444 	BCE_LOCK_ASSERT(sc);
4445 
4446 	mii = device_get_softc(sc->bce_miibus);
4447 
4448 	/* Make sure the MII bus has been enumerated. */
4449 	if (mii) {
4450 		sc->bce_link = 0;
4451 		if (mii->mii_instance) {
4452 			struct mii_softc *miisc;
4453 
4454 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4455 				mii_phy_reset(miisc);
4456 		}
4457 		mii_mediachg(mii);
4458 	}
4459 }
4460 
4461 
4462 /****************************************************************************/
4463 /* Reports current media status.                                            */
4464 /*                                                                          */
4465 /* Returns:                                                                 */
4466 /*   Nothing.                                                               */
4467 /****************************************************************************/
4468 static void
4469 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4470 {
4471 	struct bce_softc *sc;
4472 	struct mii_data *mii;
4473 
4474 	sc = ifp->if_softc;
4475 
4476 	BCE_LOCK(sc);
4477 
4478 	mii = device_get_softc(sc->bce_miibus);
4479 
4480 	mii_pollstat(mii);
4481 	ifmr->ifm_active = mii->mii_media_active;
4482 	ifmr->ifm_status = mii->mii_media_status;
4483 
4484 	BCE_UNLOCK(sc);
4485 }
4486 
4487 
4488 /****************************************************************************/
4489 /* Handles PHY generated interrupt events.                                  */
4490 /*                                                                          */
4491 /* Returns:                                                                 */
4492 /*   Nothing.                                                               */
4493 /****************************************************************************/
4494 static void
4495 bce_phy_intr(struct bce_softc *sc)
4496 {
4497 	u32 new_link_state, old_link_state;
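	/*
	 * The attention bits reflect the current link state while the
	 * acknowledged bits reflect the last state reported to the
	 * driver, so a mismatch indicates a link change.
	 */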
4498 
4499 	new_link_state = sc->status_block->status_attn_bits &
4500 		STATUS_ATTN_BITS_LINK_STATE;
4501 	old_link_state = sc->status_block->status_attn_bits_ack &
4502 		STATUS_ATTN_BITS_LINK_STATE;
4503 
4504 	/* Handle any changes if the link state has changed. */
4505 	if (new_link_state != old_link_state) {
4506 
4507 		DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
4508 
4509 		sc->bce_link = 0;
4510 		callout_stop(&sc->bce_tick_callout);
4511 		bce_tick(sc);
4512 
4513 		/* Update the status_attn_bits_ack field in the status block. */
4514 		if (new_link_state) {
4515 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
4516 				STATUS_ATTN_BITS_LINK_STATE);
4517 			DBPRINT(sc, BCE_INFO_MISC, "Link is now UP.\n");
4518 		}
4519 		else {
4520 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
4521 				STATUS_ATTN_BITS_LINK_STATE);
4522 			DBPRINT(sc, BCE_INFO_MISC, "Link is now DOWN.\n");
4523 		}
4524 
4525 	}
4526 
4527 	/* Acknowledge the link change interrupt. */
4528 	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
4529 }
4530 
4531 
4532 /****************************************************************************/
4533 /* Reads the receive consumer value from the status block (skipping over    */
/* the chain page pointer if necessary).                                    */
4535 /*                                                                          */
4536 /* Returns:                                                                 */
4537 /*   hw_cons                                                                */
4538 /****************************************************************************/
4539 static inline u16
4540 bce_get_hw_rx_cons(struct bce_softc *sc)
4541 {
4542 	u16 hw_cons = sc->status_block->status_rx_quick_consumer_index0;
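	/*
	 * The last entry in each chain page is a next-page pointer
	 * rather than a usable rx_bd, so skip over it when the
	 * consumer index lands on it.
	 */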
4543 
4544 	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4545 		hw_cons++;
4546 
4547 	return hw_cons;
4548 }
4549 
4550 
4551 /****************************************************************************/
4552 /* Handles received frame interrupt events.                                 */
4553 /*                                                                          */
4554 /* Returns:                                                                 */
4555 /*   Nothing.                                                               */
4556 /****************************************************************************/
4557 static void
4558 bce_rx_intr(struct bce_softc *sc)
4559 {
4560 	struct ifnet *ifp = sc->bce_ifp;
4561 	struct l2_fhdr *l2fhdr;
4562 	unsigned int pages, pkt_len, rem_len;
4563 	u16 sw_rx_cons, sw_rx_cons_idx, sw_pg_cons, sw_pg_cons_idx, hw_rx_cons;
4564 	u32 status;
4565 
4566 
4567 #ifdef BCE_DEBUG
4568 	u32 timer_start, timer_end;
4569 	timer_start = REG_RD(sc, BCE_TIMER_25MHZ_FREE_RUN);
4570 	sc->rx_interrupts++;
4571 #endif
4572 
4573 	/* Prepare the RX chain pages to be accessed by the host CPU. */
4574 	for (int i = 0; i < RX_PAGES; i++)
4575 		bus_dmamap_sync(sc->rx_bd_chain_tag,
4576 		    sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTWRITE);
4577 
4578 	/* Prepare the page chain pages to be accessed by the host CPU. */
4579 	for (int i = 0; i < PG_PAGES; i++)
4580 		bus_dmamap_sync(sc->pg_bd_chain_tag,
4581 		    sc->pg_bd_chain_map[i], BUS_DMASYNC_POSTWRITE);
4582 
4583 	/* Get the hardware's view of the RX consumer index. */
4584 	hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
4585 
4586 	/* Get working copies of the driver's view of the consumer indices. */
4587 	sw_rx_cons = sc->rx_cons;
4588 	sw_pg_cons = sc->pg_cons;
4589 
4590 	DBPRINT(sc, BCE_INFO_RECV, "%s(enter): rx_prod = 0x%04X, "
4591 		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
4592 		__FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
4593 
4594 	/* Update some debug statistics counters */
4595 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
4596 		sc->rx_low_watermark = sc->free_rx_bd);
4597 	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
4598 
4599 	/* Scan through the receive chain as long as there is work to do */
4600 	/* ToDo: Consider setting a limit on the number of packets processed. */
4601 	while (sw_rx_cons != hw_rx_cons) {
4602 		struct mbuf *m0;
4603 
4604 		/* Convert the producer/consumer indices to an actual rx_bd index. */
4605 		sw_rx_cons_idx = RX_CHAIN_IDX(sw_rx_cons);
4606 
4607 		/* Unmap the mbuf from DMA space. */
		bus_dmamap_sync(sc->rx_mbuf_tag,
		    sc->rx_mbuf_map[sw_rx_cons_idx],
		    BUS_DMASYNC_POSTREAD);
4611 		bus_dmamap_unload(sc->rx_mbuf_tag,
4612 		    sc->rx_mbuf_map[sw_rx_cons_idx]);
4613 
4614 		/* Remove the mbuf from the RX chain. */
4615 		m0 = sc->rx_mbuf_ptr[sw_rx_cons_idx];
4616 		sc->rx_mbuf_ptr[sw_rx_cons_idx] = NULL;
4617 		DBRUN(sc->debug_rx_mbuf_alloc--);
4618 		sc->free_rx_bd++;
4619 
4620 		/*
		 * Frames received on the NetXtreme II are prepended
4622 		 * with an l2_fhdr structure which provides status
4623 		 * information about the received frame (including
4624 		 * VLAN tags and checksum info).  The frames are also
4625 		 * automatically adjusted to align the IP header
4626 		 * (i.e. two null bytes are inserted before the
4627 		 * Ethernet header).  As a result the data DMA'd by
4628 		 * the controller into the mbuf is as follows:
4629 		 * +---------+-----+---------------------+-----+
4630 		 * | l2_fhdr | pad | packet data         | FCS |
4631 		 * +---------+-----+---------------------+-----+
4632 		 * The l2_fhdr needs to be checked and skipped and
4633 		 * the FCS needs to be stripped before sending the
4634 		 * packet up the stack.
4635 		 */
4636 		l2fhdr  = mtod(m0, struct l2_fhdr *);
4637 
4638 		/* Get the packet data + FCS length and the status. */
4639 		pkt_len = l2fhdr->l2_fhdr_pkt_len;
4640 		status  = l2fhdr->l2_fhdr_status;
4641 
4642 		/*
4643 		 * Skip over the l2_fhdr and pad, resulting in the
4644 		 * following data in the mbuf:
4645 		 * +---------------------+-----+
4646 		 * | packet data         | FCS |
4647 		 * +---------------------+-----+
4648 		 */
4649 		m_adj(m0, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4650 
4651 		/*
4652 		 * Check whether the received frame fits in a single
4653 		 * mbuf or not (i.e. packet data + FCS <=
4654 		 * sc->rx_bd_mbuf_alloc_size bytes).
4655 		 */
4656 		if (pkt_len > m0->m_len) {
4657 			/*
4658 			 * The received frame is larger than a single mbuf.
4659 			 * If the frame was a TCP frame then only the TCP
4660 			 * header is placed in the mbuf, the remaining
4661 			 * payload (including FCS) is placed in the page
4662 			 * chain, the SPLIT flag is set, and the header
4663 			 * length is placed in the IP checksum field.
4664 			 * If the frame is not a TCP frame then the mbuf
4665 			 * is filled and the remaining bytes are placed
4666 			 * in the page chain.
4667 			 */
4668 
4669 			DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a large packet.\n",
4670 				__FUNCTION__);
4671 
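			/*
			 * For split frames the controller reports the header
			 * length in the l2_fhdr IP checksum field (see the
			 * comment above), so use it as this mbuf's length.
			 */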
4672 		 	if (status & L2_FHDR_STATUS_SPLIT)
4673 				m0->m_len = l2fhdr->l2_fhdr_ip_xsum;
4674 
4675 			rem_len = pkt_len - m0->m_len;
4676 
4677 			/* Calculate how many pages to pull off the page chain. */
4678 			/* ToDo: The following assumes that mbuf clusters are 2KB. */
4679 			pages = (rem_len + sc->pg_bd_mbuf_alloc_size) >> 11;
4680 
4681 			/* Pull mbufs off the page chain for the remaining data. */
4682 			while (rem_len > 0) {
4683 				struct mbuf *m_pg;
4684 
4685 				sw_pg_cons_idx = PG_CHAIN_IDX(sw_pg_cons);
4686 
4687 				/* Remove the mbuf from the page chain. */
4688 				m_pg = sc->pg_mbuf_ptr[sw_pg_cons_idx];
4689 				sc->pg_mbuf_ptr[sw_pg_cons_idx] = NULL;
4690 				DBRUN(sc->debug_pg_mbuf_alloc--);
4691 				sc->free_pg_bd++;
4692 
4693 				/* Unmap the page chain mbuf from DMA space. */
4694 				bus_dmamap_sync(sc->pg_mbuf_tag,
4695 					sc->pg_mbuf_map[sw_pg_cons_idx],
4696 					BUS_DMASYNC_POSTREAD);
4697 				bus_dmamap_unload(sc->pg_mbuf_tag,
4698 					sc->pg_mbuf_map[sw_pg_cons_idx]);
4699 
4700 				/* Adjust the mbuf length. */
4701 				if (rem_len < m_pg->m_len) {
4702 					/* The mbuf chain is complete. */
4703 					m_pg->m_len = rem_len;
4704 					rem_len = 0;
4705 				} else {
4706 					/* More packet data is waiting. */
4707 					rem_len -= m_pg->m_len;
4708 				}
4709 
4710 				/* Concatenate the mbuf cluster to the mbuf. */
4711 				m_cat(m0, m_pg);
4712 
4713 				sw_pg_cons = NEXT_PG_BD(sw_pg_cons);
4714 			}
4715 
4716 			/* Set the total packet length. */
4717 			m0->m_pkthdr.len = pkt_len;
4718 
4719 		} else {
4720 			/*
4721 			 * The received packet is small and fits in a
4722 			 * single mbuf (i.e. the l2_fhdr + pad + packet +
4723 			 * FCS <= MHLEN).  In other words, the packet is
4724 			 * 154 bytes or less in size.
4725 			 */
4726 
4727 			DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a small packet.\n",
4728 				__FUNCTION__);
4729 
4730 			/* Set the total packet length. */
4731 			m0->m_pkthdr.len = m0->m_len = pkt_len;
4732 		}
4733 
4734 		/* Remove the trailing Ethernet FCS. */
4735 		m_adj(m0, -ETHER_CRC_LEN);
4736 
4737 		/* Check that the resulting mbuf chain is valid. */
4738 		DBRUN(m_sanity(m0, FALSE));
4739 
4740 		DBRUNIF((m0->m_len < ETHER_HDR_LEN),
4741 			BCE_PRINTF("%s(): Unexpected length = %d!\n",
4742 				__FUNCTION__, m0->m_len);
4743 			bce_breakpoint(sc));
4744 
4745 		DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
4746 			BCE_PRINTF("Simulating l2_fhdr status error.\n");
4747 			status = status | L2_FHDR_ERRORS_PHY_DECODE);
4748 
4749 		/* Check the received frame for errors. */
4750 		if (status & (L2_FHDR_ERRORS_BAD_CRC |
4751 			L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
4752 			L2_FHDR_ERRORS_TOO_SHORT  | L2_FHDR_ERRORS_GIANT_FRAME)) {
4753 
4754 			/* Log the error and release the mbuf. */
4755 			ifp->if_ierrors++;
4756 			DBRUN(sc->l2fhdr_status_errors++);
4757 
4758 			m_freem(m0);
4759 			m0 = NULL;
4760 			goto bce_rx_int_next_rx;
4761 		}
4762 
4763 		/* Send the packet to the appropriate interface. */
4764 		m0->m_pkthdr.rcvif = ifp;
4765 
4766 		/* Assume no hardware checksum. */
4767 		m0->m_pkthdr.csum_flags = 0;
4768 
4769 		/* Validate the checksum if offload enabled. */
4770 		if (ifp->if_capenable & IFCAP_RXCSUM) {
4771 
4772 			/* Check for an IP datagram. */
4773 		 	if (!(status & L2_FHDR_STATUS_SPLIT) &&
4774 				(status & L2_FHDR_STATUS_IP_DATAGRAM)) {
4775 				m0->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4776 
4777 				/* Check if the IP checksum is valid. */
4778 				if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
4779 					m0->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4780 			}
4781 
4782 			/* Check for a valid TCP/UDP frame. */
4783 			if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4784 				L2_FHDR_STATUS_UDP_DATAGRAM)) {
4785 
4786 				/* Check for a good TCP/UDP checksum. */
4787 				if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
4788 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4789 					m0->m_pkthdr.csum_data =
4790 					    l2fhdr->l2_fhdr_tcp_udp_xsum;
4791 					m0->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
4792 						| CSUM_PSEUDO_HDR);
4793 				}
4794 			}
4795 		}
4796 
4797 		/*
4798 		 * If we received a packet with a vlan tag,
4799 		 * attach that information to the packet.
4800 		 */
4801 		if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
4802 #if __FreeBSD_version < 700000
4803 			VLAN_INPUT_TAG(ifp, m0, l2fhdr->l2_fhdr_vlan_tag, continue);
4804 #else
4805 			m0->m_pkthdr.ether_vtag = l2fhdr->l2_fhdr_vlan_tag;
4806 			m0->m_flags |= M_VLANTAG;
4807 #endif
4808 		}
4809 
4810 		/* Pass the mbuf off to the upper layers. */
4811 		ifp->if_ipackets++;
4812 
4813 bce_rx_int_next_rx:
4814 		sw_rx_cons = NEXT_RX_BD(sw_rx_cons);
4815 
4816 		/* If we have a packet, pass it up the stack */
4817 		if (m0) {
4818 			/* Make sure we don't lose our place when we release the lock. */
4819 			sc->rx_cons = sw_rx_cons;
4820 			sc->pg_cons = sw_pg_cons;
4821 
4822 			BCE_UNLOCK(sc);
4823 			(*ifp->if_input)(ifp, m0);
4824 			BCE_LOCK(sc);
4825 
4826 			/* Recover our place. */
4827 			sw_rx_cons = sc->rx_cons;
4828 			sw_pg_cons = sc->pg_cons;
4829 		}
4830 
4831 		/* Refresh hw_cons to see if there's new work */
4832 		if (sw_rx_cons == hw_rx_cons)
4833 			hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
4834 	}
4835 
4836 	/* No new packets to process.  Refill the RX and page chains and exit. */
4837 	sc->pg_cons = sw_pg_cons;
4838 	bce_fill_pg_chain(sc);
4839 
4840 	sc->rx_cons = sw_rx_cons;
4841 	bce_fill_rx_chain(sc);
4842 
4843 	for (int i = 0; i < RX_PAGES; i++)
4844 		bus_dmamap_sync(sc->rx_bd_chain_tag,
4845 		    sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
4846 
4847 	for (int i = 0; i < PG_PAGES; i++)
4848 		bus_dmamap_sync(sc->pg_bd_chain_tag,
4849 		    sc->pg_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
4850 
4851 	DBPRINT(sc, BCE_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
4852 		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
4853 		__FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
4854 
4855 #ifdef BCE_DEBUG
4856 	timer_end = REG_RD(sc, BCE_TIMER_25MHZ_FREE_RUN);
4857 	sc->rx_intr_time += (u64) (timer_start > timer_end ?
4858 		(timer_start - timer_end) : (~timer_start + timer_end + 1));
4859 #endif
4860 }
4861 
4862 
4863 /****************************************************************************/
4864 /* Reads the transmit consumer value from the status block (skipping over   */
4865 /* chain page pointer if necessary).                                        */
4866 /*                                                                          */
4867 /* Returns:                                                                 */
4868 /*   hw_cons                                                                */
4869 /****************************************************************************/
4870 static inline u16
4871 bce_get_hw_tx_cons(struct bce_softc *sc)
4872 {
4873 	u16 hw_cons = sc->status_block->status_tx_quick_consumer_index0;
4874 
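	/*
	 * The last tx_bd in each chain page is a pointer to the next
	 * page rather than a usable descriptor, so skip over it.
	 */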
4875 	if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4876 		hw_cons++;
4877 
4878 	return hw_cons;
4879 }
4880 
4881 
4882 /****************************************************************************/
4883 /* Handles transmit completion interrupt events.                            */
4884 /*                                                                          */
4885 /* Returns:                                                                 */
4886 /*   Nothing.                                                               */
4887 /****************************************************************************/
4888 static void
4889 bce_tx_intr(struct bce_softc *sc)
4890 {
4891 	struct ifnet *ifp = sc->bce_ifp;
4892 	u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
4893 
4894 #ifdef BCE_DEBUG
4895 	u32 timer_start, timer_end;
4896 	timer_start = REG_RD(sc, BCE_TIMER_25MHZ_FREE_RUN);
4897 	sc->tx_interrupts++;
4898 #endif
4899 
4900 	BCE_LOCK_ASSERT(sc);
4901 
4902 	/* Get the hardware's view of the TX consumer index. */
4903 	hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
4904 	sw_tx_cons = sc->tx_cons;
4905 
4906 	/* Prevent speculative reads from getting ahead of the status block. */
4907 	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4908 		BUS_SPACE_BARRIER_READ);
4909 
4910 	/* Cycle through any completed TX chain page entries. */
4911 	while (sw_tx_cons != hw_tx_cons) {
4912 #ifdef BCE_DEBUG
4913 		struct tx_bd *txbd = NULL;
4914 #endif
4915 		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
4916 
4917 		DBPRINT(sc, BCE_INFO_SEND,
4918 			"%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
4919 			"sw_tx_chain_cons = 0x%04X\n",
4920 			__FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
4921 
4922 		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
4923 			BCE_PRINTF("%s(%d): TX chain consumer out of range! "
4924 				" 0x%04X > 0x%04X\n", __FILE__, __LINE__, sw_tx_chain_cons,
4925 				(int) MAX_TX_BD);
4926 			bce_breakpoint(sc));
4927 
4928 		DBRUN(txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
4929 				[TX_IDX(sw_tx_chain_cons)]);
4930 
4931 		DBRUNIF((txbd == NULL),
4932 			BCE_PRINTF("%s(%d): Unexpected NULL tx_bd[0x%04X]!\n",
4933 				__FILE__, __LINE__, sw_tx_chain_cons);
4934 			bce_breakpoint(sc));
4935 
4936 		DBRUNMSG(BCE_INFO_SEND, BCE_PRINTF("%s(): ", __FUNCTION__);
4937 			bce_dump_txbd(sc, sw_tx_chain_cons, txbd));
4938 
4939 		/*
4940 		 * Free the associated mbuf. Remember
4941 		 * that only the last tx_bd of a packet
4942 		 * has an mbuf pointer and DMA map.
4943 		 */
4944 		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
4945 
4946 			/* Validate that this is the last tx_bd. */
4947 			DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
4948 				BCE_PRINTF("%s(%d): tx_bd END flag not set but "
4949 				"txmbuf == NULL!\n", __FILE__, __LINE__);
4950 				bce_breakpoint(sc));
4951 
4952 			DBRUNMSG(BCE_INFO_SEND,
4953 				BCE_PRINTF("%s(): Unloading map/freeing mbuf "
4954 					"from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons));
4955 
4956 			/* Unmap the mbuf. */
4957 			bus_dmamap_unload(sc->tx_mbuf_tag,
4958 			    sc->tx_mbuf_map[sw_tx_chain_cons]);
4959 
4960 			/* Free the mbuf. */
4961 			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
4962 			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
4963 			DBRUN(sc->debug_tx_mbuf_alloc--);
4964 
4965 			ifp->if_opackets++;
4966 		}
4967 
4968 		sc->used_tx_bd--;
4969 		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4970 
4971 		/* Refresh hw_cons to see if there's new work. */
4972 		hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
4973 
4974 		/* Prevent speculative reads from getting ahead of the status block. */
4975 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4976 			BUS_SPACE_BARRIER_READ);
4977 	}
4978 
4979 	/* Clear the TX timeout timer. */
4980 	sc->watchdog_timer = 0;
4981 
4982 	/* Clear the tx hardware queue full flag. */
4983 	if (sc->used_tx_bd < sc->max_tx_bd) {
4984 		DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE),
4985 			DBPRINT(sc, BCE_INFO_SEND,
4986 				"%s(): Open TX chain! %d/%d (used/total)\n",
4987 				__FUNCTION__, sc->used_tx_bd, sc->max_tx_bd));
4988 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4989 	}
4990 
4991 	sc->tx_cons = sw_tx_cons;
4992 #ifdef BCE_DEBUG
4993 	timer_end = REG_RD(sc, BCE_TIMER_25MHZ_FREE_RUN);
4994 	sc->tx_intr_time += (u64) (timer_start > timer_end ?
4995 		(timer_start - timer_end) : (~timer_start + timer_end + 1));
4996 #endif
4997 }
4998 
4999 
5000 /****************************************************************************/
5001 /* Disables interrupt generation.                                           */
5002 /*                                                                          */
5003 /* Returns:                                                                 */
5004 /*   Nothing.                                                               */
5005 /****************************************************************************/
5006 static void
5007 bce_disable_intr(struct bce_softc *sc)
5008 {
5009 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5010 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
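	/* Read back the register to flush the preceding posted write. */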
5011 	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
5012 }
5013 
5014 
5015 /****************************************************************************/
5016 /* Enables interrupt generation.                                            */
5017 /*                                                                          */
5018 /* Returns:                                                                 */
5019 /*   Nothing.                                                               */
5020 /****************************************************************************/
5021 static void
5022 bce_enable_intr(struct bce_softc *sc)
5023 {
5024 	u32 val;
5025 
5026 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5027 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
5028 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
5029 
5030 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5031 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
5032 
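	/*
	 * Request a coalesce-now event so the host coalescing block
	 * generates an interrupt for any work that is already pending.
	 */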
5033 	val = REG_RD(sc, BCE_HC_COMMAND);
5034 	REG_WR(sc, BCE_HC_COMMAND, val | BCE_HC_COMMAND_COAL_NOW);
5035 }
5036 
5037 
5038 /****************************************************************************/
5039 /* Handles controller initialization.                                       */
5040 /*                                                                          */
5041 /* Returns:                                                                 */
5042 /*   Nothing.                                                               */
5043 /****************************************************************************/
5044 static void
5045 bce_init_locked(struct bce_softc *sc)
5046 {
5047 	struct ifnet *ifp;
5048 	u32 ether_mtu = 0;
5049 
5050 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
5051 
5052 	BCE_LOCK_ASSERT(sc);
5053 
5054 	ifp = sc->bce_ifp;
5055 
5056 	/* Check if the driver is still running and bail out if it is. */
5057 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5058 		goto bce_init_locked_exit;
5059 
5060 	bce_stop(sc);
5061 
5062 	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
5063 		BCE_PRINTF("%s(%d): Controller reset failed!\n",
5064 			__FILE__, __LINE__);
5065 		goto bce_init_locked_exit;
5066 	}
5067 
5068 	if (bce_chipinit(sc)) {
5069 		BCE_PRINTF("%s(%d): Controller initialization failed!\n",
5070 			__FILE__, __LINE__);
5071 		goto bce_init_locked_exit;
5072 	}
5073 
5074 	if (bce_blockinit(sc)) {
5075 		BCE_PRINTF("%s(%d): Block initialization failed!\n",
5076 			__FILE__, __LINE__);
5077 		goto bce_init_locked_exit;
5078 	}
5079 
5080 	/* Load our MAC address. */
5081 	bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
5082 	bce_set_mac_addr(sc);
5083 
5084 	/* Calculate and program the hardware Ethernet MTU size. */
5085 	if (ifp->if_mtu <= sc->pg_bd_mbuf_alloc_size)
5086 		/* Be generous on receive if we have room. */
5087 		ether_mtu = sc->pg_bd_mbuf_alloc_size;
5088 	else
5089 		ether_mtu = ifp->if_mtu;
5090 
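	/* Account for the Ethernet header, a possible VLAN tag and the CRC. */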
5091 	ether_mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN;
5092 
5093 	DBPRINT(sc, BCE_INFO_MISC, "%s(): setting h/w mtu = %d\n", __FUNCTION__,
5094 		ether_mtu);
5095 
5096 	/* Program the mtu, enabling jumbo frame support if necessary. */
5097 	if (ether_mtu > (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN))
5098 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
5099 			min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
5100 			BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
5101 	else
5102 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
5103 
5104 	DBPRINT(sc, BCE_INFO_LOAD,
5105 		"%s(): rx_bd_mbuf_alloc_size = %d, pg_bd_mbuf_alloc_size = %d\n",
5106 		__FUNCTION__, sc->rx_bd_mbuf_alloc_size, sc->pg_bd_mbuf_alloc_size);
5107 
5108 	/* Program appropriate promiscuous/multicast filtering. */
5109 	bce_set_rx_mode(sc);
5110 
5111 	/* Init page buffer descriptor chain. */
5112 	bce_init_pg_chain(sc);
5113 
5114 	/* Init RX buffer descriptor chain. */
5115 	bce_init_rx_chain(sc);
5116 
5117 	/* Init TX buffer descriptor chain. */
5118 	bce_init_tx_chain(sc);
5119 
5120 	/* Enable host interrupts. */
5121 	bce_enable_intr(sc);
5122 
5123 	bce_ifmedia_upd_locked(ifp);
5124 
5125 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
5126 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5127 
5128 	callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
5129 
5130 bce_init_locked_exit:
5131 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
5132 
5133 	return;
5134 }
5135 
5136 
5137 /****************************************************************************/
5138 /* Initialize the controller just enough so that any management firmware    */
5139 /* running on the device will continue to operate correctly.                */
5140 /*                                                                          */
5141 /* Returns:                                                                 */
5142 /*   Nothing.                                                               */
5143 /****************************************************************************/
5144 static void
5145 bce_mgmt_init_locked(struct bce_softc *sc)
5146 {
5147 	struct ifnet *ifp;
5148 
5149 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
5150 
5151 	BCE_LOCK_ASSERT(sc);
5152 
5153 	/* Bail out if management firmware is not running. */
5154 	if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) {
5155 		DBPRINT(sc, BCE_VERBOSE_SPECIAL,
5156 			"No management firmware running...\n");
5157 		goto bce_mgmt_init_locked_exit;
5158 	}
5159 
5160 	ifp = sc->bce_ifp;
5161 
5162 	/* Enable all critical blocks in the MAC. */
5163 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
5164 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
5165 	DELAY(20);
5166 
5167 	bce_ifmedia_upd_locked(ifp);
5168 bce_mgmt_init_locked_exit:
5169 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
5170 
5171 	return;
5172 }
5173 
5174 
5175 /****************************************************************************/
5176 /* Handles controller initialization when called from an unlocked routine.  */
5177 /*                                                                          */
5178 /* Returns:                                                                 */
5179 /*   Nothing.                                                               */
5180 /****************************************************************************/
5181 static void
5182 bce_init(void *xsc)
5183 {
5184 	struct bce_softc *sc = xsc;
5185 
5186 	BCE_LOCK(sc);
5187 	bce_init_locked(sc);
5188 	BCE_UNLOCK(sc);
5189 }
5190 
5191 
5192 /****************************************************************************/
5193 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
5194 /* memory visible to the controller.                                        */
5195 /*                                                                          */
5196 /* Returns:                                                                 */
5197 /*   0 for success, positive value for failure.                             */
5198 /* Modified:                                                                */
5199 /*   m_head: May be set to NULL if MBUF is excessively fragmented.          */
5200 /****************************************************************************/
5201 static int
5202 bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head)
5203 {
5204 	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
5205 	bus_dmamap_t map;
5206 	struct tx_bd *txbd = NULL;
5207 	struct mbuf *m0;
5208 	struct ether_vlan_header *eh;
5209 	struct ip *ip;
5210 	struct tcphdr *th;
5211 	u16 prod, chain_prod, etype, mss = 0, vlan_tag = 0, flags = 0;
5212 	u32 prod_bseq;
5213 	int hdr_len = 0, e_hlen = 0, ip_hlen = 0, tcp_hlen = 0, ip_len = 0;
5214 
5215 
5216 #ifdef BCE_DEBUG
5217 	u16 debug_prod;
5218 #endif
5219 	int i, error, nsegs, rc = 0;
5220 
5221 	/* Transfer any checksum offload flags to the bd. */
5222 	m0 = *m_head;
5223 	if (m0->m_pkthdr.csum_flags) {
5224 		if (m0->m_pkthdr.csum_flags & CSUM_IP)
5225 			flags |= TX_BD_FLAGS_IP_CKSUM;
5226 		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
5227 			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5228 		if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5229 			/* For TSO the controller needs two pieces of info, */
5230 			/* the MSS and the IP+TCP options length.           */
5231 			mss = htole16(m0->m_pkthdr.tso_segsz);
5232 
5233 			/* Map the header and find the Ethernet type & header length */
5234 			eh = mtod(m0, struct ether_vlan_header *);
5235 			if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
5236 				etype = ntohs(eh->evl_proto);
5237 				e_hlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5238 			} else {
5239 				etype = ntohs(eh->evl_encap_proto);
5240 				e_hlen = ETHER_HDR_LEN;
5241 			}
5242 
5243 			/* Check for supported TSO Ethernet types (only IPv4 for now) */
5244 			switch (etype) {
5245 				case ETHERTYPE_IP:
5246 					ip = (struct ip *)(m0->m_data + e_hlen);
5247 
5248 					/* TSO only supported for TCP protocol */
5249 					if (ip->ip_p != IPPROTO_TCP) {
5250 						BCE_PRINTF("%s(%d): TSO enabled for non-TCP frame!\n",
5251 							__FILE__, __LINE__);
5252 						goto bce_tx_encap_skip_tso;
5253 					}
5254 
5255 					/* Get IP header length in bytes (min 20) */
5256 					ip_hlen = ip->ip_hl << 2;
5257 
5258 					/* Get the TCP header length in bytes (min 20) */
5259 					th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
5260 					tcp_hlen = (th->th_off << 2);
5261 
5262 					/* IP header length and checksum will be calc'd by hardware */
5263 					ip_len = ip->ip_len;
5264 					ip->ip_len = 0;
5265 					ip->ip_sum = 0;
5266 					break;
5267 				case ETHERTYPE_IPV6:
5268 					BCE_PRINTF("%s(%d): TSO over IPv6 not supported!\n",
5269 						__FILE__, __LINE__);
5270 					goto bce_tx_encap_skip_tso;
5271 				default:
5272 					BCE_PRINTF("%s(%d): TSO enabled for unsupported protocol!\n",
5273 						__FILE__, __LINE__);
5274 					goto bce_tx_encap_skip_tso;
5275 			}
5276 
5277 			hdr_len = e_hlen + ip_hlen + tcp_hlen;
5278 
5279 			DBPRINT(sc, BCE_EXCESSIVE_SEND,
5280 				"%s(): hdr_len = %d, e_hlen = %d, ip_hlen = %d, tcp_hlen = %d, ip_len = %d\n",
5281 				 __FUNCTION__, hdr_len, e_hlen, ip_hlen, tcp_hlen, ip_len);
5282 
5283 			/* Set the LSO flag in the TX BD */
5284 			flags |= TX_BD_FLAGS_SW_LSO;
5285 			/* Set the length of IP + TCP options (in 32 bit words) */
5286 			flags |= (((ip_hlen + tcp_hlen - 40) >> 2) << 8);
5287 
5288 bce_tx_encap_skip_tso:
5289 			DBRUN(sc->requested_tso_frames++);
5290 		}
5291 	}
5292 
5293 	/* Transfer any VLAN tags to the bd. */
5294 	if (m0->m_flags & M_VLANTAG) {
5295 		flags |= TX_BD_FLAGS_VLAN_TAG;
5296 		vlan_tag = m0->m_pkthdr.ether_vtag;
5297 	}
5298 
5299 	/* Map the mbuf into DMAable memory. */
5300 	prod = sc->tx_prod;
5301 	chain_prod = TX_CHAIN_IDX(prod);
5302 	map = sc->tx_mbuf_map[chain_prod];
5303 
5304 	/* Map the mbuf into our DMA address space. */
5305 	error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
5306 	    segs, &nsegs, BUS_DMA_NOWAIT);
5307 
5308 	/* Check if the DMA mapping was successful */
5309 	if (error == EFBIG) {
5310 
5311 		/* The mbuf is too fragmented for our DMA mapping. */
5312 		DBPRINT(sc, BCE_WARN, "%s(): fragmented mbuf (%d pieces)\n",
5313 			__FUNCTION__, nsegs);
5314 		DBRUN(bce_dump_mbuf(sc, m0););
5315 
5316 		/* Try to defrag the mbuf. */
5317 		m0 = m_defrag(*m_head, M_DONTWAIT);
5318 		if (m0 == NULL) {
5319 			/* Defrag was unsuccessful */
5320 			m_freem(*m_head);
5321 			*m_head = NULL;
5322 			sc->mbuf_alloc_failed++;
5323 			return (ENOBUFS);
5324 		}
5325 
5326 		/* Defrag was successful, try mapping again */
5327 		*m_head = m0;
5328 		error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
5329 		    segs, &nsegs, BUS_DMA_NOWAIT);
5330 
5331 		/* Still getting an error after a defrag. */
5332 		if (error == ENOMEM) {
5333 			/* Insufficient DMA buffers available. */
5334 			sc->tx_dma_map_failures++;
5335 			return (error);
5336 		} else if (error != 0) {
5337 			/* Still can't map the mbuf, release it and return an error. */
5338 			BCE_PRINTF(
5339 			    "%s(%d): Unknown error mapping mbuf into TX chain!\n",
5340 			    __FILE__, __LINE__);
5341 			m_freem(m0);
5342 			*m_head = NULL;
5343 			sc->tx_dma_map_failures++;
5344 			return (ENOBUFS);
5345 		}
5346 	} else if (error == ENOMEM) {
5347 		/* Insufficient DMA buffers available. */
5348 		sc->tx_dma_map_failures++;
5349 		return (error);
5350 	} else if (error != 0) {
5351 		m_freem(m0);
5352 		*m_head = NULL;
5353 		sc->tx_dma_map_failures++;
5354 		return (error);
5355 	}
5356 
5357 	/* Make sure there's room in the chain */
5358 	if (nsegs > (sc->max_tx_bd - sc->used_tx_bd)) {
5359 		bus_dmamap_unload(sc->tx_mbuf_tag, map);
5360 		return (ENOBUFS);
5361 	}
5362 
5363 	/* prod points to an empty tx_bd at this point. */
5364 	prod_bseq  = sc->tx_prod_bseq;
5365 
5366 #ifdef BCE_DEBUG
5367 	debug_prod = chain_prod;
5368 #endif
5369 
5370 	DBPRINT(sc, BCE_INFO_SEND,
5371 		"%s(start): prod = 0x%04X, chain_prod = 0x%04X, "
5372 		"prod_bseq = 0x%08X\n",
5373 		__FUNCTION__, prod, chain_prod, prod_bseq);
5374 
5375 	/*
5376 	 * Cycle through each mbuf segment that makes up
5377 	 * the outgoing frame, gathering the mapping info
5378 	 * for that segment and creating a tx_bd for
5379 	 * the mbuf.
5380 	 */
5381 	for (i = 0; i < nsegs ; i++) {
5382 
5383 		chain_prod = TX_CHAIN_IDX(prod);
5384 		txbd= &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
5385 
5386 		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
5387 		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
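		/* Upper 16 bits: MSS (LSO only); lower 16 bits: segment length. */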
5388 		txbd->tx_bd_mss_nbytes = htole32(mss << 16) | htole16(segs[i].ds_len);
5389 		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
5390 		txbd->tx_bd_flags = htole16(flags);
5391 		prod_bseq += segs[i].ds_len;
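		/* Flag the first descriptor as the start of the frame. */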
5392 		if (i == 0)
5393 			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
5394 		prod = NEXT_TX_BD(prod);
5395 	}
5396 
5397 	/* Set the END flag on the last TX buffer descriptor. */
5398 	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
5399 
5400 	DBRUNMSG(BCE_EXCESSIVE_SEND, bce_dump_tx_chain(sc, debug_prod, nsegs));
5401 
5402 	DBPRINT(sc, BCE_INFO_SEND,
5403 		"%s( end ): prod = 0x%04X, chain_prod = 0x%04X, "
5404 		"prod_bseq = 0x%08X\n",
5405 		__FUNCTION__, prod, chain_prod, prod_bseq);
5406 
5407 	/*
5408 	 * Ensure that the mbuf pointer for this transmission
5409 	 * is placed at the array index of the last
5410 	 * descriptor in this chain.  This is done
5411 	 * because a single map is used for all
5412 	 * segments of the mbuf and we don't want to
5413 	 * unload the map before all of the segments
5414 	 * have been freed.
5415 	 */
5416 	sc->tx_mbuf_ptr[chain_prod] = m0;
5417 	sc->used_tx_bd += nsegs;
5418 
5419 	/* Update some debug statistics counters */
5420 	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
5421 		sc->tx_hi_watermark = sc->used_tx_bd);
5422 	DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
5423 	DBRUNIF(sc->debug_tx_mbuf_alloc++);
5424 
5425 	DBRUNMSG(BCE_EXCESSIVE_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod, 1));
5426 
5427 	/* prod points to the next free tx_bd at this point. */
5428 	sc->tx_prod = prod;
5429 	sc->tx_prod_bseq = prod_bseq;
5430 
5431 	return(rc);
5432 }
5433 
5434 
5435 /****************************************************************************/
5436 /* Main transmit routine when called from another routine with a lock.      */
5437 /*                                                                          */
5438 /* Returns:                                                                 */
5439 /*   Nothing.                                                               */
5440 /****************************************************************************/
5441 static void
5442 bce_start_locked(struct ifnet *ifp)
5443 {
5444 	struct bce_softc *sc = ifp->if_softc;
5445 	struct mbuf *m_head = NULL;
5446 	int count = 0;
5447 	u16 tx_prod, tx_chain_prod;
5448 
5449 	/* prod points to the next free tx_bd. */
5450 	tx_prod = sc->tx_prod;
5451 	tx_chain_prod = TX_CHAIN_IDX(tx_prod);
5452 
5453 	DBPRINT(sc, BCE_INFO_SEND,
5454 		"%s(enter): tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
5455 		"tx_prod_bseq = 0x%08X\n",
5456 		__FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);
5457 
5458 	/* If there's no link or the transmit queue is empty then just exit. */
5459 	if (!sc->bce_link) {
5460 		DBPRINT(sc, BCE_INFO_SEND, "%s(): No link.\n",
5461 			__FUNCTION__);
5462 		goto bce_start_locked_exit;
5463 	}
5464 
5465 	if (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
5466 		DBPRINT(sc, BCE_INFO_SEND, "%s(): Transmit queue empty.\n",
5467 			__FUNCTION__);
5468 		goto bce_start_locked_exit;
5469 	}
5470 
5471 	/*
5472 	 * Keep adding entries while there is space in the ring.
5473 	 */
5474 	while (sc->used_tx_bd < sc->max_tx_bd) {
5475 
5476 		/* Check for any frames to send. */
5477 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
5478 		if (m_head == NULL)
5479 			break;
5480 
5481 		/*
5482 		 * Pack the data into the transmit ring. If we
5483 		 * don't have room, place the mbuf back at the
5484 		 * head of the queue and set the OACTIVE flag
5485 		 * to wait for the NIC to drain the chain.
5486 		 */
5487 		if (bce_tx_encap(sc, &m_head)) {
5488 			if (m_head != NULL)
5489 				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
5490 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
5491 			DBPRINT(sc, BCE_INFO_SEND,
5492 				"TX chain is closed for business! Total tx_bd used = %d\n",
5493 				sc->used_tx_bd);
5494 			break;
5495 		}
5496 
5497 		count++;
5498 
5499 		/* Send a copy of the frame to any BPF listeners. */
5500 		ETHER_BPF_MTAP(ifp, m_head);
5501 	}
5502 
5503 	if (count == 0) {
5504 		/* no packets were dequeued */
5505 		DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were dequeued\n",
5506 			__FUNCTION__);
5507 		goto bce_start_locked_exit;
5508 	}
5509 
5510 	/* Update the driver's counters. */
5511 	tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);
5512 
5513 	/* Start the transmit. */
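	/* Write the new producer index and byte sequence to the TX mailbox. */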
5514 	REG_WR16(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BIDX, sc->tx_prod);
5515 	REG_WR(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
5516 
5517 	/* Set the tx timeout. */
5518 	sc->watchdog_timer = BCE_TX_TIMEOUT;
5519 
5520 bce_start_locked_exit:
5521 	DBPRINT(sc, BCE_INFO_SEND,
5522 		"%s(exit ): tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
5523 		"tx_prod_bseq = 0x%08X\n",
5524 		__FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);
5525 
5526 	return;
5527 }
5528 
5529 
5530 /****************************************************************************/
5531 /* Main transmit routine when called from another routine without a lock.   */
5532 /*                                                                          */
5533 /* Returns:                                                                 */
5534 /*   Nothing.                                                               */
5535 /****************************************************************************/
5536 static void
5537 bce_start(struct ifnet *ifp)
5538 {
5539 	struct bce_softc *sc = ifp->if_softc;
5540 
5541 	BCE_LOCK(sc);
5542 	bce_start_locked(ifp);
5543 	BCE_UNLOCK(sc);
5544 }
5545 
5546 
5547 /****************************************************************************/
5548 /* Handles any IOCTL calls from the operating system.                       */
5549 /*                                                                          */
5550 /* Returns:                                                                 */
5551 /*   0 for success, positive value for failure.                             */
5552 /****************************************************************************/
5553 static int
5554 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
5555 {
5556 	struct bce_softc *sc = ifp->if_softc;
5557 	struct ifreq *ifr = (struct ifreq *) data;
5558 	struct mii_data *mii;
5559 	int mask, error = 0;
5560 
5561 	switch(command) {
5562 
5563 		/* Set the interface MTU. */
5564 		case SIOCSIFMTU:
5565 			/* Check that the MTU setting is supported. */
5566 			if ((ifr->ifr_mtu < BCE_MIN_MTU) ||
5567 				(ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
5568 				error = EINVAL;
5569 				break;
5570 			}
5571 
5572 			DBPRINT(sc, BCE_INFO_MISC,
5573 				"SIOCSIFMTU: Changing MTU from %d to %d\n",
5574 				(int) ifp->if_mtu, (int) ifr->ifr_mtu);
5575 
5576 			BCE_LOCK(sc);
5577 			ifp->if_mtu = ifr->ifr_mtu;
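			/* Force a full re-init so the new MTU takes effect. */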
5578 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5579 			bce_init_locked(sc);
5580 			BCE_UNLOCK(sc);
5581 			break;
5582 
5583 		/* Set interface flags. */
5584 		case SIOCSIFFLAGS:
5585 			DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Received SIOCSIFFLAGS\n");
5586 
5587 			BCE_LOCK(sc);
5588 
5589 			/* Check if the interface is up. */
5590 			if (ifp->if_flags & IFF_UP) {
5591 				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5592 					/* Change promiscuous/multicast flags as necessary. */
5593 					bce_set_rx_mode(sc);
5594 				} else {
5595 					/* Start the HW */
5596 					bce_init_locked(sc);
5597 				}
5598 			} else {
5599 				/* The interface is down, check if driver is running. */
5600 				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5601 					bce_stop(sc);
5602 
5603 					/* If MFW is running, re-init just enough to keep it operating. */
5604 					if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
5605 						bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
5606 						bce_chipinit(sc);
5607 						bce_mgmt_init_locked(sc);
5608 					}
5609 				}
5610 			}
5611 
5612 			BCE_UNLOCK(sc);
5613 			error = 0;
5614 
5615 			break;
5616 
5617 		/* Add/Delete multicast address */
5618 		case SIOCADDMULTI:
5619 		case SIOCDELMULTI:
5620 			DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCADDMULTI/SIOCDELMULTI\n");
5621 
5622 			BCE_LOCK(sc);
5623 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5624 				bce_set_rx_mode(sc);
5625 				error = 0;
5626 			}
5627 			BCE_UNLOCK(sc);
5628 
5629 			break;
5630 
5631 		/* Set/Get Interface media */
5632 		case SIOCSIFMEDIA:
5633 		case SIOCGIFMEDIA:
5634 			DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n");
5635 
5636 			mii = device_get_softc(sc->bce_miibus);
5637 			error = ifmedia_ioctl(ifp, ifr,
5638 			    &mii->mii_media, command);
5639 			break;
5640 
5641 		/* Set interface capability */
5642 		case SIOCSIFCAP:
5643 			mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5644 			DBPRINT(sc, BCE_INFO_MISC, "Received SIOCSIFCAP = 0x%08X\n", (u32) mask);
5645 
5646 			/* Toggle the TX checksum capabilities enable flag. */
5647 			if (mask & IFCAP_TXCSUM) {
5648 				ifp->if_capenable ^= IFCAP_TXCSUM;
5649 				if (IFCAP_TXCSUM & ifp->if_capenable)
5650 					ifp->if_hwassist = BCE_IF_HWASSIST;
5651 				else
5652 					ifp->if_hwassist = 0;
5653 			}
5654 
5655 			/* Toggle the RX checksum capabilities enable flag. */
5656 			if (mask & IFCAP_RXCSUM) {
5657 				ifp->if_capenable ^= IFCAP_RXCSUM;
5658 				if (IFCAP_RXCSUM & ifp->if_capenable)
5659 					ifp->if_hwassist = BCE_IF_HWASSIST;
5660 				else
5661 					ifp->if_hwassist = 0;
5662 			}
5663 
5664 			/* Toggle the TSO capabilities enable flag. */
5665 			if (bce_tso_enable && (mask & IFCAP_TSO4)) {
5666 				ifp->if_capenable ^= IFCAP_TSO4;
5667 				if (IFCAP_TSO4 & ifp->if_capenable)
5668 					ifp->if_hwassist = BCE_IF_HWASSIST;
5669 				else
5670 					ifp->if_hwassist = 0;
5671 			}
5672 
5673 			/* Toggle VLAN_MTU capabilities enable flag. */
5674 			if (mask & IFCAP_VLAN_MTU) {
5675 				BCE_PRINTF("%s(%d): Changing VLAN_MTU not supported.\n",
5676 					__FILE__, __LINE__);
5677 			}
5678 
5679 			/* Toggle VLANHWTAG capabilities enabled flag. */
5680 			if (mask & IFCAP_VLAN_HWTAGGING) {
5681 				if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
5682 					BCE_PRINTF("%s(%d): Cannot change VLAN_HWTAGGING while "
5683 						"management firmware (ASF/IPMI/UMP) is running!\n",
5684 						__FILE__, __LINE__);
5685 				else
5686 					BCE_PRINTF("%s(%d): Changing VLAN_HWTAGGING not supported!\n",
5687 						__FILE__, __LINE__);
5688 			}
5689 
5690 			break;
5691 		default:
5692 			/* We don't know how to handle the IOCTL, pass it on. */
5693 			error = ether_ioctl(ifp, command, data);
5694 			break;
5695 	}
5696 
5697 	return(error);
5698 }
5699 
5700 
5701 /****************************************************************************/
5702 /* Transmit timeout handler.                                                */
5703 /*                                                                          */
5704 /* Returns:                                                                 */
5705 /*   Nothing.                                                               */
5706 /****************************************************************************/
5707 static void
5708 bce_watchdog(struct bce_softc *sc)
5709 {
5710 
5711 	BCE_LOCK_ASSERT(sc);
5712 
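	/* Do nothing if the timer isn't armed or hasn't expired yet. */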
5713 	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
5714 		return;
5715 
5716 	/*
5717 	 * If we are in this routine because of pause frames, then
5718 	 * don't reset the hardware.
5719 	 */
5720 	if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
5721 		return;
5722 
5723 	BCE_PRINTF("%s(%d): Watchdog timeout occurred, resetting!\n",
5724 		__FILE__, __LINE__);
5725 
5726 	DBRUNMSG(BCE_VERBOSE_SEND,
5727 		bce_dump_driver_state(sc);
5728 		bce_dump_status_block(sc));
5729 
5730 	/* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */
5731 
5732 	sc->bce_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5733 
5734 	bce_init_locked(sc);
5735 	sc->bce_ifp->if_oerrors++;
5736 
5737 }
5738 
5739 
5740 /*
5741  * Interrupt handler.
5742  */
5743 /****************************************************************************/
5744 /* Main interrupt entry point.  Verifies that the controller generated the  */
5745 /* interrupt and then calls a separate routine to handle the various        */
5746 /* interrupt causes (PHY, TX, RX).                                          */
5747 /*                                                                          */
5748 /* Returns:                                                                 */
5749 /*   Nothing.                                                               */
5750 /****************************************************************************/
5751 static void
5752 bce_intr(void *xsc)
5753 {
5754 	struct bce_softc *sc;
5755 	struct ifnet *ifp;
5756 	u32 status_attn_bits;
5757 	u16 hw_rx_cons, hw_tx_cons;
5758 
5759 	sc = xsc;
5760 	ifp = sc->bce_ifp;
5761 
5762 	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __FUNCTION__);
5763 	BCE_LOCK(sc);
5764 
5765 	DBRUN(sc->interrupts_generated++);
5766 
5767 	bus_dmamap_sync(sc->status_tag, sc->status_map,
5768 	    BUS_DMASYNC_POSTWRITE);
5769 
5770 	/*
5771 	 * If the hardware status block index
5772 	 * matches the last value read by the
5773 	 * driver and we haven't asserted our
5774 	 * interrupt then there's nothing to do.
5775 	 */
5776 	if ((sc->status_block->status_idx == sc->last_status_idx) &&
5777 		(REG_RD(sc, BCE_PCICFG_MISC_STATUS) & BCE_PCICFG_MISC_STATUS_INTA_VALUE))
5778 		goto bce_intr_exit;
5779 
5780 	/* Ack the interrupt and stop others from occurring. */
5781 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5782 		BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5783 		BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5784 
5785 	/* Check if the hardware has finished any work. */
5786 	hw_rx_cons = bce_get_hw_rx_cons(sc);
5787 	hw_tx_cons = bce_get_hw_tx_cons(sc);
5788 
5789 	/* Keep processing data as long as there is work to do. */
5790 	for (;;) {
5791 
5792 		status_attn_bits = sc->status_block->status_attn_bits;
5793 
5794 		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
5795 			BCE_PRINTF("Simulating unexpected status attention bit set.");
5796 			status_attn_bits = status_attn_bits | STATUS_ATTN_BITS_PARITY_ERROR);
5797 
5798 		/* Was it a link change interrupt? */
5799 		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5800 			(sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
5801 			bce_phy_intr(sc);
5802 
5803 		/* If any other attention is asserted then the chip is toast. */
5804 		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5805 			(sc->status_block->status_attn_bits_ack &
5806 			~STATUS_ATTN_BITS_LINK_STATE))) {
5807 
5808 			DBRUN(sc->unexpected_attentions++);
5809 
5810 			BCE_PRINTF("%s(%d): Fatal attention detected: 0x%08X\n",
5811 				__FILE__, __LINE__, sc->status_block->status_attn_bits);
5812 
5813 			DBRUNMSG(BCE_FATAL,
5814 				if (bce_debug_unexpected_attention == 0)
5815 					bce_breakpoint(sc));
5816 
5817 			bce_init_locked(sc);
5818 			goto bce_intr_exit;
5819 		}
5820 
5821 		/* Check for any completed RX frames. */
5822 		if (hw_rx_cons != sc->hw_rx_cons)
5823 			bce_rx_intr(sc);
5824 
5825 		/* Check for any completed TX frames. */
5826 		if (hw_tx_cons != sc->hw_tx_cons)
5827 			bce_tx_intr(sc);
5828 
5829 		/* Save the status block index value for use during the next interrupt. */
5830 		sc->last_status_idx = sc->status_block->status_idx;
5831 
5832 		/* Prevent speculative reads from getting ahead of the status block. */
5833 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
5834 			BUS_SPACE_BARRIER_READ);
5835 
5836 		/* If there's no work left then exit the interrupt service routine. */
5837 		hw_rx_cons = bce_get_hw_rx_cons(sc);
5838 		hw_tx_cons = bce_get_hw_tx_cons(sc);
5839 
5840 		if ((hw_rx_cons == sc->hw_rx_cons) && (hw_tx_cons == sc->hw_tx_cons))
5841 			break;
5842 
5843 	}
5844 
5845 	bus_dmamap_sync(sc->status_tag,	sc->status_map,
5846 	    BUS_DMASYNC_PREWRITE);
5847 
5848 	/* Re-enable interrupts. */
5849 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5850 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
5851 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5852 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5853 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
5854 
5855 	/* Handle any frames that arrived while handling the interrupt. */
5856 	if (ifp->if_drv_flags & IFF_DRV_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5857 		bce_start_locked(ifp);
5858 
5859 bce_intr_exit:
5860 	BCE_UNLOCK(sc);
5861 }
5862 
5863 
5864 /****************************************************************************/
5865 /* Programs the various packet receive modes (broadcast and multicast).     */
5866 /*                                                                          */
5867 /* Returns:                                                                 */
5868 /*   Nothing.                                                               */
5869 /****************************************************************************/
5870 static void
5871 bce_set_rx_mode(struct bce_softc *sc)
5872 {
5873 	struct ifnet *ifp;
5874 	struct ifmultiaddr *ifma;
5875 	u32 hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
5876 	u32 rx_mode, sort_mode;
5877 	int h, i;
5878 
5879 	BCE_LOCK_ASSERT(sc);
5880 
5881 	ifp = sc->bce_ifp;
5882 
5883 	/* Initialize receive mode default settings. */
5884 	rx_mode   = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
5885 			    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
5886 	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
5887 
5888 	/*
5889 	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5890 	 * be enabled.
5891 	 */
5892 	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
5893 		(!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
5894 		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
5895 
5896 	/*
5897 	 * Check for promiscuous, all multicast, or selected
5898 	 * multicast address filtering.
5899 	 */
5900 	if (ifp->if_flags & IFF_PROMISC) {
5901 		DBPRINT(sc, BCE_INFO_MISC, "Enabling promiscuous mode.\n");
5902 
5903 		/* Enable promiscuous mode. */
5904 		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
5905 		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
5906 	} else if (ifp->if_flags & IFF_ALLMULTI) {
5907 		DBPRINT(sc, BCE_INFO_MISC, "Enabling all multicast mode.\n");
5908 
5909 		/* Enable all multicast addresses. */
5910 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5911 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
5912 		}
5913 		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
5914 	} else {
5915 		/* Accept one or more multicast(s). */
5916 		DBPRINT(sc, BCE_INFO_MISC, "Enabling selective multicast mode.\n");
5917 
5918 		IF_ADDR_LOCK(ifp);
5919 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
5920 			if (ifma->ifma_addr->sa_family != AF_LINK)
5921 				continue;
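			/*
			 * Use the low byte of the CRC32 of the address to
			 * pick one of the 256 bits spread across the eight
			 * multicast hash registers.
			 */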
5922 			h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
5923 			    ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
5924 			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
5925 		}
5926 		IF_ADDR_UNLOCK(ifp);
5927 
5928 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
5929 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);
5930 
5931 		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
5932 	}
5933 
5934 	/* Only make changes if the receive mode has actually changed. */
5935 	if (rx_mode != sc->rx_mode) {
5936 		DBPRINT(sc, BCE_VERBOSE_MISC, "Enabling new receive mode: 0x%08X\n",
5937 			rx_mode);
5938 
5939 		sc->rx_mode = rx_mode;
5940 		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
5941 	}
5942 
5943 	/* Disable and clear the existing sort before enabling a new sort. */
5944 	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
5945 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
5946 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
5947 }
5948 
5949 
5950 /****************************************************************************/
5951 /* Called periodically to update statistics from the controller's           */
5952 /* statistics block.                                                        */
5953 /*                                                                          */
5954 /* Returns:                                                                 */
5955 /*   Nothing.                                                               */
5956 /****************************************************************************/
5957 static void
5958 bce_stats_update(struct bce_softc *sc)
5959 {
5960 	struct ifnet *ifp;
5961 	struct statistics_block *stats;
5962 
5963 	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __FUNCTION__);
5964 
5965 	ifp = sc->bce_ifp;
5966 
5967 	stats = (struct statistics_block *) sc->stats_block;
5968 
5969 	/*
5970 	 * Update the interface statistics from the
5971 	 * hardware statistics.
5972 	 */
5973 	ifp->if_collisions = (u_long) stats->stat_EtherStatsCollisions;
5974 
5975 	ifp->if_ierrors = (u_long) stats->stat_EtherStatsUndersizePkts +
5976 				      (u_long) stats->stat_EtherStatsOverrsizePkts +
5977 					  (u_long) stats->stat_IfInMBUFDiscards +
5978 					  (u_long) stats->stat_Dot3StatsAlignmentErrors +
5979 					  (u_long) stats->stat_Dot3StatsFCSErrors;
5980 
5981 	ifp->if_oerrors = (u_long) stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5982 					  (u_long) stats->stat_Dot3StatsExcessiveCollisions +
5983 					  (u_long) stats->stat_Dot3StatsLateCollisions;
5984 
5985 	/*
5986 	 * Certain controllers don't report
5987 	 * carrier sense errors correctly.
5988 	 * See errata E11_5708CA0_1165.
5989 	 */
5990 	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
5991 	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0))
5992 		ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;
5993 
5994 	/*
5995 	 * Update the sysctl statistics from the
5996 	 * hardware statistics.
5997 	 */
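	/* 64-bit counters are stored as separate high and low 32-bit halves. */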
5998 	sc->stat_IfHCInOctets =
5999 		((u64) stats->stat_IfHCInOctets_hi << 32) +
6000 		 (u64) stats->stat_IfHCInOctets_lo;
6001 
6002 	sc->stat_IfHCInBadOctets =
6003 		((u64) stats->stat_IfHCInBadOctets_hi << 32) +
6004 		 (u64) stats->stat_IfHCInBadOctets_lo;
6005 
6006 	sc->stat_IfHCOutOctets =
6007 		((u64) stats->stat_IfHCOutOctets_hi << 32) +
6008 		 (u64) stats->stat_IfHCOutOctets_lo;
6009 
6010 	sc->stat_IfHCOutBadOctets =
6011 		((u64) stats->stat_IfHCOutBadOctets_hi << 32) +
6012 		 (u64) stats->stat_IfHCOutBadOctets_lo;
6013 
6014 	sc->stat_IfHCInUcastPkts =
6015 		((u64) stats->stat_IfHCInUcastPkts_hi << 32) +
6016 		 (u64) stats->stat_IfHCInUcastPkts_lo;
6017 
6018 	sc->stat_IfHCInMulticastPkts =
6019 		((u64) stats->stat_IfHCInMulticastPkts_hi << 32) +
6020 		 (u64) stats->stat_IfHCInMulticastPkts_lo;
6021 
6022 	sc->stat_IfHCInBroadcastPkts =
6023 		((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) +
6024 		 (u64) stats->stat_IfHCInBroadcastPkts_lo;
6025 
6026 	sc->stat_IfHCOutUcastPkts =
6027 		((u64) stats->stat_IfHCOutUcastPkts_hi << 32) +
6028 		 (u64) stats->stat_IfHCOutUcastPkts_lo;
6029 
6030 	sc->stat_IfHCOutMulticastPkts =
6031 		((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) +
6032 		 (u64) stats->stat_IfHCOutMulticastPkts_lo;
6033 
6034 	sc->stat_IfHCOutBroadcastPkts =
6035 		((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
6036 		 (u64) stats->stat_IfHCOutBroadcastPkts_lo;
6037 
6038 	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
6039 		stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
6040 
6041 	sc->stat_Dot3StatsCarrierSenseErrors =
6042 		stats->stat_Dot3StatsCarrierSenseErrors;
6043 
6044 	sc->stat_Dot3StatsFCSErrors =
6045 		stats->stat_Dot3StatsFCSErrors;
6046 
6047 	sc->stat_Dot3StatsAlignmentErrors =
6048 		stats->stat_Dot3StatsAlignmentErrors;
6049 
6050 	sc->stat_Dot3StatsSingleCollisionFrames =
6051 		stats->stat_Dot3StatsSingleCollisionFrames;
6052 
6053 	sc->stat_Dot3StatsMultipleCollisionFrames =
6054 		stats->stat_Dot3StatsMultipleCollisionFrames;
6055 
6056 	sc->stat_Dot3StatsDeferredTransmissions =
6057 		stats->stat_Dot3StatsDeferredTransmissions;
6058 
6059 	sc->stat_Dot3StatsExcessiveCollisions =
6060 		stats->stat_Dot3StatsExcessiveCollisions;
6061 
6062 	sc->stat_Dot3StatsLateCollisions =
6063 		stats->stat_Dot3StatsLateCollisions;
6064 
6065 	sc->stat_EtherStatsCollisions =
6066 		stats->stat_EtherStatsCollisions;
6067 
6068 	sc->stat_EtherStatsFragments =
6069 		stats->stat_EtherStatsFragments;
6070 
6071 	sc->stat_EtherStatsJabbers =
6072 		stats->stat_EtherStatsJabbers;
6073 
6074 	sc->stat_EtherStatsUndersizePkts =
6075 		stats->stat_EtherStatsUndersizePkts;
6076 
6077 	sc->stat_EtherStatsOverrsizePkts =
6078 		stats->stat_EtherStatsOverrsizePkts;
6079 
6080 	sc->stat_EtherStatsPktsRx64Octets =
6081 		stats->stat_EtherStatsPktsRx64Octets;
6082 
6083 	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
6084 		stats->stat_EtherStatsPktsRx65Octetsto127Octets;
6085 
6086 	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
6087 		stats->stat_EtherStatsPktsRx128Octetsto255Octets;
6088 
6089 	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
6090 		stats->stat_EtherStatsPktsRx256Octetsto511Octets;
6091 
6092 	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
6093 		stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
6094 
6095 	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
6096 		stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
6097 
6098 	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
6099 		stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
6100 
6101 	sc->stat_EtherStatsPktsTx64Octets =
6102 		stats->stat_EtherStatsPktsTx64Octets;
6103 
6104 	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
6105 		stats->stat_EtherStatsPktsTx65Octetsto127Octets;
6106 
6107 	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
6108 		stats->stat_EtherStatsPktsTx128Octetsto255Octets;
6109 
6110 	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
6111 		stats->stat_EtherStatsPktsTx256Octetsto511Octets;
6112 
6113 	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
6114 		stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
6115 
6116 	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
6117 		stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
6118 
6119 	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
6120 		stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
6121 
6122 	sc->stat_XonPauseFramesReceived =
6123 		stats->stat_XonPauseFramesReceived;
6124 
6125 	sc->stat_XoffPauseFramesReceived =
6126 		stats->stat_XoffPauseFramesReceived;
6127 
6128 	sc->stat_OutXonSent =
6129 		stats->stat_OutXonSent;
6130 
6131 	sc->stat_OutXoffSent =
6132 		stats->stat_OutXoffSent;
6133 
6134 	sc->stat_FlowControlDone =
6135 		stats->stat_FlowControlDone;
6136 
6137 	sc->stat_MacControlFramesReceived =
6138 		stats->stat_MacControlFramesReceived;
6139 
6140 	sc->stat_XoffStateEntered =
6141 		stats->stat_XoffStateEntered;
6142 
6143 	sc->stat_IfInFramesL2FilterDiscards =
6144 		stats->stat_IfInFramesL2FilterDiscards;
6145 
6146 	sc->stat_IfInRuleCheckerDiscards =
6147 		stats->stat_IfInRuleCheckerDiscards;
6148 
6149 	sc->stat_IfInFTQDiscards =
6150 		stats->stat_IfInFTQDiscards;
6151 
6152 	sc->stat_IfInMBUFDiscards =
6153 		stats->stat_IfInMBUFDiscards;
6154 
6155 	sc->stat_IfInRuleCheckerP4Hit =
6156 		stats->stat_IfInRuleCheckerP4Hit;
6157 
6158 	sc->stat_CatchupInRuleCheckerDiscards =
6159 		stats->stat_CatchupInRuleCheckerDiscards;
6160 
6161 	sc->stat_CatchupInFTQDiscards =
6162 		stats->stat_CatchupInFTQDiscards;
6163 
6164 	sc->stat_CatchupInMBUFDiscards =
6165 		stats->stat_CatchupInMBUFDiscards;
6166 
6167 	sc->stat_CatchupInRuleCheckerP4Hit =
6168 		stats->stat_CatchupInRuleCheckerP4Hit;
6169 
6170 	sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
6171 
6172 	DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __FUNCTION__);
6173 }
6174 
6175 
6176 /****************************************************************************/
6177 /* Periodic function to notify the bootcode that the driver is still        */
6178 /* present.                                                                 */
6179 /*                                                                          */
6180 /* Returns:                                                                 */
6181 /*   Nothing.                                                               */
6182 /****************************************************************************/
6183 static void
6184 bce_pulse(void *xsc)
6185 {
6186 	struct bce_softc *sc = xsc;
6187 	u32 msg;
6188 
6189 	DBPRINT(sc, BCE_EXCESSIVE_MISC, "pulse\n");
6190 
6191 	BCE_LOCK_ASSERT(sc);
6192 
6193 	/* Tell the firmware that the driver is still running. */
6194 	msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq;
6195 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_PULSE_MB, msg);
6196 
6197 	/* Schedule the next pulse. */
6198 	callout_reset(&sc->bce_pulse_callout, hz, bce_pulse, sc);
6199 
6200 	return;
6201 }
6202 
6203 
6204 /****************************************************************************/
6205 /* Periodic function to perform maintenance tasks.                          */
6206 /*                                                                          */
6207 /* Returns:                                                                 */
6208 /*   Nothing.                                                               */
6209 /****************************************************************************/
6210 static void
6211 bce_tick(void *xsc)
6212 {
6213 	struct bce_softc *sc = xsc;
6214 	struct mii_data *mii;
6215 	struct ifnet *ifp;
6216 
6217 	ifp = sc->bce_ifp;
6218 
6219 	BCE_LOCK_ASSERT(sc);
6220 
6221 	/* Schedule the next tick. */
6222 	callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
6223 
6224 	/* Update the statistics from the hardware statistics block. */
6225 	bce_stats_update(sc);
6226 
6227 	/* Top off the receive and page chains. */
6228 	bce_fill_pg_chain(sc);
6229 	bce_fill_rx_chain(sc);
6230 
6231 	/* Check that chip hasn't hung. */
6232 	bce_watchdog(sc);
6233 
6234 	/* If the link is already up then we're done. */
6235 	if (sc->bce_link)
6236 		goto bce_tick_locked_exit;
6237 
6238 	mii = device_get_softc(sc->bce_miibus);
6239 	mii_tick(mii);
6240 
6241 	/* Check if the link has come up. */
6242 	if (!sc->bce_link && mii->mii_media_status & IFM_ACTIVE &&
6243 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
6244 		sc->bce_link++;
6245 		if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
6246 		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) &&
6247 		    bootverbose)
6248 			BCE_PRINTF("Gigabit link up\n");
6249 
6250 		/* Now that link is up, handle any outstanding TX traffic. */
6251 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
6252 			bce_start_locked(ifp);
6253 	}
6254 
6255 bce_tick_locked_exit:
6256 	return;
6257 }
6258 
6259 
6260 #ifdef BCE_DEBUG
6261 /****************************************************************************/
6262 /* Allows the driver state to be dumped through the sysctl interface.       */
6263 /*                                                                          */
6264 /* Returns:                                                                 */
6265 /*   0 for success, positive value for failure.                             */
6266 /****************************************************************************/
6267 static int
6268 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
6269 {
6270         int error;
6271         int result;
6272         struct bce_softc *sc;
6273 
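        /* Reads report -1; writing a 1 to the sysctl node triggers the dump below. */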
6274         result = -1;
6275         error = sysctl_handle_int(oidp, &result, 0, req);
6276 
6277         if (error || !req->newptr)
6278                 return (error);
6279 
6280         if (result == 1) {
6281                 sc = (struct bce_softc *)arg1;
6282                 bce_dump_driver_state(sc);
6283         }
6284 
6285         return error;
6286 }
6287 
6288 
6289 /****************************************************************************/
6290 /* Allows the hardware state to be dumped through the sysctl interface.     */
6291 /*                                                                          */
6292 /* Returns:                                                                 */
6293 /*   0 for success, positive value for failure.                             */
6294 /****************************************************************************/
6295 static int
6296 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
6297 {
6298         int error;
6299         int result;
6300         struct bce_softc *sc;
6301 
6302         result = -1;
6303         error = sysctl_handle_int(oidp, &result, 0, req);
6304 
6305         if (error || !req->newptr)
6306                 return (error);
6307 
6308         if (result == 1) {
6309                 sc = (struct bce_softc *)arg1;
6310                 bce_dump_hw_state(sc);
6311         }
6312 
6313         return error;
6314 }
6315 
6316 
6317 /****************************************************************************/
6318 /* Allows the bootcode state to be dumped through the sysctl interface.     */
6319 /*                                                                          */
6320 /* Returns:                                                                 */
6321 /*   0 for success, positive value for failure.                             */
6322 /****************************************************************************/
6323 static int
6324 bce_sysctl_bc_state(SYSCTL_HANDLER_ARGS)
6325 {
6326         int error;
6327         int result;
6328         struct bce_softc *sc;
6329 
6330         result = -1;
6331         error = sysctl_handle_int(oidp, &result, 0, req);
6332 
6333         if (error || !req->newptr)
6334                 return (error);
6335 
6336         if (result == 1) {
6337                 sc = (struct bce_softc *)arg1;
6338                 bce_dump_bc_state(sc);
6339         }
6340 
6341         return error;
6342 }
6343 
6344 
6345 /****************************************************************************/
6346 /* Provides a sysctl interface to allow dumping the RX chain.               */
6347 /*                                                                          */
6348 /* Returns:                                                                 */
6349 /*   0 for success, positive value for failure.                             */
6350 /****************************************************************************/
6351 static int
6352 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS)
6353 {
6354         int error;
6355         int result;
6356         struct bce_softc *sc;
6357 
6358         result = -1;
6359         error = sysctl_handle_int(oidp, &result, 0, req);
6360 
6361         if (error || !req->newptr)
6362                 return (error);
6363 
6364         if (result == 1) {
6365                 sc = (struct bce_softc *)arg1;
6366                 bce_dump_rx_chain(sc, 0, TOTAL_RX_BD);
6367         }
6368 
6369         return error;
6370 }
6371 
6372 
6373 /****************************************************************************/
6374 /* Provides a sysctl interface to allow dumping the TX chain.               */
6375 /*                                                                          */
6376 /* Returns:                                                                 */
6377 /*   0 for success, positive value for failure.                             */
6378 /****************************************************************************/
6379 static int
6380 bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS)
6381 {
6382         int error;
6383         int result;
6384         struct bce_softc *sc;
6385 
6386         result = -1;
6387         error = sysctl_handle_int(oidp, &result, 0, req);
6388 
6389         if (error || !req->newptr)
6390                 return (error);
6391 
6392         if (result == 1) {
6393                 sc = (struct bce_softc *)arg1;
6394                 bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
6395         }
6396 
6397         return error;
6398 }
6399 
6400 
6401 /****************************************************************************/
6402 /* Provides a sysctl interface to allow dumping the page chain.             */
6403 /*                                                                          */
6404 /* Returns:                                                                 */
6405 /*   0 for success, positive value for failure.                             */
6406 /****************************************************************************/
6407 static int
6408 bce_sysctl_dump_pg_chain(SYSCTL_HANDLER_ARGS)
6409 {
6410         int error;
6411         int result;
6412         struct bce_softc *sc;
6413 
6414         result = -1;
6415         error = sysctl_handle_int(oidp, &result, 0, req);
6416 
6417         if (error || !req->newptr)
6418                 return (error);
6419 
6420         if (result == 1) {
6421                 sc = (struct bce_softc *)arg1;
6422                 bce_dump_pg_chain(sc, 0, TOTAL_PG_BD);
6423         }
6424 
6425         return error;
6426 }
6427 
6428 
6429 /****************************************************************************/
6430 /* Provides a sysctl interface to allow reading arbitrary registers in the  */
6431 /* device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                            */
6432 /*                                                                          */
6433 /* Returns:                                                                 */
6434 /*   0 for success, positive value for failure.                             */
6435 /****************************************************************************/
6436 static int
6437 bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
6438 {
6439 	struct bce_softc *sc;
6440 	int error;
6441 	u32 val, result;
6442 
6443 	result = -1;
6444 	error = sysctl_handle_int(oidp, &result, 0, req);
6445 	if (error || (req->newptr == NULL))
6446 		return (error);
6447 
6448 	/* Make sure the register is accessible. */
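	/* Offsets below 0x8000 are read directly through the register window;  */
	/* higher offsets within the indirect space use indirect register reads. */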
6449 	if (result < 0x8000) {
6450 		sc = (struct bce_softc *)arg1;
6451 		val = REG_RD(sc, result);
6452 		BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
6453 	} else if (result < 0x0280000) {
6454 		sc = (struct bce_softc *)arg1;
6455 		val = REG_RD_IND(sc, result);
6456 		BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
6457 	}
6458 
6459 	return (error);
6460 }
6461 
6462 
6463 /****************************************************************************/
6464 /* Provides a sysctl interface to allow reading arbitrary PHY registers in  */
6465 /* the device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                        */
6466 /*                                                                          */
6467 /* Returns:                                                                 */
6468 /*   0 for success, positive value for failure.                             */
6469 /****************************************************************************/
6470 static int
6471 bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS)
6472 {
6473 	struct bce_softc *sc;
6474 	device_t dev;
6475 	int error, result;
6476 	u16 val;
6477 
6478 	result = -1;
6479 	error = sysctl_handle_int(oidp, &result, 0, req);
6480 	if (error || (req->newptr == NULL))
6481 		return (error);
6482 
6483 	/* Make sure the register is accessible. */
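	/* Standard MII PHY registers occupy addresses 0x00 through 0x1F. */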
6484 	if (result < 0x20) {
6485 		sc = (struct bce_softc *)arg1;
6486 		dev = sc->bce_dev;
6487 		val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result);
6488 		BCE_PRINTF("phy 0x%02X = 0x%04X\n", result, val);
6489 	}
6490 	return (error);
6491 }
6492 
6493 
6494 /****************************************************************************/
6495 /* Provides a sysctl interface to force the driver to dump state and        */
6496 /* enter the debugger.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                */
6497 /*                                                                          */
6498 /* Returns:                                                                 */
6499 /*   0 for success, positive value for failure.                             */
6500 /****************************************************************************/
6501 static int
6502 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
6503 {
6504         int error;
6505         int result;
6506         struct bce_softc *sc;
6507 
6508         result = -1;
6509         error = sysctl_handle_int(oidp, &result, 0, req);
6510 
6511         if (error || !req->newptr)
6512                 return (error);
6513 
6514         if (result == 1) {
6515                 sc = (struct bce_softc *)arg1;
6516                 bce_breakpoint(sc);
6517         }
6518 
6519         return error;
6520 }
6521 #endif
6522 
6523 
6524 /****************************************************************************/
6525 /* Adds any sysctl parameters for tuning or debugging purposes.             */
6526 /*                                                                          */
6527 /* Returns:                                                                 */
6528 /*   0 for success, positive value for failure.                             */
6529 /****************************************************************************/
6530 static void
6531 bce_add_sysctls(struct bce_softc *sc)
6532 {
6533 	struct sysctl_ctx_list *ctx;
6534 	struct sysctl_oid_list *children;
6535 
6536 	ctx = device_get_sysctl_ctx(sc->bce_dev);
6537 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));
6538 
6539 #ifdef BCE_DEBUG
6540 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6541 		"rx_low_watermark",
6542 		CTLFLAG_RD, &sc->rx_low_watermark,
6543 		0, "Lowest level of free rx_bd's");
6544 
6545 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6546 		"rx_empty_count",
6547 		CTLFLAG_RD, &sc->rx_empty_count,
6548 		0, "Number of times the RX chain was empty");
6549 
6550 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6551 		"tx_hi_watermark",
6552 		CTLFLAG_RD, &sc->tx_hi_watermark,
6553 		0, "Highest level of used tx_bd's");
6554 
6555 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6556 		"tx_full_count",
6557 		CTLFLAG_RD, &sc->tx_full_count,
6558 		0, "Number of times the TX chain was full");
6559 
6560 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6561 		"l2fhdr_status_errors",
6562 		CTLFLAG_RD, &sc->l2fhdr_status_errors,
6563 		0, "l2_fhdr status errors");
6564 
6565 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6566 		"unexpected_attentions",
6567 		CTLFLAG_RD, &sc->unexpected_attentions,
6568 		0, "Unexpected attentions");
6569 
6570 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6571 		"lost_status_block_updates",
6572 		CTLFLAG_RD, &sc->lost_status_block_updates,
6573 		0, "Lost status block updates");
6574 
6575 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6576 		"debug_mbuf_sim_alloc_failed",
6577 		CTLFLAG_RD, &sc->debug_mbuf_sim_alloc_failed,
6578 		0, "Simulated mbuf cluster allocation failures");
6579 
6580 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6581 		"requested_tso_frames",
6582 		CTLFLAG_RD, &sc->requested_tso_frames,
6583 		0, "Number of TSO frames requested");
6584 
6585 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6586 		"rx_interrupts",
6587 		CTLFLAG_RD, &sc->rx_interrupts,
6588 		0, "Number of RX interrupts");
6589 
6590 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6591 		"tx_interrupts",
6592 		CTLFLAG_RD, &sc->tx_interrupts,
6593 		0, "Number of TX interrupts");
6594 
6595 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6596 		"rx_intr_time",
6597 		CTLFLAG_RD, &sc->rx_intr_time,
6598 		"RX interrupt time");
6599 
6600 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6601 		"tx_intr_time",
6602 		CTLFLAG_RD, &sc->tx_intr_time,
6603 		"TX interrupt time");
6604 
6605 #endif
6606 
6607 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6608 		"mbuf_alloc_failed",
6609 		CTLFLAG_RD, &sc->mbuf_alloc_failed,
6610 		0, "mbuf cluster allocation failures");
6611 
6612 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6613 		"tx_dma_map_failures",
6614 		CTLFLAG_RD, &sc->tx_dma_map_failures,
6615 		0, "tx dma mapping failures");
6616 
6617 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6618 		"stat_IfHcInOctets",
6619 		CTLFLAG_RD, &sc->stat_IfHCInOctets,
6620 		"Bytes received");
6621 
6622 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6623 		"stat_IfHCInBadOctets",
6624 		CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
6625 		"Bad bytes received");
6626 
6627 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6628 		"stat_IfHCOutOctets",
6629 		CTLFLAG_RD, &sc->stat_IfHCOutOctets,
6630 		"Bytes sent");
6631 
6632 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6633 		"stat_IfHCOutBadOctets",
6634 		CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
6635 		"Bad bytes sent");
6636 
6637 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6638 		"stat_IfHCInUcastPkts",
6639 		CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
6640 		"Unicast packets received");
6641 
6642 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6643 		"stat_IfHCInMulticastPkts",
6644 		CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
6645 		"Multicast packets received");
6646 
6647 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6648 		"stat_IfHCInBroadcastPkts",
6649 		CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
6650 		"Broadcast packets received");
6651 
6652 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6653 		"stat_IfHCOutUcastPkts",
6654 		CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
6655 		"Unicast packets sent");
6656 
6657 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6658 		"stat_IfHCOutMulticastPkts",
6659 		CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
6660 		"Multicast packets sent");
6661 
6662 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6663 		"stat_IfHCOutBroadcastPkts",
6664 		CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
6665 		"Broadcast packets sent");
6666 
6667 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6668 		"stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
6669 		CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
6670 		0, "Internal MAC transmit errors");
6671 
6672 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6673 		"stat_Dot3StatsCarrierSenseErrors",
6674 		CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
6675 		0, "Carrier sense errors");
6676 
6677 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6678 		"stat_Dot3StatsFCSErrors",
6679 		CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
6680 		0, "Frame check sequence errors");
6681 
6682 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6683 		"stat_Dot3StatsAlignmentErrors",
6684 		CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
6685 		0, "Alignment errors");
6686 
6687 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6688 		"stat_Dot3StatsSingleCollisionFrames",
6689 		CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
6690 		0, "Single Collision Frames");
6691 
6692 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6693 		"stat_Dot3StatsMultipleCollisionFrames",
6694 		CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
6695 		0, "Multiple Collision Frames");
6696 
6697 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6698 		"stat_Dot3StatsDeferredTransmissions",
6699 		CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
6700 		0, "Deferred Transmissions");
6701 
6702 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6703 		"stat_Dot3StatsExcessiveCollisions",
6704 		CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
6705 		0, "Excessive Collisions");
6706 
6707 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6708 		"stat_Dot3StatsLateCollisions",
6709 		CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
6710 		0, "Late Collisions");
6711 
6712 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6713 		"stat_EtherStatsCollisions",
6714 		CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
6715 		0, "Collisions");
6716 
6717 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6718 		"stat_EtherStatsFragments",
6719 		CTLFLAG_RD, &sc->stat_EtherStatsFragments,
6720 		0, "Fragments");
6721 
6722 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6723 		"stat_EtherStatsJabbers",
6724 		CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
6725 		0, "Jabbers");
6726 
6727 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6728 		"stat_EtherStatsUndersizePkts",
6729 		CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
6730 		0, "Undersize packets");
6731 
6732 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6733 		"stat_EtherStatsOverrsizePkts",
6734 		CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
6735 		0, "Oversize packets");
6736 
6737 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6738 		"stat_EtherStatsPktsRx64Octets",
6739 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
6740 		0, "Bytes received in 64 byte packets");
6741 
6742 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6743 		"stat_EtherStatsPktsRx65Octetsto127Octets",
6744 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
6745 		0, "Bytes received in 65 to 127 byte packets");
6746 
6747 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6748 		"stat_EtherStatsPktsRx128Octetsto255Octets",
6749 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
6750 		0, "Bytes received in 128 to 255 byte packets");
6751 
6752 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6753 		"stat_EtherStatsPktsRx256Octetsto511Octets",
6754 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
6755 		0, "Bytes received in 256 to 511 byte packets");
6756 
6757 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6758 		"stat_EtherStatsPktsRx512Octetsto1023Octets",
6759 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
6760 		0, "Bytes received in 512 to 1023 byte packets");
6761 
6762 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6763 		"stat_EtherStatsPktsRx1024Octetsto1522Octets",
6764 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
6765 		0, "Bytes received in 1024 to 1522 byte packets");
6766 
6767 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6768 		"stat_EtherStatsPktsRx1523Octetsto9022Octets",
6769 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
6770 		0, "Bytes received in 1523 to 9022 byte packets");
6771 
6772 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6773 		"stat_EtherStatsPktsTx64Octets",
6774 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
6775 		0, "Bytes sent in 64 byte packets");
6776 
6777 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6778 		"stat_EtherStatsPktsTx65Octetsto127Octets",
6779 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
6780 		0, "Bytes sent in 65 to 127 byte packets");
6781 
6782 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6783 		"stat_EtherStatsPktsTx128Octetsto255Octets",
6784 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
6785 		0, "Bytes sent in 128 to 255 byte packets");
6786 
6787 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6788 		"stat_EtherStatsPktsTx256Octetsto511Octets",
6789 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
6790 		0, "Bytes sent in 256 to 511 byte packets");
6791 
6792 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6793 		"stat_EtherStatsPktsTx512Octetsto1023Octets",
6794 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
6795 		0, "Bytes sent in 512 to 1023 byte packets");
6796 
6797 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6798 		"stat_EtherStatsPktsTx1024Octetsto1522Octets",
6799 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
6800 		0, "Bytes sent in 1024 to 1522 byte packets");
6801 
6802 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6803 		"stat_EtherStatsPktsTx1523Octetsto9022Octets",
6804 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
6805 		0, "Bytes sent in 1523 to 9022 byte packets");
6806 
6807 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6808 		"stat_XonPauseFramesReceived",
6809 		CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
6810 		0, "XON pause frames received");
6811 
6812 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6813 		"stat_XoffPauseFramesReceived",
6814 		CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
6815 		0, "XOFF pause frames received");
6816 
6817 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6818 		"stat_OutXonSent",
6819 		CTLFLAG_RD, &sc->stat_OutXonSent,
6820 		0, "XON pause frames sent");
6821 
6822 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6823 		"stat_OutXoffSent",
6824 		CTLFLAG_RD, &sc->stat_OutXoffSent,
6825 		0, "XOFF pause frames sent");
6826 
6827 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6828 		"stat_FlowControlDone",
6829 		CTLFLAG_RD, &sc->stat_FlowControlDone,
6830 		0, "Flow control done");
6831 
6832 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6833 		"stat_MacControlFramesReceived",
6834 		CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
6835 		0, "MAC control frames received");
6836 
6837 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6838 		"stat_XoffStateEntered",
6839 		CTLFLAG_RD, &sc->stat_XoffStateEntered,
6840 		0, "XOFF state entered");
6841 
6842 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6843 		"stat_IfInFramesL2FilterDiscards",
6844 		CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
6845 		0, "Received L2 packets discarded");
6846 
6847 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6848 		"stat_IfInRuleCheckerDiscards",
6849 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
6850 		0, "Received packets discarded by rule");
6851 
6852 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6853 		"stat_IfInFTQDiscards",
6854 		CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
6855 		0, "Received packet FTQ discards");
6856 
6857 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6858 		"stat_IfInMBUFDiscards",
6859 		CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
6860 		0, "Received packets discarded due to lack of controller buffer memory");
6861 
6862 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6863 		"stat_IfInRuleCheckerP4Hit",
6864 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
6865 		0, "Received packets rule checker hits");
6866 
6867 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6868 		"stat_CatchupInRuleCheckerDiscards",
6869 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
6870 		0, "Received packets discarded in Catchup path");
6871 
6872 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6873 		"stat_CatchupInFTQDiscards",
6874 		CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
6875 		0, "Received packets discarded in FTQ in Catchup path");
6876 
6877 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6878 		"stat_CatchupInMBUFDiscards",
6879 		CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
6880 		0, "Received packets discarded in controller buffer memory in Catchup path");
6881 
6882 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6883 		"stat_CatchupInRuleCheckerP4Hit",
6884 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
6885 		0, "Received packets rule checker hits in Catchup path");
6886 
6887 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6888 		"com_no_buffers",
6889 		CTLFLAG_RD, &sc->com_no_buffers,
6890 		0, "Valid packets received but no RX buffers available");
6891 
6892 #ifdef BCE_DEBUG
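	/*
	 * Each of the debug nodes below performs its action when a 1 is
	 * written to it, e.g. `sysctl dev.bce.0.driver_state=1` for unit 0.
	 */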
6893 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6894 		"driver_state", CTLTYPE_INT | CTLFLAG_RW,
6895 		(void *)sc, 0,
6896 		bce_sysctl_driver_state, "I", "Driver state information");
6897 
6898 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6899 		"hw_state", CTLTYPE_INT | CTLFLAG_RW,
6900 		(void *)sc, 0,
6901 		bce_sysctl_hw_state, "I", "Hardware state information");
6902 
6903 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6904 		"bc_state", CTLTYPE_INT | CTLFLAG_RW,
6905 		(void *)sc, 0,
6906 		bce_sysctl_bc_state, "I", "Bootcode state information");
6907 
6908 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6909 		"dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW,
6910 		(void *)sc, 0,
6911 		bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain");
6912 
6913 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6914 		"dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW,
6915 		(void *)sc, 0,
6916 		bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain");
6917 
6918 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6919 		"dump_pg_chain", CTLTYPE_INT | CTLFLAG_RW,
6920 		(void *)sc, 0,
6921 		bce_sysctl_dump_pg_chain, "I", "Dump page chain");
6922 
6923 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6924 		"breakpoint", CTLTYPE_INT | CTLFLAG_RW,
6925 		(void *)sc, 0,
6926 		bce_sysctl_breakpoint, "I", "Driver breakpoint");
6927 
6928 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6929 		"reg_read", CTLTYPE_INT | CTLFLAG_RW,
6930 		(void *)sc, 0,
6931 		bce_sysctl_reg_read, "I", "Register read");
6932 
6933 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6934 		"phy_read", CTLTYPE_INT | CTLFLAG_RW,
6935 		(void *)sc, 0,
6936 		bce_sysctl_phy_read, "I", "PHY register read");
6937 
6938 #endif
6939 
6940 }
6941 
6942 
6943 /****************************************************************************/
6944 /* BCE Debug Routines                                                       */
6945 /****************************************************************************/
6946 #ifdef BCE_DEBUG
6947 
6948 /****************************************************************************/
6949 /* Freezes the controller to allow for a cohesive state dump.               */
6950 /*                                                                          */
6951 /* Returns:                                                                 */
6952 /*   Nothing.                                                               */
6953 /****************************************************************************/
6954 static void
6955 bce_freeze_controller(struct bce_softc *sc)
6956 {
6957 	u32 val;
6958 	val = REG_RD(sc, BCE_MISC_COMMAND);
6959 	val |= BCE_MISC_COMMAND_DISABLE_ALL;
6960 	REG_WR(sc, BCE_MISC_COMMAND, val);
6961 
6962 }
6963 
6964 
6965 /****************************************************************************/
6966 /* Unfreezes the controller after a freeze operation.  This may not always  */
6967 /* work and the controller will require a reset!                            */
6968 /*                                                                          */
6969 /* Returns:                                                                 */
6970 /*   Nothing.                                                               */
6971 /****************************************************************************/
6972 static void
6973 bce_unfreeze_controller(struct bce_softc *sc)
6974 {
6975 	u32 val;
6976 	val = REG_RD(sc, BCE_MISC_COMMAND);
6977 	val |= BCE_MISC_COMMAND_ENABLE_ALL;
6978 	REG_WR(sc, BCE_MISC_COMMAND, val);
6979 
6980 }
6981 
6982 /****************************************************************************/
6983 /* Prints out information about an mbuf.                                    */
6984 /*                                                                          */
6985 /* Returns:                                                                 */
6986 /*   Nothing.                                                               */
6987 /****************************************************************************/
6988 static void
6989 bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
6990 {
6991 	struct mbuf *mp = m;
6992 
6993 	if (m == NULL) {
6994 		BCE_PRINTF("mbuf: null pointer\n");
6995 		return;
6996 	}
6997 
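		/* %b decodes the flag word using the kernel bit-name strings below. */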
6998 	while (mp) {
6999 		BCE_PRINTF("mbuf: %p, m_len = %d, m_flags = 0x%b, m_data = %p\n",
7000 			mp, mp->m_len, mp->m_flags,
7001 			"\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY",
7002 			mp->m_data);
7003 
7004 		if (mp->m_flags & M_PKTHDR) {
7005 			BCE_PRINTF("- m_pkthdr: len = %d, flags = 0x%b, csum_flags = %b\n",
7006 				mp->m_pkthdr.len, mp->m_flags,
7007 				"\20\12M_BCAST\13M_MCAST\14M_FRAG\15M_FIRSTFRAG"
7008 				"\16M_LASTFRAG\21M_VLANTAG\22M_PROMISC\23M_NOFREE",
7009 				mp->m_pkthdr.csum_flags,
7010 				"\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
7011 				"\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
7012 				"\12CSUM_IP_VALID\13CSUM_DATA_VALID\14CSUM_PSEUDO_HDR");
7013 		}
7014 
7015 		if (mp->m_flags & M_EXT) {
7016 			BCE_PRINTF("- m_ext: %p, ext_size = %d, type = ",
7017 				mp->m_ext.ext_buf, mp->m_ext.ext_size);
7018 			switch (mp->m_ext.ext_type) {
7019 				case EXT_CLUSTER:    printf("EXT_CLUSTER\n"); break;
7020 				case EXT_SFBUF:      printf("EXT_SFBUF\n"); break;
7021 				case EXT_JUMBO9:     printf("EXT_JUMBO9\n"); break;
7022 				case EXT_JUMBO16:    printf("EXT_JUMBO16\n"); break;
7023 				case EXT_PACKET:     printf("EXT_PACKET\n"); break;
7024 				case EXT_MBUF:       printf("EXT_MBUF\n"); break;
7025 				case EXT_NET_DRV:    printf("EXT_NET_DRV\n"); break;
7026 				case EXT_MOD_TYPE:   printf("EXT_MOD_TYPE\n"); break;
7027 				case EXT_DISPOSABLE: printf("EXT_DISPOSABLE\n"); break;
7028 				case EXT_EXTREF:     printf("EXT_EXTREF\n"); break;
7029 				default:             printf("UNKNOWN\n");
7030 			}
7031 		}
7032 
7033 		mp = mp->m_next;
7034 	}
7035 }
7036 
7037 
7038 /****************************************************************************/
7039 /* Prints out the mbufs in the TX mbuf chain.                               */
7040 /*                                                                          */
7041 /* Returns:                                                                 */
7042 /*   Nothing.                                                               */
7043 /****************************************************************************/
7044 static void
7045 bce_dump_tx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
7046 {
7047 	struct mbuf *m;
7048 
7049 	BCE_PRINTF(
7050 		"----------------------------"
7051 		"  tx mbuf data  "
7052 		"----------------------------\n");
7053 
7054 	for (int i = 0; i < count; i++) {
7055 	 	m = sc->tx_mbuf_ptr[chain_prod];
7056 		BCE_PRINTF("txmbuf[0x%04X]\n", chain_prod);
7057 		bce_dump_mbuf(sc, m);
7058 		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
7059 	}
7060 
7061 	BCE_PRINTF(
7062 		"----------------------------"
7063 		"----------------"
7064 		"----------------------------\n");
7065 }
7066 
7067 
7068 /****************************************************************************/
7069 /* Prints out the mbufs in the RX mbuf chain.                               */
7070 /*                                                                          */
7071 /* Returns:                                                                 */
7072 /*   Nothing.                                                               */
7073 /****************************************************************************/
7074 static void
7075 bce_dump_rx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
7076 {
7077 	struct mbuf *m;
7078 
7079 	BCE_PRINTF(
7080 		"----------------------------"
7081 		"  rx mbuf data  "
7082 		"----------------------------\n");
7083 
7084 	for (int i = 0; i < count; i++) {
7085 	 	m = sc->rx_mbuf_ptr[chain_prod];
7086 		BCE_PRINTF("rxmbuf[0x%04X]\n", chain_prod);
7087 		bce_dump_mbuf(sc, m);
7088 		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
7089 	}
7090 
7091 
7092 	BCE_PRINTF(
7093 		"----------------------------"
7094 		"----------------"
7095 		"----------------------------\n");
7096 }
7097 
7098 
7099 /****************************************************************************/
7100 /* Prints out the mbufs in the mbuf page chain.                             */
7101 /*                                                                          */
7102 /* Returns:                                                                 */
7103 /*   Nothing.                                                               */
7104 /****************************************************************************/
7105 static void
7106 bce_dump_pg_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
7107 {
7108 	struct mbuf *m;
7109 
7110 	BCE_PRINTF(
7111 		"----------------------------"
7112 		"  pg mbuf data  "
7113 		"----------------------------\n");
7114 
7115 	for (int i = 0; i < count; i++) {
7116 	 	m = sc->pg_mbuf_ptr[chain_prod];
7117 		BCE_PRINTF("pgmbuf[0x%04X]\n", chain_prod);
7118 		bce_dump_mbuf(sc, m);
7119 		chain_prod = PG_CHAIN_IDX(NEXT_PG_BD(chain_prod));
7120 	}
7121 
7122 
7123 	BCE_PRINTF(
7124 		"----------------------------"
7125 		"----------------"
7126 		"----------------------------\n");
7127 }
7128 
7129 
7130 /****************************************************************************/
7131 /* Prints out a tx_bd structure.                                            */
7132 /*                                                                          */
7133 /* Returns:                                                                 */
7134 /*   Nothing.                                                               */
7135 /****************************************************************************/
7136 static void
7137 bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
7138 {
7139 	if (idx > MAX_TX_BD)
7140 		/* Index out of range. */
7141 		BCE_PRINTF("tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
7142 	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
7143 		/* TX Chain page pointer. */
7144 		BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
7145 			idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
7146 	else {
7147 			/* Normal tx_bd entry. */
7148 			BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
7149 				"vlan tag= 0x%04X, flags = 0x%04X (", idx,
7150 				txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
7151 				txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
7152 				txbd->tx_bd_flags);
7153 
7154 			if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT)
7155 				printf(" CONN_FAULT");
7156 
7157 			if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM)
7158 				printf(" TCP_UDP_CKSUM");
7159 
7160 			if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM)
7161 				printf(" IP_CKSUM");
7162 
7163 			if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG)
7164 				printf("  VLAN");
7165 
7166 			if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW)
7167 				printf(" COAL_NOW");
7168 
7169 			if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC)
7170 				printf(" DONT_GEN_CRC");
7171 
7172 			if (txbd->tx_bd_flags & TX_BD_FLAGS_START)
7173 				printf(" START");
7174 
7175 			if (txbd->tx_bd_flags & TX_BD_FLAGS_END)
7176 				printf(" END");
7177 
7178 			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO)
7179 				printf(" LSO");
7180 
7181 			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD)
7182 				printf(" OPTION_WORD");
7183 
7184 			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS)
7185 				printf(" FLAGS");
7186 
7187 			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP)
7188 				printf(" SNAP");
7189 
7190 			printf(" )\n");
7191 		}
7192 
7193 }
7194 
7195 
7196 /****************************************************************************/
7197 /* Prints out a rx_bd structure.                                            */
7198 /*                                                                          */
7199 /* Returns:                                                                 */
7200 /*   Nothing.                                                               */
7201 /****************************************************************************/
7202 static void
7203 bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
7204 {
7205 	if (idx > MAX_RX_BD)
7206 		/* Index out of range. */
7207 		BCE_PRINTF("rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
7208 	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
7209 		/* RX Chain page pointer. */
7210 		BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
7211 			idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
7212 	else
7213 		/* Normal rx_bd entry. */
7214 		BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
7215 			"flags = 0x%08X\n", idx,
7216 			rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
7217 			rxbd->rx_bd_len, rxbd->rx_bd_flags);
7218 }
7219 
7220 
7221 /****************************************************************************/
7222 /* Prints out a rx_bd structure in the page chain.                          */
7223 /*                                                                          */
7224 /* Returns:                                                                 */
7225 /*   Nothing.                                                               */
7226 /****************************************************************************/
7227 static void
7228 bce_dump_pgbd(struct bce_softc *sc, int idx, struct rx_bd *pgbd)
7229 {
7230 	if (idx > MAX_PG_BD)
7231 		/* Index out of range. */
7232 		BCE_PRINTF("pg_bd[0x%04X]: Invalid pg_bd index!\n", idx);
7233 	else if ((idx & USABLE_PG_BD_PER_PAGE) == USABLE_PG_BD_PER_PAGE)
7234 		/* Page Chain page pointer. */
7235 		BCE_PRINTF("pg_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
7236 			idx, pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo);
7237 	else
7238 		/* Normal rx_bd entry. */
7239 		BCE_PRINTF("pg_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
7240 			"flags = 0x%08X\n", idx,
7241 			pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo,
7242 			pgbd->rx_bd_len, pgbd->rx_bd_flags);
7243 }
7244 
7245 
7246 /****************************************************************************/
7247 /* Prints out a l2_fhdr structure.                                          */
7248 /*                                                                          */
7249 /* Returns:                                                                 */
7250 /*   Nothing.                                                               */
7251 /****************************************************************************/
7252 static void
7253 bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
7254 {
7255 	BCE_PRINTF("l2_fhdr[0x%04X]: status = 0x%b, "
7256 		"pkt_len = %d, vlan = 0x%04x, ip_xsum/hdr_len = 0x%04X, "
7257 		"tcp_udp_xsum = 0x%04X\n", idx,
7258 		l2fhdr->l2_fhdr_status, BCE_L2FHDR_PRINTFB,
7259 		l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag,
7260 		l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum);
7261 }
7262 
7263 
7264 /****************************************************************************/
7265 /* Prints out the L2 context memory.  (Only useful for CID 0 to 15.)        */
7266 /*                                                                          */
7267 /* Returns:                                                                 */
7268 /*   Nothing.                                                               */
7269 /****************************************************************************/
7270 static void
7271 bce_dump_ctx(struct bce_softc *sc, u16 cid)
7272 {
7273 	if (cid < TX_CID) {
7274 		BCE_PRINTF(
7275 			"----------------------------"
7276 			"    CTX Data    "
7277 			"----------------------------\n");
7278 
7279 		BCE_PRINTF("     0x%04X - (CID) Context ID\n", cid);
7280 		BCE_PRINTF(" 0x%08X - (L2CTX_HOST_BDIDX) host rx producer index\n",
7281 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_HOST_BDIDX));
7282 		BCE_PRINTF(" 0x%08X - (L2CTX_HOST_BSEQ) host byte sequence\n",
7283 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_HOST_BSEQ));
7284 		BCE_PRINTF(" 0x%08X - (L2CTX_NX_BSEQ) h/w byte sequence\n",
7285 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_BSEQ));
7286 		BCE_PRINTF(" 0x%08X - (L2CTX_NX_BDHADDR_HI) h/w buffer descriptor address\n",
7287 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_BDHADDR_HI));
7288 		BCE_PRINTF(" 0x%08X - (L2CTX_NX_BDHADDR_LO) h/w buffer descriptor address\n",
7289 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_BDHADDR_LO));
7290 		BCE_PRINTF(" 0x%08X - (L2CTX_NX_BDIDX) h/w rx consumer index\n",
7291 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_BDIDX));
7292 		BCE_PRINTF(" 0x%08X - (L2CTX_HOST_PG_BDIDX) host page producer index\n",
7293 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_HOST_PG_BDIDX));
7294 		BCE_PRINTF(" 0x%08X - (L2CTX_PG_BUF_SIZE) host rx_bd/page buffer size\n",
7295 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_PG_BUF_SIZE));
7296 		BCE_PRINTF(" 0x%08X - (L2CTX_NX_PG_BDHADDR_HI) h/w page chain address\n",
7297 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_PG_BDHADDR_HI));
7298 		BCE_PRINTF(" 0x%08X - (L2CTX_NX_PG_BDHADDR_LO) h/w page chain address\n",
7299 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_PG_BDHADDR_LO));
7300 		BCE_PRINTF(" 0x%08X - (L2CTX_NX_PG_BDIDX) h/w page consumer index\n",
7301 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_PG_BDIDX));
7302 
7303 		BCE_PRINTF(
7304 			"----------------------------"
7305 			"----------------"
7306 			"----------------------------\n");
7307 	}
7308 }
7309 
7310 
7311 /****************************************************************************/
7312 /* Prints out the FTQ data.                                                 */
7313 /*                                                                          */
7314 /* Returns:                                                                 */
7315 /*   Nothing.                                                               */
7316 /****************************************************************************/
7317 static void
7318 bce_dump_ftqs(struct bce_softc *sc)
7319 {
7320 	u32 cmd, ctl, cur_depth, max_depth, valid_cnt;
7321 
7322 	BCE_PRINTF(
7323 		"----------------------------"
7324 		"    FTQ Data    "
7325 		"----------------------------\n");
7326 
7327 	BCE_PRINTF("  FTQ   Command    Control   Depth_Now  Max_Depth  Valid_Cnt\n");
7328 	BCE_PRINTF(" ----- ---------- ---------- ---------- ---------- ----------\n");
7329 
7330 	/* Setup the generic statistic counters for the FTQ valid count. */
7331 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_0,
7332 		((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT << 24) |
7333 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT  << 16) |
7334 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT   <<  8) |
7335 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT)));
7336 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_1,
7337 		((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT  << 24) |
7338 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT  << 16) |
7339 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT <<  8) |
7340 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT)));
7341 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_2,
7342 		((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT  << 24) |
7343 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT  << 16) |
7344 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT   <<  8) |
7345 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT)));
7346 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_3,
7347 		((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT   << 24) |
7348 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT  << 16) |
7349 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT  <<  8) |
7350 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT)));
7351 
7352 
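	/*
	 * For each FTQ, read its command/control registers, extract the
	 * current and maximum depth fields from the control word, and read
	 * the valid count from the statistic counter selected above.
	 */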
7353 	cmd = REG_RD(sc, BCE_RLUP_FTQ_CMD);
7354 	ctl = REG_RD(sc, BCE_RLUP_FTQ_CTL);
7355 	cur_depth = (ctl & BCE_RLUP_FTQ_CTL_CUR_DEPTH) >> 22;
7356 	max_depth = (ctl & BCE_RLUP_FTQ_CTL_MAX_DEPTH) >> 12;
7357 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0);
7358 	BCE_PRINTF(" RLUP  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7359 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7360 
7361 	cmd = REG_RD_IND(sc, BCE_RXP_FTQ_CMD);
7362 	ctl = REG_RD_IND(sc, BCE_RXP_FTQ_CTL);
7363 	cur_depth = (ctl & BCE_RXP_FTQ_CTL_CUR_DEPTH) >> 22;
7364 	max_depth = (ctl & BCE_RXP_FTQ_CTL_MAX_DEPTH) >> 12;
7365 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1);
7366 	BCE_PRINTF(" RXP   0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7367 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7368 
7369 	cmd = REG_RD_IND(sc, BCE_RXP_CFTQ_CMD);
7370 	ctl = REG_RD_IND(sc, BCE_RXP_CFTQ_CTL);
7371 	cur_depth = (ctl & BCE_RXP_CFTQ_CTL_CUR_DEPTH) >> 22;
7372 	max_depth = (ctl & BCE_RXP_CFTQ_CTL_MAX_DEPTH) >> 12;
7373 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2);
7374 	BCE_PRINTF(" RXPC  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7375 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7376 
7377 	cmd = REG_RD(sc, BCE_RV2P_PFTQ_CMD);
7378 	ctl = REG_RD(sc, BCE_RV2P_PFTQ_CTL);
7379 	cur_depth = (ctl & BCE_RV2P_PFTQ_CTL_CUR_DEPTH) >> 22;
7380 	max_depth = (ctl & BCE_RV2P_PFTQ_CTL_MAX_DEPTH) >> 12;
7381 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3);
7382 	BCE_PRINTF(" RV2PP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7383 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7384 
7385 	cmd = REG_RD(sc, BCE_RV2P_MFTQ_CMD);
7386 	ctl = REG_RD(sc, BCE_RV2P_MFTQ_CTL);
7387 	cur_depth = (ctl & BCE_RV2P_MFTQ_CTL_CUR_DEPTH) >> 22;
7388 	max_depth = (ctl & BCE_RV2P_MFTQ_CTL_MAX_DEPTH) >> 12;
7389 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT4);
7390 	BCE_PRINTF(" RV2PM 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7391 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7392 
7393 	cmd = REG_RD(sc, BCE_RV2P_TFTQ_CMD);
7394 	ctl = REG_RD(sc, BCE_RV2P_TFTQ_CTL);
7395 	cur_depth = (ctl & BCE_RV2P_TFTQ_CTL_CUR_DEPTH) >> 22;
7396 	max_depth = (ctl & BCE_RV2P_TFTQ_CTL_MAX_DEPTH) >> 12;
7397 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT5);
7398 	BCE_PRINTF(" RV2PT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7399 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7400 
7401 	cmd = REG_RD(sc, BCE_RDMA_FTQ_CMD);
7402 	ctl = REG_RD(sc, BCE_RDMA_FTQ_CTL);
7403 	cur_depth = (ctl & BCE_RDMA_FTQ_CTL_CUR_DEPTH) >> 22;
7404 	max_depth = (ctl & BCE_RDMA_FTQ_CTL_MAX_DEPTH) >> 12;
7405 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT6);
7406 	BCE_PRINTF(" RDMA  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7407 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7408 
7409 	cmd = REG_RD(sc, BCE_TSCH_FTQ_CMD);
7410 	ctl = REG_RD(sc, BCE_TSCH_FTQ_CTL);
7411 	cur_depth = (ctl & BCE_TSCH_FTQ_CTL_CUR_DEPTH) >> 22;
7412 	max_depth = (ctl & BCE_TSCH_FTQ_CTL_MAX_DEPTH) >> 12;
7413 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT7);
7414 	BCE_PRINTF(" TSCH  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7415 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7416 
7417 	cmd = REG_RD(sc, BCE_TBDR_FTQ_CMD);
7418 	ctl = REG_RD(sc, BCE_TBDR_FTQ_CTL);
7419 	cur_depth = (ctl & BCE_TBDR_FTQ_CTL_CUR_DEPTH) >> 22;
7420 	max_depth = (ctl & BCE_TBDR_FTQ_CTL_MAX_DEPTH) >> 12;
7421 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT8);
7422 	BCE_PRINTF(" TBDR  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7423 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7424 
7425 	cmd = REG_RD_IND(sc, BCE_TXP_FTQ_CMD);
7426 	ctl = REG_RD_IND(sc, BCE_TXP_FTQ_CTL);
7427 	cur_depth = (ctl & BCE_TXP_FTQ_CTL_CUR_DEPTH) >> 22;
7428 	max_depth = (ctl & BCE_TXP_FTQ_CTL_MAX_DEPTH) >> 12;
7429 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT9);
7430 	BCE_PRINTF(" TXP   0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7431 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7432 
7433 	cmd = REG_RD(sc, BCE_TDMA_FTQ_CMD);
7434 	ctl = REG_RD(sc, BCE_TDMA_FTQ_CTL);
7435 	cur_depth = (ctl & BCE_TDMA_FTQ_CTL_CUR_DEPTH) >> 22;
7436 	max_depth = (ctl & BCE_TDMA_FTQ_CTL_MAX_DEPTH) >> 12;
7437 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT10);
7438 	BCE_PRINTF(" TDMA  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7439 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7440 
7441 
7442 	cmd = REG_RD_IND(sc, BCE_TPAT_FTQ_CMD);
7443 	ctl = REG_RD_IND(sc, BCE_TPAT_FTQ_CTL);
7444 	cur_depth = (ctl & BCE_TPAT_FTQ_CTL_CUR_DEPTH) >> 22;
7445 	max_depth = (ctl & BCE_TPAT_FTQ_CTL_MAX_DEPTH) >> 12;
7446 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT11);
7447 	BCE_PRINTF(" TPAT  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7448 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7449 
7450 	cmd = REG_RD_IND(sc, BCE_TAS_FTQ_CMD);
7451 	ctl = REG_RD_IND(sc, BCE_TAS_FTQ_CTL);
7452 	cur_depth = (ctl & BCE_TAS_FTQ_CTL_CUR_DEPTH) >> 22;
7453 	max_depth = (ctl & BCE_TAS_FTQ_CTL_MAX_DEPTH) >> 12;
7454 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT12);
7455 	BCE_PRINTF(" TAS   0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7456 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7457 
7458 	cmd = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CMD);
7459 	ctl = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CTL);
7460 	cur_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_CUR_DEPTH) >> 22;
7461 	max_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_MAX_DEPTH) >> 12;
7462 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT13);
7463 	BCE_PRINTF(" COMX  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7464 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7465 
7466 	cmd = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CMD);
7467 	ctl = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CTL);
7468 	cur_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_CUR_DEPTH) >> 22;
7469 	max_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_MAX_DEPTH) >> 12;
7470 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT14);
7471 	BCE_PRINTF(" COMT  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7472 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7473 
7474 	cmd = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CMD);
7475 	ctl = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CTL);
7476 	cur_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_CUR_DEPTH) >> 22;
7477 	max_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_MAX_DEPTH) >> 12;
7478 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT15);
7479 	BCE_PRINTF(" COM   0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7480 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7481 
7482 	/* Setup the generic statistic counters for the FTQ valid count. */
7483 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_0,
7484 		 ((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT  << 16) |
7485 		  (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT  <<  8) |
7486 		  (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT)));
7487 
7488 	cmd = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CMD);
7489 	ctl = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CTL);
7490 	cur_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_CUR_DEPTH) >> 22;
7491 	max_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_MAX_DEPTH) >> 12;
7492 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0);
7493 	BCE_PRINTF(" MCP   0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7494 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7495 
7496 	cmd = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CMD);
7497 	ctl = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CTL);
7498 	cur_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_CUR_DEPTH) >> 22;
7499 	max_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_MAX_DEPTH) >> 12;
7500 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1);
7501 	BCE_PRINTF(" CP    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7502 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7503 
7504 	cmd = REG_RD(sc, BCE_CSCH_CH_FTQ_CMD);
7505 	ctl = REG_RD(sc, BCE_CSCH_CH_FTQ_CTL);
7506 	cur_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_CUR_DEPTH) >> 22;
7507 	max_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_MAX_DEPTH) >> 12;
7508 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2);
7509 	BCE_PRINTF(" CS    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7510 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7511 
7512 	BCE_PRINTF(
7513 		"----------------------------"
7514 		"----------------"
7515 		"----------------------------\n");
7516 }
7517 
7518 
7519 /****************************************************************************/
7520 /* Prints out the TX chain.                                                 */
7521 /*                                                                          */
7522 /* Returns:                                                                 */
7523 /*   Nothing.                                                               */
7524 /****************************************************************************/
7525 static void
7526 bce_dump_tx_chain(struct bce_softc *sc, u16 tx_prod, int count)
7527 {
7528 	struct tx_bd *txbd;
7529 
7530 	/* First some info about the tx_bd chain structure. */
7531 	BCE_PRINTF(
7532 		"----------------------------"
7533 		"  tx_bd  chain  "
7534 		"----------------------------\n");
7535 
7536 	BCE_PRINTF("page size      = 0x%08X, tx chain pages        = 0x%08X\n",
7537 		(u32) BCM_PAGE_SIZE, (u32) TX_PAGES);
7538 
7539 	BCE_PRINTF("tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
7540 		(u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE);
7541 
7542 	BCE_PRINTF("total tx_bd    = 0x%08X\n", (u32) TOTAL_TX_BD);
7543 
7544 	BCE_PRINTF(
7545 		"----------------------------"
7546 		"   tx_bd data   "
7547 		"----------------------------\n");
7548 
7549 	/* Now print out the tx_bd's themselves. */
7550 	for (int i = 0; i < count; i++) {
7551 	 	txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
7552 		bce_dump_txbd(sc, tx_prod, txbd);
7553 		tx_prod = NEXT_TX_BD(tx_prod);
7554 	}
7555 
7556 	BCE_PRINTF(
7557 		"----------------------------"
7558 		"----------------"
7559 		"----------------------------\n");
7560 }
7561 
7562 
7563 /****************************************************************************/
7564 /* Prints out the RX chain.                                                 */
7565 /*                                                                          */
7566 /* Returns:                                                                 */
7567 /*   Nothing.                                                               */
7568 /****************************************************************************/
7569 static void
7570 bce_dump_rx_chain(struct bce_softc *sc, u16 rx_prod, int count)
7571 {
7572 	struct rx_bd *rxbd;
7573 
7574 	/* First some info about the rx_bd chain structure. */
7575 	BCE_PRINTF(
7576 		"----------------------------"
7577 		"  rx_bd  chain  "
7578 		"----------------------------\n");
7579 
7580 	BCE_PRINTF("page size      = 0x%08X, rx chain pages        = 0x%08X\n",
7581 		(u32) BCM_PAGE_SIZE, (u32) RX_PAGES);
7582 
7583 	BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
7584 		(u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE);
7585 
7586 	BCE_PRINTF("total rx_bd    = 0x%08X\n", (u32) TOTAL_RX_BD);
7587 
7588 	BCE_PRINTF(
7589 		"----------------------------"
7590 		"   rx_bd data   "
7591 		"----------------------------\n");
7592 
7593 	/* Now print out the rx_bd's themselves. */
7594 	for (int i = 0; i < count; i++) {
7595 		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
7596 		bce_dump_rxbd(sc, rx_prod, rxbd);
7597 		rx_prod = RX_CHAIN_IDX(rx_prod + 1);
7598 	}
7599 
7600 	BCE_PRINTF(
7601 		"----------------------------"
7602 		"----------------"
7603 		"----------------------------\n");
7604 }
7605 
7606 
7607 /****************************************************************************/
7608 /* Prints out the page chain.                                               */
7609 /*                                                                          */
7610 /* Returns:                                                                 */
7611 /*   Nothing.                                                               */
7612 /****************************************************************************/
7613 static void
7614 bce_dump_pg_chain(struct bce_softc *sc, u16 pg_prod, int count)
7615 {
7616 	struct rx_bd *pgbd;
7617 
7618 	/* First some info about the page chain structure. */
7619 	BCE_PRINTF(
7620 		"----------------------------"
7621 		"   page chain   "
7622 		"----------------------------\n");
7623 
7624 	BCE_PRINTF("page size      = 0x%08X, pg chain pages        = 0x%08X\n",
7625 		(u32) BCM_PAGE_SIZE, (u32) PG_PAGES);
7626 
7627 	BCE_PRINTF("pg_bd per page = 0x%08X, usable pg_bd per page = 0x%08X\n",
7628 		(u32) TOTAL_PG_BD_PER_PAGE, (u32) USABLE_PG_BD_PER_PAGE);
7629 
7630 	BCE_PRINTF("total pg_bd    = 0x%08X, max_pg_bd             = 0x%08X\n",
7631 		(u32) TOTAL_PG_BD, (u32) MAX_PG_BD);
7632 
7633 	BCE_PRINTF(
7634 		"----------------------------"
7635 		"   page data    "
7636 		"----------------------------\n");
7637 
7638 	/* Now print out the pg_bd's themselves. */
7639 	for (int i = 0; i < count; i++) {
7640 		pgbd = &sc->pg_bd_chain[PG_PAGE(pg_prod)][PG_IDX(pg_prod)];
7641 		bce_dump_pgbd(sc, pg_prod, pgbd);
7642 		pg_prod = PG_CHAIN_IDX(pg_prod + 1);
7643 	}
7644 
7645 	BCE_PRINTF(
7646 		"----------------------------"
7647 		"----------------"
7648 		"----------------------------\n");
7649 }
7650 
7651 
7652 /****************************************************************************/
7653 /* Prints out the status block from host memory.                            */
7654 /*                                                                          */
7655 /* Returns:                                                                 */
7656 /*   Nothing.                                                               */
7657 /****************************************************************************/
7658 static void
7659 bce_dump_status_block(struct bce_softc *sc)
7660 {
7661 	struct status_block *sblk;
7662 
7663 	sblk = sc->status_block;
7664 
7665 	BCE_PRINTF(
7666 		"----------------------------"
7667 		"  Status Block  "
7668 		"----------------------------\n");
7669 
7670 	BCE_PRINTF("    0x%08X - attn_bits\n",
7671 		sblk->status_attn_bits);
7672 
7673 	BCE_PRINTF("    0x%08X - attn_bits_ack\n",
7674 		sblk->status_attn_bits_ack);
7675 
7676 	BCE_PRINTF("0x%04X(0x%04X) - rx_cons0\n",
7677 		sblk->status_rx_quick_consumer_index0,
7678 		(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index0));
7679 
7680 	BCE_PRINTF("0x%04X(0x%04X) - tx_cons0\n",
7681 		sblk->status_tx_quick_consumer_index0,
7682 		(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index0));
7683 
7684 	BCE_PRINTF("        0x%04X - status_idx\n", sblk->status_idx);
7685 
7686 	/* These indices are not used for normal L2 drivers. */
7687 	if (sblk->status_rx_quick_consumer_index1)
7688 		BCE_PRINTF("0x%04X(0x%04X) - rx_cons1\n",
7689 			sblk->status_rx_quick_consumer_index1,
7690 			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index1));
7691 
7692 	if (sblk->status_tx_quick_consumer_index1)
7693 		BCE_PRINTF("0x%04X(0x%04X) - tx_cons1\n",
7694 			sblk->status_tx_quick_consumer_index1,
7695 			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index1));
7696 
7697 	if (sblk->status_rx_quick_consumer_index2)
7698 		BCE_PRINTF("0x%04X(0x%04X) - rx_cons2\n",
7699 			sblk->status_rx_quick_consumer_index2,
7700 			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index2));
7701 
7702 	if (sblk->status_tx_quick_consumer_index2)
7703 		BCE_PRINTF("0x%04X(0x%04X) - tx_cons2\n",
7704 			sblk->status_tx_quick_consumer_index2,
7705 			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index2));
7706 
7707 	if (sblk->status_rx_quick_consumer_index3)
7708 		BCE_PRINTF("0x%04X(0x%04X) - rx_cons3\n",
7709 			sblk->status_rx_quick_consumer_index3,
7710 			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index3));
7711 
7712 	if (sblk->status_tx_quick_consumer_index3)
7713 		BCE_PRINTF("0x%04X(0x%04X) - tx_cons3\n",
7714 			sblk->status_tx_quick_consumer_index3,
7715 			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index3));
7716 
7717 	if (sblk->status_rx_quick_consumer_index4 ||
7718 		sblk->status_rx_quick_consumer_index5)
7719 		BCE_PRINTF("rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
7720 			sblk->status_rx_quick_consumer_index4,
7721 			sblk->status_rx_quick_consumer_index5);
7722 
7723 	if (sblk->status_rx_quick_consumer_index6 ||
7724 		sblk->status_rx_quick_consumer_index7)
7725 		BCE_PRINTF("rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
7726 			sblk->status_rx_quick_consumer_index6,
7727 			sblk->status_rx_quick_consumer_index7);
7728 
7729 	if (sblk->status_rx_quick_consumer_index8 ||
7730 		sblk->status_rx_quick_consumer_index9)
7731 		BCE_PRINTF("rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
7732 			sblk->status_rx_quick_consumer_index8,
7733 			sblk->status_rx_quick_consumer_index9);
7734 
7735 	if (sblk->status_rx_quick_consumer_index10 ||
7736 		sblk->status_rx_quick_consumer_index11)
7737 		BCE_PRINTF("rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
7738 			sblk->status_rx_quick_consumer_index10,
7739 			sblk->status_rx_quick_consumer_index11);
7740 
7741 	if (sblk->status_rx_quick_consumer_index12 ||
7742 		sblk->status_rx_quick_consumer_index13)
7743 		BCE_PRINTF("rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
7744 			sblk->status_rx_quick_consumer_index12,
7745 			sblk->status_rx_quick_consumer_index13);
7746 
7747 	if (sblk->status_rx_quick_consumer_index14 ||
7748 		sblk->status_rx_quick_consumer_index15)
7749 		BCE_PRINTF("rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
7750 			sblk->status_rx_quick_consumer_index14,
7751 			sblk->status_rx_quick_consumer_index15);
7752 
7753 	if (sblk->status_completion_producer_index ||
7754 		sblk->status_cmd_consumer_index)
7755 		BCE_PRINTF("com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
7756 			sblk->status_completion_producer_index,
7757 			sblk->status_cmd_consumer_index);
7758 
7759 	BCE_PRINTF(
7760 		"----------------------------"
7761 		"----------------"
7762 		"----------------------------\n");
7763 }
7764 
7765 
7766 /****************************************************************************/
7767 /* Prints out the statistics block from host memory.                        */
7768 /*                                                                          */
7769 /* Returns:                                                                 */
7770 /*   Nothing.                                                               */
7771 /****************************************************************************/
7772 static void
7773 bce_dump_stats_block(struct bce_softc *sc)
7774 {
7775 	struct statistics_block *sblk;
7776 
7777 	sblk = sc->stats_block;
7778 
7779 	BCE_PRINTF(
7780 		"---------------"
7781 		" Stats Block  (All Stats Not Shown Are 0) "
7782 		"---------------\n");
7783 
7784 	if (sblk->stat_IfHCInOctets_hi
7785 		|| sblk->stat_IfHCInOctets_lo)
7786 		BCE_PRINTF("0x%08X:%08X : "
7787 			"IfHcInOctets\n",
7788 			sblk->stat_IfHCInOctets_hi,
7789 			sblk->stat_IfHCInOctets_lo);
7790 
7791 	if (sblk->stat_IfHCInBadOctets_hi
7792 		|| sblk->stat_IfHCInBadOctets_lo)
7793 		BCE_PRINTF("0x%08X:%08X : "
7794 			"IfHcInBadOctets\n",
7795 			sblk->stat_IfHCInBadOctets_hi,
7796 			sblk->stat_IfHCInBadOctets_lo);
7797 
7798 	if (sblk->stat_IfHCOutOctets_hi
7799 		|| sblk->stat_IfHCOutOctets_lo)
7800 		BCE_PRINTF("0x%08X:%08X : "
7801 			"IfHcOutOctets\n",
7802 			sblk->stat_IfHCOutOctets_hi,
7803 			sblk->stat_IfHCOutOctets_lo);
7804 
7805 	if (sblk->stat_IfHCOutBadOctets_hi
7806 		|| sblk->stat_IfHCOutBadOctets_lo)
7807 		BCE_PRINTF("0x%08X:%08X : "
7808 			"IfHcOutBadOctets\n",
7809 			sblk->stat_IfHCOutBadOctets_hi,
7810 			sblk->stat_IfHCOutBadOctets_lo);
7811 
7812 	if (sblk->stat_IfHCInUcastPkts_hi
7813 		|| sblk->stat_IfHCInUcastPkts_lo)
7814 		BCE_PRINTF("0x%08X:%08X : "
7815 			"IfHcInUcastPkts\n",
7816 			sblk->stat_IfHCInUcastPkts_hi,
7817 			sblk->stat_IfHCInUcastPkts_lo);
7818 
7819 	if (sblk->stat_IfHCInBroadcastPkts_hi
7820 		|| sblk->stat_IfHCInBroadcastPkts_lo)
7821 		BCE_PRINTF("0x%08X:%08X : "
7822 			"IfHcInBroadcastPkts\n",
7823 			sblk->stat_IfHCInBroadcastPkts_hi,
7824 			sblk->stat_IfHCInBroadcastPkts_lo);
7825 
7826 	if (sblk->stat_IfHCInMulticastPkts_hi
7827 		|| sblk->stat_IfHCInMulticastPkts_lo)
7828 		BCE_PRINTF("0x%08X:%08X : "
7829 			"IfHcInMulticastPkts\n",
7830 			sblk->stat_IfHCInMulticastPkts_hi,
7831 			sblk->stat_IfHCInMulticastPkts_lo);
7832 
7833 	if (sblk->stat_IfHCOutUcastPkts_hi
7834 		|| sblk->stat_IfHCOutUcastPkts_lo)
7835 		BCE_PRINTF("0x%08X:%08X : "
7836 			"IfHcOutUcastPkts\n",
7837 			sblk->stat_IfHCOutUcastPkts_hi,
7838 			sblk->stat_IfHCOutUcastPkts_lo);
7839 
7840 	if (sblk->stat_IfHCOutBroadcastPkts_hi
7841 		|| sblk->stat_IfHCOutBroadcastPkts_lo)
7842 		BCE_PRINTF("0x%08X:%08X : "
7843 			"IfHcOutBroadcastPkts\n",
7844 			sblk->stat_IfHCOutBroadcastPkts_hi,
7845 			sblk->stat_IfHCOutBroadcastPkts_lo);
7846 
7847 	if (sblk->stat_IfHCOutMulticastPkts_hi
7848 		|| sblk->stat_IfHCOutMulticastPkts_lo)
7849 		BCE_PRINTF("0x%08X:%08X : "
7850 			"IfHcOutMulticastPkts\n",
7851 			sblk->stat_IfHCOutMulticastPkts_hi,
7852 			sblk->stat_IfHCOutMulticastPkts_lo);
7853 
7854 	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
7855 		BCE_PRINTF("         0x%08X : "
7856 			"emac_tx_stat_dot3statsinternalmactransmiterrors\n",
7857 			sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
7858 
7859 	if (sblk->stat_Dot3StatsCarrierSenseErrors)
7860 		BCE_PRINTF("         0x%08X : Dot3StatsCarrierSenseErrors\n",
7861 			sblk->stat_Dot3StatsCarrierSenseErrors);
7862 
7863 	if (sblk->stat_Dot3StatsFCSErrors)
7864 		BCE_PRINTF("         0x%08X : Dot3StatsFCSErrors\n",
7865 			sblk->stat_Dot3StatsFCSErrors);
7866 
7867 	if (sblk->stat_Dot3StatsAlignmentErrors)
7868 		BCE_PRINTF("         0x%08X : Dot3StatsAlignmentErrors\n",
7869 			sblk->stat_Dot3StatsAlignmentErrors);
7870 
7871 	if (sblk->stat_Dot3StatsSingleCollisionFrames)
7872 		BCE_PRINTF("         0x%08X : Dot3StatsSingleCollisionFrames\n",
7873 			sblk->stat_Dot3StatsSingleCollisionFrames);
7874 
7875 	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
7876 		BCE_PRINTF("         0x%08X : Dot3StatsMultipleCollisionFrames\n",
7877 			sblk->stat_Dot3StatsMultipleCollisionFrames);
7878 
7879 	if (sblk->stat_Dot3StatsDeferredTransmissions)
7880 		BCE_PRINTF("         0x%08X : Dot3StatsDeferredTransmissions\n",
7881 			sblk->stat_Dot3StatsDeferredTransmissions);
7882 
7883 	if (sblk->stat_Dot3StatsExcessiveCollisions)
7884 		BCE_PRINTF("         0x%08X : Dot3StatsExcessiveCollisions\n",
7885 			sblk->stat_Dot3StatsExcessiveCollisions);
7886 
7887 	if (sblk->stat_Dot3StatsLateCollisions)
7888 		BCE_PRINTF("         0x%08X : Dot3StatsLateCollisions\n",
7889 			sblk->stat_Dot3StatsLateCollisions);
7890 
7891 	if (sblk->stat_EtherStatsCollisions)
7892 		BCE_PRINTF("         0x%08X : EtherStatsCollisions\n",
7893 			sblk->stat_EtherStatsCollisions);
7894 
7895 	if (sblk->stat_EtherStatsFragments)
7896 		BCE_PRINTF("         0x%08X : EtherStatsFragments\n",
7897 			sblk->stat_EtherStatsFragments);
7898 
7899 	if (sblk->stat_EtherStatsJabbers)
7900 		BCE_PRINTF("         0x%08X : EtherStatsJabbers\n",
7901 			sblk->stat_EtherStatsJabbers);
7902 
7903 	if (sblk->stat_EtherStatsUndersizePkts)
7904 		BCE_PRINTF("         0x%08X : EtherStatsUndersizePkts\n",
7905 			sblk->stat_EtherStatsUndersizePkts);
7906 
7907 	if (sblk->stat_EtherStatsOverrsizePkts)
7908 		BCE_PRINTF("         0x%08X : EtherStatsOverrsizePkts\n",
7909 			sblk->stat_EtherStatsOverrsizePkts);
7910 
7911 	if (sblk->stat_EtherStatsPktsRx64Octets)
7912 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx64Octets\n",
7913 			sblk->stat_EtherStatsPktsRx64Octets);
7914 
7915 	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
7916 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
7917 			sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
7918 
7919 	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
7920 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx128Octetsto255Octets\n",
7921 			sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
7922 
7923 	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
7924 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx256Octetsto511Octets\n",
7925 			sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
7926 
7927 	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
7928 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx512Octetsto1023Octets\n",
7929 			sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
7930 
7931 	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
7932 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx1024Octetsto1522Octets\n",
7933 			sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
7934 
7935 	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
7936 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx1523Octetsto9022Octets\n",
7937 			sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
7938 
7939 	if (sblk->stat_EtherStatsPktsTx64Octets)
7940 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx64Octets\n",
7941 			sblk->stat_EtherStatsPktsTx64Octets);
7942 
7943 	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
7944 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
7945 			sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
7946 
7947 	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
7948 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx128Octetsto255Octets\n",
7949 			sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
7950 
7951 	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
7952 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx256Octetsto511Octets\n",
7953 			sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
7954 
7955 	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
7956 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx512Octetsto1023Octets\n",
7957 			sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
7958 
7959 	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
7960 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx1024Octetsto1522Octets\n",
7961 			sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
7962 
7963 	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
7964 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx1523Octetsto9022Octets\n",
7965 			sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
7966 
7967 	if (sblk->stat_XonPauseFramesReceived)
7968 		BCE_PRINTF("         0x%08X : XonPauseFramesReceived\n",
7969 			sblk->stat_XonPauseFramesReceived);
7970 
7971 	if (sblk->stat_XoffPauseFramesReceived)
7972 		BCE_PRINTF("         0x%08X : XoffPauseFramesReceived\n",
7973 			sblk->stat_XoffPauseFramesReceived);
7974 
7975 	if (sblk->stat_OutXonSent)
7976 		BCE_PRINTF("         0x%08X : OutXonSent\n",
7977 			sblk->stat_OutXonSent);
7978 
7979 	if (sblk->stat_OutXoffSent)
7980 		BCE_PRINTF("         0x%08X : OutXoffSent\n",
7981 			sblk->stat_OutXoffSent);
7982 
7983 	if (sblk->stat_FlowControlDone)
7984 		BCE_PRINTF("         0x%08X : FlowControlDone\n",
7985 			sblk->stat_FlowControlDone);
7986 
7987 	if (sblk->stat_MacControlFramesReceived)
7988 		BCE_PRINTF("         0x%08X : MacControlFramesReceived\n",
7989 			sblk->stat_MacControlFramesReceived);
7990 
7991 	if (sblk->stat_XoffStateEntered)
7992 		BCE_PRINTF("         0x%08X : XoffStateEntered\n",
7993 			sblk->stat_XoffStateEntered);
7994 
7995 	if (sblk->stat_IfInFramesL2FilterDiscards)
7996 		BCE_PRINTF("         0x%08X : IfInFramesL2FilterDiscards\n",
7997 			sblk->stat_IfInFramesL2FilterDiscards);
7998 
7999 	if (sblk->stat_IfInRuleCheckerDiscards)
8000 		BCE_PRINTF("         0x%08X : IfInRuleCheckerDiscards\n",
8001 			sblk->stat_IfInRuleCheckerDiscards);
8002 
8003 	if (sblk->stat_IfInFTQDiscards)
8004 		BCE_PRINTF("         0x%08X : IfInFTQDiscards\n",
8005 			sblk->stat_IfInFTQDiscards);
8006 
8007 	if (sblk->stat_IfInMBUFDiscards)
8008 		BCE_PRINTF("         0x%08X : IfInMBUFDiscards\n",
8009 			sblk->stat_IfInMBUFDiscards);
8010 
8011 	if (sblk->stat_IfInRuleCheckerP4Hit)
8012 		BCE_PRINTF("         0x%08X : IfInRuleCheckerP4Hit\n",
8013 			sblk->stat_IfInRuleCheckerP4Hit);
8014 
8015 	if (sblk->stat_CatchupInRuleCheckerDiscards)
8016 		BCE_PRINTF("         0x%08X : CatchupInRuleCheckerDiscards\n",
8017 			sblk->stat_CatchupInRuleCheckerDiscards);
8018 
8019 	if (sblk->stat_CatchupInFTQDiscards)
8020 		BCE_PRINTF("         0x%08X : CatchupInFTQDiscards\n",
8021 			sblk->stat_CatchupInFTQDiscards);
8022 
8023 	if (sblk->stat_CatchupInMBUFDiscards)
8024 		BCE_PRINTF("         0x%08X : CatchupInMBUFDiscards\n",
8025 			sblk->stat_CatchupInMBUFDiscards);
8026 
8027 	if (sblk->stat_CatchupInRuleCheckerP4Hit)
8028 		BCE_PRINTF("         0x%08X : CatchupInRuleCheckerP4Hit\n",
8029 			sblk->stat_CatchupInRuleCheckerP4Hit);
8030 
8031 	BCE_PRINTF(
8032 		"----------------------------"
8033 		"----------------"
8034 		"----------------------------\n");
8035 }
8036 
8037 
8038 /****************************************************************************/
8039 /* Prints out a summary of the driver state.                                */
8040 /*                                                                          */
8041 /* Returns:                                                                 */
8042 /*   Nothing.                                                               */
8043 /****************************************************************************/
8044 static void
8045 bce_dump_driver_state(struct bce_softc *sc)
8046 {
8047 	u32 val_hi, val_lo;
8048 
8049 	BCE_PRINTF(
8050 		"-----------------------------"
8051 		" Driver State "
8052 		"-----------------------------\n");
8053 
8054 	val_hi = BCE_ADDR_HI(sc);
8055 	val_lo = BCE_ADDR_LO(sc);
8056 	BCE_PRINTF("0x%08X:%08X - (sc) driver softc structure virtual address\n",
8057 		val_hi, val_lo);
8058 
8059 	val_hi = BCE_ADDR_HI(sc->bce_vhandle);
8060 	val_lo = BCE_ADDR_LO(sc->bce_vhandle);
8061 	BCE_PRINTF("0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual address\n",
8062 		val_hi, val_lo);
8063 
8064 	val_hi = BCE_ADDR_HI(sc->status_block);
8065 	val_lo = BCE_ADDR_LO(sc->status_block);
8066 	BCE_PRINTF("0x%08X:%08X - (sc->status_block) status block virtual address\n",
8067 		val_hi, val_lo);
8068 
8069 	val_hi = BCE_ADDR_HI(sc->stats_block);
8070 	val_lo = BCE_ADDR_LO(sc->stats_block);
8071 	BCE_PRINTF("0x%08X:%08X - (sc->stats_block) statistics block virtual address\n",
8072 		val_hi, val_lo);
8073 
8074 	val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
8075 	val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
8076 	BCE_PRINTF(
8077 		"0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain virtual address\n",
8078 		val_hi, val_lo);
8079 
8080 	val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
8081 	val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
8082 	BCE_PRINTF(
8083 		"0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain virtual address\n",
8084 		val_hi, val_lo);
8085 
8086 	val_hi = BCE_ADDR_HI(sc->pg_bd_chain);
8087 	val_lo = BCE_ADDR_LO(sc->pg_bd_chain);
8088 	BCE_PRINTF(
8089 		"0x%08X:%08X - (sc->pg_bd_chain) page chain virtual address\n",
8090 		val_hi, val_lo);
8091 
8092 	val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
8093 	val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
8094 	BCE_PRINTF(
8095 		"0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
8096 		val_hi, val_lo);
8097 
8098 	val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
8099 	val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
8100 	BCE_PRINTF(
8101 		"0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
8102 		val_hi, val_lo);
8103 
8104 	val_hi = BCE_ADDR_HI(sc->pg_mbuf_ptr);
8105 	val_lo = BCE_ADDR_LO(sc->pg_mbuf_ptr);
8106 	BCE_PRINTF(
8107 		"0x%08X:%08X - (sc->pg_mbuf_ptr) page mbuf chain virtual address\n",
8108 		val_hi, val_lo);
8109 
8110 	BCE_PRINTF("         0x%08X - (sc->interrupts_generated) h/w intrs\n",
8111 		sc->interrupts_generated);
8112 
8113 	BCE_PRINTF("         0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
8114 		sc->rx_interrupts);
8115 
8116 	BCE_PRINTF("         0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
8117 		sc->tx_interrupts);
8118 
8119 	BCE_PRINTF("         0x%08X - (sc->last_status_idx) status block index\n",
8120 		sc->last_status_idx);
8121 
8122 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->tx_prod) tx producer index\n",
8123 		sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod));
8124 
8125 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->tx_cons) tx consumer index\n",
8126 		sc->tx_cons, (u16) TX_CHAIN_IDX(sc->tx_cons));
8127 
8128 	BCE_PRINTF("         0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
8129 		sc->tx_prod_bseq);
8130 
8131 	BCE_PRINTF("         0x%08X - (sc->debug_tx_mbuf_alloc) tx mbufs allocated\n",
8132 		sc->debug_tx_mbuf_alloc);
8133 
8134 	BCE_PRINTF("         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
8135 		sc->used_tx_bd);
8136 
8137 	BCE_PRINTF("0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
8138 		sc->tx_hi_watermark, sc->max_tx_bd);
8139 
8140 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->rx_prod) rx producer index\n",
8141 		sc->rx_prod, (u16) RX_CHAIN_IDX(sc->rx_prod));
8142 
8143 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->rx_cons) rx consumer index\n",
8144 		sc->rx_cons, (u16) RX_CHAIN_IDX(sc->rx_cons));
8145 
8146 	BCE_PRINTF("         0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
8147 		sc->rx_prod_bseq);
8148 
8149 	BCE_PRINTF("         0x%08X - (sc->debug_rx_mbuf_alloc) rx mbufs allocated\n",
8150 		sc->debug_rx_mbuf_alloc);
8151 
8152 	BCE_PRINTF("         0x%08X - (sc->free_rx_bd) free rx_bd's\n",
8153 		sc->free_rx_bd);
8154 
8155 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->pg_prod) page producer index\n",
8156 		sc->pg_prod, (u16) PG_CHAIN_IDX(sc->pg_prod));
8157 
8158 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->pg_cons) page consumer index\n",
8159 		sc->pg_cons, (u16) PG_CHAIN_IDX(sc->pg_cons));
8160 
8161 	BCE_PRINTF("         0x%08X - (sc->debug_pg_mbuf_alloc) page mbufs allocated\n",
8162 		sc->debug_pg_mbuf_alloc);
8163 
8164 	BCE_PRINTF("         0x%08X - (sc->free_pg_bd) free page rx_bd's\n",
8165 		sc->free_pg_bd);
8166 
8167 	BCE_PRINTF("0x%08X/%08X - (sc->pg_low_watermark) page low watermark\n",
8168 		sc->pg_low_watermark, sc->max_pg_bd);
8169 
8170 	BCE_PRINTF("         0x%08X - (sc->mbuf_alloc_failed) "
8171 		"mbuf alloc failures\n",
8172 		sc->mbuf_alloc_failed);
8173 
8174 	BCE_PRINTF("         0x%08X - (sc->debug_mbuf_sim_alloc_failed) "
8175 		"simulated mbuf alloc failures\n",
8176 		sc->debug_mbuf_sim_alloc_failed);
8177 
8178 	BCE_PRINTF(
8179 		"----------------------------"
8180 		"----------------"
8181 		"----------------------------\n");
8182 }
8183 
8184 
8185 /****************************************************************************/
8186 /* Prints out the hardware state through a summary of important registers,  */
8187 /* followed by a complete register dump.                                    */
8188 /*                                                                          */
8189 /* Returns:                                                                 */
8190 /*   Nothing.                                                               */
8191 /****************************************************************************/
8192 static void
8193 bce_dump_hw_state(struct bce_softc *sc)
8194 {
8195 	u32 val;
8196 
8197 	BCE_PRINTF(
8198 		"----------------------------"
8199 		" Hardware State "
8200 		"----------------------------\n");
8201 
8202 	BCE_PRINTF("0x%08X - bootcode version\n", sc->bce_fw_ver);
8203 
8204 	val = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
8205 	BCE_PRINTF("0x%08X - (0x%06X) misc_enable_status_bits\n",
8206 		val, BCE_MISC_ENABLE_STATUS_BITS);
8207 
8208 	val = REG_RD(sc, BCE_DMA_STATUS);
8209 	BCE_PRINTF("0x%08X - (0x%06X) dma_status\n", val, BCE_DMA_STATUS);
8210 
8211 	val = REG_RD(sc, BCE_CTX_STATUS);
8212 	BCE_PRINTF("0x%08X - (0x%06X) ctx_status\n", val, BCE_CTX_STATUS);
8213 
8214 	val = REG_RD(sc, BCE_EMAC_STATUS);
8215 	BCE_PRINTF("0x%08X - (0x%06X) emac_status\n", val, BCE_EMAC_STATUS);
8216 
8217 	val = REG_RD(sc, BCE_RPM_STATUS);
8218 	BCE_PRINTF("0x%08X - (0x%06X) rpm_status\n", val, BCE_RPM_STATUS);
8219 
8220 	val = REG_RD(sc, 0x2004);
8221 	BCE_PRINTF("0x%08X - (0x%06X) rlup_status\n", val, 0x2004);
8222 
8223 	val = REG_RD(sc, BCE_RV2P_STATUS);
8224 	BCE_PRINTF("0x%08X - (0x%06X) rv2p_status\n", val, BCE_RV2P_STATUS);
8225 
8226 	val = REG_RD(sc, 0x2c04);
8227 	BCE_PRINTF("0x%08X - (0x%06X) rdma_status\n", val, 0x2c04);
8228 
8229 	val = REG_RD(sc, BCE_TBDR_STATUS);
8230 	BCE_PRINTF("0x%08X - (0x%06X) tbdr_status\n", val, BCE_TBDR_STATUS);
8231 
8232 	val = REG_RD(sc, BCE_TDMA_STATUS);
8233 	BCE_PRINTF("0x%08X - (0x%06X) tdma_status\n", val, BCE_TDMA_STATUS);
8234 
8235 	val = REG_RD(sc, BCE_HC_STATUS);
8236 	BCE_PRINTF("0x%08X - (0x%06X) hc_status\n", val, BCE_HC_STATUS);
8237 
8238 	val = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
8239 	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val, BCE_TXP_CPU_STATE);
8240 
8241 	val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
8242 	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val, BCE_TPAT_CPU_STATE);
8243 
8244 	val = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
8245 	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val, BCE_RXP_CPU_STATE);
8246 
8247 	val = REG_RD_IND(sc, BCE_COM_CPU_STATE);
8248 	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", val, BCE_COM_CPU_STATE);
8249 
8250 	val = REG_RD_IND(sc, BCE_MCP_CPU_STATE);
8251 	BCE_PRINTF("0x%08X - (0x%06X) mcp_cpu_state\n", val, BCE_MCP_CPU_STATE);
8252 
8253 	val = REG_RD_IND(sc, BCE_CP_CPU_STATE);
8254 	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", val, BCE_CP_CPU_STATE);
8255 
8256 	BCE_PRINTF(
8257 		"----------------------------"
8258 		"----------------"
8259 		"----------------------------\n");
8260 
8261 	BCE_PRINTF(
8262 		"----------------------------"
8263 		" Register  Dump "
8264 		"----------------------------\n");
8265 
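	/* Dump registers 0x400 through 0x7ffc, four per line. */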
8266 	for (int i = 0x400; i < 0x8000; i += 0x10) {
8267 		BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
8268 			i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
8269 			REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
8270 	}
8271 
8272 	BCE_PRINTF(
8273 		"----------------------------"
8274 		"----------------"
8275 		"----------------------------\n");
8276 }
8277 
8278 
8279 /****************************************************************************/
8280 /* Prints out the bootcode state.                                           */
8281 /*                                                                          */
8282 /* Returns:                                                                 */
8283 /*   Nothing.                                                               */
8284 /****************************************************************************/
8285 static void
8286 bce_dump_bc_state(struct bce_softc *sc)
8287 {
8288 	u32 val;
8289 
8290 	BCE_PRINTF(
8291 		"----------------------------"
8292 		" Bootcode State "
8293 		"----------------------------\n");
8294 
8295 	BCE_PRINTF("0x%08X - bootcode version\n", sc->bce_fw_ver);
8296 
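	/* The bootcode state words live in shared memory, so use indirect reads. */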
8297 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_RESET_TYPE);
8298 	BCE_PRINTF("0x%08X - (0x%06X) reset_type\n",
8299 		val, BCE_BC_RESET_TYPE);
8300 
8301 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_STATE);
8302 	BCE_PRINTF("0x%08X - (0x%06X) state\n",
8303 		val, BCE_BC_STATE);
8304 
8305 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_CONDITION);
8306 	BCE_PRINTF("0x%08X - (0x%06X) condition\n",
8307 		val, BCE_BC_CONDITION);
8308 
8309 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_STATE_DEBUG_CMD);
8310 	BCE_PRINTF("0x%08X - (0x%06X) debug_cmd\n",
8311 		val, BCE_BC_STATE_DEBUG_CMD);
8312 
8313 	BCE_PRINTF(
8314 		"----------------------------"
8315 		"----------------"
8316 		"----------------------------\n");
8317 }
8318 
8319 
8320 /****************************************************************************/
8321 /* Prints out the TXP state.                                                */
8322 /*                                                                          */
8323 /* Returns:                                                                 */
8324 /*   Nothing.                                                               */
8325 /****************************************************************************/
8326 static void
8327 bce_dump_txp_state(struct bce_softc *sc)
8328 {
8329 	u32 val1;
8330 
8331 	BCE_PRINTF(
8332 		"----------------------------"
8333 		"   TXP  State   "
8334 		"----------------------------\n");
8335 
8336 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_MODE);
8337 	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_mode\n", val1, BCE_TXP_CPU_MODE);
8338 
8339 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
8340 	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val1, BCE_TXP_CPU_STATE);
8341 
8342 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK);
8343 	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_event_mask\n", val1, BCE_TXP_CPU_EVENT_MASK);
8344 
8345 	BCE_PRINTF(
8346 		"----------------------------"
8347 		" Register  Dump "
8348 		"----------------------------\n");
8349 
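	/* Walk the TXP processor register space using indirect reads. */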
8350 	for (int i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) {
8351 		/* Skip the big blank spaces */
8352 		if (i < 0x45400 || i > 0x5ffff)
8353 			BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
8354 				i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
8355 				REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
8356 	}
8357 
8358 	BCE_PRINTF(
8359 		"----------------------------"
8360 		"----------------"
8361 		"----------------------------\n");
8362 }
8363 
8364 
8365 /****************************************************************************/
8366 /* Prints out the RXP state.                                                */
8367 /*                                                                          */
8368 /* Returns:                                                                 */
8369 /*   Nothing.                                                               */
8370 /****************************************************************************/
8371 static void
8372 bce_dump_rxp_state(struct bce_softc *sc)
8373 {
8374 	u32 val1;
8375 
8376 	BCE_PRINTF(
8377 		"----------------------------"
8378 		"   RXP  State   "
8379 		"----------------------------\n");
8380 
8381 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_MODE);
8382 	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_mode\n", val1, BCE_RXP_CPU_MODE);
8383 
8384 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
8385 	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val1, BCE_RXP_CPU_STATE);
8386 
8387 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK);
8388 	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_event_mask\n", val1, BCE_RXP_CPU_EVENT_MASK);
8389 
8390 	BCE_PRINTF(
8391 		"----------------------------"
8392 		" Register  Dump "
8393 		"----------------------------\n");
8394 
8395 	for (int i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) {
8396 		/* Skip the big blank spaces */
8397 		if (i < 0xc5400 || i > 0xdffff)
8398 			BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
8399 				i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
8400 				REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
8401 	}
8402 
8403 	BCE_PRINTF(
8404 		"----------------------------"
8405 		"----------------"
8406 		"----------------------------\n");
8407 }
8408 
8409 
8410 /****************************************************************************/
8411 /* Prints out the TPAT state.                                               */
8412 /*                                                                          */
8413 /* Returns:                                                                 */
8414 /*   Nothing.                                                               */
8415 /****************************************************************************/
8416 static void
8417 bce_dump_tpat_state(struct bce_softc *sc)
8418 {
8419 	u32 val1;
8420 
8421 	BCE_PRINTF(
8422 		"----------------------------"
8423 		"   TPAT State   "
8424 		"----------------------------\n");
8425 
8426 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_MODE);
8427 	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_mode\n", val1, BCE_TPAT_CPU_MODE);
8428 
8429 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
8430 	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val1, BCE_TPAT_CPU_STATE);
8431 
8432 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK);
8433 	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_event_mask\n", val1, BCE_TPAT_CPU_EVENT_MASK);
8434 
8435 	BCE_PRINTF(
8436 		"----------------------------"
8437 		" Register  Dump "
8438 		"----------------------------\n");
8439 
8440 	for (int i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) {
8441 		/* Skip the big blank spaces */
8442 		if (i < 0x85400 || i > 0x9ffff)
8443 			BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
8444 				i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
8445 				REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
8446 	}
8447 
8448 	BCE_PRINTF(
8449 		"----------------------------"
8450 		"----------------"
8451 		"----------------------------\n");
8452 }
8453 
8454 
8455 /* ToDo: Add CP and COM processor state dumps. */
8456 
8457 
8458 /****************************************************************************/
8459 /* Prints out the driver state and then enters the debugger.                */
8460 /*                                                                          */
8461 /* Returns:                                                                 */
8462 /*   Nothing.                                                               */
8463 /****************************************************************************/
8464 static void
8465 bce_breakpoint(struct bce_softc *sc)
8466 {
8467 
8468 	/*
8469 	 * Unreachable code to silence compiler warnings
8470 	 * about unused functions.
8471 	 */
8472 	if (0) {
8473 		bce_freeze_controller(sc);
8474 		bce_unfreeze_controller(sc);
8475 		bce_dump_txbd(sc, 0, NULL);
8476 		bce_dump_rxbd(sc, 0, NULL);
8477 		bce_dump_pgbd(sc, 0, NULL);
8478 		bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
8479 		bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
8480 		bce_dump_pg_mbuf_chain(sc, 0, USABLE_PG_BD);
8481 		bce_dump_l2fhdr(sc, 0, NULL);
8482 		bce_dump_ctx(sc, RX_CID);
8483 		bce_dump_ftqs(sc);
8484 		bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
8485 		bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
8486 		bce_dump_pg_chain(sc, 0, USABLE_PG_BD);
8487 		bce_dump_status_block(sc);
8488 		bce_dump_stats_block(sc);
8489 		bce_dump_driver_state(sc);
8490 		bce_dump_hw_state(sc);
8491 		bce_dump_bc_state(sc);
8492 		bce_dump_txp_state(sc);
8493 		bce_dump_rxp_state(sc);
8494 		bce_dump_tpat_state(sc);
8495 	}
8496 
8497 	bce_dump_status_block(sc);
8498 	bce_dump_driver_state(sc);
8499 
8500 	/* Call the debugger. */
8501 	breakpoint();
8502 
8503 	return;
8504 }
8505 #endif
8506 
8507