xref: /freebsd/sys/dev/bce/if_bce.c (revision 2227a3e9e1a0bcba8481a8067ee8c4b9a96fdda3)
1 /*-
2  * Copyright (c) 2006-2008 Broadcom Corporation
3  *	David Christensen <davidch@broadcom.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written consent.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 /*
35  * The following controllers are supported by this driver:
36  *   BCM5706C A2, A3
37  *   BCM5706S A2, A3
38  *   BCM5708C B1, B2
39  *   BCM5708S B1, B2
40  *
41  * The following controllers are not supported by this driver:
42  *   BCM5706C A0, A1 (pre-production)
43  *   BCM5706S A0, A1 (pre-production)
44  *   BCM5708C A0, B0 (pre-production)
45  *   BCM5708S A0, B0 (pre-production)
46  */
47 
48 #include "opt_bce.h"
49 
50 #include <dev/bce/if_bcereg.h>
51 #include <dev/bce/if_bcefw.h>
52 
53 /****************************************************************************/
54 /* BCE Debug Options                                                        */
55 /****************************************************************************/
56 #ifdef BCE_DEBUG
57 	u32 bce_debug = BCE_WARN;
58 
59 	/*          0 = Never              */
60 	/*          1 = 1 in 2,147,483,648 */
61 	/*        256 = 1 in     8,388,608 */
62 	/*       2048 = 1 in     1,048,576 */
63 	/*      65536 = 1 in        32,768 */
64 	/*    1048576 = 1 in         2,048 */
65 	/*  268435456 =	1 in             8 */
66 	/*  536870912 = 1 in             4 */
67 	/* 1073741824 = 1 in             2 */
68 
69 	/* Controls how often the l2_fhdr frame error check will fail. */
70 	int bce_debug_l2fhdr_status_check = 0;
71 
72 	/* Controls how often the unexpected attention check will fail. */
73 	int bce_debug_unexpected_attention = 0;
74 
75 	/* Controls how often to simulate an mbuf allocation failure. */
76 	int bce_debug_mbuf_allocation_failure = 0;
77 
78 	/* Controls how often to simulate a DMA mapping failure. */
79 	int bce_debug_dma_map_addr_failure = 0;
80 
81 	/* Controls how often to simulate a bootcode failure. */
82 	int bce_debug_bootcode_running_failure = 0;
83 #endif
84 
85 
86 /****************************************************************************/
87 /* PCI Device ID Table                                                      */
88 /*                                                                          */
89 /* Used by bce_probe() to identify the devices supported by this driver.    */
90 /****************************************************************************/
91 #define BCE_DEVDESC_MAX		64
92 
93 static struct bce_type bce_devs[] = {
94 	/* BCM5706C Controllers and OEM boards. */
95 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
96 		"HP NC370T Multifunction Gigabit Server Adapter" },
97 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
98 		"HP NC370i Multifunction Gigabit Server Adapter" },
99 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
100 		"Broadcom NetXtreme II BCM5706 1000Base-T" },
101 
102 	/* BCM5706S controllers and OEM boards. */
103 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
104 		"HP NC370F Multifunction Gigabit Server Adapter" },
105 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
106 		"Broadcom NetXtreme II BCM5706 1000Base-SX" },
107 
108 	/* BCM5708C controllers and OEM boards. */
109 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
110 		"Broadcom NetXtreme II BCM5708 1000Base-T" },
111 
112 	/* BCM5708S controllers and OEM boards. */
113 	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  PCI_ANY_ID,  PCI_ANY_ID,
114 		"Broadcom NetXtreme II BCM5708 1000Base-SX" },
115 	{ 0, 0, 0, 0, NULL }
116 };
117 
118 
119 /****************************************************************************/
120 /* Supported Flash NVRAM device data.                                       */
121 /****************************************************************************/
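/*
 * Each entry below is a struct flash_spec (see if_bcereg.h): the first
 * five words hold the strapping/configuration register values used to
 * identify and drive the part, followed by the buffered-flash flag, the
 * page geometry (bits and size), the byte address mask, the total size
 * in bytes, and a human readable name.
 */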
122 static struct flash_spec flash_table[] =
123 {
124 	/* Slow EEPROM */
125 	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
126 	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
127 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
128 	 "EEPROM - slow"},
129 	/* Expansion entry 0001 */
130 	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
131 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
132 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
133 	 "Entry 0001"},
134 	/* Saifun SA25F010 (non-buffered flash) */
135 	/* strap, cfg1, & write1 need updates */
136 	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
137 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
138 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
139 	 "Non-buffered flash (128kB)"},
140 	/* Saifun SA25F020 (non-buffered flash) */
141 	/* strap, cfg1, & write1 need updates */
142 	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
143 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
144 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
145 	 "Non-buffered flash (256kB)"},
146 	/* Expansion entry 0100 */
147 	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
148 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
149 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
150 	 "Entry 0100"},
151 	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
152 	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
153 	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
154 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
155 	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
156 	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
157 	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
158 	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
159 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
160 	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
161 	/* Saifun SA25F005 (non-buffered flash) */
162 	/* strap, cfg1, & write1 need updates */
163 	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
164 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
165 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
166 	 "Non-buffered flash (64kB)"},
167 	/* Fast EEPROM */
168 	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
169 	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
170 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
171 	 "EEPROM - fast"},
172 	/* Expansion entry 1001 */
173 	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
174 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
175 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
176 	 "Entry 1001"},
177 	/* Expansion entry 1010 */
178 	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
179 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
180 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
181 	 "Entry 1010"},
182 	/* ATMEL AT45DB011B (buffered flash) */
183 	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
184 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
185 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
186 	 "Buffered flash (128kB)"},
187 	/* Expansion entry 1100 */
188 	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
189 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
190 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
191 	 "Entry 1100"},
192 	/* Expansion entry 1101 */
193 	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
194 	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
196 	 "Entry 1101"},
197 	/* Atmel Expansion entry 1110 */
198 	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
199 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
200 	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
201 	 "Entry 1110 (Atmel)"},
202 	/* ATMEL AT45DB021B (buffered flash) */
203 	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
204 	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
205 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
206 	 "Buffered flash (256kB)"},
207 };
208 
209 
210 /****************************************************************************/
211 /* FreeBSD device entry points.                                             */
212 /****************************************************************************/
213 static int  bce_probe				(device_t);
214 static int  bce_attach				(device_t);
215 static int  bce_detach				(device_t);
216 static int  bce_shutdown			(device_t);
217 
218 
219 /****************************************************************************/
220 /* BCE Debug Data Structure Dump Routines                                   */
221 /****************************************************************************/
222 #ifdef BCE_DEBUG
223 static u32  bce_ctx_rd				(struct bce_softc *, u32, u32);
224 static void bce_dump_mbuf 			(struct bce_softc *, struct mbuf *);
225 static void bce_dump_tx_mbuf_chain	(struct bce_softc *, u16, int);
226 static void bce_dump_rx_mbuf_chain	(struct bce_softc *, u16, int);
227 static void bce_dump_pg_mbuf_chain	(struct bce_softc *, u16, int);
228 static void bce_dump_txbd			(struct bce_softc *, int, struct tx_bd *);
229 static void bce_dump_rxbd			(struct bce_softc *, int, struct rx_bd *);
230 static void bce_dump_pgbd			(struct bce_softc *, int, struct rx_bd *);
231 static void bce_dump_l2fhdr			(struct bce_softc *, int, struct l2_fhdr *);
232 static void bce_dump_ctx			(struct bce_softc *, u16);
233 static void bce_dump_ftqs			(struct bce_softc *);
234 static void bce_dump_tx_chain		(struct bce_softc *, u16, int);
235 static void bce_dump_rx_chain		(struct bce_softc *, u16, int);
236 static void bce_dump_pg_chain		(struct bce_softc *, u16, int);
237 static void bce_dump_status_block	(struct bce_softc *);
238 static void bce_dump_stats_block	(struct bce_softc *);
239 static void bce_dump_driver_state	(struct bce_softc *);
240 static void bce_dump_hw_state		(struct bce_softc *);
241 static void bce_dump_bc_state		(struct bce_softc *);
242 static void bce_breakpoint			(struct bce_softc *);
243 #endif
244 
245 
246 /****************************************************************************/
247 /* BCE Register/Memory Access Routines                                      */
248 /****************************************************************************/
249 static u32  bce_reg_rd_ind			(struct bce_softc *, u32);
250 static void bce_reg_wr_ind			(struct bce_softc *, u32, u32);
251 static void bce_ctx_wr				(struct bce_softc *, u32, u32, u32);
252 static int  bce_miibus_read_reg		(device_t, int, int);
253 static int  bce_miibus_write_reg	(device_t, int, int, int);
254 static void bce_miibus_statchg		(device_t);
255 
256 
257 /****************************************************************************/
258 /* BCE NVRAM Access Routines                                                */
259 /****************************************************************************/
260 static int  bce_acquire_nvram_lock	(struct bce_softc *);
261 static int  bce_release_nvram_lock	(struct bce_softc *);
262 static void bce_enable_nvram_access	(struct bce_softc *);
263 static void	bce_disable_nvram_access(struct bce_softc *);
264 static int  bce_nvram_read_dword	(struct bce_softc *, u32, u8 *, u32);
265 static int  bce_init_nvram			(struct bce_softc *);
266 static int  bce_nvram_read			(struct bce_softc *, u32, u8 *, int);
267 static int  bce_nvram_test			(struct bce_softc *);
268 #ifdef BCE_NVRAM_WRITE_SUPPORT
269 static int  bce_enable_nvram_write	(struct bce_softc *);
270 static void bce_disable_nvram_write	(struct bce_softc *);
271 static int  bce_nvram_erase_page	(struct bce_softc *, u32);
272 static int  bce_nvram_write_dword	(struct bce_softc *, u32, u8 *, u32);
273 static int  bce_nvram_write			(struct bce_softc *, u32, u8 *, int);
274 #endif
275 
276 /****************************************************************************/
277 /*                                                                          */
278 /****************************************************************************/
279 static void bce_dma_map_addr		(void *, bus_dma_segment_t *, int, int);
280 static int  bce_dma_alloc			(device_t);
281 static void bce_dma_free			(struct bce_softc *);
282 static void bce_release_resources	(struct bce_softc *);
283 
284 /****************************************************************************/
285 /* BCE Firmware Synchronization and Load                                    */
286 /****************************************************************************/
287 static int  bce_fw_sync				(struct bce_softc *, u32);
288 static void bce_load_rv2p_fw		(struct bce_softc *, u32 *, u32, u32);
289 static void bce_load_cpu_fw			(struct bce_softc *, struct cpu_reg *, struct fw_info *);
290 static void bce_init_cpus			(struct bce_softc *);
291 
292 static void bce_stop				(struct bce_softc *);
293 static int  bce_reset				(struct bce_softc *, u32);
294 static int  bce_chipinit 			(struct bce_softc *);
295 static int  bce_blockinit 			(struct bce_softc *);
296 static int  bce_get_rx_buf			(struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);
297 static int  bce_get_pg_buf			(struct bce_softc *, struct mbuf *, u16 *, u16 *);
298 
299 static int  bce_init_tx_chain		(struct bce_softc *);
300 static void bce_free_tx_chain		(struct bce_softc *);
301 
302 static int  bce_init_rx_chain		(struct bce_softc *);
303 static void bce_fill_rx_chain		(struct bce_softc *);
304 static void bce_free_rx_chain		(struct bce_softc *);
305 
306 static int  bce_init_pg_chain		(struct bce_softc *);
307 static void bce_fill_pg_chain		(struct bce_softc *);
308 static void bce_free_pg_chain		(struct bce_softc *);
309 
310 static int  bce_tx_encap			(struct bce_softc *, struct mbuf **);
311 static void bce_start_locked		(struct ifnet *);
312 static void bce_start				(struct ifnet *);
313 static int  bce_ioctl				(struct ifnet *, u_long, caddr_t);
314 static void bce_watchdog			(struct bce_softc *);
315 static int  bce_ifmedia_upd			(struct ifnet *);
316 static void bce_ifmedia_upd_locked	(struct ifnet *);
317 static void bce_ifmedia_sts			(struct ifnet *, struct ifmediareq *);
318 static void bce_init_locked			(struct bce_softc *);
319 static void bce_init				(void *);
320 static void bce_mgmt_init_locked	(struct bce_softc *sc);
321 
322 static void bce_init_ctx			(struct bce_softc *);
323 static void bce_get_mac_addr		(struct bce_softc *);
324 static void bce_set_mac_addr		(struct bce_softc *);
325 static void bce_phy_intr			(struct bce_softc *);
326 static inline u16 bce_get_hw_rx_cons(struct bce_softc *);
327 static void bce_rx_intr				(struct bce_softc *);
328 static void bce_tx_intr				(struct bce_softc *);
329 static void bce_disable_intr		(struct bce_softc *);
330 static void bce_enable_intr			(struct bce_softc *);
331 static void bce_intr				(void *);
332 static void bce_set_rx_mode			(struct bce_softc *);
333 static void bce_stats_update		(struct bce_softc *);
334 static void bce_tick				(void *);
335 static void bce_pulse				(void *);
336 static void bce_add_sysctls			(struct bce_softc *);
337 
338 
339 /****************************************************************************/
340 /* FreeBSD device dispatch table.                                           */
341 /****************************************************************************/
342 static device_method_t bce_methods[] = {
343 	/* Device interface (device_if.h) */
344 	DEVMETHOD(device_probe,		bce_probe),
345 	DEVMETHOD(device_attach,	bce_attach),
346 	DEVMETHOD(device_detach,	bce_detach),
347 	DEVMETHOD(device_shutdown,	bce_shutdown),
348 /* Supported by device interface but not used here. */
349 /*	DEVMETHOD(device_identify,	bce_identify),      */
350 /*	DEVMETHOD(device_suspend,	bce_suspend),       */
351 /*	DEVMETHOD(device_resume,	bce_resume),        */
352 /*	DEVMETHOD(device_quiesce,	bce_quiesce),       */
353 
354 	/* Bus interface (bus_if.h) */
355 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
356 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
357 
358 	/* MII interface (miibus_if.h) */
359 	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
360 	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
361 	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),
362 /* Supported by MII interface but not used here.       */
363 /*	DEVMETHOD(miibus_linkchg,	bce_miibus_linkchg),   */
364 /*	DEVMETHOD(miibus_mediainit,	bce_miibus_mediainit), */
365 
366 	{ 0, 0 }
367 };
368 
369 static driver_t bce_driver = {
370 	"bce",
371 	bce_methods,
372 	sizeof(struct bce_softc)
373 };
374 
375 static devclass_t bce_devclass;
376 
377 MODULE_DEPEND(bce, pci, 1, 1, 1);
378 MODULE_DEPEND(bce, ether, 1, 1, 1);
379 MODULE_DEPEND(bce, miibus, 1, 1, 1);
380 
381 DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, 0, 0);
382 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
383 
384 
385 /****************************************************************************/
386 /* Tunable device values                                                    */
387 /****************************************************************************/
388 static int bce_tso_enable = TRUE;
389 static int bce_msi_enable = 1;
390 
391 SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters");
392 
393 /* Allowable values are TRUE or FALSE */
394 TUNABLE_INT("hw.bce.tso_enable", &bce_tso_enable);
395 SYSCTL_UINT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0,
396 "TSO Enable/Disable");
397 
398 /* Allowable values are 0 (IRQ only) and 1 (IRQ or MSI) */
399 TUNABLE_INT("hw.bce.msi_enable", &bce_msi_enable);
400 SYSCTL_UINT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0,
401 "MSI | INTx selector");
402 
403 /* ToDo: Add tunable to enable/disable strict MTU handling. */
404 /* Currently allows "loose" RX MTU checking (i.e. sets the  */
405 /* h/w RX MTU to the size of the largest receive buffer, or */
406 /* 2048 bytes).                                             */
407 
408 /****************************************************************************/
409 /* Device probe function.                                                   */
410 /*                                                                          */
411 /* Compares the device to the driver's list of supported devices and        */
412 /* reports back to the OS whether this is the right driver for the device.  */
413 /*                                                                          */
414 /* Returns:                                                                 */
415 /*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
416 /****************************************************************************/
417 static int
418 bce_probe(device_t dev)
419 {
420 	struct bce_type *t;
421 	struct bce_softc *sc;
422 	char *descbuf;
423 	u16 vid = 0, did = 0, svid = 0, sdid = 0;
424 
425 	t = bce_devs;
426 
427 	sc = device_get_softc(dev);
428 	bzero(sc, sizeof(struct bce_softc));
429 	sc->bce_unit = device_get_unit(dev);
430 	sc->bce_dev = dev;
431 
432 	/* Get the data for the device to be probed. */
433 	vid  = pci_get_vendor(dev);
434 	did  = pci_get_device(dev);
435 	svid = pci_get_subvendor(dev);
436 	sdid = pci_get_subdevice(dev);
437 
438 	DBPRINT(sc, BCE_VERBOSE_LOAD,
439 		"%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
440 		"SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
441 
442 	/* Look through the list of known devices for a match. */
443 	while(t->bce_name != NULL) {
444 
445 		if ((vid == t->bce_vid) && (did == t->bce_did) &&
446 			((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) &&
447 			((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) {
448 
449 			descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
450 
451 			if (descbuf == NULL)
452 				return(ENOMEM);
453 
454 			/* Print out the device identity. */
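			/* The revision letter comes from the upper nibble of the
			 * PCI revision ID ('A' + value); the lower nibble holds
			 * the numeric revision. */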
455 			snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
456 				t->bce_name,
457 			    (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
458 			    (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
459 
460 			device_set_desc_copy(dev, descbuf);
461 			free(descbuf, M_TEMP);
462 			return(BUS_PROBE_DEFAULT);
463 		}
464 		t++;
465 	}
466 
467 	return(ENXIO);
468 }
469 
470 
471 /****************************************************************************/
472 /* Device attach function.                                                  */
473 /*                                                                          */
474 /* Allocates device resources, performs secondary chip identification,      */
475 /* resets and initializes the hardware, and initializes driver instance     */
476 /* variables.                                                               */
477 /*                                                                          */
478 /* Returns:                                                                 */
479 /*   0 on success, positive value on failure.                               */
480 /****************************************************************************/
481 static int
482 bce_attach(device_t dev)
483 {
484 	struct bce_softc *sc;
485 	struct ifnet *ifp;
486 	u32 val;
487 	int count, rid, rc = 0;
488 
489 	sc = device_get_softc(dev);
490 	sc->bce_dev = dev;
491 
492 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
493 
494 	sc->bce_unit = device_get_unit(dev);
495 
496 	/* Set initial device and PHY flags */
497 	sc->bce_flags = 0;
498 	sc->bce_phy_flags = 0;
499 
500 	pci_enable_busmaster(dev);
501 
502 	/* Allocate PCI memory resources. */
503 	rid = PCIR_BAR(0);
504 	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
505 		&rid, RF_ACTIVE);
506 
507 	if (sc->bce_res_mem == NULL) {
508 		BCE_PRINTF("%s(%d): PCI memory allocation failed\n",
509 			__FILE__, __LINE__);
510 		rc = ENXIO;
511 		goto bce_attach_fail;
512 	}
513 
514 	/* Get various resource handles. */
515 	sc->bce_btag    = rman_get_bustag(sc->bce_res_mem);
516 	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
517 	sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res_mem);
518 
519 	/* If MSI is enabled in the driver, get the vector count. */
520 	count = bce_msi_enable ? pci_msi_count(dev) : 0;
521 
522 	/* Allocate PCI IRQ resources. */
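	/* Use MSI only if the device advertises exactly one message and the
	 * allocation below actually yields a single vector; otherwise fall
	 * back to a legacy INTx interrupt (rid 0). */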
523 	if (count == 1 && pci_alloc_msi(dev, &count) == 0 && count == 1) {
524 		rid = 1;
525 		sc->bce_flags |= BCE_USING_MSI_FLAG;
526 		DBPRINT(sc, BCE_VERBOSE_LOAD,
527 			"Allocating %d MSI interrupt(s)\n", count);
528 	} else {
529 		rid = 0;
530 		DBPRINT(sc, BCE_VERBOSE_LOAD, "Allocating IRQ interrupt\n");
531 	}
532 
533 	sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
534 	    RF_SHAREABLE | RF_ACTIVE);
535 
536 	if (sc->bce_res_irq == NULL) {
537 		BCE_PRINTF("%s(%d): PCI map interrupt failed!\n",
538 			__FILE__, __LINE__);
539 		rc = ENXIO;
540 		goto bce_attach_fail;
541 	}
542 
543 	/* Initialize mutex for the current device instance. */
544 	BCE_LOCK_INIT(sc, device_get_nameunit(dev));
545 
546 	/*
547 	 * Configure byte swap and enable indirect register access.
548 	 * Rely on CPU to do target byte swapping on big endian systems.
549 	 * Access to registers outside of PCI configuration space is not
550 	 * valid until this is done.
551 	 */
552 	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
553 			       BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
554 			       BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
555 
556 	/* Save ASIC revision info. */
557 	sc->bce_chipid =  REG_RD(sc, BCE_MISC_ID);
558 
559 	/* Weed out any non-production controller revisions. */
560 	switch(BCE_CHIP_ID(sc)) {
561 		case BCE_CHIP_ID_5706_A0:
562 		case BCE_CHIP_ID_5706_A1:
563 		case BCE_CHIP_ID_5708_A0:
564 		case BCE_CHIP_ID_5708_B0:
565 			BCE_PRINTF("%s(%d): Unsupported controller revision (%c%d)!\n",
566 				__FILE__, __LINE__,
567 				(((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
568 			    (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
569 			rc = ENODEV;
570 			goto bce_attach_fail;
571 	}
572 
573 	/*
574 	 * The embedded PCIe to PCI-X bridge (EPB)
575 	 * in the 5708 cannot address memory above
576 	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
577 	 */
578 	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
579 		sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
580 	else
581 		sc->max_bus_addr = BUS_SPACE_MAXADDR;
582 
583 	/*
584 	 * Find the base address for shared memory access.
585 	 * Newer versions of bootcode use a signature and offset
586 	 * while older versions use a fixed address.
587 	 */
588 	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
589 	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
590 		sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0);
591 	else
592 		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
593 
594 	DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): bce_shmem_base = 0x%08X\n",
595 		__FUNCTION__, sc->bce_shmem_base);
596 
597 	/* Fetch the bootcode revision. */
598 	sc->bce_fw_ver = REG_RD_IND(sc, sc->bce_shmem_base +
599 		BCE_DEV_INFO_BC_REV);
600 
601 	/* Check if any management firmware is running. */
602 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE);
603 	if (val & (BCE_PORT_FEATURE_ASF_ENABLED | BCE_PORT_FEATURE_IMD_ENABLED))
604 		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
605 
606 	/* Get PCI bus information (speed and type). */
607 	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
608 	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
609 		u32 clkreg;
610 
611 		sc->bce_flags |= BCE_PCIX_FLAG;
612 
613 		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);
614 
615 		clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
616 		switch (clkreg) {
617 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
618 			sc->bus_speed_mhz = 133;
619 			break;
620 
621 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
622 			sc->bus_speed_mhz = 100;
623 			break;
624 
625 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
626 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
627 			sc->bus_speed_mhz = 66;
628 			break;
629 
630 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
631 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
632 			sc->bus_speed_mhz = 50;
633 			break;
634 
635 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
636 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
637 		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
638 			sc->bus_speed_mhz = 33;
639 			break;
640 		}
641 	} else {
642 		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
643 			sc->bus_speed_mhz = 66;
644 		else
645 			sc->bus_speed_mhz = 33;
646 	}
647 
648 	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
649 		sc->bce_flags |= BCE_PCI_32BIT_FLAG;
650 
651 	/* Reset the controller and announce to bootcode that driver is present. */
652 	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
653 		BCE_PRINTF("%s(%d): Controller reset failed!\n",
654 			__FILE__, __LINE__);
655 		rc = ENXIO;
656 		goto bce_attach_fail;
657 	}
658 
659 	/* Initialize the controller. */
660 	if (bce_chipinit(sc)) {
661 		BCE_PRINTF("%s(%d): Controller initialization failed!\n",
662 			__FILE__, __LINE__);
663 		rc = ENXIO;
664 		goto bce_attach_fail;
665 	}
666 
667 	/* Perform NVRAM test. */
668 	if (bce_nvram_test(sc)) {
669 		BCE_PRINTF("%s(%d): NVRAM test failed!\n",
670 			__FILE__, __LINE__);
671 		rc = ENXIO;
672 		goto bce_attach_fail;
673 	}
674 
675 	/* Fetch the permanent Ethernet MAC address. */
676 	bce_get_mac_addr(sc);
677 
678 	/*
679 	 * Trip points control how many BDs
680 	 * should be ready before generating an
681 	 * interrupt while ticks control how long
682 	 * a BD can sit in the chain before
683 	 * generating an interrupt.  Set the default
684 	 * values for the RX and TX chains.
685 	 */
686 
687 #ifdef BCE_DEBUG
688 	/* Force more frequent interrupts. */
689 	sc->bce_tx_quick_cons_trip_int = 1;
690 	sc->bce_tx_quick_cons_trip     = 1;
691 	sc->bce_tx_ticks_int           = 0;
692 	sc->bce_tx_ticks               = 0;
693 
694 	sc->bce_rx_quick_cons_trip_int = 1;
695 	sc->bce_rx_quick_cons_trip     = 1;
696 	sc->bce_rx_ticks_int           = 0;
697 	sc->bce_rx_ticks               = 0;
698 #else
699 	/* Improve throughput at the expense of increased latency. */
700 	sc->bce_tx_quick_cons_trip_int = 20;
701 	sc->bce_tx_quick_cons_trip     = 20;
702 	sc->bce_tx_ticks_int           = 80;
703 	sc->bce_tx_ticks               = 80;
704 
705 	sc->bce_rx_quick_cons_trip_int = 6;
706 	sc->bce_rx_quick_cons_trip     = 6;
707 	sc->bce_rx_ticks_int           = 18;
708 	sc->bce_rx_ticks               = 18;
709 #endif
710 
711 	/* Update statistics once every second. */
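	/* (1,000,000 usec = 1 second; the low byte is masked off to keep the
	 * value within the valid bits of the stats ticks field.) */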
712 	sc->bce_stats_ticks = 1000000 & 0xffff00;
713 
714 	/*
715 	 * The SerDes based NetXtreme II controllers
716 	 * that support 2.5Gb operation (currently
717 	 * 5708S) use a PHY at address 2, otherwise
718 	 * the PHY is present at address 1.
719 	 */
720 	sc->bce_phy_addr = 1;
721 
722 	if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
723 		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
724 		sc->bce_flags |= BCE_NO_WOL_FLAG;
725 		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
726 			sc->bce_phy_addr = 2;
727 			val = REG_RD_IND(sc, sc->bce_shmem_base +
728 					 BCE_SHARED_HW_CFG_CONFIG);
729 			if (val & BCE_SHARED_HW_CFG_PHY_2_5G) {
730 				sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
731 				DBPRINT(sc, BCE_INFO_LOAD, "Found 2.5Gb capable adapter\n");
732 			}
733 		}
734 	}
735 
736 	/* Store data needed by PHY driver for backplane applications */
737 	sc->bce_shared_hw_cfg = REG_RD_IND(sc, sc->bce_shmem_base +
738 		BCE_SHARED_HW_CFG_CONFIG);
739 	sc->bce_port_hw_cfg   = REG_RD_IND(sc, sc->bce_shmem_base +
740 		BCE_PORT_HW_CFG_CONFIG);
741 
742 	/* Allocate DMA memory resources. */
743 	if (bce_dma_alloc(dev)) {
744 		BCE_PRINTF("%s(%d): DMA resource allocation failed!\n",
745 		    __FILE__, __LINE__);
746 		rc = ENXIO;
747 		goto bce_attach_fail;
748 	}
749 
750 	/* Allocate an ifnet structure. */
751 	ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
752 	if (ifp == NULL) {
753 		BCE_PRINTF("%s(%d): Interface allocation failed!\n",
754 			__FILE__, __LINE__);
755 		rc = ENXIO;
756 		goto bce_attach_fail;
757 	}
758 
759 	/* Initialize the ifnet interface. */
760 	ifp->if_softc        = sc;
761 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
762 	ifp->if_flags        = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
763 	ifp->if_ioctl        = bce_ioctl;
764 	ifp->if_start        = bce_start;
765 	ifp->if_init         = bce_init;
766 	ifp->if_mtu          = ETHERMTU;
767 
768 	if (bce_tso_enable) {
769 		ifp->if_hwassist = BCE_IF_HWASSIST | CSUM_TSO;
770 		ifp->if_capabilities = BCE_IF_CAPABILITIES | IFCAP_TSO4;
771 	} else {
772 		ifp->if_hwassist = BCE_IF_HWASSIST;
773 		ifp->if_capabilities = BCE_IF_CAPABILITIES;
774 	}
775 
776 	ifp->if_capenable    = ifp->if_capabilities;
777 
778 	/* Use standard mbuf sizes for buffer allocation. */
779 #ifdef BCE_USE_SPLIT_HEADER
780 	sc->rx_bd_mbuf_alloc_size = MHLEN;
781 #else
782 	sc->rx_bd_mbuf_alloc_size = MCLBYTES;
783 #endif
784 	sc->pg_bd_mbuf_alloc_size = MCLBYTES;
785 
786 	ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD;
787 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
788 	IFQ_SET_READY(&ifp->if_snd);
789 
790 	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
791 		ifp->if_baudrate = IF_Mbps(2500ULL);
792 	else
793 		ifp->if_baudrate = IF_Mbps(1000);
794 
795 	/* Check for an MII child bus by probing the PHY. */
796 	if (mii_phy_probe(dev, &sc->bce_miibus, bce_ifmedia_upd,
797 		bce_ifmedia_sts)) {
798 		BCE_PRINTF("%s(%d): No PHY found on child MII bus!\n",
799 			__FILE__, __LINE__);
800 		rc = ENXIO;
801 		goto bce_attach_fail;
802 	}
803 
804 	/* Attach to the Ethernet interface list. */
805 	ether_ifattach(ifp, sc->eaddr);
806 
807 #if __FreeBSD_version < 500000
808 	callout_init(&sc->bce_tick_callout);
809 	callout_init(&sc->bce_pulse_callout);
810 #else
811 	callout_init_mtx(&sc->bce_tick_callout, &sc->bce_mtx, 0);
812 	callout_init_mtx(&sc->bce_pulse_callout, &sc->bce_mtx, 0);
813 #endif
814 
815 	/* Hookup IRQ last. */
816 	rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL,
817 	   bce_intr, sc, &sc->bce_intrhand);
818 
819 	if (rc) {
820 		BCE_PRINTF("%s(%d): Failed to setup IRQ!\n",
821 			__FILE__, __LINE__);
822 		bce_detach(dev);
823 		goto bce_attach_exit;
824 	}
825 
826 	/*
827 	 * At this point we've acquired all the resources
828 	 * we need to run so there's no turning back, we're
829 	 * cleared for launch.
830 	 */
831 
832 	/* Print some important debugging info. */
833 	DBRUNMSG(BCE_INFO, bce_dump_driver_state(sc));
834 
835 	/* Add the supported sysctls to the kernel. */
836 	bce_add_sysctls(sc);
837 
838 	BCE_LOCK(sc);
839 	/*
840 	 * The chip reset earlier notified the bootcode that
841 	 * a driver is present.  We now need to start our pulse
842 	 * routine so that the bootcode is reminded that we're
843 	 * still running.
844 	 */
845 	bce_pulse(sc);
846 
847 	bce_mgmt_init_locked(sc);
848 	BCE_UNLOCK(sc);
849 
850 	/* Finally, print some useful adapter info */
851 	BCE_PRINTF("ASIC (0x%08X); ", sc->bce_chipid);
852 	printf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
853 		((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));
854 	printf("Bus (PCI%s, %s, %dMHz); ",
855 		((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
856 		((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
857 		sc->bus_speed_mhz);
858 	printf("F/W (0x%08X); Flags( ", sc->bce_fw_ver);
859 	if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
860 		printf("MFW ");
861 	if (sc->bce_flags & BCE_USING_MSI_FLAG)
862 		printf("MSI ");
863 	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
864 		printf("2.5G ");
865 	printf(")\n");
866 
867 	DBPRINT(sc, BCE_FATAL, "%s(): sc = %p\n",
868 		__FUNCTION__, sc);
869 
870 	goto bce_attach_exit;
871 
872 bce_attach_fail:
873 	bce_release_resources(sc);
874 
875 bce_attach_exit:
876 
877 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
878 
879 	return(rc);
880 }
881 
882 
883 /****************************************************************************/
884 /* Device detach function.                                                  */
885 /*                                                                          */
886 /* Stops the controller, resets the controller, and releases resources.     */
887 /*                                                                          */
888 /* Returns:                                                                 */
889 /*   0 on success, positive value on failure.                               */
890 /****************************************************************************/
891 static int
892 bce_detach(device_t dev)
893 {
894 	struct bce_softc *sc = device_get_softc(dev);
895 	struct ifnet *ifp;
896 	u32 msg;
897 
898 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
899 
900 	ifp = sc->bce_ifp;
901 
902 	/* Stop and reset the controller. */
903 	BCE_LOCK(sc);
904 
905 	/* Stop the pulse so the bootcode can go to driver absent state. */
906 	callout_stop(&sc->bce_pulse_callout);
907 
908 	bce_stop(sc);
909 	if (sc->bce_flags & BCE_NO_WOL_FLAG)
910 		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
911 	else
912 		msg = BCE_DRV_MSG_CODE_UNLOAD;
913 	bce_reset(sc, msg);
914 
915 	BCE_UNLOCK(sc);
916 
917 	ether_ifdetach(ifp);
918 
919 	/* If we have a child device on the MII bus remove it too. */
920 	bus_generic_detach(dev);
921 	device_delete_child(dev, sc->bce_miibus);
922 
923 	/* Release all remaining resources. */
924 	bce_release_resources(sc);
925 
926 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
927 
928 	return(0);
929 }
930 
931 
932 /****************************************************************************/
933 /* Device shutdown function.                                                */
934 /*                                                                          */
935 /* Stops and resets the controller.                                         */
936 /*                                                                          */
937 /* Returns:                                                                 */
938 /*   0 on success, positive value on failure.                               */
939 /****************************************************************************/
940 static int
941 bce_shutdown(device_t dev)
942 {
943 	struct bce_softc *sc = device_get_softc(dev);
944 	u32 msg;
945 
946 	DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Entering %s()\n", __FUNCTION__);
947 
948 	BCE_LOCK(sc);
949 	bce_stop(sc);
950 	if (sc->bce_flags & BCE_NO_WOL_FLAG)
951 		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
952 	else
953 		msg = BCE_DRV_MSG_CODE_UNLOAD;
954 	bce_reset(sc, msg);
955 	BCE_UNLOCK(sc);
956 
957 	DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Exiting %s()\n", __FUNCTION__);
958 
959 	return (0);
960 }
961 
962 
963 /****************************************************************************/
964 /* Indirect register read.                                                  */
965 /*                                                                          */
966 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
967 /* configuration space.  Using this mechanism avoids issues with posted     */
968 /* reads but is much slower than memory-mapped I/O.                         */
969 /*                                                                          */
970 /* Returns:                                                                 */
971 /*   The value of the register.                                             */
972 /****************************************************************************/
973 static u32
974 bce_reg_rd_ind(struct bce_softc *sc, u32 offset)
975 {
976 	device_t dev;
977 	dev = sc->bce_dev;
978 
979 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
980 #ifdef BCE_DEBUG
981 	{
982 		u32 val;
983 		val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
984 		DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
985 			__FUNCTION__, offset, val);
986 		return val;
987 	}
988 #else
989 	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
990 #endif
991 }
992 
993 
994 /****************************************************************************/
995 /* Indirect register write.                                                 */
996 /*                                                                          */
997 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
998 /* configuration space.  Using this mechanism avoids issues with posted     */
999 /* writes but is much slower than memory-mapped I/O.                        */
1000 /*                                                                          */
1001 /* Returns:                                                                 */
1002 /*   Nothing.                                                               */
1003 /****************************************************************************/
1004 static void
1005 bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
1006 {
1007 	device_t dev;
1008 	dev = sc->bce_dev;
1009 
1010 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
1011 		__FUNCTION__, offset, val);
1012 
1013 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1014 	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
1015 }
1016 
1017 
1018 #ifdef BCE_DEBUG
1019 /****************************************************************************/
1020 /* Context memory read.                                                     */
1021 /*                                                                          */
1022 /* The NetXtreme II controller uses context memory to track connection      */
1023 /* information for L2 and higher network protocols.                         */
1024 /*                                                                          */
1025 /* Returns:                                                                 */
1026 /*   The requested 32 bit value of context memory.                          */
1027 /****************************************************************************/
1028 static u32
1029 bce_ctx_rd(struct bce_softc *sc, u32 cid_addr, u32 offset)
1030 {
1031 	u32 val;
1032 
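	/* Context memory is read indirectly: write the absolute context
	 * address to the data address register, then read the word back
	 * through the data register. */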
1033 	offset += cid_addr;
1034 	REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1035 	val = REG_RD(sc, BCE_CTX_DATA);
1036 
1037 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1038 		"val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val);
1039 
1040 	return(val);
1041 }
1042 #endif
1043 
1044 
1045 /****************************************************************************/
1046 /* Context memory write.                                                    */
1047 /*                                                                          */
1048 /* The NetXtreme II controller uses context memory to track connection      */
1049 /* information for L2 and higher network protocols.                         */
1050 /*                                                                          */
1051 /* Returns:                                                                 */
1052 /*   Nothing.                                                               */
1053 /****************************************************************************/
1054 static void
1055 bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 offset, u32 val)
1056 {
1057 
1058 	DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1059 		"val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val);
1060 
1061 	offset += cid_addr;
1062 	REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1063 	REG_WR(sc, BCE_CTX_DATA, val);
1064 }
1065 
1066 
1067 /****************************************************************************/
1068 /* PHY register read.                                                       */
1069 /*                                                                          */
1070 /* Implements register reads on the MII bus.                                */
1071 /*                                                                          */
1072 /* Returns:                                                                 */
1073 /*   The value of the register.                                             */
1074 /****************************************************************************/
1075 static int
1076 bce_miibus_read_reg(device_t dev, int phy, int reg)
1077 {
1078 	struct bce_softc *sc;
1079 	u32 val;
1080 	int i;
1081 
1082 	sc = device_get_softc(dev);
1083 
1084 	/* Make sure we are accessing the correct PHY address. */
1085 	if (phy != sc->bce_phy_addr) {
1086 		DBPRINT(sc, BCE_EXCESSIVE_PHY, "Invalid PHY address %d for PHY read!\n", phy);
1087 		return(0);
1088 	}
1089 
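	/* If the EMAC is auto-polling the PHY, temporarily disable
	 * auto-polling so it does not collide with the manual MDIO
	 * access below; it is re-enabled once the read completes. */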
1090 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1091 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1092 		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1093 
1094 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1095 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1096 
1097 		DELAY(40);
1098 	}
1099 
1100 	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
1101 		BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
1102 		BCE_EMAC_MDIO_COMM_START_BUSY;
1103 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
1104 
1105 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1106 		DELAY(10);
1107 
1108 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1109 		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1110 			DELAY(5);
1111 
1112 			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1113 			val &= BCE_EMAC_MDIO_COMM_DATA;
1114 
1115 			break;
1116 		}
1117 	}
1118 
1119 	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
1120 		BCE_PRINTF("%s(%d): Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
1121 			__FILE__, __LINE__, phy, reg);
1122 		val = 0x0;
1123 	} else {
1124 		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1125 	}
1126 
1127 	DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1128 		__FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);
1129 
1130 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1131 		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1132 		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1133 
1134 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1135 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1136 
1137 		DELAY(40);
1138 	}
1139 
1140 	return (val & 0xffff);
1141 
1142 }
1143 
1144 
1145 /****************************************************************************/
1146 /* PHY register write.                                                      */
1147 /*                                                                          */
1148 /* Implements register writes on the MII bus.                               */
1149 /*                                                                          */
1150 /* Returns:                                                                 */
1151 /*   0 on success.                                                          */
1152 /****************************************************************************/
1153 static int
1154 bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1155 {
1156 	struct bce_softc *sc;
1157 	u32 val1;
1158 	int i;
1159 
1160 	sc = device_get_softc(dev);
1161 
1162 	/* Make sure we are accessing the correct PHY address. */
1163 	if (phy != sc->bce_phy_addr) {
1164 		DBPRINT(sc, BCE_EXCESSIVE_PHY, "Invalid PHY address %d for PHY write!\n", phy);
1165 		return(0);
1166 	}
1167 
1168 	DBPRINT(sc, BCE_EXCESSIVE, "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1169 		__FUNCTION__, phy, (u16) reg & 0xffff, (u16) val & 0xffff);
1170 
1171 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1172 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1173 		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1174 
1175 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1176 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1177 
1178 		DELAY(40);
1179 	}
1180 
1181 	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1182 		BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1183 		BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1184 	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1185 
1186 	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1187 		DELAY(10);
1188 
1189 		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1190 		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1191 			DELAY(5);
1192 			break;
1193 		}
1194 	}
1195 
1196 	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1197 		BCE_PRINTF("%s(%d): PHY write timeout!\n",
1198 			__FILE__, __LINE__);
1199 
1200 	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1201 		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1202 		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1203 
1204 		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1205 		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1206 
1207 		DELAY(40);
1208 	}
1209 
1210 	return 0;
1211 }
1212 
1213 
1214 /****************************************************************************/
1215 /* MII bus status change.                                                   */
1216 /*                                                                          */
1217 /* Called by the MII bus driver when the PHY establishes link to set the    */
1218 /* MAC interface registers.                                                 */
1219 /*                                                                          */
1220 /* Returns:                                                                 */
1221 /*   Nothing.                                                               */
1222 /****************************************************************************/
1223 static void
1224 bce_miibus_statchg(device_t dev)
1225 {
1226 	struct bce_softc *sc;
1227 	struct mii_data *mii;
1228 	int val;
1229 
1230 	sc = device_get_softc(dev);
1231 
1232 	mii = device_get_softc(sc->bce_miibus);
1233 
1234 	val = REG_RD(sc, BCE_EMAC_MODE);
1235 	val &= ~(BCE_EMAC_MODE_PORT | BCE_EMAC_MODE_HALF_DUPLEX |
1236 		BCE_EMAC_MODE_MAC_LOOP | BCE_EMAC_MODE_FORCE_LINK |
1237 		BCE_EMAC_MODE_25G);
1238 
1239 	/* Set MII or GMII interface based on the speed negotiated by the PHY. */
1240 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1241 	case IFM_10_T:
1242 		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
1243 			DBPRINT(sc, BCE_INFO, "Enabling 10Mb interface.\n");
1244 			val |= BCE_EMAC_MODE_PORT_MII_10;
1245 			break;
1246 		}
1247 		/* fall-through */
1248 	case IFM_100_TX:
1249 		DBPRINT(sc, BCE_INFO, "Enabling MII interface.\n");
1250 		val |= BCE_EMAC_MODE_PORT_MII;
1251 		break;
1252 	case IFM_2500_SX:
1253 		DBPRINT(sc, BCE_INFO, "Enabling 2.5G MAC mode.\n");
1254 		val |= BCE_EMAC_MODE_25G;
1255 		/* fall-through */
1256 	case IFM_1000_T:
1257 	case IFM_1000_SX:
1258 		DBPRINT(sc, BCE_INFO, "Enabling GMII interface.\n");
1259 		val |= BCE_EMAC_MODE_PORT_GMII;
1260 		break;
1261 	default:
1262 		DBPRINT(sc, BCE_INFO, "Unknown speed, enabling default GMII "
1263 			"interface.\n");
1264 		val |= BCE_EMAC_MODE_PORT_GMII;
1265 	}
1266 
1267 	/* Set half or full duplex based on the duplex mode negotiated by the PHY. */
1268 	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
1269 		DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
1270 		val |= BCE_EMAC_MODE_HALF_DUPLEX;
1271 	} else
1272 		DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
1273 
1274 	REG_WR(sc, BCE_EMAC_MODE, val);
1275 
1276 #if 0
1277 	/* ToDo: Enable flow control support in brgphy and bge. */
1278 	/* FLAG0 is set if RX is enabled and FLAG1 if TX is enabled */
1279 	if (mii->mii_media_active & IFM_FLAG0)
1280 		BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
1281 	if (mii->mii_media_active & IFM_FLAG1)
1282 		BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
1283 #endif
1284 
1285 }
1286 
1287 
1288 /****************************************************************************/
1289 /* Acquire NVRAM lock.                                                      */
1290 /*                                                                          */
1291 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
1292 /* Lock 0 is reserved, lock 1 is used by the firmware, and lock 2 is for    */
1293 /* use by the driver.                                                       */
1294 /*                                                                          */
1295 /* Returns:                                                                 */
1296 /*   0 on success, positive value on failure.                               */
1297 /****************************************************************************/
1298 static int
1299 bce_acquire_nvram_lock(struct bce_softc *sc)
1300 {
1301 	u32 val;
1302 	int j;
1303 
1304 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Acquiring NVRAM lock.\n");
1305 
1306 	/* Request access to the flash interface. */
1307 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1308 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1309 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1310 		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1311 			break;
1312 
1313 		DELAY(5);
1314 	}
1315 
1316 	if (j >= NVRAM_TIMEOUT_COUNT) {
1317 		DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1318 		return EBUSY;
1319 	}
1320 
1321 	return 0;
1322 }
1323 
1324 
1325 /****************************************************************************/
1326 /* Release NVRAM lock.                                                      */
1327 /*                                                                          */
1328 /* When the caller is finished accessing NVRAM, the lock must be released.  */
1329 /* Lock 0 is reserved, lock 1 is used by the firmware, and lock 2 is for    */
1330 /* use by the driver.                                                       */
1331 /*                                                                          */
1332 /* Returns:                                                                 */
1333 /*   0 on success, positive value on failure.                               */
1334 /****************************************************************************/
1335 static int
1336 bce_release_nvram_lock(struct bce_softc *sc)
1337 {
1338 	int j;
1339 	u32 val;
1340 
1341 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Releasing NVRAM lock.\n");
1342 
1343 	/*
1344 	 * Relinquish nvram interface.
1345 	 */
1346 	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1347 
1348 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1349 		val = REG_RD(sc, BCE_NVM_SW_ARB);
1350 		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1351 			break;
1352 
1353 		DELAY(5);
1354 	}
1355 
1356 	if (j >= NVRAM_TIMEOUT_COUNT) {
1357 		DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n");
1358 		return EBUSY;
1359 	}
1360 
1361 	return 0;
1362 }
1363 
1364 
1365 #ifdef BCE_NVRAM_WRITE_SUPPORT
1366 /****************************************************************************/
1367 /* Enable NVRAM write access.                                               */
1368 /*                                                                          */
1369 /* Before writing to NVRAM the caller must enable NVRAM writes.             */
1370 /*                                                                          */
1371 /* Returns:                                                                 */
1372 /*   0 on success, positive value on failure.                               */
1373 /****************************************************************************/
1374 static int
1375 bce_enable_nvram_write(struct bce_softc *sc)
1376 {
1377 	u32 val;
1378 
1379 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Enabling NVRAM write.\n");
1380 
1381 	val = REG_RD(sc, BCE_MISC_CFG);
1382 	REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
1383 
1384 	if (!sc->bce_flash_info->buffered) {
1385 		int j;
1386 
1387 		REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1388 		REG_WR(sc, BCE_NVM_COMMAND,	BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
1389 
1390 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1391 			DELAY(5);
1392 
1393 			val = REG_RD(sc, BCE_NVM_COMMAND);
1394 			if (val & BCE_NVM_COMMAND_DONE)
1395 				break;
1396 		}
1397 
1398 		if (j >= NVRAM_TIMEOUT_COUNT) {
1399 			DBPRINT(sc, BCE_WARN, "Timeout enabling NVRAM write!\n");
1400 			return EBUSY;
1401 		}
1402 	}
1403 	return 0;
1404 }
1405 
1406 
1407 /****************************************************************************/
1408 /* Disable NVRAM write access.                                              */
1409 /*                                                                          */
1410 /* When the caller is finished writing to NVRAM, write access must be       */
1411 /* disabled.                                                                */
1412 /*                                                                          */
1413 /* Returns:                                                                 */
1414 /*   Nothing.                                                               */
1415 /****************************************************************************/
1416 static void
1417 bce_disable_nvram_write(struct bce_softc *sc)
1418 {
1419 	u32 val;
1420 
1421 	DBPRINT(sc, BCE_VERBOSE_NVRAM,  "Disabling NVRAM write.\n");
1422 
1423 	val = REG_RD(sc, BCE_MISC_CFG);
1424 	REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1425 }
1426 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1427 
1428 
1429 /****************************************************************************/
1430 /* Enable NVRAM access.                                                     */
1431 /*                                                                          */
1432 /* Before accessing NVRAM for read or write operations the caller must      */
1433 /* enable NVRAM access.                                                     */
1434 /*                                                                          */
1435 /* Returns:                                                                 */
1436 /*   Nothing.                                                               */
1437 /****************************************************************************/
1438 static void
1439 bce_enable_nvram_access(struct bce_softc *sc)
1440 {
1441 	u32 val;
1442 
1443 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Enabling NVRAM access.\n");
1444 
1445 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1446 	/* Enable both bits, even on read. */
1447 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1448 	       val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1449 }
1450 
1451 
1452 /****************************************************************************/
1453 /* Disable NVRAM access.                                                    */
1454 /*                                                                          */
1455 /* When the caller is finished accessing NVRAM access must be disabled.     */
1456 /*                                                                          */
1457 /* Returns:                                                                 */
1458 /*   Nothing.                                                               */
1459 /****************************************************************************/
1460 static void
1461 bce_disable_nvram_access(struct bce_softc *sc)
1462 {
1463 	u32 val;
1464 
1465 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Disabling NVRAM access.\n");
1466 
1467 	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1468 
1469 	/* Disable both bits, even after read. */
1470 	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1471 		val & ~(BCE_NVM_ACCESS_ENABLE_EN |
1472 			BCE_NVM_ACCESS_ENABLE_WR_EN));
1473 }
1474 
1475 
1476 #ifdef BCE_NVRAM_WRITE_SUPPORT
1477 /****************************************************************************/
1478 /* Erase NVRAM page before writing.                                         */
1479 /*                                                                          */
1480 /* Non-buffered flash parts require that a page be erased before it is      */
1481 /* written.                                                                 */
1482 /*                                                                          */
1483 /* Returns:                                                                 */
1484 /*   0 on success, positive value on failure.                               */
1485 /****************************************************************************/
1486 static int
1487 bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
1488 {
1489 	u32 cmd;
1490 	int j;
1491 
1492 	/* Buffered flash doesn't require an erase. */
1493 	if (sc->bce_flash_info->buffered)
1494 		return 0;
1495 
1496 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Erasing NVRAM page.\n");
1497 
1498 	/* Build an erase command. */
1499 	cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
1500 	      BCE_NVM_COMMAND_DOIT;
1501 
1502 	/*
1503 	 * Clear the DONE bit separately, set the NVRAM address to erase,
1504 	 * and issue the erase command.
1505 	 */
1506 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1507 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1508 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1509 
1510 	/* Wait for completion. */
1511 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1512 		u32 val;
1513 
1514 		DELAY(5);
1515 
1516 		val = REG_RD(sc, BCE_NVM_COMMAND);
1517 		if (val & BCE_NVM_COMMAND_DONE)
1518 			break;
1519 	}
1520 
1521 	if (j >= NVRAM_TIMEOUT_COUNT) {
1522 		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
1523 		return EBUSY;
1524 	}
1525 
1526 	return 0;
1527 }
1528 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1529 
1530 
1531 /****************************************************************************/
1532 /* Read a dword (32 bits) from NVRAM.                                       */
1533 /*                                                                          */
1534 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1535 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1536 /*                                                                          */
1537 /* Returns:                                                                 */
1538 /*   0 on success and the 32 bit value read, positive value on failure.     */
1539 /****************************************************************************/
1540 static int
1541 bce_nvram_read_dword(struct bce_softc *sc, u32 offset, u8 *ret_val,
1542 							u32 cmd_flags)
1543 {
1544 	u32 cmd;
1545 	int i, rc = 0;
1546 
1547 	/* Build the command word. */
1548 	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1549 
1550 	/* Calculate the offset for buffered flash. */
1551 	if (sc->bce_flash_info->buffered) {
1552 		offset = ((offset / sc->bce_flash_info->page_size) <<
1553 			   sc->bce_flash_info->page_bits) +
1554 			  (offset % sc->bce_flash_info->page_size);
1555 	}
1556 
1557 	/*
1558 	 * Clear the DONE bit separately, set the address to read,
1559 	 * and issue the read.
1560 	 */
1561 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1562 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1563 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1564 
1565 	/* Wait for completion. */
1566 	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1567 		u32 val;
1568 
1569 		DELAY(5);
1570 
1571 		val = REG_RD(sc, BCE_NVM_COMMAND);
1572 		if (val & BCE_NVM_COMMAND_DONE) {
1573 			val = REG_RD(sc, BCE_NVM_READ);
1574 
1575 			val = bce_be32toh(val);
1576 			memcpy(ret_val, &val, 4);
1577 			break;
1578 		}
1579 	}
1580 
1581 	/* Check for errors. */
1582 	if (i >= NVRAM_TIMEOUT_COUNT) {
1583 		BCE_PRINTF("%s(%d): Timeout error reading NVRAM at offset 0x%08X!\n",
1584 			__FILE__, __LINE__, offset);
1585 		rc = EBUSY;
1586 	}
1587 
1588 	return(rc);
1589 }
1590 
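/*
 * Example of the buffered-flash offset translation above.  The geometry
 * comes from flash_table; assuming, for illustration, a buffered part with
 * page_size = 264 and page_bits = 9, a linear offset of 1000 maps to
 * ((1000 / 264) << 9) + (1000 % 264) = (3 << 9) + 208 = 1744.
 */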
1591 
1592 #ifdef BCE_NVRAM_WRITE_SUPPORT
1593 /****************************************************************************/
1594 /* Write a dword (32 bits) to NVRAM.                                        */
1595 /*                                                                          */
1596 /* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
1597 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
1598 /* enabled NVRAM write access.                                              */
1599 /*                                                                          */
1600 /* Returns:                                                                 */
1601 /*   0 on success, positive value on failure.                               */
1602 /****************************************************************************/
1603 static int
1604 bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
1605 	u32 cmd_flags)
1606 {
1607 	u32 cmd, val32;
1608 	int j;
1609 
1610 	/* Build the command word. */
1611 	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;
1612 
1613 	/* Calculate the offset for buffered flash. */
1614 	if (sc->bce_flash_info->buffered) {
1615 		offset = ((offset / sc->bce_flash_info->page_size) <<
1616 			  sc->bce_flash_info->page_bits) +
1617 			 (offset % sc->bce_flash_info->page_size);
1618 	}
1619 
1620 	/*
1621 	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
1622 	 * set the NVRAM address to write, and issue the write command
1623 	 */
1624 	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1625 	memcpy(&val32, val, 4);
1626 	val32 = htobe32(val32);
1627 	REG_WR(sc, BCE_NVM_WRITE, val32);
1628 	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1629 	REG_WR(sc, BCE_NVM_COMMAND, cmd);
1630 
1631 	/* Wait for completion. */
1632 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1633 		DELAY(5);
1634 
1635 		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
1636 			break;
1637 	}
1638 	if (j >= NVRAM_TIMEOUT_COUNT) {
1639 		BCE_PRINTF("%s(%d): Timeout error writing NVRAM at offset 0x%08X\n",
1640 			__FILE__, __LINE__, offset);
1641 		return EBUSY;
1642 	}
1643 
1644 	return 0;
1645 }
1646 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1647 
1648 
1649 /****************************************************************************/
1650 /* Initialize NVRAM access.                                                 */
1651 /*                                                                          */
1652 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1653 /* access that device.                                                      */
1654 /*                                                                          */
1655 /* Returns:                                                                 */
1656 /*   0 on success, positive value on failure.                               */
1657 /****************************************************************************/
1658 static int
1659 bce_init_nvram(struct bce_softc *sc)
1660 {
1661 	u32 val;
1662 	int j, entry_count, rc;
1663 	struct flash_spec *flash;
1664 
1665 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Entering %s()\n", __FUNCTION__);
1666 
1667 	/* Determine the selected interface. */
1668 	val = REG_RD(sc, BCE_NVM_CFG1);
1669 
1670 	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1671 
1672 	rc = 0;
1673 
1674 	/*
1675 	 * Flash reconfiguration is required to support additional
1676 	 * NVRAM devices not directly supported in hardware.
1677 	 * Check if the flash interface was reconfigured
1678 	 * by the bootcode.
1679 	 */
1680 
1681 	if (val & 0x40000000) {
1682 		/* Flash interface reconfigured by bootcode. */
1683 
1684 		DBPRINT(sc,BCE_INFO_LOAD,
1685 			"bce_init_nvram(): Flash WAS reconfigured.\n");
1686 
1687 		for (j = 0, flash = &flash_table[0]; j < entry_count;
1688 		     j++, flash++) {
1689 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
1690 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1691 				sc->bce_flash_info = flash;
1692 				break;
1693 			}
1694 		}
1695 	} else {
1696 		/* Flash interface not yet reconfigured. */
1697 		u32 mask;
1698 
1699 		DBPRINT(sc,BCE_INFO_LOAD,
1700 			"bce_init_nvram(): Flash was NOT reconfigured.\n");
1701 
1702 		if (val & (1 << 23))
1703 			mask = FLASH_BACKUP_STRAP_MASK;
1704 		else
1705 			mask = FLASH_STRAP_MASK;
1706 
1707 		/* Look for the matching NVRAM device configuration data. */
1708 		for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {
1709 
1710 			/* Check if the device matches any of the known devices. */
1711 			if ((val & mask) == (flash->strapping & mask)) {
1712 				/* Found a device match. */
1713 				sc->bce_flash_info = flash;
1714 
1715 				/* Request access to the flash interface. */
1716 				if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1717 					return rc;
1718 
1719 				/* Reconfigure the flash interface. */
1720 				bce_enable_nvram_access(sc);
1721 				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1722 				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1723 				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1724 				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1725 				bce_disable_nvram_access(sc);
1726 				bce_release_nvram_lock(sc);
1727 
1728 				break;
1729 			}
1730 		}
1731 	}
1732 
1733 	/* Check if a matching device was found. */
1734 	if (j == entry_count) {
1735 		sc->bce_flash_info = NULL;
1736 		BCE_PRINTF("%s(%d): Unknown Flash NVRAM found!\n",
1737 			__FILE__, __LINE__);
1738 		rc = ENODEV;
1739 	}
1740 
1741 	/* Write the flash config data to the shared memory interface. */
1742 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2);
1743 	val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1744 	if (val)
1745 		sc->bce_flash_size = val;
1746 	else
1747 		sc->bce_flash_size = sc->bce_flash_info->total_size;
1748 
1749 	DBPRINT(sc, BCE_INFO_LOAD, "bce_init_nvram() flash->total_size = 0x%08X\n",
1750 		sc->bce_flash_info->total_size);
1751 
1752 	DBPRINT(sc, BCE_VERBOSE_NVRAM, "Exiting %s()\n", __FUNCTION__);
1753 
1754 	return rc;
1755 }
1756 
1757 
1758 /****************************************************************************/
1759 /* Read an arbitrary range of data from NVRAM.                              */
1760 /*                                                                          */
1761 /* Prepares the NVRAM interface for access and reads the requested data     */
1762 /* into the supplied buffer.                                                */
1763 /*                                                                          */
1764 /* Returns:                                                                 */
1765 /*   0 on success and the data read, positive value on failure.             */
1766 /****************************************************************************/
1767 static int
1768 bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf,
1769 	int buf_size)
1770 {
1771 	int rc = 0;
1772 	u32 cmd_flags, offset32, len32, extra;
1773 
1774 	if (buf_size == 0)
1775 		return 0;
1776 
1777 	/* Request access to the flash interface. */
1778 	if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1779 		return rc;
1780 
1781 	/* Enable access to flash interface */
1782 	bce_enable_nvram_access(sc);
1783 
1784 	len32 = buf_size;
1785 	offset32 = offset;
1786 	extra = 0;
1787 
1788 	cmd_flags = 0;
1789 
1790 	if (offset32 & 3) {
1791 		u8 buf[4];
1792 		u32 pre_len;
1793 
1794 		offset32 &= ~3;
1795 		pre_len = 4 - (offset & 3);
1796 
1797 		if (pre_len >= len32) {
1798 			pre_len = len32;
1799 			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1800 		}
1801 		else {
1802 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1803 		}
1804 
1805 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1806 
1807 		if (rc)
1808 			return rc;
1809 
1810 		memcpy(ret_buf, buf + (offset & 3), pre_len);
1811 
1812 		offset32 += 4;
1813 		ret_buf += pre_len;
1814 		len32 -= pre_len;
1815 	}
1816 
1817 	if (len32 & 3) {
1818 		extra = 4 - (len32 & 3);
1819 		len32 = (len32 + 4) & ~3;
1820 	}
1821 
1822 	if (len32 == 4) {
1823 		u8 buf[4];
1824 
1825 		if (cmd_flags)
1826 			cmd_flags = BCE_NVM_COMMAND_LAST;
1827 		else
1828 			cmd_flags = BCE_NVM_COMMAND_FIRST |
1829 				    BCE_NVM_COMMAND_LAST;
1830 
1831 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1832 
1833 		memcpy(ret_buf, buf, 4 - extra);
1834 	}
1835 	else if (len32 > 0) {
1836 		u8 buf[4];
1837 
1838 		/* Read the first word. */
1839 		if (cmd_flags)
1840 			cmd_flags = 0;
1841 		else
1842 			cmd_flags = BCE_NVM_COMMAND_FIRST;
1843 
1844 		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1845 
1846 		/* Advance to the next dword. */
1847 		offset32 += 4;
1848 		ret_buf += 4;
1849 		len32 -= 4;
1850 
1851 		while (len32 > 4 && rc == 0) {
1852 			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1853 
1854 			/* Advance to the next dword. */
1855 			offset32 += 4;
1856 			ret_buf += 4;
1857 			len32 -= 4;
1858 		}
1859 
1860 		if (rc)
1861 			return rc;
1862 
1863 		cmd_flags = BCE_NVM_COMMAND_LAST;
1864 		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1865 
1866 		memcpy(ret_buf, buf, 4 - extra);
1867 	}
1868 
1869 	/* Disable access to flash interface and release the lock. */
1870 	bce_disable_nvram_access(sc);
1871 	bce_release_nvram_lock(sc);
1872 
1873 	return rc;
1874 }
1875 
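/*
 * Worked example of the alignment handling above (illustrative values):
 * a 9 byte read at offset 6 fetches the dwords at offsets 4, 8 and 12,
 * keeping the last pre_len = 2 bytes of the first dword, all of the
 * second, and the first 4 - extra = 3 bytes of the last.
 */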
1876 
1877 #ifdef BCE_NVRAM_WRITE_SUPPORT
1878 /****************************************************************************/
1879 /* Write an arbitrary range of data from NVRAM.                             */
1880 /*                                                                          */
1881 /* Prepares the NVRAM interface for write access and writes the requested   */
1882 /* data from the supplied buffer.  The caller is responsible for            */
1883 /* calculating any appropriate CRCs.                                        */
1884 /*                                                                          */
1885 /* Returns:                                                                 */
1886 /*   0 on success, positive value on failure.                               */
1887 /****************************************************************************/
1888 static int
1889 bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf,
1890 	int buf_size)
1891 {
1892 	u32 written, offset32, len32;
1893 	u8 *buf, start[4], end[4];
1894 	int rc = 0;
1895 	int align_start, align_end;
1896 
1897 	buf = data_buf;
1898 	offset32 = offset;
1899 	len32 = buf_size;
1900 	align_start = align_end = 0;
1901 
1902 	if ((align_start = (offset32 & 3))) {
1903 		offset32 &= ~3;
1904 		len32 += align_start;
1905 		if ((rc = bce_nvram_read(sc, offset32, start, 4)))
1906 			return rc;
1907 	}
1908 
1909 	if (len32 & 3) {
1910 		if ((len32 > 4) || !align_start) {
1911 			align_end = 4 - (len32 & 3);
1912 			len32 += align_end;
1913 			if ((rc = bce_nvram_read(sc, offset32 + len32 - 4,
1914 				end, 4))) {
1915 				return rc;
1916 			}
1917 		}
1918 	}
1919 
1920 	if (align_start || align_end) {
1921 		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
1922 		if (buf == NULL)
1923 			return ENOMEM;
1924 		if (align_start) {
1925 			memcpy(buf, start, 4);
1926 		}
1927 		if (align_end) {
1928 			memcpy(buf + len32 - 4, end, 4);
1929 		}
1930 		memcpy(buf + align_start, data_buf, buf_size);
1931 	}
1932 
1933 	written = 0;
1934 	while ((written < len32) && (rc == 0)) {
1935 		u32 page_start, page_end, data_start, data_end;
1936 		u32 addr, cmd_flags;
1937 		int i;
1938 		u8 flash_buffer[264];
1939 
1940 		/* Find the page_start addr */
1941 		page_start = offset32 + written;
1942 		page_start -= (page_start % sc->bce_flash_info->page_size);
1943 		/* Find the page_end addr */
1944 		page_end = page_start + sc->bce_flash_info->page_size;
1945 		/* Find the data_start addr */
1946 		data_start = (written == 0) ? offset32 : page_start;
1947 		/* Find the data_end addr */
1948 		data_end = (page_end > offset32 + len32) ?
1949 			(offset32 + len32) : page_end;
1950 
1951 		/* Request access to the flash interface. */
1952 		if ((rc = bce_acquire_nvram_lock(sc)) != 0)
1953 			goto nvram_write_end;
1954 
1955 		/* Enable access to flash interface */
1956 		bce_enable_nvram_access(sc);
1957 
1958 		cmd_flags = BCE_NVM_COMMAND_FIRST;
1959 		if (sc->bce_flash_info->buffered == 0) {
1960 			int j;
1961 
1962 			/* Read the whole page into the buffer
1963 			 * (non-buffered flash only). */
1964 			for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
1965 				if (j == (sc->bce_flash_info->page_size - 4)) {
1966 					cmd_flags |= BCE_NVM_COMMAND_LAST;
1967 				}
1968 				rc = bce_nvram_read_dword(sc,
1969 					page_start + j,
1970 					&flash_buffer[j],
1971 					cmd_flags);
1972 
1973 				if (rc)
1974 					goto nvram_write_end;
1975 
1976 				cmd_flags = 0;
1977 			}
1978 		}
1979 
1980 		/* Enable writes to flash interface (unlock write-protect) */
1981 		if ((rc = bce_enable_nvram_write(sc)) != 0)
1982 			goto nvram_write_end;
1983 
1984 		/* Erase the page */
1985 		if ((rc = bce_nvram_erase_page(sc, page_start)) != 0)
1986 			goto nvram_write_end;
1987 
1988 		/* Re-enable the write again for the actual write */
1989 		bce_enable_nvram_write(sc);
1990 
1991 		/* Loop to write back the buffer data from page_start to
1992 		 * data_start */
1993 		i = 0;
1994 		if (sc->bce_flash_info->buffered == 0) {
1995 			for (addr = page_start; addr < data_start;
1996 				addr += 4, i += 4) {
1997 
1998 				rc = bce_nvram_write_dword(sc, addr,
1999 					&flash_buffer[i], cmd_flags);
2000 
2001 				if (rc != 0)
2002 					goto nvram_write_end;
2003 
2004 				cmd_flags = 0;
2005 			}
2006 		}
2007 
2008 		/* Loop to write the new data from data_start to data_end */
2009 		for (addr = data_start; addr < data_end; addr += 4, i++) {
2010 			if ((addr == page_end - 4) ||
2011 				((sc->bce_flash_info->buffered) &&
2012 				 (addr == data_end - 4))) {
2013 
2014 				cmd_flags |= BCE_NVM_COMMAND_LAST;
2015 			}
2016 			rc = bce_nvram_write_dword(sc, addr, buf,
2017 				cmd_flags);
2018 
2019 			if (rc != 0)
2020 				goto nvram_write_end;
2021 
2022 			cmd_flags = 0;
2023 			buf += 4;
2024 		}
2025 
2026 		/* Loop to write back the buffer data from data_end
2027 		 * to page_end */
2028 		if (sc->bce_flash_info->buffered == 0) {
2029 			for (addr = data_end; addr < page_end;
2030 				addr += 4, i += 4) {
2031 
2032 				if (addr == page_end - 4) {
2033 					cmd_flags = BCE_NVM_COMMAND_LAST;
2034 				}
2035 				rc = bce_nvram_write_dword(sc, addr,
2036 					&flash_buffer[i], cmd_flags);
2037 
2038 				if (rc != 0)
2039 					goto nvram_write_end;
2040 
2041 				cmd_flags = 0;
2042 			}
2043 		}
2044 
2045 		/* Disable writes to flash interface (lock write-protect) */
2046 		bce_disable_nvram_write(sc);
2047 
2048 		/* Disable access to flash interface */
2049 		bce_disable_nvram_access(sc);
2050 		bce_release_nvram_lock(sc);
2051 
2052 		/* Increment written */
2053 		written += data_end - data_start;
2054 	}
2055 
2056 nvram_write_end:
2057 	if (align_start || align_end)
2058 		free(buf, M_DEVBUF);
2059 
2060 	return rc;
2061 }
2062 #endif /* BCE_NVRAM_WRITE_SUPPORT */
2063 
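/*
 * Illustrative sketch only (assumes BCE_NVRAM_WRITE_SUPPORT; "sc" and
 * "offset" are placeholders from the caller's context): updating a single
 * 32-bit configuration word goes through bce_nvram_write(), which handles
 * locking, page buffering and erase internally.  The caller still owns any
 * CRC covering the region.  Not compiled.
 */
#if 0
	u32 new_val = 0x12345678;	/* hypothetical replacement value */

	if (bce_nvram_write(sc, offset, (u8 *)&new_val, sizeof(new_val)) != 0)
		BCE_PRINTF("NVRAM write failed!\n");
#endif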
2064 
2065 /****************************************************************************/
2066 /* Verifies that NVRAM is accessible and contains valid data.               */
2067 /*                                                                          */
2068 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
2069 /* correct.                                                                 */
2070 /*                                                                          */
2071 /* Returns:                                                                 */
2072 /*   0 on success, positive value on failure.                               */
2073 /****************************************************************************/
2074 static int
2075 bce_nvram_test(struct bce_softc *sc)
2076 {
2077 	u32 buf[BCE_NVRAM_SIZE / 4];
2078 	u8 *data = (u8 *) buf;
2079 	int rc = 0;
2080 	u32 magic, csum;
2081 
2082 
2083 	/*
2084 	 * Check that the device NVRAM is valid by reading
2085 	 * the magic value at offset 0.
2086 	 */
2087 	if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0)
2088 		goto bce_nvram_test_done;
2089 
2090 
2091 	magic = bce_be32toh(buf[0]);
2092 	if (magic != BCE_NVRAM_MAGIC) {
2093 		rc = ENODEV;
2094 		BCE_PRINTF("%s(%d): Invalid NVRAM magic value! Expected: 0x%08X, "
2095 			"Found: 0x%08X\n",
2096 			__FILE__, __LINE__, BCE_NVRAM_MAGIC, magic);
2097 		goto bce_nvram_test_done;
2098 	}
2099 
2100 	/*
2101 	 * Verify that the device NVRAM includes valid
2102 	 * configuration data.
2103 	 */
2104 	if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0)
2105 		goto bce_nvram_test_done;
2106 
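	/*
	 * Each 0x100 byte region checked below includes its own CRC32, so
	 * running ether_crc32_le() over the whole region (data plus stored
	 * CRC) produces a fixed residual value when the contents are intact.
	 */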
2107 	csum = ether_crc32_le(data, 0x100);
2108 	if (csum != BCE_CRC32_RESIDUAL) {
2109 		rc = ENODEV;
2110 		BCE_PRINTF("%s(%d): Invalid Manufacturing Information NVRAM CRC! "
2111 			"Expected: 0x%08X, Found: 0x%08X\n",
2112 			__FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
2113 		goto bce_nvram_test_done;
2114 	}
2115 
2116 	csum = ether_crc32_le(data + 0x100, 0x100);
2117 	if (csum != BCE_CRC32_RESIDUAL) {
2118 		BCE_PRINTF("%s(%d): Invalid Feature Configuration Information "
2119 			"NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
2120 			__FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
2121 		rc = ENODEV;
2122 	}
2123 
2124 bce_nvram_test_done:
2125 	return rc;
2126 }
2127 
2128 
2129 /****************************************************************************/
2130 /* Free any DMA memory owned by the driver.                                 */
2131 /*                                                                          */
2132 /* Scans through each data structure that requires DMA memory and frees     */
2133 /* the memory if allocated.                                                 */
2134 /*                                                                          */
2135 /* Returns:                                                                 */
2136 /*   Nothing.                                                               */
2137 /****************************************************************************/
2138 static void
2139 bce_dma_free(struct bce_softc *sc)
2140 {
2141 	int i;
2142 
2143 	DBPRINT(sc,BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2144 
2145 	/* Destroy the status block. */
2146 	if (sc->status_block != NULL) {
2147 		bus_dmamem_free(
2148 			sc->status_tag,
2149 		    sc->status_block,
2150 		    sc->status_map);
2151 		sc->status_block = NULL;
2152 	}
2153 
2154 	if (sc->status_map != NULL) {
2155 		bus_dmamap_unload(
2156 			sc->status_tag,
2157 		    sc->status_map);
2158 		bus_dmamap_destroy(sc->status_tag,
2159 		    sc->status_map);
2160 		sc->status_map = NULL;
2161 	}
2162 
2163 	if (sc->status_tag != NULL) {
2164 		bus_dma_tag_destroy(sc->status_tag);
2165 		sc->status_tag = NULL;
2166 	}
2167 
2168 
2169 	/* Destroy the statistics block. */
2170 	if (sc->stats_block != NULL) {
2171 		bus_dmamem_free(
2172 			sc->stats_tag,
2173 		    sc->stats_block,
2174 		    sc->stats_map);
2175 		sc->stats_block = NULL;
2176 	}
2177 
2178 	if (sc->stats_map != NULL) {
2179 		bus_dmamap_unload(
2180 			sc->stats_tag,
2181 		    sc->stats_map);
2182 		bus_dmamap_destroy(sc->stats_tag,
2183 		    sc->stats_map);
2184 		sc->stats_map = NULL;
2185 	}
2186 
2187 	if (sc->stats_tag != NULL) {
2188 		bus_dma_tag_destroy(sc->stats_tag);
2189 		sc->stats_tag = NULL;
2190 	}
2191 
2192 
2193 	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
2194 	for (i = 0; i < TX_PAGES; i++ ) {
2195 		if (sc->tx_bd_chain[i] != NULL) {
2196 			bus_dmamem_free(
2197 				sc->tx_bd_chain_tag,
2198 			    sc->tx_bd_chain[i],
2199 			    sc->tx_bd_chain_map[i]);
2200 			sc->tx_bd_chain[i] = NULL;
2201 		}
2202 
2203 		if (sc->tx_bd_chain_map[i] != NULL) {
2204 			bus_dmamap_unload(
2205 				sc->tx_bd_chain_tag,
2206 		    	sc->tx_bd_chain_map[i]);
2207 			bus_dmamap_destroy(
2208 				sc->tx_bd_chain_tag,
2209 			    sc->tx_bd_chain_map[i]);
2210 			sc->tx_bd_chain_map[i] = NULL;
2211 		}
2212 	}
2213 
2214 	/* Destroy the TX buffer descriptor tag. */
2215 	if (sc->tx_bd_chain_tag != NULL) {
2216 		bus_dma_tag_destroy(sc->tx_bd_chain_tag);
2217 		sc->tx_bd_chain_tag = NULL;
2218 	}
2219 
2220 
2221 	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
2222 	for (i = 0; i < RX_PAGES; i++ ) {
2223 		if (sc->rx_bd_chain[i] != NULL) {
2224 			bus_dmamem_free(
2225 				sc->rx_bd_chain_tag,
2226 			    sc->rx_bd_chain[i],
2227 			    sc->rx_bd_chain_map[i]);
2228 			sc->rx_bd_chain[i] = NULL;
2229 		}
2230 
2231 		if (sc->rx_bd_chain_map[i] != NULL) {
2232 			bus_dmamap_unload(
2233 				sc->rx_bd_chain_tag,
2234 		    	sc->rx_bd_chain_map[i]);
2235 			bus_dmamap_destroy(
2236 				sc->rx_bd_chain_tag,
2237 			    sc->rx_bd_chain_map[i]);
2238 			sc->rx_bd_chain_map[i] = NULL;
2239 		}
2240 	}
2241 
2242 	/* Destroy the RX buffer descriptor tag. */
2243 	if (sc->rx_bd_chain_tag != NULL) {
2244 		bus_dma_tag_destroy(sc->rx_bd_chain_tag);
2245 		sc->rx_bd_chain_tag = NULL;
2246 	}
2247 
2248 
2249 	/* Free, unmap and destroy all page buffer descriptor chain pages. */
2250 	for (i = 0; i < PG_PAGES; i++ ) {
2251 		if (sc->pg_bd_chain[i] != NULL) {
2252 			bus_dmamem_free(
2253 				sc->pg_bd_chain_tag,
2254 			    sc->pg_bd_chain[i],
2255 			    sc->pg_bd_chain_map[i]);
2256 			sc->pg_bd_chain[i] = NULL;
2257 		}
2258 
2259 		if (sc->pg_bd_chain_map[i] != NULL) {
2260 			bus_dmamap_unload(
2261 				sc->pg_bd_chain_tag,
2262 		    	sc->pg_bd_chain_map[i]);
2263 			bus_dmamap_destroy(
2264 				sc->pg_bd_chain_tag,
2265 			    sc->pg_bd_chain_map[i]);
2266 			sc->pg_bd_chain_map[i] = NULL;
2267 		}
2268 	}
2269 
2270 	/* Destroy the page buffer descriptor tag. */
2271 	if (sc->pg_bd_chain_tag != NULL) {
2272 		bus_dma_tag_destroy(sc->pg_bd_chain_tag);
2273 		sc->pg_bd_chain_tag = NULL;
2274 	}
2275 
2276 
2277 	/* Unload and destroy the TX mbuf maps. */
2278 	for (i = 0; i < TOTAL_TX_BD; i++) {
2279 		if (sc->tx_mbuf_map[i] != NULL) {
2280 			bus_dmamap_unload(sc->tx_mbuf_tag,
2281 				sc->tx_mbuf_map[i]);
2282 			bus_dmamap_destroy(sc->tx_mbuf_tag,
2283 	 			sc->tx_mbuf_map[i]);
2284 			sc->tx_mbuf_map[i] = NULL;
2285 		}
2286 	}
2287 
2288 	/* Destroy the TX mbuf tag. */
2289 	if (sc->tx_mbuf_tag != NULL) {
2290 		bus_dma_tag_destroy(sc->tx_mbuf_tag);
2291 		sc->tx_mbuf_tag = NULL;
2292 	}
2293 
2294 	/* Unload and destroy the RX mbuf maps. */
2295 	for (i = 0; i < TOTAL_RX_BD; i++) {
2296 		if (sc->rx_mbuf_map[i] != NULL) {
2297 			bus_dmamap_unload(sc->rx_mbuf_tag,
2298 				sc->rx_mbuf_map[i]);
2299 			bus_dmamap_destroy(sc->rx_mbuf_tag,
2300 	 			sc->rx_mbuf_map[i]);
2301 			sc->rx_mbuf_map[i] = NULL;
2302 		}
2303 	}
2304 
2305 	/* Destroy the RX mbuf tag. */
2306 	if (sc->rx_mbuf_tag != NULL) {
2307 		bus_dma_tag_destroy(sc->rx_mbuf_tag);
2308 		sc->rx_mbuf_tag = NULL;
2309 	}
2310 
2311 	/* Unload and destroy the page mbuf maps. */
2312 	for (i = 0; i < TOTAL_PG_BD; i++) {
2313 		if (sc->pg_mbuf_map[i] != NULL) {
2314 			bus_dmamap_unload(sc->pg_mbuf_tag,
2315 				sc->pg_mbuf_map[i]);
2316 			bus_dmamap_destroy(sc->pg_mbuf_tag,
2317 	 			sc->pg_mbuf_map[i]);
2318 			sc->pg_mbuf_map[i] = NULL;
2319 		}
2320 	}
2321 
2322 	/* Destroy the page mbuf tag. */
2323 	if (sc->pg_mbuf_tag != NULL) {
2324 		bus_dma_tag_destroy(sc->pg_mbuf_tag);
2325 		sc->pg_mbuf_tag = NULL;
2326 	}
2327 
2328 	/* Destroy the parent tag */
2329 	if (sc->parent_tag != NULL) {
2330 		bus_dma_tag_destroy(sc->parent_tag);
2331 		sc->parent_tag = NULL;
2332 	}
2333 
2334 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2335 
2336 }
2337 
2338 
2339 /****************************************************************************/
2340 /* Get DMA memory from the OS.                                              */
2341 /*                                                                          */
2342 /* Validates that the OS has provided DMA buffers in response to a          */
2343 /* bus_dmamap_load() call and saves the physical address of those buffers.  */
2344 /* When the callback is used the OS will return 0 for the mapping function  */
2345 /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any  */
2346 /* failures back to the caller.                                             */
2347 /*                                                                          */
2348 /* Returns:                                                                 */
2349 /*   Nothing.                                                               */
2350 /****************************************************************************/
2351 static void
2352 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2353 {
2354 	bus_addr_t *busaddr = arg;
2355 
2356 	/* Simulate a mapping failure. */
2357 	DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
2358 		printf("bce: %s(%d): Simulating DMA mapping error.\n",
2359 			__FILE__, __LINE__);
2360 		error = ENOMEM);
2361 
2362 	/* Check for an error and signal the caller that an error occurred. */
2363 	if (error) {
2364 		printf("bce: %s(%d): DMA mapping error! error = %d, "
2365 		    "nseg = %d\n", __FILE__, __LINE__, error, nseg);
2366 		*busaddr = 0;
2367 		return;
2368 	}
2369 
2370 	*busaddr = segs->ds_addr;
2371 	return;
2372 }
2373 
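/*
 * Illustrative only (not compiled; "tag", "map", "vaddr" and "size" are
 * placeholders): the callback above is passed to bus_dmamap_load(), exactly
 * as done throughout bce_dma_alloc() below.
 */
#if 0
	bus_addr_t busaddr;
	int error;

	error = bus_dmamap_load(tag, map, vaddr, size,
	    bce_dma_map_addr, &busaddr, BUS_DMA_NOWAIT);
	/* On success, busaddr holds the bus address of the single segment. */
#endif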
2374 
2375 /****************************************************************************/
2376 /* Allocate any DMA memory needed by the driver.                            */
2377 /*                                                                          */
2378 /* Allocates DMA memory needed for the various global structures needed by  */
2379 /* hardware.                                                                */
2380 /*                                                                          */
2381 /* Returns:                                                                 */
2382 /*   0 for success, positive value for failure.                             */
2383 /****************************************************************************/
2384 static int
2385 bce_dma_alloc(device_t dev)
2386 {
2387 	struct bce_softc *sc;
2388 	int i, error, rc = 0;
2389 	bus_addr_t busaddr;
2390 	bus_size_t max_size, max_seg_size;
2391 	int max_segments;
2392 
2393 	sc = device_get_softc(dev);
2394 
2395 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2396 
2397 	/*
2398 	 * Allocate the parent bus DMA tag appropriate for PCI.
2399 	 */
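	/*
	 * Argument order for bus_dma_tag_create(), for reference while
	 * reading the calls below: parent, alignment, boundary, lowaddr,
	 * highaddr, filter, filterarg, maxsize, nsegments, maxsegsz,
	 * flags, lockfunc, lockarg, and the tag pointer to fill in.
	 */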
2400 	if (bus_dma_tag_create(NULL,
2401 			1,
2402 			BCE_DMA_BOUNDARY,
2403 			sc->max_bus_addr,
2404 			BUS_SPACE_MAXADDR,
2405 			NULL, NULL,
2406 			MAXBSIZE,
2407 			BUS_SPACE_UNRESTRICTED,
2408 			BUS_SPACE_MAXSIZE_32BIT,
2409 			0,
2410 			NULL, NULL,
2411 			&sc->parent_tag)) {
2412 		BCE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n",
2413 			__FILE__, __LINE__);
2414 		rc = ENOMEM;
2415 		goto bce_dma_alloc_exit;
2416 	}
2417 
2418 	/*
2419 	 * Create a DMA tag for the status block, allocate and clear the
2420 	 * memory, map the memory into DMA space, and fetch the physical
2421 	 * address of the block.
2422 	 */
2423 	if (bus_dma_tag_create(sc->parent_tag,
2424 	    	BCE_DMA_ALIGN,
2425 	    	BCE_DMA_BOUNDARY,
2426 	    	sc->max_bus_addr,
2427 	    	BUS_SPACE_MAXADDR,
2428 	    	NULL, NULL,
2429 	    	BCE_STATUS_BLK_SZ,
2430 	    	1,
2431 	    	BCE_STATUS_BLK_SZ,
2432 	    	0,
2433 	    	NULL, NULL,
2434 	    	&sc->status_tag)) {
2435 		BCE_PRINTF("%s(%d): Could not allocate status block DMA tag!\n",
2436 			__FILE__, __LINE__);
2437 		rc = ENOMEM;
2438 		goto bce_dma_alloc_exit;
2439 	}
2440 
2441 	if (bus_dmamem_alloc(sc->status_tag,
2442 	    	(void **)&sc->status_block,
2443 	    	BUS_DMA_NOWAIT,
2444 	    	&sc->status_map)) {
2445 		BCE_PRINTF("%s(%d): Could not allocate status block DMA memory!\n",
2446 			__FILE__, __LINE__);
2447 		rc = ENOMEM;
2448 		goto bce_dma_alloc_exit;
2449 	}
2450 
2451 	bzero((char *)sc->status_block, BCE_STATUS_BLK_SZ);
2452 
2453 	error = bus_dmamap_load(sc->status_tag,
2454 	    	sc->status_map,
2455 	    	sc->status_block,
2456 	    	BCE_STATUS_BLK_SZ,
2457 	    	bce_dma_map_addr,
2458 	    	&busaddr,
2459 	    	BUS_DMA_NOWAIT);
2460 
2461 	if (error) {
2462 		BCE_PRINTF("%s(%d): Could not map status block DMA memory!\n",
2463 			__FILE__, __LINE__);
2464 		rc = ENOMEM;
2465 		goto bce_dma_alloc_exit;
2466 	}
2467 
2468 	sc->status_block_paddr = busaddr;
2469 	/* DRC - Fix for 64 bit addresses. */
2470 	DBPRINT(sc, BCE_INFO, "status_block_paddr = 0x%08X\n",
2471 		(u32) sc->status_block_paddr);
2472 
2473 	/*
2474 	 * Create a DMA tag for the statistics block, allocate and clear the
2475 	 * memory, map the memory into DMA space, and fetch the physical
2476 	 * address of the block.
2477 	 */
2478 	if (bus_dma_tag_create(sc->parent_tag,
2479 	    	BCE_DMA_ALIGN,
2480 	    	BCE_DMA_BOUNDARY,
2481 	    	sc->max_bus_addr,
2482 	    	BUS_SPACE_MAXADDR,
2483 	    	NULL, NULL,
2484 	    	BCE_STATS_BLK_SZ,
2485 	    	1,
2486 	    	BCE_STATS_BLK_SZ,
2487 	    	0,
2488 	    	NULL, NULL,
2489 	    	&sc->stats_tag)) {
2490 		BCE_PRINTF("%s(%d): Could not allocate statistics block DMA tag!\n",
2491 			__FILE__, __LINE__);
2492 		rc = ENOMEM;
2493 		goto bce_dma_alloc_exit;
2494 	}
2495 
2496 	if (bus_dmamem_alloc(sc->stats_tag,
2497 	    	(void **)&sc->stats_block,
2498 	    	BUS_DMA_NOWAIT,
2499 	    	&sc->stats_map)) {
2500 		BCE_PRINTF("%s(%d): Could not allocate statistics block DMA memory!\n",
2501 			__FILE__, __LINE__);
2502 		rc = ENOMEM;
2503 		goto bce_dma_alloc_exit;
2504 	}
2505 
2506 	bzero((char *)sc->stats_block, BCE_STATS_BLK_SZ);
2507 
2508 	error = bus_dmamap_load(sc->stats_tag,
2509 	    	sc->stats_map,
2510 	    	sc->stats_block,
2511 	    	BCE_STATS_BLK_SZ,
2512 	    	bce_dma_map_addr,
2513 	    	&busaddr,
2514 	    	BUS_DMA_NOWAIT);
2515 
2516 	if (error) {
2517 		BCE_PRINTF("%s(%d): Could not map statistics block DMA memory!\n",
2518 			__FILE__, __LINE__);
2519 		rc = ENOMEM;
2520 		goto bce_dma_alloc_exit;
2521 	}
2522 
2523 	sc->stats_block_paddr = busaddr;
2524 	/* DRC - Fix for 64 bit address. */
2525 	DBPRINT(sc,BCE_INFO, "stats_block_paddr = 0x%08X\n",
2526 		(u32) sc->stats_block_paddr);
2527 
2528 	/*
2529 	 * Create a DMA tag for the TX buffer descriptor chain,
2530 	 * allocate and clear the memory, and fetch the
2531 	 * physical address of the block.
2532 	 */
2533 	if (bus_dma_tag_create(sc->parent_tag,
2534 			BCM_PAGE_SIZE,
2535 		    BCE_DMA_BOUNDARY,
2536 			sc->max_bus_addr,
2537 			BUS_SPACE_MAXADDR,
2538 			NULL, NULL,
2539 			BCE_TX_CHAIN_PAGE_SZ,
2540 			1,
2541 			BCE_TX_CHAIN_PAGE_SZ,
2542 			0,
2543 			NULL, NULL,
2544 			&sc->tx_bd_chain_tag)) {
2545 		BCE_PRINTF("%s(%d): Could not allocate TX descriptor chain DMA tag!\n",
2546 			__FILE__, __LINE__);
2547 		rc = ENOMEM;
2548 		goto bce_dma_alloc_exit;
2549 	}
2550 
2551 	for (i = 0; i < TX_PAGES; i++) {
2552 
2553 		if(bus_dmamem_alloc(sc->tx_bd_chain_tag,
2554 	    		(void **)&sc->tx_bd_chain[i],
2555 	    		BUS_DMA_NOWAIT,
2556 		    	&sc->tx_bd_chain_map[i])) {
2557 			BCE_PRINTF("%s(%d): Could not allocate TX descriptor "
2558 				"chain DMA memory!\n", __FILE__, __LINE__);
2559 			rc = ENOMEM;
2560 			goto bce_dma_alloc_exit;
2561 		}
2562 
2563 		error = bus_dmamap_load(sc->tx_bd_chain_tag,
2564 	    		sc->tx_bd_chain_map[i],
2565 	    		sc->tx_bd_chain[i],
2566 		    	BCE_TX_CHAIN_PAGE_SZ,
2567 		    	bce_dma_map_addr,
2568 	    		&busaddr,
2569 	    		BUS_DMA_NOWAIT);
2570 
2571 		if (error) {
2572 			BCE_PRINTF("%s(%d): Could not map TX descriptor chain DMA memory!\n",
2573 				__FILE__, __LINE__);
2574 			rc = ENOMEM;
2575 			goto bce_dma_alloc_exit;
2576 		}
2577 
2578 		sc->tx_bd_chain_paddr[i] = busaddr;
2579 		/* DRC - Fix for 64 bit systems. */
2580 		DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2581 			i, (u32) sc->tx_bd_chain_paddr[i]);
2582 	}
2583 
2584 	/* Check the required size before mapping to conserve resources. */
2585 	if (bce_tso_enable) {
2586 		max_size     = BCE_TSO_MAX_SIZE;
2587 		max_segments = BCE_MAX_SEGMENTS;
2588 		max_seg_size = BCE_TSO_MAX_SEG_SIZE;
2589 	} else {
2590 		max_size     = MCLBYTES * BCE_MAX_SEGMENTS;
2591 		max_segments = BCE_MAX_SEGMENTS;
2592 		max_seg_size = MCLBYTES;
2593 	}
2594 
2595 	/* Create a DMA tag for TX mbufs. */
2596 	if (bus_dma_tag_create(sc->parent_tag,
2597 			1,
2598 			BCE_DMA_BOUNDARY,
2599 			sc->max_bus_addr,
2600 			BUS_SPACE_MAXADDR,
2601 			NULL, NULL,
2602 			max_size,
2603 			max_segments,
2604 			max_seg_size,
2605 			0,
2606 			NULL, NULL,
2607 			&sc->tx_mbuf_tag)) {
2608 		BCE_PRINTF("%s(%d): Could not allocate TX mbuf DMA tag!\n",
2609 			__FILE__, __LINE__);
2610 		rc = ENOMEM;
2611 		goto bce_dma_alloc_exit;
2612 	}
2613 
2614 	/* Create DMA maps for the TX mbufs clusters. */
2615 	for (i = 0; i < TOTAL_TX_BD; i++) {
2616 		if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT,
2617 			&sc->tx_mbuf_map[i])) {
2618 			BCE_PRINTF("%s(%d): Unable to create TX mbuf DMA map!\n",
2619 				__FILE__, __LINE__);
2620 			rc = ENOMEM;
2621 			goto bce_dma_alloc_exit;
2622 		}
2623 	}
2624 
2625 	/*
2626 	 * Create a DMA tag for the RX buffer descriptor chain,
2627 	 * allocate and clear the memory, and fetch the physical
2628 	 * address of the blocks.
2629 	 */
2630 	if (bus_dma_tag_create(sc->parent_tag,
2631 			BCM_PAGE_SIZE,
2632 			BCE_DMA_BOUNDARY,
2633 			sc->max_bus_addr,
2634 			BUS_SPACE_MAXADDR,
2635 			NULL, NULL,
2636 			BCE_RX_CHAIN_PAGE_SZ,
2637 			1,
2638 			BCE_RX_CHAIN_PAGE_SZ,
2639 			0,
2640 			NULL, NULL,
2641 			&sc->rx_bd_chain_tag)) {
2642 		BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain DMA tag!\n",
2643 			__FILE__, __LINE__);
2644 		rc = ENOMEM;
2645 		goto bce_dma_alloc_exit;
2646 	}
2647 
2648 	for (i = 0; i < RX_PAGES; i++) {
2649 
2650 		if (bus_dmamem_alloc(sc->rx_bd_chain_tag,
2651 	    		(void **)&sc->rx_bd_chain[i],
2652 	    		BUS_DMA_NOWAIT,
2653 		    	&sc->rx_bd_chain_map[i])) {
2654 			BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain "
2655 				"DMA memory!\n", __FILE__, __LINE__);
2656 			rc = ENOMEM;
2657 			goto bce_dma_alloc_exit;
2658 		}
2659 
2660 		bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
2661 
2662 		error = bus_dmamap_load(sc->rx_bd_chain_tag,
2663 	    		sc->rx_bd_chain_map[i],
2664 	    		sc->rx_bd_chain[i],
2665 		    	BCE_RX_CHAIN_PAGE_SZ,
2666 		    	bce_dma_map_addr,
2667 	    		&busaddr,
2668 	    		BUS_DMA_NOWAIT);
2669 
2670 		if (error) {
2671 			BCE_PRINTF("%s(%d): Could not map RX descriptor chain DMA memory!\n",
2672 				__FILE__, __LINE__);
2673 			rc = ENOMEM;
2674 			goto bce_dma_alloc_exit;
2675 		}
2676 
2677 		sc->rx_bd_chain_paddr[i] = busaddr;
2678 		/* DRC - Fix for 64 bit systems. */
2679 		DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2680 			i, (u32) sc->rx_bd_chain_paddr[i]);
2681 	}
2682 
2683 	/*
2684 	 * Create a DMA tag for RX mbufs.
2685 	 */
2686 	max_size = max_seg_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ?
2687 		MCLBYTES : sc->rx_bd_mbuf_alloc_size);
2688 
2689 	if (bus_dma_tag_create(sc->parent_tag,
2690 			1,
2691 			BCE_DMA_BOUNDARY,
2692 			sc->max_bus_addr,
2693 			BUS_SPACE_MAXADDR,
2694 			NULL, NULL,
2695 			max_size,
2696 			1,
2697 			max_seg_size,
2698 			0,
2699 			NULL, NULL,
2700 	    	&sc->rx_mbuf_tag)) {
2701 		BCE_PRINTF("%s(%d): Could not allocate RX mbuf DMA tag!\n",
2702 			__FILE__, __LINE__);
2703 		rc = ENOMEM;
2704 		goto bce_dma_alloc_exit;
2705 	}
2706 
2707 	/* Create DMA maps for the RX mbuf clusters. */
2708 	for (i = 0; i < TOTAL_RX_BD; i++) {
2709 		if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT,
2710 				&sc->rx_mbuf_map[i])) {
2711 			BCE_PRINTF("%s(%d): Unable to create RX mbuf DMA map!\n",
2712 				__FILE__, __LINE__);
2713 			rc = ENOMEM;
2714 			goto bce_dma_alloc_exit;
2715 		}
2716 	}
2717 
2718 	/*
2719 	 * Create a DMA tag for the page buffer descriptor chain,
2720 	 * allocate and clear the memory, and fetch the physical
2721 	 * address of the blocks.
2722 	 */
2723 	if (bus_dma_tag_create(sc->parent_tag,
2724 			BCM_PAGE_SIZE,
2725 			BCE_DMA_BOUNDARY,
2726 			sc->max_bus_addr,
2727 			BUS_SPACE_MAXADDR,
2728 			NULL, NULL,
2729 			BCE_PG_CHAIN_PAGE_SZ,
2730 			1,
2731 			BCE_PG_CHAIN_PAGE_SZ,
2732 			0,
2733 			NULL, NULL,
2734 			&sc->pg_bd_chain_tag)) {
2735 		BCE_PRINTF("%s(%d): Could not allocate page descriptor chain DMA tag!\n",
2736 			__FILE__, __LINE__);
2737 		rc = ENOMEM;
2738 		goto bce_dma_alloc_exit;
2739 	}
2740 
2741 	for (i = 0; i < PG_PAGES; i++) {
2742 
2743 		if (bus_dmamem_alloc(sc->pg_bd_chain_tag,
2744 	    		(void **)&sc->pg_bd_chain[i],
2745 	    		BUS_DMA_NOWAIT,
2746 		    	&sc->pg_bd_chain_map[i])) {
2747 			BCE_PRINTF("%s(%d): Could not allocate page descriptor chain "
2748 				"DMA memory!\n", __FILE__, __LINE__);
2749 			rc = ENOMEM;
2750 			goto bce_dma_alloc_exit;
2751 		}
2752 
2753 		bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ);
2754 
2755 		error = bus_dmamap_load(sc->pg_bd_chain_tag,
2756 	    		sc->pg_bd_chain_map[i],
2757 	    		sc->pg_bd_chain[i],
2758 		    	BCE_PG_CHAIN_PAGE_SZ,
2759 		    	bce_dma_map_addr,
2760 	    		&busaddr,
2761 	    		BUS_DMA_NOWAIT);
2762 
2763 		if (error) {
2764 			BCE_PRINTF("%s(%d): Could not map page descriptor chain DMA memory!\n",
2765 				__FILE__, __LINE__);
2766 			rc = ENOMEM;
2767 			goto bce_dma_alloc_exit;
2768 		}
2769 
2770 		sc->pg_bd_chain_paddr[i] = busaddr;
2771 		/* DRC - Fix for 64 bit systems. */
2772 		DBPRINT(sc, BCE_INFO, "pg_bd_chain_paddr[%d] = 0x%08X\n",
2773 			i, (u32) sc->pg_bd_chain_paddr[i]);
2774 	}
2775 
2776 	/*
2777 	 * Create a DMA tag for page mbufs.
2778 	 */
2779 	max_size = max_seg_size = ((sc->pg_bd_mbuf_alloc_size < MCLBYTES) ?
2780 		MCLBYTES : sc->pg_bd_mbuf_alloc_size);
2781 
2782 	if (bus_dma_tag_create(sc->parent_tag,
2783 			1,
2784 			BCE_DMA_BOUNDARY,
2785 			sc->max_bus_addr,
2786 			BUS_SPACE_MAXADDR,
2787 			NULL, NULL,
2788 			max_size,
2789 			1,
2790 			max_seg_size,
2791 			0,
2792 			NULL, NULL,
2793 	    	&sc->pg_mbuf_tag)) {
2794 		BCE_PRINTF("%s(%d): Could not allocate page mbuf DMA tag!\n",
2795 			__FILE__, __LINE__);
2796 		rc = ENOMEM;
2797 		goto bce_dma_alloc_exit;
2798 	}
2799 
2800 	/* Create DMA maps for the page mbuf clusters. */
2801 	for (i = 0; i < TOTAL_PG_BD; i++) {
2802 		if (bus_dmamap_create(sc->pg_mbuf_tag, BUS_DMA_NOWAIT,
2803 				&sc->pg_mbuf_map[i])) {
2804 			BCE_PRINTF("%s(%d): Unable to create page mbuf DMA map!\n",
2805 				__FILE__, __LINE__);
2806 			rc = ENOMEM;
2807 			goto bce_dma_alloc_exit;
2808 		}
2809 	}
2810 
2811 bce_dma_alloc_exit:
2812 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2813 
2814 	return(rc);
2815 }
2816 
2817 
2818 /****************************************************************************/
2819 /* Release all resources used by the driver.                                */
2820 /*                                                                          */
2821 /* Releases all resources acquired by the driver including interrupts,      */
2822 /* interrupt handler, interfaces, mutexes, and DMA memory.                  */
2823 /*                                                                          */
2824 /* Returns:                                                                 */
2825 /*   Nothing.                                                               */
2826 /****************************************************************************/
2827 static void
2828 bce_release_resources(struct bce_softc *sc)
2829 {
2830 	device_t dev;
2831 
2832 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
2833 
2834 	dev = sc->bce_dev;
2835 
2836 	bce_dma_free(sc);
2837 
2838 	if (sc->bce_intrhand != NULL) {
2839 		DBPRINT(sc, BCE_INFO_RESET, "Removing interrupt handler.\n");
2840 		bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
2841 	}
2842 
2843 	if (sc->bce_res_irq != NULL) {
2844 		DBPRINT(sc, BCE_INFO_RESET, "Releasing IRQ.\n");
2845 		bus_release_resource(dev, SYS_RES_IRQ, sc->bce_flags & BCE_USING_MSI_FLAG ? 1 : 0,
2846 			sc->bce_res_irq);
2847 	}
2848 
2849 	if (sc->bce_flags & BCE_USING_MSI_FLAG) {
2850 		DBPRINT(sc, BCE_INFO_RESET, "Releasing MSI vector.\n");
2851 		pci_release_msi(dev);
2852 	}
2853 
2854 	if (sc->bce_res_mem != NULL) {
2855 		DBPRINT(sc, BCE_INFO_RESET, "Releasing PCI memory.\n");
2856 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->bce_res_mem);
2857 	}
2858 
2859 	if (sc->bce_ifp != NULL) {
2860 		DBPRINT(sc, BCE_INFO_RESET, "Releasing IF.\n");
2861 		if_free(sc->bce_ifp);
2862 	}
2863 
2864 	if (mtx_initialized(&sc->bce_mtx))
2865 		BCE_LOCK_DESTROY(sc);
2866 
2867 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
2868 
2869 }
2870 
2871 
2872 /****************************************************************************/
2873 /* Firmware synchronization.                                                */
2874 /*                                                                          */
2875 /* Before performing certain events such as a chip reset, synchronize with  */
2876 /* the firmware first.                                                      */
2877 /*                                                                          */
2878 /* Returns:                                                                 */
2879 /*   0 for success, positive value for failure.                             */
2880 /****************************************************************************/
2881 static int
2882 bce_fw_sync(struct bce_softc *sc, u32 msg_data)
2883 {
2884 	int i, rc = 0;
2885 	u32 val;
2886 
2887 	/* Don't waste any time if we've timed out before. */
2888 	if (sc->bce_fw_timed_out) {
2889 		rc = EBUSY;
2890 		goto bce_fw_sync_exit;
2891 	}
2892 
2893 	/* Increment the message sequence number. */
2894 	sc->bce_fw_wr_seq++;
2895 	msg_data |= sc->bce_fw_wr_seq;
2896 
2897  	DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data);
2898 
2899 	/* Send the message to the bootcode driver mailbox. */
2900 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2901 
2902 	/* Wait for the bootcode to acknowledge the message. */
2903 	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2904 		/* Check for a response in the bootcode firmware mailbox. */
2905 		val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB);
2906 		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2907 			break;
2908 		DELAY(1000);
2909 	}
2910 
2911 	/* If we've timed out, tell the bootcode that we've stopped waiting. */
2912 	if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) &&
2913 		((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) {
2914 
2915 		BCE_PRINTF("%s(%d): Firmware synchronization timeout! "
2916 			"msg_data = 0x%08X\n",
2917 			__FILE__, __LINE__, msg_data);
2918 
2919 		msg_data &= ~BCE_DRV_MSG_CODE;
2920 		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2921 
2922 		REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2923 
2924 		sc->bce_fw_timed_out = 1;
2925 		rc = EBUSY;
2926 	}
2927 
2928 bce_fw_sync_exit:
2929 	return (rc);
2930 }
2931 
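/*
 * Illustrative only: before a sensitive operation a caller combines a WAITx
 * data code with a message code, e.g. the reset path issues something like
 * bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | BCE_DRV_MSG_CODE_RESET) and
 * checks the return value before touching the hardware.
 */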
2932 
2933 /****************************************************************************/
2934 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2935 /*                                                                          */
2936 /* Returns:                                                                 */
2937 /*   Nothing.                                                               */
2938 /****************************************************************************/
2939 static void
2940 bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code,
2941 	u32 rv2p_code_len, u32 rv2p_proc)
2942 {
2943 	int i;
2944 	u32 val;
2945 
2946 	/* Set the page size used by RV2P. */
2947 	if (rv2p_proc == RV2P_PROC2) {
2948 		BCE_RV2P_PROC2_CHG_MAX_BD_PAGE(USABLE_RX_BD_PER_PAGE);
2949 	}
2950 
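	/*
	 * Each RV2P instruction is 64 bits wide and is written as two 32-bit
	 * halves (high word first); i / 8 below is the instruction index
	 * latched by the ADDR_CMD write.
	 */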
2951 	for (i = 0; i < rv2p_code_len; i += 8) {
2952 		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2953 		rv2p_code++;
2954 		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2955 		rv2p_code++;
2956 
2957 		if (rv2p_proc == RV2P_PROC1) {
2958 			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2959 			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2960 		}
2961 		else {
2962 			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2963 			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2964 		}
2965 	}
2966 
2967 	/* Reset the processor, un-stall is done later. */
2968 	if (rv2p_proc == RV2P_PROC1) {
2969 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2970 	}
2971 	else {
2972 		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2973 	}
2974 }
2975 
2976 
2977 /****************************************************************************/
2978 /* Load RISC processor firmware.                                            */
2979 /*                                                                          */
2980 /* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
2981 /* associated with a particular processor.                                  */
2982 /*                                                                          */
2983 /* Returns:                                                                 */
2984 /*   Nothing.                                                               */
2985 /****************************************************************************/
2986 static void
2987 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2988 	struct fw_info *fw)
2989 {
2990 	u32 offset;
2991 	u32 val;
2992 
2993 	/* Halt the CPU. */
2994 	val = REG_RD_IND(sc, cpu_reg->mode);
2995 	val |= cpu_reg->mode_value_halt;
2996 	REG_WR_IND(sc, cpu_reg->mode, val);
2997 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2998 
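	/*
	 * Each firmware section's load address is given in the CPU's MIPS
	 * view; subtracting mips_view_base and adding spad_base converts it
	 * to a scratchpad offset reachable through REG_WR_IND().
	 */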
2999 	/* Load the Text area. */
3000 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3001 	if (fw->text) {
3002 		int j;
3003 
3004 		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3005 			REG_WR_IND(sc, offset, fw->text[j]);
3006 		}
3007 	}
3008 
3009 	/* Load the Data area. */
3010 	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3011 	if (fw->data) {
3012 		int j;
3013 
3014 		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3015 			REG_WR_IND(sc, offset, fw->data[j]);
3016 		}
3017 	}
3018 
3019 	/* Load the SBSS area. */
3020 	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3021 	if (fw->sbss) {
3022 		int j;
3023 
3024 		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3025 			REG_WR_IND(sc, offset, fw->sbss[j]);
3026 		}
3027 	}
3028 
3029 	/* Load the BSS area. */
3030 	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3031 	if (fw->bss) {
3032 		int j;
3033 
3034 		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3035 			REG_WR_IND(sc, offset, fw->bss[j]);
3036 		}
3037 	}
3038 
3039 	/* Load the Read-Only area. */
3040 	offset = cpu_reg->spad_base +
3041 		(fw->rodata_addr - cpu_reg->mips_view_base);
3042 	if (fw->rodata) {
3043 		int j;
3044 
3045 		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3046 			REG_WR_IND(sc, offset, fw->rodata[j]);
3047 		}
3048 	}
3049 
3050 	/* Clear the pre-fetch instruction. */
3051 	REG_WR_IND(sc, cpu_reg->inst, 0);
3052 	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
3053 
3054 	/* Start the CPU. */
3055 	val = REG_RD_IND(sc, cpu_reg->mode);
3056 	val &= ~cpu_reg->mode_value_halt;
3057 	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
3058 	REG_WR_IND(sc, cpu_reg->mode, val);
3059 }
3060 
3061 
3062 /****************************************************************************/
3063 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
3064 /*                                                                          */
3065 /* Loads the firmware for each CPU and starts the CPU.                      */
3066 /*                                                                          */
3067 /* Returns:                                                                 */
3068 /*   Nothing.                                                               */
3069 /****************************************************************************/
3070 static void
3071 bce_init_cpus(struct bce_softc *sc)
3072 {
3073 	struct cpu_reg cpu_reg;
3074 	struct fw_info fw;
3075 
3076 	/* Initialize the RV2P processor. */
3077 	bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1), RV2P_PROC1);
3078 	bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2), RV2P_PROC2);
3079 
3080 	/* Initialize the RX Processor. */
3081 	cpu_reg.mode = BCE_RXP_CPU_MODE;
3082 	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
3083 	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
3084 	cpu_reg.state = BCE_RXP_CPU_STATE;
3085 	cpu_reg.state_value_clear = 0xffffff;
3086 	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
3087 	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
3088 	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
3089 	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
3090 	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
3091 	cpu_reg.spad_base = BCE_RXP_SCRATCH;
3092 	cpu_reg.mips_view_base = 0x8000000;
3093 
3094 	fw.ver_major = bce_RXP_b06FwReleaseMajor;
3095 	fw.ver_minor = bce_RXP_b06FwReleaseMinor;
3096 	fw.ver_fix = bce_RXP_b06FwReleaseFix;
3097 	fw.start_addr = bce_RXP_b06FwStartAddr;
3098 
3099 	fw.text_addr = bce_RXP_b06FwTextAddr;
3100 	fw.text_len = bce_RXP_b06FwTextLen;
3101 	fw.text_index = 0;
3102 	fw.text = bce_RXP_b06FwText;
3103 
3104 	fw.data_addr = bce_RXP_b06FwDataAddr;
3105 	fw.data_len = bce_RXP_b06FwDataLen;
3106 	fw.data_index = 0;
3107 	fw.data = bce_RXP_b06FwData;
3108 
3109 	fw.sbss_addr = bce_RXP_b06FwSbssAddr;
3110 	fw.sbss_len = bce_RXP_b06FwSbssLen;
3111 	fw.sbss_index = 0;
3112 	fw.sbss = bce_RXP_b06FwSbss;
3113 
3114 	fw.bss_addr = bce_RXP_b06FwBssAddr;
3115 	fw.bss_len = bce_RXP_b06FwBssLen;
3116 	fw.bss_index = 0;
3117 	fw.bss = bce_RXP_b06FwBss;
3118 
3119 	fw.rodata_addr = bce_RXP_b06FwRodataAddr;
3120 	fw.rodata_len = bce_RXP_b06FwRodataLen;
3121 	fw.rodata_index = 0;
3122 	fw.rodata = bce_RXP_b06FwRodata;
3123 
3124 	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
3125 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3126 
3127 	/* Initialize the TX Processor. */
3128 	cpu_reg.mode = BCE_TXP_CPU_MODE;
3129 	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
3130 	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
3131 	cpu_reg.state = BCE_TXP_CPU_STATE;
3132 	cpu_reg.state_value_clear = 0xffffff;
3133 	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
3134 	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
3135 	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
3136 	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
3137 	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
3138 	cpu_reg.spad_base = BCE_TXP_SCRATCH;
3139 	cpu_reg.mips_view_base = 0x8000000;
3140 
3141 	fw.ver_major = bce_TXP_b06FwReleaseMajor;
3142 	fw.ver_minor = bce_TXP_b06FwReleaseMinor;
3143 	fw.ver_fix = bce_TXP_b06FwReleaseFix;
3144 	fw.start_addr = bce_TXP_b06FwStartAddr;
3145 
3146 	fw.text_addr = bce_TXP_b06FwTextAddr;
3147 	fw.text_len = bce_TXP_b06FwTextLen;
3148 	fw.text_index = 0;
3149 	fw.text = bce_TXP_b06FwText;
3150 
3151 	fw.data_addr = bce_TXP_b06FwDataAddr;
3152 	fw.data_len = bce_TXP_b06FwDataLen;
3153 	fw.data_index = 0;
3154 	fw.data = bce_TXP_b06FwData;
3155 
3156 	fw.sbss_addr = bce_TXP_b06FwSbssAddr;
3157 	fw.sbss_len = bce_TXP_b06FwSbssLen;
3158 	fw.sbss_index = 0;
3159 	fw.sbss = bce_TXP_b06FwSbss;
3160 
3161 	fw.bss_addr = bce_TXP_b06FwBssAddr;
3162 	fw.bss_len = bce_TXP_b06FwBssLen;
3163 	fw.bss_index = 0;
3164 	fw.bss = bce_TXP_b06FwBss;
3165 
3166 	fw.rodata_addr = bce_TXP_b06FwRodataAddr;
3167 	fw.rodata_len = bce_TXP_b06FwRodataLen;
3168 	fw.rodata_index = 0;
3169 	fw.rodata = bce_TXP_b06FwRodata;
3170 
3171 	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
3172 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3173 
3174 	/* Initialize the TX Patch-up Processor. */
3175 	cpu_reg.mode = BCE_TPAT_CPU_MODE;
3176 	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
3177 	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
3178 	cpu_reg.state = BCE_TPAT_CPU_STATE;
3179 	cpu_reg.state_value_clear = 0xffffff;
3180 	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
3181 	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
3182 	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
3183 	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
3184 	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
3185 	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
3186 	cpu_reg.mips_view_base = 0x8000000;
3187 
3188 	fw.ver_major = bce_TPAT_b06FwReleaseMajor;
3189 	fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
3190 	fw.ver_fix = bce_TPAT_b06FwReleaseFix;
3191 	fw.start_addr = bce_TPAT_b06FwStartAddr;
3192 
3193 	fw.text_addr = bce_TPAT_b06FwTextAddr;
3194 	fw.text_len = bce_TPAT_b06FwTextLen;
3195 	fw.text_index = 0;
3196 	fw.text = bce_TPAT_b06FwText;
3197 
3198 	fw.data_addr = bce_TPAT_b06FwDataAddr;
3199 	fw.data_len = bce_TPAT_b06FwDataLen;
3200 	fw.data_index = 0;
3201 	fw.data = bce_TPAT_b06FwData;
3202 
3203 	fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
3204 	fw.sbss_len = bce_TPAT_b06FwSbssLen;
3205 	fw.sbss_index = 0;
3206 	fw.sbss = bce_TPAT_b06FwSbss;
3207 
3208 	fw.bss_addr = bce_TPAT_b06FwBssAddr;
3209 	fw.bss_len = bce_TPAT_b06FwBssLen;
3210 	fw.bss_index = 0;
3211 	fw.bss = bce_TPAT_b06FwBss;
3212 
3213 	fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
3214 	fw.rodata_len = bce_TPAT_b06FwRodataLen;
3215 	fw.rodata_index = 0;
3216 	fw.rodata = bce_TPAT_b06FwRodata;
3217 
3218 	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
3219 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3220 
3221 	/* Initialize the Completion Processor. */
3222 	cpu_reg.mode = BCE_COM_CPU_MODE;
3223 	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
3224 	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
3225 	cpu_reg.state = BCE_COM_CPU_STATE;
3226 	cpu_reg.state_value_clear = 0xffffff;
3227 	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
3228 	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
3229 	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
3230 	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
3231 	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
3232 	cpu_reg.spad_base = BCE_COM_SCRATCH;
3233 	cpu_reg.mips_view_base = 0x8000000;
3234 
3235 	fw.ver_major = bce_COM_b06FwReleaseMajor;
3236 	fw.ver_minor = bce_COM_b06FwReleaseMinor;
3237 	fw.ver_fix = bce_COM_b06FwReleaseFix;
3238 	fw.start_addr = bce_COM_b06FwStartAddr;
3239 
3240 	fw.text_addr = bce_COM_b06FwTextAddr;
3241 	fw.text_len = bce_COM_b06FwTextLen;
3242 	fw.text_index = 0;
3243 	fw.text = bce_COM_b06FwText;
3244 
3245 	fw.data_addr = bce_COM_b06FwDataAddr;
3246 	fw.data_len = bce_COM_b06FwDataLen;
3247 	fw.data_index = 0;
3248 	fw.data = bce_COM_b06FwData;
3249 
3250 	fw.sbss_addr = bce_COM_b06FwSbssAddr;
3251 	fw.sbss_len = bce_COM_b06FwSbssLen;
3252 	fw.sbss_index = 0;
3253 	fw.sbss = bce_COM_b06FwSbss;
3254 
3255 	fw.bss_addr = bce_COM_b06FwBssAddr;
3256 	fw.bss_len = bce_COM_b06FwBssLen;
3257 	fw.bss_index = 0;
3258 	fw.bss = bce_COM_b06FwBss;
3259 
3260 	fw.rodata_addr = bce_COM_b06FwRodataAddr;
3261 	fw.rodata_len = bce_COM_b06FwRodataLen;
3262 	fw.rodata_index = 0;
3263 	fw.rodata = bce_COM_b06FwRodata;
3264 
3265 	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
3266 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3267 
3268 	/* Initialize the Command Processor. */
3269 	cpu_reg.mode = BCE_CP_CPU_MODE;
3270 	cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
3271 	cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
3272 	cpu_reg.state = BCE_CP_CPU_STATE;
3273 	cpu_reg.state_value_clear = 0xffffff;
3274 	cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
3275 	cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
3276 	cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
3277 	cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
3278 	cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
3279 	cpu_reg.spad_base = BCE_CP_SCRATCH;
3280 	cpu_reg.mips_view_base = 0x8000000;
3281 
3282 	fw.ver_major = bce_CP_b06FwReleaseMajor;
3283 	fw.ver_minor = bce_CP_b06FwReleaseMinor;
3284 	fw.ver_fix = bce_CP_b06FwReleaseFix;
3285 	fw.start_addr = bce_CP_b06FwStartAddr;
3286 
3287 	fw.text_addr = bce_CP_b06FwTextAddr;
3288 	fw.text_len = bce_CP_b06FwTextLen;
3289 	fw.text_index = 0;
3290 	fw.text = bce_CP_b06FwText;
3291 
3292 	fw.data_addr = bce_CP_b06FwDataAddr;
3293 	fw.data_len = bce_CP_b06FwDataLen;
3294 	fw.data_index = 0;
3295 	fw.data = bce_CP_b06FwData;
3296 
3297 	fw.sbss_addr = bce_CP_b06FwSbssAddr;
3298 	fw.sbss_len = bce_CP_b06FwSbssLen;
3299 	fw.sbss_index = 0;
3300 	fw.sbss = bce_CP_b06FwSbss;
3301 
3302 	fw.bss_addr = bce_CP_b06FwBssAddr;
3303 	fw.bss_len = bce_CP_b06FwBssLen;
3304 	fw.bss_index = 0;
3305 	fw.bss = bce_CP_b06FwBss;
3306 
3307 	fw.rodata_addr = bce_CP_b06FwRodataAddr;
3308 	fw.rodata_len = bce_CP_b06FwRodataLen;
3309 	fw.rodata_index = 0;
3310 	fw.rodata = bce_CP_b06FwRodata;
3311 
3312 	DBPRINT(sc, BCE_INFO_RESET, "Loading CP firmware.\n");
3313 	bce_load_cpu_fw(sc, &cpu_reg, &fw);
3314 }
3315 
3316 
3317 /****************************************************************************/
3318 /* Initialize context memory.                                               */
3319 /*                                                                          */
3320 /* Clears the memory associated with each Context ID (CID).                 */
3321 /*                                                                          */
3322 /* Returns:                                                                 */
3323 /*   Nothing.                                                               */
3324 /****************************************************************************/
3325 static void
3326 bce_init_ctx(struct bce_softc *sc)
3327 {
3328 	u32 vcid = 96;
3329 
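	/*
	 * Walk all 96 context IDs (CIDs) from the highest down, pointing
	 * each virtual CID at its physical page before zeroing its
	 * PHY_CTX_SIZE bytes of context memory.
	 */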
3330 	while (vcid) {
3331 		u32 vcid_addr, pcid_addr, offset;
3332 		int i;
3333 
3334 		vcid--;
3335 
3336 		vcid_addr = GET_CID_ADDR(vcid);
3337 		pcid_addr = vcid_addr;
3338 
3339 		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
3340 			vcid_addr += (i << PHY_CTX_SHIFT);
3341 			pcid_addr += (i << PHY_CTX_SHIFT);
3342 
3343 			REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
3344 			REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
3345 
3346 			/* Zero out the context. */
3347 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
3348 				CTX_WR(sc, vcid_addr, offset, 0);
3349 		}
3350 	}
3351 }
3352 
3353 
3354 /****************************************************************************/
3355 /* Fetch the permanent MAC address of the controller.                       */
3356 /*                                                                          */
3357 /* Returns:                                                                 */
3358 /*   Nothing.                                                               */
3359 /****************************************************************************/
3360 static void
3361 bce_get_mac_addr(struct bce_softc *sc)
3362 {
3363 	u32 mac_lo = 0, mac_hi = 0;
3364 
3365 	/*
3366 	 * The NetXtreme II bootcode populates various NIC
3367 	 * power-on and runtime configuration items in a
3368 	 * shared memory area.  The factory configured MAC
3369 	 * address is available from both NVRAM and the
3370 	 * shared memory area so we'll read the value from
3371 	 * shared memory for speed.
3372 	 */
3373 
3374 	mac_hi = REG_RD_IND(sc, sc->bce_shmem_base +
3375 		BCE_PORT_HW_CFG_MAC_UPPER);
3376 	mac_lo = REG_RD_IND(sc, sc->bce_shmem_base +
3377 		BCE_PORT_HW_CFG_MAC_LOWER);
3378 
3379 	if ((mac_lo == 0) && (mac_hi == 0)) {
3380 		BCE_PRINTF("%s(%d): Invalid Ethernet address!\n",
3381 			__FILE__, __LINE__);
3382 	} else {
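		/*
		 * mac_hi holds bytes 0-1 of the address in its low 16 bits,
		 * mac_lo holds bytes 2-5.
		 */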
3383 		sc->eaddr[0] = (u_char)(mac_hi >> 8);
3384 		sc->eaddr[1] = (u_char)(mac_hi >> 0);
3385 		sc->eaddr[2] = (u_char)(mac_lo >> 24);
3386 		sc->eaddr[3] = (u_char)(mac_lo >> 16);
3387 		sc->eaddr[4] = (u_char)(mac_lo >> 8);
3388 		sc->eaddr[5] = (u_char)(mac_lo >> 0);
3389 	}
3390 
3391 	DBPRINT(sc, BCE_INFO_MISC, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
3392 }
3393 
3394 
3395 /****************************************************************************/
3396 /* Program the MAC address.                                                 */
3397 /*                                                                          */
3398 /* Returns:                                                                 */
3399 /*   Nothing.                                                               */
3400 /****************************************************************************/
3401 static void
3402 bce_set_mac_addr(struct bce_softc *sc)
3403 {
3404 	u32 val;
3405 	u8 *mac_addr = sc->eaddr;
3406 
3407 	DBPRINT(sc, BCE_INFO_MISC, "Setting Ethernet address = %6D\n", sc->eaddr, ":");
3408 
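	/* MATCH0 takes the two high-order address bytes, MATCH1 the other four. */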
3409 	val = (mac_addr[0] << 8) | mac_addr[1];
3410 
3411 	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
3412 
3413 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3414 		(mac_addr[4] << 8) | mac_addr[5];
3415 
3416 	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3417 }
3418 
3419 
3420 /****************************************************************************/
3421 /* Stop the controller.                                                     */
3422 /*                                                                          */
3423 /* Returns:                                                                 */
3424 /*   Nothing.                                                               */
3425 /****************************************************************************/
3426 static void
3427 bce_stop(struct bce_softc *sc)
3428 {
3429 	struct ifnet *ifp;
3430 	struct ifmedia_entry *ifm;
3431 	struct mii_data *mii = NULL;
3432 	int mtmp, itmp;
3433 
3434 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3435 
3436 	BCE_LOCK_ASSERT(sc);
3437 
3438 	ifp = sc->bce_ifp;
3439 
3440 	mii = device_get_softc(sc->bce_miibus);
3441 
3442 	callout_stop(&sc->bce_tick_callout);
3443 
3444 	/* Disable the transmit/receive blocks. */
3445 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 0x5ffffff);
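	/* Read back to flush the preceding write before the settling delay. */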
3446 	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3447 	DELAY(20);
3448 
3449 	bce_disable_intr(sc);
3450 
3451 	/* Free RX buffers. */
3452 	bce_free_pg_chain(sc);
3453 	bce_free_rx_chain(sc);
3454 
3455 	/* Free TX buffers. */
3456 	bce_free_tx_chain(sc);
3457 
3458 	/*
3459 	 * Isolate/power down the PHY, but leave the media selection
3460 	 * unchanged so that things will be put back to normal when
3461 	 * we bring the interface back up.
3462 	 */
3463 
3464 	itmp = ifp->if_flags;
3465 	ifp->if_flags |= IFF_UP;
3466 
3467 	/* If we are called from bce_detach(), mii is already NULL. */
3468 	if (mii != NULL) {
3469 		ifm = mii->mii_media.ifm_cur;
3470 		mtmp = ifm->ifm_media;
3471 		ifm->ifm_media = IFM_ETHER | IFM_NONE;
3472 		mii_mediachg(mii);
3473 		ifm->ifm_media = mtmp;
3474 	}
3475 
3476 	ifp->if_flags = itmp;
3477 	sc->watchdog_timer = 0;
3478 
3479 	sc->bce_link = 0;
3480 
3481 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3482 
3483 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3484 }
3485 
3486 
3487 static int
3488 bce_reset(struct bce_softc *sc, u32 reset_code)
3489 {
3490 	u32 val;
3491 	int i, rc = 0;
3492 
3493 	DBPRINT(sc, BCE_VERBOSE_RESET, "%s(): reset_code = 0x%08X\n",
3494 		__FUNCTION__, reset_code);
3495 
3496 	/* Wait for pending PCI transactions to complete. */
3497 	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
3498 	       BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3499 	       BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3500 	       BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3501 	       BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3502 	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3503 	DELAY(5);
3504 
3505 	/* Assume bootcode is running. */
3506 	sc->bce_fw_timed_out = 0;
3507 
3508 	/* Give the firmware a chance to prepare for the reset. */
3509 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
3510 	if (rc)
3511 		goto bce_reset_exit;
3512 
3513 	/* Set a firmware reminder that this is a soft reset. */
3514 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE,
3515 		   BCE_DRV_RESET_SIGNATURE_MAGIC);
3516 
3517 	/* Dummy read to force the chip to complete all current transactions. */
3518 	val = REG_RD(sc, BCE_MISC_ID);
3519 
3520 	/* Chip reset. */
3521 	val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3522 	      BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3523 	      BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3524 	REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
3525 
3526 	/* Allow up to 100us (10 x 10us) for the reset to complete. */
3527 	for (i = 0; i < 10; i++) {
3528 		val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
3529 		if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3530 			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3531 			break;
3532 		}
3533 		DELAY(10);
3534 	}
3535 
3536 	/* Check that reset completed successfully. */
3537 	if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3538 		   BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3539 		BCE_PRINTF("%s(%d): Reset failed!\n",
3540 			__FILE__, __LINE__);
3541 		rc = EBUSY;
3542 		goto bce_reset_exit;
3543 	}
3544 
3545 	/* Make sure byte swapping is properly configured. */
3546 	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
3547 	if (val != 0x01020304) {
3548 		BCE_PRINTF("%s(%d): Byte swap is incorrect!\n",
3549 			__FILE__, __LINE__);
3550 		rc = ENODEV;
3551 		goto bce_reset_exit;
3552 	}
3553 
3554 	/* Just completed a reset; assume the firmware is running again. */
3555 	sc->bce_fw_timed_out = 0;
3556 
3557 	/* Wait for the firmware to finish its initialization. */
3558 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
3559 	if (rc)
3560 		BCE_PRINTF("%s(%d): Firmware did not complete initialization!\n",
3561 			__FILE__, __LINE__);
3562 
3563 bce_reset_exit:
3564 	return (rc);
3565 }
3566 
3567 
3568 static int
3569 bce_chipinit(struct bce_softc *sc)
3570 {
3571 	u32 val;
3572 	int rc = 0;
3573 
3574 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3575 
3576 	/* Make sure the interrupt is not active. */
3577 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3578 
3579 	/*
3580 	 * Initialize DMA byte/word swapping, configure the number of DMA
3581 	 * channels and PCI clock compensation delay.
3582 	 */
3583 	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3584 	      BCE_DMA_CONFIG_DATA_WORD_SWAP |
3585 #if BYTE_ORDER == BIG_ENDIAN
3586 	      BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3587 #endif
3588 	      BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3589 	      DMA_READ_CHANS << 12 |
3590 	      DMA_WRITE_CHANS << 16;
3591 
3592 	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3593 
3594 	if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
3595 		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3596 
3597 	/*
3598 	 * This setting resolves a problem observed on certain Intel PCI
3599 	 * chipsets that cannot handle multiple outstanding DMA operations.
3600 	 * See errata E9_5706A1_65.
3601 	 */
3602 	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
3603 	    (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) &&
3604 	    !(sc->bce_flags & BCE_PCIX_FLAG))
3605 		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3606 
3607 	REG_WR(sc, BCE_DMA_CONFIG, val);
3608 
3609 	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3610 	if (sc->bce_flags & BCE_PCIX_FLAG) {
3611 		u16 val;
3612 
3613 		val = pci_read_config(sc->bce_dev, BCE_PCI_PCIX_CMD, 2);
3614 		pci_write_config(sc->bce_dev, BCE_PCI_PCIX_CMD, val & ~0x2, 2);
3615 	}
3616 
3617 	/* Enable the RX_V2P and Context state machines before access. */
3618 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3619 	       BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3620 	       BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3621 	       BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3622 
3623 	/* Initialize context mapping and zero out the quick contexts. */
3624 	bce_init_ctx(sc);
3625 
3626 	/* Initialize the on-board CPUs. */
3627 	bce_init_cpus(sc);
3628 
3629 	/* Prepare NVRAM for access. */
3630 	if (bce_init_nvram(sc)) {
3631 		rc = ENODEV;
3632 		goto bce_chipinit_exit;
3633 	}
3634 
3635 	/* Set the kernel bypass block size */
3636 	val = REG_RD(sc, BCE_MQ_CONFIG);
3637 	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3638 	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3639 	REG_WR(sc, BCE_MQ_CONFIG, val);
3640 
3641 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3642 	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3643 	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3644 
3645 	/* Set the page size and clear the RV2P processor stall bits. */
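	/*
	 * The page size is encoded as (log2(page size) - 8) starting at bit
	 * 24, so with 4KB pages (BCM_PAGE_BITS == 12) the encoded value is 4.
	 */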
3646 	val = (BCM_PAGE_BITS - 8) << 24;
3647 	REG_WR(sc, BCE_RV2P_CONFIG, val);
3648 
3649 	/* Configure page size. */
3650 	val = REG_RD(sc, BCE_TBDR_CONFIG);
3651 	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3652 	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3653 	REG_WR(sc, BCE_TBDR_CONFIG, val);
3654 
3655 bce_chipinit_exit:
3656 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3657 
3658 	return(rc);
3659 }
3660 
3661 
3662 /****************************************************************************/
3663 /* Initialize the controller in preparation to send/receive traffic.        */
3664 /*                                                                          */
3665 /* Returns:                                                                 */
3666 /*   0 for success, positive value for failure.                             */
3667 /****************************************************************************/
3668 static int
3669 bce_blockinit(struct bce_softc *sc)
3670 {
3671 	u32 reg, val;
3672 	int rc = 0;
3673 
3674 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
3675 
3676 	/* Load the hardware default MAC address. */
3677 	bce_set_mac_addr(sc);
3678 
3679 	/* Set the Ethernet backoff seed value */
3680 	val = sc->eaddr[0]         + (sc->eaddr[1] << 8) +
3681 	      (sc->eaddr[2] << 16) + (sc->eaddr[3]     ) +
3682 	      (sc->eaddr[4] << 8)  + (sc->eaddr[5] << 16);
3683 	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3684 
3685 	sc->last_status_idx = 0;
3686 	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3687 
3688 	/* Set up link change interrupt generation. */
3689 	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3690 
3691 	/* Program the physical address of the status block. */
3692 	REG_WR(sc, BCE_HC_STATUS_ADDR_L,
3693 		BCE_ADDR_LO(sc->status_block_paddr));
3694 	REG_WR(sc, BCE_HC_STATUS_ADDR_H,
3695 		BCE_ADDR_HI(sc->status_block_paddr));
3696 
3697 	/* Program the physical address of the statistics block. */
3698 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3699 		BCE_ADDR_LO(sc->stats_block_paddr));
3700 	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3701 		BCE_ADDR_HI(sc->stats_block_paddr));
3702 
3703 	/* Program various host coalescing parameters. */
3704 	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3705 		(sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip);
3706 	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3707 		(sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip);
3708 	REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3709 		(sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3710 	REG_WR(sc, BCE_HC_TX_TICKS,
3711 		(sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3712 	REG_WR(sc, BCE_HC_RX_TICKS,
3713 		(sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3714 	REG_WR(sc, BCE_HC_COM_TICKS,
3715 		(sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3716 	REG_WR(sc, BCE_HC_CMD_TICKS,
3717 		(sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3718 	REG_WR(sc, BCE_HC_STATS_TICKS,
3719 		(sc->bce_stats_ticks & 0xffff00));
3720 	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS,
3721 		0xbb8);  /* 3ms */
3722 	REG_WR(sc, BCE_HC_CONFIG,
3723 		(BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE |
3724 		BCE_HC_CONFIG_COLLECT_STATS));
3725 
3726 	/* Clear the internal statistics counters. */
3727 	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3728 
3729 	/* Verify that bootcode is running. */
3730 	reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE);
3731 
3732 	DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure),
3733 		BCE_PRINTF("%s(%d): Simulating bootcode failure.\n",
3734 			__FILE__, __LINE__);
3735 		reg = 0);
3736 
3737 	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3738 	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
3739 		BCE_PRINTF("%s(%d): Bootcode not running! Found: 0x%08X, "
3740 			"Expected: 08%08X\n", __FILE__, __LINE__,
3741 			(reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK),
3742 			BCE_DEV_INFO_SIGNATURE_MAGIC);
3743 		rc = ENODEV;
3744 		goto bce_blockinit_exit;
3745 	}
3746 
3747 	/* Allow bootcode to apply any additional fixes before enabling MAC. */
3748 	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3749 
3750 	/* Enable link state change interrupt generation. */
3751 	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3752 
3753 	/* Enable all remaining blocks in the MAC. */
3754 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
3755 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3756 	DELAY(20);
3757 
3758 bce_blockinit_exit:
3759 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
3760 
3761 	return (rc);
3762 }
3763 
3764 
3765 /****************************************************************************/
3766 /* Encapsulate an mbuf into the rx_bd chain.                                */
3767 /*                                                                          */
3768 /* Returns:                                                                 */
3769 /*   0 for success, positive value for failure.                             */
3770 /****************************************************************************/
3771 static int
3772 bce_get_rx_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
3773 	u16 *chain_prod, u32 *prod_bseq)
3774 {
3775 	bus_dmamap_t map;
3776 	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
3777 	struct mbuf *m_new = NULL;
3778 	struct rx_bd *rxbd;
3779 	int nsegs, error, rc = 0;
3780 #ifdef BCE_DEBUG
3781 	u16 debug_chain_prod = *chain_prod;
3782 #endif
3783 
3784 	DBPRINT(sc, (BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD), "Entering %s()\n",
3785 		__FUNCTION__);
3786 
3787 	/* Make sure the inputs are valid. */
3788 	DBRUNIF((*chain_prod > MAX_RX_BD),
3789 		BCE_PRINTF("%s(%d): RX producer out of range: 0x%04X > 0x%04X\n",
3790 		__FILE__, __LINE__, *chain_prod, (u16) MAX_RX_BD));
3791 
3792 	DBPRINT(sc, BCE_VERBOSE, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
3793 		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
3794 
3795 	/* Update some debug statistic counters */
3796 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3797 		sc->rx_low_watermark = sc->free_rx_bd);
3798 	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
3799 
3800 	/* Check whether this is a new mbuf allocation. */
3801 	if (m == NULL) {
3802 
3803 		/* Simulate an mbuf allocation failure. */
3804 		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
3805 			sc->mbuf_alloc_failed++;
3806 			sc->debug_mbuf_sim_alloc_failed++;
3807 			rc = ENOBUFS;
3808 			goto bce_get_rx_buf_exit);
3809 
3810 		/* This is a new mbuf allocation. */
3811 #ifdef BCE_USE_SPLIT_HEADER
3812 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
3813 #else
3814 		m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3815 #endif
3816 		if (m_new == NULL) {
3817 			sc->mbuf_alloc_failed++;
3818 			rc = ENOBUFS;
3819 			goto bce_get_rx_buf_exit;
3820 		}
3821 
3822 		DBRUN(sc->debug_rx_mbuf_alloc++);
3823 	} else {
3824 		/* Reuse an existing mbuf. */
3825 		m_new = m;
3826 	}
3827 
3828 	M_ASSERTPKTHDR(m_new);
3829 
3830 	m_new->m_pkthdr.len = m_new->m_len = sc->rx_bd_mbuf_alloc_size;
3831 
3832 	/* ToDo: Consider calling m_fragment() to test error handling. */
3833 
3834 	/* Map the mbuf cluster into device memory. */
3835 	map = sc->rx_mbuf_map[*chain_prod];
3836 	error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new,
3837 	    segs, &nsegs, BUS_DMA_NOWAIT);
3838 
3839 	/* Handle any mapping errors. */
3840 	if (error) {
3841 		BCE_PRINTF("%s(%d): Error mapping mbuf into RX chain!\n",
3842 			__FILE__, __LINE__);
3843 
3844 		m_freem(m_new);
3845 		DBRUN(sc->debug_rx_mbuf_alloc--);
3846 
3847 		rc = ENOBUFS;
3848 		goto bce_get_rx_buf_exit;
3849 	}
3850 
3851 	/* All mbufs must map to a single segment. */
3852 	KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!",
3853 		 __FUNCTION__, nsegs));
3854 
3855 	/* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREWRITE) here? */
3856 
3857 	/* Setup the rx_bd for the segment. */
3858 	rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
3859 
3860 	rxbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[0].ds_addr));
3861 	rxbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[0].ds_addr));
3862 	rxbd->rx_bd_len       = htole32(segs[0].ds_len);
3863 	rxbd->rx_bd_flags     = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
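	/*
	 * prod_bseq is a running byte count for the RX chain; the updated
	 * value is passed to the hardware by bce_fill_rx_chain().
	 */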
3864 	*prod_bseq += segs[0].ds_len;
3865 
3866 	/* Save the mbuf and update our counter. */
3867 	sc->rx_mbuf_ptr[*chain_prod] = m_new;
3868 	sc->free_rx_bd -= nsegs;
3869 
3870 	DBRUNMSG(BCE_EXCESSIVE, bce_dump_rx_mbuf_chain(sc, debug_chain_prod,
3871 		nsegs));
3872 
3873 	DBPRINT(sc, BCE_VERBOSE, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
3874 		"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
3875 
3876 bce_get_rx_buf_exit:
3877 	DBPRINT(sc, (BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD), "Exiting %s()\n",
3878 		__FUNCTION__);
3879 
3880 	return(rc);
3881 }
3882 
3883 
3884 /****************************************************************************/
3885 /* Encapsulate an mbuf cluster into the page chain.                        */
3886 /*                                                                          */
3887 /* Returns:                                                                 */
3888 /*   0 for success, positive value for failure.                             */
3889 /****************************************************************************/
3890 static int
3891 bce_get_pg_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
3892 	u16 *prod_idx)
3893 {
3894 	bus_dmamap_t map;
3895 	bus_addr_t busaddr;
3896 	struct mbuf *m_new = NULL;
3897 	struct rx_bd *pgbd;
3898 	int error, rc = 0;
3899 #ifdef BCE_DEBUG
3900 	u16 debug_prod_idx = *prod_idx;
3901 #endif
3902 
3903 	DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Entering %s()\n",
3904 		__FUNCTION__);
3905 
3906 	/* Make sure the inputs are valid. */
3907 	DBRUNIF((*prod_idx > MAX_PG_BD),
3908 		BCE_PRINTF("%s(%d): page producer out of range: 0x%04X > 0x%04X\n",
3909 		__FILE__, __LINE__, *prod_idx, (u16) MAX_PG_BD));
3910 
3911 	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, "
3912 		"chain_prod = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);
3913 
3914 	/* Update counters if we've hit a new low or run out of pages. */
3915 	DBRUNIF((sc->free_pg_bd < sc->pg_low_watermark),
3916 		sc->pg_low_watermark = sc->free_pg_bd);
3917 	DBRUNIF((sc->free_pg_bd == sc->max_pg_bd), sc->pg_empty_count++);
3918 
3919 	/* Check whether this is a new mbuf allocation. */
3920 	if (m == NULL) {
3921 
3922 		/* Simulate an mbuf allocation failure. */
3923 		DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
3924 			sc->mbuf_alloc_failed++;
3925 			sc->debug_mbuf_sim_alloc_failed++;
3926 			rc = ENOBUFS;
3927 			goto bce_get_pg_buf_exit);
3928 
3929 		/* This is a new mbuf allocation. */
3930 		m_new = m_getcl(M_DONTWAIT, MT_DATA, 0);
3931 		if (m_new == NULL) {
3932 			sc->mbuf_alloc_failed++;
3933 			rc = ENOBUFS;
3934 			goto bce_get_pg_buf_exit;
3935 		}
3936 
3937 		DBRUN(sc->debug_pg_mbuf_alloc++);
3938 	} else {
3939 		/* Reuse an existing mbuf. */
3940 		m_new = m;
3941 		m_new->m_data = m_new->m_ext.ext_buf;
3942 	}
3943 
3944 	m_new->m_len = sc->pg_bd_mbuf_alloc_size;
3945 
3946 	/* ToDo: Consider calling m_fragment() to test error handling. */
3947 
3948 	/* Map the mbuf cluster into device memory. */
3949 	map = sc->pg_mbuf_map[*prod_idx];
3950 	error = bus_dmamap_load(sc->pg_mbuf_tag, map, mtod(m_new, void *),
3951 	    sc->pg_bd_mbuf_alloc_size, bce_dma_map_addr, &busaddr, BUS_DMA_NOWAIT);
3952 
3953 	/* Handle any mapping errors. */
3954 	if (error) {
3955 		BCE_PRINTF("%s(%d): Error mapping mbuf into page chain!\n",
3956 			__FILE__, __LINE__);
3957 
3958 		m_freem(m_new);
3959 		DBRUN(sc->debug_pg_mbuf_alloc--);
3960 
3961 		rc = ENOBUFS;
3962 		goto bce_get_pg_buf_exit;
3963 	}
3964 
3965 	/* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREWRITE) here? */
3966 
3967 	/*
3968 	 * The page chain uses the same rx_bd data structure
3969 	 * as the receive chain but doesn't require a byte sequence (bseq).
3970 	 */
3971 	pgbd = &sc->pg_bd_chain[PG_PAGE(*prod_idx)][PG_IDX(*prod_idx)];
3972 
3973 	pgbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(busaddr));
3974 	pgbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(busaddr));
3975 	pgbd->rx_bd_len       = htole32(sc->pg_bd_mbuf_alloc_size);
3976 	pgbd->rx_bd_flags     = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
3977 
3978 	/* Save the mbuf and update our counter. */
3979 	sc->pg_mbuf_ptr[*prod_idx] = m_new;
3980 	sc->free_pg_bd--;
3981 
3982 	DBRUNMSG(BCE_VERBOSE_RECV, bce_dump_pg_mbuf_chain(sc, debug_prod_idx,
3983 		1));
3984 
3985 	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, "
3986 		"prod_idx = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);
3987 
3988 bce_get_pg_buf_exit:
3989 	DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Exiting %s()\n",
3990 		__FUNCTION__);
3991 
3992 	return(rc);
3993 }
3994 
3995 
3996 /****************************************************************************/
3997 /* Allocate memory and initialize the TX data structures.                   */
3998 /*                                                                          */
3999 /* Returns:                                                                 */
4000 /*   0 for success, positive value for failure.                             */
4001 /****************************************************************************/
4002 static int
4003 bce_init_tx_chain(struct bce_softc *sc)
4004 {
4005 	struct tx_bd *txbd;
4006 	u32 val;
4007 	int i, rc = 0;
4008 
4009 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4010 
4011 	/* Set the initial TX producer/consumer indices. */
4012 	sc->tx_prod        = 0;
4013 	sc->tx_cons        = 0;
4014 	sc->tx_prod_bseq   = 0;
4015 	sc->used_tx_bd     = 0;
4016 	sc->max_tx_bd      = USABLE_TX_BD;
4017 	DBRUN(sc->tx_hi_watermark = USABLE_TX_BD);
4018 	DBRUN(sc->tx_full_count = 0);
4019 
4020 	/*
4021 	 * The NetXtreme II supports a linked-list structure called
4022 	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
4023 	 * consists of a series of 1 or more chain pages, each of which
4024 	 * consists of a fixed number of BD entries.
4025 	 * The last BD entry on each page is a pointer to the next page
4026 	 * in the chain, and the last pointer in the BD chain
4027 	 * points back to the beginning of the chain.
4028 	 */
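	/*
	 * Pictured with TX_PAGES == 2 (illustration only, N being
	 * USABLE_TX_BD_PER_PAGE):
	 *
	 *   page 0: [bd 0] ... [bd N-1] [next -> page 1]
	 *   page 1: [bd 0] ... [bd N-1] [next -> page 0]
	 */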
4029 
4030 	/* Set the TX next pointer chain entries. */
4031 	for (i = 0; i < TX_PAGES; i++) {
4032 		int j;
4033 
4034 		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
4035 
4036 		/* Check if we've reached the last page. */
4037 		if (i == (TX_PAGES - 1))
4038 			j = 0;
4039 		else
4040 			j = i + 1;
4041 
4042 		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
4043 		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
4044 	}
4045 
4046 	/* Initialize the context ID for an L2 TX chain. */
4047 	val = BCE_L2CTX_TYPE_TYPE_L2;
4048 	val |= BCE_L2CTX_TYPE_SIZE_L2;
4049 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TYPE, val);
4050 
4051 	val = BCE_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4052 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_CMD_TYPE, val);
4053 
4054 	/* Point the hardware to the first page in the chain. */
4055 	val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
4056 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_HI, val);
4057 	val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
4058 	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_LO, val);
4059 
4060 	DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
4061 
4062 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4063 
4064 	return(rc);
4065 }
4066 
4067 
4068 /****************************************************************************/
4069 /* Free memory and clear the TX data structures.                            */
4070 /*                                                                          */
4071 /* Returns:                                                                 */
4072 /*   Nothing.                                                               */
4073 /****************************************************************************/
4074 static void
4075 bce_free_tx_chain(struct bce_softc *sc)
4076 {
4077 	int i;
4078 
4079 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4080 
4081 	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
4082 	for (i = 0; i < TOTAL_TX_BD; i++) {
4083 		if (sc->tx_mbuf_ptr[i] != NULL) {
4084 			if (sc->tx_mbuf_map != NULL)
4085 				bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i],
4086 					BUS_DMASYNC_POSTWRITE);
4087 			m_freem(sc->tx_mbuf_ptr[i]);
4088 			sc->tx_mbuf_ptr[i] = NULL;
4089 			DBRUN(sc->debug_tx_mbuf_alloc--);
4090 		}
4091 	}
4092 
4093 	/* Clear each TX chain page. */
4094 	for (i = 0; i < TX_PAGES; i++)
4095 		bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
4096 
4097 	sc->used_tx_bd     = 0;
4098 
4099 	/* Check if we lost any mbufs in the process. */
4100 	DBRUNIF((sc->debug_tx_mbuf_alloc),
4101 		BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs "
4102 			"from tx chain!\n",
4103 			__FILE__, __LINE__, sc->debug_tx_mbuf_alloc));
4104 
4105 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4106 }
4107 
4108 
4109 /****************************************************************************/
4110 /* Allocate memory and initialize the RX data structures.                   */
4111 /*                                                                          */
4112 /* Returns:                                                                 */
4113 /*   0 for success, positive value for failure.                             */
4114 /****************************************************************************/
4115 static int
4116 bce_init_rx_chain(struct bce_softc *sc)
4117 {
4118 	struct rx_bd *rxbd;
4119 	int i, rc = 0;
4120 	u32 val;
4121 
4122 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4123 
4124 	/* Initialize the RX producer and consumer indices. */
4125 	sc->rx_prod        = 0;
4126 	sc->rx_cons        = 0;
4127 	sc->rx_prod_bseq   = 0;
4128 	sc->free_rx_bd     = USABLE_RX_BD;
4129 	sc->max_rx_bd      = USABLE_RX_BD;
4130 	DBRUN(sc->rx_low_watermark = sc->max_rx_bd);
4131 	DBRUN(sc->rx_empty_count = 0);
4132 
4133 	/* Initialize the RX next pointer chain entries. */
4134 	for (i = 0; i < RX_PAGES; i++) {
4135 		int j;
4136 
4137 		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
4138 
4139 		/* Check if we've reached the last page. */
4140 		if (i == (RX_PAGES - 1))
4141 			j = 0;
4142 		else
4143 			j = i + 1;
4144 
4145 		/* Setup the chain page pointers. */
4146 		rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
4147 		rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
4148 	}
4149 
4150 	/* Initialize the context ID for an L2 RX chain. */
4151 	val = BCE_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4152 	val |= BCE_L2CTX_CTX_TYPE_SIZE_L2;
4153 	val |= 0x02 << 8;
4154 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_CTX_TYPE, val);
4155 
4156 	/* Point the hardware to the first page in the chain. */
4157 	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
4158 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_HI, val);
4159 	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
4160 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_LO, val);
4161 
4162 	/* Fill up the RX chain. */
4163 	bce_fill_rx_chain(sc);
4164 
4165 	for (i = 0; i < RX_PAGES; i++) {
4166 		bus_dmamap_sync(
4167 			sc->rx_bd_chain_tag,
4168 			sc->rx_bd_chain_map[i],
4169 			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4170 	}
4171 
4172 	DBRUNMSG(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));
4173 
4174 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4175 
4176 	return(rc);
4177 }
4178 
4179 
4180 /****************************************************************************/
4181 /* Add mbufs to the RX chain until it's full or an mbuf allocation error    */
4182 /* occurs.                                                                  */
4183 /*                                                                          */
4184 /* Returns:                                                                 */
4185 /*   Nothing                                                                */
4186 /****************************************************************************/
4187 static void
4188 bce_fill_rx_chain(struct bce_softc *sc)
4189 {
4190 	u16 prod, prod_idx;
4191 	u32 prod_bseq;
4192 
4193 	DBPRINT(sc, BCE_VERBOSE_RECV, "Entering %s()\n", __FUNCTION__);
4194 
4195 	prod      = sc->rx_prod;
4196 	prod_bseq = sc->rx_prod_bseq;
4197 
4198 	/* Keep filling the RX chain until it's full. */
4199 	while (sc->free_rx_bd > 0) {
4200 		prod_idx = RX_CHAIN_IDX(prod);
4201 		if (bce_get_rx_buf(sc, NULL, &prod, &prod_idx, &prod_bseq)) {
4202 			/* Bail out if we can't add an mbuf to the chain. */
4203 			break;
4204 		}
4205 		prod = NEXT_RX_BD(prod);
4206 	}
4207 
4208 	/* Save the RX chain producer index. */
4209 	sc->rx_prod      = prod;
4210 	sc->rx_prod_bseq = prod_bseq;
4211 
4212 	DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
4213 		BCE_PRINTF("%s(): Invalid rx_prod value: 0x%04X\n",
4214 		__FUNCTION__, sc->rx_prod));
4215 
4216 	/* Tell the chip about the waiting rx_bd's. */
4217 	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
4218 	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
4219 
4220 	DBPRINT(sc, BCE_VERBOSE_RECV, "Exiting %s()\n", __FUNCTION__);
4221 }
4222 
4223 
4224 /****************************************************************************/
4225 /* Free memory and clear the RX data structures.                            */
4226 /*                                                                          */
4227 /* Returns:                                                                 */
4228 /*   Nothing.                                                               */
4229 /****************************************************************************/
4230 static void
4231 bce_free_rx_chain(struct bce_softc *sc)
4232 {
4233 	int i;
4234 
4235 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4236 
4237 	/* Clear the jumbo page chain support. */
4238 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_PG_BUF_SIZE, 0);
4239 
4240 	/* Free any mbufs still in the RX mbuf chain. */
4241 	for (i = 0; i < TOTAL_RX_BD; i++) {
4242 		if (sc->rx_mbuf_ptr[i] != NULL) {
4243 			if (sc->rx_mbuf_map[i] != NULL)
4244 				bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i],
4245 					BUS_DMASYNC_POSTREAD);
4246 			m_freem(sc->rx_mbuf_ptr[i]);
4247 			sc->rx_mbuf_ptr[i] = NULL;
4248 			DBRUN(sc->debug_rx_mbuf_alloc--);
4249 		}
4250 	}
4251 
4252 	/* Clear each RX chain page. */
4253 	for (i = 0; i < RX_PAGES; i++)
4254 		bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
4255 
4256 	sc->free_rx_bd = sc->max_rx_bd;
4257 
4258 	/* Check if we lost any mbufs in the process. */
4259 	DBRUNIF((sc->debug_rx_mbuf_alloc),
4260 		BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from rx chain!\n",
4261 			__FUNCTION__, sc->debug_rx_mbuf_alloc));
4262 
4263 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4264 }
4265 
4266 
4267 /****************************************************************************/
4268 /* Allocate memory and initialize the page data structures.                 */
4269 /* Assumes that bce_init_rx_chain() has not already been called.            */
4270 /*                                                                          */
4271 /* Returns:                                                                 */
4272 /*   0 for success, positive value for failure.                             */
4273 /****************************************************************************/
4274 static int
4275 bce_init_pg_chain(struct bce_softc *sc)
4276 {
4277 	struct rx_bd *pgbd;
4278 	int i, rc = 0;
4279 	u32 val;
4280 
4281 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
4282 
4283 	/* Initialize the page producer and consumer indices. */
4284 	sc->pg_prod        = 0;
4285 	sc->pg_cons        = 0;
4286 	sc->free_pg_bd     = USABLE_PG_BD;
4287 	sc->max_pg_bd      = USABLE_PG_BD;
4288 	DBRUN(sc->pg_low_watermark = sc->max_pg_bd);
4289 	DBRUN(sc->pg_empty_count = 0);
4290 
4291 	/* Initialize the page next pointer chain entries. */
4292 	for (i = 0; i < PG_PAGES; i++) {
4293 		int j;
4294 
4295 		pgbd = &sc->pg_bd_chain[i][USABLE_PG_BD_PER_PAGE];
4296 
4297 		/* Check if we've reached the last page. */
4298 		if (i == (PG_PAGES - 1))
4299 			j = 0;
4300 		else
4301 			j = i + 1;
4302 
4303 		/* Setup the chain page pointers. */
4304 		pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->pg_bd_chain_paddr[j]));
4305 		pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->pg_bd_chain_paddr[j]));
4306 	}
4307 
4308 	/* Point the hardware to the first page in the page chain. */
4309 	val = BCE_ADDR_HI(sc->pg_bd_chain_paddr[0]);
4310 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_PG_BDHADDR_HI, val);
4311 	val = BCE_ADDR_LO(sc->pg_bd_chain_paddr[0]);
4312 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_PG_BDHADDR_LO, val);
4313 
4314 	/* Configure the rx_bd and page chain mbuf cluster size. */
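	/* rx_bd size in the upper 16 bits, page chain size in the lower 16. */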
4315 	val = (sc->rx_bd_mbuf_alloc_size << 16) | sc->pg_bd_mbuf_alloc_size;
4316 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_PG_BUF_SIZE, val);
4317 
4318 	/* Configure the context reserved for jumbo support. */
4319 	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RBDC_KEY,
4320 		BCE_L2CTX_RBDC_JUMBO_KEY);
4321 
4322 	/* Fill up the page chain. */
4323 	bce_fill_pg_chain(sc);
4324 
4325 	for (i = 0; i < PG_PAGES; i++) {
4326 		bus_dmamap_sync(
4327 			sc->pg_bd_chain_tag,
4328 			sc->pg_bd_chain_map[i],
4329 			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4330 	}
4331 
4332 	DBRUNMSG(BCE_VERBOSE_RECV, bce_dump_pg_chain(sc, 0, TOTAL_PG_BD));
4333 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
4334 
4335 	return(rc);
4336 }
4337 
4338 /****************************************************************************/
4339 /* Add mbufs to the page chain until it's full or an mbuf allocation error  */
4340 /* occurs.                                                                  */
4341 /*                                                                          */
4342 /* Returns:                                                                 */
4343 /*   Nothing                                                                */
4344 /****************************************************************************/
4345 static void
4346 bce_fill_pg_chain(struct bce_softc *sc)
4347 {
4348 	u16 prod, prod_idx;
4349 
4350 	DBPRINT(sc, BCE_EXCESSIVE_RECV, "Entering %s()\n", __FUNCTION__);
4351 
4352 	prod = sc->pg_prod;
4353 
4354 	/* Keep filling the page chain until it's full. */
4355 	while (sc->free_pg_bd > 0) {
4356 		prod_idx = PG_CHAIN_IDX(prod);
4357 		if (bce_get_pg_buf(sc, NULL, &prod, &prod_idx)) {
4358 			/* Bail out if we can't add an mbuf to the chain. */
4359 			break;
4360 		}
4361 		prod = NEXT_PG_BD(prod);
4362 	}
4363 
4364 	/* Save the page chain producer index. */
4365 	sc->pg_prod = prod;
4366 
4367 	DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
4368 		BCE_PRINTF("%s(): Invalid pg_prod value: 0x%04X\n",
4369 		__FUNCTION__, sc->pg_prod));
4370 
4371 	/* Tell the chip about the new rx_bd's in the page chain. */
4372 	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_PG_BDIDX, sc->pg_prod);
4373 
4374 	DBPRINT(sc, BCE_EXCESSIVE_RECV, "Exiting %s()\n", __FUNCTION__);
4375 }
4376 
4377 
4378 /****************************************************************************/
4379 /* Free memory and clear the page data structures.                          */
4380 /*                                                                          */
4381 /* Returns:                                                                 */
4382 /*   Nothing.                                                               */
4383 /****************************************************************************/
4384 static void
4385 bce_free_pg_chain(struct bce_softc *sc)
4386 {
4387 	int i;
4388 
4389 	DBPRINT(sc, BCE_EXCESSIVE_RESET, "Entering %s()\n", __FUNCTION__);
4390 
4391 	/* Free any mbufs still in the mbuf page chain. */
4392 	for (i = 0; i < TOTAL_PG_BD; i++) {
4393 		if (sc->pg_mbuf_ptr[i] != NULL) {
4394 			if (sc->pg_mbuf_map[i] != NULL)
4395 				bus_dmamap_sync(sc->pg_mbuf_tag, sc->pg_mbuf_map[i],
4396 					BUS_DMASYNC_POSTREAD);
4397 			m_freem(sc->pg_mbuf_ptr[i]);
4398 			sc->pg_mbuf_ptr[i] = NULL;
4399 			DBRUN(sc->debug_pg_mbuf_alloc--);
4400 		}
4401 	}
4402 
4403 	/* Clear each page chain page. */
4404 	for (i = 0; i < PG_PAGES; i++)
4405 		bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ);
4406 
4407 	sc->free_pg_bd = sc->max_pg_bd;
4408 
4409 	/* Check if we lost any mbufs in the process. */
4410 	DBRUNIF((sc->debug_pg_mbuf_alloc),
4411 		BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from page chain!\n",
4412 			__FUNCTION__, sc->debug_pg_mbuf_alloc));
4413 
4414 	DBPRINT(sc, BCE_EXCESSIVE_RESET, "Exiting %s()\n", __FUNCTION__);
4415 }
4416 
4417 
4418 /****************************************************************************/
4419 /* Set media options.                                                       */
4420 /*                                                                          */
4421 /* Returns:                                                                 */
4422 /*   0 for success, positive value for failure.                             */
4423 /****************************************************************************/
4424 static int
4425 bce_ifmedia_upd(struct ifnet *ifp)
4426 {
4427 	struct bce_softc *sc;
4428 
4429 	sc = ifp->if_softc;
4430 	BCE_LOCK(sc);
4431 	bce_ifmedia_upd_locked(ifp);
4432 	BCE_UNLOCK(sc);
4433 	return (0);
4434 }
4435 
4436 
4437 /****************************************************************************/
4438 /* Set media options.                                                       */
4439 /*                                                                          */
4440 /* Returns:                                                                 */
4441 /*   Nothing.                                                               */
4442 /****************************************************************************/
4443 static void
4444 bce_ifmedia_upd_locked(struct ifnet *ifp)
4445 {
4446 	struct bce_softc *sc;
4447 	struct mii_data *mii;
4448 	struct ifmedia *ifm;
4449 
4450 	sc = ifp->if_softc;
4451 	ifm = &sc->bce_ifmedia;
4452 	BCE_LOCK_ASSERT(sc);
4453 
4454 	mii = device_get_softc(sc->bce_miibus);
4455 
4456 	/* Make sure the MII bus has been enumerated. */
4457 	if (mii) {
4458 		sc->bce_link = 0;
4459 		if (mii->mii_instance) {
4460 			struct mii_softc *miisc;
4461 
4462 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4463 				mii_phy_reset(miisc);
4464 		}
4465 		mii_mediachg(mii);
4466 	}
4467 }
4468 
4469 
4470 /****************************************************************************/
4471 /* Reports current media status.                                            */
4472 /*                                                                          */
4473 /* Returns:                                                                 */
4474 /*   Nothing.                                                               */
4475 /****************************************************************************/
4476 static void
4477 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4478 {
4479 	struct bce_softc *sc;
4480 	struct mii_data *mii;
4481 
4482 	sc = ifp->if_softc;
4483 
4484 	BCE_LOCK(sc);
4485 
4486 	mii = device_get_softc(sc->bce_miibus);
4487 
4488 	mii_pollstat(mii);
4489 	ifmr->ifm_active = mii->mii_media_active;
4490 	ifmr->ifm_status = mii->mii_media_status;
4491 
4492 	BCE_UNLOCK(sc);
4493 }
4494 
4495 
4496 /****************************************************************************/
4497 /* Handles PHY generated interrupt events.                                  */
4498 /*                                                                          */
4499 /* Returns:                                                                 */
4500 /*   Nothing.                                                               */
4501 /****************************************************************************/
4502 static void
4503 bce_phy_intr(struct bce_softc *sc)
4504 {
4505 	u32 new_link_state, old_link_state;
4506 
4507 	new_link_state = sc->status_block->status_attn_bits &
4508 		STATUS_ATTN_BITS_LINK_STATE;
4509 	old_link_state = sc->status_block->status_attn_bits_ack &
4510 		STATUS_ATTN_BITS_LINK_STATE;
4511 
4512 	/* Handle any changes if the link state has changed. */
4513 	if (new_link_state != old_link_state) {
4514 
4515 		DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
4516 
4517 		sc->bce_link = 0;
4518 		callout_stop(&sc->bce_tick_callout);
4519 		bce_tick(sc);
4520 
4521 		/* Update the status_attn_bits_ack field in the status block. */
4522 		if (new_link_state) {
4523 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
4524 				STATUS_ATTN_BITS_LINK_STATE);
4525 			DBPRINT(sc, BCE_INFO_MISC, "Link is now UP.\n");
4526 		}
4527 		else {
4528 			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
4529 				STATUS_ATTN_BITS_LINK_STATE);
4530 			DBPRINT(sc, BCE_INFO_MISC, "Link is now DOWN.\n");
4531 		}
4532 
4533 	}
4534 
4535 	/* Acknowledge the link change interrupt. */
4536 	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
4537 }
4538 
4539 
4540 /****************************************************************************/
4541 /* Reads the receive consumer value from the status block (skipping over    */
4542 /* the chain page pointer if necessary).                                    */
4543 /*                                                                          */
4544 /* Returns:                                                                 */
4545 /*   hw_cons                                                                */
4546 /****************************************************************************/
4547 static inline u16
4548 bce_get_hw_rx_cons(struct bce_softc *sc)
4549 {
4550 	u16 hw_cons = sc->status_block->status_rx_quick_consumer_index0;
4551 
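	/*
	 * The last BD on each chain page is a pointer to the next page;
	 * if the hardware index lands on it, skip over it.
	 */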
4552 	if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4553 		hw_cons++;
4554 
4555 	return hw_cons;
4556 }
4557 
4558 
4559 /****************************************************************************/
4560 /* Handles received frame interrupt events.                                 */
4561 /*                                                                          */
4562 /* Returns:                                                                 */
4563 /*   Nothing.                                                               */
4564 /****************************************************************************/
4565 static void
4566 bce_rx_intr(struct bce_softc *sc)
4567 {
4568 	struct ifnet *ifp = sc->bce_ifp;
4569 	struct l2_fhdr *l2fhdr;
4570 	unsigned int pages, pkt_len, rem_len;
4571 	u16 sw_rx_cons, sw_rx_cons_idx, sw_pg_cons, sw_pg_cons_idx, hw_rx_cons;
4572 	u32 status;
4573 
4574 
4575 #ifdef BCE_DEBUG
4576 	u32 timer_start, timer_end;
4577 	timer_start = REG_RD(sc, BCE_TIMER_25MHZ_FREE_RUN);
4578 	sc->rx_interrupts++;
4579 #endif
4580 
4581 	/* Prepare the RX chain pages to be accessed by the host CPU. */
4582 	for (int i = 0; i < RX_PAGES; i++)
4583 		bus_dmamap_sync(sc->rx_bd_chain_tag,
4584 		    sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTWRITE);
4585 
4586 	/* Prepare the page chain pages to be accessed by the host CPU. */
4587 	for (int i = 0; i < PG_PAGES; i++)
4588 		bus_dmamap_sync(sc->pg_bd_chain_tag,
4589 		    sc->pg_bd_chain_map[i], BUS_DMASYNC_POSTWRITE);
4590 
4591 	/* Get the hardware's view of the RX consumer index. */
4592 	hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
4593 
4594 	/* Get working copies of the driver's view of the consumer indices. */
4595 	sw_rx_cons = sc->rx_cons;
4596 	sw_pg_cons = sc->pg_cons;
4597 
4598 	DBPRINT(sc, BCE_INFO_RECV, "%s(enter): rx_prod = 0x%04X, "
4599 		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
4600 		__FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
4601 
4602 	/* Update some debug statistics counters */
4603 	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
4604 		sc->rx_low_watermark = sc->free_rx_bd);
4605 	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
4606 
4607 	/* Scan through the receive chain as long as there is work to do */
4608 	/* ToDo: Consider setting a limit on the number of packets processed. */
4609 	while (sw_rx_cons != hw_rx_cons) {
4610 		struct mbuf *m0;
4611 
4612 		/* Convert the producer/consumer indices to an actual rx_bd index. */
4613 		sw_rx_cons_idx = RX_CHAIN_IDX(sw_rx_cons);
4614 
4615 		/* Unmap the mbuf from DMA space. */
4616 		bus_dmamap_sync(sc->rx_mbuf_tag,
4617 		    sc->rx_mbuf_map[sw_rx_cons_idx],
4618 		    BUS_DMASYNC_POSTREAD);
4619 		bus_dmamap_unload(sc->rx_mbuf_tag,
4620 		    sc->rx_mbuf_map[sw_rx_cons_idx]);
4621 
4622 		/* Remove the mbuf from the RX chain. */
4623 		m0 = sc->rx_mbuf_ptr[sw_rx_cons_idx];
4624 		sc->rx_mbuf_ptr[sw_rx_cons_idx] = NULL;
4625 		DBRUN(sc->debug_rx_mbuf_alloc--);
4626 		sc->free_rx_bd++;
4627 
4628 		/*
4629 		 * Frames received on the NetXtreme II are prepended
4630 		 * with an l2_fhdr structure which provides status
4631 		 * information about the received frame (including
4632 		 * VLAN tags and checksum info).  The frames are also
4633 		 * automatically adjusted to align the IP header
4634 		 * (i.e. two null bytes are inserted before the
4635 		 * Ethernet header).  As a result the data DMA'd by
4636 		 * the controller into the mbuf is as follows:
4637 		 * +---------+-----+---------------------+-----+
4638 		 * | l2_fhdr | pad | packet data         | FCS |
4639 		 * +---------+-----+---------------------+-----+
4640 		 * The l2_fhdr needs to be checked and skipped and
4641 		 * the FCS needs to be stripped before sending the
4642 		 * packet up the stack.
4643 		 */
4644 		l2fhdr  = mtod(m0, struct l2_fhdr *);
4645 
4646 		/* Get the packet data + FCS length and the status. */
4647 		pkt_len = l2fhdr->l2_fhdr_pkt_len;
4648 		status  = l2fhdr->l2_fhdr_status;
4649 
4650 		/*
4651 		 * Skip over the l2_fhdr and pad, resulting in the
4652 		 * following data in the mbuf:
4653 		 * +---------------------+-----+
4654 		 * | packet data         | FCS |
4655 		 * +---------------------+-----+
4656 		 */
4657 		m_adj(m0, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4658 
4659 		/*
4660 		 * Check whether the received frame fits in a single
4661 		 * mbuf or not (i.e. packet data + FCS <=
4662 		 * sc->rx_bd_mbuf_alloc_size bytes).
4663 		 */
4664 		if (pkt_len > m0->m_len) {
4665 			/*
4666 			 * The received frame is larger than a single mbuf.
4667 			 * If the frame was a TCP frame then only the TCP
4668 			 * header is placed in the mbuf, the remaining
4669 			 * payload (including FCS) is placed in the page
4670 			 * chain, the SPLIT flag is set, and the header
4671 			 * length is placed in the IP checksum field.
4672 			 * If the frame is not a TCP frame then the mbuf
4673 			 * is filled and the remaining bytes are placed
4674 			 * in the page chain.
4675 			 */
4676 
4677 			DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a large packet.\n",
4678 				__FUNCTION__);
4679 
4680 		 	if (status & L2_FHDR_STATUS_SPLIT)
4681 				m0->m_len = l2fhdr->l2_fhdr_ip_xsum;
4682 
4683 			rem_len = pkt_len - m0->m_len;
4684 
4685 			/* Calculate how many pages to pull off the page chain. */
4686 			/* ToDo: The following assumes that mbuf clusters are 2KB. */
4687 			pages = (rem_len + sc->pg_bd_mbuf_alloc_size) >> 11;
4688 
4689 			/* Pull mbufs off the page chain for the remaining data. */
4690 			while (rem_len > 0) {
4691 				struct mbuf *m_pg;
4692 
4693 				sw_pg_cons_idx = PG_CHAIN_IDX(sw_pg_cons);
4694 
4695 				/* Remove the mbuf from the page chain. */
4696 				m_pg = sc->pg_mbuf_ptr[sw_pg_cons_idx];
4697 				sc->pg_mbuf_ptr[sw_pg_cons_idx] = NULL;
4698 				DBRUN(sc->debug_pg_mbuf_alloc--);
4699 				sc->free_pg_bd++;
4700 
4701 				/* Unmap the page chain mbuf from DMA space. */
4702 				bus_dmamap_sync(sc->pg_mbuf_tag,
4703 					sc->pg_mbuf_map[sw_pg_cons_idx],
4704 					BUS_DMASYNC_POSTREAD);
4705 				bus_dmamap_unload(sc->pg_mbuf_tag,
4706 					sc->pg_mbuf_map[sw_pg_cons_idx]);
4707 
4708 				/* Adjust the mbuf length. */
4709 				if (rem_len < m_pg->m_len) {
4710 					/* The mbuf chain is complete. */
4711 					m_pg->m_len = rem_len;
4712 					rem_len = 0;
4713 				} else {
4714 					/* More packet data is waiting. */
4715 					rem_len -= m_pg->m_len;
4716 				}
4717 
4718 				/* Concatenate the mbuf cluster to the mbuf. */
4719 				m_cat(m0, m_pg);
4720 
4721 				sw_pg_cons = NEXT_PG_BD(sw_pg_cons);
4722 			}
4723 
4724 			/* Set the total packet length. */
4725 			m0->m_pkthdr.len = pkt_len;
4726 
4727 		} else {
4728 			/*
4729 			 * The received packet is small and fits in a
4730 			 * single mbuf (i.e. the l2_fhdr + pad + packet +
4731 			 * FCS <= MHLEN).  In other words, the packet is
4732 			 * 154 bytes or less in size.
4733 			 */
4734 
4735 			DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a small packet.\n",
4736 				__FUNCTION__);
4737 
4738 			/* Set the total packet length. */
4739 			m0->m_pkthdr.len = m0->m_len = pkt_len;
4740 		}
4741 
4742 		/* Remove the trailing Ethernet FCS. */
4743 		m_adj(m0, -ETHER_CRC_LEN);
4744 
4745 		/* Check that the resulting mbuf chain is valid. */
4746 		DBRUN(m_sanity(m0, FALSE));
4747 
4748 		DBRUNIF((m0->m_len < ETHER_HDR_LEN),
4749 			BCE_PRINTF("%s(): Unexpected length = %d!.\n",
4750 				__FUNCTION__, m0->m_len);
4751 			bce_breakpoint(sc));
4752 
4753 		DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
4754 			BCE_PRINTF("Simulating l2_fhdr status error.\n");
4755 			status = status | L2_FHDR_ERRORS_PHY_DECODE);
4756 
4757 		/* Check the received frame for errors. */
4758 		if (status & (L2_FHDR_ERRORS_BAD_CRC |
4759 			L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
4760 			L2_FHDR_ERRORS_TOO_SHORT  | L2_FHDR_ERRORS_GIANT_FRAME)) {
4761 
4762 			/* Log the error and release the mbuf. */
4763 			ifp->if_ierrors++;
4764 			DBRUN(sc->l2fhdr_status_errors++);
4765 
4766 			m_freem(m0);
4767 			m0 = NULL;
4768 			goto bce_rx_int_next_rx;
4769 		}
4770 
4771 		/* Record the interface that received the packet. */
4772 		m0->m_pkthdr.rcvif = ifp;
4773 
4774 		/* Assume no hardware checksum. */
4775 		m0->m_pkthdr.csum_flags = 0;
4776 
4777 		/* Validate the checksum if offload enabled. */
4778 		if (ifp->if_capenable & IFCAP_RXCSUM) {
4779 
4780 			/* Check for an IP datagram. */
4781 		 	if (!(status & L2_FHDR_STATUS_SPLIT) &&
4782 				(status & L2_FHDR_STATUS_IP_DATAGRAM)) {
4783 				m0->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4784 
4785 				/* Check if the IP checksum is valid. */
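				/*
				 * The controller reports the IP header
				 * checksum it computed; a value of 0xffff
				 * (XOR with 0xffff yields zero) presumably
				 * indicates the header checksummed correctly.
				 */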
4786 				if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
4787 					m0->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4788 			}
4789 
4790 			/* Check for a valid TCP/UDP frame. */
4791 			if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4792 				L2_FHDR_STATUS_UDP_DATAGRAM)) {
4793 
4794 				/* Check for a good TCP/UDP checksum. */
4795 				if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
4796 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4797 					m0->m_pkthdr.csum_data =
4798 					    l2fhdr->l2_fhdr_tcp_udp_xsum;
4799 					m0->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
4800 						| CSUM_PSEUDO_HDR);
4801 				}
4802 			}
4803 		}
4804 
4805 		/*
4806 		 * If we received a packet with a vlan tag,
4807 		 * attach that information to the packet.
4808 		 */
4809 		if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
4810 #if __FreeBSD_version < 700000
4811 			VLAN_INPUT_TAG(ifp, m0, l2fhdr->l2_fhdr_vlan_tag, continue);
4812 #else
4813 			m0->m_pkthdr.ether_vtag = l2fhdr->l2_fhdr_vlan_tag;
4814 			m0->m_flags |= M_VLANTAG;
4815 #endif
4816 		}
4817 
4818 		/* Count the frame; it is passed up the stack below. */
4819 		ifp->if_ipackets++;
4820 
4821 bce_rx_int_next_rx:
4822 		sw_rx_cons = NEXT_RX_BD(sw_rx_cons);
4823 
4824 		/* If we have a packet, pass it up the stack */
4825 		if (m0) {
4826 			/* Make sure we don't lose our place when we release the lock. */
4827 			sc->rx_cons = sw_rx_cons;
4828 			sc->pg_cons = sw_pg_cons;
4829 
4830 			BCE_UNLOCK(sc);
4831 			(*ifp->if_input)(ifp, m0);
4832 			BCE_LOCK(sc);
4833 
4834 			/* Recover our place. */
4835 			sw_rx_cons = sc->rx_cons;
4836 			sw_pg_cons = sc->pg_cons;
4837 		}
4838 
4839 		/* Refresh hw_cons to see if there's new work */
4840 		if (sw_rx_cons == hw_rx_cons)
4841 			hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
4842 	}
4843 
4844 	/* No new packets to process.  Refill the RX and page chains and exit. */
4845 	sc->pg_cons = sw_pg_cons;
4846 	bce_fill_pg_chain(sc);
4847 
4848 	sc->rx_cons = sw_rx_cons;
4849 	bce_fill_rx_chain(sc);
4850 
4851 	for (int i = 0; i < RX_PAGES; i++)
4852 		bus_dmamap_sync(sc->rx_bd_chain_tag,
4853 		    sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
4854 
4855 	for (int i = 0; i < PG_PAGES; i++)
4856 		bus_dmamap_sync(sc->pg_bd_chain_tag,
4857 		    sc->pg_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
4858 
4859 	DBPRINT(sc, BCE_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
4860 		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
4861 		__FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
4862 
4863 #ifdef BCE_DEBUG
4864 	timer_end = REG_RD(sc, BCE_TIMER_25MHZ_FREE_RUN);
4865 	sc->rx_intr_time += (u64) (timer_start > timer_end ?
4866 		(timer_start - timer_end) : (~timer_start + timer_end + 1));
4867 #endif
4868 }
4869 
4870 
4871 /****************************************************************************/
4872 /* Reads the transmit consumer value from the status block (skipping over   */
4873 /* chain page pointer if necessary).                                        */
4874 /*                                                                          */
4875 /* Returns:                                                                 */
4876 /*   hw_cons                                                                */
4877 /****************************************************************************/
4878 static inline u16
4879 bce_get_hw_tx_cons(struct bce_softc *sc)
4880 {
4881 	u16 hw_cons = sc->status_block->status_tx_quick_consumer_index0;
4882 
4883 	if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4884 		hw_cons++;
4885 
4886 	return hw_cons;
4887 }
4888 
4889 
4890 /****************************************************************************/
4891 /* Handles transmit completion interrupt events.                            */
4892 /*                                                                          */
4893 /* Returns:                                                                 */
4894 /*   Nothing.                                                               */
4895 /****************************************************************************/
4896 static void
4897 bce_tx_intr(struct bce_softc *sc)
4898 {
4899 	struct ifnet *ifp = sc->bce_ifp;
4900 	u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
4901 
4902 #ifdef BCE_DEBUG
4903 	u32 timer_start, timer_end;
4904 	timer_start = REG_RD(sc, BCE_TIMER_25MHZ_FREE_RUN);
4905 	sc->tx_interrupts++;
4906 #endif
4907 
4908 	BCE_LOCK_ASSERT(sc);
4909 
4910 	/* Get the hardware's view of the TX consumer index. */
4911 	hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
4912 	sw_tx_cons = sc->tx_cons;
4913 
4914 	/* Prevent speculative reads from getting ahead of the status block. */
4915 	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4916 		BUS_SPACE_BARRIER_READ);
4917 
4918 	/* Cycle through any completed TX chain page entries. */
4919 	while (sw_tx_cons != hw_tx_cons) {
4920 #ifdef BCE_DEBUG
4921 		struct tx_bd *txbd = NULL;
4922 #endif
4923 		sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons);
4924 
4925 		DBPRINT(sc, BCE_INFO_SEND,
4926 			"%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, "
4927 			"sw_tx_chain_cons = 0x%04X\n",
4928 			__FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
4929 
4930 		DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
4931 			BCE_PRINTF("%s(%d): TX chain consumer out of range! "
4932 				" 0x%04X > 0x%04X\n", __FILE__, __LINE__, sw_tx_chain_cons,
4933 				(int) MAX_TX_BD);
4934 			bce_breakpoint(sc));
4935 
4936 		DBRUN(txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
4937 				[TX_IDX(sw_tx_chain_cons)]);
4938 
4939 		DBRUNIF((txbd == NULL),
4940 			BCE_PRINTF("%s(%d): Unexpected NULL tx_bd[0x%04X]!\n",
4941 				__FILE__, __LINE__, sw_tx_chain_cons);
4942 			bce_breakpoint(sc));
4943 
4944 		DBRUNMSG(BCE_INFO_SEND, BCE_PRINTF("%s(): ", __FUNCTION__);
4945 			bce_dump_txbd(sc, sw_tx_chain_cons, txbd));
4946 
4947 		/*
4948 		 * Free the associated mbuf. Remember
4949 		 * that only the last tx_bd of a packet
4950 		 * has an mbuf pointer and DMA map.
4951 		 */
4952 		if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
4953 
4954 			/* Validate that this is the last tx_bd. */
4955 			DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)),
4956 				BCE_PRINTF("%s(%d): tx_bd END flag not set but "
4957 				"txmbuf == NULL!\n", __FILE__, __LINE__);
4958 				bce_breakpoint(sc));
4959 
4960 			DBRUNMSG(BCE_INFO_SEND,
4961 				BCE_PRINTF("%s(): Unloading map/freeing mbuf "
4962 					"from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons));
4963 
4964 			/* Unmap the mbuf. */
4965 			bus_dmamap_unload(sc->tx_mbuf_tag,
4966 			    sc->tx_mbuf_map[sw_tx_chain_cons]);
4967 
4968 			/* Free the mbuf. */
4969 			m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
4970 			sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
4971 			DBRUN(sc->debug_tx_mbuf_alloc--);
4972 
4973 			ifp->if_opackets++;
4974 		}
4975 
4976 		sc->used_tx_bd--;
4977 		sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4978 
4979 		/* Refresh hw_cons to see if there's new work. */
4980 		hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc);
4981 
4982 		/* Prevent speculative reads from getting ahead of the status block. */
4983 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
4984 			BUS_SPACE_BARRIER_READ);
4985 	}
4986 
4987 	/* Clear the TX timeout timer. */
4988 	sc->watchdog_timer = 0;
4989 
4990 	/* Clear the tx hardware queue full flag. */
4991 	if (sc->used_tx_bd < sc->max_tx_bd) {
4992 		DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE),
4993 			DBPRINT(sc, BCE_INFO_SEND,
4994 				"%s(): Open TX chain! %d/%d (used/total)\n",
4995 				__FUNCTION__, sc->used_tx_bd, sc->max_tx_bd));
4996 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4997 	}
4998 
4999 	sc->tx_cons = sw_tx_cons;
5000 #ifdef BCE_DEBUG
5001 	timer_end = REG_RD(sc, BCE_TIMER_25MHZ_FREE_RUN);
5002 	sc->tx_intr_time += (u64) (timer_start > timer_end ?
5003 		(timer_start - timer_end) : (~timer_start + timer_end + 1));
5004 #endif
5005 }
5006 
5007 
5008 /****************************************************************************/
5009 /* Disables interrupt generation.                                           */
5010 /*                                                                          */
5011 /* Returns:                                                                 */
5012 /*   Nothing.                                                               */
5013 /****************************************************************************/
5014 static void
5015 bce_disable_intr(struct bce_softc *sc)
5016 {
5017 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5018 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
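	/*
	 * The read back flushes the posted write so interrupts are
	 * actually masked before this routine returns.
	 */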
5019 	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
5020 }
5021 
5022 
5023 /****************************************************************************/
5024 /* Enables interrupt generation.                                            */
5025 /*                                                                          */
5026 /* Returns:                                                                 */
5027 /*   Nothing.                                                               */
5028 /****************************************************************************/
5029 static void
5030 bce_enable_intr(struct bce_softc *sc)
5031 {
5032 	u32 val;
5033 
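	/*
	 * Acknowledge the last status block index with interrupts still
	 * masked, then unmask them.  Setting COAL_NOW below forces the
	 * host coalescing block to generate an interrupt right away if
	 * any work arrived while interrupts were disabled (assumed
	 * behavior of the coalescing hardware).
	 */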
5034 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5035 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
5036 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
5037 
5038 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5039 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
5040 
5041 	val = REG_RD(sc, BCE_HC_COMMAND);
5042 	REG_WR(sc, BCE_HC_COMMAND, val | BCE_HC_COMMAND_COAL_NOW);
5043 }
5044 
5045 
5046 /****************************************************************************/
5047 /* Handles controller initialization.                                       */
5048 /*                                                                          */
5049 /* Returns:                                                                 */
5050 /*   Nothing.                                                               */
5051 /****************************************************************************/
5052 static void
5053 bce_init_locked(struct bce_softc *sc)
5054 {
5055 	struct ifnet *ifp;
5056 	u32 ether_mtu = 0;
5057 
5058 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
5059 
5060 	BCE_LOCK_ASSERT(sc);
5061 
5062 	ifp = sc->bce_ifp;
5063 
5064 	/* Check if the driver is still running and bail out if it is. */
5065 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5066 		goto bce_init_locked_exit;
5067 
5068 	bce_stop(sc);
5069 
5070 	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
5071 		BCE_PRINTF("%s(%d): Controller reset failed!\n",
5072 			__FILE__, __LINE__);
5073 		goto bce_init_locked_exit;
5074 	}
5075 
5076 	if (bce_chipinit(sc)) {
5077 		BCE_PRINTF("%s(%d): Controller initialization failed!\n",
5078 			__FILE__, __LINE__);
5079 		goto bce_init_locked_exit;
5080 	}
5081 
5082 	if (bce_blockinit(sc)) {
5083 		BCE_PRINTF("%s(%d): Block initialization failed!\n",
5084 			__FILE__, __LINE__);
5085 		goto bce_init_locked_exit;
5086 	}
5087 
5088 	/* Load our MAC address. */
5089 	bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
5090 	bce_set_mac_addr(sc);
5091 
5092 	/* Calculate and program the hardware Ethernet MTU size. */
5093 	if (ifp->if_mtu <= sc->pg_bd_mbuf_alloc_size)
5094 		/* Be generous on receive if we have room. */
5095 		ether_mtu = sc->pg_bd_mbuf_alloc_size;
5096 	else
5097 		ether_mtu = ifp->if_mtu;
5098 
5099 	ether_mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN;
5100 
5101 	DBPRINT(sc, BCE_INFO_MISC, "%s(): setting h/w mtu = %d\n", __FUNCTION__,
5102 		ether_mtu);
5103 
5104 	/* Program the mtu, enabling jumbo frame support if necessary. */
5105 	if (ether_mtu > (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN))
5106 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
5107 			min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
5108 			BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
5109 	else
5110 		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
5111 
5112 	DBPRINT(sc, BCE_INFO_LOAD,
5113 		"%s(): rx_bd_mbuf_alloc_size = %d, pg_bd_mbuf_alloc_size = %d\n",
5114 		__FUNCTION__, sc->rx_bd_mbuf_alloc_size, sc->pg_bd_mbuf_alloc_size);
5115 
5116 	/* Program appropriate promiscuous/multicast filtering. */
5117 	bce_set_rx_mode(sc);
5118 
5119 	/* Init page buffer descriptor chain. */
5120 	bce_init_pg_chain(sc);
5121 
5122 	/* Init RX buffer descriptor chain. */
5123 	bce_init_rx_chain(sc);
5124 
5125 	/* Init TX buffer descriptor chain. */
5126 	bce_init_tx_chain(sc);
5127 
5128 	/* Enable host interrupts. */
5129 	bce_enable_intr(sc);
5130 
5131 	bce_ifmedia_upd_locked(ifp);
5132 
5133 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
5134 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5135 
5136 	callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
5137 
5138 bce_init_locked_exit:
5139 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
5140 
5141 	return;
5142 }
5143 
5144 
5145 /****************************************************************************/
5146 /* Initialize the controller just enough so that any management firmware    */
5147 /* running on the device will continue to operate correctly.                */
5148 /*                                                                          */
5149 /* Returns:                                                                 */
5150 /*   Nothing.                                                               */
5151 /****************************************************************************/
5152 static void
5153 bce_mgmt_init_locked(struct bce_softc *sc)
5154 {
5155 	struct ifnet *ifp;
5156 
5157 	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
5158 
5159 	BCE_LOCK_ASSERT(sc);
5160 
5161 	/* Bail out if management firmware is not running. */
5162 	if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) {
5163 		DBPRINT(sc, BCE_VERBOSE_SPECIAL,
5164 			"No management firmware running...\n");
5165 		goto bce_mgmt_init_locked_exit;
5166 	}
5167 
5168 	ifp = sc->bce_ifp;
5169 
5170 	/* Enable all critical blocks in the MAC. */
5171 	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
5172 	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
5173 	DELAY(20);
5174 
5175 	bce_ifmedia_upd_locked(ifp);
5176 bce_mgmt_init_locked_exit:
5177 	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
5178 
5179 	return;
5180 }
5181 
5182 
5183 /****************************************************************************/
5184 /* Handles controller initialization when called from an unlocked routine.  */
5185 /*                                                                          */
5186 /* Returns:                                                                 */
5187 /*   Nothing.                                                               */
5188 /****************************************************************************/
5189 static void
5190 bce_init(void *xsc)
5191 {
5192 	struct bce_softc *sc = xsc;
5193 
5194 	BCE_LOCK(sc);
5195 	bce_init_locked(sc);
5196 	BCE_UNLOCK(sc);
5197 }
5198 
5199 
5200 /****************************************************************************/
5201 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
5202 /* memory visible to the controller.                                        */
5203 /*                                                                          */
5204 /* Returns:                                                                 */
5205 /*   0 for success, positive value for failure.                             */
5206 /* Modified:                                                                */
5207 /*   m_head: May be set to NULL if MBUF is excessively fragmented.          */
5208 /****************************************************************************/
5209 static int
5210 bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head)
5211 {
5212 	bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
5213 	bus_dmamap_t map;
5214 	struct tx_bd *txbd = NULL;
5215 	struct mbuf *m0;
5216 	struct ether_vlan_header *eh;
5217 	struct ip *ip;
5218 	struct tcphdr *th;
5219 	u16 prod, chain_prod, etype, mss = 0, vlan_tag = 0, flags = 0;
5220 	u32 prod_bseq;
5221 	int hdr_len = 0, e_hlen = 0, ip_hlen = 0, tcp_hlen = 0, ip_len = 0;
5222 
5223 
5224 #ifdef BCE_DEBUG
5225 	u16 debug_prod;
5226 #endif
5227 	int i, error, nsegs, rc = 0;
5228 
5229 	/* Transfer any checksum offload flags to the bd. */
5230 	m0 = *m_head;
5231 	if (m0->m_pkthdr.csum_flags) {
5232 		if (m0->m_pkthdr.csum_flags & CSUM_IP)
5233 			flags |= TX_BD_FLAGS_IP_CKSUM;
5234 		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
5235 			flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5236 		if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5237 			/* For TSO the controller needs two pieces of info, */
5238 			/* the MSS and the IP+TCP options length.           */
5239 			mss = htole16(m0->m_pkthdr.tso_segsz);
5240 
5241 			/* Map the header and find the Ethernet type & header length */
5242 			eh = mtod(m0, struct ether_vlan_header *);
5243 			if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
5244 				etype = ntohs(eh->evl_proto);
5245 				e_hlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5246 			} else {
5247 				etype = ntohs(eh->evl_encap_proto);
5248 				e_hlen = ETHER_HDR_LEN;
5249 			}
5250 
5251 			/* Check for supported TSO Ethernet types (only IPv4 for now) */
5252 			switch (etype) {
5253 				case ETHERTYPE_IP:
5254 					ip = (struct ip *)(m0->m_data + e_hlen);
5255 
5256 					/* TSO only supported for TCP protocol */
5257 					if (ip->ip_p != IPPROTO_TCP) {
5258 						BCE_PRINTF("%s(%d): TSO enabled for non-TCP frame!.\n",
5259 							__FILE__, __LINE__);
5260 						goto bce_tx_encap_skip_tso;
5261 					}
5262 
5263 					/* Get IP header length in bytes (min 20) */
5264 					ip_hlen = ip->ip_hl << 2;
5265 
5266 					/* Get the TCP header length in bytes (min 20) */
5267 					th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
5268 					tcp_hlen = (th->th_off << 2);
5269 
5270 					/* The IP total length and checksum will be recomputed by hardware */
5271 					ip_len = ip->ip_len;
5272 					ip->ip_len = 0;
5273 					ip->ip_sum = 0;
5274 					break;
5275 				case ETHERTYPE_IPV6:
5276 					BCE_PRINTF("%s(%d): TSO over IPv6 not supported!.\n",
5277 						__FILE__, __LINE__);
5278 					goto bce_tx_encap_skip_tso;
5279 				default:
5280 					BCE_PRINTF("%s(%d): TSO enabled for unsupported protocol!.\n",
5281 						__FILE__, __LINE__);
5282 					goto bce_tx_encap_skip_tso;
5283 			}
5284 
5285 			hdr_len = e_hlen + ip_hlen + tcp_hlen;
5286 
5287 			DBPRINT(sc, BCE_EXCESSIVE_SEND,
5288 				"%s(): hdr_len = %d, e_hlen = %d, ip_hlen = %d, tcp_hlen = %d, ip_len = %d\n",
5289 				 __FUNCTION__, hdr_len, e_hlen, ip_hlen, tcp_hlen, ip_len);
5290 
5291 			/* Set the LSO flag in the TX BD */
5292 			flags |= TX_BD_FLAGS_SW_LSO;
5293 			/* Set the length of IP + TCP options (in 32 bit words) */
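			/* e.g. 20 byte IP + 20 byte TCP headers (no options)
			 * yield 0; each additional 4 bytes of options adds
			 * one to the value placed in the upper byte of flags. */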
5294 			flags |= (((ip_hlen + tcp_hlen - 40) >> 2) << 8);
5295 
5296 bce_tx_encap_skip_tso:
5297 			DBRUN(sc->requested_tso_frames++);
5298 		}
5299 	}
5300 
5301 	/* Transfer any VLAN tags to the bd. */
5302 	if (m0->m_flags & M_VLANTAG) {
5303 		flags |= TX_BD_FLAGS_VLAN_TAG;
5304 		vlan_tag = m0->m_pkthdr.ether_vtag;
5305 	}
5306 
5307 	/* Map the mbuf into DMAable memory. */
5308 	prod = sc->tx_prod;
5309 	chain_prod = TX_CHAIN_IDX(prod);
5310 	map = sc->tx_mbuf_map[chain_prod];
5311 
5312 	/* Map the mbuf into our DMA address space. */
5313 	error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
5314 	    segs, &nsegs, BUS_DMA_NOWAIT);
5315 
5316 	/* Check if the DMA mapping was successful */
5317 	if (error == EFBIG) {
5318 
5319 		/* The mbuf is too fragmented for our DMA mapping. */
5320    		DBPRINT(sc, BCE_WARN, "%s(): fragmented mbuf (%d pieces)\n",
5321 			__FUNCTION__, nsegs);
5322 		DBRUN(bce_dump_mbuf(sc, m0););
5323 
5324 		/* Try to defrag the mbuf. */
5325 		m0 = m_defrag(*m_head, M_DONTWAIT);
5326 		if (m0 == NULL) {
5327 			/* Defrag was unsuccessful */
5328 			m_freem(*m_head);
5329 			*m_head = NULL;
5330 			sc->mbuf_alloc_failed++;
5331 			return (ENOBUFS);
5332 		}
5333 
5334 		/* Defrag was successful, try mapping again */
5335 		*m_head = m0;
5336 		error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0,
5337 		    segs, &nsegs, BUS_DMA_NOWAIT);
5338 
5339 		/* Still getting an error after a defrag. */
5340 		if (error == ENOMEM) {
5341 			/* Insufficient DMA buffers available. */
5342 			sc->tx_dma_map_failures++;
5343 			return (error);
5344 		} else if (error != 0) {
5345 			/* Still can't map the mbuf, release it and return an error. */
5346 			BCE_PRINTF(
5347 			    "%s(%d): Unknown error mapping mbuf into TX chain!\n",
5348 			    __FILE__, __LINE__);
5349 			m_freem(m0);
5350 			*m_head = NULL;
5351 			sc->tx_dma_map_failures++;
5352 			return (ENOBUFS);
5353 		}
5354 	} else if (error == ENOMEM) {
5355 		/* Insufficient DMA buffers available. */
5356 		sc->tx_dma_map_failures++;
5357 		return (error);
5358 	} else if (error != 0) {
5359 		m_freem(m0);
5360 		*m_head = NULL;
5361 		sc->tx_dma_map_failures++;
5362 		return (error);
5363 	}
5364 
5365 	/* Make sure there's room in the chain */
5366 	if (nsegs > (sc->max_tx_bd - sc->used_tx_bd)) {
5367 		bus_dmamap_unload(sc->tx_mbuf_tag, map);
5368 		return (ENOBUFS);
5369 	}
5370 
5371 	/* prod points to an empty tx_bd at this point. */
5372 	prod_bseq  = sc->tx_prod_bseq;
5373 
5374 #ifdef BCE_DEBUG
5375 	debug_prod = chain_prod;
5376 #endif
5377 
5378 	DBPRINT(sc, BCE_INFO_SEND,
5379 		"%s(start): prod = 0x%04X, chain_prod = 0x%04X, "
5380 		"prod_bseq = 0x%08X\n",
5381 		__FUNCTION__, prod, chain_prod, prod_bseq);
5382 
5383 	/*
5384 	 * Cycle through each mbuf segment that makes up
5385 	 * the outgoing frame, gathering the mapping info
5386 	 * for that segment and creating a tx_bd for
5387 	 * the mbuf.
5388 	 */
5389 	for (i = 0; i < nsegs ; i++) {
5390 
5391 		chain_prod = TX_CHAIN_IDX(prod);
5392 		txbd= &sc->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
5393 
5394 		txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
5395 		txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
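		/*
		 * The upper 16 bits of tx_bd_mss_nbytes carry the MSS
		 * (meaningful only for TSO frames); the lower 16 bits
		 * carry this segment's length in bytes.
		 */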
5396 		txbd->tx_bd_mss_nbytes = htole32(mss << 16) | htole16(segs[i].ds_len);
5397 		txbd->tx_bd_vlan_tag = htole16(vlan_tag);
5398 		txbd->tx_bd_flags = htole16(flags);
5399 		prod_bseq += segs[i].ds_len;
5400 		if (i == 0)
5401 			txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
5402 		prod = NEXT_TX_BD(prod);
5403 	}
5404 
5405 	/* Set the END flag on the last TX buffer descriptor. */
5406 	txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
5407 
5408 	DBRUNMSG(BCE_EXCESSIVE_SEND, bce_dump_tx_chain(sc, debug_prod, nsegs));
5409 
5410 	DBPRINT(sc, BCE_INFO_SEND,
5411 		"%s( end ): prod = 0x%04X, chain_prod = 0x%04X, "
5412 		"prod_bseq = 0x%08X\n",
5413 		__FUNCTION__, prod, chain_prod, prod_bseq);
5414 
5415 	/*
5416 	 * Ensure that the mbuf pointer for this transmission
5417 	 * is placed at the array index of the last
5418 	 * descriptor in this chain.  This is done
5419 	 * because a single map is used for all
5420 	 * segments of the mbuf and we don't want to
5421 	 * unload the map before all of the segments
5422 	 * have been freed.
5423 	 */
5424 	sc->tx_mbuf_ptr[chain_prod] = m0;
5425 	sc->used_tx_bd += nsegs;
5426 
5427 	/* Update some debug statistic counters */
5428 	DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
5429 		sc->tx_hi_watermark = sc->used_tx_bd);
5430 	DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
5431 	DBRUNIF(sc->debug_tx_mbuf_alloc++);
5432 
5433 	DBRUNMSG(BCE_EXCESSIVE_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod, 1));
5434 
5435 	/* prod points to the next free tx_bd at this point. */
5436 	sc->tx_prod = prod;
5437 	sc->tx_prod_bseq = prod_bseq;
5438 
5439 	return(rc);
5440 }
5441 
5442 
5443 /****************************************************************************/
5444 /* Main transmit routine when called from another routine with a lock.      */
5445 /*                                                                          */
5446 /* Returns:                                                                 */
5447 /*   Nothing.                                                               */
5448 /****************************************************************************/
5449 static void
5450 bce_start_locked(struct ifnet *ifp)
5451 {
5452 	struct bce_softc *sc = ifp->if_softc;
5453 	struct mbuf *m_head = NULL;
5454 	int count = 0;
5455 	u16 tx_prod, tx_chain_prod;
5456 
5457 	/* prod points to the next free tx_bd. */
5458 	tx_prod = sc->tx_prod;
5459 	tx_chain_prod = TX_CHAIN_IDX(tx_prod);
5460 
5461 	DBPRINT(sc, BCE_INFO_SEND,
5462 		"%s(enter): tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
5463 		"tx_prod_bseq = 0x%08X\n",
5464 		__FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);
5465 
5466 	/* If there's no link or the transmit queue is empty then just exit. */
5467 	if (!sc->bce_link) {
5468 		DBPRINT(sc, BCE_INFO_SEND, "%s(): No link.\n",
5469 			__FUNCTION__);
5470 		goto bce_start_locked_exit;
5471 	}
5472 
5473 	if (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
5474 		DBPRINT(sc, BCE_INFO_SEND, "%s(): Transmit queue empty.\n",
5475 			__FUNCTION__);
5476 		goto bce_start_locked_exit;
5477 	}
5478 
5479 	/*
5480 	 * Keep adding entries while there is space in the ring.
5481 	 */
5482 	while (sc->used_tx_bd < sc->max_tx_bd) {
5483 
5484 		/* Check for any frames to send. */
5485 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
5486 		if (m_head == NULL)
5487 			break;
5488 
5489 		/*
5490 		 * Pack the data into the transmit ring. If we
5491 		 * don't have room, place the mbuf back at the
5492 		 * head of the queue and set the OACTIVE flag
5493 		 * to wait for the NIC to drain the chain.
5494 		 */
5495 		if (bce_tx_encap(sc, &m_head)) {
5496 			if (m_head != NULL)
5497 				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
5498 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
5499 			DBPRINT(sc, BCE_INFO_SEND,
5500 				"TX chain is closed for business! Total tx_bd used = %d\n",
5501 				sc->used_tx_bd);
5502 			break;
5503 		}
5504 
5505 		count++;
5506 
5507 		/* Send a copy of the frame to any BPF listeners. */
5508 		ETHER_BPF_MTAP(ifp, m_head);
5509 	}
5510 
5511 	if (count == 0) {
5512 		/* no packets were dequeued */
5513 		DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were dequeued\n",
5514 			__FUNCTION__);
5515 		goto bce_start_locked_exit;
5516 	}
5517 
5518 	/* Update the driver's counters. */
5519 	tx_chain_prod = TX_CHAIN_IDX(sc->tx_prod);
5520 
5521 	/* Start the transmit. */
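	/*
	 * Writing the producer index and the cumulative byte sequence to
	 * the TX mailbox serves as the doorbell telling the controller
	 * that new tx_bd entries are ready.
	 */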
5522 	REG_WR16(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BIDX, sc->tx_prod);
5523 	REG_WR(sc, MB_TX_CID_ADDR + BCE_L2CTX_TX_HOST_BSEQ, sc->tx_prod_bseq);
5524 
5525 	/* Set the tx timeout. */
5526 	sc->watchdog_timer = BCE_TX_TIMEOUT;
5527 
5528 bce_start_locked_exit:
5529 	DBPRINT(sc, BCE_INFO_SEND,
5530 		"%s(exit ): tx_prod = 0x%04X, tx_chain_prod = 0x%04X, "
5531 		"tx_prod_bseq = 0x%08X\n",
5532 		__FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq);
5533 
5534 	return;
5535 }
5536 
5537 
5538 /****************************************************************************/
5539 /* Main transmit routine when called from another routine without a lock.   */
5540 /*                                                                          */
5541 /* Returns:                                                                 */
5542 /*   Nothing.                                                               */
5543 /****************************************************************************/
5544 static void
5545 bce_start(struct ifnet *ifp)
5546 {
5547 	struct bce_softc *sc = ifp->if_softc;
5548 
5549 	BCE_LOCK(sc);
5550 	bce_start_locked(ifp);
5551 	BCE_UNLOCK(sc);
5552 }
5553 
5554 
5555 /****************************************************************************/
5556 /* Handles any IOCTL calls from the operating system.                       */
5557 /*                                                                          */
5558 /* Returns:                                                                 */
5559 /*   0 for success, positive value for failure.                             */
5560 /****************************************************************************/
5561 static int
5562 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
5563 {
5564 	struct bce_softc *sc = ifp->if_softc;
5565 	struct ifreq *ifr = (struct ifreq *) data;
5566 	struct mii_data *mii;
5567 	int mask, error = 0;
5568 
5569 	switch(command) {
5570 
5571 		/* Set the interface MTU. */
5572 		case SIOCSIFMTU:
5573 			/* Check that the MTU setting is supported. */
5574 			if ((ifr->ifr_mtu < BCE_MIN_MTU) ||
5575 				(ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) {
5576 				error = EINVAL;
5577 				break;
5578 			}
5579 
5580 			DBPRINT(sc, BCE_INFO_MISC,
5581 				"SIOCSIFMTU: Changing MTU from %d to %d\n",
5582 				(int) ifp->if_mtu, (int) ifr->ifr_mtu);
5583 
5584 			BCE_LOCK(sc);
5585 			ifp->if_mtu = ifr->ifr_mtu;
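			/*
			 * Clear IFF_DRV_RUNNING so bce_init_locked() performs
			 * a full re-initialization with the new MTU instead of
			 * returning early.
			 */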
5586 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5587 			bce_init_locked(sc);
5588 			BCE_UNLOCK(sc);
5589 			break;
5590 
5591 		/* Set interface flags. */
5592 		case SIOCSIFFLAGS:
5593 			DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Received SIOCSIFFLAGS\n");
5594 
5595 			BCE_LOCK(sc);
5596 
5597 			/* Check if the interface is up. */
5598 			if (ifp->if_flags & IFF_UP) {
5599 				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5600 					/* Change promiscuous/multicast flags as necessary. */
5601 					bce_set_rx_mode(sc);
5602 				} else {
5603 					/* Start the HW */
5604 					bce_init_locked(sc);
5605 				}
5606 			} else {
5607 				/* The interface is down, check if driver is running. */
5608 				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5609 					bce_stop(sc);
5610 
5611 					/* If MFW is running, partially re-init the controller so it keeps running. */
5612 					if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
5613 						bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
5614 						bce_chipinit(sc);
5615 						bce_mgmt_init_locked(sc);
5616 					}
5617 				}
5618 			}
5619 
5620 			BCE_UNLOCK(sc);
5621 			error = 0;
5622 
5623 			break;
5624 
5625 		/* Add/Delete multicast address */
5626 		case SIOCADDMULTI:
5627 		case SIOCDELMULTI:
5628 			DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCADDMULTI/SIOCDELMULTI\n");
5629 
5630 			BCE_LOCK(sc);
5631 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5632 				bce_set_rx_mode(sc);
5633 				error = 0;
5634 			}
5635 			BCE_UNLOCK(sc);
5636 
5637 			break;
5638 
5639 		/* Set/Get Interface media */
5640 		case SIOCSIFMEDIA:
5641 		case SIOCGIFMEDIA:
5642 			DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n");
5643 
5644 			mii = device_get_softc(sc->bce_miibus);
5645 			error = ifmedia_ioctl(ifp, ifr,
5646 			    &mii->mii_media, command);
5647 			break;
5648 
5649 		/* Set interface capability */
5650 		case SIOCSIFCAP:
5651 			mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5652 			DBPRINT(sc, BCE_INFO_MISC, "Received SIOCSIFCAP = 0x%08X\n", (u32) mask);
5653 
5654 			/* Toggle the TX checksum capabilities enable flag. */
5655 			if (mask & IFCAP_TXCSUM) {
5656 				ifp->if_capenable ^= IFCAP_TXCSUM;
5657 				if (IFCAP_TXCSUM & ifp->if_capenable)
5658 					ifp->if_hwassist = BCE_IF_HWASSIST;
5659 				else
5660 					ifp->if_hwassist = 0;
5661 			}
5662 
5663 			/* Toggle the RX checksum capabilities enable flag. */
5664 			if (mask & IFCAP_RXCSUM) {
5665 				ifp->if_capenable ^= IFCAP_RXCSUM;
5666 				if (IFCAP_RXCSUM & ifp->if_capenable)
5667 					ifp->if_hwassist = BCE_IF_HWASSIST;
5668 				else
5669 					ifp->if_hwassist = 0;
5670 			}
5671 
5672 			/* Toggle the TSO capabilities enable flag. */
5673 			if (bce_tso_enable && (mask & IFCAP_TSO4)) {
5674 				ifp->if_capenable ^= IFCAP_TSO4;
5675 				if (IFCAP_RXCSUM & ifp->if_capenable)
5676 					ifp->if_hwassist = BCE_IF_HWASSIST;
5677 				else
5678 					ifp->if_hwassist = 0;
5679 			}
5680 
5681 			/* Toggle VLAN_MTU capabilities enable flag. */
5682 			if (mask & IFCAP_VLAN_MTU) {
5683 				BCE_PRINTF("%s(%d): Changing VLAN_MTU not supported.\n",
5684 					__FILE__, __LINE__);
5685 			}
5686 
5687 			/* Toggle VLANHWTAG capabilities enabled flag. */
5688 			if (mask & IFCAP_VLAN_HWTAGGING) {
5689 				if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
5690 					BCE_PRINTF("%s(%d): Cannot change VLAN_HWTAGGING while "
5691 						"management firmware (ASF/IPMI/UMP) is running!\n",
5692 						__FILE__, __LINE__);
5693 				else
5694 					BCE_PRINTF("%s(%d): Changing VLAN_HWTAGGING not supported!\n",
5695 						__FILE__, __LINE__);
5696 			}
5697 
5698 			break;
5699 		default:
5700 			/* We don't know how to handle the IOCTL, pass it on. */
5701 			error = ether_ioctl(ifp, command, data);
5702 			break;
5703 	}
5704 
5705 	return(error);
5706 }
5707 
5708 
5709 /****************************************************************************/
5710 /* Transmit timeout handler.                                                */
5711 /*                                                                          */
5712 /* Returns:                                                                 */
5713 /*   Nothing.                                                               */
5714 /****************************************************************************/
5715 static void
5716 bce_watchdog(struct bce_softc *sc)
5717 {
5718 
5719 	BCE_LOCK_ASSERT(sc);
5720 
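	/*
	 * Return if the watchdog is disarmed (timer == 0) or has not yet
	 * counted down to zero; only fall through when it expires.
	 */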
5721 	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
5722 		return;
5723 
5724 	/*
5725 	 * If we are in this routine because of pause frames, then
5726 	 * don't reset the hardware.
5727 	 */
5728 	if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
5729 		return;
5730 
5731 	BCE_PRINTF("%s(%d): Watchdog timeout occurred, resetting!\n",
5732 		__FILE__, __LINE__);
5733 
5734 	DBRUNMSG(BCE_VERBOSE_SEND,
5735 		bce_dump_driver_state(sc);
5736 		bce_dump_status_block(sc));
5737 
5738 	/* DBRUN(BCE_FATAL, bce_breakpoint(sc)); */
5739 
5740 	sc->bce_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5741 
5742 	bce_init_locked(sc);
5743 	sc->bce_ifp->if_oerrors++;
5744 
5745 }
5746 
5747 
5748 /*
5749  * Interrupt handler.
5750  */
5751 /****************************************************************************/
5752 /* Main interrupt entry point.  Verifies that the controller generated the  */
5753 /* interrupt and then calls a separate routine to handle the various        */
5754 /* interrupt causes (PHY, TX, RX).                                          */
5755 /*                                                                          */
5756 /* Returns:                                                                 */
5757 /*   Nothing.                                                               */
5758 /****************************************************************************/
5759 static void
5760 bce_intr(void *xsc)
5761 {
5762 	struct bce_softc *sc;
5763 	struct ifnet *ifp;
5764 	u32 status_attn_bits;
5765 	u16 hw_rx_cons, hw_tx_cons;
5766 
5767 	sc = xsc;
5768 	ifp = sc->bce_ifp;
5769 
5770 	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __FUNCTION__);
5771 	BCE_LOCK(sc);
5772 
5773 	DBRUN(sc->interrupts_generated++);
5774 
5775 	bus_dmamap_sync(sc->status_tag, sc->status_map,
5776 	    BUS_DMASYNC_POSTWRITE);
5777 
5778 	/*
5779 	 * If the hardware status block index
5780 	 * matches the last value read by the
5781 	 * driver and we haven't asserted our
5782 	 * interrupt then there's nothing to do.
5783 	 */
5784 	if ((sc->status_block->status_idx == sc->last_status_idx) &&
5785 		(REG_RD(sc, BCE_PCICFG_MISC_STATUS) & BCE_PCICFG_MISC_STATUS_INTA_VALUE))
5786 		goto bce_intr_exit;
5787 
5788 	/* Ack the interrupt and stop others from occurring. */
5789 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5790 		BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5791 		BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5792 
5793 	/* Check if the hardware has finished any work. */
5794 	hw_rx_cons = bce_get_hw_rx_cons(sc);
5795 	hw_tx_cons = bce_get_hw_tx_cons(sc);
5796 
5797 	/* Keep processing data as long as there is work to do. */
5798 	for (;;) {
5799 
5800 		status_attn_bits = sc->status_block->status_attn_bits;
5801 
5802 		DBRUNIF(DB_RANDOMTRUE(bce_debug_unexpected_attention),
5803 			BCE_PRINTF("Simulating unexpected status attention bit set.");
5804 			status_attn_bits = status_attn_bits | STATUS_ATTN_BITS_PARITY_ERROR);
5805 
5806 		/* Was it a link change interrupt? */
5807 		if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5808 			(sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
5809 			bce_phy_intr(sc);
5810 
5811 		/* If any other attention is asserted then the chip is toast. */
5812 		if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5813 			(sc->status_block->status_attn_bits_ack &
5814 			~STATUS_ATTN_BITS_LINK_STATE))) {
5815 
5816 			DBRUN(sc->unexpected_attentions++);
5817 
5818 			BCE_PRINTF("%s(%d): Fatal attention detected: 0x%08X\n",
5819 				__FILE__, __LINE__, sc->status_block->status_attn_bits);
5820 
5821 			DBRUNMSG(BCE_FATAL,
5822 				if (bce_debug_unexpected_attention == 0)
5823 					bce_breakpoint(sc));
5824 
5825 			bce_init_locked(sc);
5826 			goto bce_intr_exit;
5827 		}
5828 
5829 		/* Check for any completed RX frames. */
5830 		if (hw_rx_cons != sc->hw_rx_cons)
5831 			bce_rx_intr(sc);
5832 
5833 		/* Check for any completed TX frames. */
5834 		if (hw_tx_cons != sc->hw_tx_cons)
5835 			bce_tx_intr(sc);
5836 
5837 		/* Save the status block index value for use during the next interrupt. */
5838 		sc->last_status_idx = sc->status_block->status_idx;
5839 
5840 		/* Prevent speculative reads from getting ahead of the status block. */
5841 		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
5842 			BUS_SPACE_BARRIER_READ);
5843 
5844 		/* If there's no work left then exit the interrupt service routine. */
5845 		hw_rx_cons = bce_get_hw_rx_cons(sc);
5846 		hw_tx_cons = bce_get_hw_tx_cons(sc);
5847 
5848 		if ((hw_rx_cons == sc->hw_rx_cons) && (hw_tx_cons == sc->hw_tx_cons))
5849 			break;
5850 
5851 	}
5852 
5853 	bus_dmamap_sync(sc->status_tag,	sc->status_map,
5854 	    BUS_DMASYNC_PREWRITE);
5855 
5856 	/* Re-enable interrupts. */
5857 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5858 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx |
5859 	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5860 	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5861 	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
5862 
5863 	/* Handle any frames that arrived while handling the interrupt. */
5864 	if (ifp->if_drv_flags & IFF_DRV_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
5865 		bce_start_locked(ifp);
5866 
5867 bce_intr_exit:
5868 	BCE_UNLOCK(sc);
5869 }
5870 
5871 
5872 /****************************************************************************/
5873 /* Programs the various packet receive modes (broadcast and multicast).     */
5874 /*                                                                          */
5875 /* Returns:                                                                 */
5876 /*   Nothing.                                                               */
5877 /****************************************************************************/
5878 static void
5879 bce_set_rx_mode(struct bce_softc *sc)
5880 {
5881 	struct ifnet *ifp;
5882 	struct ifmultiaddr *ifma;
5883 	u32 hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
5884 	u32 rx_mode, sort_mode;
5885 	int h, i;
5886 
5887 	BCE_LOCK_ASSERT(sc);
5888 
5889 	ifp = sc->bce_ifp;
5890 
5891 	/* Initialize receive mode default settings. */
5892 	rx_mode   = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
5893 			    BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
5894 	sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
5895 
5896 	/*
5897 	 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5898 	 * be enabled.
5899 	 */
5900 	if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
5901 		(!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)))
5902 		rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
5903 
5904 	/*
5905 	 * Check for promiscuous, all multicast, or selected
5906 	 * multicast address filtering.
5907 	 */
5908 	if (ifp->if_flags & IFF_PROMISC) {
5909 		DBPRINT(sc, BCE_INFO_MISC, "Enabling promiscuous mode.\n");
5910 
5911 		/* Enable promiscuous mode. */
5912 		rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
5913 		sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
5914 	} else if (ifp->if_flags & IFF_ALLMULTI) {
5915 		DBPRINT(sc, BCE_INFO_MISC, "Enabling all multicast mode.\n");
5916 
5917 		/* Enable all multicast addresses. */
5918 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5919 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff);
5920 		}
5921 		sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
5922 	} else {
5923 		/* Accept one or more multicast(s). */
5924 		DBPRINT(sc, BCE_INFO_MISC, "Enabling selective multicast mode.\n");
5925 
5926 		IF_ADDR_LOCK(ifp);
5927 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
5928 			if (ifma->ifma_addr->sa_family != AF_LINK)
5929 				continue;
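			/*
			 * Hash the address into the 256-bit multicast filter:
			 * bits 7:5 of the CRC select one of the eight 32-bit
			 * hash registers, bits 4:0 select the bit within it.
			 */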
5930 			h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
5931 			    ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
5932 			hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
5933 		}
5934 		IF_ADDR_UNLOCK(ifp);
5935 
5936 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++)
5937 			REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]);
5938 
5939 		sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
5940 	}
5941 
5942 	/* Only make changes if the receive mode has actually changed. */
5943 	if (rx_mode != sc->rx_mode) {
5944 		DBPRINT(sc, BCE_VERBOSE_MISC, "Enabling new receive mode: 0x%08X\n",
5945 			rx_mode);
5946 
5947 		sc->rx_mode = rx_mode;
5948 		REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
5949 	}
5950 
5951 	/* Disable and clear the existing sort before enabling a new sort. */
5952 	REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
5953 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
5954 	REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
5955 }
5956 
5957 
5958 /****************************************************************************/
5959 /* Called periodically to update statistics from the controller's           */
5960 /* statistics block.                                                        */
5961 /*                                                                          */
5962 /* Returns:                                                                 */
5963 /*   Nothing.                                                               */
5964 /****************************************************************************/
5965 static void
5966 bce_stats_update(struct bce_softc *sc)
5967 {
5968 	struct ifnet *ifp;
5969 	struct statistics_block *stats;
5970 
5971 	DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __FUNCTION__);
5972 
5973 	ifp = sc->bce_ifp;
5974 
5975 	stats = (struct statistics_block *) sc->stats_block;
5976 
5977 	/*
5978 	 * Update the interface statistics from the
5979 	 * hardware statistics.
5980 	 */
5981 	ifp->if_collisions = (u_long) stats->stat_EtherStatsCollisions;
5982 
5983 	ifp->if_ierrors = (u_long) stats->stat_EtherStatsUndersizePkts +
5984 				      (u_long) stats->stat_EtherStatsOverrsizePkts +
5985 					  (u_long) stats->stat_IfInMBUFDiscards +
5986 					  (u_long) stats->stat_Dot3StatsAlignmentErrors +
5987 					  (u_long) stats->stat_Dot3StatsFCSErrors;
5988 
5989 	ifp->if_oerrors = (u_long) stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5990 					  (u_long) stats->stat_Dot3StatsExcessiveCollisions +
5991 					  (u_long) stats->stat_Dot3StatsLateCollisions;
5992 
5993 	/*
5994 	 * Certain controllers don't report
5995 	 * carrier sense errors correctly.
5996 	 * See errata E11_5708CA0_1165.
5997 	 */
5998 	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
5999 	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0))
6000 		ifp->if_oerrors += (u_long) stats->stat_Dot3StatsCarrierSenseErrors;
6001 
6002 	/*
6003 	 * Update the sysctl statistics from the
6004 	 * hardware statistics.
6005 	 */
6006 	sc->stat_IfHCInOctets =
6007 		((u64) stats->stat_IfHCInOctets_hi << 32) +
6008 		 (u64) stats->stat_IfHCInOctets_lo;
6009 
6010 	sc->stat_IfHCInBadOctets =
6011 		((u64) stats->stat_IfHCInBadOctets_hi << 32) +
6012 		 (u64) stats->stat_IfHCInBadOctets_lo;
6013 
6014 	sc->stat_IfHCOutOctets =
6015 		((u64) stats->stat_IfHCOutOctets_hi << 32) +
6016 		 (u64) stats->stat_IfHCOutOctets_lo;
6017 
6018 	sc->stat_IfHCOutBadOctets =
6019 		((u64) stats->stat_IfHCOutBadOctets_hi << 32) +
6020 		 (u64) stats->stat_IfHCOutBadOctets_lo;
6021 
6022 	sc->stat_IfHCInUcastPkts =
6023 		((u64) stats->stat_IfHCInUcastPkts_hi << 32) +
6024 		 (u64) stats->stat_IfHCInUcastPkts_lo;
6025 
6026 	sc->stat_IfHCInMulticastPkts =
6027 		((u64) stats->stat_IfHCInMulticastPkts_hi << 32) +
6028 		 (u64) stats->stat_IfHCInMulticastPkts_lo;
6029 
6030 	sc->stat_IfHCInBroadcastPkts =
6031 		((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) +
6032 		 (u64) stats->stat_IfHCInBroadcastPkts_lo;
6033 
6034 	sc->stat_IfHCOutUcastPkts =
6035 		((u64) stats->stat_IfHCOutUcastPkts_hi << 32) +
6036 		 (u64) stats->stat_IfHCOutUcastPkts_lo;
6037 
6038 	sc->stat_IfHCOutMulticastPkts =
6039 		((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) +
6040 		 (u64) stats->stat_IfHCOutMulticastPkts_lo;
6041 
6042 	sc->stat_IfHCOutBroadcastPkts =
6043 		((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) +
6044 		 (u64) stats->stat_IfHCOutBroadcastPkts_lo;
6045 
6046 	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
6047 		stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
6048 
6049 	sc->stat_Dot3StatsCarrierSenseErrors =
6050 		stats->stat_Dot3StatsCarrierSenseErrors;
6051 
6052 	sc->stat_Dot3StatsFCSErrors =
6053 		stats->stat_Dot3StatsFCSErrors;
6054 
6055 	sc->stat_Dot3StatsAlignmentErrors =
6056 		stats->stat_Dot3StatsAlignmentErrors;
6057 
6058 	sc->stat_Dot3StatsSingleCollisionFrames =
6059 		stats->stat_Dot3StatsSingleCollisionFrames;
6060 
6061 	sc->stat_Dot3StatsMultipleCollisionFrames =
6062 		stats->stat_Dot3StatsMultipleCollisionFrames;
6063 
6064 	sc->stat_Dot3StatsDeferredTransmissions =
6065 		stats->stat_Dot3StatsDeferredTransmissions;
6066 
6067 	sc->stat_Dot3StatsExcessiveCollisions =
6068 		stats->stat_Dot3StatsExcessiveCollisions;
6069 
6070 	sc->stat_Dot3StatsLateCollisions =
6071 		stats->stat_Dot3StatsLateCollisions;
6072 
6073 	sc->stat_EtherStatsCollisions =
6074 		stats->stat_EtherStatsCollisions;
6075 
6076 	sc->stat_EtherStatsFragments =
6077 		stats->stat_EtherStatsFragments;
6078 
6079 	sc->stat_EtherStatsJabbers =
6080 		stats->stat_EtherStatsJabbers;
6081 
6082 	sc->stat_EtherStatsUndersizePkts =
6083 		stats->stat_EtherStatsUndersizePkts;
6084 
6085 	sc->stat_EtherStatsOverrsizePkts =
6086 		stats->stat_EtherStatsOverrsizePkts;
6087 
6088 	sc->stat_EtherStatsPktsRx64Octets =
6089 		stats->stat_EtherStatsPktsRx64Octets;
6090 
6091 	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
6092 		stats->stat_EtherStatsPktsRx65Octetsto127Octets;
6093 
6094 	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
6095 		stats->stat_EtherStatsPktsRx128Octetsto255Octets;
6096 
6097 	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
6098 		stats->stat_EtherStatsPktsRx256Octetsto511Octets;
6099 
6100 	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
6101 		stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
6102 
6103 	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
6104 		stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
6105 
6106 	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
6107 		stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
6108 
6109 	sc->stat_EtherStatsPktsTx64Octets =
6110 		stats->stat_EtherStatsPktsTx64Octets;
6111 
6112 	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
6113 		stats->stat_EtherStatsPktsTx65Octetsto127Octets;
6114 
6115 	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
6116 		stats->stat_EtherStatsPktsTx128Octetsto255Octets;
6117 
6118 	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
6119 		stats->stat_EtherStatsPktsTx256Octetsto511Octets;
6120 
6121 	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
6122 		stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
6123 
6124 	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
6125 		stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
6126 
6127 	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
6128 		stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
6129 
6130 	sc->stat_XonPauseFramesReceived =
6131 		stats->stat_XonPauseFramesReceived;
6132 
6133 	sc->stat_XoffPauseFramesReceived =
6134 		stats->stat_XoffPauseFramesReceived;
6135 
6136 	sc->stat_OutXonSent =
6137 		stats->stat_OutXonSent;
6138 
6139 	sc->stat_OutXoffSent =
6140 		stats->stat_OutXoffSent;
6141 
6142 	sc->stat_FlowControlDone =
6143 		stats->stat_FlowControlDone;
6144 
6145 	sc->stat_MacControlFramesReceived =
6146 		stats->stat_MacControlFramesReceived;
6147 
6148 	sc->stat_XoffStateEntered =
6149 		stats->stat_XoffStateEntered;
6150 
6151 	sc->stat_IfInFramesL2FilterDiscards =
6152 		stats->stat_IfInFramesL2FilterDiscards;
6153 
6154 	sc->stat_IfInRuleCheckerDiscards =
6155 		stats->stat_IfInRuleCheckerDiscards;
6156 
6157 	sc->stat_IfInFTQDiscards =
6158 		stats->stat_IfInFTQDiscards;
6159 
6160 	sc->stat_IfInMBUFDiscards =
6161 		stats->stat_IfInMBUFDiscards;
6162 
6163 	sc->stat_IfInRuleCheckerP4Hit =
6164 		stats->stat_IfInRuleCheckerP4Hit;
6165 
6166 	sc->stat_CatchupInRuleCheckerDiscards =
6167 		stats->stat_CatchupInRuleCheckerDiscards;
6168 
6169 	sc->stat_CatchupInFTQDiscards =
6170 		stats->stat_CatchupInFTQDiscards;
6171 
6172 	sc->stat_CatchupInMBUFDiscards =
6173 		stats->stat_CatchupInMBUFDiscards;
6174 
6175 	sc->stat_CatchupInRuleCheckerP4Hit =
6176 		stats->stat_CatchupInRuleCheckerP4Hit;
6177 
6178 	sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
6179 
6180 	DBPRINT(sc, BCE_EXCESSIVE, "Exiting %s()\n", __FUNCTION__);
6181 }
6182 
6183 
6184 /****************************************************************************/
6185 /* Periodic function to notify the bootcode that the driver is still        */
6186 /* present.                                                                 */
6187 /*                                                                          */
6188 /* Returns:                                                                 */
6189 /*   Nothing.                                                               */
6190 /****************************************************************************/
6191 static void
6192 bce_pulse(void *xsc)
6193 {
6194 	struct bce_softc *sc = xsc;
6195 	u32 msg;
6196 
6197 	DBPRINT(sc, BCE_EXCESSIVE_MISC, "pulse\n");
6198 
6199 	BCE_LOCK_ASSERT(sc);
6200 
6201 	/* Tell the firmware that the driver is still running. */
6202 	msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq;
6203 	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_PULSE_MB, msg);
6204 
6205 	/* Schedule the next pulse. */
6206 	callout_reset(&sc->bce_pulse_callout, hz, bce_pulse, sc);
6207 
6208 	return;
6209 }
6210 
6211 
6212 /****************************************************************************/
6213 /* Periodic function to perform maintenance tasks.                          */
6214 /*                                                                          */
6215 /* Returns:                                                                 */
6216 /*   Nothing.                                                               */
6217 /****************************************************************************/
6218 static void
6219 bce_tick(void *xsc)
6220 {
6221 	struct bce_softc *sc = xsc;
6222 	struct mii_data *mii;
6223 	struct ifnet *ifp;
6224 
6225 	ifp = sc->bce_ifp;
6226 
6227 	BCE_LOCK_ASSERT(sc);
6228 
6229 	/* Schedule the next tick. */
6230 	callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
6231 
6232 	/* Update the statistics from the hardware statistics block. */
6233 	bce_stats_update(sc);
6234 
6235 	/* Top off the receive and page chains. */
6236 	bce_fill_pg_chain(sc);
6237 	bce_fill_rx_chain(sc);
6238 
6239 	/* Check that chip hasn't hung. */
6240 	bce_watchdog(sc);
6241 
6242 	/* If the link is already up then we're done. */
6243 	if (sc->bce_link)
6244 		goto bce_tick_locked_exit;
6245 
6246 	mii = device_get_softc(sc->bce_miibus);
6247 	mii_tick(mii);
6248 
6249 	/* Check if the link has come up. */
6250 	if (!sc->bce_link && mii->mii_media_status & IFM_ACTIVE &&
6251 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
6252 		sc->bce_link++;
6253 		if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
6254 		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) &&
6255 		    bootverbose)
6256 			BCE_PRINTF("Gigabit link up\n");
6257 
6258 		/* Now that link is up, handle any outstanding TX traffic. */
6259 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
6260 			bce_start_locked(ifp);
6261 	}
6262 
6263 bce_tick_locked_exit:
6264 	return;
6265 }
6266 
6267 
6268 #ifdef BCE_DEBUG
6269 /****************************************************************************/
6270 /* Allows the driver state to be dumped through the sysctl interface.       */
6271 /*                                                                          */
6272 /* Returns:                                                                 */
6273 /*   0 for success, positive value for failure.                             */
6274 /****************************************************************************/
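/*
 * Usage sketch (assuming the default per-device sysctl naming and unit 0):
 * writing a 1 to the node triggers the dump, e.g.
 *
 *	sysctl dev.bce.0.driver_state=1
 *
 * Any other value is ignored; reading the node simply returns -1.
 */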
6275 static int
6276 bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS)
6277 {
6278         int error;
6279         int result;
6280         struct bce_softc *sc;
6281 
6282         result = -1;
6283         error = sysctl_handle_int(oidp, &result, 0, req);
6284 
6285         if (error || !req->newptr)
6286                 return (error);
6287 
6288         if (result == 1) {
6289                 sc = (struct bce_softc *)arg1;
6290                 bce_dump_driver_state(sc);
6291         }
6292 
6293         return (error);
6294 }
6295 
6296 
6297 /****************************************************************************/
6298 /* Allows the hardware state to be dumped through the sysctl interface.     */
6299 /*                                                                          */
6300 /* Returns:                                                                 */
6301 /*   0 for success, positive value for failure.                             */
6302 /****************************************************************************/
6303 static int
6304 bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS)
6305 {
6306         int error;
6307         int result;
6308         struct bce_softc *sc;
6309 
6310         result = -1;
6311         error = sysctl_handle_int(oidp, &result, 0, req);
6312 
6313         if (error || !req->newptr)
6314                 return (error);
6315 
6316         if (result == 1) {
6317                 sc = (struct bce_softc *)arg1;
6318                 bce_dump_hw_state(sc);
6319         }
6320 
6321         return (error);
6322 }
6323 
6324 
6325 /****************************************************************************/
6326 /* Allows the bootcode state to be dumped through the sysctl interface.     */
6327 /*                                                                          */
6328 /* Returns:                                                                 */
6329 /*   0 for success, positive value for failure.                             */
6330 /****************************************************************************/
6331 static int
6332 bce_sysctl_bc_state(SYSCTL_HANDLER_ARGS)
6333 {
6334         int error;
6335         int result;
6336         struct bce_softc *sc;
6337 
6338         result = -1;
6339         error = sysctl_handle_int(oidp, &result, 0, req);
6340 
6341         if (error || !req->newptr)
6342                 return (error);
6343 
6344         if (result == 1) {
6345                 sc = (struct bce_softc *)arg1;
6346                 bce_dump_bc_state(sc);
6347         }
6348 
6349         return (error);
6350 }
6351 
6352 
6353 /****************************************************************************/
6354 /* Provides a sysctl interface to allow dumping the RX chain.               */
6355 /*                                                                          */
6356 /* Returns:                                                                 */
6357 /*   0 for success, positive value for failure.                             */
6358 /****************************************************************************/
6359 static int
6360 bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS)
6361 {
6362         int error;
6363         int result;
6364         struct bce_softc *sc;
6365 
6366         result = -1;
6367         error = sysctl_handle_int(oidp, &result, 0, req);
6368 
6369         if (error || !req->newptr)
6370                 return (error);
6371 
6372         if (result == 1) {
6373                 sc = (struct bce_softc *)arg1;
6374                 bce_dump_rx_chain(sc, 0, TOTAL_RX_BD);
6375         }
6376 
6377         return (error);
6378 }
6379 
6380 
6381 /****************************************************************************/
6382 /* Provides a sysctl interface to allow dumping the TX chain.               */
6383 /*                                                                          */
6384 /* Returns:                                                                 */
6385 /*   0 for success, positive value for failure.                             */
6386 /****************************************************************************/
6387 static int
6388 bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS)
6389 {
6390         int error;
6391         int result;
6392         struct bce_softc *sc;
6393 
6394         result = -1;
6395         error = sysctl_handle_int(oidp, &result, 0, req);
6396 
6397         if (error || !req->newptr)
6398                 return (error);
6399 
6400         if (result == 1) {
6401                 sc = (struct bce_softc *)arg1;
6402                 bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
6403         }
6404 
6405         return (error);
6406 }
6407 
6408 
6409 /****************************************************************************/
6410 /* Provides a sysctl interface to allow dumping the page chain.             */
6411 /*                                                                          */
6412 /* Returns:                                                                 */
6413 /*   0 for success, positive value for failure.                             */
6414 /****************************************************************************/
6415 static int
6416 bce_sysctl_dump_pg_chain(SYSCTL_HANDLER_ARGS)
6417 {
6418         int error;
6419         int result;
6420         struct bce_softc *sc;
6421 
6422         result = -1;
6423         error = sysctl_handle_int(oidp, &result, 0, req);
6424 
6425         if (error || !req->newptr)
6426                 return (error);
6427 
6428         if (result == 1) {
6429                 sc = (struct bce_softc *)arg1;
6430                 bce_dump_pg_chain(sc, 0, TOTAL_PG_BD);
6431         }
6432 
6433         return (error);
6434 }
6435 
6436 
6437 /****************************************************************************/
6438 /* Provides a sysctl interface to allow reading arbitrary registers in the  */
6439 /* device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                            */
6440 /*                                                                          */
6441 /* Returns:                                                                 */
6442 /*   0 for success, positive value for failure.                             */
6443 /****************************************************************************/
6444 static int
6445 bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
6446 {
6447 	struct bce_softc *sc;
6448 	int error;
6449 	u32 val, result;
6450 
6451 	result = -1;
6452 	error = sysctl_handle_int(oidp, &result, 0, req);
6453 	if (error || (req->newptr == NULL))
6454 		return (error);
6455 
6456 	/* Make sure the register is accessible. */
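	/*
	 * Offsets below 0x8000 are read directly through the register
	 * window (REG_RD); offsets up to 0x280000 are read through the
	 * indirect register interface (REG_RD_IND).  Anything beyond that
	 * range is silently ignored.
	 */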
6457 	if (result < 0x8000) {
6458 		sc = (struct bce_softc *)arg1;
6459 		val = REG_RD(sc, result);
6460 		BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
6461 	} else if (result < 0x0280000) {
6462 		sc = (struct bce_softc *)arg1;
6463 		val = REG_RD_IND(sc, result);
6464 		BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val);
6465 	}
6466 
6467 	return (error);
6468 }
6469 
6470 
6471 /****************************************************************************/
6472 /* Provides a sysctl interface to allow reading arbitrary PHY registers in  */
6473 /* the device.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                        */
6474 /*                                                                          */
6475 /* Returns:                                                                 */
6476 /*   0 for success, positive value for failure.                             */
6477 /****************************************************************************/
6478 static int
6479 bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS)
6480 {
6481 	struct bce_softc *sc;
6482 	device_t dev;
6483 	int error, result;
6484 	u16 val;
6485 
6486 	result = -1;
6487 	error = sysctl_handle_int(oidp, &result, 0, req);
6488 	if (error || (req->newptr == NULL))
6489 		return (error);
6490 
6491 	/* Make sure the register is accessible. */
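	/*
	 * Only the 32 standard MII management registers (0x00 - 0x1F) can
	 * be read here; requests outside that range are silently ignored.
	 */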
6492 	if (result < 0x20) {
6493 		sc = (struct bce_softc *)arg1;
6494 		dev = sc->bce_dev;
6495 		val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result);
6496 		BCE_PRINTF("phy 0x%02X = 0x%04X\n", result, val);
6497 	}
6498 	return (error);
6499 }
6500 
6501 
6502 /****************************************************************************/
6503 /* Provides a sysctl interface to force the driver to dump state and        */
6504 /* enter the debugger.  DO NOT ENABLE ON PRODUCTION SYSTEMS!                */
6505 /*                                                                          */
6506 /* Returns:                                                                 */
6507 /*   0 for success, positive value for failure.                             */
6508 /****************************************************************************/
6509 static int
6510 bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS)
6511 {
6512         int error;
6513         int result;
6514         struct bce_softc *sc;
6515 
6516         result = -1;
6517         error = sysctl_handle_int(oidp, &result, 0, req);
6518 
6519         if (error || !req->newptr)
6520                 return (error);
6521 
6522         if (result == 1) {
6523                 sc = (struct bce_softc *)arg1;
6524                 bce_breakpoint(sc);
6525         }
6526 
6527         return (error);
6528 }
6529 #endif
6530 
6531 
6532 /****************************************************************************/
6533 /* Adds any sysctl parameters for tuning or debugging purposes.             */
6534 /*                                                                          */
6535 /* Returns:                                                                 */
6536 /*   Nothing.                                                               */
6537 /****************************************************************************/
6538 static void
6539 bce_add_sysctls(struct bce_softc *sc)
6540 {
6541 	struct sysctl_ctx_list *ctx;
6542 	struct sysctl_oid_list *children;
6543 
6544 	ctx = device_get_sysctl_ctx(sc->bce_dev);
6545 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));
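	/*
	 * All of the nodes below hang off the per-device sysctl tree,
	 * i.e. dev.bce.<unit> with the default device naming.
	 */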
6546 
6547 #ifdef BCE_DEBUG
6548 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6549 		"rx_low_watermark",
6550 		CTLFLAG_RD, &sc->rx_low_watermark,
6551 		0, "Lowest level of free rx_bd's");
6552 
6553 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6554 		"rx_empty_count",
6555 		CTLFLAG_RD, &sc->rx_empty_count,
6556 		0, "Number of times the RX chain was empty");
6557 
6558 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6559 		"tx_hi_watermark",
6560 		CTLFLAG_RD, &sc->tx_hi_watermark,
6561 		0, "Highest level of used tx_bd's");
6562 
6563 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6564 		"tx_full_count",
6565 		CTLFLAG_RD, &sc->tx_full_count,
6566 		0, "Number of times the TX chain was full");
6567 
6568 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6569 		"l2fhdr_status_errors",
6570 		CTLFLAG_RD, &sc->l2fhdr_status_errors,
6571 		0, "l2_fhdr status errors");
6572 
6573 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6574 		"unexpected_attentions",
6575 		CTLFLAG_RD, &sc->unexpected_attentions,
6576 		0, "Unexpected attentions");
6577 
6578 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6579 		"lost_status_block_updates",
6580 		CTLFLAG_RD, &sc->lost_status_block_updates,
6581 		0, "Lost status block updates");
6582 
6583 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6584 		"debug_mbuf_sim_alloc_failed",
6585 		CTLFLAG_RD, &sc->debug_mbuf_sim_alloc_failed,
6586 		0, "Simulated mbuf cluster allocation failures");
6587 
6588 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6589 		"requested_tso_frames",
6590 		CTLFLAG_RD, &sc->requested_tso_frames,
6591 		0, "Number of TSO frames received");
6592 
6593 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6594 		"rx_interrupts",
6595 		CTLFLAG_RD, &sc->rx_interrupts,
6596 		0, "Number of RX interrupts");
6597 
6598 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6599 		"tx_interrupts",
6600 		CTLFLAG_RD, &sc->tx_interrupts,
6601 		0, "Number of TX interrupts");
6602 
6603 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6604 		"rx_intr_time",
6605 		CTLFLAG_RD, &sc->rx_intr_time,
6606 		"RX interrupt time");
6607 
6608 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6609 		"tx_intr_time",
6610 		CTLFLAG_RD, &sc->tx_intr_time,
6611 		"TX interrupt time");
6612 
6613 #endif
6614 
6615 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6616 		"mbuf_alloc_failed",
6617 		CTLFLAG_RD, &sc->mbuf_alloc_failed,
6618 		0, "mbuf cluster allocation failures");
6619 
6620 	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6621 		"tx_dma_map_failures",
6622 		CTLFLAG_RD, &sc->tx_dma_map_failures,
6623 		0, "tx dma mapping failures");
6624 
6625 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6626 		"stat_IfHcInOctets",
6627 		CTLFLAG_RD, &sc->stat_IfHCInOctets,
6628 		"Bytes received");
6629 
6630 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6631 		"stat_IfHCInBadOctets",
6632 		CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
6633 		"Bad bytes received");
6634 
6635 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6636 		"stat_IfHCOutOctets",
6637 		CTLFLAG_RD, &sc->stat_IfHCOutOctets,
6638 		"Bytes sent");
6639 
6640 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6641 		"stat_IfHCOutBadOctets",
6642 		CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
6643 		"Bad bytes sent");
6644 
6645 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6646 		"stat_IfHCInUcastPkts",
6647 		CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
6648 		"Unicast packets received");
6649 
6650 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6651 		"stat_IfHCInMulticastPkts",
6652 		CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
6653 		"Multicast packets received");
6654 
6655 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6656 		"stat_IfHCInBroadcastPkts",
6657 		CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
6658 		"Broadcast packets received");
6659 
6660 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6661 		"stat_IfHCOutUcastPkts",
6662 		CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
6663 		"Unicast packets sent");
6664 
6665 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6666 		"stat_IfHCOutMulticastPkts",
6667 		CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
6668 		"Multicast packets sent");
6669 
6670 	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6671 		"stat_IfHCOutBroadcastPkts",
6672 		CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
6673 		"Broadcast packets sent");
6674 
6675 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6676 		"stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
6677 		CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
6678 		0, "Internal MAC transmit errors");
6679 
6680 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6681 		"stat_Dot3StatsCarrierSenseErrors",
6682 		CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
6683 		0, "Carrier sense errors");
6684 
6685 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6686 		"stat_Dot3StatsFCSErrors",
6687 		CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
6688 		0, "Frame check sequence errors");
6689 
6690 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6691 		"stat_Dot3StatsAlignmentErrors",
6692 		CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
6693 		0, "Alignment errors");
6694 
6695 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6696 		"stat_Dot3StatsSingleCollisionFrames",
6697 		CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
6698 		0, "Single Collision Frames");
6699 
6700 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6701 		"stat_Dot3StatsMultipleCollisionFrames",
6702 		CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
6703 		0, "Multiple Collision Frames");
6704 
6705 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6706 		"stat_Dot3StatsDeferredTransmissions",
6707 		CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
6708 		0, "Deferred Transmissions");
6709 
6710 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6711 		"stat_Dot3StatsExcessiveCollisions",
6712 		CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
6713 		0, "Excessive Collisions");
6714 
6715 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6716 		"stat_Dot3StatsLateCollisions",
6717 		CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
6718 		0, "Late Collisions");
6719 
6720 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6721 		"stat_EtherStatsCollisions",
6722 		CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
6723 		0, "Collisions");
6724 
6725 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6726 		"stat_EtherStatsFragments",
6727 		CTLFLAG_RD, &sc->stat_EtherStatsFragments,
6728 		0, "Fragments");
6729 
6730 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6731 		"stat_EtherStatsJabbers",
6732 		CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
6733 		0, "Jabbers");
6734 
6735 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6736 		"stat_EtherStatsUndersizePkts",
6737 		CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
6738 		0, "Undersize packets");
6739 
6740 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6741 		"stat_EtherStatsOverrsizePkts",
6742 		CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
6743 		0, "stat_EtherStatsOverrsizePkts");
6744 
6745 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6746 		"stat_EtherStatsPktsRx64Octets",
6747 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
6748 		0, "Bytes received in 64 byte packets");
6749 
6750 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6751 		"stat_EtherStatsPktsRx65Octetsto127Octets",
6752 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
6753 		0, "Bytes received in 65 to 127 byte packets");
6754 
6755 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6756 		"stat_EtherStatsPktsRx128Octetsto255Octets",
6757 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
6758 		0, "Bytes received in 128 to 255 byte packets");
6759 
6760 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6761 		"stat_EtherStatsPktsRx256Octetsto511Octets",
6762 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
6763 		0, "Bytes received in 256 to 511 byte packets");
6764 
6765 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6766 		"stat_EtherStatsPktsRx512Octetsto1023Octets",
6767 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
6768 		0, "Bytes received in 512 to 1023 byte packets");
6769 
6770 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6771 		"stat_EtherStatsPktsRx1024Octetsto1522Octets",
6772 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
6773 		0, "Bytes received in 1024 t0 1522 byte packets");
6774 
6775 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6776 		"stat_EtherStatsPktsRx1523Octetsto9022Octets",
6777 		CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
6778 		0, "Bytes received in 1523 to 9022 byte packets");
6779 
6780 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6781 		"stat_EtherStatsPktsTx64Octets",
6782 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
6783 		0, "Bytes sent in 64 byte packets");
6784 
6785 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6786 		"stat_EtherStatsPktsTx65Octetsto127Octets",
6787 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
6788 		0, "Bytes sent in 65 to 127 byte packets");
6789 
6790 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6791 		"stat_EtherStatsPktsTx128Octetsto255Octets",
6792 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
6793 		0, "Bytes sent in 128 to 255 byte packets");
6794 
6795 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6796 		"stat_EtherStatsPktsTx256Octetsto511Octets",
6797 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
6798 		0, "Bytes sent in 256 to 511 byte packets");
6799 
6800 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6801 		"stat_EtherStatsPktsTx512Octetsto1023Octets",
6802 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
6803 		0, "Bytes sent in 512 to 1023 byte packets");
6804 
6805 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6806 		"stat_EtherStatsPktsTx1024Octetsto1522Octets",
6807 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
6808 		0, "Bytes sent in 1024 to 1522 byte packets");
6809 
6810 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6811 		"stat_EtherStatsPktsTx1523Octetsto9022Octets",
6812 		CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
6813 		0, "Bytes sent in 1523 to 9022 byte packets");
6814 
6815 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6816 		"stat_XonPauseFramesReceived",
6817 		CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
6818 		0, "XON pause frames receved");
6819 
6820 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6821 		"stat_XoffPauseFramesReceived",
6822 		CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
6823 		0, "XOFF pause frames received");
6824 
6825 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6826 		"stat_OutXonSent",
6827 		CTLFLAG_RD, &sc->stat_OutXonSent,
6828 		0, "XON pause frames sent");
6829 
6830 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6831 		"stat_OutXoffSent",
6832 		CTLFLAG_RD, &sc->stat_OutXoffSent,
6833 		0, "XOFF pause frames sent");
6834 
6835 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6836 		"stat_FlowControlDone",
6837 		CTLFLAG_RD, &sc->stat_FlowControlDone,
6838 		0, "Flow control done");
6839 
6840 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6841 		"stat_MacControlFramesReceived",
6842 		CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
6843 		0, "MAC control frames received");
6844 
6845 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6846 		"stat_XoffStateEntered",
6847 		CTLFLAG_RD, &sc->stat_XoffStateEntered,
6848 		0, "XOFF state entered");
6849 
6850 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6851 		"stat_IfInFramesL2FilterDiscards",
6852 		CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
6853 		0, "Received L2 packets discarded");
6854 
6855 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6856 		"stat_IfInRuleCheckerDiscards",
6857 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
6858 		0, "Received packets discarded by rule");
6859 
6860 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6861 		"stat_IfInFTQDiscards",
6862 		CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
6863 		0, "Received packet FTQ discards");
6864 
6865 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6866 		"stat_IfInMBUFDiscards",
6867 		CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
6868 		0, "Received packets discarded due to lack of controller buffer memory");
6869 
6870 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6871 		"stat_IfInRuleCheckerP4Hit",
6872 		CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
6873 		0, "Received packets rule checker hits");
6874 
6875 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6876 		"stat_CatchupInRuleCheckerDiscards",
6877 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
6878 		0, "Received packets discarded in Catchup path");
6879 
6880 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6881 		"stat_CatchupInFTQDiscards",
6882 		CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
6883 		0, "Received packets discarded in FTQ in Catchup path");
6884 
6885 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6886 		"stat_CatchupInMBUFDiscards",
6887 		CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
6888 		0, "Received packets discarded in controller buffer memory in Catchup path");
6889 
6890 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6891 		"stat_CatchupInRuleCheckerP4Hit",
6892 		CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
6893 		0, "Received packets rule checker hits in Catchup path");
6894 
6895 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6896 		"com_no_buffers",
6897 		CTLFLAG_RD, &sc->com_no_buffers,
6898 		0, "Valid packets received but no RX buffers available");
6899 
6900 #ifdef BCE_DEBUG
6901 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6902 		"driver_state", CTLTYPE_INT | CTLFLAG_RW,
6903 		(void *)sc, 0,
6904 		bce_sysctl_driver_state, "I", "Drive state information");
6905 
6906 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6907 		"hw_state", CTLTYPE_INT | CTLFLAG_RW,
6908 		(void *)sc, 0,
6909 		bce_sysctl_hw_state, "I", "Hardware state information");
6910 
6911 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6912 		"bc_state", CTLTYPE_INT | CTLFLAG_RW,
6913 		(void *)sc, 0,
6914 		bce_sysctl_bc_state, "I", "Bootcode state information");
6915 
6916 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6917 		"dump_rx_chain", CTLTYPE_INT | CTLFLAG_RW,
6918 		(void *)sc, 0,
6919 		bce_sysctl_dump_rx_chain, "I", "Dump rx_bd chain");
6920 
6921 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6922 		"dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW,
6923 		(void *)sc, 0,
6924 		bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain");
6925 
6926 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6927 		"dump_pg_chain", CTLTYPE_INT | CTLFLAG_RW,
6928 		(void *)sc, 0,
6929 		bce_sysctl_dump_pg_chain, "I", "Dump page chain");
6930 
6931 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6932 		"breakpoint", CTLTYPE_INT | CTLFLAG_RW,
6933 		(void *)sc, 0,
6934 		bce_sysctl_breakpoint, "I", "Driver breakpoint");
6935 
6936 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6937 		"reg_read", CTLTYPE_INT | CTLFLAG_RW,
6938 		(void *)sc, 0,
6939 		bce_sysctl_reg_read, "I", "Register read");
6940 
6941 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
6942 		"phy_read", CTLTYPE_INT | CTLFLAG_RW,
6943 		(void *)sc, 0,
6944 		bce_sysctl_phy_read, "I", "PHY register read");
6945 
6946 #endif
6947 
6948 }
6949 
6950 
6951 /****************************************************************************/
6952 /* BCE Debug Routines                                                       */
6953 /****************************************************************************/
6954 #ifdef BCE_DEBUG
6955 
6956 /****************************************************************************/
6957 /* Freezes the controller to allow for a cohesive state dump.               */
6958 /*                                                                          */
6959 /* Returns:                                                                 */
6960 /*   Nothing.                                                               */
6961 /****************************************************************************/
6962 static void
6963 bce_freeze_controller(struct bce_softc *sc)
6964 {
6965 	u32 val;
6966 	val = REG_RD(sc, BCE_MISC_COMMAND);
6967 	val |= BCE_MISC_COMMAND_DISABLE_ALL;
6968 	REG_WR(sc, BCE_MISC_COMMAND, val);
6969 
6970 }
6971 
6972 
6973 /****************************************************************************/
6974 /* Unfreezes the controller after a freeze operation.  This may not always  */
6975 /* work and the controller will require a reset!                            */
6976 /*                                                                          */
6977 /* Returns:                                                                 */
6978 /*   Nothing.                                                               */
6979 /****************************************************************************/
6980 static void
6981 bce_unfreeze_controller(struct bce_softc *sc)
6982 {
6983 	u32 val;
6984 	val = REG_RD(sc, BCE_MISC_COMMAND);
6985 	val |= BCE_MISC_COMMAND_ENABLE_ALL;
6986 	REG_WR(sc, BCE_MISC_COMMAND, val);
6987 
6988 }
6989 
6990 /****************************************************************************/
6991 /* Prints out information about an mbuf.                                    */
6992 /*                                                                          */
6993 /* Returns:                                                                 */
6994 /*   Nothing.                                                               */
6995 /****************************************************************************/
6996 static void
6997 bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
6998 {
6999 	struct mbuf *mp = m;
7000 
7001 	if (m == NULL) {
7002 		BCE_PRINTF("mbuf: null pointer\n");
7003 		return;
7004 	}
7005 
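	/*
	 * The "%b" conversions below decode bit fields: the first character
	 * of the bits string is the output base (\20 = base 16) and each
	 * subsequent \<bit> entry names that bit (1-origin) when set.  See
	 * printf(9).
	 */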
7006 	while (mp) {
7007 		BCE_PRINTF("mbuf: %p, m_len = %d, m_flags = 0x%b, m_data = %p\n",
7008 			mp, mp->m_len, mp->m_flags,
7009 			"\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY",
7010 			mp->m_data);
7011 
7012 		if (mp->m_flags & M_PKTHDR) {
7013 			BCE_PRINTF("- m_pkthdr: len = %d, flags = 0x%b, csum_flags = %b\n",
7014 				mp->m_pkthdr.len, mp->m_flags,
7015 				"\20\12M_BCAST\13M_MCAST\14M_FRAG\15M_FIRSTFRAG"
7016 				"\16M_LASTFRAG\21M_VLANTAG\22M_PROMISC\23M_NOFREE",
7017 				mp->m_pkthdr.csum_flags,
7018 				"\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
7019 				"\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
7020 				"\12CSUM_IP_VALID\13CSUM_DATA_VALID\14CSUM_PSEUDO_HDR");
7021 		}
7022 
7023 		if (mp->m_flags & M_EXT) {
7024 			BCE_PRINTF("- m_ext: %p, ext_size = %d, type = ",
7025 				mp->m_ext.ext_buf, mp->m_ext.ext_size);
7026 			switch (mp->m_ext.ext_type) {
7027 				case EXT_CLUSTER:    printf("EXT_CLUSTER\n"); break;
7028 				case EXT_SFBUF:      printf("EXT_SFBUF\n"); break;
7029 				case EXT_JUMBO9:     printf("EXT_JUMBO9\n"); break;
7030 				case EXT_JUMBO16:    printf("EXT_JUMBO16\n"); break;
7031 				case EXT_PACKET:     printf("EXT_PACKET\n"); break;
7032 				case EXT_MBUF:       printf("EXT_MBUF\n"); break;
7033 				case EXT_NET_DRV:    printf("EXT_NET_DRV\n"); break;
7034 				case EXT_MOD_TYPE:   printf("EXT_MOD_TYPE\n"); break;
7035 				case EXT_DISPOSABLE: printf("EXT_DISPOSABLE\n"); break;
7036 				case EXT_EXTREF:     printf("EXT_EXTREF\n"); break;
7037 				default:             printf("UNKNOWN\n");
7038 			}
7039 		}
7040 
7041 		mp = mp->m_next;
7042 	}
7043 }
7044 
7045 
7046 /****************************************************************************/
7047 /* Prints out the mbufs in the TX mbuf chain.                               */
7048 /*                                                                          */
7049 /* Returns:                                                                 */
7050 /*   Nothing.                                                               */
7051 /****************************************************************************/
7052 static void
7053 bce_dump_tx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
7054 {
7055 	struct mbuf *m;
7056 
7057 	BCE_PRINTF(
7058 		"----------------------------"
7059 		"  tx mbuf data  "
7060 		"----------------------------\n");
7061 
7062 	for (int i = 0; i < count; i++) {
7063 	 	m = sc->tx_mbuf_ptr[chain_prod];
7064 		BCE_PRINTF("txmbuf[0x%04X]\n", chain_prod);
7065 		bce_dump_mbuf(sc, m);
7066 		chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
7067 	}
7068 
7069 	BCE_PRINTF(
7070 		"----------------------------"
7071 		"----------------"
7072 		"----------------------------\n");
7073 }
7074 
7075 
7076 /****************************************************************************/
7077 /* Prints out the mbufs in the RX mbuf chain.                               */
7078 /*                                                                          */
7079 /* Returns:                                                                 */
7080 /*   Nothing.                                                               */
7081 /****************************************************************************/
7082 static void
7083 bce_dump_rx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
7084 {
7085 	struct mbuf *m;
7086 
7087 	BCE_PRINTF(
7088 		"----------------------------"
7089 		"  rx mbuf data  "
7090 		"----------------------------\n");
7091 
7092 	for (int i = 0; i < count; i++) {
7093 	 	m = sc->rx_mbuf_ptr[chain_prod];
7094 		BCE_PRINTF("rxmbuf[0x%04X]\n", chain_prod);
7095 		bce_dump_mbuf(sc, m);
7096 		chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod));
7097 	}
7098 
7099 
7100 	BCE_PRINTF(
7101 		"----------------------------"
7102 		"----------------"
7103 		"----------------------------\n");
7104 }
7105 
7106 
7107 /****************************************************************************/
7108 /* Prints out the mbufs in the mbuf page chain.                             */
7109 /*                                                                          */
7110 /* Returns:                                                                 */
7111 /*   Nothing.                                                               */
7112 /****************************************************************************/
7113 static void
7114 bce_dump_pg_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
7115 {
7116 	struct mbuf *m;
7117 
7118 	BCE_PRINTF(
7119 		"----------------------------"
7120 		"  pg mbuf data  "
7121 		"----------------------------\n");
7122 
7123 	for (int i = 0; i < count; i++) {
7124 	 	m = sc->pg_mbuf_ptr[chain_prod];
7125 		BCE_PRINTF("pgmbuf[0x%04X]\n", chain_prod);
7126 		bce_dump_mbuf(sc, m);
7127 		chain_prod = PG_CHAIN_IDX(NEXT_PG_BD(chain_prod));
7128 	}
7129 
7130 
7131 	BCE_PRINTF(
7132 		"----------------------------"
7133 		"----------------"
7134 		"----------------------------\n");
7135 }
7136 
7137 
7138 /****************************************************************************/
7139 /* Prints out a tx_bd structure.                                            */
7140 /*                                                                          */
7141 /* Returns:                                                                 */
7142 /*   Nothing.                                                               */
7143 /****************************************************************************/
7144 static void
7145 bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
7146 {
7147 	if (idx > MAX_TX_BD)
7148 		/* Index out of range. */
7149 		BCE_PRINTF("tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
7150 	else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
7151 		/* TX Chain page pointer. */
7152 		BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
7153 			idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo);
7154 	else {
7155 			/* Normal tx_bd entry. */
7156 			BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
7157 				"vlan tag= 0x%04X, flags = 0x%04X (", idx,
7158 				txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo,
7159 				txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag,
7160 				txbd->tx_bd_flags);
7161 
7162 			if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT)
7163 				printf(" CONN_FAULT");
7164 
7165 			if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM)
7166 				printf(" TCP_UDP_CKSUM");
7167 
7168 			if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM)
7169 				printf(" IP_CKSUM");
7170 
7171 			if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG)
7172 				printf("  VLAN");
7173 
7174 			if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW)
7175 				printf(" COAL_NOW");
7176 
7177 			if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC)
7178 				printf(" DONT_GEN_CRC");
7179 
7180 			if (txbd->tx_bd_flags & TX_BD_FLAGS_START)
7181 				printf(" START");
7182 
7183 			if (txbd->tx_bd_flags & TX_BD_FLAGS_END)
7184 				printf(" END");
7185 
7186 			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO)
7187 				printf(" LSO");
7188 
7189 			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD)
7190 				printf(" OPTION_WORD");
7191 
7192 			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS)
7193 				printf(" FLAGS");
7194 
7195 			if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP)
7196 				printf(" SNAP");
7197 
7198 			printf(" )\n");
7199 		}
7200 
7201 }
7202 
7203 
7204 /****************************************************************************/
7205 /* Prints out a rx_bd structure.                                            */
7206 /*                                                                          */
7207 /* Returns:                                                                 */
7208 /*   Nothing.                                                               */
7209 /****************************************************************************/
7210 static void
7211 bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
7212 {
7213 	if (idx > MAX_RX_BD)
7214 		/* Index out of range. */
7215 		BCE_PRINTF("rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
7216 	else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
7217 		/* RX Chain page pointer. */
7218 		BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
7219 			idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
7220 	else
7221 		/* Normal rx_bd entry. */
7222 		BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
7223 			"flags = 0x%08X\n", idx,
7224 			rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
7225 			rxbd->rx_bd_len, rxbd->rx_bd_flags);
7226 }
7227 
7228 
7229 /****************************************************************************/
7230 /* Prints out a rx_bd structure in the page chain.                          */
7231 /*                                                                          */
7232 /* Returns:                                                                 */
7233 /*   Nothing.                                                               */
7234 /****************************************************************************/
7235 static void
7236 bce_dump_pgbd(struct bce_softc *sc, int idx, struct rx_bd *pgbd)
7237 {
7238 	if (idx > MAX_PG_BD)
7239 		/* Index out of range. */
7240 		BCE_PRINTF("pg_bd[0x%04X]: Invalid pg_bd index!\n", idx);
7241 	else if ((idx & USABLE_PG_BD_PER_PAGE) == USABLE_PG_BD_PER_PAGE)
7242 		/* Page Chain page pointer. */
7243 		BCE_PRINTF("px_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
7244 			idx, pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo);
7245 	else
7246 		/* Normal rx_bd entry. */
7247 		BCE_PRINTF("pg_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
7248 			"flags = 0x%08X\n", idx,
7249 			pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo,
7250 			pgbd->rx_bd_len, pgbd->rx_bd_flags);
7251 }
7252 
7253 
7254 /****************************************************************************/
7255 /* Prints out a l2_fhdr structure.                                          */
7256 /*                                                                          */
7257 /* Returns:                                                                 */
7258 /*   Nothing.                                                               */
7259 /****************************************************************************/
7260 static void
7261 bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
7262 {
7263 	BCE_PRINTF("l2_fhdr[0x%04X]: status = 0x%b, "
7264 		"pkt_len = %d, vlan = 0x%04x, ip_xsum/hdr_len = 0x%04X, "
7265 		"tcp_udp_xsum = 0x%04X\n", idx,
7266 		l2fhdr->l2_fhdr_status, BCE_L2FHDR_PRINTFB,
7267 		l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag,
7268 		l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum);
7269 }
7270 
7271 
7272 /****************************************************************************/
7273 /* Prints out the L2 context memory.  (Only useful for CID 0 to 15.)        */
7274 /*                                                                          */
7275 /* Returns:                                                                 */
7276 /*   Nothing.                                                               */
7277 /****************************************************************************/
7278 static void
7279 bce_dump_ctx(struct bce_softc *sc, u16 cid)
7280 {
7281 	if (cid < TX_CID) {
7282 		BCE_PRINTF(
7283 			"----------------------------"
7284 			"    CTX Data    "
7285 			"----------------------------\n");
7286 
7287 		BCE_PRINTF("     0x%04X - (CID) Context ID\n", cid);
7288 		BCE_PRINTF(" 0x%08X - (L2CTX_HOST_BDIDX) host rx producer index\n",
7289 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_HOST_BDIDX));
7290 		BCE_PRINTF(" 0x%08X - (L2CTX_HOST_BSEQ) host byte sequence\n",
7291 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_HOST_BSEQ));
7292 		BCE_PRINTF(" 0x%08X - (L2CTX_NX_BSEQ) h/w byte sequence\n",
7293 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_BSEQ));
7294 		BCE_PRINTF(" 0x%08X - (L2CTX_NX_BDHADDR_HI) h/w buffer descriptor address\n",
7295 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_BDHADDR_HI));
7296 		BCE_PRINTF(" 0x%08X - (L2CTX_NX_BDHADDR_LO) h/w buffer descriptor address\n",
7297 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_BDHADDR_LO));
7298 		BCE_PRINTF(" 0x%08X - (L2CTX_NX_BDIDX) h/w rx consumer index\n",
7299 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_BDIDX));
7300 		BCE_PRINTF(" 0x%08X - (L2CTX_HOST_PG_BDIDX) host page producer index\n",
7301 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_HOST_PG_BDIDX));
7302 		BCE_PRINTF(" 0x%08X - (L2CTX_PG_BUF_SIZE) host rx_bd/page buffer size\n",
7303 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_PG_BUF_SIZE));
7304 		BCE_PRINTF(" 0x%08X - (L2CTX_NX_PG_BDHADDR_HI) h/w page chain address\n",
7305 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_PG_BDHADDR_HI));
7306 		BCE_PRINTF(" 0x%08X - (L2CTX_NX_PG_BDHADDR_LO) h/w page chain address\n",
7307 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_PG_BDHADDR_LO));
7308 		BCE_PRINTF(" 0x%08X - (L2CTX_NX_PG_BDIDX) h/w page consumer index\n",
7309 			CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_PG_BDIDX));
7310 
7311 		BCE_PRINTF(
7312 			"----------------------------"
7313 			"----------------"
7314 			"----------------------------\n");
7315 	}
7316 }
7317 
7318 
7319 /****************************************************************************/
7320 /* Prints out the FTQ data.                                                 */
7321 /*                                                                          */
7322 /* Returns:                                                                 */
7323 /*   Nothing.                                                               */
7324 /****************************************************************************/
7325 static void
7326 bce_dump_ftqs(struct bce_softc *sc)
7327 {
7328 	u32 cmd, ctl, cur_depth, max_depth, valid_cnt;
7329 
7330 	BCE_PRINTF(
7331 		"----------------------------"
7332 		"    FTQ Data    "
7333 		"----------------------------\n");
7334 
7335 	BCE_PRINTF("  FTQ   Command    Control   Depth_Now  Max_Depth  Valid_Cnt\n");
7336 	BCE_PRINTF(" ----- ---------- ---------- ---------- ---------- ----------\n");
7337 
7338 	/* Setup the generic statistic counters for the FTQ valid count. */
7339 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_0,
7340 		((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT << 24) |
7341 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT  << 16) |
7342 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT   <<  8) |
7343 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT)));
7344 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_1,
7345 		((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT  << 24) |
7346 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT  << 16) |
7347 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT <<  8) |
7348 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT)));
7349 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_2,
7350 		((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT  << 24) |
7351 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT  << 16) |
7352 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT   <<  8) |
7353 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT)));
7354 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_3,
7355 		((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT   << 24) |
7356 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT  << 16) |
7357 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT  <<  8) |
7358 		 (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT)));
7359 
7360 
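	/*
	 * For each queue below, the current depth is extracted from the
	 * FTQ control register using the *_CUR_DEPTH mask and a 22 bit
	 * right shift, and the maximum depth using the *_MAX_DEPTH mask
	 * and a 12 bit shift.
	 */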
7361 	cmd = REG_RD(sc, BCE_RLUP_FTQ_CMD);
7362 	ctl = REG_RD(sc, BCE_RLUP_FTQ_CTL);
7363 	cur_depth = (ctl & BCE_RLUP_FTQ_CTL_CUR_DEPTH) >> 22;
7364 	max_depth = (ctl & BCE_RLUP_FTQ_CTL_MAX_DEPTH) >> 12;
7365 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0);
7366 	BCE_PRINTF(" RLUP  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7367 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7368 
7369 	cmd = REG_RD_IND(sc, BCE_RXP_FTQ_CMD);
7370 	ctl = REG_RD_IND(sc, BCE_RXP_FTQ_CTL);
7371 	cur_depth = (ctl & BCE_RXP_FTQ_CTL_CUR_DEPTH) >> 22;
7372 	max_depth = (ctl & BCE_RXP_FTQ_CTL_MAX_DEPTH) >> 12;
7373 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1);
7374 	BCE_PRINTF(" RXP   0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7375 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7376 
7377 	cmd = REG_RD_IND(sc, BCE_RXP_CFTQ_CMD);
7378 	ctl = REG_RD_IND(sc, BCE_RXP_CFTQ_CTL);
7379 	cur_depth = (ctl & BCE_RXP_CFTQ_CTL_CUR_DEPTH) >> 22;
7380 	max_depth = (ctl & BCE_RXP_CFTQ_CTL_MAX_DEPTH) >> 12;
7381 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2);
7382 	BCE_PRINTF(" RXPC  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7383 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7384 
7385 	cmd = REG_RD(sc, BCE_RV2P_PFTQ_CMD);
7386 	ctl = REG_RD(sc, BCE_RV2P_PFTQ_CTL);
7387 	cur_depth = (ctl & BCE_RV2P_PFTQ_CTL_CUR_DEPTH) >> 22;
7388 	max_depth = (ctl & BCE_RV2P_PFTQ_CTL_MAX_DEPTH) >> 12;
7389 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3);
7390 	BCE_PRINTF(" RV2PP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7391 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7392 
7393 	cmd = REG_RD(sc, BCE_RV2P_MFTQ_CMD);
7394 	ctl = REG_RD(sc, BCE_RV2P_MFTQ_CTL);
7395 	cur_depth = (ctl & BCE_RV2P_MFTQ_CTL_CUR_DEPTH) >> 22;
7396 	max_depth = (ctl & BCE_RV2P_MFTQ_CTL_MAX_DEPTH) >> 12;
7397 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT4);
7398 	BCE_PRINTF(" RV2PM 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7399 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7400 
7401 	cmd = REG_RD(sc, BCE_RV2P_TFTQ_CMD);
7402 	ctl = REG_RD(sc, BCE_RV2P_TFTQ_CTL);
7403 	cur_depth = (ctl & BCE_RV2P_TFTQ_CTL_CUR_DEPTH) >> 22;
7404 	max_depth = (ctl & BCE_RV2P_TFTQ_CTL_MAX_DEPTH) >> 12;
7405 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT5);
7406 	BCE_PRINTF(" RV2PT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7407 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7408 
7409 	cmd = REG_RD(sc, BCE_RDMA_FTQ_CMD);
7410 	ctl = REG_RD(sc, BCE_RDMA_FTQ_CTL);
7411 	cur_depth = (ctl & BCE_RDMA_FTQ_CTL_CUR_DEPTH) >> 22;
7412 	max_depth = (ctl & BCE_RDMA_FTQ_CTL_MAX_DEPTH) >> 12;
7413 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT6);
7414 	BCE_PRINTF(" RDMA  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7415 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7416 
7417 	cmd = REG_RD(sc, BCE_TSCH_FTQ_CMD);
7418 	ctl = REG_RD(sc, BCE_TSCH_FTQ_CTL);
7419 	cur_depth = (ctl & BCE_TSCH_FTQ_CTL_CUR_DEPTH) >> 22;
7420 	max_depth = (ctl & BCE_TSCH_FTQ_CTL_MAX_DEPTH) >> 12;
7421 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT7);
7422 	BCE_PRINTF(" TSCH  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7423 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7424 
7425 	cmd = REG_RD(sc, BCE_TBDR_FTQ_CMD);
7426 	ctl = REG_RD(sc, BCE_TBDR_FTQ_CTL);
7427 	cur_depth = (ctl & BCE_TBDR_FTQ_CTL_CUR_DEPTH) >> 22;
7428 	max_depth = (ctl & BCE_TBDR_FTQ_CTL_MAX_DEPTH) >> 12;
7429 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT8);
7430 	BCE_PRINTF(" TBDR  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7431 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7432 
7433 	cmd = REG_RD_IND(sc, BCE_TXP_FTQ_CMD);
7434 	ctl = REG_RD_IND(sc, BCE_TXP_FTQ_CTL);
7435 	cur_depth = (ctl & BCE_TXP_FTQ_CTL_CUR_DEPTH) >> 22;
7436 	max_depth = (ctl & BCE_TXP_FTQ_CTL_MAX_DEPTH) >> 12;
7437 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT9);
7438 	BCE_PRINTF(" TXP   0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7439 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7440 
7441 	cmd = REG_RD(sc, BCE_TDMA_FTQ_CMD);
7442 	ctl = REG_RD(sc, BCE_TDMA_FTQ_CTL);
7443 	cur_depth = (ctl & BCE_TDMA_FTQ_CTL_CUR_DEPTH) >> 22;
7444 	max_depth = (ctl & BCE_TDMA_FTQ_CTL_MAX_DEPTH) >> 12;
7445 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT10);
7446 	BCE_PRINTF(" TDMA  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7447 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7448 
7449 
7450 	cmd = REG_RD_IND(sc, BCE_TPAT_FTQ_CMD);
7451 	ctl = REG_RD_IND(sc, BCE_TPAT_FTQ_CTL);
7452 	cur_depth = (ctl & BCE_TPAT_FTQ_CTL_CUR_DEPTH) >> 22;
7453 	max_depth = (ctl & BCE_TPAT_FTQ_CTL_MAX_DEPTH) >> 12;
7454 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT11);
7455 	BCE_PRINTF(" TPAT  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7456 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7457 
7458 	cmd = REG_RD_IND(sc, BCE_TAS_FTQ_CMD);
7459 	ctl = REG_RD_IND(sc, BCE_TAS_FTQ_CTL);
7460 	cur_depth = (ctl & BCE_TAS_FTQ_CTL_CUR_DEPTH) >> 22;
7461 	max_depth = (ctl & BCE_TAS_FTQ_CTL_MAX_DEPTH) >> 12;
7462 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT12);
7463 	BCE_PRINTF(" TAS   0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7464 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7465 
7466 	cmd = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CMD);
7467 	ctl = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CTL);
7468 	cur_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_CUR_DEPTH) >> 22;
7469 	max_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_MAX_DEPTH) >> 12;
7470 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT13);
7471 	BCE_PRINTF(" COMX  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7472 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7473 
7474 	cmd = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CMD);
7475 	ctl = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CTL);
7476 	cur_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_CUR_DEPTH) >> 22;
7477 	max_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_MAX_DEPTH) >> 12;
7478 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT14);
7479 	BCE_PRINTF(" COMT  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7480 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7481 
7482 	cmd = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CMD);
7483 	ctl = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CTL);
7484 	cur_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_CUR_DEPTH) >> 22;
7485 	max_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_MAX_DEPTH) >> 12;
7486 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT15);
7487 	BCE_PRINTF(" COMX  0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7488 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7489 
7490 	/* Setup the generic statistic counters for the FTQ valid count. */
7491 	REG_WR(sc, BCE_HC_STAT_GEN_SEL_0,
7492 		 ((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT  << 16) |
7493 		  (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT  <<  8) |
7494 		  (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT)));
7495 
7496 	cmd = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CMD);
7497 	ctl = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CTL);
7498 	cur_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_CUR_DEPTH) >> 22;
7499 	max_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_MAX_DEPTH) >> 12;
7500 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0);
7501 	BCE_PRINTF(" MCP   0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7502 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7503 
7504 	cmd = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CMD);
7505 	ctl = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CTL);
7506 	cur_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_CUR_DEPTH) >> 22;
7507 	max_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_MAX_DEPTH) >> 12;
7508 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1);
7509 	BCE_PRINTF(" CP    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7510 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7511 
7512 	cmd = REG_RD(sc, BCE_CSCH_CH_FTQ_CMD);
7513 	ctl = REG_RD(sc, BCE_CSCH_CH_FTQ_CTL);
7514 	cur_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_CUR_DEPTH) >> 22;
7515 	max_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_MAX_DEPTH) >> 12;
7516 	valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2);
7517 	BCE_PRINTF(" CS    0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
7518 		cmd, ctl, cur_depth, max_depth, valid_cnt);
7519 
7520 	BCE_PRINTF(
7521 		"----------------------------"
7522 		"----------------"
7523 		"----------------------------\n");
7524 }
7525 
7526 
7527 /****************************************************************************/
7528 /* Prints out the TX chain.                                                 */
7529 /*                                                                          */
7530 /* Returns:                                                                 */
7531 /*   Nothing.                                                               */
7532 /****************************************************************************/
7533 static void
7534 bce_dump_tx_chain(struct bce_softc *sc, u16 tx_prod, int count)
7535 {
7536 	struct tx_bd *txbd;
7537 
7538 	/* First some info about the tx_bd chain structure. */
7539 	BCE_PRINTF(
7540 		"----------------------------"
7541 		"  tx_bd  chain  "
7542 		"----------------------------\n");
7543 
7544 	BCE_PRINTF("page size      = 0x%08X, tx chain pages        = 0x%08X\n",
7545 		(u32) BCM_PAGE_SIZE, (u32) TX_PAGES);
7546 
7547 	BCE_PRINTF("tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
7548 		(u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE);
7549 
7550 	BCE_PRINTF("total tx_bd    = 0x%08X\n", (u32) TOTAL_TX_BD);
7551 
7552 	BCE_PRINTF(
7553 		"----------------------------"
7554 		"   tx_bd data   "
7555 		"----------------------------\n");
7556 
7557 	/* Now print out the tx_bd's themselves. */
7558 	for (int i = 0; i < count; i++) {
7559 	 	txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
7560 		bce_dump_txbd(sc, tx_prod, txbd);
7561 		tx_prod = NEXT_TX_BD(tx_prod);
7562 	}
7563 
7564 	BCE_PRINTF(
7565 		"----------------------------"
7566 		"----------------"
7567 		"----------------------------\n");
7568 }
7569 
7570 
7571 /****************************************************************************/
7572 /* Prints out the RX chain.                                                 */
7573 /*                                                                          */
7574 /* Returns:                                                                 */
7575 /*   Nothing.                                                               */
7576 /****************************************************************************/
7577 static void
7578 bce_dump_rx_chain(struct bce_softc *sc, u16 rx_prod, int count)
7579 {
7580 	struct rx_bd *rxbd;
7581 
7582 	/* First some info about the rx_bd chain structure. */
7583 	BCE_PRINTF(
7584 		"----------------------------"
7585 		"  rx_bd  chain  "
7586 		"----------------------------\n");
7587 
7588 	BCE_PRINTF("page size      = 0x%08X, rx chain pages        = 0x%08X\n",
7589 		(u32) BCM_PAGE_SIZE, (u32) RX_PAGES);
7590 
7591 	BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
7592 		(u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE);
7593 
7594 	BCE_PRINTF("total rx_bd    = 0x%08X\n", (u32) TOTAL_RX_BD);
7595 
7596 	BCE_PRINTF(
7597 		"----------------------------"
7598 		"   rx_bd data   "
7599 		"----------------------------\n");
7600 
7601 	/* Now print out the rx_bd's themselves. */
7602 	for (int i = 0; i < count; i++) {
7603 		rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
7604 		bce_dump_rxbd(sc, rx_prod, rxbd);
7605 		rx_prod = RX_CHAIN_IDX(rx_prod + 1);
7606 	}
7607 
7608 	BCE_PRINTF(
7609 		"----------------------------"
7610 		"----------------"
7611 		"----------------------------\n");
7612 }
7613 
7614 
7615 /****************************************************************************/
7616 /* Prints out the page chain.                                               */
7617 /*                                                                          */
7618 /* Returns:                                                                 */
7619 /*   Nothing.                                                               */
7620 /****************************************************************************/
7621 static void
7622 bce_dump_pg_chain(struct bce_softc *sc, u16 pg_prod, int count)
7623 {
7624 	struct rx_bd *pgbd;
7625 
7626 	/* First some info about the page chain structure. */
7627 	BCE_PRINTF(
7628 		"----------------------------"
7629 		"   page chain   "
7630 		"----------------------------\n");
7631 
7632 	BCE_PRINTF("page size      = 0x%08X, pg chain pages        = 0x%08X\n",
7633 		(u32) BCM_PAGE_SIZE, (u32) PG_PAGES);
7634 
7635 	BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
7636 		(u32) TOTAL_PG_BD_PER_PAGE, (u32) USABLE_PG_BD_PER_PAGE);
7637 
7638 	BCE_PRINTF("total rx_bd    = 0x%08X, max_pg_bd             = 0x%08X\n",
7639 		(u32) TOTAL_PG_BD, (u32) MAX_PG_BD);
7640 
7641 	BCE_PRINTF(
7642 		"----------------------------"
7643 		"   page data    "
7644 		"----------------------------\n");
7645 
7646 	/* Now print out the pg_bd's themselves (the page chain reuses struct rx_bd). */
7647 	for (int i = 0; i < count; i++) {
7648 		pgbd = &sc->pg_bd_chain[PG_PAGE(pg_prod)][PG_IDX(pg_prod)];
7649 		bce_dump_pgbd(sc, pg_prod, pgbd);
7650 		pg_prod = PG_CHAIN_IDX(pg_prod + 1);
7651 	}
7652 
7653 	BCE_PRINTF(
7654 		"----------------------------"
7655 		"----------------"
7656 		"----------------------------\n");
7657 }
7658 
7659 
7660 /****************************************************************************/
7661 /* Prints out the status block from host memory.                            */
7662 /*                                                                          */
7663 /* Returns:                                                                 */
7664 /*   Nothing.                                                               */
7665 /****************************************************************************/
7666 static void
7667 bce_dump_status_block(struct bce_softc *sc)
7668 {
7669 	struct status_block *sblk;
7670 
7671 	sblk = sc->status_block;
7672 
7673 	BCE_PRINTF(
7674 		"----------------------------"
7675 		"  Status Block  "
7676 		"----------------------------\n");
7677 
7678 	BCE_PRINTF("    0x%08X - attn_bits\n",
7679 		sblk->status_attn_bits);
7680 
7681 	BCE_PRINTF("    0x%08X - attn_bits_ack\n",
7682 		sblk->status_attn_bits_ack);
7683 
7684 	BCE_PRINTF("0x%04X(0x%04X) - rx_cons0\n",
7685 		sblk->status_rx_quick_consumer_index0,
7686 		(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index0));
7687 
7688 	BCE_PRINTF("0x%04X(0x%04X) - tx_cons0\n",
7689 		sblk->status_tx_quick_consumer_index0,
7690 		(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index0));
7691 
7692 	BCE_PRINTF("        0x%04X - status_idx\n", sblk->status_idx);
7693 
7694 	/* These indices are not used for normal L2 drivers. */
7695 	if (sblk->status_rx_quick_consumer_index1)
7696 		BCE_PRINTF("0x%04X(0x%04X) - rx_cons1\n",
7697 			sblk->status_rx_quick_consumer_index1,
7698 			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index1));
7699 
7700 	if (sblk->status_tx_quick_consumer_index1)
7701 		BCE_PRINTF("0x%04X(0x%04X) - tx_cons1\n",
7702 			sblk->status_tx_quick_consumer_index1,
7703 			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index1));
7704 
7705 	if (sblk->status_rx_quick_consumer_index2)
7706 		BCE_PRINTF("0x%04X(0x%04X)- rx_cons2\n",
7707 			sblk->status_rx_quick_consumer_index2,
7708 			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index2));
7709 
7710 	if (sblk->status_tx_quick_consumer_index2)
7711 		BCE_PRINTF("0x%04X(0x%04X) - tx_cons2\n",
7712 			sblk->status_tx_quick_consumer_index2,
7713 			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index2));
7714 
7715 	if (sblk->status_rx_quick_consumer_index3)
7716 		BCE_PRINTF("0x%04X(0x%04X) - rx_cons3\n",
7717 			sblk->status_rx_quick_consumer_index3,
7718 			(u16) RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index3));
7719 
7720 	if (sblk->status_tx_quick_consumer_index3)
7721 		BCE_PRINTF("0x%04X(0x%04X) - tx_cons3\n",
7722 			sblk->status_tx_quick_consumer_index3,
7723 			(u16) TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index3));
7724 
7725 	if (sblk->status_rx_quick_consumer_index4 ||
7726 		sblk->status_rx_quick_consumer_index5)
7727 		BCE_PRINTF("rx_cons4  = 0x%08X, rx_cons5      = 0x%08X\n",
7728 			sblk->status_rx_quick_consumer_index4,
7729 			sblk->status_rx_quick_consumer_index5);
7730 
7731 	if (sblk->status_rx_quick_consumer_index6 ||
7732 		sblk->status_rx_quick_consumer_index7)
7733 		BCE_PRINTF("rx_cons6  = 0x%08X, rx_cons7      = 0x%08X\n",
7734 			sblk->status_rx_quick_consumer_index6,
7735 			sblk->status_rx_quick_consumer_index7);
7736 
7737 	if (sblk->status_rx_quick_consumer_index8 ||
7738 		sblk->status_rx_quick_consumer_index9)
7739 		BCE_PRINTF("rx_cons8  = 0x%08X, rx_cons9      = 0x%08X\n",
7740 			sblk->status_rx_quick_consumer_index8,
7741 			sblk->status_rx_quick_consumer_index9);
7742 
7743 	if (sblk->status_rx_quick_consumer_index10 ||
7744 		sblk->status_rx_quick_consumer_index11)
7745 		BCE_PRINTF("rx_cons10 = 0x%08X, rx_cons11     = 0x%08X\n",
7746 			sblk->status_rx_quick_consumer_index10,
7747 			sblk->status_rx_quick_consumer_index11);
7748 
7749 	if (sblk->status_rx_quick_consumer_index12 ||
7750 		sblk->status_rx_quick_consumer_index13)
7751 		BCE_PRINTF("rx_cons12 = 0x%08X, rx_cons13     = 0x%08X\n",
7752 			sblk->status_rx_quick_consumer_index12,
7753 			sblk->status_rx_quick_consumer_index13);
7754 
7755 	if (sblk->status_rx_quick_consumer_index14 ||
7756 		sblk->status_rx_quick_consumer_index15)
7757 		BCE_PRINTF("rx_cons14 = 0x%08X, rx_cons15     = 0x%08X\n",
7758 			sblk->status_rx_quick_consumer_index14,
7759 			sblk->status_rx_quick_consumer_index15);
7760 
7761 	if (sblk->status_completion_producer_index ||
7762 		sblk->status_cmd_consumer_index)
7763 		BCE_PRINTF("com_prod  = 0x%08X, cmd_cons      = 0x%08X\n",
7764 			sblk->status_completion_producer_index,
7765 			sblk->status_cmd_consumer_index);
7766 
7767 	BCE_PRINTF(
7768 		"----------------------------"
7769 		"----------------"
7770 		"----------------------------\n");
7771 }
7772 
7773 
7774 /****************************************************************************/
7775 /* Prints out the statistics block from host memory.                        */
7776 /*                                                                          */
7777 /* Returns:                                                                 */
7778 /*   Nothing.                                                               */
7779 /****************************************************************************/
7780 static void
7781 bce_dump_stats_block(struct bce_softc *sc)
7782 {
7783 	struct statistics_block *sblk;
7784 
7785 	sblk = sc->stats_block;
7786 
7787 	BCE_PRINTF(
7788 		"---------------"
7789 		" Stats Block  (All Stats Not Shown Are 0) "
7790 		"---------------\n");
7791 
7792 	if (sblk->stat_IfHCInOctets_hi
7793 		|| sblk->stat_IfHCInOctets_lo)
7794 		BCE_PRINTF("0x%08X:%08X : "
7795 			"IfHcInOctets\n",
7796 			sblk->stat_IfHCInOctets_hi,
7797 			sblk->stat_IfHCInOctets_lo);
7798 
7799 	if (sblk->stat_IfHCInBadOctets_hi
7800 		|| sblk->stat_IfHCInBadOctets_lo)
7801 		BCE_PRINTF("0x%08X:%08X : "
7802 			"IfHcInBadOctets\n",
7803 			sblk->stat_IfHCInBadOctets_hi,
7804 			sblk->stat_IfHCInBadOctets_lo);
7805 
7806 	if (sblk->stat_IfHCOutOctets_hi
7807 		|| sblk->stat_IfHCOutOctets_lo)
7808 		BCE_PRINTF("0x%08X:%08X : "
7809 			"IfHcOutOctets\n",
7810 			sblk->stat_IfHCOutOctets_hi,
7811 			sblk->stat_IfHCOutOctets_lo);
7812 
7813 	if (sblk->stat_IfHCOutBadOctets_hi
7814 		|| sblk->stat_IfHCOutBadOctets_lo)
7815 		BCE_PRINTF("0x%08X:%08X : "
7816 			"IfHcOutBadOctets\n",
7817 			sblk->stat_IfHCOutBadOctets_hi,
7818 			sblk->stat_IfHCOutBadOctets_lo);
7819 
7820 	if (sblk->stat_IfHCInUcastPkts_hi
7821 		|| sblk->stat_IfHCInUcastPkts_lo)
7822 		BCE_PRINTF("0x%08X:%08X : "
7823 			"IfHcInUcastPkts\n",
7824 			sblk->stat_IfHCInUcastPkts_hi,
7825 			sblk->stat_IfHCInUcastPkts_lo);
7826 
7827 	if (sblk->stat_IfHCInBroadcastPkts_hi
7828 		|| sblk->stat_IfHCInBroadcastPkts_lo)
7829 		BCE_PRINTF("0x%08X:%08X : "
7830 			"IfHcInBroadcastPkts\n",
7831 			sblk->stat_IfHCInBroadcastPkts_hi,
7832 			sblk->stat_IfHCInBroadcastPkts_lo);
7833 
7834 	if (sblk->stat_IfHCInMulticastPkts_hi
7835 		|| sblk->stat_IfHCInMulticastPkts_lo)
7836 		BCE_PRINTF("0x%08X:%08X : "
7837 			"IfHcInMulticastPkts\n",
7838 			sblk->stat_IfHCInMulticastPkts_hi,
7839 			sblk->stat_IfHCInMulticastPkts_lo);
7840 
7841 	if (sblk->stat_IfHCOutUcastPkts_hi
7842 		|| sblk->stat_IfHCOutUcastPkts_lo)
7843 		BCE_PRINTF("0x%08X:%08X : "
7844 			"IfHcOutUcastPkts\n",
7845 			sblk->stat_IfHCOutUcastPkts_hi,
7846 			sblk->stat_IfHCOutUcastPkts_lo);
7847 
7848 	if (sblk->stat_IfHCOutBroadcastPkts_hi
7849 		|| sblk->stat_IfHCOutBroadcastPkts_lo)
7850 		BCE_PRINTF("0x%08X:%08X : "
7851 			"IfHcOutBroadcastPkts\n",
7852 			sblk->stat_IfHCOutBroadcastPkts_hi,
7853 			sblk->stat_IfHCOutBroadcastPkts_lo);
7854 
7855 	if (sblk->stat_IfHCOutMulticastPkts_hi
7856 		|| sblk->stat_IfHCOutMulticastPkts_lo)
7857 		BCE_PRINTF("0x%08X:%08X : "
7858 			"IfHcOutMulticastPkts\n",
7859 			sblk->stat_IfHCOutMulticastPkts_hi,
7860 			sblk->stat_IfHCOutMulticastPkts_lo);
7861 
7862 	if (sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors)
7863 		BCE_PRINTF("         0x%08X : "
7864 			"emac_tx_stat_dot3statsinternalmactransmiterrors\n",
7865 			sblk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors);
7866 
7867 	if (sblk->stat_Dot3StatsCarrierSenseErrors)
7868 		BCE_PRINTF("         0x%08X : Dot3StatsCarrierSenseErrors\n",
7869 			sblk->stat_Dot3StatsCarrierSenseErrors);
7870 
7871 	if (sblk->stat_Dot3StatsFCSErrors)
7872 		BCE_PRINTF("         0x%08X : Dot3StatsFCSErrors\n",
7873 			sblk->stat_Dot3StatsFCSErrors);
7874 
7875 	if (sblk->stat_Dot3StatsAlignmentErrors)
7876 		BCE_PRINTF("         0x%08X : Dot3StatsAlignmentErrors\n",
7877 			sblk->stat_Dot3StatsAlignmentErrors);
7878 
7879 	if (sblk->stat_Dot3StatsSingleCollisionFrames)
7880 		BCE_PRINTF("         0x%08X : Dot3StatsSingleCollisionFrames\n",
7881 			sblk->stat_Dot3StatsSingleCollisionFrames);
7882 
7883 	if (sblk->stat_Dot3StatsMultipleCollisionFrames)
7884 		BCE_PRINTF("         0x%08X : Dot3StatsMultipleCollisionFrames\n",
7885 			sblk->stat_Dot3StatsMultipleCollisionFrames);
7886 
7887 	if (sblk->stat_Dot3StatsDeferredTransmissions)
7888 		BCE_PRINTF("         0x%08X : Dot3StatsDeferredTransmissions\n",
7889 			sblk->stat_Dot3StatsDeferredTransmissions);
7890 
7891 	if (sblk->stat_Dot3StatsExcessiveCollisions)
7892 		BCE_PRINTF("         0x%08X : Dot3StatsExcessiveCollisions\n",
7893 			sblk->stat_Dot3StatsExcessiveCollisions);
7894 
7895 	if (sblk->stat_Dot3StatsLateCollisions)
7896 		BCE_PRINTF("         0x%08X : Dot3StatsLateCollisions\n",
7897 			sblk->stat_Dot3StatsLateCollisions);
7898 
7899 	if (sblk->stat_EtherStatsCollisions)
7900 		BCE_PRINTF("         0x%08X : EtherStatsCollisions\n",
7901 			sblk->stat_EtherStatsCollisions);
7902 
7903 	if (sblk->stat_EtherStatsFragments)
7904 		BCE_PRINTF("         0x%08X : EtherStatsFragments\n",
7905 			sblk->stat_EtherStatsFragments);
7906 
7907 	if (sblk->stat_EtherStatsJabbers)
7908 		BCE_PRINTF("         0x%08X : EtherStatsJabbers\n",
7909 			sblk->stat_EtherStatsJabbers);
7910 
7911 	if (sblk->stat_EtherStatsUndersizePkts)
7912 		BCE_PRINTF("         0x%08X : EtherStatsUndersizePkts\n",
7913 			sblk->stat_EtherStatsUndersizePkts);
7914 
7915 	if (sblk->stat_EtherStatsOverrsizePkts)
7916 		BCE_PRINTF("         0x%08X : EtherStatsOverrsizePkts\n",
7917 			sblk->stat_EtherStatsOverrsizePkts);
7918 
7919 	if (sblk->stat_EtherStatsPktsRx64Octets)
7920 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx64Octets\n",
7921 			sblk->stat_EtherStatsPktsRx64Octets);
7922 
7923 	if (sblk->stat_EtherStatsPktsRx65Octetsto127Octets)
7924 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx65Octetsto127Octets\n",
7925 			sblk->stat_EtherStatsPktsRx65Octetsto127Octets);
7926 
7927 	if (sblk->stat_EtherStatsPktsRx128Octetsto255Octets)
7928 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx128Octetsto255Octets\n",
7929 			sblk->stat_EtherStatsPktsRx128Octetsto255Octets);
7930 
7931 	if (sblk->stat_EtherStatsPktsRx256Octetsto511Octets)
7932 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx256Octetsto511Octets\n",
7933 			sblk->stat_EtherStatsPktsRx256Octetsto511Octets);
7934 
7935 	if (sblk->stat_EtherStatsPktsRx512Octetsto1023Octets)
7936 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx512Octetsto1023Octets\n",
7937 			sblk->stat_EtherStatsPktsRx512Octetsto1023Octets);
7938 
7939 	if (sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets)
7940 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx1024Octetsto1522Octets\n",
7941 			sblk->stat_EtherStatsPktsRx1024Octetsto1522Octets);
7942 
7943 	if (sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets)
7944 		BCE_PRINTF("         0x%08X : EtherStatsPktsRx1523Octetsto9022Octets\n",
7945 			sblk->stat_EtherStatsPktsRx1523Octetsto9022Octets);
7946 
7947 	if (sblk->stat_EtherStatsPktsTx64Octets)
7948 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx64Octets\n",
7949 			sblk->stat_EtherStatsPktsTx64Octets);
7950 
7951 	if (sblk->stat_EtherStatsPktsTx65Octetsto127Octets)
7952 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx65Octetsto127Octets\n",
7953 			sblk->stat_EtherStatsPktsTx65Octetsto127Octets);
7954 
7955 	if (sblk->stat_EtherStatsPktsTx128Octetsto255Octets)
7956 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx128Octetsto255Octets\n",
7957 			sblk->stat_EtherStatsPktsTx128Octetsto255Octets);
7958 
7959 	if (sblk->stat_EtherStatsPktsTx256Octetsto511Octets)
7960 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx256Octetsto511Octets\n",
7961 			sblk->stat_EtherStatsPktsTx256Octetsto511Octets);
7962 
7963 	if (sblk->stat_EtherStatsPktsTx512Octetsto1023Octets)
7964 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx512Octetsto1023Octets\n",
7965 			sblk->stat_EtherStatsPktsTx512Octetsto1023Octets);
7966 
7967 	if (sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets)
7968 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx1024Octetsto1522Octets\n",
7969 			sblk->stat_EtherStatsPktsTx1024Octetsto1522Octets);
7970 
7971 	if (sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets)
7972 		BCE_PRINTF("         0x%08X : EtherStatsPktsTx1523Octetsto9022Octets\n",
7973 			sblk->stat_EtherStatsPktsTx1523Octetsto9022Octets);
7974 
7975 	if (sblk->stat_XonPauseFramesReceived)
7976 		BCE_PRINTF("         0x%08X : XonPauseFramesReceived\n",
7977 			sblk->stat_XonPauseFramesReceived);
7978 
7979 	if (sblk->stat_XoffPauseFramesReceived)
7980 		BCE_PRINTF("         0x%08X : XoffPauseFramesReceived\n",
7981 			sblk->stat_XoffPauseFramesReceived);
7982 
7983 	if (sblk->stat_OutXonSent)
7984 		BCE_PRINTF("         0x%08X : OutXonSent\n",
7985 			sblk->stat_OutXonSent);
7986 
7987 	if (sblk->stat_OutXoffSent)
7988 		BCE_PRINTF("         0x%08X : OutXoffSent\n",
7989 			sblk->stat_OutXoffSent);
7990 
7991 	if (sblk->stat_FlowControlDone)
7992 		BCE_PRINTF("         0x%08X : FlowControlDone\n",
7993 			sblk->stat_FlowControlDone);
7994 
7995 	if (sblk->stat_MacControlFramesReceived)
7996 		BCE_PRINTF("         0x%08X : MacControlFramesReceived\n",
7997 			sblk->stat_MacControlFramesReceived);
7998 
7999 	if (sblk->stat_XoffStateEntered)
8000 		BCE_PRINTF("         0x%08X : XoffStateEntered\n",
8001 			sblk->stat_XoffStateEntered);
8002 
8003 	if (sblk->stat_IfInFramesL2FilterDiscards)
8004 		BCE_PRINTF("         0x%08X : IfInFramesL2FilterDiscards\n",
8005 			sblk->stat_IfInFramesL2FilterDiscards);
8006 
8007 	if (sblk->stat_IfInRuleCheckerDiscards)
8008 		BCE_PRINTF("         0x%08X : IfInRuleCheckerDiscards\n",
8009 			sblk->stat_IfInRuleCheckerDiscards);
8010 
8011 	if (sblk->stat_IfInFTQDiscards)
8012 		BCE_PRINTF("         0x%08X : IfInFTQDiscards\n",
8013 			sblk->stat_IfInFTQDiscards);
8014 
8015 	if (sblk->stat_IfInMBUFDiscards)
8016 		BCE_PRINTF("         0x%08X : IfInMBUFDiscards\n",
8017 			sblk->stat_IfInMBUFDiscards);
8018 
8019 	if (sblk->stat_IfInRuleCheckerP4Hit)
8020 		BCE_PRINTF("         0x%08X : IfInRuleCheckerP4Hit\n",
8021 			sblk->stat_IfInRuleCheckerP4Hit);
8022 
8023 	if (sblk->stat_CatchupInRuleCheckerDiscards)
8024 		BCE_PRINTF("         0x%08X : CatchupInRuleCheckerDiscards\n",
8025 			sblk->stat_CatchupInRuleCheckerDiscards);
8026 
8027 	if (sblk->stat_CatchupInFTQDiscards)
8028 		BCE_PRINTF("         0x%08X : CatchupInFTQDiscards\n",
8029 			sblk->stat_CatchupInFTQDiscards);
8030 
8031 	if (sblk->stat_CatchupInMBUFDiscards)
8032 		BCE_PRINTF("         0x%08X : CatchupInMBUFDiscards\n",
8033 			sblk->stat_CatchupInMBUFDiscards);
8034 
8035 	if (sblk->stat_CatchupInRuleCheckerP4Hit)
8036 		BCE_PRINTF("         0x%08X : CatchupInRuleCheckerP4Hit\n",
8037 			sblk->stat_CatchupInRuleCheckerP4Hit);
8038 
8039 	BCE_PRINTF(
8040 		"----------------------------"
8041 		"----------------"
8042 		"----------------------------\n");
8043 }
8044 
8045 
8046 /****************************************************************************/
8047 /* Prints out a summary of the driver state.                                */
8048 /*                                                                          */
8049 /* Returns:                                                                 */
8050 /*   Nothing.                                                               */
8051 /****************************************************************************/
8052 static void
8053 bce_dump_driver_state(struct bce_softc *sc)
8054 {
8055 	u32 val_hi, val_lo;
8056 
8057 	BCE_PRINTF(
8058 		"-----------------------------"
8059 		" Driver State "
8060 		"-----------------------------\n");
8061 
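	/*
	 * BCE_ADDR_HI()/BCE_ADDR_LO() (from if_bcereg.h) split a pointer
	 * into its upper and lower 32 bits so that 64-bit virtual addresses
	 * can be printed with two 32-bit conversions; on 32-bit platforms
	 * the upper half is expected to print as zero.
	 */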
8062 	val_hi = BCE_ADDR_HI(sc);
8063 	val_lo = BCE_ADDR_LO(sc);
8064 	BCE_PRINTF("0x%08X:%08X - (sc) driver softc structure virtual address\n",
8065 		val_hi, val_lo);
8066 
8067 	val_hi = BCE_ADDR_HI(sc->bce_vhandle);
8068 	val_lo = BCE_ADDR_LO(sc->bce_vhandle);
8069 	BCE_PRINTF("0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual address\n",
8070 		val_hi, val_lo);
8071 
8072 	val_hi = BCE_ADDR_HI(sc->status_block);
8073 	val_lo = BCE_ADDR_LO(sc->status_block);
8074 	BCE_PRINTF("0x%08X:%08X - (sc->status_block) status block virtual address\n",
8075 		val_hi, val_lo);
8076 
8077 	val_hi = BCE_ADDR_HI(sc->stats_block);
8078 	val_lo = BCE_ADDR_LO(sc->stats_block);
8079 	BCE_PRINTF("0x%08X:%08X - (sc->stats_block) statistics block virtual address\n",
8080 		val_hi, val_lo);
8081 
8082 	val_hi = BCE_ADDR_HI(sc->tx_bd_chain);
8083 	val_lo = BCE_ADDR_LO(sc->tx_bd_chain);
8084 	BCE_PRINTF(
8085 		"0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain virtual adddress\n",
8086 		val_hi, val_lo);
8087 
8088 	val_hi = BCE_ADDR_HI(sc->rx_bd_chain);
8089 	val_lo = BCE_ADDR_LO(sc->rx_bd_chain);
8090 	BCE_PRINTF(
8091 		"0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain virtual address\n",
8092 		val_hi, val_lo);
8093 
8094 	val_hi = BCE_ADDR_HI(sc->pg_bd_chain);
8095 	val_lo = BCE_ADDR_LO(sc->pg_bd_chain);
8096 	BCE_PRINTF(
8097 		"0x%08X:%08X - (sc->pg_bd_chain) page chain virtual address\n",
8098 		val_hi, val_lo);
8099 
8100 	val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
8101 	val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
8102 	BCE_PRINTF(
8103 		"0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain virtual address\n",
8104 		val_hi, val_lo);
8105 
8106 	val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr);
8107 	val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr);
8108 	BCE_PRINTF(
8109 		"0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
8110 		val_hi, val_lo);
8111 
8112 	val_hi = BCE_ADDR_HI(sc->pg_mbuf_ptr);
8113 	val_lo = BCE_ADDR_LO(sc->pg_mbuf_ptr);
8114 	BCE_PRINTF(
8115 		"0x%08X:%08X - (sc->pg_mbuf_ptr) page mbuf chain virtual address\n",
8116 		val_hi, val_lo);
8117 
8118 	BCE_PRINTF("         0x%08X - (sc->interrupts_generated) h/w intrs\n",
8119 		sc->interrupts_generated);
8120 
8121 	BCE_PRINTF("         0x%08X - (sc->rx_interrupts) rx interrupts handled\n",
8122 		sc->rx_interrupts);
8123 
8124 	BCE_PRINTF("         0x%08X - (sc->tx_interrupts) tx interrupts handled\n",
8125 		sc->tx_interrupts);
8126 
8127 	BCE_PRINTF("         0x%08X - (sc->last_status_idx) status block index\n",
8128 		sc->last_status_idx);
8129 
8130 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->tx_prod) tx producer index\n",
8131 		sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod));
8132 
8133 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->tx_cons) tx consumer index\n",
8134 		sc->tx_cons, (u16) TX_CHAIN_IDX(sc->tx_cons));
8135 
8136 	BCE_PRINTF("         0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
8137 		sc->tx_prod_bseq);
8138 
8139 	BCE_PRINTF("         0x%08X - (sc->debug_tx_mbuf_alloc) tx mbufs allocated\n",
8140 		sc->debug_tx_mbuf_alloc);
8141 
8142 	BCE_PRINTF("         0x%08X - (sc->used_tx_bd) used tx_bd's\n",
8143 		sc->used_tx_bd);
8144 
8145 	BCE_PRINTF("0x%08X/%08X - (sc->tx_hi_watermark) tx hi watermark\n",
8146 		sc->tx_hi_watermark, sc->max_tx_bd);
8147 
8148 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->rx_prod) rx producer index\n",
8149 		sc->rx_prod, (u16) RX_CHAIN_IDX(sc->rx_prod));
8150 
8151 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->rx_cons) rx consumer index\n",
8152 		sc->rx_cons, (u16) RX_CHAIN_IDX(sc->rx_cons));
8153 
8154 	BCE_PRINTF("         0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
8155 		sc->rx_prod_bseq);
8156 
8157 	BCE_PRINTF("         0x%08X - (sc->debug_rx_mbuf_alloc) rx mbufs allocated\n",
8158 		sc->debug_rx_mbuf_alloc);
8159 
8160 	BCE_PRINTF("         0x%08X - (sc->free_rx_bd) free rx_bd's\n",
8161 		sc->free_rx_bd);
8162 
8163 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->pg_prod) page producer index\n",
8164 		sc->pg_prod, (u16) PG_CHAIN_IDX(sc->pg_prod));
8165 
8166 	BCE_PRINTF("     0x%04X(0x%04X) - (sc->pg_cons) page consumer index\n",
8167 		sc->pg_cons, (u16) PG_CHAIN_IDX(sc->pg_cons));
8168 
8169 	BCE_PRINTF("         0x%08X - (sc->debug_pg_mbuf_alloc) page mbufs allocated\n",
8170 		sc->debug_pg_mbuf_alloc);
8171 
8172 	BCE_PRINTF("         0x%08X - (sc->free_pg_bd) free page rx_bd's\n",
8173 		sc->free_pg_bd);
8174 
8175 	BCE_PRINTF("0x%08X/%08X - (sc->pg_low_watermark) page low watermark\n",
8176 		sc->pg_low_watermark, sc->max_pg_bd);
8177 
8178 	BCE_PRINTF("         0x%08X - (sc->mbuf_alloc_failed) "
8179 		"mbuf alloc failures\n",
8180 		sc->mbuf_alloc_failed);
8181 
8182 	BCE_PRINTF("         0x%08X - (sc->debug_mbuf_sim_alloc_failed) "
8183 		"simulated mbuf alloc failures\n",
8184 		sc->debug_mbuf_sim_alloc_failed);
8185 
8186 	BCE_PRINTF(
8187 		"----------------------------"
8188 		"----------------"
8189 		"----------------------------\n");
8190 }
8191 
8192 
8193 /****************************************************************************/
8194 /* Prints out the hardware state through a summary of important registers,  */
8195 /* followed by a complete register dump.                                    */
8196 /*                                                                          */
8197 /* Returns:                                                                 */
8198 /*   Nothing.                                                               */
8199 /****************************************************************************/
8200 static void
8201 bce_dump_hw_state(struct bce_softc *sc)
8202 {
8203 	u32 val;
8204 
8205 	BCE_PRINTF(
8206 		"----------------------------"
8207 		" Hardware State "
8208 		"----------------------------\n");
8209 
8210 	BCE_PRINTF("0x%08X - bootcode version\n", sc->bce_fw_ver);
8211 
8212 	val = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
8213 	BCE_PRINTF("0x%08X - (0x%06X) misc_enable_status_bits\n",
8214 		val, BCE_MISC_ENABLE_STATUS_BITS);
8215 
8216 	val = REG_RD(sc, BCE_DMA_STATUS);
8217 	BCE_PRINTF("0x%08X - (0x%06X) dma_status\n", val, BCE_DMA_STATUS);
8218 
8219 	val = REG_RD(sc, BCE_CTX_STATUS);
8220 	BCE_PRINTF("0x%08X - (0x%06X) ctx_status\n", val, BCE_CTX_STATUS);
8221 
8222 	val = REG_RD(sc, BCE_EMAC_STATUS);
8223 	BCE_PRINTF("0x%08X - (0x%06X) emac_status\n", val, BCE_EMAC_STATUS);
8224 
8225 	val = REG_RD(sc, BCE_RPM_STATUS);
8226 	BCE_PRINTF("0x%08X - (0x%06X) rpm_status\n", val, BCE_RPM_STATUS);
8227 
8228 	val = REG_RD(sc, 0x2004);
8229 	BCE_PRINTF("0x%08X - (0x%06X) rlup_status\n", val, 0x2004);
8230 
8231 	val = REG_RD(sc, BCE_RV2P_STATUS);
8232 	BCE_PRINTF("0x%08X - (0x%06X) rv2p_status\n", val, BCE_RV2P_STATUS);
8233 
8234 	val = REG_RD(sc, 0x2c04);
8235 	BCE_PRINTF("0x%08X - (0x%06X) rdma_status\n", val, 0x2c04);
8236 
8237 	val = REG_RD(sc, BCE_TBDR_STATUS);
8238 	BCE_PRINTF("0x%08X - (0x%06X) tbdr_status\n", val, BCE_TBDR_STATUS);
8239 
8240 	val = REG_RD(sc, BCE_TDMA_STATUS);
8241 	BCE_PRINTF("0x%08X - (0x%06X) tdma_status\n", val, BCE_TDMA_STATUS);
8242 
8243 	val = REG_RD(sc, BCE_HC_STATUS);
8244 	BCE_PRINTF("0x%08X - (0x%06X) hc_status\n", val, BCE_HC_STATUS);
8245 
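	/*
	 * The per-CPU state registers below are read with indirect register
	 * accesses (REG_RD_IND) rather than direct REG_RD() reads of the
	 * memory-mapped BAR.
	 */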
8246 	val = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
8247 	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val, BCE_TXP_CPU_STATE);
8248 
8249 	val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
8250 	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val, BCE_TPAT_CPU_STATE);
8251 
8252 	val = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
8253 	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val, BCE_RXP_CPU_STATE);
8254 
8255 	val = REG_RD_IND(sc, BCE_COM_CPU_STATE);
8256 	BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", val, BCE_COM_CPU_STATE);
8257 
8258 	val = REG_RD_IND(sc, BCE_MCP_CPU_STATE);
8259 	BCE_PRINTF("0x%08X - (0x%06X) mcp_cpu_state\n", val, BCE_MCP_CPU_STATE);
8260 
8261 	val = REG_RD_IND(sc, BCE_CP_CPU_STATE);
8262 	BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", val, BCE_CP_CPU_STATE);
8263 
8264 	BCE_PRINTF(
8265 		"----------------------------"
8266 		"----------------"
8267 		"----------------------------\n");
8268 
8269 	BCE_PRINTF(
8270 		"----------------------------"
8271 		" Register  Dump "
8272 		"----------------------------\n");
8273 
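	/*
	 * Dump the memory-mapped register space from 0x400 through 0x7FFF,
	 * sixteen bytes (four 32-bit registers) per line.
	 */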
8274 	for (int i = 0x400; i < 0x8000; i += 0x10) {
8275 		BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
8276 			i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
8277 			REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
8278 	}
8279 
8280 	BCE_PRINTF(
8281 		"----------------------------"
8282 		"----------------"
8283 		"----------------------------\n");
8284 }
8285 
8286 
8287 /****************************************************************************/
8288 /* Prints out the bootcode state.                                           */
8289 /*                                                                          */
8290 /* Returns:                                                                 */
8291 /*   Nothing.                                                               */
8292 /****************************************************************************/
8293 static void
8294 bce_dump_bc_state(struct bce_softc *sc)
8295 {
8296 	u32 val;
8297 
8298 	BCE_PRINTF(
8299 		"----------------------------"
8300 		" Bootcode State "
8301 		"----------------------------\n");
8302 
8303 	BCE_PRINTF("0x%08X - bootcode version\n", sc->bce_fw_ver);
8304 
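	/*
	 * The bootcode state words live in the shared memory region exported
	 * by the bootcode, so each is read indirectly at an offset from
	 * sc->bce_shmem_base.
	 */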
8305 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_RESET_TYPE);
8306 	BCE_PRINTF("0x%08X - (0x%06X) reset_type\n",
8307 		val, BCE_BC_RESET_TYPE);
8308 
8309 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_STATE);
8310 	BCE_PRINTF("0x%08X - (0x%06X) state\n",
8311 		val, BCE_BC_STATE);
8312 
8313 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_CONDITION);
8314 	BCE_PRINTF("0x%08X - (0x%06X) condition\n",
8315 		val, BCE_BC_CONDITION);
8316 
8317 	val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_BC_STATE_DEBUG_CMD);
8318 	BCE_PRINTF("0x%08X - (0x%06X) debug_cmd\n",
8319 		val, BCE_BC_STATE_DEBUG_CMD);
8320 
8321 	BCE_PRINTF(
8322 		"----------------------------"
8323 		"----------------"
8324 		"----------------------------\n");
8325 }
8326 
8327 
8328 /****************************************************************************/
8329 /* Prints out the TXP state.                                                */
8330 /*                                                                          */
8331 /* Returns:                                                                 */
8332 /*   Nothing.                                                               */
8333 /****************************************************************************/
8334 static void
8335 bce_dump_txp_state(struct bce_softc *sc)
8336 {
8337 	u32 val1;
8338 
8339 	BCE_PRINTF(
8340 		"----------------------------"
8341 		"   TXP  State   "
8342 		"----------------------------\n");
8343 
8344 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_MODE);
8345 	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_mode\n", val1, BCE_TXP_CPU_MODE);
8346 
8347 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
8348 	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val1, BCE_TXP_CPU_STATE);
8349 
8350 	val1 = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK);
8351 	BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_event_mask\n", val1, BCE_TXP_CPU_EVENT_MASK);
8352 
8353 	BCE_PRINTF(
8354 		"----------------------------"
8355 		" Register  Dump "
8356 		"----------------------------\n");
8357 
8358 	for (int i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) {
8359 		/* Skip the big blank space between the registers and scratchpad. */
8360 		if (i < 0x45400 || i > 0x5ffff)
8361 			BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
8362 				i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
8363 				REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
8364 	}
8365 
8366 	BCE_PRINTF(
8367 		"----------------------------"
8368 		"----------------"
8369 		"----------------------------\n");
8370 }
8371 
8372 
8373 /****************************************************************************/
8374 /* Prints out the RXP state.                                                */
8375 /*                                                                          */
8376 /* Returns:                                                                 */
8377 /*   Nothing.                                                               */
8378 /****************************************************************************/
8379 static void
8380 bce_dump_rxp_state(struct bce_softc *sc)
8381 {
8382 	u32 val1;
8383 
8384 	BCE_PRINTF(
8385 		"----------------------------"
8386 		"   RXP  State   "
8387 		"----------------------------\n");
8388 
8389 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_MODE);
8390 	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_mode\n", val1, BCE_RXP_CPU_MODE);
8391 
8392 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
8393 	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val1, BCE_RXP_CPU_STATE);
8394 
8395 	val1 = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK);
8396 	BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_event_mask\n", val1, BCE_RXP_CPU_EVENT_MASK);
8397 
8398 	BCE_PRINTF(
8399 		"----------------------------"
8400 		" Register  Dump "
8401 		"----------------------------\n");
8402 
8403 	for (int i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) {
8404 		/* Skip the big blank space between the registers and scratchpad. */
8405 		if (i < 0xc5400 || i > 0xdffff)
8406 			BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
8407 				i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
8408 				REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
8409 	}
8410 
8411 	BCE_PRINTF(
8412 		"----------------------------"
8413 		"----------------"
8414 		"----------------------------\n");
8415 }
8416 
8417 
8418 /****************************************************************************/
8419 /* Prints out the TPAT state.                                               */
8420 /*                                                                          */
8421 /* Returns:                                                                 */
8422 /*   Nothing.                                                               */
8423 /****************************************************************************/
8424 static void
8425 bce_dump_tpat_state(struct bce_softc *sc)
8426 {
8427 	u32 val1;
8428 
8429 	BCE_PRINTF(
8430 		"----------------------------"
8431 		"   TPAT State   "
8432 		"----------------------------\n");
8433 
8434 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_MODE);
8435 	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_mode\n", val1, BCE_TPAT_CPU_MODE);
8436 
8437 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
8438 	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val1, BCE_TPAT_CPU_STATE);
8439 
8440 	val1 = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK);
8441 	BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_event_mask\n", val1, BCE_TPAT_CPU_EVENT_MASK);
8442 
8443 	BCE_PRINTF(
8444 		"----------------------------"
8445 		" Register  Dump "
8446 		"----------------------------\n");
8447 
8448 	for (int i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) {
8449 		/* Skip the big blank space between the registers and scratchpad. */
8450 		if (i < 0x85400 || i > 0x9ffff)
8451 			BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
8452 				i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4),
8453 				REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC));
8454 	}
8455 
8456 	BCE_PRINTF(
8457 		"----------------------------"
8458 		"----------------"
8459 		"----------------------------\n");
8460 }
8461 
8462 
8463 /* ToDo: Add CP and COM processor state dumps. */
8464 
8465 
8466 /****************************************************************************/
8467 /* Prints out the driver state and then enters the debugger.                */
8468 /*                                                                          */
8469 /* Returns:                                                                 */
8470 /*   Nothing.                                                               */
8471 /****************************************************************************/
8472 static void
8473 bce_breakpoint(struct bce_softc *sc)
8474 {
8475 
8476 	/*
8477 	 * Unreachable code to silence compiler warnings
8478 	 * about unused functions.
8479 	 */
8480 	if (0) {
8481 		bce_freeze_controller(sc);
8482 		bce_unfreeze_controller(sc);
8483 		bce_dump_txbd(sc, 0, NULL);
8484 		bce_dump_rxbd(sc, 0, NULL);
8485 		bce_dump_pgbd(sc, 0, NULL);
8486 		bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
8487 		bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
8488 		bce_dump_pg_mbuf_chain(sc, 0, USABLE_PG_BD);
8489 		bce_dump_l2fhdr(sc, 0, NULL);
8490 		bce_dump_ctx(sc, RX_CID);
8491 		bce_dump_ftqs(sc);
8492 		bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
8493 		bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
8494 		bce_dump_pg_chain(sc, 0, USABLE_PG_BD);
8495 		bce_dump_status_block(sc);
8496 		bce_dump_stats_block(sc);
8497 		bce_dump_driver_state(sc);
8498 		bce_dump_hw_state(sc);
8499 		bce_dump_bc_state(sc);
8500 		bce_dump_txp_state(sc);
8501 		bce_dump_rxp_state(sc);
8502 		bce_dump_tpat_state(sc);
8503 	}
8504 
8505 	bce_dump_status_block(sc);
8506 	bce_dump_driver_state(sc);
8507 
8508 	/* Call the debugger. */
8509 	breakpoint();
8510 
8511 	return;
8512 }
8513 #endif
8514 
8515